Change e letters to i in words

Change e letters to i in words is a draft programming task. It is not yet considered ready to be promoted as a complete task, for reasons that should be found in its talk page.
Task

Use the dictionary   unixdict.txt

Change the letter   e   to   i   in words.

If the changed word is in the dictionary,   show it here on this page.

Any word shown should have a length   >  5.


Other tasks related to string operations:
Metrics
Counting
Remove/replace
Anagrams/Derangements/shuffling
Find/Search/Determine
Formatting
Song lyrics/poems/Mad Libs/phrases
Tokenize
Sequences



11l

Translation of: Nim
V words = File(‘unixdict.txt’).read().split("\n")
V words_set = Set(words)

L(word) words
   I word.len > 5
      V new_word = word.replace(‘e’, ‘i’)
      I new_word != word & new_word C words_set
         print(word‘ -> ’new_word)
Output:
analyses -> analysis
atlantes -> atlantis
bellow -> billow
breton -> briton
clench -> clinch
convect -> convict
crises -> crisis
diagnoses -> diagnosis
enfant -> infant
enquiry -> inquiry
frances -> francis
galatea -> galatia
harden -> hardin
heckman -> hickman
inequity -> iniquity
inflect -> inflict
jacobean -> jacobian
marten -> martin
module -> moduli
pegging -> pigging
psychoses -> psychosis
rabbet -> rabbit
sterling -> stirling
synopses -> synopsis
vector -> victor
welles -> willis

Ada

with Ada.Text_Io;
with Ada.Strings.Fixed;
with Ada.Strings.Maps;
with Ada.Containers.Indefinite_Ordered_Maps;

procedure Change_E_To_I is
   use Ada.Text_Io;
   use Ada.Strings;

   Filename : constant String := "unixdict.txt";
   Mapping  : constant Maps.Character_Mapping :=
     Maps.To_Mapping ("Ee", "Ii");

   package Dictionaries is
     new Ada.Containers.Indefinite_Ordered_Maps
       (Key_Type     => String,
        Element_Type => String);

   Dict    : Dictionaries.Map;
   File    : File_Type;
begin
   Open (File, In_File, Filename);
   while not End_Of_File (File) loop
      declare
         Word : constant String := Get_Line (File);
      begin
         Dict.Insert (Word, Word);
      end;
   end loop;
   Close (File);

   for Word of Dict loop
      declare
         Trans : constant String := Fixed.Translate (Word, Mapping);
      begin
         if Word /= Trans and Dict.Contains (Trans) and Word'Length >= 6 then
            Put (Word); Put (" -> "); Put (Trans); New_Line;
         end if;
      end;
   end loop;
end Change_E_To_I;
Output:
analyses -> analysis
atlantes -> atlantis
bellow -> billow
breton -> briton
clench -> clinch
convect -> convict
crises -> crisis
diagnoses -> diagnosis
enfant -> infant
enquiry -> inquiry
frances -> francis
galatea -> galatia
harden -> hardin
heckman -> hickman
inequity -> iniquity
inflect -> inflict
jacobean -> jacobian
marten -> martin
module -> moduli
pegging -> pigging
psychoses -> psychosis
rabbet -> rabbit
sterling -> stirling
synopses -> synopsis
vector -> victor
welles -> willis

ALGOL 68

# find words where replacing "e" with "i" results in another word    #
# use the associative array in the Associate array/iteration task    #
PR read "aArray.a68" PR
# read the list of words and store the words in an associative array #
IF  FILE input file;
    STRING file name = "unixdict.txt";
    open( input file, file name, stand in channel ) /= 0
THEN
    # failed to open the file                                        #
    print( ( "Unable to open """ + file name + """", newline ) )
ELSE
    # file opened OK                                                 #
    BOOL at eof := FALSE;
    # set the EOF handler for the file #
    on logical file end( input file, ( REF FILE f )BOOL:
                                     BEGIN
                                         # note that we reached EOF on the #
                                         # latest read #
                                         at eof := TRUE;
                                         # return TRUE so processing can continue #
                                         TRUE
                                     END
                       );
    # build an associative array of the words                        #
    REF AARRAY words := INIT LOC AARRAY;
    WHILE STRING word;
          get( input file, ( word, newline ) );
          NOT at eof
    DO
        words // word := word
    OD;
    close( input file );
    # find the words where replacing "e" with "i" is still a word    #
    # the words must be at least 6 characters long                   #
    REF AAELEMENT e := FIRST words;
    WHILE e ISNT nil element DO
        IF STRING word = key OF e;
           INT w len = ( UPB word + 1 ) - LWB word;
           w len >= 6
        THEN
            # the word is at least 6 characters long                 #
            [ LWB word : UPB word ]CHAR i word := word[ @ LWB word ];
            FOR w pos FROM LWB i word TO UPB i word DO
                IF i word[ w pos ] = "e" THEN i word[ w pos ] := "i" FI
            OD;
            IF i word /= word THEN
                # replacing "e" with "i" resulted in a new word      #
                IF  words CONTAINSKEY i word THEN
                    # the new word is still a word                   #
                    print( ( word ) );
                    FROM w len + 1 TO 18 DO print( ( " " ) ) OD;
                   print( ( "-> ", i word, newline ) )
                FI
            FI
        FI;
        e := NEXT words
    OD
FI
Output:

Note: the associative array is not traversed in lexicographical order; the output here has been sorted for ease of comparison with the other samples.

analyses          -> analysis
atlantes          -> atlantis
bellow            -> billow
breton            -> briton
clench            -> clinch
convect           -> convict
crises            -> crisis
diagnoses         -> diagnosis
enfant            -> infant
enquiry           -> inquiry
frances           -> francis
galatea           -> galatia
harden            -> hardin
heckman           -> hickman
inequity          -> iniquity
inflect           -> inflict
jacobean          -> jacobian
marten            -> martin
module            -> moduli
pegging           -> pigging
psychoses         -> psychosis
rabbet            -> rabbit
sterling          -> stirling
synopses          -> synopsis
vector            -> victor
welles            -> willis

AppleScript

Core language

use AppleScript version "2.3.1" -- Mac OS X 10.9 (Mavericks) or later.
use sorter : script "Insertion sort" -- <https://rosettacode.org/wiki/Sorting_algorithms/Insertion_sort#AppleScript>
use scripting additions

on binarySearch(v, theList, l, r)
    script o
        property lst : theList
    end script
    
    repeat until (l = r)
        set m to (l + r) div 2
        if (o's lst's item m < v) then
            set l to m + 1
        else
            set r to m
        end if
    end repeat
    
    if (o's lst's item l = v) then return l
    return 0
end binarySearch

on replace(a, b, txt)
    set astid to AppleScript's text item delimiters
    set AppleScript's text item delimiters to a
    set txt to txt's text items
    set AppleScript's text item delimiters to b
    set txt to txt as text
    set AppleScript's text item delimiters to astid
    return txt
end replace

on task(minWordLength)
    set dictPath to (path to desktop as text) & "www.rosettacode.org:unixdict.txt"
    script o
        property wordList : paragraphs of (read file dictPath as «class utf8»)
        property iWords : {}
        property output : {}
    end script
    
    set wordCount to (count o's wordList)
    ignoring case
        tell sorter to sort(o's wordList, 1, wordCount) -- Not actually needed with unixdict.txt.
        
        set iWordCount to 0
        repeat with i from wordCount to 1 by -1
            set thisWord to o's wordList's item i
            if ((count thisWord) < minWordLength) then
            else if ((thisWord contains "e") and (iWordCount > 0)) then
                set changedWord to replace("e", "i", thisWord)
                if (binarySearch(changedWord, o's iWords, 1, iWordCount) > 0) then
                    set beginning of o's output to {thisWord, changedWord}
                end if
            else if (thisWord contains "i") then
                set beginning of o's iWords to thisWord
                set iWordCount to iWordCount + 1
            end if
        end repeat
    end ignoring
    
    return o's output
end task

task(6)
Output:
{{"analyses", "analysis"}, {"atlantes", "atlantis"}, {"bellow", "billow"}, {"breton", "briton"}, {"clench", "clinch"}, {"convect", "convict"}, {"crises", "crisis"}, {"diagnoses", "diagnosis"}, {"enfant", "infant"}, {"enquiry", "inquiry"}, {"frances", "francis"}, {"galatea", "galatia"}, {"harden", "hardin"}, {"heckman", "hickman"}, {"inequity", "iniquity"}, {"inflect", "inflict"}, {"jacobean", "jacobian"}, {"marten", "martin"}, {"module", "moduli"}, {"pegging", "pigging"}, {"psychoses", "psychosis"}, {"rabbet", "rabbit"}, {"sterling", "stirling"}, {"synopses", "synopsis"}, {"vector", "victor"}, {"welles", "willis"}}

AppleScriptObjC

The Foundation framework has very fast array filters, but reducing the checklist size and checking with the same binary search handler as above are also effective. This version takes about 0.37 seconds. As above, the case-sensitivity and sorting arrangements are superfluous with unixdict.txt, but are included for interest. Same result.

use AppleScript version "2.4" -- OS X 10.10 (Yosemite) or later
use framework "Foundation"
use scripting additions

on binarySearch(v, theList, l, r)
    script o
        property lst : theList
    end script
    
    repeat until (l = r)
        set m to (l + r) div 2
        if (item m of o's lst < v) then
            set l to m + 1
        else
            set r to m
        end if
    end repeat
    
    if (item l of o's lst is v) then return l
    return 0
end binarySearch

on task(minWordLength)
    set |⌘| to current application
    -- Read the unixdict.txt file.
    set dictPath to (POSIX path of (path to desktop)) & "www.rosettacode.org/unixdict.txt"
    set dictText to |⌘|'s class "NSString"'s stringWithContentsOfFile:(dictPath) ¬
        usedEncoding:(missing value) |error|:(missing value)
    -- Extract its words, which are known to be one per line.
    set newlineSet to |⌘|'s class "NSCharacterSet"'s newlineCharacterSet()
    set wordArray to dictText's componentsSeparatedByCharactersInSet:(newlineSet)
    -- Case-insensitively extract any words containing "e" whose length is at least minWordLength.
    set filter to |⌘|'s class "NSPredicate"'s ¬
        predicateWithFormat:("(self MATCHES '.{" & minWordLength & ",}+') && (self CONTAINS[c] 'e')")
    set eWords to wordArray's filteredArrayUsingPredicate:(filter)
    -- Case-insensitively extract and sort any words containing "i" but not "e" whose length is at least minWordLength.
    set filter to |⌘|'s class "NSPredicate"'s ¬
        predicateWithFormat:("(self MATCHES '.{" & minWordLength & ",}+') && (self CONTAINS[c] 'i') && !(self CONTAINS[c] 'e')")
    set iWords to (wordArray's filteredArrayUsingPredicate:(filter))'s sortedArrayUsingSelector:("localizedStandardCompare:")
    -- Replace the "e"s in the "e" words with (lower-case) "i"s.    
    set changedWords to ((eWords's componentsJoinedByString:(linefeed))'s ¬
        lowercaseString()'s stringByReplacingOccurrencesOfString:("e") withString:("i"))'s ¬
        componentsSeparatedByCharactersInSet:(newlineSet)
    
    -- Switch to vanilla to check the changed words.
    script o
        property eWordList : eWords as list
        property iWordList : iWords as list
        property changedWordList : changedWords as list
        property output : {}
    end script
    -- Case-insensitively (by default), search the "i" word list for each word in the changed word list.
    -- Where found, use the original-case version from the "i" word list.
    set iWordCount to (count o's iWordList)
    repeat with i from 1 to (count o's changedWordList)
        set matchIndex to binarySearch(item i of o's changedWordList, o's iWordList, 1, iWordCount)
        if (matchIndex > 0) then set end of o's output to {item i of o's eWordList, item matchIndex of o's iWordList}
    end repeat
    
    return o's output
end task

task(6)

Functional

Assuming a local copy of unixdict.txt (on the macOS Desktop), using the Foundation libraries, and defining the list of twins as an intersection of sets.

use framework "Foundation"


----- DICTIONARY WORDS TWINNED BY (E -> I) REPLACEMENT ---

-- ieTwins :: String -> [(String, String)]
on ieTwins(s)
    -- Pairs of dictionary words in s which
    -- are twinned by (e -> i) replacement
    
    set ca to current application
    
    set longWords to filteredLines("5 < length", s)
    set eWords to longWords's ¬
        filteredArrayUsingPredicate:(containsString("e"))
    
    set lexicon to ca's NSSet's ¬
        setWithArray:(longWords's ¬
            filteredArrayUsingPredicate:(containsString("i")))
    
    set possibles to (allReplaced("e", "i", ¬
        (eWords's componentsJoinedByString:(linefeed))))'s ¬
        componentsSeparatedByString:(linefeed)
    set possibleSet to ca's NSMutableSet's setWithArray:(possibles)
    possibleSet's intersectSet:(lexicon)
    
    
    -- Dictionary of possible words and their sources
    set dict to dictFromZip(possibles, eWords)
    
    -- Listing of candidate words which are found in the dictionary 
    -- (twinned with their sources)
    script pair
        on |λ|(k)
            {(dict's objectForKey:(k)) as string, k}
        end |λ|
    end script
    map(pair, ((possibleSet's allObjects())'s ¬
        sortedArrayUsingSelector:"compare:") as list)
end ieTwins


--------------------------- TEST -------------------------
on run
    script go
        on |λ|(ei)
            set {e, i} to ei
            
            i & " <- " & e
        end |λ|
    end script
    
    unlines(map(go, ¬
        ieTwins(readFile("~/Desktop/unixdict.txt"))))
end run


------------------------- GENERIC ------------------------

-- allReplaced :: String -> String -> NSString -> NSString
on allReplaced(needle, replacement, haystack)
    haystack's stringByReplacingOccurrencesOfString:(needle) ¬
        withString:(replacement)
end allReplaced


-- containsString :: String -> NSPredicate
on containsString(s)
    tell current application
        its (NSPredicate's ¬
            predicateWithFormat:("self contains '" & s & "'"))
    end tell
end containsString


-- dictFromZip :: NSArray -> NSArray -> NSDictionary
on dictFromZip(ks, vs)
    tell current application
        its (NSDictionary's ¬
            dictionaryWithObjects:vs forKeys:ks)
    end tell
end dictFromZip


-- filteredLines :: String -> NSString -> [a]
on filteredLines(predicateString, s)
    -- A list of lines filtered by an NSPredicate string
    tell current application
        set predicate to its (NSPredicate's ¬
            predicateWithFormat:predicateString)
        set array to its (NSArray's ¬
            arrayWithArray:(s's componentsSeparatedByString:(linefeed)))
    end tell
    
    (array's filteredArrayUsingPredicate:(predicate))
end filteredLines


-- map :: (a -> b) -> [a] -> [b]
on map(f, xs)
    -- The list obtained by applying f
    -- to each element of xs.
    tell mReturn(f)
        set lng to length of xs
        set lst to {}
        repeat with i from 1 to lng
            set end of lst to |λ|(item i of xs, i, xs)
        end repeat
        return lst
    end tell
end map


-- mReturn :: First-class m => (a -> b) -> m (a -> b)
on mReturn(f)
    -- 2nd class handler function lifted into 1st class script wrapper. 
    if script is class of f then
        f
    else
        script
            property |λ| : f
        end script
    end if
end mReturn


-- readFile :: FilePath -> IO NSString
on readFile(strPath)
    set ca to current application
    set e to reference
    set {s, e} to (ca's NSString's ¬
        stringWithContentsOfFile:((ca's NSString's ¬
            stringWithString:strPath)'s ¬
            stringByStandardizingPath) ¬
            encoding:(ca's NSUTF8StringEncoding) |error|:(e))
    if missing value is e then
        s
    else
        (localizedDescription of e) as string
    end if
end readFile


-- unlines :: [String] -> String
on unlines(xs)
    -- A single string formed by the intercalation
    -- of a list of strings with the newline character.
    set {dlm, my text item delimiters} to ¬
        {my text item delimiters, linefeed}
    set s to xs as text
    set my text item delimiters to dlm
    s
end unlines
Output:
analysis <- analyses
atlantis <- atlantes
billow <- bellow
briton <- breton
clinch <- clench
convict <- convect
crisis <- crises
diagnosis <- diagnoses
francis <- frances
galatia <- galatea
hardin <- harden
hickman <- heckman
infant <- enfant
inflict <- inflect
iniquity <- inequity
inquiry <- enquiry
jacobian <- jacobean
martin <- marten
moduli <- module
pigging <- pegging
psychosis <- psychoses
rabbit <- rabbet
stirling <- sterling
synopsis <- synopses
victor <- vector
willis <- welles

Arturo

words: map read.lines relative "unixdict.txt" => strip

loop words 'word [
    sw: new size word
    if 6 > size word ->
        continue

    iw: replace word "e" "i"
    if and? [contains? word "e"][contains? words iw] ->
        print [word "=>" iw]
]
Output:
analyses => analysis 
atlantes => atlantis 
bellow => billow 
breton => briton 
clench => clinch 
convect => convict 
crises => crisis 
diagnoses => diagnosis 
enfant => infant 
enquiry => inquiry 
frances => francis 
galatea => galatia 
harden => hardin 
heckman => hickman 
inequity => iniquity 
inflect => inflict 
jacobean => jacobian 
marten => martin 
module => moduli 
pegging => pigging 
psychoses => psychosis 
rabbet => rabbit 
sterling => stirling 
synopses => synopsis 
vector => victor 
welles => willis

AutoHotkey

FileRead, db, % A_Desktop "\unixdict.txt"
oWord := []
for i, word in StrSplit(db, "`n", "`r")
    if (StrLen(word) > 5)
        oWord[word] := true

for word in oWord
    if InStr(word, "e") && oWord[StrReplace(word, "e", "i")]
        result .= word . (StrLen(word) > 8 ? "`t" : "`t`t") . ": " StrReplace(word, "e", "i") "`n"

MsgBox, 262144, , % result
Output:
analyses	: analysis
atlantes	: atlantis
bellow		: billow
breton		: briton
clench		: clinch
convect		: convict
crises		: crisis
diagnoses	: diagnosis
enfant		: infant
enquiry		: inquiry
frances		: francis
galatea		: galatia
harden		: hardin
heckman		: hickman
inequity	: iniquity
inflect		: inflict
jacobean	: jacobian
marten		: martin
module		: moduli
pegging		: pigging
psychoses	: psychosis
rabbet		: rabbit
sterling	: stirling
synopses	: synopsis
vector		: victor
welles		: willis

AWK

# syntax: GAWK -f CHANGE_E_LETTERS_TO_I_IN_WORDS.AWK unixdict.txt
#
# sorting:
#   PROCINFO["sorted_in"] is used by GAWK
#   SORTTYPE is used by Thompson Automation's TAWK
#
{   if (length($0) < 6) {
      next
    }
    arr1[$0] = ""
}
END {
    PROCINFO["sorted_in"] = "@ind_str_asc" ; SORTTYPE = 1
    for (i in arr1) {
      word = i
      if (gsub(/e/,"i",word) > 0) {
        if (word in arr1) {
          arr2[i] = word
        }
      }
    }
    for (i in arr2) {
      printf("%-9s %s\n",i,arr2[i])
    }
    exit(0)
}
Output:
analyses  analysis
atlantes  atlantis
bellow    billow
breton    briton
clench    clinch
convect   convict
crises    crisis
diagnoses diagnosis
enfant    infant
enquiry   inquiry
frances   francis
galatea   galatia
harden    hardin
heckman   hickman
inequity  iniquity
inflect   inflict
jacobean  jacobian
marten    martin
module    moduli
pegging   pigging
psychoses psychosis
rabbet    rabbit
sterling  stirling
synopses  synopsis
vector    victor
welles    willis

C

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_WORD_SIZE 128
#define MIN_WORD_LENGTH 6

void fatal(const char* message) {
    fprintf(stderr, "%s\n", message);
    exit(1);
}

void* xmalloc(size_t n) {
    void* ptr = malloc(n);
    if (ptr == NULL)
        fatal("Out of memory");
    return ptr;
}

void* xrealloc(void* p, size_t n) {
    void* ptr = realloc(p, n);
    if (ptr == NULL)
        fatal("Out of memory");
    return ptr;
}

int string_compare(const void* p1, const void* p2) {
    const char* const* s1 = p1;
    const char* const* s2 = p2;
    return strcmp(*s1, *s2);
}

char* string_copy(const char* str) {
    size_t len = strlen(str);
    char* str2 = xmalloc(len + 1);
    memcpy(str2, str, len + 1);
    return str2;
}

char** load_dictionary(const char* filename, size_t* psize) {
    FILE* in = fopen(filename, "r");
    if (!in) {
        perror(filename);
        return NULL;
    }
    size_t size = 0, capacity = 1024;
    char** dictionary = xmalloc(sizeof(char*) * capacity);
    char line[MAX_WORD_SIZE];
    while (fgets(line, sizeof(line), in)) {
        size_t len = strlen(line);
        if (len > MIN_WORD_LENGTH) {
            line[len - 1] = '\0'; // discard newline
            char* str = string_copy(line);
            if (size == capacity) {
                capacity <<= 1;
                dictionary = xrealloc(dictionary, sizeof(char*) * capacity);
            }
            dictionary[size++] = str;
        }
    }
    fclose(in);
    qsort(dictionary, size, sizeof(char*), string_compare);
    *psize = size;
    return dictionary;
}

void free_dictionary(char** dictionary, size_t size) {
    for (size_t i = 0; i < size; ++i)
        free(dictionary[i]);
    free(dictionary);
}

bool find_word(char** dictionary, size_t size, const char* word) {
    return bsearch(&word, dictionary, size, sizeof(char*), string_compare) !=
           NULL;
}

int main(int argc, char** argv) {
    const char* filename = argc < 2 ? "unixdict.txt" : argv[1];
    size_t size = 0;
    char** dictionary = load_dictionary(filename, &size);
    if (dictionary == NULL)
        return EXIT_FAILURE;
    int count = 0;
    for (size_t i = 0; i < size; ++i) {
        const char* word1 = dictionary[i];
        if (strchr(word1, 'e') != NULL) {
            char* word2 = string_copy(word1);
            for (char* p = word2; *p; ++p) {
                if (*p == 'e')
                    *p = 'i';
            }
            if (find_word(dictionary, size, word2))
                printf("%2d. %-10s -> %s\n", ++count, word1, word2);
            free(word2);
        }
    }
    free_dictionary(dictionary, size);
    return EXIT_SUCCESS;
}
Output:
 1. analyses   -> analysis
 2. atlantes   -> atlantis
 3. bellow     -> billow
 4. breton     -> briton
 5. clench     -> clinch
 6. convect    -> convict
 7. crises     -> crisis
 8. diagnoses  -> diagnosis
 9. enfant     -> infant
10. enquiry    -> inquiry
11. frances    -> francis
12. galatea    -> galatia
13. harden     -> hardin
14. heckman    -> hickman
15. inequity   -> iniquity
16. inflect    -> inflict
17. jacobean   -> jacobian
18. marten     -> martin
19. module     -> moduli
20. pegging    -> pigging
21. psychoses  -> psychosis
22. rabbet     -> rabbit
23. sterling   -> stirling
24. synopses   -> synopsis
25. vector     -> victor
26. welles     -> willis

C++

#include <algorithm>
#include <cstdlib>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <set>
#include <string>

int main(int argc, char** argv) {
    const char* filename(argc < 2 ? "unixdict.txt" : argv[1]);
    std::ifstream in(filename);
    if (!in) {
        std::cerr << "Cannot open file '" << filename << "'.\n";
        return EXIT_FAILURE;
    }
    const int min_length = 6;
    std::string word;
    std::set<std::string> dictionary;
    while (getline(in, word)) {
        if (word.size() >= min_length)
            dictionary.insert(word);
    }
    int count = 0;
    for (const std::string& word1 : dictionary) {
        if (word1.find('e') == std::string::npos)
            continue;
        std::string word2(word1);
        std::replace(word2.begin(), word2.end(), 'e', 'i');
        if (dictionary.find(word2) != dictionary.end()) {
            std::cout << std::right << std::setw(2) << ++count
                << ". " << std::left << std::setw(10) << word1
                << " -> " << word2 << '\n';
        }
    }

    return EXIT_SUCCESS;
}
Output:
 1. analyses   -> analysis
 2. atlantes   -> atlantis
 3. bellow     -> billow
 4. breton     -> briton
 5. clench     -> clinch
 6. convect    -> convict
 7. crises     -> crisis
 8. diagnoses  -> diagnosis
 9. enfant     -> infant
10. enquiry    -> inquiry
11. frances    -> francis
12. galatea    -> galatia
13. harden     -> hardin
14. heckman    -> hickman
15. inequity   -> iniquity
16. inflect    -> inflict
17. jacobean   -> jacobian
18. marten     -> martin
19. module     -> moduli
20. pegging    -> pigging
21. psychoses  -> psychosis
22. rabbet     -> rabbit
23. sterling   -> stirling
24. synopses   -> synopsis
25. vector     -> victor
26. welles     -> willis

Delphi

program Change_e_letters_to_i_in_words;

{$APPTYPE CONSOLE}

uses
  System.SysUtils,
  System.Classes;

var
  Result: TStringList;

begin
  with TStringList.Create do
  begin
    LoadFromFile('unixdict.txt');
    for var i := Count - 1 downto 0 do
      if (Strings[i].Length < 6) then
        Delete(i);

    Result := TStringList.Create;

    for var i := Count - 1 downto 0 do
    begin
      var w_e := Strings[i];

      if w_e.IndexOf('e') = -1 then
        continue;

      var w_i := w_e.Replace('e', 'i', [rfReplaceAll]);
      if IndexOf(w_i) > -1 then
        Result.Add(format('%s ──► %s', [w_e.PadRight(12), w_i]));
    end;

    Result.Sort;
    writeln(Result.Text);
    Free;
  end;

  readln;
end.
Output:
analyses     ──► analysis
atlantes     ──► atlantis
bellow       ──► billow
breton       ──► briton
clench       ──► clinch
convect      ──► convict
crises       ──► crisis
diagnoses    ──► diagnosis
enfant       ──► infant
enquiry      ──► inquiry
frances      ──► francis
galatea      ──► galatia
harden       ──► hardin
heckman      ──► hickman
inequity     ──► iniquity
inflect      ──► inflict
jacobean     ──► jacobian
marten       ──► martin
module       ──► moduli
pegging      ──► pigging
psychoses    ──► psychosis
rabbet       ──► rabbit
sterling     ──► stirling
synopses     ──► synopsis
vector       ──► victor
welles       ──► willis

F#

// Change 'e' to 'i' in words. Nigel Galloway: February 18th., 2021
let g=[|use n=System.IO.File.OpenText("unixdict.txt") in while not n.EndOfStream do yield n.ReadLine()|]|>Array.filter(fun n->n.Length>5)
let fN g=(g,(Seq.map(fun n->if n='e' then 'i' else n)>>Array.ofSeq>>System.String)g)
g|>Array.filter(Seq.contains 'e')|>Array.map fN|>Array.filter(fun(_,n)-> Array.contains n g)|>Array.iter(fun(n,g)->printfn "%s ->  %s" n g)
Output:
analyses ->  analysis
atlantes ->  atlantis
bellow ->  billow
breton ->  briton
clench ->  clinch
convect ->  convict
crises ->  crisis
diagnoses ->  diagnosis
enfant ->  infant
enquiry ->  inquiry
frances ->  francis
galatea ->  galatia
harden ->  hardin
heckman ->  hickman
inequity ->  iniquity
inflect ->  inflict
jacobean ->  jacobian
marten ->  martin
module ->  moduli
pegging ->  pigging
psychoses ->  psychosis
rabbet ->  rabbit
sterling ->  stirling
synopses ->  synopsis
vector ->  victor
welles ->  willis

Factor

USING: assocs binary-search formatting io.encodings.ascii
io.files kernel literals math sequences splitting ;

CONSTANT: words $[ "unixdict.txt" ascii file-lines ]

words
[ length 5 > ] filter
[ CHAR: e swap member? ] filter
[ dup "e" "i" replace ] map>alist
[ nip words sorted-member? ] assoc-filter   ! binary search
[ "%-9s -> %s\n" printf ] assoc-each
Output:
analyses  -> analysis
atlantes  -> atlantis
bellow    -> billow
breton    -> briton
clench    -> clinch
convect   -> convict
crises    -> crisis
diagnoses -> diagnosis
enfant    -> infant
enquiry   -> inquiry
frances   -> francis
galatea   -> galatia
harden    -> hardin
heckman   -> hickman
inequity  -> iniquity
inflect   -> inflict
jacobean  -> jacobian
marten    -> martin
module    -> moduli
pegging   -> pigging
psychoses -> psychosis
rabbet    -> rabbit
sterling  -> stirling
synopses  -> synopsis
vector    -> victor
welles    -> willis

FreeBASIC

#define NULL 0

type node
    word as string*32   'enough space to store any word in the dictionary
    nxt as node ptr
end type

function addword( tail as node ptr, word as string ) as node ptr
    'allocates memory for a new node, links the previous tail to it,
    'and returns the address of the new node
    dim as node ptr newnode = allocate(sizeof(node))
    tail->nxt = newnode
    newnode->nxt = NULL
    newnode->word = word
    return newnode
end function

function length( word as string ) as uinteger
    'necessary replacement for the built-in len function, which in this
    'case would always return 32
    for i as uinteger = 1 to 32
        if asc(mid(word,i,1)) = 0 then return i-1
    next i
    return 999
end function

dim as string word
dim as node ptr tail = allocate( sizeof(node) )
dim as node ptr head = tail, curr = head, currj
tail->nxt = NULL
tail->word = "XXXXHEADER"

open "unixdict.txt" for input as #1
while true
    line input #1, word
    if word = "" then exit while
    if length(word)>5 then tail = addword( tail, word )
wend
close #1

dim as string tempword
dim as boolean changed

while curr->nxt <> NULL
    changed = false
    tempword = curr->word
    for i as uinteger = 1 to length(tempword)
        if mid(tempword,i,1) = "e" then
            tempword = left(tempword,i-1) + "i" + mid(tempword, i+1, length(tempword)-i)
            changed = true
        end if
    next i
    if changed = true then
        currj = head
        while currj->nxt <> NULL
            if currj->word = tempword then print curr->word, tempword
            currj=currj->nxt
        wend
    end if
    curr = curr->nxt
wend
Output:

analyses      analysis
atlantes      atlantis
bellow        billow
breton        briton
clench        clinch
convect       convict
crises        crisis
diagnoses     diagnosis
enfant        infant
enquiry       inquiry
frances       francis
galatea       galatia
harden        hardin
heckman       hickman
inequity      iniquity
inflect       inflict
jacobean      jacobian
marten        martin
module        moduli
pegging       pigging
psychoses     psychosis
rabbet        rabbit
sterling      stirling
synopses      synopsis
vector        victor
welles        willis

FutureBasic

include "NSLog.incl"

#plist NSAppTransportSecurity @{NSAllowsArbitraryLoads:YES}

void local fn DoIt
  CFURLRef          url
  CFStringRef       string, wd
  ErrorRef          err = NULL
  CFArrayRef        array
  CFMutableArrayRef mutArray
  
  url = fn URLWithString( @"https://web.archive.org/web/20180611003215/http://www.puzzlers.org/pub/wordlists/unixdict.txt" )
  string = fn StringWithContentsOfURL( url, NSUTF8StringEncoding, @err )
  if ( string )
    array = fn StringComponentsSeparatedByCharactersInSet( string, fn CharacterSetNewlineSet )
    mutArray = fn MutableArrayWithCapacity(0)
    for wd in array
      if ( len(wd) > 5 and fn StringContainsString( wd, @"e" ) )
        wd = fn StringByReplacingOccurrencesOfString( wd, @"e", @"i" )
        if ( fn ArrayContainsObject( array, wd ) )
          MutableArrayAddObject( mutArray, wd )
        end if
      end if
    next
    string = fn ArrayComponentsJoinedByString( mutArray, @"\n" )
    
    NSLog(@"%@",string)
    
  else
    NSLog(@"%@",err)
  end if
end fn

fn DoIt

HandleEvents
Output:
analysis
atlantis
billow
briton
clinch
convict
crisis
diagnosis
infant
inquiry
francis
galatia
hardin
hickman
iniquity
inflict
jacobian
martin
moduli
pigging
psychosis
rabbit
stirling
synopsis
victor
willis

Go

package main

import (
    "bytes"
    "fmt"
    "io/ioutil"
    "log"
    "sort"
    "strings"
    "unicode/utf8"
)

func main() {
    wordList := "unixdict.txt"
    b, err := ioutil.ReadFile(wordList)
    if err != nil {
        log.Fatal("Error reading file")
    }
    bwords := bytes.Fields(b)
    var words []string
    for _, bword := range bwords {
        s := string(bword)
        if utf8.RuneCountInString(s) > 5 {
            words = append(words, s)
        }
    }
    count := 0
    le := len(words)
    for _, word := range words {
        if strings.ContainsRune(word, 'e') {
            repl := strings.ReplaceAll(word, "e", "i")
            ix := sort.SearchStrings(words, repl) // binary search
            if ix < le && words[ix] == repl {
                count++
                fmt.Printf("%2d: %-9s -> %s\n", count, word, repl)
            }
        }
    }
}
Output:
 1: analyses  -> analysis
 2: atlantes  -> atlantis
 3: bellow    -> billow
 4: breton    -> briton
 5: clench    -> clinch
 6: convect   -> convict
 7: crises    -> crisis
 8: diagnoses -> diagnosis
 9: enfant    -> infant
10: enquiry   -> inquiry
11: frances   -> francis
12: galatea   -> galatia
13: harden    -> hardin
14: heckman   -> hickman
15: inequity  -> iniquity
16: inflect   -> inflict
17: jacobean  -> jacobian
18: marten    -> martin
19: module    -> moduli
20: pegging   -> pigging
21: psychoses -> psychosis
22: rabbet    -> rabbit
23: sterling  -> stirling
24: synopses  -> synopsis
25: vector    -> victor
26: welles    -> willis

Haskell

import qualified Data.Set as S

------ DICTIONARY WORDS TWINNED BY e -> i REPLACEMENT ----

ieTwins :: String -> [(String, String)]
ieTwins s =
  [ (w, twin)
    | w <- filter ('e' `elem`) longWords,
      let twin = iForE w,
      S.member twin lexicon
  ]
  where
    longWords = filter ((5 <) . length) (lines s)
    lexicon = S.fromList $ filter ('i' `elem`) longWords

iForE :: String -> String
iForE [] = []
iForE ('e' : cs) = 'i' : iForE cs
iForE (c : cs) = c : iForE cs

--------------------------- TEST -------------------------
main :: IO ()
main =
  readFile "unixdict.txt"
    >>= (mapM_ print . ieTwins)
Output:
("analyses","analysis")
("atlantes","atlantis")
("bellow","billow")
("breton","briton")
("clench","clinch")
("convect","convict")
("crises","crisis")
("diagnoses","diagnosis")
("enfant","infant")
("enquiry","inquiry")
("frances","francis")
("galatea","galatia")
("harden","hardin")
("heckman","hickman")
("inequity","iniquity")
("inflect","inflict")
("jacobean","jacobian")
("marten","martin")
("module","moduli")
("pegging","pigging")
("psychoses","psychosis")
("rabbet","rabbit")
("sterling","stirling")
("synopses","synopsis")
("vector","victor")
("welles","willis")

J

   >(([-.-.)rplc&'ei'&.>@(#~ ('e'&e. * 5<#)@>)) cutLF fread 'unixdict.txt'
analysis 
atlantis 
billow   
briton   
clinch   
convict  
crisis   
diagnosis
francis  
galatia  
hardin   
hickman  
infant   
inflict  
iniquity 
inquiry  
jacobian 
martin   
moduli   
pigging  
psychosis
rabbit   
stirling 
synopsis 
victor   
willis

JavaScript

Works with: macOS

ECMAScript defines no file access interface.

Here we use the ObjC interface of macOS JavaScript for Automation to define a readFile function.

(() => {
    "use strict";
 
    // ieTwins :: String -> [(String, String)]
    const ieTwins = s => {
        const
            longWords = s.split("\n")
            .filter(x => 5 < x.length),
            lexicon = new Set(
                longWords.filter(w => w.includes("i"))
            ),
            rgx = /e/gu;
 
        return longWords.flatMap(
            w => w.includes("e") ? (() => {
                const x = w.replace(rgx, "i");
 
                return lexicon.has(x) ? [
                    [w, x]
                ] : [];
            })() : []
        );
    };
 
    // ---------------------- TEST -----------------------
    // main :: IO ()
    const main = () => {
        const s = readFile("unixdict.txt");
 
        return ieTwins(s)
            .map(JSON.stringify)
            .join("\n");
    };
 
    // --------------------- GENERIC ---------------------
 
    // readFile :: FilePath -> IO String
    const readFile = fp => {
        // The contents of a text file at the
        // filepath fp.
        const
            e = $(),
            ns = $.NSString
            .stringWithContentsOfFileEncodingError(
                $(fp).stringByStandardizingPath,
                $.NSUTF8StringEncoding,
                e
            );
 
        return ObjC.unwrap(
            ns.isNil() ? (
                e.localizedDescription
            ) : ns
        );
    };
 
    // MAIN ---
    return main();
})();
Output:
["analyses","analysis"]
["atlantes","atlantis"]
["bellow","billow"]
["breton","briton"]
["clench","clinch"]
["convect","convict"]
["crises","crisis"]
["diagnoses","diagnosis"]
["enfant","infant"]
["enquiry","inquiry"]
["frances","francis"]
["galatea","galatia"]
["harden","hardin"]
["heckman","hickman"]
["inequity","iniquity"]
["inflect","inflict"]
["jacobean","jacobian"]
["marten","martin"]
["module","moduli"]
["pegging","pigging"]
["psychoses","psychosis"]
["rabbet","rabbit"]
["sterling","stirling"]
["synopses","synopsis"]
["vector","victor"]
["welles","willis"]

jq

Works with: jq

Works with: gojq, the Go implementation of jq

For the sake of brevity, we confine attention to words longer than 6 characters.

def e2i($dict):
  select(index("e"))
  | gsub("e";"i")
  | select($dict[.]);
      
INDEX( inputs; . )
| . as $dict
| keys[]   # if using jq, keys_unsorted[] would be faster
| select(length>6)
| . as $w
| e2i($dict) 
| "\($w) → \(.)"

Invocation: jq -n -rR -f e2i.jq unixdict.txt

Output:
analyses → analysis
atlantes → atlantis
convect → convict
diagnoses → diagnosis
enquiry → inquiry
frances → francis
galatea → galatia
heckman → hickman
inequity → iniquity
inflect → inflict
jacobean → jacobian
pegging → pigging
psychoses → psychosis
sterling → stirling
synopses → synopsis

Julia

See Alternade_words for the foreachword function.
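
foreachword is not repeated here. As a rough sketch only (the body and the default keyword values below are assumptions; the real definition is on the Alternade_words page), it can be taken to read the word list, build a word lookup table, apply the callback to every word of at least minlen characters, and print the non-empty results in numcols columns of width colwidth:

function foreachword(filename, f; minlen = 1, colwidth = 20, numcols = 4)
    # Hypothetical reconstruction of the helper from the Alternade_words task.
    println("Word source: $filename\n")
    words = filter(w -> length(w) >= minlen, split(read(filename, String)))
    dict = Dict(w => true for w in words)                    # word lookup passed to the callback
    results = filter(!isempty, [f(w, dict) for w in words])
    for (i, s) in enumerate(results)
        print(rpad(s, colwidth))                             # fixed-width columns
        i % numcols == 0 && println()
    end
    println()
end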

e2i(w, d) = (if 'e' in w   s = replace(w, "e" => "i"); haskey(d, s) && return "$w => $s" end; "")
foreachword("unixdict.txt", e2i, minlen=6, colwidth=23, numcols=4)
Output:
Word source: unixdict.txt

analyses => analysis   atlantes => atlantis   bellow => billow       breton => briton
clench => clinch       convect => convict     crises => crisis       diagnoses => diagnosis
enfant => infant       enquiry => inquiry     frances => francis     galatea => galatia
harden => hardin       heckman => hickman     inequity => iniquity   inflect => inflict
jacobean => jacobian   marten => martin       module => moduli       pegging => pigging     
psychoses => psychosis rabbet => rabbit       sterling => stirling   synopses => synopsis
vector => victor       welles => willis

Mathematica/Wolfram Language

dict = Once[Import["https://web.archive.org/web/20180611003215/http://www.puzzlers.org/pub/wordlists/unixdict.txt"]];
dict //= StringSplit[#, "\n"] &;
dict //= Select[StringLength /* GreaterThan[5]];
sel = Select[dict, StringContainsQ["e"]];
sel = Select[sel, MemberQ[dict, StringReplace[#, "e" -> "i"]] &];
{#, StringReplace[#, "e" -> "i"]} & /@ sel
Output:
{{analyses,analysis},{atlantes,atlantis},{bellow,billow},{breton,briton},{clench,clinch},{convect,convict},{crises,crisis},{diagnoses,diagnosis},{enfant,infant},{enquiry,inquiry},{frances,francis},{galatea,galatia},{harden,hardin},{heckman,hickman},{inequity,iniquity},{inflect,inflict},{jacobean,jacobian},{marten,martin},{module,moduli},{pegging,pigging},{psychoses,psychosis},{rabbet,rabbit},{sterling,stirling},{synopses,synopsis},{vector,victor},{welles,willis}}

Nim

import sets, strutils, sugar

# Build a set of words to speed up membership check.
let wordSet = collect(initHashSet, for word in "unixdict.txt".lines: {word})

for word in "unixdict.txt".lines:
  let newWord = word.replace('e', 'i')
  if newWord.len > 5 and newWord != word and newWord in wordSet:
    echo word, " → ", newWord
Output:
analyses → analysis
atlantes → atlantis
bellow → billow
breton → briton
clench → clinch
convect → convict
crises → crisis
diagnoses → diagnosis
enfant → infant
enquiry → inquiry
frances → francis
galatea → galatia
harden → hardin
heckman → hickman
inequity → iniquity
inflect → inflict
jacobean → jacobian
marten → martin
module → moduli
pegging → pigging
psychoses → psychosis
rabbet → rabbit
sterling → stirling
synopses → synopsis
vector → victor
welles → willis

Perl

#!/usr/bin/perl

use strict; # https://rosettacode.org/wiki/Change_e_letters_to_i_in_words
use warnings;

my $file = do { local (@ARGV, $/) = 'unixdict.txt'; <> };        # slurp the dictionary
my %dict = map { $_ => 1 } $file =~ /^.{6,}$/gm;                 # words longer than 5 letters
print map { my $i = tr/e/i/r; $dict{$i} ? sprintf "%30s  %s\n", $_, $i : () }
  grep /e/, sort keys %dict;
Output:
                      analyses  analysis
                      atlantes  atlantis
                        bellow  billow
                        breton  briton
                        clench  clinch
                       convect  convict
                        crises  crisis
                     diagnoses  diagnosis
                        enfant  infant
                       enquiry  inquiry
                       frances  francis
                       galatea  galatia
                        harden  hardin
                       heckman  hickman
                      inequity  iniquity
                       inflect  inflict
                      jacobean  jacobian
                        marten  martin
                        module  moduli
                       pegging  pigging
                     psychoses  psychosis
                        rabbet  rabbit
                      sterling  stirling
                      synopses  synopsis
                        vector  victor
                        welles  willis

Phix

with javascript_semantics
sequence words = unix_dict()
function chei(string word) return substitute(word,"e","i") end function
function cheti(string word) return length(word)>5 and find('e',word) and find(chei(word),words) end function
sequence chetie = filter(words,cheti), chetei = columnize({chetie,apply(chetie,chei)})
printf(1,"%d words: %v\n",{length(chetei),shorten(chetei,"",2)})
Output:
26 words: {{"analyses","analysis"},{"atlantes","atlantis"},"...",{"vector","victor"},{"welles","willis"}}

Prolog

Works with: SWI Prolog
:- dynamic dictionary_word/1.

main:-
    load_dictionary_from_file("unixdict.txt", 6),
    forall((dictionary_word(Word1),
            string_chars(Word1, Chars1),
            memberchk('e', Chars1),
            replace('e', 'i', Chars1, Chars2),
            string_chars(Word2, Chars2),
            dictionary_word(Word2)),
            writef('%10l -> %w\n', [Word1, Word2])).

load_dictionary_from_file(File, Min_length):-
    open(File, read, Stream),
    retractall(dictionary_word(_)),
    load_dictionary_from_stream(Stream, Min_length),
    close(Stream).

load_dictionary_from_stream(Stream, Min_length):-
    read_line_to_string(Stream, String),
    String \= end_of_file,
    !,
    string_length(String, Length),
    (Length >= Min_length -> assertz(dictionary_word(String)) ; true),
    load_dictionary_from_stream(Stream, Min_length).
load_dictionary_from_stream(_, _).

replace(_, _, [], []):-!.
replace(Ch1, Ch2, [Ch1|Chars1], [Ch2|Chars2]):-
    !,
    replace(Ch1, Ch2, Chars1, Chars2).
replace(Ch1, Ch2, [Ch|Chars1], [Ch|Chars2]):-
    replace(Ch1, Ch2, Chars1, Chars2).
Output:
analyses   -> analysis
atlantes   -> atlantis
bellow     -> billow
breton     -> briton
clench     -> clinch
convect    -> convict
crises     -> crisis
diagnoses  -> diagnosis
enfant     -> infant
enquiry    -> inquiry
frances    -> francis
galatea    -> galatia
harden     -> hardin
heckman    -> hickman
inequity   -> iniquity
inflect    -> inflict
jacobean   -> jacobian
marten     -> martin
module     -> moduli
pegging    -> pigging
psychoses  -> psychosis
rabbet     -> rabbit
sterling   -> stirling
synopses   -> synopsis
vector     -> victor
welles     -> willis

Python

Needs Python 3.8 or above for the assignment expression (walrus operator) in the list comprehension.

Works with: Python version 3.8
'''Dictionary words twinned by (e -> i) replacement'''


# ieTwins :: String -> [(String, String)]
def ieTwins(s):
    '''Words in the lines of s which are twinned
       with other words in s by replacement of
       'e' by 'i'.
    '''
    longWords = [
        w for w in s.splitlines()
        if 5 < len(w)
    ]
    lexicon = {
        w for w in longWords
        if 'i' in w
    }

    return [
        (w, twin) for w in longWords
        if 'e' in w and (
            twin := w.replace('e', 'i')
        ) in lexicon
    ]


# ------------------------- TEST -------------------------
# main :: IO ()
def main():
    '''Words twinned by ('e' -> 'i') replacement
       in unixdict.txt
    '''
    for pair in ieTwins(
        readFile("unixdict.txt")
    ):
        print(pair)


# ----------------------- GENERIC ------------------------

# readFile :: FilePath -> IO String
def readFile(fp):
    '''The contents of any file at the path fp.
    '''
    with open(fp, 'r', encoding='utf-8') as f:
        return f.read()


# MAIN ---
if __name__ == '__main__':
    main()
Output:
('analyses', 'analysis')
('atlantes', 'atlantis')
('bellow', 'billow')
('breton', 'briton')
('clench', 'clinch')
('convect', 'convict')
('crises', 'crisis')
('diagnoses', 'diagnosis')
('enfant', 'infant')
('enquiry', 'inquiry')
('frances', 'francis')
('galatea', 'galatia')
('harden', 'hardin')
('heckman', 'hickman')
('inequity', 'iniquity')
('inflect', 'inflict')
('jacobean', 'jacobian')
('marten', 'martin')
('module', 'moduli')
('pegging', 'pigging')
('psychoses', 'psychosis')
('rabbet', 'rabbit')
('sterling', 'stirling')
('synopses', 'synopsis')
('vector', 'victor')
('welles', 'willis')

Quackery

  [ [] swap ]'[ swap
    witheach [ 
      dup nested 
      unrot over do
      iff [ dip join ]
      else nip
    ] drop ]                   is filter ( [ --> [ )

  [ [] swap
    witheach 
      [ [] swap 
        witheach 
          [ dup char e = if 
              [ drop char i ]
            join ] 
    nested join ] ]           is e->i   ( [ --> [ )

  $ "rosetta/unixdict.txt" sharefile drop nest$
  filter [ size 5 > ]
  dup
  filter [ char e over find swap found ]
  e->i
  witheach
    [ tuck over find
      over found iff  
        [ swap echo$ cr ]
      else nip ]
  drop
Output:
analysis
atlantis
billow
briton
clinch
convict
crisis
diagnosis
infant
inquiry
francis
galatia
hardin
hickman
iniquity
inflict
jacobian
martin
moduli
pigging
psychosis
rabbit
stirling
synopsis
victor
willis

R

dict <- scan("https://web.archive.org/web/20180611003215/http://www.puzzlers.org/pub/wordlists/unixdict.txt", what = character())
changed <- chartr("e", "i", dict)
cbind(Before = dict, After = changed)[changed != dict & changed %in% dict & nchar(changed) > 5, ]
Output:
      Before      After         
 [1,] "analyses"  "analysis" 
 [2,] "atlantes"  "atlantis" 
 [3,] "bellow"    "billow"   
 [4,] "breton"    "briton"   
 [5,] "clench"    "clinch"   
 [6,] "convect"   "convict"  
 [7,] "crises"    "crisis"   
 [8,] "diagnoses" "diagnosis"
 [9,] "enfant"    "infant"   
[10,] "enquiry"   "inquiry"  
[11,] "frances"   "francis"  
[12,] "galatea"   "galatia"  
[13,] "harden"    "hardin"   
[14,] "heckman"   "hickman"  
[15,] "inequity"  "iniquity" 
[16,] "inflect"   "inflict"  
[17,] "jacobean"  "jacobian" 
[18,] "marten"    "martin"   
[19,] "module"    "moduli"   
[20,] "pegging"   "pigging"  
[21,] "psychoses" "psychosis"
[22,] "rabbet"    "rabbit"   
[23,] "sterling"  "stirling" 
[24,] "synopses"  "synopsis" 
[25,] "vector"    "victor"   
[26,] "welles"    "willis"

Raku

my %ei = 'unixdict.txt'.IO.words.grep({ .chars > 5 and /<[ie]>/ }).map: { $_ => .subst('e', 'i', :g) };
put %ei.grep( *.key.contains: 'e' ).grep({ %ei{.value}:exists }).sort.batch(4)».gist».fmt('%-22s').join: "\n";
Output:
analyses => analysis   atlantes => atlantis   bellow => billow       breton => briton      
clench => clinch       convect => convict     crises => crisis       diagnoses => diagnosis
enfant => infant       enquiry => inquiry     frances => francis     galatea => galatia    
harden => hardin       heckman => hickman     inequity => iniquity   inflect => inflict    
jacobean => jacobian   marten => martin       module => moduli       pegging => pigging    
psychoses => psychosis rabbet => rabbit       sterling => stirling   synopses => synopsis  
vector => victor       welles => willis

REXX

This REXX version doesn't care what order the words in the dictionary are in,   nor what case   (lower/upper/mixed)   the words are in;   the search for words is caseless.

It also allows the minimum length to be specified on the command line (CL),   as well as the old character   (that is to be changed),   the new character   (that it is to be changed into),   and the dictionary file identifier.

/*REXX pgm finds words with changed letter  E──►I  and is a word  (in a specified dict).*/
parse arg minL oldC newC iFID .                  /*obtain optional arguments from the CL*/
if minL=='' | minL=="," then minL=  6            /*Not specified?  Then use the default.*/
if oldC=='' | oldC=="," then oldC= 'e'           /* "      "         "   "   "     "    */
if newC=='' | newC=="," then newC= 'i'           /* "      "         "   "   "     "    */
if iFID=='' | iFID=="," then iFID='unixdict.txt' /* "      "         "   "   "     "    */
upper oldC newC                                  /*get uppercase versions of OLDC & NEWC*/
@.=                                              /*default value of any dictionary word.*/
           do #=1  while lines(iFID)\==0         /*read each word in the file  (word=X).*/
           x= strip( linein( iFID) )             /*pick off a word from the input line. */
           $.#= x;       upper x;     @.x= $.#   /*save: original case and the old word.*/
           end   /*#*/                           /*Note: the old word case is left as─is*/
#= # - 1                                         /*adjust word count because of DO loop.*/
finds= 0                                         /*count of changed words found (so far)*/
say copies('─', 30)      #      "words in the dictionary file: "       iFID
say
       do j=1  for #;           L= length($.j)   /*process all the words that were found*/
       if L<minL  then iterate                   /*Is word too short?   Then ignore it. */
       y = $.j;                 upper y          /*uppercase the dictionary word.       */
       if pos(oldC, y)==0  then iterate          /*Have the required character? No, skip*/
       new= translate(y, newC, oldC)             /*obtain a changed (translated) word.  */
       if @.new==''  then iterate                /*New word in the dict.?   No, skip it.*/
       finds= finds + 1                          /*bump the count of found changed words*/
       say right(left($.j, 20), 40) '──►' @.new  /*indent a bit, display the old & new. */
       end        /*j*/
say                                              /*stick a fork in it,  we're all done. */
say copies('─',30)    finds    " words found that were changed with "    oldC    '──►' ,
                      newC",  and with a minimum length of "     minL
output   when using the default inputs:
────────────────────────────── 25104 words in the dictionary file:  unixdict.txt

                    analyses             ──► analysis
                    atlantes             ──► atlantis
                    bellow               ──► billow
                    breton               ──► briton
                    clench               ──► clinch
                    convect              ──► convict
                    crises               ──► crisis
                    diagnoses            ──► diagnosis
                    enfant               ──► infant
                    enquiry              ──► inquiry
                    frances              ──► francis
                    galatea              ──► galatia
                    harden               ──► hardin
                    heckman              ──► hickman
                    inequity             ──► iniquity
                    inflect              ──► inflict
                    jacobean             ──► jacobian
                    marten               ──► martin
                    module               ──► moduli
                    pegging              ──► pigging
                    psychoses            ──► psychosis
                    rabbet               ──► rabbit
                    sterling             ──► stirling
                    synopses             ──► synopsis
                    vector               ──► victor
                    welles               ──► willis

────────────────────────────── 26  words found that were changed with  E ──► I,  and with a minimum length of  6

Ring

load "stdlib.ring"

cStr = read("unixdict.txt")
wordList = str2list(cStr)
num = 0

see "working..." + nl
see "Words are:" + nl

ln = len(wordList)
for n = ln to 1 step -1
    if len(wordList[n]) < 6
       del(wordList,n)
    ok
next

for n = 1 to len(wordList)
    ind = substr(wordList[n],"e") 
    if ind > 0
       str = substr(wordList[n],"e","i")
       indstr = find(wordList,str)
       if indstr > 0
          num = num + 1
          see "" + num + ". " + wordList[n] + " => " + str + nl
       ok
    ok 
next

see "done..." + nl
Output:
working...
Words are:
1. analyses => analysis
2. atlantes => atlantis
3. bellow => billow
4. breton => briton
5. clench => clinch
6. convect => convict
7. crises => crisis
8. diagnoses => diagnosis
9. enfant => infant
10. enquiry => inquiry
11. frances => francis
12. galatea => galatia
13. harden => hardin
14. heckman => hickman
15. inequity => iniquity
16. inflect => inflict
17. jacobean => jacobian
18. marten => martin
19. module => moduli
20. pegging => pigging
21. psychoses => psychosis
22. rabbet => rabbit
23. sterling => stirling
24. synopses => synopsis
25. vector => victor
26. welles => willis
done...

RPL

The only way to use unixdict.txt as input is to convert it into a list of 25104 strings. Fortunately, emulators can handle such a big data structure in RAM.

Works with: Halcyon Calc version 4.2.7

≪ → words

 ≪ { } 
    1 words EVAL SIZE FOR j
       words j GET 
       IF DUP SIZE 5 ≤ OVER "e" POS NOT OR THEN DROP ELSE
         ""
         1 3 PICK SIZE FOR j 
           OVER j DUP SUB
           NUM R→B #20h OR B→R CHR          @turn into lowercase
           DUP "e" == "i" ROT IFTE +
        NEXT
        IF words EVAL OVER POS THEN " → " SWAP + + + ELSE DROP2 END
     END NEXT
≫ ≫ ‘E→I’ STO
Output:
1: { "analyses → analysis" "atlantes → atlantis" "bellow → billow" "breton → briton" "clench → clinch" "convect → convict" "crises → crisis" "diagnoses → diagnosis" "enfant → infant" "enquiry → inquiry" "frances → francis" "galatea → galatia" "harden → hardin" "heckman → hickman" "inequity → iniquity" "inflect → inflict" "jacobean → jacobian" "marten → martin" "module → moduli" "pegging → pigging" "psychoses → psychosis" "rabbet → rabbit" "sterling → stirling" "synopses → synopsis" "vector → victor" "welles → willis" }

Ruby

words = File.readlines("unixdict.txt").map(&:chomp)
words.each do |word|
  next if word.size < 6 
  next unless word.match?(/e/)
  e2i = word.tr("e", "i")
  next unless words.bsearch{|w| e2i <=> w}
  puts "#{word.ljust(10)} -> #{e2i}"
end
Output:
analyses   -> analysis
atlantes   -> atlantis
bellow     -> billow
breton     -> briton
clench     -> clinch
convect    -> convict
crises     -> crisis
diagnoses  -> diagnosis
enfant     -> infant
enquiry    -> inquiry
frances    -> francis
galatea    -> galatia
harden     -> hardin
heckman    -> hickman
inequity   -> iniquity
inflect    -> inflict
jacobean   -> jacobian
marten     -> martin
module     -> moduli
pegging    -> pigging
psychoses  -> psychosis
rabbet     -> rabbit
sterling   -> stirling
synopses   -> synopsis
vector     -> victor
welles     -> willis

Rust

use std::collections::BTreeSet;
use std::fs::File;
use std::io::{self, BufRead};

fn load_dictionary(filename: &str, min_length: usize) -> std::io::Result<BTreeSet<String>> {
    let file = File::open(filename)?;
    let mut dict = BTreeSet::new();
    for line in io::BufReader::new(file).lines() {
        let word = line?;
        if word.len() >= min_length {
            dict.insert(word);
        }
    }
    Ok(dict)
}

fn main() {
    match load_dictionary("unixdict.txt", 6) {
        Ok(dictionary) => {
            let mut count = 0;
            for word in dictionary.iter().filter(|x| x.contains("e")) {
                let word2 = word.replace("e", "i");
                if dictionary.contains(&word2) {
                    count += 1;
                    println!("{:2}. {:<9} -> {}", count, word, word2);
                }
            }
        }
        Err(error) => eprintln!("{}", error),
    }
}
Output:
 1. analyses  -> analysis
 2. atlantes  -> atlantis
 3. bellow    -> billow
 4. breton    -> briton
 5. clench    -> clinch
 6. convect   -> convict
 7. crises    -> crisis
 8. diagnoses -> diagnosis
 9. enfant    -> infant
10. enquiry   -> inquiry
11. frances   -> francis
12. galatea   -> galatia
13. harden    -> hardin
14. heckman   -> hickman
15. inequity  -> iniquity
16. inflect   -> inflict
17. jacobean  -> jacobian
18. marten    -> martin
19. module    -> moduli
20. pegging   -> pigging
21. psychoses -> psychosis
22. rabbet    -> rabbit
23. sterling  -> stirling
24. synopses  -> synopsis
25. vector    -> victor
26. welles    -> willis

SETL

program change_e_letters_to_i_in_words;
    dictfile := open("unixdict.txt", "r");
    dict := {getline(dictfile) : until eof(dictfile)};
    close(dictfile);

    loop for word in dict | #word > 5 do
        if "e" notin word then continue; end if;
        iword := replaceall(word, "e", "i");
        if iword notin dict then continue; end if;
        print([word, iword]);
    end loop;

    proc replaceall(word, x, y);
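        $ assigning to the string-valued selector word(x) replaces an occurrence of x in word; the loop repeats until none remain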
        loop while x in word do
            word(x) := y;
        end loop;
        return word;
    end proc;
end program;
Output:
[analyses analysis]
[atlantes atlantis]
[bellow billow]
[breton briton]
[clench clinch]
[convect convict]
[crises crisis]
[diagnoses diagnosis]
[enfant infant]
[enquiry inquiry]
[frances francis]
[galatea galatia]
[harden hardin]
[heckman hickman]
[inequity iniquity]
[inflect inflict]
[jacobean jacobian]
[marten martin]
[module moduli]
[pegging pigging]
[psychoses psychosis]
[rabbet rabbit]
[sterling stirling]
[synopses synopsis]
[vector victor]
[welles willis]

Sidef

var file = File("unixdict.txt")

if (!file.exists) {
    require('LWP::Simple')
    say ":: Retrieving #{file} from internet..."
    %S<LWP::Simple>.mirror(
        'https://web.archive.org/web/20180611003215if_/' +
        'http://www.puzzlers.org:80/pub/wordlists/unixdict.txt',
        'unixdict.txt')
}

var words = file.read.words
var dict  = Hash().set_keys(words...)
var count = 0

words.each {|word|

    word.len > 5 || next
    word.contains('e') || next

    var changed = word.gsub('e', 'i')

    if (dict.contains(changed)) {
        printf("%2d: %20s <-> %s\n", ++count, word, changed)
    }
}
Output:
 1:             analyses <-> analysis
 2:             atlantes <-> atlantis
 3:               bellow <-> billow
 4:               breton <-> briton
 5:               clench <-> clinch
 6:              convect <-> convict
 7:               crises <-> crisis
 8:            diagnoses <-> diagnosis
 9:               enfant <-> infant
10:              enquiry <-> inquiry
11:              frances <-> francis
12:              galatea <-> galatia
13:               harden <-> hardin
14:              heckman <-> hickman
15:             inequity <-> iniquity
16:              inflect <-> inflict
17:             jacobean <-> jacobian
18:               marten <-> martin
19:               module <-> moduli
20:              pegging <-> pigging
21:            psychoses <-> psychosis
22:               rabbet <-> rabbit
23:             sterling <-> stirling
24:             synopses <-> synopsis
25:               vector <-> victor
26:               welles <-> willis

Swift

import Foundation

func loadDictionary(path: String, minLength: Int) throws -> Set<String> {
    let contents = try String(contentsOfFile: path, encoding: String.Encoding.ascii)
    return Set<String>(contents.components(separatedBy: "\n").filter{$0.count >= minLength})
}

func pad(string: String, width: Int) -> String {
    return string.count >= width ? string
        : string + String(repeating: " ", count: width - string.count)
}

do {
    let dictionary = try loadDictionary(path: "unixdict.txt", minLength: 6)
    var words: [(String,String)] = []
    for word1 in dictionary {
        let word2 = word1.replacingOccurrences(of: "e", with: "i")
        if word1 != word2 && dictionary.contains(word2) {
            words.append((word1, word2))
        }
    }
    words.sort(by: {$0 < $1})
    for (n, (word1, word2)) in words.enumerated() {
        print(String(format: "%2d. %@ -> %@", n + 1, pad(string: word1, width: 10), word2))
    }
} catch {
    print(error.localizedDescription)
}
Output:
 1. analyses   -> analysis
 2. atlantes   -> atlantis
 3. bellow     -> billow
 4. breton     -> briton
 5. clench     -> clinch
 6. convect    -> convict
 7. crises     -> crisis
 8. diagnoses  -> diagnosis
 9. enfant     -> infant
10. enquiry    -> inquiry
11. frances    -> francis
12. galatea    -> galatia
13. harden     -> hardin
14. heckman    -> hickman
15. inequity   -> iniquity
16. inflect    -> inflict
17. jacobean   -> jacobian
18. marten     -> martin
19. module     -> moduli
20. pegging    -> pigging
21. psychoses  -> psychosis
22. rabbet     -> rabbit
23. sterling   -> stirling
24. synopses   -> synopsis
25. vector     -> victor
26. welles     -> willis

VBScript

Run it in CScript.
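For example, from a command prompt (the script name here is only an example): cscript //nologo change_e_to_i.vbs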

with createobject("ADODB.Stream")
  .charset ="UTF-8"
  .open
  .loadfromfile("unixdict.txt")
  s=.readtext
end with  
a=split (s,vblf)
set d=createobject("scripting.dictionary")
redim b(ubound(a))
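' d holds length>5 words containing "i" (lookup set); b() collects length>5 words containing "e" (candidates)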
i=0
for each x in a
  s=trim(x)
  if len(s)>5 then 
    if instr(s,"i") then d.add s,""
    if instr(s,"e") then b(i)=s:  i=i+1   
  end if
next
redim preserve b(i-1)

for i=0 to ubound(b)
  s=trim(b(i))
  s2=replace(s,"e","i")
  if d.exists(s2) then 
    wscript.echo left(s& space(10),10) & "-> " & s2
  end if 
next
Output:
analyses  -> analysis
atlantes  -> atlantis
bellow    -> billow
breton    -> briton
clench    -> clinch
convect   -> convict
crises    -> crisis
diagnoses -> diagnosis
enfant    -> infant
enquiry   -> inquiry
frances   -> francis
galatea   -> galatia
harden    -> hardin
heckman   -> hickman
inequity  -> iniquity
inflect   -> inflict
jacobean  -> jacobian
marten    -> martin
module    -> moduli
pegging   -> pigging
psychoses -> psychosis
rabbet    -> rabbit
sterling  -> stirling
synopses  -> synopsis
vector    -> victor
welles    -> willis

Wren

Library: Wren-sort
Library: Wren-fmt
import "io" for File
import "./sort" for Find
import "./fmt" for Fmt

var wordList = "unixdict.txt" // local copy
var count = 0
var words = File.read(wordList).trimEnd().split("\n").
    where { |w| w.count > 5 }.toList
for (word in words) {
    if (word.contains("e")) {
        var repl = word.replace("e", "i")
        if (Find.first(words, repl) >= 0) {  // binary search (unixdict.txt is already sorted)
            count = count + 1
            Fmt.print("$2d: $-9s -> $s", count, word, repl)
        }
    }
}
Output:
 1: analyses  -> analysis
 2: atlantes  -> atlantis
 3: bellow    -> billow
 4: breton    -> briton
 5: clench    -> clinch
 6: convect   -> convict
 7: crises    -> crisis
 8: diagnoses -> diagnosis
 9: enfant    -> infant
10: enquiry   -> inquiry
11: frances   -> francis
12: galatea   -> galatia
13: harden    -> hardin
14: heckman   -> hickman
15: inequity  -> iniquity
16: inflect   -> inflict
17: jacobean  -> jacobian
18: marten    -> martin
19: module    -> moduli
20: pegging   -> pigging
21: psychoses -> psychosis
22: rabbet    -> rabbit
23: sterling  -> stirling
24: synopses  -> synopsis
25: vector    -> victor
26: welles    -> willis

XPL0

string  0;              \use zero-terminated strings
int     Dict(26000);    \pointers to words (enough for unixdict.txt)
int     DictSize;       \actual number of pointers in Dict

func    StrCmp(A, B);   \Compare string A to B
char    A, B;           \Returns: >0 if A>B, =0 if A=B, and <0 if A<B
int     I;
[for I:= 0 to -1>>1 do
        [if A(I) # B(I) then return A(I) - B(I);
         if A(I) = 0 then return 0;
        ];
];      \StrCmp

func    LookUp(Word);   \Return 'true' if Word is in Dict
char    Word;
int     Lo, Hi, I, Cmp;
[Lo:= 0;  Hi:= DictSize-1;
loop    [I:= (Lo+Hi) / 2; \binary search
        Cmp:= StrCmp(Word, Dict(I));
        if Cmp < 0 then Hi:= I-1 else Lo:= I+1;
        if Cmp = 0 then return true;
        if Lo > Hi then return false;
        ];
];      \LookUp

int     I, DI, Ch, HasE;
char    Word, AltWord(25);      \(longest word in unixdict is 22 chars)
def     LF=$0A, CR=$0D, EOF=$1A;

[FSet(FOpen("unixdict.txt", 0), ^I); \load dictionary into Dict
OpenI(3);                        \assume alphabetical order and all lowercase
DI:= 0;                          \ignore non-alpha characters: 0..9, ' and &
repeat  Dict(DI):= Reserve(0);   \get pointer to memory used to store Word
        Word:= Dict(DI);
        I:= 0;
        loop    [repeat Ch:= ChIn(3) until Ch # CR;     \remove possible CR
                if Ch=LF or Ch=EOF then quit;
                Word(I):= Ch;
                I:= I+1;
                ];
        Word(I):= 0;            \terminate Word string
        I:= Reserve(I+1);       \reserve memory used for Word
        DI:= DI+1;              \next dictionary entry
until   Ch = EOF;
DictSize:= DI;

DI:= 0;
repeat  Word:= Dict(DI);
        I:= 0;  HasE:= false;
        loop    [Ch:= Word(I);
                AltWord(I):= Ch;
                if Ch = 0 then quit;
                if Ch = ^e then
                        [HasE:= true;
                        AltWord(I):= ^i;
                        ];
                I:= I+1;
                ];
        if HasE & I>5 then              \Word must be greater than 5 chars
                [if LookUp(AltWord) then
                        [Text(0, Word);
                        Text(0, " -> ");
                        Text(0, AltWord);
                        CrLf(0);
                        ];
                ];
        DI:= DI+1;
until   DI >= DictSize;
]
Output:
analyses -> analysis
atlantes -> atlantis
bellow -> billow
breton -> briton
clench -> clinch
convect -> convict
crises -> crisis
diagnoses -> diagnosis
enfant -> infant
enquiry -> inquiry
frances -> francis
galatea -> galatia
harden -> hardin
heckman -> hickman
inequity -> iniquity
inflect -> inflict
jacobean -> jacobian
marten -> martin
module -> moduli
pegging -> pigging
psychoses -> psychosis
rabbet -> rabbit
sterling -> stirling
synopses -> synopsis
vector -> victor
welles -> willis