Word wheel

From Rosetta Code

Revision as of 23:40, 24 July 2020

Word wheel is a draft programming task. It is not yet considered ready to be promoted as a complete task, for reasons that should be found in its talk page.

A "word wheel" is a type of word game commonly found on the "puzzle" page of newspapers. You are presented with nine letters arranged in a circle or 3×3 grid. The objective is to find as many words as you can using only the letters contained in the wheel or grid. Each word must contain the letter in the centre of the wheel or grid. Usually there will be a minimum word length of 3 or 4 characters. Each letter may only be used as many times as it appears in the wheel or grid.


An example
N D E
O K G
E L W
Task

Write a program to solve the above "word wheel" puzzle.

Specifically:

  • Find all words of 3 or more letters using only the letters in the string   ndeokgelw.
  • All words must contain the central letter   k.
  • Each letter may be used only as many times as it appears in the string.
  • For this task we'll use lowercase English letters exclusively.


A "word" is defined to be any string contained in the file located at   http://wiki.puzzlers.org/pub/wordlists/unixdict.txt.
If you prefer to use a different dictionary,   please state which one you have used.
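
To make the rules concrete, here is a minimal illustrative sketch (it is not one of the language entries below); it assumes a local copy of unixdict.txt and treats the wheel as a letter multiset via Python's collections.Counter:

<lang python>from collections import Counter

WHEEL = "ndeokgelw"
CENTRE = WHEEL[4]              # 'k', the middle letter of the grid
WHEEL_COUNT = Counter(WHEEL)   # how often each letter may be used

def fits(word):
    # at least 3 letters, must contain the centre letter, and no letter may
    # be used more often than it occurs in the wheel
    return (len(word) >= 3 and CENTRE in word
            and not (Counter(word) - WHEEL_COUNT))

with open("unixdict.txt") as f:
    print([w for w in f.read().split() if fits(w)])</lang>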

Optional extra

Word wheel puzzles usually state that there is at least one nine-letter word to be found. Using the above dictionary, find the 3x3 grids with at least one nine-letter solution that generate the largest number of words of three or more letters.
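
A brute-force approach, sketched here purely for illustration (again assuming a local copy of unixdict.txt): treat every nine-letter dictionary word as a candidate grid, try each of its distinct letters as the centre, and count the words that fit. The C++ and Wren entries below include the same idea as part of their solutions.

<lang python>from collections import Counter

with open("unixdict.txt") as f:
    words = [w for w in f.read().split() if w.isalpha() and 3 <= len(w) <= 9]
counts = [(w, Counter(w)) for w in words]     # pre-computed letter multisets

best, best_grids = 0, []
for wheel, wheel_count in ((w, c) for w, c in counts if len(w) == 9):
    # letter counts of every word that can be built from this wheel
    fitting = [c for _, c in counts if not (c - wheel_count)]
    for centre in set(wheel):
        n = sum(1 for c in fitting if c[centre] > 0)
        if n > best:
            best, best_grids = n, [(wheel, centre)]
        elif n == best:
            best_grids.append((wheel, centre))

print(best, best_grids)</lang>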

C

<lang c>#include <stdbool.h>

#include <stdio.h>

#define MAX_WORD 80
#define LETTERS 26

bool is_letter(char c) { return c >= 'a' && c <= 'z'; }

int index(char c) { return c - 'a'; }

void word_wheel(const char* letters, char central, int min_length, FILE* dict) {

   int max_count[LETTERS] = { 0 };
   for (const char* p = letters; *p; ++p) {
       char c = *p;
       if (is_letter(c))
           ++max_count[index(c)];
   }
   char word[MAX_WORD + 1] = { 0 };
   while (fgets(word, MAX_WORD, dict)) {
       int count[LETTERS] = { 0 };
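        // Count letters as we scan; abandon the word as soon as any letter
        // exceeds its count in the wheel, or a non-letter is seen.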
       for (const char* p = word; *p; ++p) {
           char c = *p;
           if (c == '\n') {
               if (p >= word + min_length && count[index(central)] > 0)
                   printf("%s", word);
           } else if (is_letter(c)) {
               int i = index(c);
               if (++count[i] > max_count[i]) {
                   break;
               }
           } else {
               break;
           }
       }
   }

}

int main(int argc, char** argv) {

   const char* dict = argc == 2 ? argv[1] : "unixdict.txt";
   FILE* in = fopen(dict, "r");
   if (in == NULL) {
       perror(dict);
       return 1;
   }
   word_wheel("ndeokgelw", 'k', 3, in);
   fclose(in);
   return 0;

}</lang>

Output:
eke
elk
keel
keen
keg
ken
keno
knee
kneel
knew
know
knowledge
kong
leek
week
wok
woke

C++

Library: Boost

The puzzle parameters can be set with command line options. The default values are as per the task description.

<lang cpp>#include <array>
#include <cassert>
#include <iostream>
#include <fstream>
#include <map>
#include <string>
#include <vector>
#include <boost/program_options.hpp>

// A multiset specialized for strings consisting of lowercase
// letters ('a' to 'z').
class letterset {
public:

   explicit letterset(const std::string& str) {
       count_.fill(0);
       for (char c : str)
           ++count_[index(c)];
   }
   bool contains(const letterset& set) const {
       for (size_t i = 0; i < count_.size(); ++i) {
           if (set.count_[i] > count_[i])
               return false;
       }
       return true;
   }
   unsigned int count(char c) const {
       return count_[index(c)];
   }
   bool is_valid() const {
       return count_[0] == 0;
   }

private:

   static bool is_letter(char c) { return c >= 'a' && c <= 'z'; }
   static int index(char c) { return is_letter(c) ? c - 'a' + 1 : 0; }
   // elements 1..26 contain the number of times each lowercase
   // letter occurs in the word
   // element 0 is the number of other characters in the word
   std::array<unsigned int, 27> count_;

};

template <typename iterator, typename separator>
std::string join(iterator begin, iterator end, separator sep) {

   std::string result;
   if (begin != end) {
       result += *begin++;
       for (; begin != end; ++begin) {
           result += sep;
           result += *begin;
       }
   }
   return result;

}

using dictionary = std::vector<std::pair<std::string, letterset>>;

dictionary load_dictionary(const std::string& filename, int min_length,
                           int max_length) {
   std::ifstream in(filename);
   if (!in)
       throw std::runtime_error("Cannot open file " + filename);
   std::string word;
   dictionary result;
   while (getline(in, word)) {
       if (word.size() < min_length)
           continue;
       if (word.size() > max_length)
           continue;
       letterset set(word);
       if (set.is_valid())
           result.emplace_back(word, set);
   }
   return result;

}

void word_wheel(const dictionary& dict, const std::string& letters,
                char central_letter) {
   letterset set(letters);
   if (central_letter == 0 && !letters.empty())
       central_letter = letters.at(letters.size()/2);
   std::map<size_t, std::vector<std::string>> words;
   for (const auto& pair : dict) {
       const auto& word = pair.first;
       const auto& subset = pair.second;
       if (subset.count(central_letter) > 0 && set.contains(subset))
           words[word.size()].push_back(word);
   }
   size_t total = 0;
   for (const auto& p : words) {
       const auto& v = p.second;
       auto n = v.size();
       total += n;
       std::cout << "Found " << n << " " << (n == 1 ? "word" : "words")
           << " of length " << p.first << ": "
           << join(v.begin(), v.end(), ", ") << '\n';
   }
   std::cout << "Number of words found: " << total << '\n';

}

void find_max_word_count(const dictionary& dict, int word_length) {

   size_t max_count = 0;
   std::vector<std::pair<std::string, char>> max_words;
   for (const auto& pair : dict) {
       const auto& word = pair.first;
       if (word.size() != word_length)
           continue;
       const auto& set = pair.second;
       dictionary subsets;
       for (const auto& p : dict) {
           if (set.contains(p.second))
               subsets.push_back(p);
       }
       std::array<bool, 26> done{false};
       for (size_t index = 0; index < word_length; ++index) {
           char central_letter = word[index];
           assert(central_letter >= 'a' && central_letter <= 'z');
           if (done[central_letter - 'a'])
               continue;
           done[central_letter - 'a'] = true;
           size_t count = 0;
           for (const auto& p : subsets) {
               const auto& subset = p.second;
               if (subset.count(central_letter) > 0)
                   ++count;
           }
           if (count > max_count) {
               max_words.clear();
               max_count = count;
           }
           if (count == max_count)
               max_words.emplace_back(word, central_letter);
       }
   }
   std::cout << "Maximum word count: " << max_count << '\n';
   std::cout << "Words of " << word_length << " letters producing this count:\n";
   for (const auto& pair : max_words)
       std::cout << pair.first << " with central letter " << pair.second << '\n';

}

constexpr const char* option_filename = "filename";
constexpr const char* option_wheel = "wheel";
constexpr const char* option_central = "central";
constexpr const char* option_min_length = "min-length";
constexpr const char* option_part2 = "part2";

int main(int argc, char** argv) {

   const int word_length = 9;
   int min_length = 3;
   std::string letters = "ndeokgelw";
   std::string filename = "unixdict.txt";
   char central_letter = 0;
   bool do_part2 = false;
   
   namespace po = boost::program_options;
   po::options_description desc("Allowed options");
   desc.add_options()
       (option_filename, po::value<std::string>(), "name of dictionary file")
       (option_wheel, po::value<std::string>(), "word wheel letters")
       (option_central, po::value<char>(), "central letter (defaults to middle letter of word)")
       (option_min_length, po::value<int>(), "minimum word length")
       (option_part2, "include part 2");
   try {
       po::variables_map vm;
       po::store(po::parse_command_line(argc, argv, desc), vm);
       po::notify(vm);
       if (vm.count(option_filename))
           filename = vm[option_filename].as<std::string>();
       if (vm.count(option_wheel))
           letters = vm[option_wheel].as<std::string>();
       if (vm.count(option_central))
           central_letter = vm[option_central].as<char>();
       if (vm.count(option_min_length))
           min_length = vm[option_min_length].as<int>();
       if (vm.count(option_part2))
           do_part2 = true;
       auto dict = load_dictionary(filename, min_length, word_length);
       // part 1
       word_wheel(dict, letters, central_letter);
       // part 2
       if (do_part2) {
           std::cout << '\n';
           find_max_word_count(dict, word_length);
       }
   } catch (const std::exception& ex) {
       std::cerr << ex.what() << '\n';
       return EXIT_FAILURE;
   }
   return EXIT_SUCCESS;

}</lang>

Output:

Output including optional part 2:

Found 5 words of length 3: eke, elk, keg, ken, wok
Found 10 words of length 4: keel, keen, keno, knee, knew, know, kong, leek, week, woke
Found 1 word of length 5: kneel
Found 1 word of length 9: knowledge
Number of words found: 17

Maximum word count: 215
Words of 9 letters producing this count:
claremont with central letter a
spearmint with central letter a

Delphi

Translation of: Wren

<lang Delphi> program Word_wheel;

{$APPTYPE CONSOLE}

{$R *.res}

uses

 System.SysUtils,
 System.Classes;

function IsInvalid(s: string): Boolean;
var

 c: char;
 leters: set of char;
 firstE: Boolean;

begin

 Result := (s.Length < 3) or (s.IndexOf('k') = -1) or (s.Length > 9);
 if not Result then
 begin
   leters := ['d', 'e', 'g', 'k', 'l', 'n', 'o', 'w'];
   firstE := true;
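    // A set of char cannot hold the wheel's two e's, so the set stores a
    // single 'e' and firstE allows one extra use before 'e' is excluded.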
   for c in s do
   begin
     if c in leters then
       if (c = 'e') and (firstE) then
         firstE := false
       else
         Exclude(leters, AnsiChar(c))
     else
       exit(true);
   end;
 end;

end;

var

 dict: TStringList;
 i: Integer;

begin

 dict := TStringList.Create;
 dict.LoadFromFile('unixdict.txt');
 for i := dict.count - 1 downto 0 do
   if IsInvalid(dict[i]) then
     dict.Delete(i);
 Writeln('The following ', dict.Count, ' words are the solutions to the puzzle:');
 Writeln(dict.Text);
 dict.Free;
 readln;

end.

</lang>

Factor

Works with: Factor version 0.99 2020-07-03

<lang factor>USING: assocs io.encodings.ascii io.files kernel math math.statistics prettyprint sequences sorting ;

! Only consider words longer than two letters and words that
! contain elt.

: pare ( elt seq -- new-seq )
    [ [ member? ] keep length 2 > and ] with filter ;

: words ( input-str path -- seq )
    [ [ midpoint@ ] keep nth ] [ ascii file-lines pare ] bi* ;

: ?<= ( m n/f -- ? ) dup f = [ nip ] [ <= ] if ;

! Can we make sequence 1 with the elements in sequence 2?

: can-make? ( seq1 seq2 -- ? )
    [ histogram ] bi@ [ swapd at ?<= ] curry assoc-all? ;

: solve ( input-str path -- seq )
    [ words ] keepd [ can-make? ] curry filter ;

"ndeokgelw" "unixdict.txt" solve [ length ] sort-with .</lang>

Output:
{
    "eke"
    "elk"
    "keg"
    "ken"
    "wok"
    "keel"
    "keen"
    "keno"
    "knee"
    "knew"
    "know"
    "kong"
    "leek"
    "week"
    "woke"
    "kneel"
    "knowledge"
}

Go

Translation of: Wren

<lang go>package main

import (

   "bytes"
   "fmt"
   "io/ioutil"
   "log"
   "sort"
   "strings"

)

func main() {

   b, err := ioutil.ReadFile("unixdict.txt")
   if err != nil {
       log.Fatal("Error reading file")
   }
   letters := "deegklnow"
   words := bytes.Split(b, []byte{'\n'})
   var found []string
   for _, word := range words {
       le := len(word)
       if le > 2 && le <= 9 && bytes.IndexByte(word, 'k') >= 0 {
           lets := letters
           ok := true
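            // letters ("deegklnow") is kept sorted, so sort.Search can
            // binary-search for each character; a matched letter is removed
            // so it cannot be used more often than it appears in the wheel.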
           for i := 0; i < le; i++ {
               c := word[i]
               ix := sort.Search(len(lets), func(i int) bool { return lets[i] >= c })
               if ix < len(lets) && lets[ix] == c {
                   lets = lets[0:ix] + lets[ix+1:]
               } else {
                   ok = false
                   break
               }
           }
           if ok {
               found = append(found, string(word))
           }
       }
   }
   fmt.Println("The following", len(found), "words are the solutions to the puzzle:")
   fmt.Println(strings.Join(found, "\n"))

}</lang>

Output:
The following 17 words are the solutions to the puzzle:
eke
elk
keel
keen
keg
ken
keno
knee
kneel
knew
know
knowledge
kong
leek
week
wok
woke

Haskell

<lang haskell>import System.IO (readFile)
import Data.Char (toLower)
import Data.List (sort)

gridWords :: [String] -> [String] -> [String]
gridWords grid =

 filter (((&&) . (2 <) . length) <*> (((&&) . elem mid) <*> wheelFit wheel))
 where
   cs = toLower <$> concat grid
   wheel = sort cs
   mid = cs !! 4

-- Both the wheel and the word are sorted, so a single left-to-right scan
-- checks that the word's letters form a sub-multiset of the wheel's letters.
wheelFit :: String -> String -> Bool
wheelFit wheel word = go wheel (sort word)

 where
   go _ [] = True
   go [] _ = False
   go (w:ws) ccs@(c:cs)
     | w == c = go ws cs
     | otherwise = go ws ccs

main :: IO ()
main = do

 s <- readFile "unixdict.txt"
 mapM_ putStrLn $ gridWords ["NDE", "OKG", "ELW"] (lines s)</lang>
Output:
eke
elk
keel
keen
keg
ken
keno
knee
kneel
knew
know
knowledge
kong
leek
week
wok
woke

JavaScript

A version using local access to the dictionary, through the macOS JavaScript for Automation API.

Works with: JXA

<lang JavaScript>(() => {

   'use strict';
   // main :: IO ()
   const main = () =>
       console.log(unlines(
           gridWords(['NDE', 'OKG', 'ELW'])(
               lines(readFile('unixdict.txt'))
           )
       ));
   // gridWords :: [String] -> [String] -> [String]
   const gridWords = grid =>
       lexemes => {
           const
               wheel = sort(toLower(concat(grid))),
               wSet = new Set(wheel),
               mid = wheel[4];
           return lexemes.filter(w => {
               const cs = chars(w);
               return 2 < cs.length && cs.every(
                   c => wSet.has(c)
               ) && elem(mid)(cs) && (
                   wheelFit(wheel, cs)
               );
           });
       };
   // wheelFit :: [Char] -> [Char] -> Bool
   const wheelFit = (wheel, word) => {
       const go = (ws, cs) =>
           0 === cs.length ? (
               true
           ) : 0 === ws.length ? (
               false
           ) : ws[0] === cs[0] ? (
               go(ws.slice(1), cs.slice(1))
           ) : go(ws.slice(1), cs);
       return go(wheel, sort(word));
   };
   // ----------------- GENERIC FUNCTIONS -----------------
   // chars :: String -> [Char]
   const chars = s =>
        s.split('');
   // concat :: a -> [a]
   // concat :: [String] -> String
   const concat = xs => (
       ys => 0 < ys.length ? (
           ys.every(Array.isArray) ? (
               []
            ) : ''
       ).concat(...ys) : ys
   )(list(xs));
   // elem :: Eq a => a -> [a] -> Bool
   const elem = x =>
       // True if xs contains an instance of x.
       xs => xs.some(y => x === y);
   // lines :: String -> [String]
   const lines = s =>
       // A list of strings derived from a single
       // newline-delimited string.
       0 < s.length ? (
           s.split(/[\r\n]/)
       ) : [];
   // list :: StringOrArrayLike b => b -> [a]
   const list = xs =>
       // xs itself, if it is an Array,
       // or an Array derived from xs.
       Array.isArray(xs) ? (
           xs
       ) : Array.from(xs || []);
   // readFile :: FilePath -> IO String
   const readFile = fp => {
       // The contents of a text file at the
       // path file fp.
       const
           e = $(),
           ns = $.NSString
           .stringWithContentsOfFileEncodingError(
               $(fp).stringByStandardizingPath,
               $.NSUTF8StringEncoding,
               e
           );
       return ObjC.unwrap(
           ns.isNil() ? (
               e.localizedDescription
           ) : ns
       );
   };
   // sort :: Ord a => [a] -> [a]
   const sort = xs => list(xs).slice()
       .sort((a, b) => a < b ? -1 : (a > b ? 1 : 0));
   // toLower :: String -> String
   const toLower = s =>
       // Lower-case version of string.
       s.toLocaleLowerCase();
   // unlines :: [String] -> String
   const unlines = xs =>
       // A single string formed by the intercalation
       // of a list of strings with the newline character.
       xs.join('\n');
   // MAIN ---
   return main();

})();</lang>

Output:
eke
elk
keel
keen
keg
ken
keno
knee
kneel
knew
know
knowledge
kong
leek
week
wok
woke

Julia

<lang julia>using Combinatorics

const tfile = download("http://wiki.puzzlers.org/pub/wordlists/unixdict.txt")
const wordlist = Dict(w => 1 for w in split(read(tfile, String), r"\s+"))

function wordwheel(wheel, central)

   returnlist = String[]
   for combo in combinations([string(i) for i in wheel])
       if central in combo && length(combo) > 2
           for perm in permutations(combo)
               word = join(perm)
               if haskey(wordlist, word) && !(word in returnlist)
                   push!(returnlist, word)
               end
           end
       end
   end
   return returnlist

end

println(wordwheel("ndeokgelw", "k"))

</lang>

Output:
["ken", "keg", "eke", "elk", "wok", "keno", "knee", "keen", "knew", "kong", "know", "woke", "keel", "leek", "week", "kneel", "knowledge"]

Faster but less general version

<lang julia>const tfile = download("http://wiki.puzzlers.org/pub/wordlists/unixdict.txt")
const wordarraylist = [[string(c) for c in w] for w in split(read(tfile, String), r"\s+")]

function wordwheel2(wheel, central)

   warr, maxlen = [string(c) for c in wheel], length(wheel)
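   # keep words of a valid length that contain the central letter and use no
   # character more often than it occurs in the wheel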
   returnarraylist = filter(a -> 2 < length(a) <= maxlen && central in a &&
           all(c -> sum(x -> x == c, a) <= sum(x -> x == c, warr), a), wordarraylist)
   return join.(returnarraylist)

end

println(wordwheel2("ndeokgelw", "k"))

</lang>

Output:
["eke", "elk", "keel", "keen", "keg", "ken", "keno", "knee", "kneel", "knew", "know", "knowledge", "kong", "leek", "week", "wok", "woke"]

Perl

<lang perl>#!/usr/bin/perl

use strict; # https://rosettacode.org/wiki/Word_wheel
use warnings;
use Path::Tiny;

$_ = lc <<END;

                   N  D  E
                   O  K  G
                   E  L  W

END

my @letters = /\w/g;
my $center = $letters[@letters / 2];
my $regex = join '?', sort(@letters), '';
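# $regex is "d?e?e?g?k?l?n?o?w?": a word's letters, sorted and joined, must
# match it in full, so each wheel letter can be used at most once.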

my @words = grep /$center/ && (join '', sort /./g) =~ /^$regex$/,

 path('unixdict.txt')->slurp =~ /^[a-z]{3,}$/gm;

print "@words\n" =~ s/.{40}\K /\n/gr;</lang>

Output:
eke elk keel keen keg ken keno knee kneel
knew know knowledge kong leek week wok woke

Phix

Requires 0.8.2+ (fixed some glitches in join_by())

<lang Phix>constant wheel = "ndeokgelw",
         musthave = wheel[5]

sequence words = {}
integer fn = open(join_path({"demo","unixdict.txt"}),"r")
if fn=-1 then crash("unixdict.txt not found") end if
while 1 do
   object word = lower(trim(gets(fn)))
   if atom(word) then exit end if -- eof
   if length(word)>=3 and find(musthave,word) then
       string remaining = wheel
       for i=length(word) to 1 by -1 do -- (-1 for 1==done test)
           integer k = find(word[i],remaining)
           if k=0 then exit end if
           if i=1 then words = append(words,word) exit end if
           remaining[k] = '\0' -- (prevent re-use)
       end for
   end if

end while
close(fn)
string jbw = join_by(words,1,9," ","\n ")
printf(1, "The following %d words were found:\n %s\n",{length(words),jbw})</lang>

Output:
The following 17 words were found:
 eke elk keel keen keg ken keno knee kneel
 knew know knowledge kong leek week wok woke

Python

<lang python>import urllib.request
from collections import Counter


GRID = """ N D E O K G E L W """


def getwords(url='http://wiki.puzzlers.org/pub/wordlists/unixdict.txt'):

   "Return lowercased words of 3 to 9 characters"
   words = urllib.request.urlopen(url).read().decode().strip().lower().split()
   return (w for w in words if 2 < len(w) < 10)

def solve(grid, dictionary):

   gridcount = Counter(grid)
   mid = grid[4]
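   # Counter subtraction keeps only positive counts, so an empty difference
   # means the word uses no letter more often than the grid provides it.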
   return [word for word in dictionary
           if mid in word and not (Counter(word) - gridcount)]


if __name__ == '__main__':

   chars = ''.join(GRID.strip().lower().split())
   found = solve(chars, dictionary=getwords())
   print('\n'.join(found))</lang>
Output:
eke
elk
keel
keen
keg
ken
keno
knee
kneel
knew
know
knowledge
kong
leek
week
wok
woke


Or, using a local copy of the dictionary, and a recursive test of wheel fit:

<lang python>'''Word wheel'''

from os.path import expanduser


# gridWords :: [String] -> [String] -> [String]

def gridWords(grid):

   '''The subset of words in ws which contain the
      central letter of the grid, and can be completed
      by single uses of some or all of the remaining
      letters in the grid.
   '''
   def go(ws):
       cs = ''.join(grid).lower()
       wheel = sorted(cs)
       wset = set(wheel)
       mid = cs[4]
       return [
           w for w in ws
           if 2 < len(w) and (mid in w) and (
               all(c in wset for c in w)
           ) and wheelFit(wheel, w)
       ]
   return go


# wheelFit :: String -> String -> Bool

def wheelFit(wheel, word):

   '''True if a given word can be constructed
      from (single uses of) some subset of
      the letters in the wheel.
   '''
   def go(ws, cs):
       return True if not cs else (
           False if not ws else (
               go(ws[1:], cs[1:]) if ws[0] == cs[0] else (
                   go(ws[1:], cs)
               )
           )
       )
   return go(wheel, sorted(word))


# -------------------------- TEST --------------------------
# main :: IO ()

def main():

   '''Word wheel matches for a given grid in a copy of
      http://wiki.puzzlers.org/pub/wordlists/unixdict.txt
   '''
   print('\n'.join(
       gridWords(['NDE', 'OKG', 'ELW'])(
           readFile('~/unixdict.txt').splitlines()
       )
   ))


# ------------------------ GENERIC -------------------------
# readFile :: FilePath -> IO String

def readFile(fp):

   '''The contents of any file at the path
      derived by expanding any ~ in fp.
   '''
   with open(expanduser(fp), 'r', encoding='utf-8') as f:
       return f.read()


# MAIN ---

if __name__ == '__main__':

   main()</lang>
Output:
eke
elk
keel
keen
keg
ken
keno
knee
kneel
knew
know
knowledge
kong
leek
week
wok
woke

Raku

Works with: Rakudo version 2020.05

Everything is adjustable through command line parameters.

Defaults to task specified wheel, unixdict.txt, minimum 3 letters.

Using Terminal::Boxer from the Raku ecosystem.

<lang perl6>use Terminal::Boxer;

my %*SUB-MAIN-OPTS = :named-anywhere;

unit sub MAIN ($wheel = 'ndeokgelw', :$dict = './unixdict.txt', :$min = 3);

my $must-have = $wheel.comb[4].lc;

my $has = $wheel.comb».lc.Bag;

my %words;
$dict.IO.slurp.words».lc.map: {

   next if not .contains($must-have) or .chars < $min;
   %words{.chars}.push: $_ if .comb.Bag ⊆ $has;

};

say "Using $dict, minimum $min letters.";

print rs-box :3col, :3cw, :indent("\t"), $wheel.comb».uc;

printf "%d letters: %s\n", .key, .value.sort.join(', ') for %words.sort; </lang>

Output:
Using defaults

<lang>raku word-wheel.raku</lang>

Using ./unixdict.txt, minimum 3 letters.
	╭───┬───┬───╮
	│ N │ D │ E │
	├───┼───┼───┤
	│ O │ K │ G │
	├───┼───┼───┤
	│ E │ L │ W │
	╰───┴───┴───╯
3 letters:  eke, elk, keg, ken, wok
4 letters:  keel, keen, keno, knee, knew, know, kong, leek, week, woke
5 letters:  kneel
9 letters:  knowledge
Larger dictionary

Using the much larger dictionary words.txt file from https://github.com/dwyl/english-words

<lang>raku word-wheel.raku --dict=./words.txt</lang>

Using ./words.txt, minimum 3 letters.
	╭───┬───┬───╮
	│ N │ D │ E │
	├───┼───┼───┤
	│ O │ K │ G │
	├───┼───┼───┤
	│ E │ L │ W │
	╰───┴───┴───╯
3 letters:  dkg, dkl, eek, egk, eke, ekg, elk, gok, ked, kee, keg, kel, ken, keo, kew, kln, koe, kol, kon, lek, lgk, nek, ngk, oke, owk, wok
4 letters:  deek, deke, doek, doke, donk, eked, elke, elko, geek, genk, gonk, gowk, keel, keen, keld, kele, kend, keno, keon, klee, knee, knew, know, koel, koln, kone, kong, kwon, leek, leke, loke, lonk, okee, oken, week, welk, woke, wolk, wonk
5 letters:  dekle, dekow, gleek, kedge, kendo, kleon, klong, kneed, kneel, knowe, konde, oklee, olnek, woken
6 letters:  gowked, keldon, kelwen, knowle, koleen
8 letters:  weeklong
9 letters:  knowledge
Exercise adjustable parameters

<lang>raku word-wheel.raku iuymslleb --dict=./words.txt --min=4</lang>

Using ./words.txt, minimum 4 letters.
	╭───┬───┬───╮
	│ I │ U │ Y │
	├───┼───┼───┤
	│ M │ S │ L │
	├───┼───┼───┤
	│ L │ E │ B │
	╰───┴───┴───╯
4 letters:  bels, beys, bise, blus, bmus, bsem, bsie, bslm, bsme, bums, busy, buys, byes, eisb, elis, ells, elms, elsi, elsy, elys, emus, emys, ills, ilse, imsl, isle, islm, islu, ismy, leis, leys, libs, lies, lise, lues, luis, lums, lyes, lyse, mels, mibs, mils, mise, misy, msie, musb, muse, sbli, sell, semi, siey, sile, sill, sime, sium, slbm, sleb, sley, slim, slub, slue, slum, suey, suiy, sull, sumi, sumy, syli, syll, uims
5 letters:  belis, bells, belus, bemis, biles, bills, bisme, blues, bulls, bulse, busey, buyse, eblis, ellis, embus, emuls, eulis, ileus, illus, ilyse, isbel, iseum, lesiy, lesli, lesly, lieus, liles, limbs, limes, limsy, lisle, lubes, luise, lusby, lyles, melis, mells, miles, mills, misly, mlles, mules, mulls, mulse, musie, musil, myles, mysel, sebum, selby, selim, selli, selly, sibel, sible, sibyl, silly, silyl, simul, slily, slime, slimy, smell, smile, smily, sully, sybil, syble, yells, yills, ylems, yules, yusem
6 letters:  bellis, bisley, bluesy, blueys, bluism, blumes, bulies, bullis, busily, elymus, embusy, illyes, imbues, libels, libuse, limbus, limeys, milles, milsey, muesli, muleys, musily, mysell, sibell, sibley, simule, slimly, smelly, smiley, umbels, umbles
7 letters:  besully, bullies, bullism, elysium, illumes, mulleys, sibylle, silybum, sublime, sybille
8 letters:  bullyism, semibull
9 letters:  sublimely

REXX

Quite a bit of boilerplate was included in this REXX example.

No assumption was made as to the "case" of the words (upper/lower/mixed case).   Duplicate words were detected and
eliminated   (god and God),   as were words that weren't composed entirely of Roman (Latin) letters.

The number of minimum letters can be specified,   as well as the dictionary fileID and the letters in the word wheel (grid).

Additional information is also provided concerning how many words have been skipped due to the various filters.

<lang rexx>/*REXX pgm finds (dictionary) words which can be found in a specified word wheel (grid).*/
parse arg grid minL iFID .                       /*obtain optional arguments from the CL*/
if grid==''|grid==","  then grid= 'ndeokgelw'    /*Not specified?  Then use the default.*/
if minL==''|minL==","  then minL= 3              /* "      "         "   "   "     "    */
if iFID==''|iFID==","  then iFID= 'UNIXDICT.TXT' /* "      "         "   "   "     "    */
oMinL= minL;   minL= abs(minL)                   /*if negative, then don't show a list. */
gridU= grid;   upper gridU                       /*get an uppercase version of the grid.*/
Lg= length(grid);   Hg= Lg % 2  +  1             /*get length of grid & the middle char.*/
ctr= substr(grid, Hg, 1);   upper ctr            /*get uppercase center letter in grid. */
wrds= 0                                          /*#  words that are in the dictionary. */
wees= 0                                          /*"    "    "   "   "   too short.     */
bigs= 0                                          /*"    "    "   "   "   too long.      */
dups= 0                                          /*"    "    "   "   "   duplicates.    */
ills= 0                                          /*"    "    "  contain "not" letters.  */
good= 0                                          /*"    "    "  contain center letter.  */
say '                            Reading the file: '   iFID
@.= .                                            /*uppercase non─duplicated dict. words.*/
$=                                               /*the list of dictionary words in grid.*/

    do recs=0  while lines(iFID)\==0            /*process all words in the dictionary. */
    u= space( linein(iFID), 0);   upper u       /*elide blanks;  uppercase the word.   */
    L= length(u)                                /*obtain the length of the word.       */
    if @.u\==.           then do; dups= dups+1; iterate; end  /*is this a duplicate?   */
    if L<minL            then do; wees= wees+1; iterate; end  /*is the word too short? */
    if L>Lg              then do; bigs= bigs+1; iterate; end  /*is the word too long?  */
    if \datatype(u,'M')  then do; ills= ills+1; iterate; end  /*has word non─letters?  */
    @.u=                                        /*signify that  U  is a dictionary word*/
    wrds= wrds + 1                              /*bump the number of "good" dist. words*/
    if pos(ctr, u)==0        then iterate       /*word doesn't have center grid letter.*/
    good= good + 1                              /*bump # center─letter words in dict.  */
    if verify(u, gridU)\==0  then iterate       /*word contains a letter not in grid.  */
    if pruned(u, gridU)      then iterate       /*have all the letters not been found? */
    $= $ u                                      /*add this word to the "found" list.   */
    end   /*recs*/

say
say '    number of  records (words) in the dictionary: '   right( commas(recs), 9)
say '    number of ill─formed words in the dictionary: '   right( commas(ills), 9)
say '    number of  duplicate words in the dictionary: '   right( commas(dups), 9)
say '    number of  too─small words in the dictionary: '   right( commas(wees), 9)
say '    number of  too─long  words in the dictionary: '   right( commas(bigs), 9)
say '    number of acceptable words in the dictionary: '   right( commas(wrds), 9)
say '    number center─letter words in the dictionary: '   right( commas(good), 9)
say '    the minimum length of words that can be used: '   right( commas(minL), 9)
say '                the word wheel (grid) being used: '   grid
say '      center of the word wheel (grid) being used: '   right('↑', Hg)
say;   #= words($);   $= strip($)
say '    number of word wheel words in the dictionary: '   right( commas(#), 9)
if #==0 | oMinL<0  then exit #
say
say '    The list of word wheel words found:';   say copies('─', length($));   say lower($)
exit #                                           /*stick a fork in it,  we're all done. */
/*──────────────────────────────────────────────────────────────────────────────────────*/
lower:  arg aa;  @='abcdefghijklmnopqrstuvwxyz';  @u=@;  upper @u;  return translate(aa,@,@U)
commas: parse arg _;  do jc=length(_)-3  to 1  by -3;  _=insert(',', _, jc);  end;  return _
/*──────────────────────────────────────────────────────────────────────────────────────*/
pruned: procedure;  parse arg aa,gg              /*obtain word to be tested, & the grid.*/

          do n=1  for length(aa);    p= pos( substr(aa,n,1), gg);  if p==0  then return 1
          gg= overlay(., gg, p)                 /*"rub out" the found character in grid*/
          end   /*n*/;               return 0   /*signify that the  AA  passed the test*/</lang>
output   when using the default inputs:
                            Reading the file:  UNIXDICT.TXT

    number of  records (lines) in the dictionary:     25,105
    number of ill─formed words in the dictionary:        123
    number of  duplicate words in the dictionary:          0
    number of  too─small words in the dictionary:        159
    number of  too─long  words in the dictionary:      4,158
    number of acceptable words in the dictionary:     20,664
    number center─letter words in the dictionary:      1,630
    the minimum length of words that can be used:          3
                the word wheel (grid) being used:  ndeokgelw
      center of the word wheel (grid) being used:      ↑

    number of word wheel words in the dictionary:         17

    The list of word wheel words found:
─────────────────────────────────────────────────────────────────────────────────────
eke elk keel keen keg ken keno knee kneel knew know knowledge kong leek week wok woke

Note:   using my "personal" dictionary that I built   (947,359   words),   there are   178   words that are in the (above) word wheel.


output   when using the inputs:     satRELinp   -3

(I am trying for a maximum word wheel count for the   UNIXDICT   dictionary;
the negative minimum word length indicates to   not   list the words found.)
Thanks to userid   Paddy3118,   a better grid was found.

                            Reading the file:  UNIXDICT.TXT

    number of  records (lines) in the dictionary:     25,105
    number of ill─formed words in the dictionary:        123
    number of  duplicate words in the dictionary:          0
    number of  too─small words in the dictionary:        159
    number of  too─long  words in the dictionary:      4,158
    number of acceptable words in the dictionary:     20,664
    number center─letter words in the dictionary:     11,623
    the minimum length of words that can be used:          3
                the word wheel (grid) being used:  satRELinp
      center of the word wheel (grid) being used:      ↑

    number of word wheel words in the dictionary:        234
output   when using the inputs:     setRALinp   -3

Thanks to userid   Simonjsaunders,   a better grid was found.

                            Reading the file:  UNIXDICT.TXT

    number of  records (words) in the dictionary:     25,104
    number of ill─formed words in the dictionary:        123
    number of  duplicate words in the dictionary:          0
    number of  too─small words in the dictionary:        159
    number of  too─long  words in the dictionary:      4,158
    number of acceptable words in the dictionary:     20,664
    number center─letter words in the dictionary:     10,369
    the minimum length of words that can be used:          3
                the word wheel (grid) being used:  setRALinp
      center of the word wheel (grid) being used:      ↑

    number of word wheel words in the dictionary:        248

Wren

Library: Wren-sort
Library: Wren-seq

<lang ecmascript>import "io" for File
import "/sort" for Find, Sort
import "/seq" for Lst

var letters = ["d", "e", "e", "g", "k", "l", "n", "o","w"]

var words = File.read("unixdict.txt").split("\n")
var found = []
for (word in words) {

   if (word.count > 2 && word.count <= 9 && word.indexOf("k") >= 0) {
       var lets = letters.toList
       var ok = true
       for (c in word) {
           var ix = Find.first(lets, c)
           if (ix == - 1) {
               ok = false
               break
           }
           lets.removeAt(ix)
       }
       if (ok) found.add(word)
   }

}

System.print("The following %(found.count) words are the solutions to the puzzle:")
System.print(found.join("\n"))

// optional extra
var mostFound = 0
var mostWords9 = []
var mostLetters = []
// iterate through all 9 letter words in the dictionary
for (word9 in words.where { |w| w.count == 9 }) {

   letters = word9.toList
   Sort.quick(letters)
   // get distinct letters
   var distinctLetters = Lst.distinct(letters)
   // place each distinct letter in the middle and see what we can do with the rest
   for (letter in distinctLetters) {
       found = []
       for (word in words) {
           if (word.count > 2 && word.count <= 9 && word.indexOf(letter) >= 0) {
               var lets = letters.toList
               var ok = true
               for (c in word) {
                   var ix = Find.first(lets, c)
                   if (ix == - 1) {
                       ok = false
                       break
                   }
                   lets.removeAt(ix)
               }
               if (ok) found.add(word)
           }
       }
       var count = found.count
       if (count > mostFound) {
           mostFound = count
           mostWords9 = [word9]
           mostLetters = [letter]
       } else if (count == mostFound) {
           mostWords9.add(word9)
           mostLetters.add(letter)
       }
   }

}

System.print("\nMost words found = %(mostFound)")
System.print("Nine letter words producing this total:")
for (i in 0...mostWords9.count) {

   System.print("%(mostWords9[i]) with central letter '%(mostLetters[i])'")

}</lang>

Output:
The following 17 words are the solutions to the puzzle:
eke
elk
keel
keen
keg
ken
keno
knee
kneel
knew
know
knowledge
kong
leek
week
wok
woke

Most words found = 215
Nine letter words producing this total:
claremont with central letter 'a'
spearmint with central letter 'a'