Teacup rim text: Difference between revisions
m
→{{header|Wren}}: Minor tidy
Drkameleon (talk | contribs) |
m (→{{header|Wren}}: Minor tidy) |
||
(8 intermediate revisions by 4 users not shown) | |||
Line 34:
=={{header|11l}}==
<
R s[1..]‘’s[0]
Line 59:
print()
L.break</syntaxhighlight>
{{out}}
Line 70:
=={{header|Arturo}}==
<
rotateable?: function [w][
loop 1..dec size w 'i [
rotated: rotate
if or? [rotated = w][not? contains? wordset rotated] ->
return false
Line 84:
loop select wordset 'word [3 =< size word] 'word [
if rotateable? word ->
'results ++ @[ sort map 1..size word 'i [ rotate
]
loop sort unique results 'result
print join.with: " -> " map 1..size root 'i [ rotate.left root i]
]</syntaxhighlight>
{{out}}
<pre>
=={{header|AutoHotkey}}==
<
oWord := [], oRes := [], n := 0
for i, w in StrSplit(wList, "`n", "`r")
Line 125 ⟶ 127:
rotate(w){
return SubStr(w, 2) . SubStr(w, 1, 1)
}</syntaxhighlight>
Examples:<
result := ""
for i, v in Teacup_rim_text(wList)
result .= v "`n"
MsgBox % result
return</syntaxhighlight>
{{out}}
<pre>apt,pta,tap
Line 138 ⟶ 140:
=={{header|AWK}}==
<syntaxhighlight lang="awk">
# syntax: GAWK -f TEACUP_RIM_TEXT.AWK UNIXDICT.TXT
#
Line 179 ⟶ 181:
exit(0)
}
</syntaxhighlight>
{{out}}
<p>using UNIXDICT.TXT</p>
Line 199 ⟶ 201:
=={{header|BaCon}}==
<
dict$ = LOAD$(DIRNAME$(ME$) & "/unixdict.txt")
Line 216 ⟶ 218:
PRINT result$
PRINT "Total words: ", AMOUNT(dict$, NL$), ", and ", AMOUNT(result$, NL$), " are circular."</
{{out}}
Using 'unixdict.txt':
Line 234 ⟶ 236:
=={{header|C}}==
{{libheader|GLib}}
<
#include <stdio.h>
#include <stdlib.h>
Line 340 ⟶ 342:
g_ptr_array_free(dictionary, TRUE);
return EXIT_SUCCESS;
}</syntaxhighlight>
{{out}}
Line 359 ⟶ 361:
=={{header|C++}}==
<
#include <fstream>
#include <iostream>
Line 416 ⟶ 418:
}
return EXIT_SUCCESS;
}</syntaxhighlight>
{{out}}
Line 435 ⟶ 437:
=={{header|F_Sharp|F#}}==
<
// Teacup rim text. Nigel Galloway: August 7th., 2019
// N: the 3-letter words from dict.txt using more than one distinct letter (drops e.g. "aaa").
let N=System.IO.File.ReadAllLines("dict.txt")|>Array.filter(fun n->String.length n=3 && Seq.length(Seq.distinct n)>1)|>Set.ofArray
// fG z: every word of N rotated by z places — Seq.permute sends the char at index g to index (g+z)%3.
let fG z=Set.map(fun n->System.String(Array.ofSeq (Seq.permute(fun g->(g+z)%3)n))) N
// Keep words whose every rotation is also a word (intersection of N with both rotated sets),
// then print one representative per rotation ring (distinctBy the sorted letters).
Set.intersectMany [N;fG 1;fG 2]|>Seq.distinctBy(Seq.sort>>Array.ofSeq>>System.String)|>Seq.iter(printfn "%s")
</syntaxhighlight>
{{out}}
<pre>
Line 451 ⟶ 453:
=={{header|Factor}}==
<
http.client kernel math prettyprint sequences sequences.extras
sets sorting splitting ;
Line 458 ⟶ 460:
"\n" split [ { [ length 3 < ] [ all-equal? ] } 1|| ] reject
[ [ all-rotations ] map ] [ >hash-set ] bi
'[ [ _ in? ] all? ] filter [ natural-sort ] map members .</syntaxhighlight>
{{out}}
<pre>
Line 471 ⟶ 473:
=={{header|Go}}==
<
import (
Line 540 ⟶ 542:
fmt.Println()
}
}</syntaxhighlight>
{{out}}
Line 562 ⟶ 564:
===Using Data.Set===
Circular words of more than 2 characters in a local copy of a word list.
<
import qualified Data.Set as S
import Data.Ord (comparing)
Line 592 ⟶ 594:
filter
((1 <) . length)
(groupBy (on (==) fst) (sortBy (comparing fst) (((,) =<< sort) <$> xs)))</syntaxhighlight>
{{Out}}
<pre>arc -> car -> rca
Line 603 ⟶ 605:
Or taking a different approach, we can avoid the use of Data.Set by obtaining the groups of anagrams (of more than two characters) in the lexicon, and filtering out a circular subset of these:
<
import Data.List (groupBy, intercalate, sort, sortOn)
import Data.Ord (comparing)
Line 649 ⟶ 651:
rotated :: [a] -> [a]
rotated [] = []
rotated (x : xs) = xs <> [x]</syntaxhighlight>
{{Out}}
<pre>arc -> rca -> car
Line 658 ⟶ 660:
=={{header|J}}==
<syntaxhighlight lang="j"> >@{.@> (#~ (=&#>@{.)@> * 2 < #@>)(</.~ {.@/:~@(|."0 1~ i.@#)L:0)cutLF fread'unixdict.txt'
apt
arc
ate</syntaxhighlight>
In other words, group words by their canonical rotation (from all rotations: the earliest, alphabetically), select groups with at least three different words, where the word count matches the letter count, then extract the first word from each group.
=={{header|Java}}==
{{trans|C++}}
<
import java.util.*;
Line 796 ⟶ 728:
return ch;
}
}</syntaxhighlight>
{{out}}
Line 818 ⟶ 750:
Reading a local dictionary with the macOS JS for Automation library:
{{Works with|JXA}}
<
'use strict';
Line 975 ⟶ 907:
// MAIN ---
return main();
})();</syntaxhighlight>
{{Out}}
<pre>arc -> car -> rca
Line 986 ⟶ 918:
Reading a local dictionary with the macOS JS for Automation library:
{{Works with|JXA}}
<
'use strict';
Line 1,125 ⟶ 1,057:
// MAIN ---
return main();
})();</syntaxhighlight>
{{Out}}
<pre>arc -> rca -> car
Line 1,140 ⟶ 1,072:
`keys`; this slows it down a lot.
<
def read_teacup:
. as $in
Line 1,170 ⟶ 1,102:
# The task:
teacup_words
| read_teacup</syntaxhighlight>
{{out}}
Invocation example: jq -nRc -f teacup-rim.jq unixdict.txt
Line 1,182 ⟶ 1,114:
=={{header|Julia}}==
Using the MIT 10000 word list, and excluding words of less than three letters, to reduce output length.
<
rotate(s, n) = String(circshift(Vector{UInt8}(s), n))
Line 1,197 ⟶ 1,129:
foreach(println, getteawords("https://www.mit.edu/~ecprice/wordlist.10000"))
</syntaxhighlight>
<pre>
["aim", "ima", "mai"]
Line 1,211 ⟶ 1,143:
Using https://www.mit.edu/~ecprice/wordlist.10000 as per the Julia example.
<
const wc = new CS.System.Net.WebClient();
const lines = wc.DownloadString("http://wiki.puzzlers.org/pub/wordlists/unixdict.txt");
Line 1,240 ⟶ 1,172:
.filter(key => collection[key].length > 1)
.forEach(key => console.log("%s", collection[key].join(", ")));
</syntaxhighlight>
<pre>
apt, pta, tap
Line 1,248 ⟶ 1,180:
=={{header|Mathematica}}/{{header|Wolfram Language}}==
<
TeacuppableHelper[set_List] := Module[{f, s},
f = First[set];
Line 1,268 ⟶ 1,200:
s = s[[All, All, 1]];
s //= Select[StringLength[First[#]] <= Length[#] &];
Flatten[Teacuppable /@ s, 1]</syntaxhighlight>
{{out}}
<pre>{{"apt", "pta", "tap"}, {"arc", "car", "rca"}, {"ate", "eat", "tea"}}</pre>
Line 1,274 ⟶ 1,206:
=={{header|Nim}}==
<
let words = collect(initHashSet, for word in "unixdict.txt".lines: {word})
Line 1,303 ⟶ 1,235:
w.rotate()
stdout.write " → ", w
echo()</syntaxhighlight>
{{out}}
Line 1,312 ⟶ 1,244:
=={{header|Perl}}==
{{trans|Raku}}
<
use warnings;
use feature 'say';
Line 1,346 ⟶ 1,278:
}
say join ', ', uniqstr @$_ for sort @teacups;</syntaxhighlight>
{{out}}
<pre>ARC, RCA, CAR
Line 1,356 ⟶ 1,288:
=={{header|Phix}}==
Filters anagram lists
<!--<
<span style="color: #008080;">procedure</span> <span style="color: #000000;">filter_set</span><span style="color: #0000FF;">(</span><span style="color: #004080;">sequence</span> <span style="color: #000000;">anagrams</span><span style="color: #0000FF;">)</span>
<span style="color: #000080;font-style:italic;">-- anagrams is a (small) set of words that are all anagrams of each other
Line 1,428 ⟶ 1,360:
--teacup(join_path({"demo","rosetta","words.txt"}),4,true)
-- Note that allow_mono is needed to display eg {"agag","gaga"}</span>
<!--</
{{out}}
<pre>
Line 1,438 ⟶ 1,370:
=={{header|PicoLisp}}==
<
(let W (chop W)
(unless (or (apply = W) (not (cddr W)))
Line 1,468 ⟶ 1,400:
Lst )
Lst ) ) )
Words ) )</syntaxhighlight>
{{out}}
<pre>
Line 1,479 ⟶ 1,411:
=={{header|PureBasic}}==
<
dname:
Data.s "./Data/unixdict.txt"
Line 1,509 ⟶ 1,441:
bset="" : res="" : cw=0
Read.s dn
Wend</syntaxhighlight>
{{out}}
<pre>apt pta tap
Line 1,526 ⟶ 1,458:
===Functional===
Composing generic functions, and considering only anagram groups.
<
from itertools import chain, groupby
Line 1,723 ⟶ 1,655:
# MAIN ---
if __name__ == '__main__':
main()</syntaxhighlight>
{{Out}}
<pre>arc -> rca -> car
Line 1,740 ⟶ 1,672:
Defaults to unixdict.txt, minimum 3 characters and mono-character 'words' disallowed. Feed a file name to use a different word list, an integer to --min-chars and/or a truthy value to --mono to allow mono-chars.
<syntaxhighlight lang="raku">
unit sub MAIN ( $dict = 'unixdict.txt', :$min-chars = 3, :$mono = False );
Line 1,772 ⟶ 1,704:
}
say .unique.join(", ") for sort @teacups;</syntaxhighlight>
{{out|Defaults}}
Command line: <tt>raku teacup.p6</tt>
Line 1,827 ⟶ 1,759:
The dictionary wasn't assumed to be sorted in any way.
<
parse arg iFID L . /*obtain optional arguments from the CL*/
if iFID==''|iFID=="," then iFID= 'wordlist.10k' /*Not specified? Then use the default.*/
Line 1,857 ⟶ 1,789:
end /*j*/
say
say cw ' circular words were found.' /*stick a fork in it, we're all done. */</syntaxhighlight>
{{out|output|text= when using the default inputs:}}
<pre>
Line 1,870 ⟶ 1,802:
5 circular words were found.
</pre>
=={{header|Ruby}}==
"woordenlijst.txt" is a Dutch wordlist. It has 413125 words > 2 chars and takes about two minutes.
<syntaxhighlight lang="ruby">lists = ["unixdict.txt", "wordlist.10000", "woordenlijst.txt"]
lists.each do |list|
words = open(list).readlines( chomp: true).reject{|w| w.size < 3 }
grouped_by_size = words.group_by(&:size)
tea_words = words.filter_map do |word|
chars = word.chars
next unless chars.none?{|c| c < chars.first }
next if chars.uniq.size == 1
rotations = word.size.times.map {|i| chars.rotate(i).join }
rotations if rotations.all?{|rot| grouped_by_size[rot.size].include? rot }
end
puts "", list + ":"
tea_words.uniq(&:to_set).each{|ar| puts ar.join(", ") }
end
</syntaxhighlight>
{{out}}
<pre>
unixdict.txt:
apt, pta, tap
arc, rca, car
ate, tea, eat
wordlist.10000:
aim, ima, mai
arc, rca, car
asp, spa, pas
ate, tea, eat
ips, psi, sip
woordenlijst.txt:
ast, sta, tas
een, ene, nee
eer, ere, ree
</pre>
=={{header|Rust}}==
<
use std::collections::HashSet;
use std::fs::File;
Line 1,935 ⟶ 1,905:
Err(error) => eprintln!("Cannot open file {}: {}", &args[1], error),
}
}</syntaxhighlight>
{{out}}
Line 1,954 ⟶ 1,924:
=={{header|Swift}}==
<
func loadDictionary(_ path: String) throws -> Set<String> {
Line 2,009 ⟶ 1,979:
} catch {
print(error)
}</syntaxhighlight>
{{out}}
Line 2,022 ⟶ 1,992:
{{libheader|Wren-str}}
{{libheader|Wren-sort}}
<
import "./str" for Str
import "./sort" for Find
var readWords = Fn.new { |fileName|
Line 2,060 ⟶ 2,030:
}
System.print()
}</syntaxhighlight>
{{out}}
Line 2,080 ⟶ 2,050:
=={{header|zkl}}==
<
// This is limited to the max items a Dictionary can hold
fcn teacut(wordFile){
Line 2,097 ⟶ 2,067:
}
}
}</syntaxhighlight>
<
println("\nmit_wordlist_10000:"); teacut("mit_wordlist_10000.txt");</syntaxhighlight>
{{out}}
<pre>
|