Tokenize a string

{{task|String manipulation}}
[[Category:Simple]]
 
Separate the string "Hello,How,Are,You,Today" by commas into an array (or list) so that each element of it stores a different word.
 
Display the words to the 'user', in the simplest manner possible, separated by a period.
 
To simplify, you may display a trailing period.
 
'''''Related tasks:'''''
 
{{Template:Strings}}
* [[Tokenize a string with escaping]]
<br><br>
 
=={{header|11l}}==
{{trans|Python}}
 
<syntaxhighlight lang="11l">V text = ‘Hello,How,Are,You,Today’
V tokens = text.split(‘,’)
print(tokens.join(‘.’))</syntaxhighlight>
 
{{out}}
<pre>
Hello.How.Are.You.Today
</pre>
 
=={{header|360 Assembly}}==
<syntaxhighlight lang="360asm">* Tokenize a string - 08/06/2018
TOKSTR CSECT
USING TOKSTR,R13 base register
PG DC CL80' ' buffer
YREGS
END TOKSTR</syntaxhighlight>
{{out}}
<pre>
Hello.How.Are.You.Today
</pre>
 
=={{header|8080 Assembly}}==
<syntaxhighlight lang="8080asm">puts: equ 9
org 100h
jmp demo
;;; Split the string at DE by the character in C.
;;; Store pointers to the beginning of the elements starting at HL
;;; The amount of elements is returned in B.
split: mvi b,0 ; Amount of elements
sloop: mov m,e ; Store pointer at [HL]
inx h
mov m,d
inx h
inr b ; Increment counter
sscan: ldax d ; Get current character
inx d
cpi '$' ; Done?
rz ; Then stop
cmp c ; Place to split?
jnz sscan ; If not, keep going
dcx d
mvi a,'$' ; End the string here
stax d
inx d
jmp sloop ; Next part
;;; Test on the string given in the task
demo: lxi h,parts ; Parts array
lxi d,hello ; String
mvi c,','
call split ; Split the string
lxi h,parts ; Print each part
loop: mov e,m ; Load pointer into DE
inx h
mov d,m
inx h
push h ; Keep the array pointer
push b ; And the counter
mvi c,puts ; Print the string
call 5
lxi d,period ; And a period
mvi c,puts
call 5
pop b ; Restore the counter
pop h ; Restore the array pointer
dcr b ; One fewer string left
jnz loop
ret
period: db '. $'
hello: db 'Hello,How,Are,You,Today$'
parts: equ $</syntaxhighlight>
{{out}}
<pre>Hello. How. Are. You. Today.</pre>
=={{header|8086 Assembly}}==
<syntaxhighlight lang="asm"> cpu 8086
org 100h
section .text
jmp demo
;;; Split the string at DS:SI on the character in DL.
;;; Store pointers to strings starting at ES:DI.
;;; The amount of strings is returned in CX.
split: xor cx,cx ; Zero out counter
.loop: mov ax,si ; Store pointer to current location
stosw
inc cx ; Increment counter
.scan: lodsb ; Get byte
cmp al,'$' ; End of string?
je .done
cmp al,dl ; Character to split on?
jne .scan
mov [si-1],byte '$' ; Terminate string
jmp .loop
.done: ret
;;; Test on the string given in the task
demo: mov si,hello ; String to split
mov di,parts ; Place to store pointers
mov dl,',' ; Character to split string on
call split
;;; Print the resulting strings, and periods
mov si,parts ; Array of string pointers
print: lodsw ; Load next pointer
mov dx,ax ; Print string using DOS
mov ah,9
int 21h
mov dx,period ; Then print a period
int 21h
loop print ; Loop while there are strings
ret
section .data
period: db '. $'
hello: db 'Hello,How,Are,You,Today$'
section .bss
parts: resw 10</syntaxhighlight>
{{out}}
<pre>Hello. How. Are. You. Today. </pre>
 
=={{header|AArch64 Assembly}}==
{{works with|as|Raspberry Pi 3B version Buster 64 bits}}
<syntaxhighlight lang="aarch64 assembly">
/* ARM assembly AARCH64 Raspberry PI 3B */
/* program strTokenize64.s */
 
/*******************************************/
/* Constantes file */
/*******************************************/
/* for this file see task include a file in language AArch64 assembly*/
.include "../includeConstantesARM64.inc"
.equ NBPOSTESECLAT, 20
 
/*******************************************/
/* Initialized data */
/*******************************************/
.data
szMessFinal: .asciz "Words are : \n"
szString: .asciz "Hello,How,Are,You,Today"
szMessError: .asciz "Error tokenize !!\n"
szCarriageReturn: .asciz "\n"
/*******************************************/
/* UnInitialized data */
/*******************************************/
.bss
/*******************************************/
/* code section */
/*******************************************/
.text
.global main
main:
ldr x0,qAdrszString // string address
mov x1,',' // separator
bl stTokenize
cmp x0,-1 // error ?
beq 99f
mov x2,x0 // table address
ldr x0,qAdrszMessFinal // display message
bl affichageMess
ldr x4,[x2] // number of areas
add x2,x2,8 // first area
mov x3,0 // loop counter
mov x0,x2
1: // display loop
ldr x0,[x2,x3, lsl 3] // address area
bl affichageMess
ldr x0,qAdrszCarriageReturn // display carriage return
bl affichageMess
add x3,x3,1 // counter + 1
cmp x3,x4 // end ?
blt 1b // no -> loop
b 100f
99: // display error message
ldr x0,qAdrszMessError
bl affichageMess
100: // standard end of the program
mov x0,0 // return code
mov x8,EXIT // request to exit program
svc 0 // perform the system call
qAdrszString: .quad szString
//qAdrszFinalString: .quad szFinalString
qAdrszMessFinal: .quad szMessFinal
qAdrszMessError: .quad szMessError
qAdrszCarriageReturn: .quad szCarriageReturn
 
/*******************************************************************/
/* Separate string by separator into an array */
/* areas are store on the heap Linux */
/*******************************************************************/
/* x0 contains string address */
/* x1 contains separator character (, or . or : ) */
/* x0 returns table address with first item = number areas */
/* and other items contains pointer of each string */
stTokenize:
stp x1,lr,[sp,-16]! // save registers
mov x16,x0
mov x9,x1 // save separator
mov x14,0
1: // compute length string for place reservation on the heap
ldrb w12,[x0,x14]
cbz x12, 2f
add x14,x14,1
b 1b
2:
ldr x12,qTailleTable
add x15,x12,x14
and x15,x15,0xFFFFFFFFFFFFFFF0
add x15,x15,16 // align word on the heap
// place reservation on the heap
mov x0,0 // heap address
mov x8,BRK // call system linux 'brk'
svc 0 // call system
cmp x0,-1 // error call system
beq 100f
mov x14,x0 // save address heap begin = begin array
add x0,x0,x15 // reserve x15 byte on the heap
mov x8,BRK // call system linux 'brk'
svc 0
cmp x0,-1
beq 100f
// string copy on the heap
add x13,x14,x12 // behind the array
mov x0,x16
mov x1,x13
3: // loop copy string
ldrb w12,[x0],1 // read one byte and increment pointer one byte
strb w12,[x1],1 // store one byte and increment pointer one byte
cbnz x12,3b // end of string ? no -> loop
mov x0,#0
str x0,[x14]
str x13,[x14,8]
mov x12,#1 // areas counter
4: // loop load string character
ldrb w0,[x13]
cbz x0,5f // end string
cmp x0,x9 // separator ?
cinc x13,x13,ne // no -> next location
bne 4b // and loop
strb wzr,[x13] // store zero final of string
add x13,x13,1 // next character
add x12,x12,1 // areas counter + 1
str x13,[x14,x12, lsl #3] // store address area in the table at index x2
b 4b // and loop
5:
str x12,[x14] // store number areas
mov x0,x14 // returns array address
100:
ldp x1,lr,[sp],16 // restaur 2 registers
ret // return to address lr x30
qTailleTable: .quad 8 * NBPOSTESECLAT
 
/********************************************************/
/* File Include fonctions */
/********************************************************/
/* for this file see task include a file in language AArch64 assembly */
.include "../includeARM64.inc"
</syntaxhighlight>
{{Output}}
<pre>
Words are :
Hello
How
Are
You
Today
</pre>
 
=={{header|ACL2}}==
<syntaxhighlight lang="lisp">(defun split-at (xs delim)
(if (or (endp xs) (eql (first xs) delim))
(mv nil (rest xs))
(progn$ (cw (first strs))
(cw (coerce (list delim) 'string))
(print-with (rest strs) delim))))</syntaxhighlight>
 
{{out}}
<pre>&gt; (print-with (split-str "Hello,How,Are,You,Today" #\,) #\.)
Hello.How.Are.You.Today.</pre>
 
=={{header|Action!}}==
The user must type in the monitor the following command after compilation and before running the program!<pre>SET EndProg=*</pre>
{{libheader|Action! Tool Kit}}
<syntaxhighlight lang="action!">CARD EndProg ;required for ALLOCATE.ACT
 
INCLUDE "D2:ALLOCATE.ACT" ;from the Action! Tool Kit. You must type 'SET EndProg=*' from the monitor after compiling, but before running this program!
 
DEFINE PTR="CARD"
 
BYTE FUNC Split(CHAR ARRAY s CHAR c PTR ARRAY items)
BYTE i,count,start,len
CHAR ARRAY item
 
IF s(0)=0 THEN RETURN (0) FI
 
i=1 count=0
WHILE i<s(0)
DO
start=i
WHILE i<=s(0) AND s(i)#c
DO
i==+1
OD
len=i-start
item=Alloc(len+1)
SCopyS(item,s,start,i-1)
items(count)=item
count==+1
i==+1
OD
RETURN (count)
 
PROC Join(PTR ARRAY items BYTE count CHAR c CHAR ARRAY s)
BYTE i,pos
CHAR POINTER srcPtr,dstPtr
CHAR ARRAY item
 
s(0)=0
IF count=0 THEN RETURN FI
 
pos=1
FOR i=0 TO count-1
DO
item=items(i)
srcPtr=item+1
dstPtr=s+pos
MoveBlock(dstPtr,srcPtr,item(0))
pos==+item(0)
IF i<count-1 THEN
s(pos)='.
pos==+1
FI
OD
s(0)=pos-1
RETURN
 
PROC Clear(PTR ARRAY items BYTE POINTER count)
BYTE i
CHAR ARRAY item
 
IF count^=0 THEN RETURN FI
 
FOR i=0 TO count^-1
DO
item=items(i)
Free(item,item(0)+1)
OD
count^=0
RETURN
 
PROC Main()
CHAR ARRAY s="Hello,How,Are,You,Today"
CHAR ARRAY r(256)
PTR ARRAY items(100)
BYTE i,count
 
Put(125) PutE() ;clear screen
AllocInit(0)
count=Split(s,',,items)
Join(items,count,'.,r)
 
PrintF("Input:%E""%S""%E%E",s)
PrintE("Split:")
FOR i=0 TO count-1
DO
PrintF("""%S""",items(i))
IF i<count-1 THEN
Print(", ")
ELSE
PutE() PutE()
FI
OD
PrintF("Join:%E""%S""%E",r)
Clear(items,@count)
RETURN</syntaxhighlight>
{{out}}
[https://gitlab.com/amarok8bit/action-rosetta-code/-/raw/master/images/Tokenize_a_string.png Screenshot from Atari 8-bit computer]
<pre>
Input:
"Hello,How,Are,You,Today"
 
Split:
"Hello", "How", "Are", "You", "Today"
 
Join:
"Hello.How.Are.You.Today"
</pre>
 
=={{header|ActionScript}}==
<syntaxhighlight lang="actionscript">var hello:String = "Hello,How,Are,You,Today";
var tokens:Array = hello.split(",");
trace(tokens.join("."));
 
// Or as a one-liner
trace("Hello,How,Are,You,Today".split(",").join("."));</syntaxhighlight>
 
=={{header|Ada}}==
<syntaxhighlight lang="ada">with Ada.Text_IO, Ada.Containers.Indefinite_Vectors, Ada.Strings.Fixed, Ada.Strings.Maps;
use Ada.Text_IO, Ada.Containers, Ada.Strings, Ada.Strings.Fixed, Ada.Strings.Maps;

procedure Tokenize is
   package String_Vectors is new Indefinite_Vectors (Positive, String);
   use String_Vectors;
   Input  : String   := "Hello,How,Are,You,Today" & ",";
   Start  : Positive := Input'First;
   Finish : Natural  := 0;
   Output : Vector   := Empty_Vector;
begin
   while Start <= Input'Last loop
      Find_Token (Input, To_Set (','), Start, Outside, Start, Finish);
      exit when Start > Finish;
      Output.Append (Input (Start .. Finish));
      Start := Finish + 1;
   end loop;
   for S of Output loop
      Put (S & ".");
   end loop;
end Tokenize;</syntaxhighlight>
 
=={{header|ALGOL 68}}==
<syntaxhighlight lang="algol68">main:(
 
OP +:= = (REF FLEX[]STRING in out, STRING item)VOID:(
printf(($g"."$, string split(beetles, ", "),$l$));
printf(($g"."$, char split(beetles, ", "),$l$))
)</syntaxhighlight>
{{out}}
<pre>
</pre>
 
=={{header|Amazing Hopper}}==
 
Hopper provides instructions for separating and modifying tokens from a string.
Let "s" be a string; "n" token number:
 
1) {n}, $(s) ==> gets token "n" from string "s".
 
2) {"word", n} $$(s) ==> replace token "n" of "s", with "word".
 
Note: the "splitnumber" macro cannot separate a number converted to a string by the "XTOSTR" function, because this function "rounds" the number to the decimal position by default.
 
<syntaxhighlight lang="hopper">
#include <hopper.h>
 
#proto splitdate(_DATETIME_)
#proto splitnumber(_N_)
#proto split(_S_,_T_)
 
main:
s="this string will be separated into parts with space token separator"
aS=0,let( aS :=_split(s," "))
{","}toksep // set a new token separator
{"String: ",s}
{"\nArray:\n",aS},
{"\nSize="}size(aS),println // "size" return an array: {dims,#rows,#cols,#pages}
{"\nOriginal number: ",-125.489922},println
w=0,let(w:=_split number(-125.489922) )
{"Integer part: "}[1]get(w) // get first element from array "w"
{"\nDecimal part: "}[2]get(w),println // get second element from array "w"
{"\nDate by DATENOW(TODAY) macro: "},print
dt=0, let( dt :=_splitdate(datenow(TODAY);!puts)) // "!" keep first element from stack
{"\nDate: "}[1]get(dt)
{"\nTime: "}[2]get(dt),println
 
exit(0)
 
.locals
splitdate(_DATETIME_)
_SEP_=0,gettoksep,mov(_SEP_) // "gettoksep" return actual token separator
{","}toksep, // set a new token separator
_NEWARRAY_={}
{1},$( _DATETIME_ ),
{2},$( _DATETIME_ ),pushall(_NEWARRAY_)
{_SEP_}toksep // restore ols token separator
{_NEWARRAY_}
back
 
splitnumber(_X_)
part_int=0,part_dec=0,
{_X_},!trunc,mov(part_int),
minus(part_int), !sign,mul
xtostr,mov(part_dec), part_dec+=2, // "part_dec+=2", delete "0." from "part_dec"
{part_dec}xtonum,mov(part_dec)
_NEWARRAY_={},{part_int,part_dec},pushall(_NEWARRAY_)
{_NEWARRAY_}
back
 
split(_S_,_T_)
_NEWARRAY_={},_VAR1_=0,_SEP_=0,gettoksep,mov(_SEP_)
{_T_}toksep,totaltoken(_S_),
mov(_VAR1_), // for total tokens
_VAR2_=1, // for real position of tokens into the string
___SPLIT_ITER:
{_VAR2_}$( _S_ ),push(_NEWARRAY_)
++_VAR2_,--_VAR1_
{ _VAR1_ },jnz(___SPLIT_ITER) // jump to "___SPLIT_ITER" if "_VAR1_" is not zero.
clear(_VAR2_),clear(_VAR1_)
{_SEP_}toksep
{_NEWARRAY_}
back
 
</syntaxhighlight>
{{Out}}
<pre>Output:
 
String: this string will be separated into parts with space token separator
Array:
this,string,will,be,separated,into,parts,with,space,token,separator
Size=1,11
 
Original number: -125.49
Integer part: -125
Decimal part: 489922
 
Date by DATENOW(TODAY) macro: 22/11/2021,18:41:20:13
Date: 22/11/2021
Time: 18:41:20:13
 
</pre>
 
=={{header|APL}}==
<syntaxhighlight lang="apl"> '.',⍨¨ ','(≠⊆⊢)'abc,123,X' ⍝ [1] Do the split: ','(≠⊆⊢)'abc,123,X'; [2] append the periods: '.',⍨¨
abc. 123. X. ⍝ 3 strings (char vectors), each with a period at the end.
</syntaxhighlight>
 
=={{header|AppleScript}}==
 
<syntaxhighlight lang="applescript">on run
intercalate(".", splitOn(",", "Hello,How,Are,You,Today"))
end run
 
 
-- splitOn :: String -> String -> [String]
on splitOn(strDelim, strMain)
return lstParts
end splitOn
 
-- intercalate :: String -> [String] -> String
on intercalate(strText, lstText)
set my text item delimiters to dlm
return strJoined
end intercalate</syntaxhighlight>
{{Out}}
<pre>Hello.How.Are.You.Today</pre>
 
Or,
 
<syntaxhighlight lang="applescript">set my text item delimiters to ","
set tokens to the text items of "Hello,How,Are,You,Today"
 
set my text item delimiters to "."
log tokens as text</syntaxhighlight>
 
{{Out}}
 
<pre>"Hello.How.Are.You.Today"</pre>
 
=={{header|ARM Assembly}}==
{{works with|as|Raspberry Pi}}
<syntaxhighlight lang="arm assembly">
 
/* ARM assembly Raspberry PI */
bx lr
</syntaxhighlight>
 
=={{header|Arturo}}==
<syntaxhighlight lang="rebol">str: "Hello,How,Are,You,Today"
 
print join.with:"." split.by:"," str</syntaxhighlight>
 
{{out}}
 
<pre>Hello.How.Are.You.Today</pre>
 
=={{header|Astro}}==
<syntaxhighlight lang="python">let text = 'Hello,How,Are,You,Today'
let tokens = text.split(||,||)
print tokens.join(with: '.')</syntaxhighlight>
 
=={{header|AutoHotkey}}==
<syntaxhighlight lang="autohotkey">string := "Hello,How,Are,You,Today"
stringsplit, string, string, `,
loop, % string0
{
msgbox % string%A_Index%
}</syntaxhighlight>
 
=={{header|AWK}}==
 
<syntaxhighlight lang="awk">BEGIN {
s = "Hello,How,Are,You,Today"
split(s, arr, ",")
for (i = 1; i <= length(arr); i++) {
printf "%s.", arr[i]
}
print
}</syntaxhighlight>
 
A more ''idiomatic'' way for AWK is
 
<syntaxhighlight lang="awk">BEGIN { FS = "," }
{
for(i=1; i <= NF; i++) printf $i ".";
print ""
}</syntaxhighlight>
 
which "tokenizes" each line of input; this is achieved by using "," as the field separator.
=={{header|BASIC}}==
==={{header|Applesoft BASIC}}===
<syntaxhighlight lang="applesoftbasic">100 T$ = "HELLO,HOW,ARE,YOU,TODAY"
110 GOSUB 200"TOKENIZE
120 FOR I = 1 TO N
290 A$(N) = A$(N) + C$
300 NEXT TI
310 RETURN</syntaxhighlight>
 
==={{header|BaCon}}===
BaCon includes extensive support for ''delimited strings''.
<syntaxhighlight lang="bacon">OPTION BASE 1

string$ = "Hello,How,Are,You,Today"

' Tokenize a string into an array
SPLIT string$ BY "," TO array$

' Print array elements with new delimiter
FOR i = 1 TO UBOUND(array$)
    PRINT COIL$(i, UBOUND(array$), array$[i], ".")
NEXT

' Or simply replace the delimiter
PRINT DELIM$(string$, ",", ".")</syntaxhighlight>
 
{{out}}
<pre>prompt$ ./tokenize
Hello.How.Are.You.Today
Hello.How.Are.You.Today</pre>
 
==={{header|BASIC256}}===
<syntaxhighlight lang="basic256">instring$ = "Hello,How,Are,You,Today"
 
tokens$ = explode(instring$,",")
for i = 0 to tokens$[?]-1
print tokens$[i]; ".";
next i
end</syntaxhighlight>
 
 
==={{header|BBC BASIC}}===
{{works with|BBC BASIC for Windows}}
<syntaxhighlight lang="bbcbasic"> INSTALL @lib$+"STRINGLIB"
text$ = "Hello,How,Are,You,Today"
PRINT array$(i%) "." ;
NEXT
PRINT</syntaxhighlight>
 
==={{header|Chipmunk Basic}}===
Solutions [[#Applesoft BASIC|Applesoft BASIC]] and [[#Commodore BASIC|Commodore BASIC]] work without changes.
 
==={{header|Commodore BASIC}}===
Based on the AppleSoft BASIC version.
<langsyntaxhighlight lang="commodorebasic">10 REM TOKENIZE A STRING ... ROSETTACODE.ORG
10 REM TOKENIZE A STRING ... ROSETTACODE.ORG
20 T$ = "HELLO,HOW,ARE,YOU,TODAY"
30 GOSUB 200, TOKENIZE
260 N = N + 1
270 NEXT L
280 RETURN</syntaxhighlight>
 
==={{header|FreeBASIC}}===
<syntaxhighlight lang="freebasic">sub tokenize( instring as string, tokens() as string, sep as string )
redim tokens(0 to 0) as string
dim as string*1 ch
dim as uinteger t=0
for i as uinteger = 1 to len(instring)
ch = mid(instring,i,1)
if ch = sep then
t = t + 1
redim preserve tokens(0 to t)
else
tokens(t) = tokens(t) + ch
end if
next i
return
end sub
 
dim as string instring = "Hello,How,Are,You,Today"
redim as string tokens(-1)
tokenize( instring, tokens(), "," )
for i as uinteger = 0 to ubound(tokens)
print tokens(i);".";
next i</syntaxhighlight>
 
==={{header|Liberty BASIC}}===
<syntaxhighlight lang="lb">'Note that Liberty Basic's array usage can reach element #10 before having to DIM the array
For i = 0 To 4
array$(i) = Word$("Hello,How,Are,You,Today", (i + 1), ",")
Next i
 
Print Left$(array$, (Len(array$) - 1))</syntaxhighlight>
 
==={{header|MSX Basic}}===
The [[#Commodore BASIC|Commodore BASIC]] solution works without any changes.
 
==={{header|PowerBASIC}}===
PowerBASIC has a few keywords that make parsing strings trivial: <code>PARSE</code>, <code>PARSE$</code>, and <code>PARSECOUNT</code>. (<code>PARSE$</code>, not shown here, is for extracting tokens one at a time, while <code>PARSE</code> extracts all tokens at once into an array. <code>PARSECOUNT</code> returns the number of tokens found.)
 
<syntaxhighlight lang="powerbasic">FUNCTION PBMAIN () AS LONG
DIM parseMe AS STRING
parseMe = "Hello,How,Are,You,Today"
 
MSGBOX outP
END FUNCTION</syntaxhighlight>
 
==={{header|PureBasic}}===
 
'''As described
<syntaxhighlight lang="purebasic">NewList MyStrings.s()
 
For i=1 To 5
ForEach MyStrings()
Print(MyStrings()+".")
Next</syntaxhighlight>
 
'''Still, easier would be
<syntaxhighlight lang="purebasic">Print(ReplaceString("Hello,How,Are,You,Today",",","."))</syntaxhighlight>
 
==={{header|QBasic}}===
<syntaxhighlight lang="qbasic">DIM parseMe AS STRING
parseMe = "Hello,How,Are,You,Today"
 
PRINT "."; parsed(L0);
NEXT
END IF</syntaxhighlight>
 
==={{header|Run BASIC}}===
<syntaxhighlight lang="runbasic">text$ = "Hello,How,Are,You,Today"
FOR i = 1 to 5
textArray$(i) = word$(text$,i,",")
print textArray$(i);" ";
NEXT</syntaxhighlight>
 
==={{header|VBScript}}===
====One liner====
<syntaxhighlight lang="vb">WScript.Echo Join(Split("Hello,How,Are,You,Today", ","), ".")</syntaxhighlight>
 
In fact, the Visual Basic solution (below) could have done the same, as Join() is available.

==={{header|Visual Basic}}===
Unlike PowerBASIC, there is no need to know beforehand how many tokens are in the string -- <code>Split</code> automagically builds the array for you.
 
<syntaxhighlight lang="vb">Sub Main()
Dim parseMe As String, parsed As Variant
parseMe = "Hello,How,Are,You,Today"
Line 646 ⟶ 1,177:
 
MsgBox outP
End Sub</syntaxhighlight>
 
=={{header|Batch File}}==
<syntaxhighlight lang="dos">@echo off
setlocal enabledelayedexpansion
call :tokenize %1 res
for %%i in (%str%) do set %2=!%2!.%%i
set %2=!%2:~1!
goto :eof</syntaxhighlight>
 
''Demo''
>tokenize.cmd "Hello,How,Are,You,Today"
Hello.How.Are.You.Today
 
=={{header|BQN}}==
Uses a splitting idiom from bqncrate.
<syntaxhighlight lang="bqn">Split ← (+`׬)⊸-∘= ⊔ ⊢
 
∾⟜'.'⊸∾´ ',' Split "Hello,How,Are,You,Today"</syntaxhighlight>
{{out}}
<pre>"Hello.How.Are.You.Today"</pre>
 
=={{header|Bracmat}}==
Solution that employs string pattern matching to spot the commas
<syntaxhighlight lang="bracmat">( "Hello,How,Are,You,Today":?String
& :?ReverseList
& whl
)
& out$!List
)</syntaxhighlight>
Solution that starts by evaluating the input and employs the circumstance that the comma is a list constructing binary operator and that the string does not contain any other characters that are interpreted as operators on evaluation.
<syntaxhighlight lang="bracmat">( get$("Hello,How,Are,You,Today",MEM):?CommaseparatedList
& :?ReverseList
& whl
)
& out$!List
)</syntaxhighlight>
 
=={{header|C}}==
This example uses the ''strtok()'' function to separate the tokens. This function is destructive (replacing token separators with '\0'), so we have to make a copy of the string (using ''strdup()'') before tokenizing. ''strdup()'' is not part of [[ANSI C]], but is available on most platforms. It can easily be implemented with a combination of ''strlen()'', ''malloc()'', and ''strcpy()''.
 
<syntaxhighlight lang="c">#include<string.h>
#include<stdio.h>
#include<stdlib.h>
 
return 0;
}</syntaxhighlight>
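
Where ''strdup()'' is not available, a minimal replacement built from ''strlen()'', ''malloc()'' and ''strcpy()'' might look like the sketch below (the helper name <code>my_strdup</code> is only illustrative, not part of any standard library):

<syntaxhighlight lang="c">#include <stdlib.h>
#include <string.h>

/* Copy a string into freshly allocated memory, as strdup() would. */
char *my_strdup(const char *s)
{
    char *copy = malloc(strlen(s) + 1);   /* +1 for the terminating '\0' */
    if (copy != NULL)
        strcpy(copy, s);
    return copy;                          /* caller must free() it */
}</syntaxhighlight>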
 
Another way to accomplish the task without the built-in string functions is to temporarily modify the separator character. This method does not need any additional memory, but requires the input string to be writeable.
<syntaxhighlight lang="c">#include<stdio.h>
 
typedef void (*callbackfunc)(const char *);
tokenize(array, ',', doprint);
return 0;
}</syntaxhighlight>
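
The body of that example is elided in this revision view; a self-contained sketch of the technique it describes (temporarily overwriting each separator with '\0', handing the token to a callback, then restoring the character) could look like the following. It is an illustration, not the original contributor's code:

<syntaxhighlight lang="c">#include <stdio.h>

typedef void (*callbackfunc)(const char *);

static void doprint(const char *s)
{
    printf("%s.", s);
}

/* Walk a writable string, splitting it on 'sep' without extra memory. */
static void tokenize(char *str, char sep, callbackfunc cb)
{
    char *start = str, *p = str;
    for (;;) {
        if (*p == sep || *p == '\0') {
            char saved = *p;
            *p = '\0';        /* temporarily terminate the token here */
            cb(start);
            *p = saved;       /* put the original character back */
            if (saved == '\0')
                break;
            start = p + 1;
        }
        p++;
    }
}

int main(void)
{
    char array[] = "Hello,How,Are,You,Today";
    tokenize(array, ',', doprint);
    putchar('\n');
    return 0;
}</syntaxhighlight>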
 
=={{header|C sharp|C#}}==
<syntaxhighlight lang="csharp">string str = "Hello,How,Are,You,Today";
// or Regex.Split ( "Hello,How,Are,You,Today", "," );
// (Regex is in System.Text.RegularExpressions namespace)
string[] strings = str.Split(',');
Console.WriteLine(String.Join(".", strings));
</syntaxhighlight>
 
=={{header|C++}}==
std::getline() is typically used to tokenize strings on a single-character delimiter
 
<syntaxhighlight lang="cpp">#include <string>
#include <sstream>
#include <vector>
copy(v.begin(), v.end(), std::ostream_iterator<std::string>(std::cout, "."));
std::cout << '\n';
}</syntaxhighlight>
 
{{works with|C++98}}
C++ allows the user to redefine what is considered whitespace. If the delimiter is whitespace, tokenization becomes effortless.
 
<syntaxhighlight lang="cpp">#include <string>
#include <locale>
#include <sstream>
copy(v.begin(), v.end(), std::ostream_iterator<std::string>(std::cout, "."));
std::cout << '\n';
}</syntaxhighlight>
 
{{works with|C++98}}
The boost library has multiple options for easy tokenization.
 
<syntaxhighlight lang="cpp">#include <string>
#include <vector>
#include <iterator>
copy(v.begin(), v.end(), std::ostream_iterator<std::string>(std::cout, "."))
std::cout << '\n';
}</syntaxhighlight>
 
{{works with|C++23}}
C++20 and C++23 drastically improve the ergonomics of simple manipulation of ranges.
 
<syntaxhighlight lang="cpp">#include <string>
#include <ranges>
#include <iostream>
int main() {
std::string s = "Hello,How,Are,You,Today";
s = s // Assign the final string back to the string variable
| std::views::split(',') // Produce a range of the comma separated words
| std::views::join_with('.') // Concatenate the words into a single range of characters
| std::ranges::to<std::string>(); // Convert the range of characters into a regular string
std::cout << s;
}</syntaxhighlight>
 
=={{header|Ceylon}}==
{{works with|Ceylon 1.2}}
<syntaxhighlight lang="ceylon">shared void tokenizeAString() {
value input = "Hello,How,Are,You,Today";
value tokens = input.split(','.equals);
print(".".join(tokens));
}</syntaxhighlight>
 
=={{header|CFEngine}}==
<syntaxhighlight lang="cfengine">bundle agent main
{
reports:
"${with}" with => join(".", splitstring("Hello,How,Are,You,Today", ",", 99));
}
</syntaxhighlight>
{{out}}
<pre>cf-agent -KIf ./tokenize-a-string.cf
R: Hello.How.Are.You.Today</pre>
 
See https://docs.cfengine.com/docs/master/reference-functions.html for a complete list of available functions.
 
=={{header|Clojure}}==
Using native Clojure functions and Java Interop:
<syntaxhighlight lang="clojure">(apply str (interpose "." (.split #"," "Hello,How,Are,You,Today")))</syntaxhighlight>
 
Using the clojure.string library:
<syntaxhighlight lang="clojure">(clojure.string/join "." (clojure.string/split "Hello,How,Are,You,Today" #","))</syntaxhighlight>
 
=={{header|CLU}}==
<syntaxhighlight lang="clu">% This iterator splits the string on a given character,
% and returns each substring in order.
tokenize = iter (s: string, c: char) yields (string)
while ~string$empty(s) do
next: int := string$indexc(c, s)
if next = 0 then
yield(s)
break
else
yield(string$substr(s, 1, next-1))
s := string$rest(s, next+1)
end
end
end tokenize
 
start_up = proc ()
po: stream := stream$primary_output()
str: string := "Hello,How,Are,You,Today"
for part: string in tokenize(str, ',') do
stream$putl(po, part || ".")
end
end start_up</syntaxhighlight>
{{out}}
<pre>Hello.
How.
Are.
You.
Today.</pre>
 
=={{header|COBOL}}==
This can be made to handle more complex cases; UNSTRING allows multiple delimiters, capture of which delimiter was used for each field, a POINTER for starting position (set on ending), along with match TALLYING.
 
<syntaxhighlight lang="cobol">
identification division.
program-id. tokenize.
goback.
end program tokenize.
</syntaxhighlight>
 
{{out}}
=={{header|CoffeeScript}}==
 
<syntaxhighlight lang="coffeescript">
arr = "Hello,How,Are,You,Today".split ","
console.log arr.join "."
</syntaxhighlight>
 
=={{header|ColdFusion}}==
=== Classic tag based CFML ===
<syntaxhighlight lang="cfm">
<cfoutput>
<cfset wordListTag = "Hello,How,Are,You,Today">
#Replace( wordListTag, ",", ".", "all" )#
</cfoutput>
</syntaxhighlight>
{{Output}}
<pre>
Hello.How.Are.You.Today
</pre>
 
=== Script Based CFML ===
<syntaxhighlight lang="cfm"><cfscript>
wordList = "Hello,How,Are,You,Today";
splitList = replace( wordList, ",", ".", "all" );
writeOutput( splitList );
</cfscript></syntaxhighlight>
{{Output}}
<pre>
Hello.How.Are.You.Today
</pre>

=={{header|Common Lisp}}==
There are libraries out there that handle splitting (e.g., [http://www.cliki.net/SPLIT-SEQUENCE SPLIT-SEQUENCE], and the more-general [http://weitz.de/cl-ppcre/ CL-PPCRE]), but this is a simple one-off, too. When the words are written with write-with-periods, there is no final period after the last word.
 
<syntaxhighlight lang="lisp">(defun comma-split (string)
(loop for start = 0 then (1+ finish)
for finish = (position #\, string :start start)
 
(defun write-with-periods (strings)
(format t "~{~A~^.~}" strings))</syntaxhighlight>
 
=={{header|Cowgol}}==
<syntaxhighlight lang="cowgol">include "cowgol.coh";
include "strings.coh";

# Tokenize a string. Note: the string is modified in place.
sub tokenize(sep: uint8, str: [uint8], out: [[uint8]]): (length: intptr) is
length := 0;
loop
[out] := str;
out := @next out;
length := length + 1;
while [str] != 0 and [str] != sep loop
str := @next str;
end loop;
if [str] == sep then
[str] := 0;
str := @next str;
else
break;
end if;
end loop;
end sub;
 
# The string
var string: [uint8] := "Hello,How,Are,You,Today";
 
# Make a mutable copy
var buf: uint8[64];
CopyString(string, &buf[0]);
 
# Tokenize the copy
var parts: [uint8][64];
var length := tokenize(',', &buf[0], &parts[0]) as @indexof parts;
 
# Print each string
var i: @indexof parts := 0;
while i < length loop
print(parts[i]);
print(".\n");
i := i + 1;
end loop;</syntaxhighlight>
{{out}}
<pre>Hello.
How.
Are.
You.
Today.</pre>
 
=={{header|Crystal}}==
<syntaxhighlight lang="crystal">puts "Hello,How,Are,You,Today".split(',').join('.')</syntaxhighlight>
{{out}}
<pre>Hello.How.Are.You.Today</pre>
 
=={{header|D}}==
<syntaxhighlight lang="d">void main() {
import std.stdio, std.string;
 
"Hello,How,Are,You,Today".split(',').join('.').writeln;
}</syntaxhighlight>
{{out}}
<pre>Hello.How.Are.You.Today</pre>
 
=={{header|Delphi}}==
=== Using String.split ===
{{libheader| System.SysUtils}}
<syntaxhighlight lang="delphi">
program Tokenize_a_string;
 
{$APPTYPE CONSOLE}
 
uses
System.SysUtils;
 
var
Words: TArray<string>;
 
begin
Words := 'Hello,How,Are,You,Today'.Split([',']);
Writeln(string.Join(#10, Words));
 
Readln;
end.
 
</syntaxhighlight>
 
=== Using TStringList ===
<syntaxhighlight lang="delphi">
program TokenizeString;
 
 
end.
</syntaxhighlight>
 
The result is:
 
<syntaxhighlight lang="delphi">
Hello
How
Are
You
Today
</syntaxhighlight>
 
=={{header|dt}}==
<syntaxhighlight lang="dt">"Hello,How,Are,You,Today" "," split "." join pl</syntaxhighlight>
 
=={{header|Dyalect}}==
<syntaxhighlight lang="dyalect">var str = "Hello,How,Are,You,Today"
var strings = str.Split(',')
print(values: strings, separator: ".")</syntaxhighlight>
{{out}}
<pre>Hello.How.Are.You.Today</pre>
 
=={{header|Déjà Vu}}==
<syntaxhighlight lang="dejavu">!print join "." split "Hello,How,Are,You,Today" ","</syntaxhighlight>
{{out}}
<pre>Hello.How.Are.You.Today</pre>
 
=={{header|E}}==
<syntaxhighlight lang="e">".".rjoin("Hello,How,Are,You,Today".split(","))</syntaxhighlight>
 
=={{header|EasyLang}}==
<syntaxhighlight lang="easylang">
s$ = "Hello,How,Are,You,Today"
a$[] = strsplit s$ ","
for s$ in a$[]
write s$ & "."
.
</syntaxhighlight>
 
=={{header|Elena}}==
ELENA 6.x:
<syntaxhighlight lang="elena">import system'routines;
import extensions;
public program()
{
    auto string := "Hello,How,Are,You,Today";
    string.splitBy(",").forEach::(s)
    {
        console.print(s,".")
    }
}</syntaxhighlight>
 
=={{header|Elixir}}==
<syntaxhighlight lang="elixir">
tokens = String.split("Hello,How,Are,You,Today", ",")
IO.puts Enum.join(tokens, ".")
</syntaxhighlight>
 
=={{header|EMal}}==
<syntaxhighlight lang="emal">
text value = "Hello,How,Are,You,Today"
List tokens = value.split(",")
writeLine(tokens.join("."))
# single line version
writeLine("Hello,How,Are,You,Today".split(",").join("."))
</syntaxhighlight>
{{out}}
<pre>
Hello.How.Are.You.Today
Hello.How.Are.You.Today
</pre>
 
=={{header|Erlang}}==
<syntaxhighlight lang="erlang">-module(tok).
-export([start/0]).
 
Lst = string:tokens("Hello,How,Are,You,Today",","),
io:fwrite("~s~n", [string:join(Lst,".")]),
ok.</syntaxhighlight>
 
=={{header|Euphoria}}==
<syntaxhighlight lang="euphoria">function split(sequence s, integer c)
sequence out
integer first, delim
for i = 1 to length(s) do
puts(1, s[i] & ',')
end for</syntaxhighlight>
 
 
=={{header|F_Sharp|F#}}==
<syntaxhighlight lang="fsharp">System.String.Join(".", "Hello,How,Are,You,Today".Split(','))</syntaxhighlight>
 
=={{header|Factor}}==
<syntaxhighlight lang="factor">"Hello,How,Are,You,Today" "," split "." join print</syntaxhighlight>
 
 
=={{header|Falcon}}==
'''VBA/Python programmer's approach to this solution, not sure if it's the most falconic way'''
<syntaxhighlight lang="falcon">
/* created by Aykayayciti Earl Lamont Montgomery
April 9th, 2018 */
 
> b
</syntaxhighlight>
{{out}}
<pre>
</pre>

=={{header|Fantom}}==
A string can be split on a given character, returning a list of the intervening strings.
 
<syntaxhighlight lang="fantom">
class Main
{
}
}
</syntaxhighlight>
 
=={{header|Fennel}}==
{{trans|Lua}}
<syntaxhighlight lang="fennel">(fn string.split [self sep]
(let [pattern (string.format "([^%s]+)" sep)
fields {}]
(self:gsub pattern (fn [c] (tset fields (+ 1 (length fields)) c)))
fields))
 
(let [str "Hello,How,Are,You,Today"]
(print (table.concat (str:split ",") ".")))</syntaxhighlight>
 
=={{header|Forth}}==
There is no standard string split routine, but it is easily written. The results are saved temporarily to the dictionary.
 
<syntaxhighlight lang="forth">: split ( str len separator len -- tokens count )
here >r 2swap
begin
1 ?do dup 2@ type ." ." cell+ cell+ loop 2@ type ;
 
s" Hello,How,Are,You,Today" s" ," split .tokens \ Hello.How.Are.You.Today</syntaxhighlight>
 
=={{header|Fortran}}==
{{works with|Fortran|90 and later}}
<syntaxhighlight lang="fortran">PROGRAM Example
 
CHARACTER(23) :: str = "Hello,How,Are,You,Today"
END DO
END PROGRAM Example</syntaxhighlight>
 
=={{header|Frink}}==
<syntaxhighlight lang="frink">
println[join[".", split[",", "Hello,How,Are,You,Today"]]]
</syntaxhighlight>
 
=={{header|FutureBasic}}==
<syntaxhighlight lang="futurebasic">
window 1, @"Tokenize a string"
 
void local fn DoIt
CFStringRef string = @"Hello,How,Are,You,Today"
CFArrayRef tokens = fn StringComponentsSeparatedByString( string, @"," )
print fn ArrayComponentsJoinedByString( tokens, @"." )
end fn
 
fn DoIt
 
HandleEvents
</syntaxhighlight>
{{out}}
<pre>
Hello.How.Are.You.Today
</pre>
 
=={{header|Gambas}}==
'''[https://gambas-playground.proko.eu/?gist=218e240236cdf1419a405abfed906ed3 Click this link to run this code]'''
<syntaxhighlight lang="gambas">Public Sub Main()
Dim sString As String[] = Split("Hello,How,Are,You,Today")
 
Print sString.Join(".")
 
End</syntaxhighlight>
Output:
<pre>
Hello.How.Are.You.Today
</pre>
 
=={{header|GAP}}==
<syntaxhighlight lang="gap">SplitString("Hello,How,Are,You,Today", ",");
# [ "Hello", "How", "Are", "You", "Today" ]
 
JoinStringsWithSeparator(last, ".");
# "Hello.How.Are.You.Today"</syntaxhighlight>
 
=={{header|Genie}}==
<syntaxhighlight lang="genie">[indent=4]
 
init
str:string = "Hello,How,Are,You,Today"
words:array of string[] = str.split(",")
joined:string = string.joinv(".", words)
print joined</syntaxhighlight>
 
{{out}}
<pre>prompt$ valac tokenize.gs
prompt$ ./tokenize
Hello.How.Are.You.Today</pre>
 
=={{header|Go}}==
<syntaxhighlight lang="go">package main
 
import (
	"fmt"
	"strings"
)

func main() {
s := "Hello,How,Are,You,Today"
fmt.Println(strings.Join(strings.Split(s, ","), "."))
}</syntaxhighlight>
 
=={{header|Groovy}}==
<syntaxhighlight lang="groovy">println 'Hello,How,Are,You,Today'.split(',').join('.')</syntaxhighlight>
 
=={{header|Haskell}}==
'''Using Data.Text'''
 
<syntaxhighlight lang="haskell">{-# OPTIONS_GHC -XOverloadedStrings #-}
import Data.Text (splitOn,intercalate)
import qualified Data.Text.IO as T (putStrLn)
 
main = T.putStrLn . intercalate "." $ splitOn "," "Hello,How,Are,You,Today"</syntaxhighlight>
 
Output: Hello.How.Are.You.Today
Line 1,205 ⟶ 1,953:
The necessary operations are unfortunately not in the standard library (yet), but simple to write:
 
<syntaxhighlight lang="haskell">splitBy :: (a -> Bool) -> [a] -> [[a]]
splitBy _ [] = []
splitBy f list = first : splitBy f (dropWhile f rest) where
    (first, rest) = break f list
-- using regular expression to split:
import Text.Regex
putStrLn $ joinWith "." $ splitRegex (mkRegex ",") $ "Hello,How,Are,You,Today"</syntaxhighlight>
 
Tokenizing can also be realized by using unfoldr and break:
<syntaxhighlight lang="haskell">*Main> mapM_ putStrLn $ takeWhile (not.null) $ unfoldr (Just . second(drop 1). break (==',')) "Hello,How,Are,You,Today"
Hello
How
Are
You
Today</syntaxhighlight>
* You need to import the modules Data.List and Control.Arrow
 
 
=={{header|HicEst}}==
<syntaxhighlight lang="hicest">CHARACTER string="Hello,How,Are,You,Today", list
 
nWords = INDEX(string, ',', 256) + 1
Line 1,246 ⟶ 1,994:
DO i = 1, nWords
WRITE(APPend) TRIM(CHAR(i, maxWordLength, list)), '.'
ENDDO</syntaxhighlight>
 
=={{header|Icon}} and {{header|Unicon}}==
<syntaxhighlight lang="icon">procedure main()
A := []
"Hello,How,Are,You,Today" ? {
   while put(A, 1(tab(upto(',')),=","))
   put(A,tab(0))
   }
every writes(!A,".")
write()
end</syntaxhighlight>
 
{{out}}
<pre>
->ss
Hello.How.Are.You.Today.
->
</pre>
 
A Unicon-specific solution is:
<syntaxhighlight lang="unicon">import util
 
procedure main()
A := stringToList("Hello,How,Are,You,Today", ',')
every writes(!A,".")
write()
end</syntaxhighlight>
 
One wonders what the expected output should be with the input string ",,,,".
 
=={{header|Io}}==
<syntaxhighlight lang="io">"Hello,How,Are,You,Today" split(",") join(".") println</syntaxhighlight>
 
=={{header|J}}==
<syntaxhighlight lang="j"> s=: 'Hello,How,Are,You,Today'
] t=: <;._1 ',',s
+-----+---+---+---+-----+
|Hello|How|Are|You|Today|
+-----+---+---+---+-----+
 
'.' (I.','=s)}s NB. two steps combined
Hello.How.Are.You.Today</langsyntaxhighlight>
 
Alternatively using the system library/script <tt>strings</tt>
<syntaxhighlight lang="j"> require 'strings'
',' splitstring s
+-----+---+---+---+-----+
Line 1,286 ⟶ 2,048:
 
'.' joinstring ',' splitstring s
Hello.How.Are.You.Today</syntaxhighlight>
 
<tt>splitstring</tt> and <tt>joinstring</tt> also work with longer "delimiters":
<syntaxhighlight lang="j"> '"'([ ,~ ,) '","' joinstring ',' splitstring s
"Hello","How","Are","You","Today"</syntaxhighlight>
 
But, of course, this could be solved with simple string replacement:
 
<syntaxhighlight lang="j"> rplc&',.' s
Hello.How.Are.You.Today</syntaxhighlight>
 
The task asks us to ''Separate the string "Hello,How,Are,You,Today" by commas into an array (or list) so that each element of it stores a different word.'' but for many purposes the original string is an adequate data structure. Note also that given a string, a list of "word start" indices and "word length" integers can be logically equivalent to having an "array of words" -- and, depending on implementation details may be a superior or inferior choice to some other representation. But, in current definition of this task, the concept of "word length" plays no useful role.
Note also that J provides several built-in concepts of parsing: split on leading delimiter, split on trailing delimiter, split J language words. Also, it's sometimes more efficient to append to a string than to prepend to it. So a common practice for parsing on an embedded delimiter is to append a copy of the delimiter to the string and then use the appended result:
 
<syntaxhighlight lang="j"> fn;._2 string,','</syntaxhighlight>
 
Here '''fn''' is applied to each ',' delimited substring and the results are assembled into an array.
 
Or, factoring out the names:
<syntaxhighlight lang="j"> fn ((;._2)(@(,&','))) string</syntaxhighlight>
 
=={{header|Java}}==
{{works with|Java|1.8+}}
 
<syntaxhighlight lang="java5">String toTokenize = "Hello,How,Are,You,Today";
System.out.println(String.join(".", toTokenize.split(",")));</syntaxhighlight>
 
{{works with|Java|1.4+}}
<syntaxhighlight lang="java5">String toTokenize = "Hello,How,Are,You,Today";
 
String words[] = toTokenize.split(",");//splits on one comma, multiple commas yield multiple splits
for(int i=0; i<words.length; i++) {
System.out.print(words[i] + ".");
}</syntaxhighlight>
 
The other way is to use StringTokenizer. It will skip any empty tokens. So if two commas are given in line, there will be an empty string in the array given by the split function, but no empty string with the StringTokenizer object. This method takes more code to use, but allows you to get tokens incrementally instead of all at once.
 
{{works with|Java|1.0+}}
<syntaxhighlight lang="java5">String toTokenize = "Hello,How,Are,You,Today";
 
StringTokenizer tokenizer = new StringTokenizer(toTokenize, ",");
while(tokenizer.hasMoreTokens()) {
System.out.print(tokenizer.nextToken() + ".");
}</syntaxhighlight>
 
=={{header|JavaScript}}==
<syntaxhighlight lang="javascript">console.log(
"Hello,How,Are,You,Today"
.split(",")
.join(".")
);</syntaxhighlight>A more advanced program to tokenise strings:<syntaxhighlight lang="javascript" line="1">
const Tokeniser = (function () {
const numberRegex = /-?(\d+\.d+|\d+\.|\.\d+|\d+)((e|E)(\+|-)?\d+)?/g;
return {
settings: {
operators: ["<", ">", "=", "+", "-", "*", "/", "?", "!"],
separators: [",", ".", ";", ":", " ", "\t", "\n"],
groupers: ["(", ")", "[", "]", "{", "}", '"', '"', "'", "'"],
keepWhiteSpacesAsTokens: false,
trimTokens: true
},
isNumber: function (value) {
if (typeof value === "number") {
return true;
} else if (typeof value === "string") {
return numberRegex.test(value);
}
return false;
},
closeGrouper: function (grouper) {
if (this.settings.groupers.includes(grouper)) {
return this.settings.groupers[this.settings.groupers.indexOf(grouper) + 1];
}
return null;
},
tokenType: function (char) {
if (this.settings.operators.includes(char)) {
return "operator";
} else if (this.settings.separators.includes(char)) {
return "separator";
} else if (this.settings.groupers.includes(char)) {
return "grouper";
}
return "other";
},
parseString: function (str) {
if (typeof str !== "string") {
if (str === null) {
return "null";
} if (typeof str === "object") {
str = JSON.stringify(str);
} else {
str = str.toString();
}
}
let tokens = [], _tempToken = "";
for (let i = 0; i < str.length; i++) {
if (this.tokenType(_tempToken) !== this.tokenType(str[i]) || this.tokenType(str[i]) === "separator") {
if (_tempToken.trim() !== "") {
tokens.push(this.settings.trimTokens ? _tempToken.trim() : _tempToken);
} else if (this.settings.keepWhiteSpacesAsTokens) {
tokens.push(_tempToken);
}
_tempToken = str[i];
if (this.tokenType(_tempToken) === "separator") {
if (_tempToken.trim() !== "") {
tokens.push(this.settings.trimTokens ? _tempToken.trim() : _tempToken);
} else if (this.settings.keepWhiteSpacesAsTokens) {
tokens.push(_tempToken);
}
_tempToken = "";
}
} else {
_tempToken += str[i];
}
}
if (_tempToken.trim() !== "") {
tokens.push(this.settings.trimTokens ? _tempToken.trim() : _tempToken);
} else if (this.settings.keepWhiteSpacesAsTokens) {
tokens.push(_tempToken);
}
return tokens.filter((token) => token !== "");
}
};
})();
</syntaxhighlight>Output:<syntaxhighlight lang="javascript">
Tokeniser.parseString("Hello,How,Are,You,Today");
 
// -> ['Hello', ',', 'How', ',', 'Are', ',', 'You', ',', 'Today']
</syntaxhighlight>
 
=={{header|jq}}==
<syntaxhighlight lang="jq">split(",") | join(".")</syntaxhighlight>Example:<syntaxhighlight lang="sh">$ jq -r 'split(",") | join(".")'
"Hello,How,Are,You,Today"
Hello.How.Are.You.Today</syntaxhighlight>
 
=={{header|Jsish}}==
Being in the ECMAScript family, Jsi is blessed with many easy to use character, string and array manipulation routines.
 
<syntaxhighlight lang="javascript">puts('Hello,How,Are,You,Today'.split(',').join('.'))</syntaxhighlight>
{{out}}
<pre>Hello.How.Are.You.Today</pre>
 
=={{header|Julia}}==
<syntaxhighlight lang="julia">
s = "Hello,How,Are,You,Today"
a = split(s, ",")
t = join(a, ".")
println("Splits into ", a)
println("Reconstitutes to \"", t, "\"")
</syntaxhighlight>
 
{{out}}
 
=={{header|K}}==
<syntaxhighlight lang="k">words: "," \: "Hello,How,Are,You,Today"
"." /: words</syntaxhighlight>
 
{{out}}
Line 1,382 ⟶ 2,225:
"Hello.How.Are.You.Today"
</pre>
 
{{works with|ngn/k}}<syntaxhighlight lang=K>","\"Hello,How,Are,You,Today"
("Hello"
"How"
"Are"
"You"
"Today")</syntaxhighlight>
 
=={{header|Klingphix}}==
<syntaxhighlight lang="klingphix">( "Hello,How,Are,You,Today" "," ) split len [ get print "." print ] for
 
nl "End " input</syntaxhighlight>
{{out}}
<pre>Hello.How.Are.You.Today.
End</pre>
 
=={{header|Kotlin}}==
{{works with|Kotlin|1.0b4}}
<syntaxhighlight lang="scala">fun main(args: Array<String>) {
val input = "Hello,How,Are,You,Today"
println(input.split(',').joinToString("."))
}</syntaxhighlight>
{{out}}
<pre>Hello.How.Are.You.Today</pre>
 
=={{header|Ksh}}==
<syntaxhighlight lang="ksh">
#!/bin/ksh
 
# Tokenize a string
 
# # Variables:
#
string="Hello,How,Are,You,Today"
inputdelim=\, # a comma
outputdelim=\. # a period
 
# # Functions:
#
# # Function _tokenize(str, indelim, outdelim)
#
function _tokenize {
typeset _str ; _str="$1"
typeset _ind ; _ind="$2"
typeset _outd ; _outd="$3"
while [[ ${_str} != ${_str/${_ind}/${_outd}} ]]; do
_str=${_str/${_ind}/${_outd}}
done
 
echo "${_str}"
}
 
######
# main #
######
 
_tokenize "${string}" "${inputdelim}" "${outputdelim}"</syntaxhighlight>
{{out}}<pre>Hello.How.Are.You.Today</pre>
 
=={{header|LabVIEW}}==
To tokenize the string, we use the Search/Split String function to split the string by its first comma. Add the beginning (up to, but not including the comma) to the end of the array, remove the first comma from the rest of the string, and pass it back through the shift register to the loop's next iteration. This is repeated until the string is empty. Printing is a simple matter of concatenation.<br/>
{{VI solution|LabVIEW_Tokenize_a_string.png}}
 
=={{header|Lambdatalk}}==
<syntaxhighlight lang="scheme">
{S.replace , by . in Hello,How,Are,You,Today}.
-> Hello.How.Are.You.Today.
</syntaxhighlight>
 
=={{header|Lang}}==
<syntaxhighlight lang="lang">
$str = Hello,How,Are,You,Today
fn.println(fn.join(\., fn.split($str, \,)))
</syntaxhighlight>
 
=={{header|Lang5}}==
<syntaxhighlight lang="lang5">'Hello,How,Are,You,Today ', split '. join .</syntaxhighlight>
 
=={{header|LDPL}}==
<syntaxhighlight lang="ldpl">
DATA:
explode/words is text vector
explode/index is number
explode/string is text
explode/length is number
explode/stringlength is number
explode/current-token is text
explode/char is text
explode/separator is text
i is number
PROCEDURE:
# Ask for a sentence
display "Enter a sentence: "
accept explode/string
 
# Declare explode Subprocedure
# Splits a text into a text vector by a certain delimiter
# Input parameters:
# - explode/string: the string to explode (destroyed)
# - explode/separator: the character used to separate the string (preserved)
# Output parameters:
# - explode/words: vector of splitted words
# - explode/length: length of explode/words
sub-procedure explode
join explode/string and explode/separator in explode/string
store length of explode/string in explode/stringlength
store 0 in explode/index
store 0 in explode/length
store "" in explode/current-token
while explode/index is less than explode/stringlength do
get character at explode/index from explode/string in explode/char
if explode/char is equal to explode/separator then
store explode/current-token in explode/words:explode/length
add explode/length and 1 in explode/length
store "" in explode/current-token
else
join explode/current-token and explode/char in explode/current-token
end if
add explode/index and 1 in explode/index
repeat
subtract 1 from explode/length in explode/length
end sub-procedure
 
# Separate the entered string
store " " in explode/separator
call sub-procedure explode
while i is less than or equal to explode/length do
display explode/words:i crlf
add 1 and i in i
repeat
</syntaxhighlight>
 
=={{header|LFE}}==
 
<syntaxhighlight lang="lisp">
> (set split (string:tokens "Hello,How,Are,You,Today" ","))
("Hello" "How" "Are" "You" "Today")
> (string:join split ".")
"Hello.How.Are.You.Today"
</syntaxhighlight>
 
 
=={{header|Lingo}}==
<syntaxhighlight lang="lingo">input = "Hello,How,Are,You,Today"
_player.itemDelimiter = ","
output = ""
delete the last char of output
put output
-- "Hello.How.Are.You.Today"</syntaxhighlight>
 
=={{header|Logo}}==
{{works with|UCB Logo}}
<syntaxhighlight lang="logo">to split :str :sep
output parse map [ifelse ? = :sep ["| |] [?]] :str
end</syntaxhighlight>
 
This form is more robust, doing the right thing if there are embedded spaces.
<syntaxhighlight lang="logo">to split :str :by [:acc []] [:w "||]
if empty? :str [output lput :w :acc]
ifelse equal? first :str :by ~
[output (split butfirst :str :by lput :w :acc)] ~
[output (split butfirst :str :by :acc lput first :str :w)]
end</syntaxhighlight>
 
<syntaxhighlight lang="logo">? show split "Hello,How,Are,You,Today ",
[Hello How Are You Today]</syntaxhighlight>
 
=={{header|Logtalk}}==
Using Logtalk built-in support for Definite Clause Grammars (DCGs) and representing the strings as atoms for readability:
<syntaxhighlight lang="logtalk">
:- object(spliting).
 
 
:- end_object.
</syntaxhighlight>
{{out}}
<pre>
</pre>
=={{header|Lua}}==
Split function callously stolen from the lua-users wiki
<syntaxhighlight lang="lua">function string:split (sep)
local sep, fields = sep or ":", {}
local pattern = string.format("([^%s]+)", sep)
	self:gsub(pattern, function(c) fields[#fields+1] = c end)
	return fields
end
 
local str = "Hello,How,Are,You,Today"
print(table.concat(str:split(","), "."))</syntaxhighlight>
{{out}}
<pre>Hello.How.Are.You.Today</pre>
 
=={{header|M2000 Interpreter}}==
<syntaxhighlight lang="m2000 interpreter">
Module CheckIt {
Function Tokenize$(s){
\\ letter$ pop a string from stack of values
\\ shift 2 swap top two values on stack of values
fold1=lambda m=1 ->{
shift 2 :if m=1 then m=0:drop: push letter$ else push letter$+"."+letter$
}
=s#fold$(fold1)
}
Print Tokenize$(piece$("Hello,How,Are,You,Today",",")) ="Hello.How.Are.You.Today" ' true
}
Checkit
</syntaxhighlight>
 
=={{header|M4}}==
<syntaxhighlight lang="m4">define(`s',`Hello,How,Are,You,Today')
define(`set',`define(`$1[$2]',`$3')')
define(`get',`defn($1[$2])')
define(`show',
`ifelse(eval(j<n),1,`get(a,j).`'define(`j',incr(j))`'show')')
show</syntaxhighlight>
 
{{out}}
Line 1,502 ⟶ 2,478:
 
=={{header|Maple}}==
<syntaxhighlight lang="maple">StringTools:-Join(StringTools:-Split("Hello,How,Are,You,Today", ","),".");</syntaxhighlight>
{{Out|Output}}
<pre>"Hello.How.Are.You.Today"</pre>
 
=={{header|Mathematica}}/{{header|Wolfram Language}}==
 
<syntaxhighlight lang="mathematica">StringJoin@StringSplit["Hello,How,Are,You,Today", "," -> "."]</syntaxhighlight>
 
=={{header|MATLAB}} / {{header|Octave}}==
<syntaxhighlight lang="matlab">
s=strsplit('Hello,How,Are,You,Today',',')
fprintf(1,'%s.',s{:})
</syntaxhighlight>
{{out}}
<pre>
Hello.How.Are.You.Today.
</pre>
 
=={{header|Maxima}}==
<syntaxhighlight lang="maxima">l: split("Hello,How,Are,You,Today", ",")$
printf(true, "~{~a~^.~}~%", l)$</syntaxhighlight>
 
A slightly different way
<syntaxhighlight lang="maxima">
split("Hello,How,Are,You,Today",",")$
simplode(%,".");
</syntaxhighlight>
{{out}}
<pre>
"Hello.How.Are.You.Today"
</pre>
 
=={{header|MAXScript}}==
<syntaxhighlight lang="maxscript">output = ""
for word in (filterString "Hello,How,Are,You,Today" ",") do
(
output += (word + ".")
)
format "%\n" output</syntaxhighlight>
 
=={{header|Mercury}}==
<syntaxhighlight lang="text">
:- module string_tokenize.
:- interface.
Tokens = string.split_at_char((','), "Hello,How,Are,You,Today"),
io.write_list(Tokens, ".", io.write_string, !IO),
io.nl(!IO).</syntaxhighlight>
 
=={{header|min}}==
{{works with|min|0.19.3}}
<syntaxhighlight lang="min">"Hello,How,Are,You,Today" "," split "." join print</syntaxhighlight>
 
=={{header|MiniScript}}==
<syntaxhighlight lang="miniscript">tokens = "Hello,How,Are,You,Today".split(",")
print tokens.join(".")</syntaxhighlight>
 
=={{header|MMIX}}==
<syntaxhighlight lang="mmix">sep IS ','
EOS IS 0
NL IS 10
LDBU t,tp
PBNZ t,2B % UNTIL EOB(uffer)
TRAP 0,Halt,0</syntaxhighlight>
{{out}}
<pre>
Line 1,628 ⟶ 2,608:
 
=={{header|Modula-3}}==
<syntaxhighlight lang="modula3">MODULE Tokenize EXPORTS Main;
 
IMPORT IO, TextConv;
END;
IO.Put("\n");
END Tokenize.</syntaxhighlight>
 
=={{header|MUMPS}}==
<syntaxhighlight lang="mumps">TOKENS
NEW I,J,INP
SET INP="Hello,how,are,you,today"
NEW J FOR J=1:1:I WRITE INP(J) WRITE:J'=I "."
KILL I,J,INP // Kill is optional. "New" variables automatically are killed on "Quit"
QUIT</syntaxhighlight>
 
In use:
USER>D TOKENS^ROSETTA
Hello.how.are.you.today
 
=={{header|Nanoquery}}==
<syntaxhighlight lang="nanoquery">for word in "Hello,How,Are,You,Today".split(",")
print word + "."
end</syntaxhighlight>
{{out}}
<pre>Hello.How.Are.You.Today.</pre>
 
=={{header|Nemerle}}==
<syntaxhighlight lang="nemerle">using System;
using System.Console;
using Nemerle.Utility.NString;
// a quick in place list comprehension takes care of that
}
}</syntaxhighlight>
 
=={{header|NetRexx}}==
<syntaxhighlight lang="netrexx">/*NetRexx program *****************************************************
* 20.08.2012 Walter Pachl derived from REXX Version 3
**********************************************************************/
Say ss.word(i)'.'
End
Say 'End-of-list.'</syntaxhighlight>
Output as in REXX version
 
=={{header|NewLISP}}==
<syntaxhighlight lang="newlisp">(print (join (parse "Hello,How,Are,You,Today" ",") "."))</syntaxhighlight>
 
=={{header|Nial}}==
Line 1,699 ⟶ 2,687:
Define Array with input string:
 
<syntaxhighlight lang="nial"> s := 'Hello,How,Are,You,Today'
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|H|e|l|l|o|,|H|o|w|,|A|r|e|,|Y|o|u|,|T|o|d|a|y|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+</syntaxhighlight>
 
Split string at the commas:
 
<syntaxhighlight lang="nial"> t := s eachall = `, cut s
+-----------+-------+-------+-------+-----------+
|+-+-+-+-+-+|+-+-+-+|+-+-+-+|+-+-+-+|+-+-+-+-+-+|
||H|e|l|l|o|||H|o|w|||A|r|e|||Y|o|u|||T|o|d|a|y||
|+-+-+-+-+-+|+-+-+-+|+-+-+-+|+-+-+-+|+-+-+-+-+-+|
+-----------+-------+-------+-------+-----------+</syntaxhighlight>
 
Join string with <code>.</code> and remove last <code>.</code>
 
<syntaxhighlight lang="nial"> u := front content (cart t `.)
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|H|e|l|l|o|.|H|o|w|.|A|r|e|.|Y|o|u|.|T|o|d|a|y|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+</syntaxhighlight>
 
Less cluttered display, using <code>set "sketch;set "nodecor</code> display switches.
 
<syntaxhighlight lang="nial"> s:='Hello,How,Are,You,Today'
Hello,How,Are,You,Today
t:= s eachall = `, cut s
     +-----+---+---+---+-----+
     |Hello|How|Are|You|Today|
     +-----+---+---+---+-----+
         u:=front content (cart t `.)
     Hello.How.Are.You.Today</syntaxhighlight>
 
Or as a one-liner:
 
<syntaxhighlight lang="nial">
front content (cart (s eachall = `, cut s) `.)
</syntaxhighlight>
 
=={{header|Nim}}==
<syntaxhighlight lang="nim">import strutils
 
let text = "Hello,How,Are,You,Today"
let tokens = text.split(',')
echo tokens.join(".")</syntaxhighlight>
 
{{out}}
<pre>Hello.How.Are.You.Today</pre>
 
=={{header|Objeck}}==
<syntaxhighlight lang="objeck">
class Parse {
function : Main(args : String[]) ~ Nil {
};
}
}</syntaxhighlight>
 
=={{header|Objective-C}}==
{{works with|Cocoa}}
 
<syntaxhighlight lang="objc">NSString *text = @"Hello,How,Are,You,Today";
NSArray *tokens = [text componentsSeparatedByString:@","];
NSString *result = [tokens componentsJoinedByString:@"."];
NSLog(result);</syntaxhighlight>
 
=={{header|OCaml}}==
To split on a single-character separator:
<syntaxhighlight lang="ocaml">let words = String.split_on_char ',' "Hello,How,Are,You,Today" in
String.concat "." words
</syntaxhighlight>
 
The function split_on_char has been introduced in OCaml 4.04. In previous versions, it could be implemented by:
 
<syntaxhighlight lang="ocaml">let split_on_char sep s =
let r = ref [] in
let j = ref (String.length s) in
  for i = String.length s - 1 downto 0 do
    if String.unsafe_get s i = sep then begin
      r := String.sub s (i + 1) (!j - i - 1) :: !r;
      j := i
end
done;
  String.sub s 0 !j :: !r</syntaxhighlight>
 
=={{header|Oforth}}==
 
<syntaxhighlight lang="oforth">"Hello,How,Are,You,Today" wordsWith(',') println</syntaxhighlight>
 
{{out}}
 
=={{header|ooRexx}}==
<syntaxhighlight lang="oorexx">text='Hello,How,Are,You,Today'
do while text \= ''
parse var text word1 ',' text
call charout 'STDOUT:',word1'.'
end</syntaxhighlight>
{{out}}
<pre>Hello.How.Are.You.Today.</pre>
 
=={{header|OpenEdge/Progress}}==
<syntaxhighlight lang="progress">FUNCTION tokenizeString RETURNS CHAR (
i_c AS CHAR
):
MESSAGE
tokenizeString( "Hello,How,Are,You,Today" )
VIEW-AS ALERT-BOX.</syntaxhighlight>
{{out}}
<pre>
Hello.How.Are.You.Today
</pre>
 
=={{header|Oz}}==
<syntaxhighlight lang="oz">for T in {String.tokens "Hello,How,Are,You,Today" &,} do
{System.printInfo T#"."}
end</syntaxhighlight>
 
=={{header|PARI/GP}}==
{{Works with|PARI/GP|2.7.4 and above}}
 
<syntaxhighlight lang="parigp">
\\ Tokenize a string str according to 1 character delimiter d. Return a list of tokens.
\\ Using ssubstr() from http://rosettacode.org/wiki/Substring#PARI.2FGP
print("3.",tokenize(",Hello,,How,Are,You,Today",","));
}
</syntaxhighlight>
 
{{Output}}
{{Works with|PARI/GP|2.7.4 and above}}
 
<syntaxhighlight lang="parigp">
\\ Tokenize a string str according to 1 character delimiter d. Return a list of tokens.
\\ Using ssubstr() from http://rosettacode.org/wiki/Substring#PARI.2FGP
print("7. 0 pp: ", stok("",","));
}
</syntaxhighlight>
 
{{Output}}
=={{header|Pascal}}==
{{works with|Free_Pascal}}
<syntaxhighlight lang="pascal">program TokenizeString;
 
{$mode objfpc}{$H+}
Tokens.Free;
end;
end.</syntaxhighlight>
 
The result is:
 
=={{header|Perl}}==
<syntaxhighlight lang="perl">print join('.', split /,/, 'Hello,How,Are,You,Today'), "\n";</syntaxhighlight>
CLI one-liner form:
<syntaxhighlight lang="perl">echo "Hello,How,Are,You,Today" | perl -aplF/,/ -e '$" = "."; $_ = "@F";'</syntaxhighlight>
which is a compact way of telling Perl to do
<syntaxhighlight lang="perl">BEGIN { $/ = "\n"; $\ = "\n"; }
LINE: while (defined($_ = <ARGV>)) {
chomp $_;
    our @F = split(/,/, $_, 0);
    $" = ".";
    $_ = "@F";
}
continue {
die "-p destination: $!\n" unless print $_;
}</syntaxhighlight>
 
=={{header|Phix}}==
<!--<syntaxhighlight lang="phix">(phixonline)-->
<span style="color: #0000FF;">?</span><span style="color: #7060A8;">join</span><span style="color: #0000FF;">(</span><span style="color: #7060A8;">split</span><span style="color: #0000FF;">(</span><span style="color: #008000;">"Hello,How,Are,You,Today"</span><span style="color: #0000FF;">,</span><span style="color: #008000;">","</span><span style="color: #0000FF;">),</span><span style="color: #008000;">"."</span><span style="color: #0000FF;">)</span>
<!--</syntaxhighlight>-->
{{Out}}
<pre>
"Hello.How.Are.You.Today"
</pre>
 
=={{header|Phixmonti}}==
<syntaxhighlight lang="phixmonti">/# "Hello,How,Are,You,Today" "," "." subst print #/
"Hello,How,Are,You,Today" "," " " subst split len for get print "." print endfor</syntaxhighlight>
 
=={{header|PHP}}==
{{works with|PHP|5.x}}
 
<syntaxhighlight lang="php"><?php
$str = 'Hello,How,Are,You,Today';
echo implode('.', explode(',', $str));
?></syntaxhighlight>
 
=={{header|Picat}}==
Using the built-in functions <code>split/2</code> and <code>join/2</code>.
<syntaxhighlight lang="picat">import util.
 
go =>
S = "Hello,How,Are,You,Today",
T = S.split(","),
println(T),
T.join(".").println(),
 
% As a one liner:
S.split(",").join(".").println().</syntaxhighlight>
 
{{out}}
<pre>[Hello,How,Are,You,Today]
Hello.How.Are.You.Today
Hello.How.Are.You.Today</pre>
 
=={{header|PicoLisp}}==
<syntaxhighlight lang="picolisp">(mapcar pack
   (split (chop "Hello,How,Are,You,Today") ",") )</syntaxhighlight>
 
=={{header|Pike}}==
<syntaxhighlight lang="pike">("Hello,How,Are,You,Today" / ",") * ".";</syntaxhighlight>
 
=={{header|PL/I}}==
<syntaxhighlight lang="pli">tok: Proc Options(main);
declare s character (100) initial ('Hello,How,Are,You,Today');
declare n fixed binary (31);
put skip list (string(table));
end;
end;</syntaxhighlight>
{{out}}
<pre>Hello.How.Are.You.Today</pre>
 
=={{header|PL/M}}==
<syntaxhighlight lang="plm">100H:
/* CP/M CALLS */
BDOS: PROCEDURE (FN, ARG); DECLARE FN BYTE, ARG ADDRESS; GO TO 5; END BDOS;
EXIT: PROCEDURE; CALL BDOS(0,0); END EXIT;
PRINT: PROCEDURE (S); DECLARE S ADDRESS; CALL BDOS(9,S); END PRINT;
 
/* SPLIT A STRING ON CHARACTER 'SEP'.
THE 'PARTS' ARRAY WILL CONTAIN POINTERS TO THE START OF EACH ELEMENT.
THE AMOUNT OF PARTS IS RETURNED.
*/
TOKENIZE: PROCEDURE (SEP, STR, PARTS) ADDRESS;
DECLARE SEP BYTE, (STR, PARTS) ADDRESS;
DECLARE (N, P BASED PARTS) ADDRESS;
DECLARE CH BASED STR BYTE;
N = 0;
LOOP:
P(N) = STR;
N = N + 1;
DO WHILE CH <> '$' AND CH <> SEP;
STR = STR + 1;
END;
IF CH = '$' THEN RETURN N;
CH = '$';
STR = STR + 1;
GO TO LOOP;
END TOKENIZE;
 
/* TEST ON THE GIVEN INPUT */
DECLARE HELLO (24) BYTE INITIAL ('HELLO,HOW,ARE,YOU,TODAY$');
DECLARE PARTS (10) ADDRESS;
DECLARE (I, LEN) ADDRESS;
 
LEN = TOKENIZE(',', .HELLO, .PARTS);
DO I = 0 TO LEN-1;
CALL PRINT(PARTS(I));
CALL PRINT(.'. $');
END;
 
CALL EXIT;
EOF;</syntaxhighlight>
{{out}}
<pre>HELLO. HOW. ARE. YOU. TODAY. </pre>
 
=={{header|Plain English}}==
<syntaxhighlight lang="plainenglish">To run:
Start up.
Split "Hello,How,Are,You,Today" into some string things given the comma byte.
Join the string things with the period byte giving a string.
Destroy the string things.
Write the string on the console.
Wait for the escape key.
Shut down.
 
To join some string things with a byte giving a string:
Get a string thing from the string things.
Loop.
If the string thing is nil, exit.
Append the string thing's string to the string.
If the string thing's next is not nil, append the byte to the string.
Put the string thing's next into the string thing.
Repeat.</syntaxhighlight>
{{out}}
<pre>
Hello.How.Are.You.Today
</pre>
 
=={{header|Pop11}}==
First show the use of sysparse_string to break up a string and make a list of strings.
 
<syntaxhighlight lang="pop11">;;; Make a list of strings from a string using space as separator
lvars list;
sysparse_string('the cat sat on the mat') -> list;
;;; print the list of strings
list =>
** [the cat sat on the mat]</syntaxhighlight>
 
By giving it an extra parameter 'true' we can make it recognize numbers and produce a list of strings and numbers
 
<syntaxhighlight lang="pop11">lvars list;
sysparse_string('one 1 two 2 three 3 four 4', true) -> list;
;;; print the list of strings and numbers
list =>
** [one 1 two 2 three 3 four 4]
isstring(list(1))=>
** <true>
isinteger(list(2))=>
** <true></syntaxhighlight>
 
Now show some uses of the built in procedure sys_parse_string, which allows more options:
 
<syntaxhighlight lang="pop11">;;; Make pop-11 print strings with quotes
true -> pop_pr_quotes;
;;;
;;; declare variables and split the string at the commas
lvars str='Hello,How,Are,You,Today', strings;
[% sys_parse_string(str, `,`) %] -> strings;
;;; print the list of strings
strings =>
** ['Hello' 'How' 'Are' 'You' 'Today']</syntaxhighlight>
 
If {% ... %} were used instead of [% ... %] the result would be
a vector (i.e. array) of strings rather than a list of strings.
 
<syntaxhighlight lang="pop11">{% sys_parse_string(str, `,`) %} -> strings;
;;; print the vector
strings =>
** {'Hello' 'How' 'Are' 'You' 'Today'}</syntaxhighlight>
It is also possible to give sys_parse_string a 'conversion' procedure, which is applied to each of the tokens.
E.g. it could be used to produce a vector of numbers, using the conversion procedure 'strnumber', which converts a string to a number:
 
<syntaxhighlight lang="pop11">lvars numbers;
{% sys_parse_string('100 101 102 103 99.9 99.999', strnumber) %} -> numbers;
;;; the result is a vector containing integers and floats,
;;; which can be printed thus:
numbers =>
** {100 101 102 103 99.9 99.999}</syntaxhighlight>
 
Using lower level pop-11 facilities to tokenise the string:
 
<syntaxhighlight lang="pop11">;;; Declare and initialize variables
lvars str='Hello,How,Are,You,Today';
;;; Iterate over string
endif;
;;; Reverse the list
rev(ls) -> ls;</syntaxhighlight>
 
Since the task requires the use of an array, we convert the list to an array.
 
<syntaxhighlight lang="pop11">;;; Put list elements and length on the stack
destlist(ls);
;;; Build a vector from them
lvars ar, i;
consvector() -> ar;
;;; Print each element followed by a period
for i from 1 to datalength(ar) do
printf(ar(i), '%s.');
endfor;
printf('\n');</syntaxhighlight>
 
We could use list directly for printing:
 
<syntaxhighlight lang="pop11">for i in ls do
printf(i, '%s.');
endfor;</syntaxhighlight>
 
so the conversion to a vector is purely to satisfy the task formulation.
=={{header|PowerShell}}==
{{works with|PowerShell|1}}
<syntaxhighlight lang="powershell">$words = "Hello,How,Are,You,Today".Split(',')
[string]::Join('.', $words)</syntaxhighlight>
 
{{works with|PowerShell|2}}
<syntaxhighlight lang="powershell">$words = "Hello,How,Are,You,Today" -split ','
$words -join '.'</syntaxhighlight>
 
{{works with|PowerShell|2}}
The StringSplitOptions enumeration weeds out the return of empty elements.
<syntaxhighlight lang="powershell">
"Hello,How,Are,You,Today", ",,Hello,,Goodbye,," | ForEach-Object {($_.Split(',',[StringSplitOptions]::RemoveEmptyEntries)) -join "."}
</syntaxhighlight>
{{Out}}
<pre>
Hello.How.Are.You.Today
Hello.Goodbye
</pre>
=={{header|Prolog}}==
{{works with|SWI Prolog}}
<syntaxhighlight lang="prolog">splitup(Sep,[token(B)|BL]) --> splitup(Sep,B,BL).
splitup(Sep,[A|AL],B) --> [A], {\+ [A] = Sep }, splitup(Sep,AL,B).
splitup(Sep,[],[B|BL]) --> Sep, splitup(Sep,B,BL).
phrase(splitup(".",Tokens),Backtogether),
string_to_list(ABack,Backtogether),
writeln(ABack).</syntaxhighlight>
{{out}}
<pre>
Hello.How.Are.You.Today
</pre>

SWI-Prolog 7 and later also provide the built-in predicate split_string/4, with which
this can be accomplished in a few lines in the top level:
 
<syntaxhighlight lang="prolog">
?- split_string("Hello,How,Are,You,Today", ",", "", Split),
| atomics_to_string(Split, ".", PeriodSeparated),
| writeln(PeriodSeparated).
Hello.How.Are.You.Today
</syntaxhighlight>
 
=={{header|Python}}==
{{works with|Python|2.5}}{{works with|Python|3.0}}
 
<syntaxhighlight lang="python">text = "Hello,How,Are,You,Today"
tokens = text.split(',')
print ('.'.join(tokens))</syntaxhighlight>
 
Or if interpretation of the task description means you don't need to keep an intermediate array:
<syntaxhighlight lang="python">print ('.'.join('Hello,How,Are,You,Today'.split(',')))</syntaxhighlight>
 
=={{header|Q}}==
<syntaxhighlight lang="q">words: "," vs "Hello,How,Are,You,Today"
"." sv words</syntaxhighlight>
 
{{out}}
<pre>"Hello.How.Are.You.Today"</pre>
 
=={{header|QB64}}==
''CBTJD'': 2020/03/12
<syntaxhighlight lang="vb">a$ = "Hello,How,Are,You,Today" ' | Initialize original string.
FOR na = 1 TO LEN(a$) ' | Start loop to count number of commas.
IF MID$(a$, na, 1) = "," THEN nc = nc + 1 ' | For each comma, increment nc.
NEXT ' | End of loop.
DIM t$(nc) ' | Dim t$ array with total number of commas (nc). Array base is 0.
FOR nb = 1 TO LEN(a$) ' | Start loop to find each word.
c$ = MID$(a$, nb, 1) ' | Look at each character in the string.
IF c$ = "," THEN ' | If the character is a comma, increase the t$ array for the next word.
t = t + 1 ' | t = token word count. Starts at 0 because array base is 0.
ELSE ' | Or...
t$(t) = t$(t) + c$ ' | Add each character to the current token (t$) word.
END IF ' | End of decision tree.
NEXT ' | End of loop.
FOR nd = 0 TO t ' | Start loop to create final desired output.
tf$ = tf$ + t$(nd) + "." ' | Add each token word from t$ followed by a period to the final tf$.
NEXT ' | End of loop.
PRINT LEFT$(tf$, LEN(tf$) - 1) ' | Print all but the last period of tf$.
END ' | Program end.
</syntaxhighlight>
 
'''Alternative method using word$ function:'''
----
''CBTJD'': 2020/03/12
<syntaxhighlight lang="vb">a$ = "Hello,How,Are,You,Today" ' | Initialize original string.
DIM t$(LEN(a$) / 2) ' | Create an overestimated sized array.
FOR nd = 1 TO LEN(a$) ' | Start loop to find each comma.
IF MID$(a$, nd, 1) = "," THEN ' | If a comma is found...
tc = tc + 1 ' | Increment tc for each found comma.
t$(tc) = word$(a$, tc, ",") ' | Assign tc word to t$(tc) array.
END IF ' | End decision tree.
NEXT ' | End loop.
t$(tc + 1) = word$(a$, tc + 1, ",") ' | Assign last word to next array position.
ft$ = t$(1) ' | Start final return string ft$ with first array value.
FOR ne = 2 TO tc + 1 ' | Start loop to add periods and array values.
ft$ = ft$ + "." + t$(ne) ' | Concatenate a period with subsequent array values.
NEXT ' | End loop.
PRINT ft$ ' | Print final return string ft$.
 
FUNCTION word$ (inSTG$, inDEC, inPRM$) ' | word$ function accepts original string, word number, and separator.
inSTG$ = inSTG$ + inPRM$ ' | Add a separator to the end of the original string.
FOR na = 1 TO LEN(inSTG$) ' | Start loop to count total number of separators.
IF MID$(inSTG$, na, 1) = inPRM$ THEN nc = nc + 1 ' | If separator found, increment nc.
NEXT ' | End loop.
IF inDEC > nc THEN word$ = "": GOTO DONE ' | If requested word number (inDEC) is greater than total words (nc), bail.
FOR nd = 1 TO inDEC ' | Start loop to find requested numbered word.
last = st ' | Remember the position of the last separator.
st = INSTR(last + 1, inSTG$, inPRM$) ' | Find the next separator.
NEXT ' | End loop.
word$ = MID$(inSTG$, last + 1, st - last - 1) ' | Return requested word.
DONE: ' | Label for bail destination of word count error check.
END FUNCTION ' | End of function.
</syntaxhighlight>
 
=={{header|Quackery}}==
 
<syntaxhighlight lang="quackery"> [ [] [] rot
witheach
[ dup char , = iff
[ drop nested join [] ]
else join ]
nested join ] is tokenise ( $ --> [ )
[ witheach [ echo$ say "." ] ] is display ( [ --> )
$ "Hello,How,Are,You,Today" tokenise display</syntaxhighlight>
 
{{Out}}
 
<pre>Hello.How.Are.You.Today.</pre>
 
=={{header|R}}==
<syntaxhighlight lang="r">text <- "Hello,How,Are,You,Today"
junk <- strsplit(text, split=",")
print(paste(unlist(junk), collapse="."))</syntaxhighlight>
 
or the one liner
 
<syntaxhighlight lang="r">paste(unlist(strsplit(text, split=",")), collapse=".")</syntaxhighlight>
 
=={{header|Racket}}==
 
<syntaxhighlight lang="racket">
#lang racket
(string-join (string-split "Hello,How,Are,You,Today" ",") ".")
;; -> "Hello.How.Are.You.Today"
</syntaxhighlight>
 
=={{header|Raku}}==
(formerly Perl 6)
{{works with|Rakudo|#22 "Thousand Oaks"}}
<syntaxhighlight lang="raku" line>'Hello,How,Are,You,Today'.split(',').join('.').say;</syntaxhighlight>
 
Or with function calls:
 
<syntaxhighlight lang="raku" line>say join '.', split ',', 'Hello,How,Are,You,Today';</syntaxhighlight>
 
=={{header|Raven}}==
<syntaxhighlight lang="raven">'Hello,How,Are,You,Today' ',' split '.' join print</syntaxhighlight>
 
=={{header|REBOL}}==
<syntaxhighlight lang="rebol">print ["Original:" original: "Hello,How,Are,You,Today"]
tokens: parse original ","
dotted: "" repeat i tokens [append dotted rejoin [i "."]]
print ["Dotted: " dotted]</langsyntaxhighlight>
 
{{out}}
 
=={{header|Red}}==
<syntaxhighlight lang="red">str: "Hello,How,Are,You,Today"
>> tokens: split str ","
>> probe tokens
["Hello" "How" "Are" "You" "Today"]
>> periods: replace/all form tokens " " "." ;The word FORM converts the list series to a string removing quotes.
>> print periods ;then REPLACE/ALL spaces with period
Hello.How.Are.You.Today</syntaxhighlight>
 
=={{header|Retro}}==
<syntaxhighlight lang="retro">{{
: char ( -$ ) " " ;
: tokenize ( $-$$ )
[ tokenize action dup 1 <> ] while drop
^buffer'get drop ;
}}</syntaxhighlight>
 
This will suffice to split a string into an array of substrings. It is used like this:
 
<syntaxhighlight lang="retro">create strings 100 allot
"Hello,How,Are,You,Today" ', strings split</syntaxhighlight>
 
Since the buffer' vocabulary creates a zero-terminated buffer, we can display it using the each@ combinator and a simple quote:
 
<syntaxhighlight lang="retro">strings [ @ "%s." puts ] ^types'STRING each@</syntaxhighlight>
 
=={{header|REXX}}==
===version 1===
This REXX version doesn't append a period to the last word in the list.
<syntaxhighlight lang="rexx">/*REXX program separates a string of comma─delimited words and echoes them ──► terminal*/
original = 'Hello,How,Are,You,Today'            /*some words separated by commas (,).  */
say 'The input string:'  original               /*display the original string.         */
new= original                                   /*make a copy of the string.           */
      do #=1  until new==''                     /*keep processing until NEW is empty.  */
      parse var new  @.#  ','  new              /*parse words delineated by a comma (,)*/
      end   /*#*/                               /* [↑]  the array is named  @.         */
say center(' Words in the string ', 40, "═")    /*display a nice header for the list.  */
      do j=1  for #                             /*display all the words (one per line),*/
      say @.j || left('.', j\==#)               /*maybe append a period (.) to a word. */
      end   /*j*/                               /* [↑]  don't append a period if last. */
say center(' End─of─list ', 40, "═")            /*display a (EOL) trailer for the list.*/</syntaxhighlight>
{{out|output|text=&nbsp; when using the internal default input:}}
<pre>
The input string: Hello,How,Are,You,Today

═════════ Words in the string ══════════
Hello.
How.
Are.
You.
Today
═════════════ End─of─list ══════════════
</pre>
 
===version 2===
This REXX version won't work if any of the words have an embedded blank (or possibly a tab character) in them, as in:
 
Hello,Betty Sue,How,Are,You,Today
<syntaxhighlight lang="rexx">/*REXX program to separate a string of comma-delimited words and echo */
sss='Hello,How,Are,You,Today'
say 'input string='sss
ss=translate(sss,' ',',')
Do i=1 To words(ss)
  If i=words(ss) Then dot=''
                 Else dot='.'
say word(ss,i)dot
End
say 'End-of-list.'</syntaxhighlight>
'''output''' is similar to that of REXX version 1.
 
=={{header|Ring}}==
<langsyntaxhighlight lang="ring">
see substr("Hello,How,Are,You,Today", ",", ".")
</syntaxhighlight>
 
=={{header|RPL}}==
The program below fully complies with the task requirements, e.g. the input string is converted to a list of words, then the list is converted to a string.
{{works with|Halcyon Calc|4.2.8}}
{| class="wikitable"
! RPL code
! Comment
|-
|
"}" + "{" SWAP + STR→
1 OVER SIZE '''FOR''' j
DUP j GET →STR 2 OVER SIZE 1 - SUB j SWAP PUT
'''NEXT'''
"" 1 3 PICK SIZE '''FOR''' j
OVER j GET +
'''IF''' OVER SIZE j ≠ '''THEN''' "." + '''END'''
'''NEXT''' SWAP DROP
≫ '<span style="color:blue">'''TOKNZ'''</span>' STO
|
<span style="color:blue">'''TOKNZ'''</span> ''<span style="color:grey">( "word,word" → "word.word" )</span> ''
convert string into list (words being between quotes)
loop for each list item
convert it to a string, remove quotes at beginning and end
loop for each list item
add item to output string
if not last item, append "."
clean stack
return output string
|}
 
"Hello,How,Are,You,Today" <span style="color:blue">'''TOKNZ'''</span>
'''Output:'''
<span style="color:grey"> 1:</span> "Hello.How.Are.You.Today"
If direct string-to-string conversion is allowed, then this one-liner for HP-48+ will do the job:
≪ 1 OVER SIZE '''FOR''' j '''IF''' DUP j DUP SUB "," == '''THEN''' j "." REPL '''END NEXT''' ≫ '<span style="color:blue">'''TOKNZ'''</span>' STO
 
=={{header|Ruby}}==
<syntaxhighlight lang="ruby">puts "Hello,How,Are,You,Today".split(',').join('.')</syntaxhighlight>
 
=={{header|Rust}}==
<syntaxhighlight lang="rust">fn main() {
let s = "Hello,How,Are,You,Today";
let tokens: Vec<&str> = s.split(",").collect();
println!("{}", tokens.join("."));
}</syntaxhighlight>
 
 
=={{header|S-lang}}==
<syntaxhighlight lang="s-lang">variable a = strchop("Hello,How,Are,You,Today", ',', 0);
print(strjoin(a, "."));</syntaxhighlight>
 
{{out}}
 
=={{header|Scala}}==
<syntaxhighlight lang="scala">println("Hello,How,Are,You,Today" split "," mkString ".")</syntaxhighlight>
 
=={{header|Scheme}}==
{{works with|Guile}}
<syntaxhighlight lang="scheme">(use-modules (ice-9 regex))
(define s "Hello,How,Are,You,Today")
(define words (map match:substring (list-matches "[^,]+" s)))
(do ((n 0 (+ n 1))) ((= n (length words)))
(display (list-ref words n))
(if (< n (- (length words) 1))
      (display ".")))</syntaxhighlight>
 
(with SRFI 13)
<syntaxhighlight lang="scheme">(define s "Hello,How,Are,You,Today")
(define words (string-tokenize s (char-set-complement (char-set #\,))))
(define t (string-join words "."))</syntaxhighlight>
 
{{works with|Gauche Scheme}}
<syntaxhighlight lang="scheme">(print
  (string-join
    (string-split "Hello,How,Are,You,Today" #\,)
    ".")) </syntaxhighlight>
{{output}}
<pre>
Hello.How.Are.You.Today
</pre>
 
=={{header|Seed7}}==
<syntaxhighlight lang="seed7">var array string: tokens is 0 times "";
 
tokens := split("Hello,How,Are,You,Today", ",");</syntaxhighlight>
 
=={{header|Self}}==
<syntaxhighlight lang="self">| s = 'Hello,How,Are,You,Today' |
((s splitOn: ',') joinUsing: '.') printLine.
</syntaxhighlight>
 
=={{header|Sidef}}==
<syntaxhighlight lang="ruby">'Hello,How,Are,You,Today'.split(',').join('.').say;</syntaxhighlight>
=={{header|Simula}}==
<syntaxhighlight lang="simula">BEGIN
 
CLASS TEXTARRAY(N); INTEGER N;
BEGIN
TEXT ARRAY ARR(1:N);
END TEXTARRAY;
 
REF(TEXTARRAY) PROCEDURE SPLIT(T,DELIM); TEXT T; CHARACTER DELIM;
BEGIN
INTEGER N, I, LPOS;
REF(TEXTARRAY) A;
 
N := 1;
T.SETPOS(1);
WHILE T.MORE DO
IF T.GETCHAR = DELIM THEN
N := N+1;
A :- NEW TEXTARRAY(N);
 
I := 0;
LPOS := 1;
T.SETPOS(LPOS);
WHILE T.MORE DO
IF T.GETCHAR = DELIM THEN
BEGIN
I := I+1;
A.ARR(I) :- T.SUB(LPOS,T.POS-LPOS-1);
LPOS := T.POS;
END;
I := I+1;
A.ARR(I) :- T.SUB(LPOS,T.LENGTH-LPOS+1);
SPLIT :- A;
END SPLIT;
 
BEGIN
TEXT S;
REF(TEXTARRAY) TA;
INTEGER I;
 
S :- "HELLO,HOW,ARE,YOU,TODAY";
TA :- SPLIT(S,',');
FOR I := 1 STEP 1 UNTIL TA.N DO
BEGIN
OUTTEXT(TA.ARR(I));
OUTCHAR('.');
END;
OUTIMAGE;
END;
 
END.
</syntaxhighlight>
{{out}}
<pre>HELLO.HOW.ARE.YOU.TODAY.</pre>
 
=={{header|Slate}}==
<syntaxhighlight lang="slate">('Hello,How,Are,You,Today' splitWith: $,) join &separator: '.'.</syntaxhighlight>
 
=={{header|Slope}}==
<syntaxhighlight lang="slope">(display
(list->string
(string->list
"Hello,How,Are,You,Today"
",")
"."))</syntaxhighlight>
{{out}}
<pre>Hello.How.Are.You.Today</pre>
 
=={{header|Smalltalk}}==
<syntaxhighlight lang="smalltalk">|array |
array := 'Hello,How,Are,You,Today' subStrings: $,.
array fold: [:concatenation :string | concatenation, '.', string ]</syntaxhighlight>
 
Some implementations also have a ''join:'' convenience method that allows the following shorter solution:
 
<syntaxhighlight lang="smalltalk">('Hello,How,Are,You,Today' subStrings: $,) join: '.'</syntaxhighlight>
 
The solution displaying a trailing period would be:
 
<syntaxhighlight lang="smalltalk">|array |
array := 'Hello,How,Are,You,Today' subStrings: $,.
array inject: '' into: [:concatenation :string | concatenation, string, '.' ]</syntaxhighlight>
 
=={{header|SNOBOL4}}==
For this task, it's convenient to define Perl-style split( ) and join( ) functions.
 
<syntaxhighlight lang="snobol4">        define('split(chs,str)i,j,t,w2') :(split_end)
split t = table()
sp1 str pos(0) (break(chs) | rem) $ t<i = i + 1>
* # Test and display
output = join('.',split(',','Hello,How,Are,You,Today'))
end</syntaxhighlight>
 
{{out}}
<pre>Hello.How.Are.You.Today</pre>
 
=={{header|Standard ML}}==
<syntaxhighlight lang="sml">val splitter = String.tokens (fn c => c = #",");
val main = (String.concatWith ".") o splitter;</syntaxhighlight>
 
Test:
 
<syntaxhighlight lang="sml">- main "Hello,How,Are,You,Today"
val it = "Hello.How.Are.You.Today" : string</syntaxhighlight>
 
=={{header|Swift}}==
 
{{works with|Swift|3.x}}
<syntaxhighlight lang="swift">let text = "Hello,How,Are,You,Today"
let tokens = text.components(separatedBy: ",") // for single or multi-character separator
print(tokens)
let result = tokens.joined(separator: ".")
print(result)</syntaxhighlight>
 
{{works with|Swift|2.x}}
<syntaxhighlight lang="swift">let text = "Hello,How,Are,You,Today"
let tokens = text.characters.split(",").map{String($0)} // for single-character separator
print(tokens)
let result = tokens.joinWithSeparator(".")
print(result)</syntaxhighlight>
 
{{works with|Swift|1.x}}
<syntaxhighlight lang="swift">let text = "Hello,How,Are,You,Today"
let tokens = split(text, { $0 == "," }) // for single-character separator
println(tokens)
let result = ".".join(tokens)
println(result)</syntaxhighlight>
 
For multi-character separators:<syntaxhighlight lang="swift">import Foundation
 
let text = "Hello,How,Are,You,Today"
let tokens = text.componentsSeparatedByString(",")
print(tokens)</syntaxhighlight>
 
=={{header|Tcl}}==
Generating a list from a string by splitting on a comma:
<syntaxhighlight lang="tcl">split $string ","</syntaxhighlight>
 
Joining the elements of a list by a period:
<syntaxhighlight lang="tcl">join $list "."</syntaxhighlight>
 
Thus the whole thing would look like this:
<syntaxhighlight lang="tcl">puts [join [split "Hello,How,Are,You,Today" ","] "."]</syntaxhighlight>
 
If you'd like to retain the list in a variable with the name "words", it would only be marginally more complex:
<syntaxhighlight lang="tcl">puts [join [set words [split "Hello,How,Are,You,Today" ","]] "."]</syntaxhighlight>
 
(In general, the <tt>regexp</tt> command is also used in Tcl for tokenization of strings, but this example does not need that level of complexity.)
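A minimal sketch of that <tt>regexp</tt>-based tokenization, shown here only for comparison (it simply matches every run of non-comma characters instead of splitting on the separator), might look like:
<syntaxhighlight lang="tcl"># Collect every maximal run of characters that are not commas
set words [regexp -all -inline -- {[^,]+} "Hello,How,Are,You,Today"]
puts [join $words "."]</syntaxhighlight>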
=={{header|Tr}}==
<code>tr</code> knows nothing about arrays, so this solution only changes each comma to a period.
 
<syntaxhighlight lang="bash">echo 'Hello,How,Are,You,Today' | tr ',' '.'</syntaxhighlight>
 
=={{header|Transd}}==
<syntaxhighlight lang="Scheme">#lang transd
 
MainModule: {
_start: (lambda locals: s "Hello,How,Are,You,Today"
(textout (join (split s ",") "."))
)
}</syntaxhighlight>
{{out}}
<pre>
Hello.How.Are.You.Today
</pre>
 
=={{header|TUSCRIPT}}==
<syntaxhighlight lang="tuscript">
$$ MODE TUSCRIPT
SET string="Hello,How,Are,You,Today"
SET string=SPLIT (string,":,:")
SET string=JOIN (string,".")
</syntaxhighlight>
 
=={{header|TXR}}==
The first variant explicitly matches tokens consisting of
sequences of non-commas.
 
<syntaxhighlight lang="txr">@(next :list "Hello,How,Are,You,Today")
@(coll)@{token /[^,]+/}@(end)
@(output)
@(rep)@token.@(last)@token@(end)
@(end)</syntaxhighlight>
 
Different approach. Collect tokens, each of
which is a piece of text that either appears
before a comma, or else extends to the end of the line.
 
<syntaxhighlight lang="txr">@(next :list "Hello,How,Are,You,Today")
@(coll)@(maybe)@token,@(or)@token@(end)@(end)
@(output)
@(rep)@token.@(last)@token@(end)
@(end)</syntaxhighlight>
 
Using TXR Lisp:
 
<syntaxhighlight lang="bash">txr -p '(cat-str (split-str "Hello,How,Are,You,Today" ",") ".")'
Hello.How.Are.You.Today</syntaxhighlight>
 
=={{header|UNIX Shell}}==
{{works with|Bourne Shell}}
<syntaxhighlight lang="bash">string='Hello,How,Are,You,Today'
 
(IFS=,
printf '%s.' $string
echo)</syntaxhighlight>
 
----
{{works with|Bourne Again SHell}}
{{works with|Public Domain Korn SHell|5.2.14}}
<syntaxhighlight lang="bash">#! /bin/bash
stripchar-l ()
#removes the specified character from the left side of the string
join "$( split "$list" "$input_delimiter" )" \
"$contains_a_space" "$output_delimiter";
}</syntaxhighlight>
 
''Example''
 
<syntaxhighlight lang="bash"> strtokenize "Hello,How,Are,You,Today" "," "."
 Hello.How.Are.You.Today </syntaxhighlight>
 
----
{{works with|ksh93}}
{{works with|zsh}}
<syntaxhighlight lang="sh">
string1="Hello,How,Are,You,Today"
elements_quantity=$(echo $string1|tr "," "\n"|wc -l)
Line 2,611 ⟶ 3,884:
 
# or to cheat
echo "Hello,How,Are,You,Today"|tr "," "."</langsyntaxhighlight>
 
=={{header|UnixPipes}}==
{{works with|Bourne Shell}}
<syntaxhighlight lang="bash">token() {
(IFS=, read -r A B; echo "$A".; test -n "$B" && (echo "$B" | token))
}
 
echo "Hello,How,Are,You" | token</langsyntaxhighlight>
 
=={{header|Ursa}}==
<syntaxhighlight lang="ursa">decl string text
set text "Hello,How,Are,You,Today"
decl string<> tokens
Line 2,629 ⟶ 3,902:
out tokens<i> "." console
end for
out endl console</syntaxhighlight>
 
=={{header|Ursala}}==
The string is broken up by the library function sep, a
second order function parameterized by the delimiter. Character
literals are preceded by a backquote.
<syntaxhighlight lang="ursala">#import std
 
token_list = sep`, 'Hello,How,Are,You,Today'
#cast %s
 
main = mat`. token_list</syntaxhighlight>
{{out}}
<pre>
Hello.How.Are.You.Today
</pre>
 
=={{header|Vala}}==
<syntaxhighlight lang="vala">void main() {
    string s = "Hello,How,Are,You,Today";
    print(@"$(string.joinv(".", s.split(",")))");
}</syntaxhighlight>
{{out}}
<pre>Hello.How.Are.You.Today</pre>
 
=={{header|VBA}}==
<syntaxhighlight lang="vb">Sub Main()
Dim temp() As String
temp = Tokenize("Hello,How,Are,You,Today", ",")
Line 2,670 ⟶ 3,943:
Private Sub Display(arr() As String, sep As String)
Debug.Print Join(arr, sep)
End Sub</syntaxhighlight>
{{Out}}
<pre>Hello How Are You Today</pre>
 
=={{header|VBScript}}==
<syntaxhighlight lang="vb">
s = "Hello,How,Are,You,Today"
WScript.StdOut.Write Join(Split(s,","),".")
</syntaxhighlight>
{{Out}}
<pre>Hello.How.Are.You.Today</pre>
=={{header|Vedit macro language}}==
Text is tokenized into text registers 10, 11, 12, etc.
The contents of each text register are then displayed to the user, separated by a period.
 
<syntaxhighlight lang="vedit">Buf_Switch(Buf_Free)
Ins_Text("Hello,How,Are,You,Today")
 
}
 
Buf_Quit(OK)</syntaxhighlight>
 
=={{header|V (Vlang)}}==
<syntaxhighlight lang="go">// Tokenize a string, in V (Vlang)
// Tectonics: v run tokenize-a-string.v
module main
 
// starts here
pub fn main() {
println("Hello,How,Are,You,Today".split(',').join('.'))
}</syntaxhighlight>
{{out}}
<pre>prompt$ v run rosetta/tokenize-a-string.v
Hello.How.Are.You.Today</pre>
 
=={{header|WinBatch}}==
 
<syntaxhighlight lang="winbatch">text = 'Hello,How,Are,You,Today'
result = ''
BoxOpen('WinBatch Tokenizing Example', '')
next
display(10, 'End of Program', 'Dialog and program will close momentarily.')
BoxShut()</syntaxhighlight>
 
{{out}}
 
=={{header|Wortel}}==
<syntaxhighlight lang="wortel">@join "." @split "," "Hello,How,Are,You,Today"</syntaxhighlight>
Returns
<pre>"Hello.How.Are.You.Today"</pre>
 
=={{header|Wren}}==
<syntaxhighlight lang="wren">var s = "Hello,How,Are,You,Today"
var t = s.split(",").join(".") + "."
System.print(t)</syntaxhighlight>
 
{{out}}
<pre>
Hello.How.Are.You.Today.
</pre>
 
=={{header|XPath 2.0}}==
<syntaxhighlight lang="xpath">string-join(tokenize("Hello,How,Are,You,Today", ","), ".")</syntaxhighlight>
 
{{out}}
<pre>Hello.How.Are.You.Today</pre>
 
=={{header|XPL0}}==
<syntaxhighlight lang="xpl0">string 0;
include c:\cxpl\codes;
int I, J, K, Char;
for K:= 4 downto 0 do [Text(0, addr Array(K,0)); ChOut(0, ^.)];
CrLf(0);
]</syntaxhighlight>
 
The 'addr' operator is used to fetch the 32-bit address of Array rather than the value stored at that location, since the Text intrinsic needs the address of each string in order to display it.
 
=={{header|Yabasic}}==
<syntaxhighlight lang="yabasic">dim s$(1)
 
n = token("Hello. How are you today?", s$(), ".? ")
for i = 1 to n
    print s$(i);
if i < n print ".";
next
print</syntaxhighlight>
 
=={{header|Zig}}==
<syntaxhighlight lang="zig">const std = @import("std");
pub fn main() void {
const string = "Hello,How,Are,You,Today";
var tokens = std.mem.split(u8, string, ",");
std.debug.print("{s}", .{tokens.next().?});
while (tokens.next()) |token| {
std.debug.print(".{s}", .{token});
}
}</syntaxhighlight>
=={{header|zkl}}==
<syntaxhighlight lang="zkl">"Hello,How,Are,You,Today".split(",").concat(".").println();
Hello.How.Are.You.Today</syntaxhighlight>
 
=={{header|Zoea}}==
<syntaxhighlight lang="zoea">
program: tokenize_a_string
input: "Hello,How,Are,You,Today"
output: "Hello.How.Are.You.Today"
</syntaxhighlight>
 
=={{header|Zoea Visual}}==
[http://zoea.co.uk/examples/zv-rc/Tokenize_string.png Tokenize a string]
 
=={{header|Zsh}}==
<syntaxhighlight lang="zsh">str='Hello,How,Are,You,Today'
tokens=(${(s:,:)str})
print ${(j:.:)tokens}</syntaxhighlight>
 
Or, using SH_SPLIT_WORD:
 
<syntaxhighlight lang="zsh">str='Hello,How,Are,You,Today'
IFS=, echo ${(j:.:)${=str}}</syntaxhighlight>
 
{{omit from|PARI/GP|No real capacity for string manipulation}}