Tokenize a string with escaping

Write a function (or program) that splits a string into fields at each occurrence of a separator character, subject to escaping: an escape character makes the following character literal, so an escaped separator does not split the string and a doubled escape yields one literal escape character. The entries below all use separator <code>|</code>, escape <code>^</code> and the test string <code>one^|uno||three^^^^|four^^^|^cuatro|</code>, which tokenizes into five fields: <code>one|uno</code>, an empty field, <code>three^^</code>, <code>four^|cuatro</code> and a final empty field.

=={{header|11l}}==
{{trans|Python}}
<syntaxhighlight lang="11l">
[String] result
V token = ‘’
R result
print(token_with_escape(‘one^|uno||three^^^^|four^^^|^cuatro|’).map(s -> ‘'’s‘'’).join(‘, ’))</syntaxhighlight>
{{out}}
<pre>
'one|uno', '', 'three^^', 'four^|cuatro', ''
</pre>
=={{header|8080 Assembly}}==
<syntaxhighlight lang="8080asm">
jmp demo
;;; Routine to split a 0-terminated string
pfx: db '> $' ; Prefix to make the output more obvious
nl: db 13,10,'$'
test: db 'one^|uno||three^^^^|four^^^|^cuatro|',0</syntaxhighlight>
{{out}}
<pre>> one|uno
> 
> three^^
> four^|cuatro
> 
</pre>
=={{header|Action!}}==
<syntaxhighlight lang="action!">
TYPE Tokens=[
Tokenize("one^|uno||three^^^^|four^^^|^cuatro|",'|,'^,t)
PrintTokens(t)
RETURN</syntaxhighlight>
{{out}}
[https://gitlab.com/amarok8bit/action-rosetta-code/-/raw/master/images/Tokenize_a_string_with_escaping.png Screenshot from Atari 8-bit computer]
=={{header|Ada}}==
<syntaxhighlight lang="ada">
with Ada.Containers.Indefinite_Vectors;
with Ada.Strings.Unbounded;
begin
Put_Vector (Split ("one^|uno||three^^^^|four^^^|^cuatro|"));
end Tokenize;</syntaxhighlight>
{{out}}
<pre>'one|uno'
''
'three^^'
'four^|cuatro'
''</pre>
=={{header|ALGOL 68}}==
<syntaxhighlight lang="algol68">
# returns s parsed according to delimiter and escape #
PROC parse with escapes = ( STRING s, CHAR delimiter, escape )[]STRING:
[]STRING tokens = parse with escapes( "one^|uno||three^^^^|four^^^|^cuatro|", "|", "^" );
FOR t pos FROM LWB tokens TO UPB tokens DO print( ( "[", tokens[ t pos ], "]", newline ) ) OD
END</syntaxhighlight>
{{out}}
<pre>
[one|uno]
[]
[three^^]
[four^|cuatro]
[]
</pre>
=={{header|AppleScript}}==
{{Trans|JavaScript}}
<syntaxhighlight lang="applescript">
-- tokenize :: String -> Character -> Character -> [String]
g
end if
end cond</syntaxhighlight>
{{Out}}
<pre>1: one|uno
2: 
3: three^^
4: four^|cuatro
5: </pre>
=={{header|Arturo}}==
<syntaxhighlight lang="arturo">
escaping: 0
str: "one^|uno||three^^^^|four^^^|^cuatro|"
tokenize str "|" "^"</syntaxhighlight>
{{out}}
=={{header|AutoHotkey}}==
<syntaxhighlight lang="autohotkey">
for i,v in x:=StrSplit(StrReplace(StrReplace(StrReplace(s,e e,Chr(0xFFFE)),e d,Chr(0xFFFF)),e),d)
x[i]:=StrReplace(StrReplace(v,Chr(0xFFFE),e),Chr(0xFFFF),d)
return x
}</syntaxhighlight>
Examples:
<syntaxhighlight lang="autohotkey">
for i, v in Tokenize(str, "|", "^")
output .= i " : " v "`n"
MsgBox % output</syntaxhighlight>
{{out}}
<pre>1 : one|uno
2 : 
3 : three^^
4 : four^|cuatro
5 : </pre>
=={{header|BBC BASIC}}==
<syntaxhighlight lang="bbcbasic">
PROC_tokenize("one^|uno||three^^^^|four^^^|^cuatro|", "|", "^")
END
NEXT
PRINT
ENDPROC</syntaxhighlight>
{{out}}
<pre> 1 one|uno
 2 
 3 three^^
4 four^|cuatro
5 </pre>
=={{header|BQN}}==
<syntaxhighlight lang="bqn">str ← "one^|uno||three^^^^|four^^^|^cuatro|"
Split ← ((⊢-˜+`׬)∘=⊔⊢)
SplitE ← {
  esc ← <`'^'=𝕩                  # mark the "active" escapes: first of each ^^ pair
  rem ← »esc                     # positions of the characters being escaped
  spl ← (¬rem)∧'|'=𝕩             # separators that are not escaped
  𝕩⊔˜(⊢-(esc∨spl)×1⊸+)+`spl      # group by field; escapes and separators get index ¯1 (dropped)
}
•Show SplitE str</syntaxhighlight>
{{out}}
<pre>⟨ "one|uno" ⟨⟩ "three^^" "four^|cuatro" ⟩</pre>
=={{header|C}}==
{{works with|C}}
<syntaxhighlight lang="c">
#include <stdio.h>
return list;
}</syntaxhighlight>
{{Out}}
=={{header|C sharp}}==
<syntaxhighlight lang="csharp">
using System.Text;
using System.Collections.Generic;
return result;
}
}</syntaxhighlight>
{{out}}
=={{header|C++}}==
<syntaxhighlight lang="cpp">
#include <stdexcept>
#include <string>
return 0;
}</syntaxhighlight>
{{out}}
<pre>one^|uno||three^^^^|four^^^|^cuatro|
["one|uno", "", "three^^", "four^|cuatro", "", ]</pre>
=={{header|CLU}}==
<syntaxhighlight lang="clu">tokenize = iter (sep, esc: char, s: string) yields (string)
escape: bool := false
part: array[char] := array[char]$[]
for c: char in string$chars(s) do
if escape then
escape := false
array[char]$addh(part,c)
elseif c=esc then
escape := true
elseif c=sep then
yield(string$ac2s(part))
part := array[char]$[]
else
array[char]$addh(part,c)
end
end
yield(string$ac2s(part))
end tokenize
start_up = proc ()
po: stream := stream$primary_output()
testcase: string := "one^|uno||three^^^^|four^^^|^quatro|"
for part: string in tokenize('|', '^', testcase) do
stream$putl(po, "\"" || part || "\"")
end
end start_up</syntaxhighlight>
{{out}}
<pre>"one|uno"
""
"three^^"
"four^|quatro"
""</pre>
=={{header|COBOL}}==
<syntaxhighlight lang="cobol">
identification division.
program-id. 'tokenizewithescaping'.
.
end program 'tokenizewithescaping'.
</syntaxhighlight>
{{out}}
=={{header|Common Lisp}}==
<syntaxhighlight lang="lisp">
(flet ((make-string-buffer ()
(make-array 0 :element-type 'character :adjustable t :fill-pointer t)))
(defun main ()
(dolist (token (split "one^|uno||three^^^^|four^^^|^cuatro|" #\| #\^))
(format t "'~A'~%" token)))</syntaxhighlight>
{{out}}
<pre>'one|uno'
''
'three^^'
'four^|cuatro'
''</pre>
=={{header|D}}==
{{trans|Java}}
<syntaxhighlight lang="d">
void main() {
output.put(token.data.idup);
return output.data;
}</syntaxhighlight>
{{out}}
=={{header|Dyalect}}==
{{trans|C#}}
<syntaxhighlight lang="dyalect">
var buffer = []
var escaping = false
for c in this {
if escaping {
buffer.
escaping = false
} else if c == escape {
escaping = true
} else if c == separator {
yield buffer.
} else {
buffer.
}
}
if buffer.
yield buffer.
}
}
func Array.
var str = String.
this.
str
}
let testcase = "one^|uno||three^^^^|four^^^|^cuatro|";
for token in testcase.
print(": \(token)")
}</syntaxhighlight>
{{out}}
=={{header|Elena}}==
{{trans|C#}}
ELENA
<syntaxhighlight lang="elena">
import extensions'routines;
import system'collections;
bool escaping := false;
self.forEach::(ch)
{
if (escaping)
{
buffer.write
escaping := false
}
else
{
buffer.write
}
};
public program()
{
testcase.tokenize("|", "^").forEach
}</syntaxhighlight>
{{out}}
=={{header|F_Sharp|F#}}==
<syntaxhighlight lang="fsharp">
open System.Text.RegularExpressions
|> Seq.map (unescape esc)
|> Seq.iter (fun s -> printfn "'%s'" s)
0</syntaxhighlight>
{{out}}
<pre>'one|uno'
''
'three^^'
'four^|cuatro'
''</pre>
=={{header|Factor}}==
This example uses Factor's <code>parser-combinators</code> vocabulary, which is modeled after Haskell's parser combinators. Page <tt>51</tt> of [https://bluishcoder.co.nz/factor-articles.pdf this pdf] contains a useful introduction to this vocabulary.
{{works with|Factor|0.99 2019-10-06}}
<syntaxhighlight lang="factor">
parser-combinators prettyprint sequences strings ;
"one^|uno||three^^^^|four^^^|^cuatro|"
CHAR: | CHAR: ^ tokenize .</syntaxhighlight>
{{out}}
=={{header|Forth}}==
<syntaxhighlight lang="forth">
variable #src
variable offset
page
cr ." #### start ####" cr tokenize cr ." #### End ####" cr
</syntaxhighlight>
{{out}}
=={{header|Fortran}}==
First Fortran (1958) offered no facilities for inspecting or manipulating text until Fortran IV, when the <code>A</code> format code was introduced whereby text could be read or written from numeric variables. The difficulties and incompatibilities between different computers were eased with F77, which offered CHARACTER*n variables, though these are not quite varying-length strings. F95 introduced the ability to define a compound entity such as a string, and F2003 standardised a form of string that is re-allocated with the required amount of storage on each assignment. Otherwise, one proceeds with CHARACTER variables and an associated variable holding the current length, as with <code>TOKEN</code> and <code>L</code>. However, when a CHARACTER variable is passed as a parameter to a subroutine (or function), a secret additional parameter conveys its size, which is string-like so long as the length need not change. Thus, the length of parameter TEXT to subroutine SPLIT can be found via LEN(TEXT).
The source style is F90, simply for the convenience of having subroutine SPLOT defined within subroutine SPLIT so as to gain access to certain variables. If separate subroutines were used, there would have to be parameters or COMMON variables, or one could just replicate the code within SPLIT. A further F90 feature is declaring the size of internal variable <code>TOKEN</code> to be <code>LEN(TEXT)</code>, which is surely the largest it could need to be; otherwise, one would have to select some "surely big enough" value.
<syntaxhighlight lang="fortran">
CHARACTER*(*) TEXT !To be scanned.
CHARACTER*(1) SEP !The only separator for tokens.
CALL SPLIT("one^|uno||three^^^^|four^^^|^cuatro|","|","^")
END</syntaxhighlight>
The output has the text of the tokens marked >thus<
In this example the DO-loop relentlessly steps through the text, and in general this would not be convenient. Normally, token identification proceeds within a much larger context where one would not discard the token immediately after it is isolated; rather than copying the text hither and thither, one might prefer to identify each token in place, say with variables <code>L1</code> and <code>L2</code> holding its start and end positions within the working area. In such a case there would no longer be a need for a variable <code>TOKEN</code> and the angst of deciding on a suitable maximum size, and it would also be easier for any error message to show context and provenance. However, the bizarre miscegenation of "escape" sequences (especially confusing within text ''literals'') means that the source text does not necessarily constitute the text of the token, as the sketch below shows.
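As an illustrative aside (in Python rather than Fortran, and not part of the original entry), a minimal sketch of such in-place identification might look like this; the hypothetical <code>token_spans</code> yields (start, end) positions into the original text, so the escape characters are still embedded in each reported span:
<syntaxhighlight lang="python">def token_spans(text, sep='|', esc='^'):
    """Yield (start, end) index pairs of raw tokens in text.

    The spans point into the original string, so escape characters
    are still present inside each span; unescaping is a separate step.
    """
    start = i = 0
    while i < len(text):
        if text[i] == esc:
            i += 2                 # skip the escape and the escaped character
        elif text[i] == sep:
            yield start, i         # token ends just before the separator
            start = i = i + 1
        else:
            i += 1
    yield start, len(text)         # final (possibly empty) token

s = "one^|uno||three^^^^|four^^^|^cuatro|"
print([s[a:b] for a, b in token_spans(s)])
# ['one^|uno', '', 'three^^^^', 'four^^^|^cuatro', ''] -- raw spans, escapes intact</syntaxhighlight>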
=={{header|FreeBASIC}}==
{{trans|Ring}}
<syntaxhighlight lang="freebasic">Sub tokenize(cadena As String, separador As String, escape As String)
Dim As Integer campo = 1
Dim As Boolean escapando = false
Dim As String char
Print ""; campo; " ";
For i As Integer = 1 To Len(cadena)
char = Mid(cadena, i, 1)
If escapando Then
Print char;
escapando = false
Else
Select Case char
Case separador
Print
campo += 1
Print ""; campo; " ";
Case escape
escapando = true
Case Else
Print char;
End Select
End If
Next i
Print
End Sub
tokenize("one^|uno||three^^^^|four^^^|^cuatro|", "|", "^")
Sleep</syntaxhighlight>
{{out}}
<pre>
Same as the Ring entry.
</pre>
=={{header|Go}}==
<syntaxhighlight lang="go">
import (
fmt.Printf("Tokens: %q\n", tokens)
}
}</syntaxhighlight>
{{out}}
=={{header|Haskell}}==
=== Deterministic Finite Automaton ===
<syntaxhighlight lang="haskell">
splitEsc sep esc = reverse . map reverse . snd . foldl process (0, [[]])
where process (st, r:rs) ch
| st == 0 && ch == sep = (0, []:r:rs)
| st == 1 && sep == esc && ch /= sep = (0, [ch]:r:rs)
| otherwise = (0, (ch:r):rs)</syntaxhighlight>
{{out}}
Constant in space (roughly O(k), where k is the token length), and as fast as the DFA-based solution.
<syntaxhighlight lang="haskell">
import Conduit
Just ch | notEsc && ch == esc -> go False b
| notEsc && ch == sep -> yield b >> go True []
| otherwise -> go True (ch:b)</syntaxhighlight>
This new conduit could be used in a pipeline as follows:
<syntaxhighlight lang="haskell">
yieldMany "one^|uno||three^^^^|four^^^|^cuatro|"
.| splitEscC '|' '^'
.| mapM_C print</syntaxhighlight>
<pre>λ> main
"one|uno"
""
"three^^"
"four^|cuatro"
""
</pre>
===Alternative===
This is essentially equivalent to the first (DFA) example, but, though possibly less elegant than the guard idiom, appears to be fractionally faster with larger (eg 180k) test strings.
<syntaxhighlight lang="haskell">
------------------ TOKENIZE WITH ESCAPING ----------------
'|'
'^'
"one^|uno||three^^^^|four^^^|^cuatro|"</syntaxhighlight>
{{Out}}
<pre>"one|uno"
""
"three^^"
"four^|cuatro"
""</pre>
=={{header|J}}==
From the Python example:
<syntaxhighlight lang="j">
tokenize1=: tokenize =: '^|'&$: :(4 : 0)
'ESC SEP' =. x
RESULT =. RESULT , < TOKEN
)
</syntaxhighlight>
<pre>
tokenize 'one^|uno||three^^^^|four^^^|^cuatro|'
┌───────┬┬───────┬────────────┬┐
│one|uno││three^^│four^|cuatro││
└───────┴┴───────┴────────────┴┘</pre>
Here's a somewhat more efficient approach (over 100 times faster on a 100k textual example):
<syntaxhighlight lang="j">
'^|' tokenize2 y NB. task default escape and separator
:
T=. (#y){. 1,}.S NB. token beginnings
(T<;.1 K)#&.>T<;.1 y
)</syntaxhighlight>
Example use:
<syntaxhighlight lang="j">
┌───────┬┬───────┬────────────┬┐
│one|uno││three^^│four^|cuatro││
└───────┴┴───────┴────────────┴┘</syntaxhighlight>
Solution invoking the sequential machine primitive verb. [http://jsoftware.com/pipermail/programming/2014-December/040658.html See this thread.]
<syntaxhighlight lang="j">
splitTokens =: ((<,'|')&= <;._1 ])@:((<,'|'),])
removeExtra =: (}.^:(1<#)) L:0
tokenize3=: tokenize=: ; each @: (removeExtra @: splitTokens @: charTokens)</syntaxhighlight>
<syntaxhighlight lang="j">tokenize t
$tokenize t
5</syntaxhighlight>
Relative efficiencies:
<syntaxhighlight lang="j">
(%"1 <./) timespacex every 'tokenize1 txt';'tokenize2 txt';'tokenize3 txt'
132.856 1
1 7.73534
8.29568 19.9766</syntaxhighlight>
So tokenize2 is the fastest, while tokenize1 uses the least amount of memory. Also, tokenize1 is the slowest and tokenize3 uses the most memory. (First column is relative time used, second is relative space used, rows correspond to implementations.)
=={{header|Java}}==
{{trans|Go}}
{{works with|Java|7}}
<syntaxhighlight lang="java">
public class TokenizeStringWithEscaping {
return tokens;
}
}</syntaxhighlight>
<pre>[one|uno, , three^^, four^|cuatro, ]</pre>
=={{header|JavaScript}}==
===ES5===
====Iterative====
<syntaxhighlight lang="javascript">
for (var a=[], t='', i=0, e=s.length; i<e; i+=1) {
var c = s.charAt(i)
var s = 'one^|uno||three^^^^|four^^^|^cuatro|'
document.write(s, '<br>')
for (var a=tokenize(s,'^','|'), i=0; i<a.length; i+=1) document.write(i, ': ', a[i], '<br>')</syntaxhighlight>
{{out}}
<pre>one^|uno||three^^^^|four^^^|^cuatro|
0: one|uno
1: 
2: three^^
3: four^|cuatro
4: </pre>
====Functional====
<syntaxhighlight lang="javascript">
'use strict';
.join('\n');
})();</syntaxhighlight>
{{Out}}
<pre>one|uno

three^^
four^|cuatro
</pre>
{{Trans|Haskell}} (Single fold version)
<syntaxhighlight lang="javascript">
// tokenize :: String -> Character -> Character -> [String]
.map(show)
.join('\n');
}))();</syntaxhighlight>
{{Out}}
Defining the function as a composition of generics from a parser combinator library:
<syntaxhighlight lang="javascript">
'use strict';
// MAIN ---
return main();
})();</syntaxhighlight>
{{Out}}
<pre>[
  "one|uno",
  "",
  "three^^",
  "four^|cuatro",
  ""
]</pre>
=={{header|jq}}==
{{works with|jq|1.5}}
<syntaxhighlight lang="jq">
def tokenize(separator; escape):
| map( if type == "string" then split(escape) else . end)
| flatten
| reform ;</syntaxhighlight>
'''Example:'''
<syntaxhighlight lang="jq">"one^|uno||three^^^^|four^^^|^cuatro|" | tokenize("|"; "^")</syntaxhighlight>
{{out}}
<pre>
[
"one|uno",
"",
"three^^",
"four^|cuatro",
""
]</pre>
=={{header|Julia}}==
{{trans|Kotlin}}
<syntaxhighlight lang="julia">
SPE = "\ufffe"
SPF = "\uffff"
end
@show tokenize2("one^|uno||three^^^^|four^^^|^cuatro|", '|', '^')</syntaxhighlight>
{{out}}
=={{header|Kotlin}}==
<syntaxhighlight lang="kotlin">
const val SPE = "\ufffe" // unused unicode char in Specials block
val items = tokenize(str, sep, esc)
for (item in items) println(if (item.isEmpty()) "(empty)" else item)
}</syntaxhighlight>
{{out}}
<pre>
one|uno
(empty)
three^^
four^|cuatro
(empty)
</pre>
=={{header|Lingo}}==
<syntaxhighlight lang="lingo">
on tokenize (str, sep, esc)
end repeat
return str
end</syntaxhighlight>
<syntaxhighlight lang="lingo">
sep = "|"
esc = "^"
put tokenize(str, sep, esc)
-- ["one|uno", "", "three^^", "four^|cuatro", ""]</syntaxhighlight>
=={{header|Lua}}==
<syntaxhighlight lang="lua">
local strList, word, escaped, ch = {}, "", false
for pos = 1, #str do
for k, v in pairs(tokenise(testStr, testSep, testEsc)) do
print(k, v)
end</syntaxhighlight>
{{out}}
<pre>1 one|uno
2 
3 three^^
4 four^|cuatro
5 </pre>
=={{header|Mathematica}} / {{header|Wolfram Language}}==
<syntaxhighlight lang="mathematica">
Tokenize[str_String, escape_String : "^", sep_String : "|"] :=
Module[{results = {}, token = "", state = 0, a},
results
]
Tokenize["one^|uno||three^^^^|four^^^|^cuatro|"]</syntaxhighlight>
{{out}}
<pre>{"one|uno", "", "three^^", "four^|cuatro", ""}</pre>
=={{header|Nim}}==
<syntaxhighlight lang="nim">
proc tokenize(s: Stream, sep: static[char] = '|', esc: static[char] = '^'): seq[string] =
for i, s in tokenize(newStringStream "one^|uno||three^^^^|four^^^|^cuatro|"):
echo i, ":", s
</syntaxhighlight>
{{out}}
<pre>0:one|uno
1:
2:three^^
3:four^|cuatro
4:</pre>
=={{header|OCaml}}==
<syntaxhighlight lang="ocaml">
let len = String.length s in
let buf = Buffer.create 16 in
end
in
loop 0</syntaxhighlight>
Example:
<syntaxhighlight lang="ocaml">
val res : string list = ["one|uno"; ""; "three^^"; "four^|cuatro"; ""]</syntaxhighlight>
=={{header|Perl}}==
The built-in <code>split</code> function can be used with a regex that matches the delimiter ''(although [http://perldoc.perl.org/perlre.html#Special-Backtracking-Control-Verbs advanced backtracking control verbs] are needed to skip escaped delimiters)'':
<syntaxhighlight lang="perl">
my ($string, $sep, $esc) = (shift, quotemeta shift, quotemeta shift);
my @fields = split /$esc . (*SKIP)(*FAIL) | $sep/sx, $string, -1;
return map { s/$esc(.)/$1/gsr } @fields;
}</syntaxhighlight>
A more traditional approach is to parse the input string step by step ''(using a repeatedly-matching regex of the form [http://perldoc.perl.org/perlretut.html#Global-matching <code>/\G.../g</code>])'', and throw away the separators ''(which can be done implicitly using [http://perldoc.perl.org/perlre.html#%28?%3C=pattern%29-\K \K])'':
<syntaxhighlight lang="perl">sub tokenize {
    my ($string, $sep, $esc) = (shift, quotemeta shift, quotemeta shift);
    my @fields = $string =~ m/\G (?: ^ | $sep ) \K (?: $esc . | [^$sep$esc] )*/gsx;
    return map { s/$esc(.)/$1/gsr } @fields;
}</syntaxhighlight>
In both cases, stripping the escape characters happens as a separate step.
Testing:
<syntaxhighlight lang="perl">use feature 'say';
say "'$_'" for tokenize('one^|uno||three^^^^|four^^^|^cuatro|', '|', '^');</syntaxhighlight>
{{out}}
<pre>
'one|uno'
''
'three^^'
'four^|cuatro'
''
</pre>
=={{header|Phix}}==
<!--<syntaxhighlight lang="phix">-->
<span style="color: #008080;">function</span> <span style="color: #000000;">tokenize</span><span style="color: #0000FF;">(</span><span style="color: #004080;">string</span> <span style="color: #000000;">s</span><span style="color: #0000FF;">,</span> <span style="color: #004080;">integer</span> <span style="color: #000000;">sep</span><span style="color: #0000FF;">,</span> <span style="color: #004080;">integer</span> <span style="color: #000000;">esc</span><span style="color: #0000FF;">)</span>
<span style="color: #004080;">sequence</span> <span style="color: #000000;">ret</span> <span style="color: #0000FF;">=</span> <span style="color: #0000FF;">{}</span>
<span style="color: #0000FF;">?</span><span style="color: #000000;">tokenize</span><span style="color: #0000FF;">(</span><span style="color: #008000;">"one^|uno||three^^^^|four^^^|^cuatro|"</span><span style="color: #0000FF;">,</span><span style="color: #008000;">'|'</span><span style="color: #0000FF;">,</span><span style="color: #008000;">'^'</span><span style="color: #0000FF;">)</span>
<!--</syntaxhighlight>-->
{{Out}}
<pre>
{"one|uno","","three^^","four^|cuatro",""}
</pre>
=={{header|PicoLisp}}==
<syntaxhighlight lang="picolisp">
(split
(make
((= C Sep) (link 0))
(T (link C)) ) ) ) )
0 ) )</syntaxhighlight>
Test:
<syntaxhighlight lang="picolisp">
(prinl I ": " S) )</syntaxhighlight>
Output:
<pre>1: one|uno
2: 
3: three^^
4: four^|cuatro
5: </pre>
=={{header|PowerShell}}==
<syntaxhighlight lang="powershell">
function Split-String ([string]$String, [char]$Separator, [char]$Escape)
{
if ($String[-1] -eq $Separator) {[String]::Empty}
}
</syntaxhighlight>
<syntaxhighlight lang="powershell">
Split-String "one^|uno||three^^^^|four^^^|^cuatro|" -Separator "|" -Escape "^" | ForEach-Object `
-Begin {$n = 0} `
-Process {$n+= 1; "{0}: {1}" -f $n, $_}
</syntaxhighlight>
{{Out}}
<pre>
1: one|uno
2: 
3: three^^
4: four^|cuatro
5: 
</pre>
=={{header|Python}}==
===Procedural===
<syntaxhighlight lang="python">
'''
Issue python -m doctest thisfile.py to run the doctests.
state = 0
result.append(token)
return result</syntaxhighlight>
===Functional===
{{Works with|Python|3}}
<syntaxhighlight lang="python">
from functools import reduce
# MAIN ---
if __name__ == '__main__':
main()</syntaxhighlight>
{{Out}}
<pre>['one|uno', '', 'three^^', 'four^|cuatro', '']</pre>
===Regex-based===
====Using <code>Scanner</code>====
The python <code>re</code> library has a handy class <code>Scanner</code> which is intended precisely for this use-case.
It takes a list of pairs '''regex, action''' and whenever it encounters '''regex''' in the input, it executes '''action'''.
This allows us to solve this task very efficiently with minimum effort, the hardest part being the correct definition of the regular expressions.
The following code also illustrates an important feature of Python ‒ nested functions with closures.
Owing to this feature, the inner functions, such as <code>start_new_token</code>, are able to access the local variable <code>tokens</code> of their enclosing function <code>tokenize</code>.
For the inner function, the name <code>tokens</code> is ''nonlocal'', and is in the ''enclosing scope'' of the inner function (as opposed to the parameters <code>scanner</code> and <code>substring</code>, which are in the local scope).
<syntaxhighlight lang="python">import re
STRING = 'one^|uno||three^^^^|four^^^|^cuatro|'
def tokenize(string=STRING, escape='^', separator='|'):
escape, separator = map(re.escape, (escape, separator))
tokens = ['']
def start_new_token(scanner, substring):
tokens.append('')
def add_escaped_char(scanner, substring):
char = substring[1]
tokens[-1] += char
def add_substring(scanner, substring):
tokens[-1] += substring
re.Scanner([
# an escape followed by a character produces that character
(fr'{escape}.', add_escaped_char),
# when encountering a separator not preceded by an escape,
# start a new token
(fr'{separator}', start_new_token),
# a sequence of regular characters (i.e. not escape or separator)
# is just appended to the token
(fr'[^{escape}{separator}]+', add_substring),
]).scan(string)
return tokens
if __name__ == '__main__':
print(list(tokenize()))</syntaxhighlight>
Output is the same as in the functional Python version above.
====Simpler version with preprocessing====
This version does not require any extra state, such as the <code>token</code> list in the Scanner-based version above.
It first preprocesses the input, since Python does not support variable-length lookbehind assertions.
Then it works only with the primitive regex operations <code>re.findall</code> and <code>re.sub</code>.
Note that the regex used here is compiled with the <code>re.VERBOSE</code> flag.
This allows us to write the regex on several lines (since unescaped whitespace is ignored in this mode), and use comments inside the regex (starting with <code>#</code>).
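As a quick aside (not part of the original entry), the fixed-width restriction on lookbehind is easy to demonstrate with the standard library alone:
<syntaxhighlight lang="python">import re

# The re module rejects lookbehind branches of different widths:
# here ^ is zero-width while \| is one character wide.
try:
    re.compile(r'(?<=^|\|).')
except re.error as err:
    print(err)  # look-behind requires fixed-width pattern</syntaxhighlight>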
<syntaxhighlight lang="python">import re
STRING = 'one^|uno||three^^^^|four^^^|^cuatro|'
def tokenize(string=STRING, escape='^', separator='|'):
re_escape, re_separator = map(re.escape, (escape, separator))
# token regex
regex = re.compile(fr'''
# lookbehind: a token must be preceded by a separator
# (note that `(?<=^|{re_separator})` doesn't work in Python)
(?<={re_separator})
# a token consists either of an escape sequence,
# or a regular (non-escape, non-separator) character,
# repeated arbitrarily many times (even zero)
(?:{re_escape}.|[^{re_escape}{re_separator}])*
''',
flags=re.VERBOSE
)
# since each token must start with a separator,
# we must add an extra separator at the beginning of input
preprocessed_string = separator + string
for almost_token in regex.findall(preprocessed_string):
# now get rid of escape characters: '^^' -> '^' etc.
token = re.sub(fr'{re_escape}(.)', r'\1', almost_token)
yield token
if __name__ == '__main__':
print(list(tokenize()))</syntaxhighlight>
=={{header|Racket}}==
<syntaxhighlight lang="racket">
(require racket/match)
(report-input-output "|")
(report-input-output "^")
(report-input-output ".")</syntaxhighlight>
{{out}}
=={{header|Raku}}==
(formerly Perl 6)
<syntaxhighlight lang="raku">sub tokenize ($string, :$sep!, :$esc!) {
return $string.match(/([ <!before $sep | $esc> . | $esc . ]*)+ % $sep/)\
.[0].map(*.subst: /$esc )> ./, '', :g);
}
say "'$_'" for tokenize 'one^|uno||three^^^^|four^^^|^cuatro|', sep => '|', esc => '^';</syntaxhighlight>
{{out}}
<pre>
'one|uno'
''
'three^^'
'four^|cuatro'
''
</pre>
=={{header|REXX}}==
===IF/THEN logic===
<syntaxhighlight lang="rexx">
str = 'one^|uno||three^^^^|four^^^|^cuatro|' /*the character string to be tokenized.*/
esc = '^' /* " escape character to be used. */
exit /*stick a fork in it, we're all done. */
/*──────────────────────────────────────────────────────────────────────────────────────*/
show: say '[length'right(length(out),4)"]" out; out=; return</syntaxhighlight>
'''output'''
===SELECT logic===
This REXX version also shows a scale in the output.
<syntaxhighlight lang="rexx">
str = 'one^|uno||three^^^^|four^^^|^cuatro|' /*the character string to be tokenized.*/
esc = '^' /* " escape character to be used. */
exit /*stick a fork in it, we're all done. */
/*──────────────────────────────────────────────────────────────────────────────────────*/
show: say '[length'right(length($),4)"]" $; $=; return</syntaxhighlight>
'''output'''
=={{header|Ring}}==
<syntaxhighlight lang="ring">
tokenize("one^|uno||three^^^^|four^^^|^cuatro|", "|", "^")
next
see nl
</syntaxhighlight>
Output:
=={{header|Ruby}}==
{{trans|Perl}}
<syntaxhighlight lang="ruby">
def tokenize(string, sep, esc)
sep = Regexp.escape(sep)
p tokenize('one^|uno||three^^^^|four^^^|^cuatro|', '|', '^')
</syntaxhighlight>
=={{header|Rust}}==
<syntaxhighlight lang="rust">
const ESCAPE: char = '^';
const STRING: &str = "one^|uno||three^^^^|four^^^|^cuatro|";
fn main() {
println!("{:#?}", tokenize(STRING));
}</syntaxhighlight>
{{out}}
<pre>
[
    "one|uno",
    "",
    "three^^",
    "four^|cuatro",
    "",
]
</pre>
=={{header|Scala}}==
===Old fashioned Imperative===
Imperative style, with the (ugly) mutable variables removed.
{{Trans|Kotlin}}
<syntaxhighlight lang="scala">
val (markerSpE,markerSpF) = ("\ufffe" , "\uffff")
tokenize(str, "|", "^").foreach(it => println(if (it.isEmpty) "<empty token>" else it))
}</syntaxhighlight>
===Idiomatic===
====Functional with Tail recursion====
<syntaxhighlight lang="scala">
object TokenizeStringWithEscaping1 extends App {
println(
f"[length:${it.length}%3d] ${if (it.isEmpty) "<empty token>" else it}"))
}</syntaxhighlight>
{{Out}}
See it running in your browser at [https://scalafiddle.io/sf/EsIjPQg/0 ScalaFiddle (JavaScript)] or at [https://scastie.scala-lang.org/O3DgMmuOSCS5DD6zQXK7MA Scastie (JVM)].
=={{header|Sidef}}==
{{trans|Perl}}
<syntaxhighlight lang="sidef">
var fields = string.split(
Regex(esc.escape + '.(*SKIP)(*FAIL)|' + sep.escape, 's'), -1
tokenize("one^|uno||three^^^^|four^^^|^cuatro|", '|', '^').each { |str|
say str.dump
}</syntaxhighlight>
{{out}}
<pre>
"one|uno"
""
"three^^"
"four^|cuatro"
""
</pre>
=={{header|Simula}}==
<syntaxhighlight lang="simula">
SIMSET
BEGIN
END.
</syntaxhighlight>
{{out}}
<pre>
PART1: 'ONE|UNO'
PART2: ''
PART3: 'THREE^^'
PART4: 'FOUR^|CUATRO'
PART5: ''
</pre>
=={{header|SNOBOL4}}==
{{works with|SNOBOL4, SPITBOL for Linux}}
<syntaxhighlight lang="snobol4">
* Program: tokenize_with_escape.sbl
* To run: sbl tokenize_with_escape.sbl
* Description: Tokenize a string with escaping
* Comment: Tested using the Spitbol for Linux version of SNOBOL4
lf = substr(&alphabet,11,1) ;* New line or line feed
* Function tokenize will break parts out of a string, which are
* separated by c, which defaults to a comma, into
* an array. Parameter kp=1 to keep null parts, which is the default,
* and 0 to discard.
define('tokenize(s,c,kp)tokenizepat,part,t,i,j')
:(tokenize_end)
tokenize
c = (ident(c) ',', substr(c,1,1)) :f(freturn)
kp = (ident(kp) 1, eq(kp,0) 0, 1) :f(freturn)
t = table()
tokenizepat = breakx(c) . part c | (len(1) rem) . part
s ? eq(kp,1) rtab(1) c = s c
tokenize1
s ? tokenizepat = "" :f(tokenize2)
t[i = eq(kp,0) differ(part) i + 1] = part
t[i = eq(kp,1) i + 1] = part
:(tokenize1)
tokenize2
tokenize = array(i) :f(errr)
j = 0
tokenize3 tokenize[j = lt(j,i) j + 1] = t[j] :s(tokenize3)
:(return)
tokenize_end
* Function tokcan will a normalize a string by applying separator and escape
* rules to string ts. Parameter sep is the separator, while esc is the escape
* character. Parameter tesc is the new separator character to substitute for
* parameter sep. It defaults to a comma, ",".
define('tokcan(ts,sep,esc,tesc)tpat,part1,part2,notany') :(tokcan_end)
tokcan
tesc = (ident(tesc) ',', substr(tesc,1,1))
tpat = (breakx(sep esc) . part1
+ (sep | esc sep | esc esc | (esc len(1) . notany)) . part2
+ )
+ | (len(1) rem) . part1
tokcan1
ts ? tpat = :f(tokcan2)
part2 = (leq(part2,sep) tesc
+ ,leq(part2,esc sep) sep
+ ,leq(part2,esc esc) esc
+ ,differ(notany) leq(part2,esc notany) notany
+ )
tokcan = (ident(tokcan) "", tokcan) part1 part2
:(tokcan1)
tokcan2
:(return)
tokcan_end
test_string = "one^|uno||three^^^^|four^^^|^cuatro|"
sep = "|"
esc = "^"
hline = tokcan(test_string,sep,esc) :f(err)
output = " Input: " test_string lf
output = "Output1: " hline lf
output = "Output2: "
tokenized = tokenize(hline,",")
p1 output = "'" tokenized[z = z + 1] "'" :s(p1)
END
</syntaxhighlight>
{{out}}
<pre>
Input: one^|uno||three^^^^|four^^^|^cuatro|
Output1: one|uno,,three^^,four^|cuatro,
Output2:
'one|uno'
''
'three^^'
'four^|cuatro'
''
</pre>
=={{header|Swift}}==
{{trans|Rust}}
<syntaxhighlight lang="swift">
func tokenize(separator: Character, escape: Character) -> [String] {
var token = ""
}
print("one^|uno||three^^^^|four^^^|^cuatro|".tokenize(separator: "|", escape: "^"))</syntaxhighlight>
{{out}}
<pre>
["one|uno", "", "three^^", "four^|cuatro", ""]
</pre>
=={{header|Tcl}}==
Putting a coroutine in a TclOO object following the "generator pattern" gives a nice structure:
<syntaxhighlight lang="tcl">
constructor {s} {
puts [coroutine Next my Iter $s]
}
puts [tokenize one^|uno||three^^^^|four^^^|^cuatro| | ^]</syntaxhighlight>
{{out}}
=={{header|TMG}}==
Unix TMG:
<syntaxhighlight lang="text">
char(esc) *
str: smark
ch: 0;
sep: 0;
esc: 0;</syntaxhighlight>
Input:
=={{header|VBA}}==
{{trans|Phix}}
<syntaxhighlight lang="vba">
Dim ret As New Collection
Dim this As String
Next i
Debug.Print Join(outstring, ", ")
End Sub</syntaxhighlight>
<pre>one|uno, , three^^, four^|cuatro, </pre>
=={{header|V (Vlang)}}==
{{trans|Go}}
<syntaxhighlight lang="ecmascript">fn tokenize_string(s string, sep u8, escape u8) ?[]string {
mut tokens := []string{}
mut runes := []u8{}
mut in_escape := false
for r in s {
if in_escape {
in_escape = false
runes << r
} else if r == escape {
in_escape = true
} else if r == sep {
tokens << runes.bytestr()
runes = runes[..0]
} else {
runes << r
}
}
tokens << runes.bytestr()
if in_escape {
return error("invalid terminal escape")
}
return tokens
}
const sample = "one^|uno||three^^^^|four^^^|^cuatro|"
const separator = `|`
const escape = `^`
fn main() {
println("Input: $sample")
tokens := tokenize_string(sample, separator, escape)?
println("Tokens: $tokens")
}</syntaxhighlight>
{{out}}
<pre>
Input: one^|uno||three^^^^|four^^^|^cuatro|
Tokens: ['one|uno', '', 'three^^', 'four^|cuatro', '']
</pre>
=={{header|Wren}}==
{{trans|Kotlin}}
<syntaxhighlight lang="wren">
var SPF = "\uffff" // ditto
var esc = "^"
var items = tokenize.call(str, sep, esc)
for (item in items) System.print((item == "") ? "(empty)" : item)</syntaxhighlight>
{{out}}
<pre>
one|uno
(empty)
three^^
four^|cuatro
(empty)
</pre>
=={{header|zkl}}==
Two simplifying assumptions (since their behavior is undefined): A string ending with an un-escaped escape is an error and 0xff is not an allowed character in the string.
<syntaxhighlight lang="zkl">
sink:=Sink(String);
foreach c in (str){
}
sink.close().split("\xff");
}</syntaxhighlight>
Or, if you prefer brevity:
<syntaxhighlight lang="zkl">
sink:=Sink(String);
foreach c in (str){ sink.write( (c==esc and __cWalker.next()) or (c==sep and "\xff") or c ) }
sink.close().split("\xff");
}</syntaxhighlight>
{{out}}
<pre>L("one|uno","","three^^","four^|cuatro","")</pre>