Compiler/lexical analyzer: Difference between revisions

m
m (Reverted edits by Jwells1213 (talk) to last revision by Petelomax)
m (→‎{{header|Wren}}: Minor tidy)
 
(4 intermediate revisions by 2 users not shown)
Line 158:
For example, the following two program fragments are equivalent, and should produce the same token stream except for the line and column positions:
 
* <syntaxhighlight lang="c">if ( p /* meaning n is prime */ ) {
print ( n , " " ) ;
count = count + 1 ; /* number of primes found so far */
}</syntaxhighlight>
* <syntaxhighlight lang="c">if(p){print(n," ");count=count+1;}</syntaxhighlight>
 
;Complete list of token names
Line 237:
| style="vertical-align:top" |
Test Case 1:
<syntaxhighlight lang="c">/*
Hello world
*/
print("Hello, World!\n");</syntaxhighlight>
 
| style="vertical-align:top" |
Line 255:
| style="vertical-align:top" |
Test Case 2:
<syntaxhighlight lang="c">/*
Show Ident and Integers
*/
phoenix_number = 142857;
print(phoenix_number, "\n");</syntaxhighlight>
 
| style="vertical-align:top" |
Line 280:
| style="vertical-align:top" |
Test Case 3:
<syntaxhighlight lang="c">/*
All lexical tokens - not syntactically correct, but that will
have to wait until syntax analysis
Line 301:
/* character literal */ '\n'
/* character literal */ '\\'
/* character literal */ ' '</syntaxhighlight>
 
| style="vertical-align:top" |
Line 344:
| style="vertical-align:top" |
Test Case 4:
<syntaxhighlight lang="c">/*** test printing, embedded \n and comments with lots of '*' ***/
print(42);
print("\nHello World\nGood Bye\nok\n");
print("Print a slash n - \\n.\n");</syntaxhighlight>
 
| style="vertical-align:top" |
Line 388:
 
=={{header|Ada}}==
<syntaxhighlight lang="ada">with Ada.Text_IO, Ada.Streams.Stream_IO, Ada.Strings.Unbounded, Ada.Command_Line,
Ada.Exceptions;
use Ada.Strings, Ada.Strings.Unbounded, Ada.Streams, Ada.Exceptions;
Line 648:
when error : others => IO.Put_Line("Error: " & Exception_Message(error));
end Main;
</syntaxhighlight>
{{out}} Test case 3:
<pre>
Line 686:
23 1 END_OF_INPUT
</pre>
 
=={{header|ALGOL 68}}==
This is a simple ''token in, line out'' program. It doesn't keep an internal representation of tokens or anything like that, since that's not needed at all.
 
As an addition, it emits a diagnostic if integer literals are too big.
<syntaxhighlight lang="algol68">BEGIN
# implement C-like getchar, where EOF and EOLn are "characters" (-1 and 10 resp.). #
INT eof = -1, eoln = 10;
BOOL eof flag := FALSE; # set by the logical-file-end event routine below #
STRING buf := ""; # the current source line #
INT col := 1; # column of the character most recently returned by getchar #
INT line := 0; # current source line number; 0 means nothing read yet #
on logical file end (stand in, (REF FILE f)BOOL: eof flag := TRUE);
# getchar: consume and return the next character of stand in as an integer code; #
# returns eoln at end of each line and eof at end of input, refilling buf from   #
# the next source line as needed. #
PROC getchar = INT:
IF eof flag THEN eof
ELIF col = UPB buf THEN col +:= 1; eoln
ELIF col > UPB buf THEN IF line > 0 THEN read(newline) FI;
line +:= 1;
read(buf);
IF eof flag THEN col := 1; eof
ELSE col := 0; getchar
FI
ELSE col +:= 1; ABS buf[col]
FI;
# nextchar: one-character lookahead; like getchar but does not consume. #
PROC nextchar = INT: IF eof flag THEN eof ELIF col >= UPB buf THEN eoln ELSE ABS buf[col+1] FI;
 
# character-class predicates over integer character codes #
PROC is blank = (INT ch) BOOL: ch = 0 OR ch = 9 OR ch = 10 OR ch = 13 OR ch = ABS " ";
PROC is digit = (INT ch) BOOL: ch >= ABS "0" AND ch <= ABS "9";
PROC is ident start = (INT ch) BOOL: ch >= ABS "A" AND ch <= ABS "Z" OR
ch >= ABS "a" AND ch <= ABS "z" OR
ch = ABS "_";
PROC is ident = (INT ch) BOOL: is ident start(ch) OR is digit(ch);
 
# scan the remainder of an identifier whose first character is start char, then #
# emit either the matching keyword token or an Identifier token. #
PROC ident or keyword = (INT start char) VOID:
BEGIN
STRING w := REPR start char;
INT start col = col;
WHILE is ident (next char) DO w +:= REPR getchar OD;
IF w = "if" THEN output2("Keyword_if", start col)
ELIF w = "else" THEN output2("Keyword_else", start col)
ELIF w = "while" THEN output2("Keyword_while", start col)
ELIF w = "print" THEN output2("Keyword_print", start col)
ELIF w = "putc" THEN output2("Keyword_putc", start col)
ELSE output2("Identifier " + w, start col)
FI
END;
# scan a character literal, the opening quote having been consumed already; #
# only the escapes for newline and backslash are accepted, and the result   #
# is emitted as an Integer token holding the character code. #
PROC char = VOID:
BEGIN
INT start col = col;
INT ch := getchar;
IF ch = ABS "'" THEN error("Empty character constant")
ELIF ch = ABS "\" THEN ch := getchar;
IF ch = ABS "n" THEN ch := 10
ELIF ch = ABS "\" THEN SKIP
ELSE error("Unknown escape sequence. \" + REPR ch)
FI
FI;
IF nextchar /= ABS "'" THEN error("Multi-character constant.") FI;
getchar;
output2("Integer " + whole(ch, 0), start col)
END;
# scan a string literal, the opening quote having been consumed already; #
# escape sequences are kept verbatim in the token text, and only the     #
# newline and backslash escapes are accepted. #
PROC string = VOID:
BEGIN
INT start col = col;
STRING s := """";
WHILE INT ch := getchar; ch /= ABS """"
DO
IF ch = eoln THEN error("End-of-line while scanning string literal. Closing string character not found before end-of-line.")
ELIF ch = eof THEN error("End-of-file while scanning string literal. Closing string character not found.")
ELIF ch = ABS "\" THEN s +:= REPR ch; ch := getchar;
IF ch /= ABS "\" AND ch /= ABS "n" THEN error("Unknown escape sequence. \" + REPR ch) FI;
s +:= REPR ch
ELSE s +:= REPR ch
FI
OD;
output2("String " + s + """", start col)
END;
# skip a comment body up to and including the closing star-slash pair #
PROC comment = VOID:
BEGIN
WHILE INT ch := getchar; NOT (ch = ABS "*" AND nextchar = ABS "/")
DO IF ch = eof THEN error("End-of-file in comment. Closing comment characters not found.") FI
OD;
getchar
END;
# scan an integer literal starting with first digit; reports overflow #
# and rejects a number immediately followed by an identifier character #
PROC number = (INT first digit) VOID:
BEGIN
INT start col = col;
INT n := first digit - ABS "0";
WHILE is digit (nextchar) DO
INT u := getchar - ABS "0";
IF LENG n * 10 + LENG u > max int THEN error("Integer too big") FI;
n := n * 10 + u
OD;
IF is ident start (nextchar) THEN error("Invalid number. Starts like a number, but ends in non-numeric characters.") FI;
output2("Integer " + whole(n, 0), start col)
END;
 
# emit one token line as: line number, column, token text #
PROC output = (STRING s) VOID: output2(s, col);
PROC output2 = (STRING s, INT col) VOID: print((whole(line,-8), whole(col,-8), " ", s, newline));
 
# emit longer when the next character equals second (consuming it), else shorter #
PROC if follows = (CHAR second, STRING longer, shorter) VOID:
IF nextchar = ABS second
THEN output(longer); getchar
ELSE output(shorter)
FI;
PROC error = (STRING s)VOID: (put(stand error, ("At ", whole(line,0), ":", whole(col,0), " ", s, new line)); stop);
PROC unrecognized = (INT char) VOID: error("Unrecognized character " + REPR char);
# expect first to occur doubled, for the two-character operators; #
# anything else is reported as an unrecognized character. #
PROC double char = (INT first, STRING op) VOID:
IF nextchar /= first THEN unrecognized(first)
ELSE output2(op, col-1); getchar
FI;
 
# main loop: dispatch on each input character until end of input #
WHILE INT ch := getchar; ch /= eof
DO
IF is blank(ch) THEN SKIP
ELIF ch = ABS "(" THEN output("LeftParen")
ELIF ch = ABS ")" THEN output("RightParen")
ELIF ch = ABS "{" THEN output("LeftBrace")
ELIF ch = ABS "}" THEN output("RightBrace")
ELIF ch = ABS ";" THEN output("Semicolon")
ELIF ch = ABS "," THEN output("Comma")
ELIF ch = ABS "*" THEN output("Op_multiply")
ELIF ch = ABS "/" THEN IF next char = ABS "*" THEN comment
ELSE output("Op_divide")
FI
ELIF ch = ABS "%" THEN output("Op_mod")
ELIF ch = ABS "+" THEN output("Op_add")
ELIF ch = ABS "-" THEN output("Op_subtract")
ELIF ch = ABS "<" THEN if follows("=", "Op_lessequal", "Op_less")
ELIF ch = ABS ">" THEN if follows("=", "Op_greaterequal", "Op_greater")
ELIF ch = ABS "=" THEN if follows("=", "Op_equal", "Op_assign")
ELIF ch = ABS "!" THEN if follows("=", "Op_notequal", "Op_not")
ELIF ch = ABS "&" THEN double char(ch, "Op_and")
ELIF ch = ABS "|" THEN double char(ch, "Op_or")
ELIF is ident start (ch) THEN ident or keyword (ch)
ELIF ch = ABS """" THEN string
ELIF ch = ABS "'" THEN char
ELIF is digit(ch) THEN number(ch)
ELSE unrecognized(ch)
FI
OD;
output("End_Of_Input")
END</syntaxhighlight>
 
=={{header|ALGOL W}}==
<syntaxhighlight lang="algolw">begin
%lexical analyser %
% Algol W strings are limited to 256 characters in length so we limit source lines %
Line 981 ⟶ 1,124:
while nextToken not = tEnd_of_input do writeToken;
writeToken
end.</syntaxhighlight>
{{out}} Test case 3:
<pre>
Line 1,026 ⟶ 1,169:
(One point of note: the C "EOF" pseudo-character is detected in the following code by looking for a negative number. That EOF has to be negative and the other characters non-negative is implied by the ISO C standard.)
 
<langsyntaxhighlight ATSlang="ats">(********************************************************************)
(* Usage: lex [INPUTFILE [OUTPUTFILE]]
If INPUTFILE or OUTPUTFILE is "-" or missing, then standard input
Line 1,898 ⟶ 2,041:
end
 
(********************************************************************)</langsyntaxhighlight>
 
{{out}}
Line 1,939 ⟶ 2,082:
=={{header|AWK}}==
Tested with gawk 4.1.1 and mawk 1.3.4.
<syntaxhighlight lang="awk">
BEGIN {
all_syms["tk_EOI" ] = "End_of_input"
Line 2,145 ⟶ 2,288:
}
}
</syntaxhighlight>
{{out|case=count}}
<b>
Line 2,182 ⟶ 2,325:
=={{header|C}}==
Tested with gcc 4.81 and later, compiles warning free with -Wpedantic -pedantic -Wall -Wextra
<syntaxhighlight lang="c">#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
Line 2,414 ⟶ 2,557:
run();
return 0;
}</syntaxhighlight>
 
{{out|case=test case 3}}
Line 2,458 ⟶ 2,601:
=={{header|C sharp|C#}}==
Requires C#6.0 because of the use of null coalescing operators.
<langsyntaxhighlight lang="csharp">
using System;
using System.IO;
Line 2,808 ⟶ 2,951:
}
}
</syntaxhighlight>
</lang>
 
{{out|case=test case 3}}
Line 2,852 ⟶ 2,995:
=={{header|C++}}==
Tested with GCC 9.3.0 (g++ -std=c++17)
<langsyntaxhighlight lang="cpp">#include <charconv> // std::from_chars
#include <fstream> // file_to_string, string_to_file
#include <functional> // std::invoke
Line 3,237 ⟶ 3,380:
});
}
</syntaxhighlight>
</lang>
 
{{out|case=test case 3}}
Line 3,282 ⟶ 3,425:
Using GnuCOBOL 2. By Steve Williams (with one change to get around a Rosetta Code code highlighter problem).
 
<langsyntaxhighlight lang="cobol"> >>SOURCE FORMAT IS FREE
*> this code is dedicated to the public domain
*> (GnuCOBOL) 2.3-dev.0
Line 3,688 ⟶ 3,831:
end-if
.
end program lexer.</langsyntaxhighlight>
 
{{out|case=test case 3}}
Line 3,730 ⟶ 3,873:
Lisp has a built-in reader and you can customize the reader by modifying its readtable. I'm also using the Gray stream, which is an almost standard feature of Common Lisp, for counting lines and columns.
 
<langsyntaxhighlight lang="lisp">(defpackage #:lexical-analyzer
(:use #:cl #:sb-gray)
(:export #:main))
Line 3,943 ⟶ 4,086:
 
(defun main ()
(lex *standard-input*))</langsyntaxhighlight>
{{out|case=test case 3}}
<pre> 5 16 KEYWORD-PRINT
Line 3,984 ⟶ 4,127:
{{trans|ATS}}
 
<langsyntaxhighlight Elixirlang="elixir">#!/bin/env elixir
# -*- elixir -*-
 
Line 4,452 ⟶ 4,595:
end ## module Lex
 
Lex.main(System.argv)</langsyntaxhighlight>
 
{{out}}
Line 4,498 ⟶ 4,641:
 
 
<langsyntaxhighlight lang="lisp">#!/usr/bin/emacs --script
;;
;; The Rosetta Code lexical analyzer in GNU Emacs Lisp.
Line 4,916 ⟶ 5,059:
(scan-text t))
 
(main)</langsyntaxhighlight>
 
 
Line 4,962 ⟶ 5,105:
 
 
<langsyntaxhighlight lang="erlang">#!/bin/env escript
%%%-------------------------------------------------------------------
 
Line 5,467 ⟶ 5,610:
%%% erlang-indent-level: 3
%%% end:
%%%-------------------------------------------------------------------</langsyntaxhighlight>
 
 
Line 5,509 ⟶ 5,652:
=={{header|Euphoria}}==
Tested with Euphoria 4.05.
<langsyntaxhighlight lang="euphoria">include std/io.e
include std/map.e
include std/types.e
Line 5,734 ⟶ 5,877:
end procedure
 
main(command_line())</langsyntaxhighlight>
 
{{out|case=test case 3}}
Line 5,778 ⟶ 5,921:
=={{header|Flex}}==
Tested with Flex 2.5.4.
<syntaxhighlight lang="c">%{
<lang C>%{
#include <stdio.h>
#include <stdlib.h>
Line 5,951 ⟶ 6,094:
} while (tok != tk_EOI);
return 0;
}</langsyntaxhighlight>
 
{{out|case=test case 3}}
Line 5,995 ⟶ 6,138:
=={{header|Forth}}==
Tested with Gforth 0.7.3.
<langsyntaxhighlight Forthlang="forth">CREATE BUF 0 , \ single-character look-ahead buffer
CREATE COLUMN# 0 ,
CREATE LINE# 1 ,
Line 6,117 ⟶ 6,260:
THEN THEN ;
: TOKENIZE BEGIN CONSUME AGAIN ;
TOKENIZE</langsyntaxhighlight>
 
{{out}}
Line 6,131 ⟶ 6,274:
 
The author has placed this Fortran code in the public domain.
<syntaxhighlight lang="fortran">!!!
<lang Fortran>!!!
!!! An implementation of the Rosetta Code lexical analyzer task:
!!! https://rosettacode.org/wiki/Compiler/lexical_analyzer
Line 7,209 ⟶ 7,352:
end subroutine print_usage
end program lex</langsyntaxhighlight>
 
{{out}}
Line 7,250 ⟶ 7,393:
=={{header|FreeBASIC}}==
Tested with FreeBASIC 1.05
<langsyntaxhighlight FreeBASIClang="freebasic">enum Token_type
tk_EOI
tk_Mul
Line 7,536 ⟶ 7,679:
print : print "Hit any to end program"
sleep
system</langsyntaxhighlight>
{{out|case=test case 3}}
<b>
Line 7,577 ⟶ 7,720:
=={{header|Go}}==
{{trans|FreeBASIC}}
<langsyntaxhighlight lang="go">package main
 
import (
Line 7,954 ⟶ 8,097:
initLex()
process()
}</langsyntaxhighlight>
 
{{out}}
Line 7,997 ⟶ 8,140:
=={{header|Haskell}}==
Tested with GHC 8.0.2
<langsyntaxhighlight lang="haskell">import Control.Applicative hiding (many, some)
import Control.Monad.State.Lazy
import Control.Monad.Trans.Maybe (MaybeT, runMaybeT)
Line 8,301 ⟶ 8,444:
where (Just t, s') = runState (runMaybeT lexer) s
(txt, _, _) = s'
</syntaxhighlight>
</lang>
 
{{out|case=test case 3}}
Line 8,353 ⟶ 8,496:
Global variables are avoided except for some constants that require initialization.
 
<syntaxhighlight lang="icon">#
<lang Icon>#
# The Rosetta Code lexical analyzer in Icon with co-expressions. Based
# upon the ATS implementation.
Line 8,851 ⟶ 8,994:
procedure max(x, y)
return (if x < y then y else x)
end</langsyntaxhighlight>
 
 
Line 8,900 ⟶ 9,043:
Implementation:
 
<langsyntaxhighlight Jlang="j">symbols=:256#0
ch=: {{1 0+x[symbols=: x (a.i.y)} symbols}}
'T0 token' =: 0 ch '%+-!(){};,<>=!|&'
Line 9,020 ⟶ 9,163:
keep=. (tokens~:<,'''')*-.comments+.whitespace+.unknown*a:=values
keep&#each ((1+lines),.columns);<names,.values
}}</langsyntaxhighlight>
 
Test case 3:
 
<syntaxhighlight lang="j">
<lang J>
flex=: {{
'A B'=.y
Line 9,090 ⟶ 9,233:
21 28 Integer 92
22 27 Integer 32
23 1 End_of_input </langsyntaxhighlight>
 
Here, it seems expedient to retain a structured representation of the lexical result. As shown, it's straightforward to produce a "pure" textual result for a hypothetical alternative implementation of the syntax analyzer, but the structured representation will be easier to deal with.
 
=={{header|Java}}==
<langsyntaxhighlight lang="java">
// Translated from python source
 
Line 9,336 ⟶ 9,479:
}
}
</syntaxhighlight>
</lang>
 
=={{header|JavaScript}}==
{{incorrect|Javascript|Please show output. Code is identical to [[Compiler/syntax_analyzer]] task}}
<langsyntaxhighlight lang="javascript">
/*
Token: type, value, line, pos
Line 9,553 ⟶ 9,696:
l.printTokens()
})
</syntaxhighlight>
</lang>
 
=={{header|Julia}}==
<langsyntaxhighlight lang="julia">struct Tokenized
startline::Int
startcol::Int
Line 9,711 ⟶ 9,854:
println(lpad(tok.startline, 3), lpad(tok.startcol, 5), lpad(tok.name, 18), " ", tok.value != nothing ? tok.value : "")
end
</langsyntaxhighlight>{{output}}<pre>
Line Col Name Value
5 16 Keyword_print
Line 9,750 ⟶ 9,893:
=={{header|kotlin}}==
{{trans|Java}}
<langsyntaxhighlight lang="kotlin">// Input: command line argument of file to process or console input. A two or
// three character console input of digits followed by a new line will be
// checked for an integer between zero and twenty-five to select a fixed test
Line 10,423 ⟶ 10,566:
System.exit(1)
} // try
} // main</langsyntaxhighlight>
{{out|case=test case 3: All Symbols}}
<b>
Line 10,471 ⟶ 10,614:
 
The first module is simply a table defining the names of tokens which don't have an associated value.
<langsyntaxhighlight Lualang="lua">-- module token_name (in a file "token_name.lua")
local token_name = {
['*'] = 'Op_multiply',
Line 10,500 ⟶ 10,643:
['putc'] = 'Keyword_putc',
}
return token_name</langsyntaxhighlight>
 
This module exports a function <i>find_token</i>, which attempts to find the next valid token from a specified position in a source line.
<langsyntaxhighlight Lualang="lua">-- module lpeg_token_finder
local M = {} -- only items added to M will be public (via 'return M' at end)
local table, concat = table, table.concat
Line 10,586 ⟶ 10,729:
end
return M</langsyntaxhighlight>
 
The <i>lexer</i> module uses <i>finder.find_token</i> to produce an iterator over the tokens in a source.
<langsyntaxhighlight Lualang="lua">-- module lexer
local M = {} -- only items added to M will publicly available (via 'return M' at end)
local string, io, coroutine, yield = string, io, coroutine, coroutine.yield
Line 10,668 ⟶ 10,811:
-- M._INTERNALS = _ENV
return M
</syntaxhighlight>
</lang>
 
This script uses <i>lexer.tokenize_text</i> to show the token sequence produced from a source text.
 
<langsyntaxhighlight Lualang="lua">lexer = require 'lexer'
format, gsub = string.format, string.gsub
 
Line 10,710 ⟶ 10,853:
-- etc.
end
</syntaxhighlight>
</lang>
 
===Using only standard libraries===
This version replaces the <i>lpeg_token_finder</i> module of the LPeg version with this <i>basic_token_finder</i> module, altering the <i>require</i> expression near the top of the <i>lexer</i> module accordingly. Tested with Lua 5.3.5. (Note that <i>select</i> is a standard function as of Lua 5.2.)
 
<langsyntaxhighlight lang="lua">-- module basic_token_finder
local M = {} -- only items added to M will be public (via 'return M' at end)
local table, string = table, string
Line 10,845 ⟶ 10,988:
 
-- M._ENV = _ENV
return M</langsyntaxhighlight>
 
=={{header|M2000 Interpreter}}==
<syntaxhighlight lang="m2000 interpreter">
<lang M2000 Interpreter>
Module lexical_analyzer {
a$={/*
Line 11,104 ⟶ 11,247:
}
lexical_analyzer
</syntaxhighlight>
</lang>
 
{{out}}
Line 11,149 ⟶ 11,292:
 
 
<langsyntaxhighlight Mercurylang="mercury">% -*- mercury -*-
%
% Compile with maybe something like:
Line 11,879 ⟶ 12,022:
 
:- func eof = int is det.
eof = -1.</langsyntaxhighlight>
 
{{out}}
Line 11,928 ⟶ 12,071:
Tested with Nim v0.19.4. Both examples are tested against all programs in [[Compiler/Sample programs]].
===Using string with regular expressions===
<langsyntaxhighlight lang="nim">
import re, strformat, strutils
 
Line 12,120 ⟶ 12,263:
 
echo input.tokenize.output
</syntaxhighlight>
</lang>
===Using stream with lexer library===
<langsyntaxhighlight lang="nim">
import lexbase, streams
from strutils import Whitespace
Line 12,433 ⟶ 12,576:
echo &"({l.lineNumber},{l.getColNumber l.bufpos + 1}) {l.error}"
main()
</syntaxhighlight>
</lang>
 
===Using nothing but system and strutils===
<langsyntaxhighlight lang="nim">import strutils
 
type
Line 12,656 ⟶ 12,799:
stdout.write('\n')
if token.kind == tokEnd:
break</langsyntaxhighlight>
 
=={{header|ObjectIcon}}==
Line 12,666 ⟶ 12,809:
 
 
<langsyntaxhighlight ObjectIconlang="objecticon"># -*- ObjectIcon -*-
#
# The Rosetta Code lexical analyzer in Object Icon. Based upon the ATS
Line 13,163 ⟶ 13,306:
write!([FileStream.stderr] ||| args)
exit(1)
end</langsyntaxhighlight>
 
 
Line 13,211 ⟶ 13,354:
(Much of the extra complication in the ATS comes from arrays being a linear type (whose "views" need tending), and from values of linear type having to be local to any function using them. This limitation could have been worked around, and arrays more similar to OCaml arrays could have been used, but at a cost in safety and efficiency.)
 
<langsyntaxhighlight OCamllang="ocaml">(*------------------------------------------------------------------*)
(* The Rosetta Code lexical analyzer, in OCaml. Based on the ATS. *)
 
Line 13,738 ⟶ 13,881:
main ()
 
(*------------------------------------------------------------------*)</langsyntaxhighlight>
 
{{out}}
Line 13,781 ⟶ 13,924:
Note: we do not print the line and token source code position for the simplicity.
 
<langsyntaxhighlight lang="scheme">
(import (owl parse))
 
Line 13,905 ⟶ 14,048:
(if (null? (cdr stream))
(print 'End_of_input))))
</syntaxhighlight>
</lang>
 
==== Testing ====
 
Testing function:
<langsyntaxhighlight lang="scheme">
(define (translate source)
(let ((stream (try-parse token-parser (str-iter source) #t)))
Line 13,916 ⟶ 14,059:
(if (null? (force (cdr stream)))
(print 'End_of_input))))
</syntaxhighlight>
</lang>
 
====== Testcase 1 ======
 
<langsyntaxhighlight lang="scheme">
(translate "
/*
Line 13,926 ⟶ 14,069:
*/
print(\"Hello, World!\\\\n\");
")</langsyntaxhighlight>
{{Out}}
<pre>
Line 13,939 ⟶ 14,082:
====== Testcase 2 ======
 
<langsyntaxhighlight lang="scheme">
(translate "
/*
Line 13,946 ⟶ 14,089:
phoenix_number = 142857;
print(phoenix_number, \"\\\\n\");
")</langsyntaxhighlight>
{{Out}}
<pre>
Line 13,965 ⟶ 14,108:
====== Testcase 3 ======
 
<langsyntaxhighlight lang="scheme">
(translate "
/*
Line 13,989 ⟶ 14,132:
/* character literal */ '\\\\'
/* character literal */ ' '
")</langsyntaxhighlight>
{{Out}}
<pre>
Line 14,030 ⟶ 14,173:
====== Testcase 4 ======
 
<langsyntaxhighlight lang="scheme">
(translate "
/*** test printing, embedded \\\\n and comments with lots of '*' ***/
Line 14,037 ⟶ 14,180:
print(\"Print a slash n - \\\\\\\\n.\\\\n\");
")
</syntaxhighlight>
</lang>
{{Out}}
<pre>
Line 14,060 ⟶ 14,203:
=={{header|Perl}}==
 
<langsyntaxhighlight lang="perl">#!/usr/bin/env perl
 
use strict;
Line 14,199 ⟶ 14,342:
($line, $col)
}
}</langsyntaxhighlight>
 
{{out|case=test case 3}}
Line 14,242 ⟶ 14,385:
===Alternate Perl Solution===
Tested on perl v5.26.1
<langsyntaxhighlight Perllang="perl">#!/usr/bin/perl
 
use strict; # lex.pl - source to tokens
Line 14,278 ⟶ 14,421:
1 + $` =~ tr/\n//, 1 + length $` =~ s/.*\n//sr, $^R;
}
printf "%5d %7d %s\n", 1 + tr/\n//, 1, 'End_of_input';</langsyntaxhighlight>
 
=={{header|Phix}}==
Line 14,285 ⟶ 14,428:
form. If required, demo\rosetta\Compiler\extra.e (below) contains some code that achieves the latter.
Code to print the human readable forms is likewise kept separate from any re-usable parts.
<!--<langsyntaxhighlight Phixlang="phix">(phixonline)-->
<span style="color: #000080;font-style:italic;">--
-- demo\rosetta\Compiler\core.e
Line 14,445 ⟶ 14,588:
<span style="color: #008080;">return</span> <span style="color: #000000;">s</span>
<span style="color: #008080;">end</span> <span style="color: #008080;">function</span>
<!--</langsyntaxhighlight>-->
For running under pwa/p2js, we also have a "fake file/io" component:
<!--<langsyntaxhighlight Phixlang="phix">(phixonline)-->
<span style="color: #000080;font-style:italic;">--
-- demo\rosetta\Compiler\js_io.e
Line 14,549 ⟶ 14,692:
<span style="color: #008080;">return</span> <span style="color: #000000;">EOF</span>
<span style="color: #008080;">end</span> <span style="color: #008080;">function</span>
<!--</langsyntaxhighlight>-->
The main lexer is also written to be reusable by later stages.
<!--<langsyntaxhighlight Phixlang="phix">(phixonline)-->
<span style="color: #000080;font-style:italic;">--
-- demo\\rosetta\\Compiler\\lex.e
Line 14,738 ⟶ 14,881:
<span style="color: #008080;">return</span> <span style="color: #000000;">toks</span>
<span style="color: #008080;">end</span> <span style="color: #008080;">function</span>
<!--</langsyntaxhighlight>-->
Optional: if you need human-readable output/input at each (later) stage, so you can use pipes
<!--<langsyntaxhighlight Phixlang="phix">-->
<span style="color: #000080;font-style:italic;">--
-- demo\rosetta\Compiler\extra.e
Line 14,793 ⟶ 14,936:
<span style="color: #008080;">return</span> <span style="color: #0000FF;">{</span><span style="color: #000000;">n_type</span><span style="color: #0000FF;">,</span> <span style="color: #000000;">left</span><span style="color: #0000FF;">,</span> <span style="color: #000000;">right</span><span style="color: #0000FF;">}</span>
<span style="color: #008080;">end</span> <span style="color: #008080;">function</span>
<!--</langsyntaxhighlight>-->
Finally, a simple test driver for the specific task:
<!--<langsyntaxhighlight Phixlang="phix">(phixonline)-->
<span style="color: #000080;font-style:italic;">--
-- demo\rosetta\Compiler\lex.exw
Line 14,823 ⟶ 14,966:
<span style="color: #000080;font-style:italic;">--main(command_line())</span>
<span style="color: #000000;">main</span><span style="color: #0000FF;">({</span><span style="color: #000000;">0</span><span style="color: #0000FF;">,</span><span style="color: #000000;">0</span><span style="color: #0000FF;">,</span><span style="color: #008000;">"test4.c"</span><span style="color: #0000FF;">})</span>
<!--</langsyntaxhighlight>-->
{{out}}
<pre>
Line 14,846 ⟶ 14,989:
=={{header|Prolog}}==
 
<langsyntaxhighlight lang="prolog">/*
Test harness for the analyzer, not needed if we are actually using the output.
*/
Line 15,006 ⟶ 15,149:
 
% anything else is an error
tok(_,_,L,P) --> { format(atom(Error), 'Invalid token at line ~d,~d', [L,P]), throw(Error) }.</langsyntaxhighlight>
{{out}}
<pre>
Line 15,047 ⟶ 15,190:
=={{header|Python}}==
Tested with Python 2.7 and 3.x
<langsyntaxhighlight Pythonlang="python">from __future__ import print_function
import sys
 
Line 15,228 ⟶ 15,371:
 
if tok == tk_EOI:
break</langsyntaxhighlight>
 
{{out|case=test case 3}}
Line 15,272 ⟶ 15,415:
=={{header|QB64}}==
Tested with QB64 1.5
<langsyntaxhighlight lang="vb">dim shared source as string, the_ch as string, tok as string, toktyp as string
dim shared line_n as integer, col_n as integer, text_p as integer, err_line as integer, err_col as integer, errors as integer
 
Line 15,512 ⟶ 15,655:
end
end sub
</syntaxhighlight>
</lang>
{{out|case=test case 3}}
<b>
Line 15,552 ⟶ 15,695:
 
=={{header|Racket}}==
<langsyntaxhighlight lang="racket">
#lang racket
(require parser-tools/lex)
Line 15,708 ⟶ 15,851:
"TEST 5"
(display-tokens (string->tokens test5))
</syntaxhighlight>
</lang>
 
=={{header|Raku}}==
Line 15,718 ⟶ 15,861:
{{works with|Rakudo|2016.08}}
 
<syntaxhighlight lang="raku" line>grammar tiny_C {
rule TOP { ^ <.whitespace>? <tokens> + % <.whitespace> <.whitespace> <eoi> }
 
Line 15,811 ⟶ 15,954:
 
my $tokenizer = tiny_C.parse(@*ARGS[0].IO.slurp);
parse_it( $tokenizer );</syntaxhighlight>
 
{{out|case=test case 3}}
Line 15,857 ⟶ 16,000:
 
 
<langsyntaxhighlight lang="ratfor">######################################################################
#
# The Rosetta Code scanner in Ratfor 77.
Line 17,087 ⟶ 17,230:
end
 
######################################################################</langsyntaxhighlight>
 
 
Line 17,193 ⟶ 17,336:
The following code implements a configurable (from a symbol map and keyword map provided as parameters) lexical analyzer.
 
<langsyntaxhighlight lang="scala">
package xyz.hyperreal.rosettacodeCompiler
 
Line 17,454 ⟶ 17,597:
 
}
</syntaxhighlight>
</lang>
 
=={{header|Scheme}}==
 
<langsyntaxhighlight lang="scheme">
(import (scheme base)
(scheme char)
Line 17,655 ⟶ 17,798:
(display-tokens (lexer (cadr (command-line))))
(display "Error: provide program filename\n"))
</syntaxhighlight>
</lang>
 
{{out}}
Line 17,673 ⟶ 17,816:
 
 
<langsyntaxhighlight SMLlang="sml">(*------------------------------------------------------------------*)
(* The Rosetta Code lexical analyzer, in Standard ML. Based on the ATS
and the OCaml. The intended compiler is Mlton or Poly/ML; there is
Line 18,479 ⟶ 18,622:
(* sml-indent-args: 2 *)
(* end: *)
(*------------------------------------------------------------------*)</langsyntaxhighlight>
 
 
Line 18,533 ⟶ 18,676:
{{libheader|Wren-fmt}}
{{libheader|Wren-ioutil}}
<syntaxhighlight lang="wren">import "./dynamic" for Enum, Struct, Tuple
import "./str" for Char
import "./fmt" for Fmt
import "./ioutil" for FileUtil
import "os" for Process
 
Line 18,882 ⟶ 19,025:
lineCount = lines.count
initLex.call()
process.call()</syntaxhighlight>
 
{{out}}
Line 18,924 ⟶ 19,067:
 
=={{header|Zig}}==
<syntaxhighlight lang="zig">
const std = @import("std");
 
Line 19,333 ⟶ 19,476:
return result.items;
}
</syntaxhighlight>
9,476

edits