diff --git a/highlighter.d b/highlighter.d
index 6bd4482..ccd1673 100644
--- a/highlighter.d
+++ b/highlighter.d
@@ -15,6 +15,8 @@ void writeSpan(string cssClass, string value)
stdout.write(`<span class="`, cssClass, `">`, value.replace("&", "&amp;").replace("<", "&lt;"), `</span>`);
}
+
+// http://ethanschoonover.com/solarized
void highlight(R)(R tokens)
{
stdout.writeln(q"[
]");
@@ -47,6 +50,8 @@ html { background-color: #fff; color: #222; }
writeSpan("num", t.value);
else if (t.type > TokenType.OPERATORS_BEGIN && t.type < TokenType.OPERATORS_END)
writeSpan("op", t.value);
+ else if (t.type > TokenType.CONSTANTS_BEGIN && t.type < TokenType.CONSTANTS_END)
+ writeSpan("cons", t.value);
else
stdout.write(t.value.replace("<", "&lt;"));
}
diff --git a/main.d b/main.d
index 0e77918..b1d00ec 100644
--- a/main.d
+++ b/main.d
@@ -143,7 +143,7 @@ int main(string[] args)
}
else
{
- writeln(args[1..$].map!(a => File(a).byLine(KeepTerminator.yes).join().byToken())()
+ writeln(args[1..$].map!(a => File(a).byLine(KeepTerminator.yes).join().byToken(a))()
.joiner().count!(a => isLineOfCode(a.type))());
}
return 0;
@@ -153,7 +153,7 @@ int main(string[] args)
{
File f = args.length == 1 ? stdin : File(args[1]);
highlighter.highlight(f.byLine(KeepTerminator.yes).join().byToken(
- IterationStyle.Everything, StringStyle.Source));
+ "", IterationStyle.Everything, TokenStyle.Source));
return 0;
}
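For context, both call sites above are adapting to the updated byToken signature, which now takes the file name as its second argument. A minimal stand-alone sketch of the new call shape (the file name "example.d" is illustrative only):
---
import std.array, std.stdio, std.d.lexer;

void demo()
{
    // Count-lines-of-code path: pass the file's own name through to the lexer.
    auto codeTokens = File("example.d").byLine(KeepTerminator.yes).join()
        .byToken("example.d");
    // Highlighting path: no file name, keep everything, source-style strings.
    auto allTokens = stdin.byLine(KeepTerminator.yes).join()
        .byToken("", IterationStyle.Everything, TokenStyle.Source);
}
---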
diff --git a/std/d/lexer.d b/std/d/lexer.d
index 1ac8019..ebd7ab2 100644
--- a/std/d/lexer.d
+++ b/std/d/lexer.d
@@ -1,71 +1,93 @@
// Written in the D programming language
/**
-* This module contains a range-based lexer for the D programming language.
-*
-* Examples:
-*
-* Generate HTML markup of D code.
-* ---
-* import std.stdio;
-* import std.array;
-* import std.file;
-* import std.d.lexer;
-*
-* void writeSpan(string cssClass, string value)
-* {
-* stdout.write(`<span class="`, cssClass, `">`, value.replace("&", "&amp;").replace("<", "&lt;"), `</span>`);
-* }
-*
-* void highlight(R)(R tokens)
-* {
-* stdout.writeln(q"[
-*
-*
-*
-*
-*
-* ]");
-*
-* foreach (Token t; tokens)
-* {
-* if (t.type > TokenType.TYPES_BEGIN && t.type < TokenType.TYPES_END)
-* writeSpan("type", t.value);
-* else if (t.type > TokenType.KEYWORDS_BEGIN && t.type < TokenType.KEYWORDS_END)
-* writeSpan("kwrd", t.value);
-* else if (t.type == TokenType.Comment)
-* writeSpan("com", t.value);
-* else if (t.type > TokenType.STRINGS_BEGIN && t.type < TokenType.STRINGS_END)
-* writeSpan("str", t.value);
-* else if (t.type > TokenType.NUMBERS_BEGIN && t.type < TokenType.NUMBERS_END)
-* writeSpan("num", t.value);
-* else if (t.type > TokenType.OPERATORS_BEGIN && t.type < TokenType.OPERATORS_END)
-* writeSpan("op", t.value);
-* else
-* stdout.write(t.value.replace("<", "&lt;"));
-* }
-* stdout.writeln("</pre>\n</body></html>");
-* }
-*
-* void main(string[] args)
-* {
-* args[1].readText().byToken(IterationStyle.Everything, StringStyle.Source).highlight();
-* }
-* ---
-*
-* Copyright: Brian Schott 2013
-* License: $(LINK2 http://www.boost.org/LICENSE_1_0.txt Boost, License 1.0)
-* Authors: Brian Schott
-* Source: $(PHOBOSSRC std/d/_lexer.d)
-*/
+ * This module contains a range-based _lexer for the D programming language.
+ *
+ * Examples:
+ *
+ * Generate HTML markup of D code.
+ * ---
+ * import std.stdio;
+ * import std.array;
+ * import std.file;
+ * import std.d.lexer;
+ *
+ * void writeSpan(string cssClass, string value)
+ * {
+ * stdout.write(`<span class="`, cssClass, `">`, value.replace("&", "&amp;").replace("<", "&lt;"), `</span>`);
+ * }
+ *
+ * // http://ethanschoonover.com/solarized
+ * void highlight(R)(R tokens)
+ * {
+ * stdout.writeln(q"[
+ *
+ *
+ *
+ *
+ *
+ * ]");
+ *
+ * foreach (Token t; tokens)
+ * {
+ * if (t.type > TokenType.TYPES_BEGIN && t.type < TokenType.TYPES_END)
+ * writeSpan("type", t.value);
+ * else if (t.type > TokenType.KEYWORDS_BEGIN && t.type < TokenType.KEYWORDS_END)
+ * writeSpan("kwrd", t.value);
+ * else if (t.type == TokenType.Comment)
+ * writeSpan("com", t.value);
+ * else if (t.type > TokenType.STRINGS_BEGIN && t.type < TokenType.STRINGS_END)
+ * writeSpan("str", t.value);
+ * else if (t.type > TokenType.NUMBERS_BEGIN && t.type < TokenType.NUMBERS_END)
+ * writeSpan("num", t.value);
+ * else if (t.type > TokenType.OPERATORS_BEGIN && t.type < TokenType.OPERATORS_END)
+ * writeSpan("op", t.value);
+ * else
+ * stdout.write(t.value.replace("<", "&lt;"));
+ * }
+ * stdout.writeln("</pre>\n</body></html>");
+ * }
+ *
+ * void main(string[] args)
+ * {
+ * args[1].readText().byToken(args[1], IterationStyle.Everything, TokenStyle.Source).highlight();
+ * }
+ * ---
+ * Iterate by tokens that would be significant to a parser
+ * ---
+ * import std.range;
+ * import std.d.lexer;
+ *
+ * // ...
+ *
+ * string s = "import std.stdio; // comment";
+ * auto tokens = byToken(s);
+ * // The comment and whitespace are not included
+ * assert (walkLength(tokens) == 5);
+ * ---
+ * Replace special tokens
+ * ---
+ * string s = "#line 5\n__VERSION__";
+ * auto tokens = byToken(s, "example.d", IterationStyle.CodeOnly, TokenStyle.Default, "foo", "1.0");
+ * assert (tokens.front.type == TokenType.StringLiteral);
+ * assert (tokens.front.value == "1.0");
+ * assert (tokens.front.lineNumber == 5);
+ * ---
+ *
+ * Copyright: Brian Schott 2013
+ * License: $(LINK2 http://www.boost.org/LICENSE_1_0.txt Boost, License 1.0)
+ * Authors: Brian Schott
+ * Source: $(PHOBOSSRC std/d/_lexer.d)
+ */
module std.d.lexer;
@@ -76,6 +98,8 @@ import std.conv;
import std.uni;
import std.ascii;
import std.exception;
+import std.datetime;
+import std.string;
import std.d.entities;
public:
@@ -129,8 +153,9 @@ struct Token
}
/**
-* Configure the behavior of the byToken() function
-*/
+ * Configure the behavior of the byToken() function. These flags may be
+ * combined using a bitwise or.
+ */
enum IterationStyle
{
/// Only include code, not whitespace or comments
@@ -141,20 +166,24 @@ enum IterationStyle
IncludeWhitespace = 0b0010,
/// Include $(LINK2 http://dlang.org/lex.html#specialtokens, special tokens)
IncludeSpecialTokens = 0b0100,
+ /// Do not stop iteration on reaching the ___EOF__ token
+ IgnoreEOF = 0b1000,
/// Include everything
- Everything = IncludeComments | IncludeWhitespace
+ Everything = IncludeComments | IncludeWhitespace | IgnoreEOF
}
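Since the reworded doc comment says these flags may be combined with a bitwise or, a small hypothetical combination (not part of the patch) could look like the following; the cast keeps the combined flags typed as IterationStyle rather than the enum's base type:
---
// Keep comments and keep lexing past __EOF__, but still skip whitespace.
auto style = cast(IterationStyle)
    (IterationStyle.IncludeComments | IterationStyle.IgnoreEOF);
auto tokens = "int a; // done".byToken("snippet.d", style);
---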
/**
-* Configuration of the string lexing style
-*/
-enum StringStyle : uint
+ * Configuration of the token lexing style. These flags may be combined with a
+ * bitwise or.
+ */
+enum TokenStyle : uint
{
/**
- * Escape sequences will be replaced with their equivalent characters,
- * enclosing quote characters will not be included. Useful for creating a
- * compiler or interpreter.
- */
+ * Escape sequences will be replaced with their equivalent characters,
+ * enclosing quote characters will not be included. Special tokens such as
+ * __VENDOR__ will be replaced with their equivalent strings. Useful for
+ * creating a compiler or interpreter.
+ */
Default = 0b0000,
/**
@@ -165,33 +194,48 @@ enum StringStyle : uint
NotEscaped = 0b0001,
/**
- * Strings will include their opening and closing quote characters as well
- * as any prefixes or suffixes $(LPAREN)e.g.: $(D_STRING "abcde"w) will
- * include the $(D_STRING 'w') character as well as the opening and closing
- * quotes$(RPAREN)
- */
- IncludeQuotes = 0x0010,
+ * Strings will include their opening and closing quote characters as well
+ * as any prefixes or suffixes $(LPAREN)e.g.: $(D_STRING "abcde"w) will
+ * include the $(D_STRING 'w') character as well as the opening and closing
+ * quotes$(RPAREN)
+ */
+ IncludeQuotes = 0b0010,
/**
- * Strings will be read exactly as they appeared in the source, including
- * their opening and closing quote characters. Useful for syntax
- * highlighting.
- */
- Source = NotEscaped | IncludeQuotes,
+ * Do not replace the value field of the special tokens such as ___DATE__
+ * with their string equivalents.
+ */
+ DoNotReplaceSpecial = 0b0100,
+
+ /**
+ * Strings will be read exactly as they appeared in the source, including
+ * their opening and closing quote characters. Useful for syntax
+ * highlighting.
+ */
+ Source = NotEscaped | IncludeQuotes | DoNotReplaceSpecial,
}
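As a rough illustration of the renamed TokenStyle flags, a sketch assuming byToken behaves as documented above (not code taken from the patch):
---
string code = `writeln("a\nb"w);`;
// Default: escape sequences resolved, quotes and the 'w' suffix dropped,
// special tokens replaced; the literal's value contains a real newline.
auto interpreted = code.byToken();
// Source: the literal is kept exactly as written ("a\nb"w), which is what
// the syntax highlighter wants.
auto verbatim = code.byToken("", IterationStyle.CodeOnly, TokenStyle.Source);
---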
+/// Default replacement for the ___VERSION__ special token
+immutable string VERSION = "1.0";
+
+/// Default replacement for the ___VENDOR__ special token
+immutable string VENDOR = "std.d.lexer";
+
/**
-* Iterate over the given range of characters by D tokens.
-* Params:
-* range = the range of characters
-* iterationStyle = See IterationStyle
-* stringStyle = see StringStyle
-* Returns:
-* an input range of tokens
-*/
-TokenRange!(R) byToken(R)(R range, const IterationStyle iterationStyle = IterationStyle.CodeOnly,
- const StringStyle stringStyle = StringStyle.Default)
- if (isForwardRange!(R) && (isSomeChar!(ElementType!(R)) || is (ElementType!(R) == ubyte)))
+ * Iterate over the given range of characters by D tokens.
+ * Params:
+ * range = the range of characters
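+ * fileName = the name of the file being lexed; used as the value of the ___FILE__ special token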
+ * iterationStyle = See IterationStyle
+ * stringStyle = see TokenStyle
+ * vendor = the string literal that should replace the ___VENDOR__ special token
+ * ver = the string literal that should replace the ___VERSION__ special token
+ * Returns:
+ * an input range of tokens
+ */
+TokenRange!(R) byToken(R)(R range, string fileName = "",
+ const IterationStyle iterationStyle = IterationStyle.CodeOnly,
+ const TokenStyle stringStyle = TokenStyle.Default, string vendor = VENDOR,
+ string ver = VERSION) if (isForwardRange!(R) && isSomeChar!(ElementType!(R)))
{
auto r = new TokenRange!(R)(range);
r.stringStyle = stringStyle;
@@ -202,19 +246,13 @@ TokenRange!(R) byToken(R)(R range, const IterationStyle iterationStyle = Iterati
}
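A hedged usage sketch of the new entry point, showing the vendor/version overrides (the replacement strings and file name here are made up):
---
// Replace __VENDOR__ / __VERSION__ with caller-supplied strings instead of
// the VENDOR and VERSION defaults declared above.
auto tokens = byToken("__VENDOR__ __VERSION__", "snippet.d",
    IterationStyle.CodeOnly, TokenStyle.Default, "myCompiler", "2.063");
---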
/**
-* Range of tokens. Avoid creating instances of this manually. Use
-* $(DDOC_PSYMBOL byToken$(LPAREN)$(RPAREN)) instead, as it does some initialization work.
-*/
+ * Range of tokens. Use byToken$(LPAREN)$(RPAREN) to instantiate.
+ */
class TokenRange(R) : InputRange!(Token)
{
- this(ref R range)
- {
- this.range = range;
- }
-
/**
- * Returns: true if the range is empty
- */
+ * Returns: true if the range is empty
+ */
override bool empty() const @property
{
return _empty;
@@ -295,9 +333,14 @@ class TokenRange(R) : InputRange!(Token)
private:
+ this(ref R range)
+ {
+ this.range = range;
+ }
+
/*
- * Advances the range to the next token
- */
+ * Advances the range to the next token
+ */
void advance()
{
if (range.empty)
@@ -483,6 +526,55 @@ private:
}
current.value = to!string(app.data);
current.type = lookupTokenType(current.value);
+
+ if (!(iterStyle & IterationStyle.IgnoreEOF) && current.type == TokenType.EOF)
+ {
+ _empty = true;
+ return;
+ }
+
+ if (stringStyle & TokenStyle.DoNotReplaceSpecial)
+ break;
+
+ switch (current.type)
+ {
+ case TokenType.Date:
+ current.type = TokenType.StringLiteral;
+ auto time = Clock.currTime();
+ current.value = format("%s %02d %04d", time.month, time.day, time.year);
+ break;
+ case TokenType.Time:
+ auto time = Clock.currTime();
+ current.type = TokenType.StringLiteral;
+ current.value = (cast(TimeOfDay)(time)).toISOExtString();
+ break;
+ case TokenType.Timestamp:
+ auto time = Clock.currTime();
+ auto dt = cast(DateTime) time;
+ current.type = TokenType.StringLiteral;
+ current.value = format("%s %s %02d %02d:%02d:%02d %04d",
+ dt.dayOfWeek, dt.month, dt.day, dt.hour, dt.minute,
+ dt.second, dt.year);
+ break;
+ case TokenType.Vendor:
+ current.type = TokenType.StringLiteral;
+ current.value = vendor;
+ break;
+ case TokenType.CompilerVersion:
+ current.type = TokenType.StringLiteral;
+ current.value = ver;
+ break;
+ case TokenType.Line:
+ current.type = TokenType.IntLiteral;
+ current.value = format("%d", current.lineNumber);
+ break;
+ case TokenType.File:
+ current.type = TokenType.StringLiteral;
+ current.value = fileName;
+ break;
+ default:
+ break;
+ }
break;
}
}
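To make the replacement logic above concrete, a small unittest-style sketch of what it should produce for the file and line tokens (__DATE__, __TIME__ and __TIMESTAMP__ depend on Clock.currTime and are not asserted here; line numbers are assumed to start at 1):
---
auto toks = byToken("__FILE__ __LINE__", "demo.d");
assert (toks.front.type == TokenType.StringLiteral);
assert (toks.front.value == "demo.d");
toks.popFront();
assert (toks.front.type == TokenType.IntLiteral);
assert (toks.front.value == "1");
---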
@@ -493,7 +585,10 @@ private:
R range;
bool _empty;
IterationStyle iterStyle;
- StringStyle stringStyle;
+ TokenStyle stringStyle;
+ string ver;
+ string vendor;
+ string fileName;
}
unittest
@@ -505,98 +600,98 @@ unittest
}
/**
-* Listing of all the tokens in the D language.
-*
-* Token types are arranged so that it is easy to group tokens while iterating
-* over them. For example:
-* ---
-* assert(TokenType.Increment < TokenType.OPERATORS_END);
-* assert(TokenType.Increment > TokenType.OPERATORS_BEGIN);
-* ---
-* The non-token values are documented below:
-*
-* $(BOOKTABLE ,
-* $(TR $(TH Begin) $(TH End) $(TH Content) $(TH Examples))
-* $(TR $(TD OPERATORS_BEGIN) $(TD OPERATORS_END) $(TD operatiors) $(TD +, -, <<=))
-* $(TR $(TD TYPES_BEGIN) $(TD TYPES_END) $(TD types) $(TD bool, char, double))
-* $(TR $(TD KEYWORDS_BEGIN) $(TD KEYWORDS) $(TD keywords) $(TD class, if, assert))
-* $(TR $(TD ATTRIBUTES_BEGIN) $(TD ATTRIBUTES_END) $(TD attributes) $(TD override synchronized, __gshared))
-* $(TR $(TD ATTRIBUTES_BEGIN) $(TD ATTRIBUTES_END) $(TD protection) $(TD public, protected))
-* $(TR $(TD CONSTANTS_BEGIN) $(TD CONSTANTS_END) $(TD compile-time constants) $(TD __FILE__, __TIME__))
-* $(TR $(TD LITERALS_BEGIN) $(TD LITERALS_END) $(TD string and numeric literals) $(TD "str", 123))
-* $(TR $(TD NUMBERS_BEGIN) $(TD NUMBERS_END) $(TD numeric literals) $(TD 0x123p+9, 0b0110))
-* $(TR $(TD STRINGS_BEGIN) $(TD STRINGS_END) $(TD string literals) $(TD `123`c, q{tokens;}, "abcde"))
-* $(TR $(TD MISC_BEGIN) $(TD MISC_END) $(TD anything else) $(TD whitespace, comments, identifiers))
-* )
-* Note that several of the above ranges overlap.
-*/
+ * Listing of all the tokens in the D language.
+ *
+ * Token types are arranged so that it is easy to group tokens while iterating
+ * over them. For example:
+ * ---
+ * assert(TokenType.Increment < TokenType.OPERATORS_END);
+ * assert(TokenType.Increment > TokenType.OPERATORS_BEGIN);
+ * ---
+ * The non-token values are documented below:
+ *
+ * $(BOOKTABLE ,
+ * $(TR $(TH Begin) $(TH End) $(TH Content) $(TH Examples))
+ * $(TR $(TD OPERATORS_BEGIN) $(TD OPERATORS_END) $(TD operators) $(TD +, -, <<=))
+ * $(TR $(TD TYPES_BEGIN) $(TD TYPES_END) $(TD types) $(TD bool, char, double))
+ * $(TR $(TD KEYWORDS_BEGIN) $(TD KEYWORDS_END) $(TD keywords) $(TD class, if, assert))
+ * $(TR $(TD ATTRIBUTES_BEGIN) $(TD ATTRIBUTES_END) $(TD attributes) $(TD override, synchronized, __gshared))
+ * $(TR $(TD ATTRIBUTES_BEGIN) $(TD ATTRIBUTES_END) $(TD protection) $(TD public, protected))
+ * $(TR $(TD CONSTANTS_BEGIN) $(TD CONSTANTS_END) $(TD compile-time constants) $(TD ___FILE__, ___TIME__))
+ * $(TR $(TD LITERALS_BEGIN) $(TD LITERALS_END) $(TD string and numeric literals) $(TD "str", 123))
+ * $(TR $(TD NUMBERS_BEGIN) $(TD NUMBERS_END) $(TD numeric literals) $(TD 0x123p+9, 0b0110))
+ * $(TR $(TD STRINGS_BEGIN) $(TD STRINGS_END) $(TD string literals) $(TD `123`c, q{tokens;}, "abcde"))
+ * $(TR $(TD MISC_BEGIN) $(TD MISC_END) $(TD anything else) $(TD whitespace, comments, identifiers))
+ * )
+ * Note that several of the above ranges overlap.
+ */
enum TokenType: uint
{
// Operators
OPERATORS_BEGIN, ///
- Assign, /// $(D_KEYWORD =)
- At, /// $(D_KEYWORD @)
- BitAnd, /// $(D_KEYWORD &)
- BitAndEquals, /// $(D_KEYWORD &=)
- BitOr, /// $(D_KEYWORD |)
- BitOrEquals, /// $(D_KEYWORD |=)
- CatEquals, /// $(D_KEYWORD ~=)
- Colon, /// $(D_KEYWORD :)
- Comma, /// $(D_KEYWORD ,)
- Decrement, /// $(D_KEYWORD --)
- Div, /// $(D_KEYWORD /)
- DivEquals, /// $(D_KEYWORD /=)
- Dollar, /// $(D_KEYWORD $)
- Dot, /// $(D_KEYWORD .)
- Equals, /// $(D_KEYWORD ==)
+ Assign, /// =
+ At, /// @
+ BitAnd, /// &
+ BitAndEquals, /// &=
+ BitOr, /// |
+ BitOrEquals, /// |=
+ CatEquals, /// ~=
+ Colon, /// :
+ Comma, /// ,
+ Decrement, /// --
+ Div, /// /
+ DivEquals, /// /=
+ Dollar, /// $
+ Dot, /// .
+ Equals, /// ==
GoesTo, // =>
- Greater, /// $(D_KEYWORD >)
- GreaterEqual, /// $(D_KEYWORD >=)
- Hash, // $(D_KEYWORD #)
- Increment, /// $(D_KEYWORD ++)
- LBrace, /// $(D_KEYWORD {)
- LBracket, /// $(D_KEYWORD [)
- Less, /// $(D_KEYWORD <)
- LessEqual, /// $(D_KEYWORD <=)
- LessEqualGreater, // $(D_KEYWORD <>=)
- LessOrGreater, /// $(D_KEYWORD <>)
- LogicAnd, /// $(D_KEYWORD &&)
- LogicOr, /// $(D_KEYWORD ||)
- LParen, /// $(D_KEYWORD $(LPAREN))
- Minus, /// $(D_KEYWORD -)
- MinusEquals, /// $(D_KEYWORD -=)
- Mod, /// $(D_KEYWORD %)
- ModEquals, /// $(D_KEYWORD %=)
- MulEquals, /// $(D_KEYWORD *=)
- Not, /// $(D_KEYWORD !)
- NotEquals, /// $(D_KEYWORD !=)
- NotGreater, /// $(D_KEYWORD !>)
- NotGreaterEqual, /// $(D_KEYWORD !>=)
- NotLess, /// $(D_KEYWORD !<)
- NotLessEqual, /// $(D_KEYWORD !<=)
- NotLessEqualGreater, /// $(D_KEYWORD !<>)
- Plus, /// $(D_KEYWORD +)
- PlusEquals, /// $(D_KEYWORD +=)
- Pow, /// $(D_KEYWORD ^^)
- PowEquals, /// $(D_KEYWORD ^^=)
- RBrace, /// $(D_KEYWORD })
- RBracket, /// $(D_KEYWORD ])
- RParen, /// $(D_KEYWORD $(RPAREN))
- Semicolon, /// $(D_KEYWORD ;)
- ShiftLeft, /// $(D_KEYWORD <<)
- ShiftLeftEqual, /// $(D_KEYWORD <<=)
- ShiftRight, /// $(D_KEYWORD >>)
- ShiftRightEqual, /// $(D_KEYWORD >>=)
+ Greater, /// >
+ GreaterEqual, /// >=
+ Hash, // #
+ Increment, /// ++
+ LBrace, /// {
+ LBracket, /// [
+ Less, /// <
+ LessEqual, /// <=
+ LessEqualGreater, // <>=
+ LessOrGreater, /// <>
+ LogicAnd, /// &&
+ LogicOr, /// ||
+ LParen, /// $(LPAREN)
+ Minus, /// -
+ MinusEquals, /// -=
+ Mod, /// %
+ ModEquals, /// %=
+ MulEquals, /// *=
+ Not, /// !
+ NotEquals, /// !=
+ NotGreater, /// !>
+ NotGreaterEqual, /// !>=
+ NotLess, /// !<
+ NotLessEqual, /// !<=
+ NotLessEqualGreater, /// !<>
+ Plus, /// +
+ PlusEquals, /// +=
+ Pow, /// ^^
+ PowEquals, /// ^^=
+ RBrace, /// }
+ RBracket, /// ]
+ RParen, /// $(RPAREN)
+ Semicolon, /// ;
+ ShiftLeft, /// <<
+ ShiftLeftEqual, /// <<=
+ ShiftRight, /// >>
+ ShiftRightEqual, /// >>=
Slice, // ..
- Star, /// $(D_KEYWORD *)
- Ternary, /// $(D_KEYWORD ?)
- Tilde, /// $(D_KEYWORD ~)
- Unordered, /// $(D_KEYWORD !<>=)
- UnsignedShiftRight, /// $(D_KEYWORD >>>)
- UnsignedShiftRightEqual, /// $(D_KEYWORD >>>=)
- Vararg, /// $(D_KEYWORD ...)
- Xor, /// $(D_KEYWORD ^)
- XorEquals, /// $(D_KEYWORD ^=)
+ Star, /// *
+ Ternary, /// ?
+ Tilde, /// ~
+ Unordered, /// !<>=
+ UnsignedShiftRight, /// >>>
+ UnsignedShiftRightEqual, /// >>>=
+ Vararg, /// ...
+ Xor, /// ^
+ XorEquals, /// ^=
OPERATORS_END, ///
@@ -718,10 +813,16 @@ enum TokenType: uint
// Constants
CONSTANTS_BEGIN, ///
- File, /// $(D_KEYWORD __FILE__)
- Line, /// $(D_KEYWORD __LINE__)
- Thread, /// $(D_KEYWORD __thread)
- Traits, /// $(D_KEYWORD __traits)
+ Date, /// ___DATE__
+ EOF, /// ___EOF__
+ Time, /// ___TIME__
+ Timestamp, /// ___TIMESTAMP__
+ Vendor, /// ___VENDOR__
+ CompilerVersion, /// ___VERSION__
+ File, /// ___FILE__
+ Line, /// ___LINE__
+ Thread, /// ___thread
+ Traits, /// ___traits
CONSTANTS_END, ///
// Misc
@@ -1120,7 +1221,7 @@ unittest
}
Token lexHexString(R, C = ElementType!R)(ref R input, ref uint index, ref uint lineNumber,
- const StringStyle style = StringStyle.Default)
+ const TokenStyle style = TokenStyle.Default)
in
{
assert (input.front == 'x');
@@ -1132,7 +1233,7 @@ body
t.startIndex = index;
t.type = TokenType.StringLiteral;
auto app = appender!(C[])();
- if (style & StringStyle.IncludeQuotes)
+ if (style & TokenStyle.IncludeQuotes)
app.put("x\"");
input.popFront();
input.popFront();
@@ -1150,7 +1251,7 @@ body
input.popFront();
++index;
}
- else if (std.uni.isWhite(input.front) && (style & StringStyle.NotEscaped))
+ else if (std.uni.isWhite(input.front) && (style & TokenStyle.NotEscaped))
{
app.put(input.front);
input.popFront();
@@ -1158,7 +1259,7 @@ body
}
else if (input.front == '"')
{
- if (style & StringStyle.IncludeQuotes)
+ if (style & TokenStyle.IncludeQuotes)
app.put('"');
input.popFront();
++index;
@@ -1180,7 +1281,7 @@ body
t.type = TokenType.DStringLiteral;
goto case 'c';
case 'c':
- if (style & StringStyle.IncludeQuotes)
+ if (style & TokenStyle.IncludeQuotes)
app.put(input.front);
input.popFront();
++index;
@@ -1189,7 +1290,7 @@ body
break;
}
}
- if (style & StringStyle.NotEscaped)
+ if (style & TokenStyle.NotEscaped)
t.value = to!string(app.data);
else
{
@@ -1219,17 +1320,17 @@ unittest
assert (br == TokenType.WStringLiteral);
auto c = `x"6d"`;
- auto cr = lexHexString(c, i, l, StringStyle.NotEscaped);
+ auto cr = lexHexString(c, i, l, TokenStyle.NotEscaped);
assert (cr == "6d");
auto d = `x"5e5f"d`;
- auto dr = lexHexString(d, i, l, StringStyle.NotEscaped | StringStyle.IncludeQuotes);
+ auto dr = lexHexString(d, i, l, TokenStyle.NotEscaped | TokenStyle.IncludeQuotes);
assert (dr == `x"5e5f"d`);
assert (dr == TokenType.DStringLiteral);
}
Token lexString(R)(ref R input, ref uint index, ref uint lineNumber,
- const StringStyle style = StringStyle.Default)
+ const TokenStyle style = TokenStyle.Default)
in
{
assert (input.front == '\'' || input.front == '"' || input.front == '`' || input.front == 'r');
@@ -1244,7 +1345,7 @@ body
bool isWysiwyg = input.front == 'r' || input.front == '`';
if (input.front == 'r')
{
- if (style & StringStyle.IncludeQuotes)
+ if (style & TokenStyle.IncludeQuotes)
app.put('r');
input.popFront();
}
@@ -1252,7 +1353,7 @@ body
input.popFront();
++index;
- if (style & StringStyle.IncludeQuotes)
+ if (style & TokenStyle.IncludeQuotes)
app.put(quote);
while (!isEoF(input))
{
@@ -1263,7 +1364,7 @@ body
}
else if (input.front == '\\')
{
- if (style & StringStyle.NotEscaped)
+ if (style & TokenStyle.NotEscaped)
{
auto r = input.save();
r.popFront();
@@ -1295,7 +1396,7 @@ body
}
else if (input.front == quote)
{
- if (style & StringStyle.IncludeQuotes)
+ if (style & TokenStyle.IncludeQuotes)
app.put(quote);
input.popFront();
++index;
@@ -1319,7 +1420,7 @@ body
t.type = TokenType.DStringLiteral;
goto case 'c';
case 'c':
- if (style & StringStyle.IncludeQuotes)
+ if (style & TokenStyle.IncludeQuotes)
app.put(input.front);
input.popFront();
++index;
@@ -1341,7 +1442,7 @@ unittest
auto b = "\"ab\\ncd\"";
assert (lexString(b, i, l) == "ab\ncd");
auto c = "`abc\\ndef`";
- assert (lexString(c, i, l, StringStyle.NotEscaped) == "abc\\ndef");
+ assert (lexString(c, i, l, TokenStyle.NotEscaped) == "abc\\ndef");
auto d = `"12345"w`;
assert (lexString(d, i, l).type == TokenType.WStringLiteral);
auto e = `"abc"c`;
@@ -1353,7 +1454,7 @@ unittest
}
Token lexDelimitedString(R)(ref R input, ref uint index,
- ref uint lineNumber, const StringStyle stringStyle = StringStyle.Default)
+ ref uint lineNumber, const TokenStyle stringStyle = TokenStyle.Default)
in
{
assert(input.front == 'q');
@@ -1369,7 +1470,7 @@ body
input.popFront(); // q
input.popFront(); // "
index += 2;
- if (stringStyle & StringStyle.IncludeQuotes)
+ if (stringStyle & TokenStyle.IncludeQuotes)
{
app.put('q');
app.put('"');
@@ -1415,7 +1516,7 @@ body
app.put('"');
++index;
input.popFront();
- if (stringStyle & StringStyle.IncludeQuotes)
+ if (stringStyle & TokenStyle.IncludeQuotes)
t.value = to!string(app.data);
else
t.value = to!string(app.data[0 .. app.data.length - hereOpen.data.length - 1]);
@@ -1431,7 +1532,7 @@ body
}
else
{
- if (stringStyle & StringStyle.IncludeQuotes)
+ if (stringStyle & TokenStyle.IncludeQuotes)
app.put(input.front);
input.popFront();
int depth = 1;
@@ -1446,7 +1547,7 @@ body
--depth;
if (depth == 0)
{
- if (stringStyle & StringStyle.IncludeQuotes)
+ if (stringStyle & TokenStyle.IncludeQuotes)
{
app.put(close);
app.put('"');
@@ -1475,7 +1576,7 @@ body
t.type = TokenType.DStringLiteral;
goto case 'c';
case 'c':
- if (stringStyle & StringStyle.IncludeQuotes)
+ if (stringStyle & TokenStyle.IncludeQuotes)
app.put(input.front);
input.popFront();
++index;
@@ -1504,13 +1605,13 @@ unittest
assert (br == TokenType.WStringLiteral);
auto c = `q"[ ]");`;
- auto cr = lexDelimitedString(c, i, l, StringStyle.Source);
+ auto cr = lexDelimitedString(c, i, l, TokenStyle.Source);
assert (cr == `q"[ ]"`);
assert (cr == TokenType.StringLiteral);
}
Token lexTokenString(R)(ref R input, ref uint index, ref uint lineNumber,
- const StringStyle stringStyle = StringStyle.Default)
+ const TokenStyle stringStyle = TokenStyle.Default)
in
{
assert (input.front == 'q');
@@ -1525,12 +1626,12 @@ body
input.popFront(); // q
input.popFront(); // {
index += 2;
- if (stringStyle & StringStyle.IncludeQuotes)
+ if (stringStyle & TokenStyle.IncludeQuotes)
{
app.put('q');
app.put('{');
}
- auto r = byToken(input, IterationStyle.Everything, StringStyle.Source);
+ auto r = byToken(input, "", IterationStyle.Everything, TokenStyle.Source);
r.index = index;
int depth = 1;
while (!r.empty)
@@ -1544,7 +1645,7 @@ body
--depth;
if (depth <= 0)
{
- if (stringStyle & StringStyle.IncludeQuotes)
+ if (stringStyle & TokenStyle.IncludeQuotes)
app.put('}');
r.popFront();
break;
@@ -1554,7 +1655,7 @@ body
r.popFront();
}
- auto n = app.data.length - (stringStyle & StringStyle.IncludeQuotes ? 2 : 0);
+ auto n = app.data.length - (stringStyle & TokenStyle.IncludeQuotes ? 2 : 0);
input.popFrontN(n);
if (!input.isEoF())
{
@@ -1567,7 +1668,7 @@ body
t.type = TokenType.DStringLiteral;
goto case 'c';
case 'c':
- if (stringStyle & StringStyle.IncludeQuotes)
+ if (stringStyle & TokenStyle.IncludeQuotes)
app.put(input.front);
input.popFront();
++index;
@@ -1592,7 +1693,7 @@ unittest
assert (ar == "import std.stdio;");
auto b = `q{writeln("hello world");}`;
- auto br = lexTokenString(b, i, l, StringStyle.Source);
+ auto br = lexTokenString(b, i, l, TokenStyle.Source);
assert (br == TokenType.StringLiteral);
assert (br == `q{writeln("hello world");}`);
}
@@ -2414,6 +2515,7 @@ pure nothrow TokenType lookupTokenType(const string input)
case 7:
switch (input)
{
+ case "__EOF__": return TokenType.EOF;
case "cdouble": return TokenType.Cdouble;
case "default": return TokenType.Default;
case "dstring": return TokenType.DString;
@@ -2444,6 +2546,8 @@ pure nothrow TokenType lookupTokenType(const string input)
case "function": return TokenType.Function;
case "unittest": return TokenType.Unittest;
case "__FILE__": return TokenType.File;
+ case "__DATE__": return TokenType.Date;
+ case "__TIME__": return TokenType.Date;
default: break;
}
break;
@@ -2459,14 +2563,26 @@ pure nothrow TokenType lookupTokenType(const string input)
}
break;
case 10:
- if (input == "deprecated")
- return TokenType.Deprecated;
+ switch (input)
+ {
+ case "deprecated": return TokenType.Deprecated;
+ case "__VENDOR__": return TokenType.Vendor;
+ default: break;
+ }
break;
case 11:
+ if (input == "__VERSION__")
+ return TokenType.CompilerVersion;
+ break;
+ case 12:
if (input == "synchronized")
return TokenType.Synchronized;
break;
case 13:
+ if (input == "__TIMESTAMP__")
+ return TokenType.Timestamp;
+ break;
+ case 15:
if (input == "foreach_reverse")
return TokenType.Foreach_reverse;
break;
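The lookupTokenType additions above dispatch on the identifier's length before comparing values; a quick unittest-style sanity check of the new cases (lengths 7, 10 and 13):
---
assert (lookupTokenType("__EOF__") == TokenType.EOF);
assert (lookupTokenType("__VENDOR__") == TokenType.Vendor);
assert (lookupTokenType("__TIMESTAMP__") == TokenType.Timestamp);
---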
@@ -2574,3 +2690,5 @@ string generateCaseTrie(string[] args ...)
}
return printCaseStatements(t, "");
}
+
+//void main(string[] args) {}