diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..0cc7023
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "msgpack-d"]
+	path = msgpack-d
+	url = https://github.com/msgpack/msgpack-d.git
diff --git a/editors/textadept/modules/dmd/init.lua b/editors/textadept/modules/dmd/init.lua
index 6f883cb..4953deb 100755
--- a/editors/textadept/modules/dmd/init.lua
+++ b/editors/textadept/modules/dmd/init.lua
@@ -316,7 +316,7 @@ local keywords = {
 -- For this module to work the dscanner program must be installed. Configure the
 -- path to the executable here
-M.PATH_TO_DSCANNER = "dscanner"
+M.PATH_TO_DSCANNER = "/home/alaran/src/dscanner-master/dscanner"
 _M.textadept.editing.comment_string.dmd = '//'
 _M.textadept.run.compile_command.dmd = 'dmd -c -o- %(filename)'
@@ -367,34 +367,34 @@ local function showCompletionList(r)
 	buffer.auto_c_choose_single = setting
 end
--events.connect(events.CHAR_ADDED, function(ch)
--	if buffer:get_lexer() ~= "dmd" then return end
--	if ch > 255 then return end
--	local character = string.char(ch)
--	if character == "." or character == "(" then
--		local fileName = os.tmpname()
--		local tmpFile = io.open(fileName, "w")
--		tmpFile:write(buffer:get_text())
--		local command = M.PATH_TO_DSCANNER
--			.. (character == "." and " --dotComplete " or " --parenComplete ")
--			.. fileName .. " " .. buffer.current_pos .. " -I" .. buffer.filename:match(".+[\\/]")
--		local p = io.popen(command)
--		local r = p:read("*a")
--		if r ~= "\n" then
--			if character == "." then
--				showCompletionList(r)
--			elseif character == "(" then
--				if r:find("^completions\n") then
--					showCompletionList(r)
--				elseif r:find("^calltips\n.*") then
--					r = r:gsub("^calltips\n", "")
--					buffer:call_tip_show(buffer.current_pos, r:gsub("\\n", "\n"):gsub("\\t", "\t"):match("(.*)%s+$"))
--				end
--			end
--		end
--		os.remove(fileName)
--	end
--end)
+events.connect(events.CHAR_ADDED, function(ch)
+	if buffer:get_lexer() ~= "dmd" then return end
+	if ch > 255 then return end
+	local character = string.char(ch)
+	if character == "." or character == "(" then
+		local fileName = os.tmpname()
+		local tmpFile = io.open(fileName, "w")
+		tmpFile:write(buffer:get_text())
+		local command = M.PATH_TO_DSCANNER
+			.. (character == "." and " --dotComplete " or " --parenComplete ")
+			.. fileName .. " " .. buffer.current_pos .. " -I" .. buffer.filename:match(".+[\\/]")
+		local p = io.popen(command)
+		local r = p:read("*a")
+		if r ~= "\n" then
+			if character == "." then
+				showCompletionList(r)
+			elseif character == "(" then
+				if r:find("^completions\n") then
+					showCompletionList(r)
+				elseif r:find("^calltips\n.*") then
+					r = r:gsub("^calltips\n", "")
+					buffer:call_tip_show(buffer.current_pos, r:gsub("\\n", "\n"):gsub("\\t", "\t"):match("(.*)%s+$"))
+				end
+			end
+		end
+		os.remove(fileName)
+	end
+end)
 local function autocomplete()
diff --git a/msgpack-d b/msgpack-d
new file mode 160000
index 0000000..b0798c7
--- /dev/null
+++ b/msgpack-d
@@ -0,0 +1 @@
+Subproject commit b0798c79c523afbab770353728ef83253f493bcd
diff --git a/std/d/lexer.d b/std/d/lexer.d
index af5a71c..2e8ae1d 100644
--- a/std/d/lexer.d
+++ b/std/d/lexer.d
@@ -1,110 +1,110 @@
 // Written in the D programming language
 /**
-* This module contains a range-based _lexer for the D programming language.
-*
-* For performance reasons the _lexer contained in this module operates only on
-* ASCII or UTF-8 encoded source code. If the use of other encodings is
-* desired, the source code must be converted to UTF-8 before passing it to this
-* _lexer.
-*
-* To use the _lexer, create a LexerConfig struct
-* ---
-* LexerConfig config;
-* config.iterStyle = IterationStyle.everything;
-* config.tokenStyle = IterationStyle.source;
-* config.versionNumber = 2061;
-* config.vendorString = "Lexer Example";
-* ---
-* Once you have configured the _lexer, call byToken$(LPAREN)$(RPAREN) on your
-* source code, passing in the configuration.
-* ---
-* auto source = "import std.stdio;"c;
-* auto tokens = byToken(source, config);
-* ---
-* The result of byToken$(LPAREN)$(RPAREN) is a forward range of tokens that can
-* be used easily with the algorithms from std.algorithm or iterated over with
-* $(D_KEYWORD foreach)
-* ---
-* assert (tokens.front.type == TokenType.import_);
-* assert (tokens.front.value == "import");
-* assert (tokens.front.line == 1);
-* assert (tokens.front.startIndex == 0);
-* ---
-*
-* Examples:
-*
-* Generate HTML markup of D code.
-* ---
-* module highlighter;
-*
-* import std.stdio;
-* import std.array;
-* import std.d.lexer;
-*
-* void writeSpan(string cssClass, string value)
-* {
-*     stdout.write(`<span class="`, cssClass, `">`, value.replace("&", "&amp;").replace("<", "&lt;"), `</span>`);
-* }
-*
-*
-* // http://ethanschoonover.com/solarized
-* void highlight(R)(R tokens)
-* {
-*     stdout.writeln(q"[
-*
-*
-*
-*
-*
-*
-* ]");
-*
-*     foreach (Token t; tokens)
-*     {
-*         if (isType(t.type))
-*             writeSpan("type", t.value);
-*         else if (isKeyword(t.type))
-*             writeSpan("kwrd", t.value);
-*         else if (t.type == TokenType.comment)
-*             writeSpan("com", t.value);
-*         else if (isStringLiteral(t.type))
-*             writeSpan("str", t.value);
-*         else if (isNumberLiteral(t.type))
-*             writeSpan("num", t.value);
-*         else if (isOperator(t.type))
-*             writeSpan("op", t.value);
-*         else
-*             stdout.write(t.value.replace("<", "&lt;"));
-*     }
-*     stdout.writeln("\n");
-* }
-*
-* void main(string[] args)
-* {
-*     LexerConfig config;
-*     config.tokenStyle = TokenStyle.source;
-*     config.iterStyle = IterationStyle.everything;
-*     config.fileName = args[1];
-*     auto f = File(args[1]);
-*     (cast(ubyte[]) f.byLine(KeepTerminator.yes).join()).byToken(config).highlight();
-* }
-* ---
-*
-* Copyright: Brian Schott 2013
-* License: $(LINK2 http://www.boost.org/LICENSE_1_0.txt Boost, License 1.0)
-* Authors: Brian Schott, Dmitry Olshansky
-* Source: $(PHOBOSSRC std/d/_lexer.d)
-*/
+ * This module contains a range-based _lexer for the D programming language.
+ *
+ * For performance reasons the _lexer contained in this module operates only on
+ * ASCII or UTF-8 encoded source code. If the use of other encodings is
+ * desired, the source code must be converted to UTF-8 before passing it to this
+ * _lexer.
+ *
+ * To use the _lexer, create a LexerConfig struct
+ * ---
+ * LexerConfig config;
+ * config.iterStyle = IterationStyle.everything;
+ * config.tokenStyle = IterationStyle.source;
+ * config.versionNumber = 2061;
+ * config.vendorString = "Lexer Example";
+ * ---
+ * Once you have configured the _lexer, call byToken$(LPAREN)$(RPAREN) on your
+ * source code, passing in the configuration.
+ * ---
+ * auto source = "import std.stdio;"c;
+ * auto tokens = byToken(source, config);
+ * ---
+ * The result of byToken$(LPAREN)$(RPAREN) is a forward range of tokens that can
+ * be used easily with the algorithms from std.algorithm or iterated over with
+ * $(D_KEYWORD foreach)
+ * ---
+ * assert (tokens.front.type == TokenType.import_);
+ * assert (tokens.front.value == "import");
+ * assert (tokens.front.line == 1);
+ * assert (tokens.front.startIndex == 0);
+ * ---
+ *
+ * Examples:
+ *
+ * Generate HTML markup of D code.
+ * ---
+ * module highlighter;
+ *
+ * import std.stdio;
+ * import std.array;
+ * import std.d.lexer;
+ *
+ * void writeSpan(string cssClass, string value)
+ * {
+ *     stdout.write(`<span class="`, cssClass, `">`, value.replace("&", "&amp;").replace("<", "&lt;"), `</span>`);
+ * }
+ *
+ *
+ * // http://ethanschoonover.com/solarized
+ * void highlight(R)(R tokens)
+ * {
+ *     stdout.writeln(q"[
+ *
+ *
+ *
+ *
+ *
+ *
+ * ]");
+ *
+ *     foreach (Token t; tokens)
+ *     {
+ *         if (isType(t.type))
+ *             writeSpan("type", t.value);
+ *         else if (isKeyword(t.type))
+ *             writeSpan("kwrd", t.value);
+ *         else if (t.type == TokenType.comment)
+ *             writeSpan("com", t.value);
+ *         else if (isStringLiteral(t.type))
+ *             writeSpan("str", t.value);
+ *         else if (isNumberLiteral(t.type))
+ *             writeSpan("num", t.value);
+ *         else if (isOperator(t.type))
+ *             writeSpan("op", t.value);
+ *         else
+ *             stdout.write(t.value.replace("<", "&lt;"));
+ *     }
+ *     stdout.writeln("\n");
+ * }
+ *
+ * void main(string[] args)
+ * {
+ *     LexerConfig config;
+ *     config.tokenStyle = TokenStyle.source;
+ *     config.iterStyle = IterationStyle.everything;
+ *     config.fileName = args[1];
+ *     auto f = File(args[1]);
+ *     (cast(ubyte[]) f.byLine(KeepTerminator.yes).join()).byToken(config).highlight();
+ * }
+ * ---
+ *
+ * Copyright: Brian Schott 2013
+ * License: $(LINK2 http://www.boost.org/LICENSE_1_0.txt Boost, License 1.0)
+ * Authors: Brian Schott, Dmitry Olshansky
+ * Source: $(PHOBOSSRC std/d/_lexer.d)
+ */
 module std.d.lexer;
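
The header comment above documents byToken() returning a forward range of Tokens that works with std.algorithm. As a quick illustration (a minimal sketch, not part of this patch; it relies only on names shown in that comment: LexerConfig, byToken, Token.type, TokenType.import_):

import std.algorithm : count;
import std.d.lexer;

unittest
{
    // Count the import keywords in a snippet; byToken() yields a forward range,
    // so std.algorithm works on it directly.
    auto source = "import std.stdio;\nimport std.array;\nvoid main() {}"c;
    LexerConfig config;
    auto imports = byToken(source, config).count!(t => t.type == TokenType.import_);
    assert(imports == 2);
}
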
@@ -357,7 +357,7 @@ struct TokenRange(LexSrc)
         popFront();
         return r;
     }
-
+
     /**
      * Removes the current token from the range
      */
@@ -3013,7 +3013,7 @@ struct StringCache
         assert((startSize & (startSize-1)) == 0);
         index = new Slot*[startSize];
     }
-
+
     string get(R)(R range)
         if(isRandomAccessRange!R
         && is(Unqual!(ElementType!R) : const(ubyte)))
@@ -3021,7 +3021,7 @@ struct StringCache
         uint h = hash(range);
         uint bucket = h & (index.length-1);
         Slot *s = index[bucket];
-        if(s == null)
+        if(s == null)
         {
             string str = putIntoCache(range);
             index[bucket] = allocateSlot(str, h);
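
A note on the two hunks above (illustrative only, not part of the patch): the constructor asserts that startSize is a power of two precisely so that get() can pick a bucket with h & (index.length-1), which equals h % index.length for power-of-two lengths while avoiding an integer division:

unittest
{
    // Mask and modulo agree whenever the table length is a power of two.
    uint h = 0xDEADBEEF;
    size_t len = 16;                      // 2^4
    assert((h & (len - 1)) == (h % len));
}
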
@@ -3031,12 +3031,12 @@ struct StringCache
         for(;;)
         {
             if(s.hash == h && s.value.equal(range))
-                return s.value;
+                return s.value;
             if(s.next == null) break;
             s = s.next;
         }
         string str = putIntoCache(range);
-        s.next = allocateSlot(str, h);
+        s.next = allocateSlot(str, h);
         uniqueSlots++;
         // had at least 1 item in this bucket
         // and inserted another one - check load factor
@@ -3044,8 +3044,8 @@ struct StringCache
             rehash();
         return str;
     }
-
-private:
+
+private:
     static uint hash(R)(R data)
     {
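
The hunks above are whitespace-only, but they pass through StringCache's interning path: get() either returns the already-cached string for a byte sequence or copies it into the cache. A rough usage sketch follows; it assumes the constructor shown earlier takes the initial bucket count and that both it and get() are accessible, neither of which is visible in this diff:

unittest
{
    // Hypothetical sketch: equal byte sequences come back as the same slice.
    auto cache = StringCache(16);                 // bucket count: a power of two
    auto bytes1 = cast(const(ubyte)[]) "foreach";
    auto bytes2 = cast(const(ubyte)[]) "foreach";
    string s1 = cache.get(bytes1);
    string s2 = cache.get(bytes2);
    assert(s1 == s2);
    assert(s1 is s2); // same cached memory, not merely equal contents
}
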
@@ -3064,7 +3064,7 @@ private:
         Slot* next;
         uint hash;
     };
-
+
     void printLoadFactor()
     {
         size_t cnt = 0, maxChain = 0;
@@ -3080,8 +3080,8 @@ private:
         }
         import std.stdio;
         assert(cnt == uniqueSlots);
-        writefln("Load factor: %.3f; max bucket %d",
-            cast(double)cnt/index.length,
+        writefln("Load factor: %.3f; max bucket %d",
+            cast(double)cnt/index.length,
             maxChain);
     }
@@ -3095,9 +3095,9 @@ private:
         {
             Slot* cur = index[i], prev;
             while(cur)
-            {
+            {
                 //has extra bit set - move it out
-                if(cur.hash & oldLen)
+                if(cur.hash & oldLen)
                 {
                     if(prev == null)
                     {
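
The "has extra bit set - move it out" comment in the hunk above refers to the standard trick for doubling a power-of-two hash table: the new bucket index hash & (2*oldLen - 1) differs from the old one only in the oldLen bit, so each entry either stays in bucket i or moves to bucket i + oldLen. A small worked example (illustrative only, not part of the patch):

unittest
{
    size_t oldLen = 8;
    uint hash = 0b1101;                       // the oldLen bit (0b1000) is set
    auto oldBucket = hash & (oldLen - 1);     // 0b0101 = 5
    auto newBucket = hash & (2 * oldLen - 1); // 0b1101 = 13
    assert(newBucket == oldBucket + oldLen);  // moved out by exactly oldLen
}
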
@@ -3122,7 +3122,7 @@ private:
         //writefln("AFTER (size = %d):", index.length);
         //printLoadFactor();
     }
-
+
     static Slot* removeLink(ref Slot* cur, Slot* prev)
     {
         prev.next = cur.next;
@@ -3130,16 +3130,16 @@ private:
         cur = cur.next;
         return r;
     }
-
+
     //insert at front of bucket
     void insertIntoBucket(Slot* what, size_t bucket)
    {
         what.next = null;
         Slot* p = index[bucket];
         what.next = p;
-        index[bucket] = what;
+        index[bucket] = what;
     }
-
+
     Slot* allocateSlot(string val, uint hash)
     {
         auto slice = allocateInCache(Slot.sizeof);
@@ -3159,7 +3159,7 @@ private:
     //TODO: add aligned variant that allocates at word boundary
     ubyte[] allocateInCache(size_t size)
     {
-        import core.memory;
+        import core.memory;
         if(next + size > chunkSize)
         {
             // avoid huge allocations