diff --git a/plugin/packages/wakatime/HISTORY.rst b/plugin/packages/wakatime/HISTORY.rst index a166fd9..d952942 100644 --- a/plugin/packages/wakatime/HISTORY.rst +++ b/plugin/packages/wakatime/HISTORY.rst @@ -3,6 +3,13 @@ History ------- +2.1.7 (2014-11-30) +++++++++++++++++++ + +- upgrade pygments to v2.0.1 +- always log an error when api key is incorrect + + 2.1.6 (2014-11-18) ++++++++++++++++++ diff --git a/plugin/packages/wakatime/wakatime/__init__.py b/plugin/packages/wakatime/wakatime/__init__.py index 9292cc6..94c722b 100644 --- a/plugin/packages/wakatime/wakatime/__init__.py +++ b/plugin/packages/wakatime/wakatime/__init__.py @@ -13,7 +13,7 @@ from __future__ import print_function __title__ = 'wakatime' -__version__ = '2.1.6' +__version__ = '2.1.7' __author__ = 'Alan Hamlett' __license__ = 'BSD' __copyright__ = 'Copyright 2014 Alan Hamlett' @@ -286,6 +286,10 @@ def send_action(project=None, branch=None, stats=None, key=None, targetFile=None auth = u('Basic {key}').format(key=u(base64.b64encode(str.encode(key) if is_py3 else key))) request.add_header('Authorization', auth) + ALWAYS_LOG_CODES = [ + 401, + ] + # add Olson timezone to request try: tz = tzlocal.get_localzone() @@ -310,6 +314,10 @@ def send_action(project=None, branch=None, stats=None, key=None, targetFile=None queue.push(data, plugin) if log.isEnabledFor(logging.DEBUG): log.warn(exception_data) + if response.getcode() in ALWAYS_LOG_CODES: + log.error({ + 'response_code': response.getcode(), + }) else: log.error(exception_data) except: @@ -325,6 +333,10 @@ def send_action(project=None, branch=None, stats=None, key=None, targetFile=None log.error(exception_data) elif log.isEnabledFor(logging.DEBUG): log.warn(exception_data) + if response.getcode() in ALWAYS_LOG_CODES: + log.error({ + 'response_code': response.getcode(), + }) else: log.error(exception_data) else: diff --git a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/formatters/__init__.py 
b/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/formatters/__init__.py deleted file mode 100644 index d842b96..0000000 --- a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/formatters/__init__.py +++ /dev/null @@ -1,68 +0,0 @@ -# -*- coding: utf-8 -*- -""" - pygments.formatters - ~~~~~~~~~~~~~~~~~~~ - - Pygments formatters. - - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" -import os.path -import fnmatch - -from pygments.formatters._mapping import FORMATTERS -from pygments.plugin import find_plugin_formatters -from pygments.util import ClassNotFound - -ns = globals() -for fcls in FORMATTERS: - ns[fcls.__name__] = fcls -del fcls - -__all__ = ['get_formatter_by_name', 'get_formatter_for_filename', - 'get_all_formatters'] + [cls.__name__ for cls in FORMATTERS] - - -_formatter_alias_cache = {} -_formatter_filename_cache = [] - -def _init_formatter_cache(): - if _formatter_alias_cache: - return - for cls in get_all_formatters(): - for alias in cls.aliases: - _formatter_alias_cache[alias] = cls - for fn in cls.filenames: - _formatter_filename_cache.append((fn, cls)) - - -def find_formatter_class(name): - _init_formatter_cache() - cls = _formatter_alias_cache.get(name, None) - return cls - - -def get_formatter_by_name(name, **options): - _init_formatter_cache() - cls = _formatter_alias_cache.get(name, None) - if not cls: - raise ClassNotFound("No formatter found for name %r" % name) - return cls(**options) - - -def get_formatter_for_filename(fn, **options): - _init_formatter_cache() - fn = os.path.basename(fn) - for pattern, cls in _formatter_filename_cache: - if fnmatch.fnmatch(fn, pattern): - return cls(**options) - raise ClassNotFound("No formatter found for file name %r" % fn) - - -def get_all_formatters(): - """Return a generator for all formatters.""" - for formatter in FORMATTERS: - yield formatter - for _, formatter in find_plugin_formatters(): - yield formatter diff 
--git a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/formatters/_mapping.py b/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/formatters/_mapping.py deleted file mode 100755 index a423ba5..0000000 --- a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/formatters/_mapping.py +++ /dev/null @@ -1,92 +0,0 @@ -# -*- coding: utf-8 -*- -""" - pygments.formatters._mapping - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - Formatter mapping defintions. This file is generated by itself. Everytime - you change something on a builtin formatter defintion, run this script from - the formatters folder to update it. - - Do not alter the FORMATTERS dictionary by hand. - - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -# start -from pygments.formatters.bbcode import BBCodeFormatter -from pygments.formatters.html import HtmlFormatter -from pygments.formatters.img import BmpImageFormatter -from pygments.formatters.img import GifImageFormatter -from pygments.formatters.img import ImageFormatter -from pygments.formatters.img import JpgImageFormatter -from pygments.formatters.latex import LatexFormatter -from pygments.formatters.other import NullFormatter -from pygments.formatters.other import RawTokenFormatter -from pygments.formatters.rtf import RtfFormatter -from pygments.formatters.svg import SvgFormatter -from pygments.formatters.terminal import TerminalFormatter -from pygments.formatters.terminal256 import Terminal256Formatter - -FORMATTERS = { - BBCodeFormatter: ('BBCode', ('bbcode', 'bb'), (), 'Format tokens with BBcodes. These formatting codes are used by many bulletin boards, so you can highlight your sourcecode with pygments before posting it there.'), - BmpImageFormatter: ('img_bmp', ('bmp', 'bitmap'), ('*.bmp',), 'Create a bitmap image from source code. 
This uses the Python Imaging Library to generate a pixmap from the source code.'), - GifImageFormatter: ('img_gif', ('gif',), ('*.gif',), 'Create a GIF image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'), - HtmlFormatter: ('HTML', ('html',), ('*.html', '*.htm'), "Format tokens as HTML 4 ```` tags within a ``
`` tag, wrapped in a ``
`` tag. The ``
``'s CSS class can be set by the `cssclass` option."), - ImageFormatter: ('img', ('img', 'IMG', 'png'), ('*.png',), 'Create a PNG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'), - JpgImageFormatter: ('img_jpg', ('jpg', 'jpeg'), ('*.jpg',), 'Create a JPEG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'), - LatexFormatter: ('LaTeX', ('latex', 'tex'), ('*.tex',), 'Format tokens as LaTeX code. This needs the `fancyvrb` and `color` standard packages.'), - NullFormatter: ('Text only', ('text', 'null'), ('*.txt',), 'Output the text unchanged without any formatting.'), - RawTokenFormatter: ('Raw tokens', ('raw', 'tokens'), ('*.raw',), 'Format tokens as a raw representation for storing token streams.'), - RtfFormatter: ('RTF', ('rtf',), ('*.rtf',), 'Format tokens as RTF markup. This formatter automatically outputs full RTF documents with color information and other useful stuff. Perfect for Copy and Paste into Microsoft\xc2\xae Word\xc2\xae documents.'), - SvgFormatter: ('SVG', ('svg',), ('*.svg',), 'Format tokens as an SVG graphics file. This formatter is still experimental. Each line of code is a ```` element with explicit ``x`` and ``y`` coordinates containing ```` elements with the individual token styles.'), - Terminal256Formatter: ('Terminal256', ('terminal256', 'console256', '256'), (), 'Format tokens with ANSI color sequences, for output in a 256-color terminal or console. Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'), - TerminalFormatter: ('Terminal', ('terminal', 'console'), (), 'Format tokens with ANSI color sequences, for output in a text console. 
Color sequences are terminated at newlines, so that paging the output works correctly.') -} - -if __name__ == '__main__': - import sys - import os - - # lookup formatters - found_formatters = [] - imports = [] - sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..')) - from pygments.util import docstring_headline - - for filename in os.listdir('.'): - if filename.endswith('.py') and not filename.startswith('_'): - module_name = 'pygments.formatters.%s' % filename[:-3] - print module_name - module = __import__(module_name, None, None, ['']) - for formatter_name in module.__all__: - imports.append((module_name, formatter_name)) - formatter = getattr(module, formatter_name) - found_formatters.append( - '%s: %r' % (formatter_name, - (formatter.name, - tuple(formatter.aliases), - tuple(formatter.filenames), - docstring_headline(formatter)))) - # sort them, that should make the diff files for svn smaller - found_formatters.sort() - imports.sort() - - # extract useful sourcecode from this file - f = open(__file__) - try: - content = f.read() - finally: - f.close() - header = content[:content.find('# start')] - footer = content[content.find("if __name__ == '__main__':"):] - - # write new file - f = open(__file__, 'w') - f.write(header) - f.write('# start\n') - f.write('\n'.join(['from %s import %s' % imp for imp in imports])) - f.write('\n\n') - f.write('FORMATTERS = {\n %s\n}\n\n' % ',\n '.join(found_formatters)) - f.write(footer) - f.close() diff --git a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/_mapping.py b/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/_mapping.py deleted file mode 100644 index 969bdba..0000000 --- a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/_mapping.py +++ /dev/null @@ -1,350 +0,0 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers._mapping - ~~~~~~~~~~~~~~~~~~~~~~~~ - - Lexer mapping defintions. This file is generated by itself. 
Everytime - you change something on a builtin lexer defintion, run this script from - the lexers folder to update it. - - Do not alter the LEXERS dictionary by hand. - - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -LEXERS = { - 'ABAPLexer': ('pygments.lexers.other', 'ABAP', ('abap',), ('*.abap',), ('text/x-abap',)), - 'ActionScript3Lexer': ('pygments.lexers.web', 'ActionScript 3', ('as3', 'actionscript3'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')), - 'ActionScriptLexer': ('pygments.lexers.web', 'ActionScript', ('as', 'actionscript'), ('*.as',), ('application/x-actionscript3', 'text/x-actionscript3', 'text/actionscript3')), - 'AdaLexer': ('pygments.lexers.compiled', 'Ada', ('ada', 'ada95ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)), - 'AgdaLexer': ('pygments.lexers.functional', 'Agda', ('agda',), ('*.agda',), ('text/x-agda',)), - 'AntlrActionScriptLexer': ('pygments.lexers.parsers', 'ANTLR With ActionScript Target', ('antlr-as', 'antlr-actionscript'), ('*.G', '*.g'), ()), - 'AntlrCSharpLexer': ('pygments.lexers.parsers', 'ANTLR With C# Target', ('antlr-csharp', 'antlr-c#'), ('*.G', '*.g'), ()), - 'AntlrCppLexer': ('pygments.lexers.parsers', 'ANTLR With CPP Target', ('antlr-cpp',), ('*.G', '*.g'), ()), - 'AntlrJavaLexer': ('pygments.lexers.parsers', 'ANTLR With Java Target', ('antlr-java',), ('*.G', '*.g'), ()), - 'AntlrLexer': ('pygments.lexers.parsers', 'ANTLR', ('antlr',), (), ()), - 'AntlrObjectiveCLexer': ('pygments.lexers.parsers', 'ANTLR With ObjectiveC Target', ('antlr-objc',), ('*.G', '*.g'), ()), - 'AntlrPerlLexer': ('pygments.lexers.parsers', 'ANTLR With Perl Target', ('antlr-perl',), ('*.G', '*.g'), ()), - 'AntlrPythonLexer': ('pygments.lexers.parsers', 'ANTLR With Python Target', ('antlr-python',), ('*.G', '*.g'), ()), - 'AntlrRubyLexer': ('pygments.lexers.parsers', 'ANTLR With Ruby Target', ('antlr-ruby', 'antlr-rb'), ('*.G', 
'*.g'), ()), - 'ApacheConfLexer': ('pygments.lexers.text', 'ApacheConf', ('apacheconf', 'aconf', 'apache'), ('.htaccess', 'apache.conf', 'apache2.conf'), ('text/x-apacheconf',)), - 'AppleScriptLexer': ('pygments.lexers.other', 'AppleScript', ('applescript',), ('*.applescript',), ()), - 'AspectJLexer': ('pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)), - 'AsymptoteLexer': ('pygments.lexers.other', 'Asymptote', ('asy', 'asymptote'), ('*.asy',), ('text/x-asymptote',)), - 'AutoItLexer': ('pygments.lexers.other', 'AutoIt', ('autoit', 'Autoit'), ('*.au3',), ('text/x-autoit',)), - 'AutohotkeyLexer': ('pygments.lexers.other', 'autohotkey', ('ahk', 'autohotkey'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)), - 'AwkLexer': ('pygments.lexers.other', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)), - 'BBCodeLexer': ('pygments.lexers.text', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)), - 'BaseMakefileLexer': ('pygments.lexers.text', 'Base Makefile', ('basemake',), (), ()), - 'BashLexer': ('pygments.lexers.shell', 'Bash', ('bash', 'sh', 'ksh'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', '.bashrc', 'bashrc', '.bash_*', 'bash_*'), ('application/x-sh', 'application/x-shellscript')), - 'BashSessionLexer': ('pygments.lexers.shell', 'Bash Session', ('console',), ('*.sh-session',), ('application/x-shell-session',)), - 'BatchLexer': ('pygments.lexers.shell', 'Batchfile', ('bat', 'dosbatch', 'winbatch'), ('*.bat', '*.cmd'), ('application/x-dos-batch',)), - 'BefungeLexer': ('pygments.lexers.other', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)), - 'BlitzBasicLexer': ('pygments.lexers.compiled', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)), - 'BlitzMaxLexer': ('pygments.lexers.compiled', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)), - 'BooLexer': ('pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)), - 'BrainfuckLexer': 
('pygments.lexers.other', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)), - 'BroLexer': ('pygments.lexers.other', 'Bro', ('bro',), ('*.bro',), ()), - 'BugsLexer': ('pygments.lexers.math', 'BUGS', ('bugs', 'winbugs', 'openbugs'), ('*.bug',), ()), - 'CLexer': ('pygments.lexers.compiled', 'C', ('c',), ('*.c', '*.h', '*.idc'), ('text/x-chdr', 'text/x-csrc')), - 'CMakeLexer': ('pygments.lexers.text', 'CMake', ('cmake',), ('*.cmake', 'CMakeLists.txt'), ('text/x-cmake',)), - 'CObjdumpLexer': ('pygments.lexers.asm', 'c-objdump', ('c-objdump',), ('*.c-objdump',), ('text/x-c-objdump',)), - 'CSharpAspxLexer': ('pygments.lexers.dotnet', 'aspx-cs', ('aspx-cs',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()), - 'CSharpLexer': ('pygments.lexers.dotnet', 'C#', ('csharp', 'c#'), ('*.cs',), ('text/x-csharp',)), - 'Ca65Lexer': ('pygments.lexers.asm', 'ca65', ('ca65',), ('*.s',), ()), - 'CbmBasicV2Lexer': ('pygments.lexers.other', 'CBM BASIC V2', ('cbmbas',), ('*.bas',), ()), - 'CeylonLexer': ('pygments.lexers.jvm', 'Ceylon', ('ceylon',), ('*.ceylon',), ('text/x-ceylon',)), - 'Cfengine3Lexer': ('pygments.lexers.other', 'CFEngine3', ('cfengine3', 'cf3'), ('*.cf',), ()), - 'CheetahHtmlLexer': ('pygments.lexers.templates', 'HTML+Cheetah', ('html+cheetah', 'html+spitfire', 'htmlcheetah'), (), ('text/html+cheetah', 'text/html+spitfire')), - 'CheetahJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Cheetah', ('js+cheetah', 'javascript+cheetah', 'js+spitfire', 'javascript+spitfire'), (), ('application/x-javascript+cheetah', 'text/x-javascript+cheetah', 'text/javascript+cheetah', 'application/x-javascript+spitfire', 'text/x-javascript+spitfire', 'text/javascript+spitfire')), - 'CheetahLexer': ('pygments.lexers.templates', 'Cheetah', ('cheetah', 'spitfire'), ('*.tmpl', '*.spt'), ('application/x-cheetah', 'application/x-spitfire')), - 'CheetahXmlLexer': ('pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), 
(), ('application/xml+cheetah', 'application/xml+spitfire')), - 'ClayLexer': ('pygments.lexers.compiled', 'Clay', ('clay',), ('*.clay',), ('text/x-clay',)), - 'ClojureLexer': ('pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj',), ('text/x-clojure', 'application/x-clojure')), - 'CobolFreeformatLexer': ('pygments.lexers.compiled', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()), - 'CobolLexer': ('pygments.lexers.compiled', 'COBOL', ('cobol',), ('*.cob', '*.COB', '*.cpy', '*.CPY'), ('text/x-cobol',)), - 'CoffeeScriptLexer': ('pygments.lexers.web', 'CoffeeScript', ('coffee-script', 'coffeescript', 'coffee'), ('*.coffee',), ('text/coffeescript',)), - 'ColdfusionHtmlLexer': ('pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml', '*.cfc'), ('application/x-coldfusion',)), - 'ColdfusionLexer': ('pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()), - 'CommonLispLexer': ('pygments.lexers.functional', 'Common Lisp', ('common-lisp', 'cl', 'lisp'), ('*.cl', '*.lisp', '*.el'), ('text/x-common-lisp',)), - 'CoqLexer': ('pygments.lexers.functional', 'Coq', ('coq',), ('*.v',), ('text/x-coq',)), - 'CppLexer': ('pygments.lexers.compiled', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP'), ('text/x-c++hdr', 'text/x-c++src')), - 'CppObjdumpLexer': ('pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'), ('text/x-cpp-objdump',)), - 'CrocLexer': ('pygments.lexers.agile', 'Croc', ('croc',), ('*.croc',), ('text/x-crocsrc',)), - 'CssDjangoLexer': ('pygments.lexers.templates', 'CSS+Django/Jinja', ('css+django', 'css+jinja'), (), ('text/css+django', 'text/css+jinja')), - 'CssErbLexer': ('pygments.lexers.templates', 'CSS+Ruby', ('css+erb', 'css+ruby'), (), ('text/css+ruby',)), - 'CssGenshiLexer': ('pygments.lexers.templates', 'CSS+Genshi Text', ('css+genshitext', 'css+genshi'), (), 
('text/css+genshi',)), - 'CssLexer': ('pygments.lexers.web', 'CSS', ('css',), ('*.css',), ('text/css',)), - 'CssPhpLexer': ('pygments.lexers.templates', 'CSS+PHP', ('css+php',), (), ('text/css+php',)), - 'CssSmartyLexer': ('pygments.lexers.templates', 'CSS+Smarty', ('css+smarty',), (), ('text/css+smarty',)), - 'CudaLexer': ('pygments.lexers.compiled', 'CUDA', ('cuda', 'cu'), ('*.cu', '*.cuh'), ('text/x-cuda',)), - 'CythonLexer': ('pygments.lexers.compiled', 'Cython', ('cython', 'pyx', 'pyrex'), ('*.pyx', '*.pxd', '*.pxi'), ('text/x-cython', 'application/x-cython')), - 'DLexer': ('pygments.lexers.compiled', 'D', ('d',), ('*.d', '*.di'), ('text/x-dsrc',)), - 'DObjdumpLexer': ('pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)), - 'DarcsPatchLexer': ('pygments.lexers.text', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()), - 'DartLexer': ('pygments.lexers.web', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)), - 'DebianControlLexer': ('pygments.lexers.text', 'Debian Control file', ('control', 'debcontrol'), ('control',), ()), - 'DelphiLexer': ('pygments.lexers.compiled', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas',), ('text/x-pascal',)), - 'DgLexer': ('pygments.lexers.agile', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)), - 'DiffLexer': ('pygments.lexers.text', 'Diff', ('diff', 'udiff'), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')), - 'DjangoLexer': ('pygments.lexers.templates', 'Django/Jinja', ('django', 'jinja'), (), ('application/x-django-templating', 'application/x-jinja')), - 'DtdLexer': ('pygments.lexers.web', 'DTD', ('dtd',), ('*.dtd',), ('application/xml-dtd',)), - 'DuelLexer': ('pygments.lexers.web', 'Duel', ('duel', 'Duel Engine', 'Duel View', 'JBST', 'jbst', 'JsonML+BST'), ('*.duel', '*.jbst'), ('text/x-duel', 'text/x-jbst')), - 'DylanConsoleLexer': ('pygments.lexers.compiled', 'Dylan session', ('dylan-console', 'dylan-repl'), ('*.dylan-console',), ('text/x-dylan-console',)), 
- 'DylanLexer': ('pygments.lexers.compiled', 'Dylan', ('dylan',), ('*.dylan', '*.dyl', '*.intr'), ('text/x-dylan',)), - 'DylanLidLexer': ('pygments.lexers.compiled', 'DylanLID', ('dylan-lid', 'lid'), ('*.lid', '*.hdp'), ('text/x-dylan-lid',)), - 'ECLLexer': ('pygments.lexers.other', 'ECL', ('ecl',), ('*.ecl',), ('application/x-ecl',)), - 'ECLexer': ('pygments.lexers.compiled', 'eC', ('ec',), ('*.ec', '*.eh'), ('text/x-echdr', 'text/x-ecsrc')), - 'EbnfLexer': ('pygments.lexers.text', 'EBNF', ('ebnf',), ('*.ebnf',), ('text/x-ebnf',)), - 'ElixirConsoleLexer': ('pygments.lexers.functional', 'Elixir iex session', ('iex',), (), ('text/x-elixir-shellsession',)), - 'ElixirLexer': ('pygments.lexers.functional', 'Elixir', ('elixir', 'ex', 'exs'), ('*.ex', '*.exs'), ('text/x-elixir',)), - 'ErbLexer': ('pygments.lexers.templates', 'ERB', ('erb',), (), ('application/x-ruby-templating',)), - 'ErlangLexer': ('pygments.lexers.functional', 'Erlang', ('erlang',), ('*.erl', '*.hrl', '*.es', '*.escript'), ('text/x-erlang',)), - 'ErlangShellLexer': ('pygments.lexers.functional', 'Erlang erl session', ('erl',), ('*.erl-sh',), ('text/x-erl-shellsession',)), - 'EvoqueHtmlLexer': ('pygments.lexers.templates', 'HTML+Evoque', ('html+evoque',), ('*.html',), ('text/html+evoque',)), - 'EvoqueLexer': ('pygments.lexers.templates', 'Evoque', ('evoque',), ('*.evoque',), ('application/x-evoque',)), - 'EvoqueXmlLexer': ('pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), ('*.xml',), ('application/xml+evoque',)), - 'FSharpLexer': ('pygments.lexers.dotnet', 'FSharp', ('fsharp',), ('*.fs', '*.fsi'), ('text/x-fsharp',)), - 'FactorLexer': ('pygments.lexers.agile', 'Factor', ('factor',), ('*.factor',), ('text/x-factor',)), - 'FancyLexer': ('pygments.lexers.agile', 'Fancy', ('fancy', 'fy'), ('*.fy', '*.fancypack'), ('text/x-fancysrc',)), - 'FantomLexer': ('pygments.lexers.compiled', 'Fantom', ('fan',), ('*.fan',), ('application/x-fantom',)), - 'FelixLexer': ('pygments.lexers.compiled', 'Felix', 
('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)), - 'FortranLexer': ('pygments.lexers.compiled', 'Fortran', ('fortran',), ('*.f', '*.f90', '*.F', '*.F90'), ('text/x-fortran',)), - 'FoxProLexer': ('pygments.lexers.foxpro', 'FoxPro', ('Clipper', 'XBase'), ('*.PRG', '*.prg'), ()), - 'GLShaderLexer': ('pygments.lexers.compiled', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)), - 'GasLexer': ('pygments.lexers.asm', 'GAS', ('gas', 'asm'), ('*.s', '*.S'), ('text/x-gas',)), - 'GenshiLexer': ('pygments.lexers.templates', 'Genshi', ('genshi', 'kid', 'xml+genshi', 'xml+kid'), ('*.kid',), ('application/x-genshi', 'application/x-kid')), - 'GenshiTextLexer': ('pygments.lexers.templates', 'Genshi Text', ('genshitext',), (), ('application/x-genshi-text', 'text/x-genshi')), - 'GettextLexer': ('pygments.lexers.text', 'Gettext Catalog', ('pot', 'po'), ('*.pot', '*.po'), ('application/x-gettext', 'text/x-gettext', 'text/gettext')), - 'GherkinLexer': ('pygments.lexers.other', 'Gherkin', ('Cucumber', 'cucumber', 'Gherkin', 'gherkin'), ('*.feature',), ('text/x-gherkin',)), - 'GnuplotLexer': ('pygments.lexers.other', 'Gnuplot', ('gnuplot',), ('*.plot', '*.plt'), ('text/x-gnuplot',)), - 'GoLexer': ('pygments.lexers.compiled', 'Go', ('go',), ('*.go',), ('text/x-gosrc',)), - 'GoodDataCLLexer': ('pygments.lexers.other', 'GoodData-CL', ('gooddata-cl',), ('*.gdc',), ('text/x-gooddata-cl',)), - 'GosuLexer': ('pygments.lexers.jvm', 'Gosu', ('gosu',), ('*.gs', '*.gsx', '*.gsp', '*.vark'), ('text/x-gosu',)), - 'GosuTemplateLexer': ('pygments.lexers.jvm', 'Gosu Template', ('gst',), ('*.gst',), ('text/x-gosu-template',)), - 'GroffLexer': ('pygments.lexers.text', 'Groff', ('groff', 'nroff', 'man'), ('*.[1234567]', '*.man'), ('application/x-troff', 'text/troff')), - 'GroovyLexer': ('pygments.lexers.jvm', 'Groovy', ('groovy',), ('*.groovy',), ('text/x-groovy',)), - 'HamlLexer': ('pygments.lexers.web', 'Haml', ('haml', 'HAML'), ('*.haml',), ('text/x-haml',)), - 
'HaskellLexer': ('pygments.lexers.functional', 'Haskell', ('haskell', 'hs'), ('*.hs',), ('text/x-haskell',)), - 'HaxeLexer': ('pygments.lexers.web', 'Haxe', ('hx', 'Haxe', 'haxe', 'haXe', 'hxsl'), ('*.hx', '*.hxsl'), ('text/haxe', 'text/x-haxe', 'text/x-hx')), - 'HtmlDjangoLexer': ('pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), (), ('text/html+django', 'text/html+jinja')), - 'HtmlGenshiLexer': ('pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)), - 'HtmlLexer': ('pygments.lexers.web', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')), - 'HtmlPhpLexer': ('pygments.lexers.templates', 'HTML+PHP', ('html+php',), ('*.phtml',), ('application/x-php', 'application/x-httpd-php', 'application/x-httpd-php3', 'application/x-httpd-php4', 'application/x-httpd-php5')), - 'HtmlSmartyLexer': ('pygments.lexers.templates', 'HTML+Smarty', ('html+smarty',), (), ('text/html+smarty',)), - 'HttpLexer': ('pygments.lexers.text', 'HTTP', ('http',), (), ()), - 'HxmlLexer': ('pygments.lexers.text', 'Hxml', ('haxeml', 'hxml'), ('*.hxml',), ()), - 'HybrisLexer': ('pygments.lexers.other', 'Hybris', ('hybris', 'hy'), ('*.hy', '*.hyb'), ('text/x-hybris', 'application/x-hybris')), - 'IDLLexer': ('pygments.lexers.math', 'IDL', ('idl',), ('*.pro',), ('text/idl',)), - 'IgorLexer': ('pygments.lexers.math', 'Igor', ('igor', 'igorpro'), ('*.ipf',), ('text/ipf',)), - 'IniLexer': ('pygments.lexers.text', 'INI', ('ini', 'cfg', 'dosini'), ('*.ini', '*.cfg'), ('text/x-ini',)), - 'IoLexer': ('pygments.lexers.agile', 'Io', ('io',), ('*.io',), ('text/x-iosrc',)), - 'IokeLexer': ('pygments.lexers.jvm', 'Ioke', ('ioke', 'ik'), ('*.ik',), ('text/x-iokesrc',)), - 'IrcLogsLexer': ('pygments.lexers.text', 'IRC logs', ('irc',), ('*.weechatlog',), ('text/x-irclog',)), - 'JadeLexer': ('pygments.lexers.web', 'Jade', ('jade', 'JADE'), ('*.jade',), ('text/x-jade',)), - 
'JagsLexer': ('pygments.lexers.math', 'JAGS', ('jags',), ('*.jag', '*.bug'), ()), - 'JavaLexer': ('pygments.lexers.jvm', 'Java', ('java',), ('*.java',), ('text/x-java',)), - 'JavascriptDjangoLexer': ('pygments.lexers.templates', 'JavaScript+Django/Jinja', ('js+django', 'javascript+django', 'js+jinja', 'javascript+jinja'), (), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')), - 'JavascriptErbLexer': ('pygments.lexers.templates', 'JavaScript+Ruby', ('js+erb', 'javascript+erb', 'js+ruby', 'javascript+ruby'), (), ('application/x-javascript+ruby', 'text/x-javascript+ruby', 'text/javascript+ruby')), - 'JavascriptGenshiLexer': ('pygments.lexers.templates', 'JavaScript+Genshi Text', ('js+genshitext', 'js+genshi', 'javascript+genshitext', 'javascript+genshi'), (), ('application/x-javascript+genshi', 'text/x-javascript+genshi', 'text/javascript+genshi')), - 'JavascriptLexer': ('pygments.lexers.web', 'JavaScript', ('js', 'javascript'), ('*.js',), ('application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript')), - 'JavascriptPhpLexer': ('pygments.lexers.templates', 'JavaScript+PHP', ('js+php', 'javascript+php'), (), ('application/x-javascript+php', 'text/x-javascript+php', 'text/javascript+php')), - 'JavascriptSmartyLexer': ('pygments.lexers.templates', 'JavaScript+Smarty', ('js+smarty', 'javascript+smarty'), (), ('application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty')), - 'JsonLexer': ('pygments.lexers.web', 'JSON', ('json',), ('*.json',), ('application/json',)), - 'JspLexer': ('pygments.lexers.templates', 'Java Server Page', ('jsp',), ('*.jsp',), ('application/x-jsp',)), - 'JuliaConsoleLexer': ('pygments.lexers.math', 'Julia console', ('jlcon',), (), ()), - 'JuliaLexer': ('pygments.lexers.math', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')), - 
'KconfigLexer': ('pygments.lexers.other', 'Kconfig', ('kconfig', 'menuconfig', 'linux-config', 'kernel-config'), ('Kconfig', '*Config.in*', 'external.in*', 'standard-modules.in'), ('text/x-kconfig',)), - 'KokaLexer': ('pygments.lexers.functional', 'Koka', ('koka',), ('*.kk', '*.kki'), ('text/x-koka',)), - 'KotlinLexer': ('pygments.lexers.jvm', 'Kotlin', ('kotlin',), ('*.kt',), ('text/x-kotlin',)), - 'LassoCssLexer': ('pygments.lexers.templates', 'CSS+Lasso', ('css+lasso',), (), ('text/css+lasso',)), - 'LassoHtmlLexer': ('pygments.lexers.templates', 'HTML+Lasso', ('html+lasso',), (), ('text/html+lasso', 'application/x-httpd-lasso', 'application/x-httpd-lasso[89]')), - 'LassoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Lasso', ('js+lasso', 'javascript+lasso'), (), ('application/x-javascript+lasso', 'text/x-javascript+lasso', 'text/javascript+lasso')), - 'LassoLexer': ('pygments.lexers.web', 'Lasso', ('lasso', 'lassoscript'), ('*.lasso', '*.lasso[89]'), ('text/x-lasso',)), - 'LassoXmlLexer': ('pygments.lexers.templates', 'XML+Lasso', ('xml+lasso',), (), ('application/xml+lasso',)), - 'LighttpdConfLexer': ('pygments.lexers.text', 'Lighttpd configuration file', ('lighty', 'lighttpd'), (), ('text/x-lighttpd-conf',)), - 'LiterateAgdaLexer': ('pygments.lexers.functional', 'Literate Agda', ('lagda', 'literate-agda'), ('*.lagda',), ('text/x-literate-agda',)), - 'LiterateHaskellLexer': ('pygments.lexers.functional', 'Literate Haskell', ('lhs', 'literate-haskell', 'lhaskell'), ('*.lhs',), ('text/x-literate-haskell',)), - 'LiveScriptLexer': ('pygments.lexers.web', 'LiveScript', ('live-script', 'livescript'), ('*.ls',), ('text/livescript',)), - 'LlvmLexer': ('pygments.lexers.asm', 'LLVM', ('llvm',), ('*.ll',), ('text/x-llvm',)), - 'LogosLexer': ('pygments.lexers.compiled', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)), - 'LogtalkLexer': ('pygments.lexers.other', 'Logtalk', ('logtalk',), ('*.lgt',), ('text/x-logtalk',)), - 'LuaLexer': 
('pygments.lexers.agile', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')), - 'MOOCodeLexer': ('pygments.lexers.other', 'MOOCode', ('moocode', 'moo'), ('*.moo',), ('text/x-moocode',)), - 'MakefileLexer': ('pygments.lexers.text', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)), - 'MakoCssLexer': ('pygments.lexers.templates', 'CSS+Mako', ('css+mako',), (), ('text/css+mako',)), - 'MakoHtmlLexer': ('pygments.lexers.templates', 'HTML+Mako', ('html+mako',), (), ('text/html+mako',)), - 'MakoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Mako', ('js+mako', 'javascript+mako'), (), ('application/x-javascript+mako', 'text/x-javascript+mako', 'text/javascript+mako')), - 'MakoLexer': ('pygments.lexers.templates', 'Mako', ('mako',), ('*.mao',), ('application/x-mako',)), - 'MakoXmlLexer': ('pygments.lexers.templates', 'XML+Mako', ('xml+mako',), (), ('application/xml+mako',)), - 'MaqlLexer': ('pygments.lexers.other', 'MAQL', ('maql',), ('*.maql',), ('text/x-gooddata-maql', 'application/x-gooddata-maql')), - 'MasonLexer': ('pygments.lexers.templates', 'Mason', ('mason',), ('*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler'), ('application/x-mason',)), - 'MatlabLexer': ('pygments.lexers.math', 'Matlab', ('matlab',), ('*.m',), ('text/matlab',)), - 'MatlabSessionLexer': ('pygments.lexers.math', 'Matlab session', ('matlabsession',), (), ()), - 'MiniDLexer': ('pygments.lexers.agile', 'MiniD', ('minid',), ('*.md',), ('text/x-minidsrc',)), - 'ModelicaLexer': ('pygments.lexers.other', 'Modelica', ('modelica',), ('*.mo',), ('text/x-modelica',)), - 'Modula2Lexer': ('pygments.lexers.compiled', 'Modula-2', ('modula2', 'm2'), ('*.def', '*.mod'), ('text/x-modula2',)), - 'MoinWikiLexer': ('pygments.lexers.text', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)), - 'MonkeyLexer': ('pygments.lexers.compiled', 'Monkey', ('monkey',), 
('*.monkey',), ('text/x-monkey',)), - 'MoonScriptLexer': ('pygments.lexers.agile', 'MoonScript', ('moon', 'moonscript'), ('*.moon',), ('text/x-moonscript', 'application/x-moonscript')), - 'MscgenLexer': ('pygments.lexers.other', 'Mscgen', ('mscgen', 'msc'), ('*.msc',), ()), - 'MuPADLexer': ('pygments.lexers.math', 'MuPAD', ('mupad',), ('*.mu',), ()), - 'MxmlLexer': ('pygments.lexers.web', 'MXML', ('mxml',), ('*.mxml',), ()), - 'MySqlLexer': ('pygments.lexers.sql', 'MySQL', ('mysql',), (), ('text/x-mysql',)), - 'MyghtyCssLexer': ('pygments.lexers.templates', 'CSS+Myghty', ('css+myghty',), (), ('text/css+myghty',)), - 'MyghtyHtmlLexer': ('pygments.lexers.templates', 'HTML+Myghty', ('html+myghty',), (), ('text/html+myghty',)), - 'MyghtyJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Myghty', ('js+myghty', 'javascript+myghty'), (), ('application/x-javascript+myghty', 'text/x-javascript+myghty', 'text/javascript+mygthy')), - 'MyghtyLexer': ('pygments.lexers.templates', 'Myghty', ('myghty',), ('*.myt', 'autodelegate'), ('application/x-myghty',)), - 'MyghtyXmlLexer': ('pygments.lexers.templates', 'XML+Myghty', ('xml+myghty',), (), ('application/xml+myghty',)), - 'NSISLexer': ('pygments.lexers.other', 'NSIS', ('nsis', 'nsi', 'nsh'), ('*.nsi', '*.nsh'), ('text/x-nsis',)), - 'NasmLexer': ('pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM'), ('text/x-nasm',)), - 'NemerleLexer': ('pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), ('text/x-nemerle',)), - 'NesCLexer': ('pygments.lexers.compiled', 'nesC', ('nesc',), ('*.nc',), ('text/x-nescsrc',)), - 'NewLispLexer': ('pygments.lexers.functional', 'NewLisp', ('newlisp',), ('*.lsp', '*.nl'), ('text/x-newlisp', 'application/x-newlisp')), - 'NewspeakLexer': ('pygments.lexers.other', 'Newspeak', ('newspeak',), ('*.ns2',), ('text/x-newspeak',)), - 'NginxConfLexer': ('pygments.lexers.text', 'Nginx configuration file', ('nginx',), (), ('text/x-nginx-conf',)), - 'NimrodLexer': ('pygments.lexers.compiled', 
'Nimrod', ('nimrod', 'nim'), ('*.nim', '*.nimrod'), ('text/x-nimrod',)), - 'NumPyLexer': ('pygments.lexers.math', 'NumPy', ('numpy',), (), ()), - 'ObjdumpLexer': ('pygments.lexers.asm', 'objdump', ('objdump',), ('*.objdump',), ('text/x-objdump',)), - 'ObjectiveCLexer': ('pygments.lexers.compiled', 'Objective-C', ('objective-c', 'objectivec', 'obj-c', 'objc'), ('*.m', '*.h'), ('text/x-objective-c',)), - 'ObjectiveCppLexer': ('pygments.lexers.compiled', 'Objective-C++', ('objective-c++', 'objectivec++', 'obj-c++', 'objc++'), ('*.mm', '*.hh'), ('text/x-objective-c++',)), - 'ObjectiveJLexer': ('pygments.lexers.web', 'Objective-J', ('objective-j', 'objectivej', 'obj-j', 'objj'), ('*.j',), ('text/x-objective-j',)), - 'OcamlLexer': ('pygments.lexers.functional', 'OCaml', ('ocaml',), ('*.ml', '*.mli', '*.mll', '*.mly'), ('text/x-ocaml',)), - 'OctaveLexer': ('pygments.lexers.math', 'Octave', ('octave',), ('*.m',), ('text/octave',)), - 'OocLexer': ('pygments.lexers.compiled', 'Ooc', ('ooc',), ('*.ooc',), ('text/x-ooc',)), - 'OpaLexer': ('pygments.lexers.functional', 'Opa', ('opa',), ('*.opa',), ('text/x-opa',)), - 'OpenEdgeLexer': ('pygments.lexers.other', 'OpenEdge ABL', ('openedge', 'abl', 'progress'), ('*.p', '*.cls'), ('text/x-openedge', 'application/x-openedge')), - 'Perl6Lexer': ('pygments.lexers.agile', 'Perl6', ('perl6', 'pl6'), ('*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', '*.6pm', '*.p6m', '*.pm6'), ('text/x-perl6', 'application/x-perl6')), - 'PerlLexer': ('pygments.lexers.agile', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm'), ('text/x-perl', 'application/x-perl')), - 'PhpLexer': ('pygments.lexers.web', 'PHP', ('php', 'php3', 'php4', 'php5'), ('*.php', '*.php[345]', '*.inc'), ('text/x-php',)), - 'PlPgsqlLexer': ('pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)), - 'PostScriptLexer': ('pygments.lexers.other', 'PostScript', ('postscript', 'postscr'), ('*.ps', '*.eps'), ('application/postscript',)), - 'PostgresConsoleLexer': 
('pygments.lexers.sql', 'PostgreSQL console (psql)', ('psql', 'postgresql-console', 'postgres-console'), (), ('text/x-postgresql-psql',)), - 'PostgresLexer': ('pygments.lexers.sql', 'PostgreSQL SQL dialect', ('postgresql', 'postgres'), (), ('text/x-postgresql',)), - 'PovrayLexer': ('pygments.lexers.other', 'POVRay', ('pov',), ('*.pov', '*.inc'), ('text/x-povray',)), - 'PowerShellLexer': ('pygments.lexers.shell', 'PowerShell', ('powershell', 'posh', 'ps1', 'psm1'), ('*.ps1', '*.psm1'), ('text/x-powershell',)), - 'PrologLexer': ('pygments.lexers.compiled', 'Prolog', ('prolog',), ('*.prolog', '*.pro', '*.pl'), ('text/x-prolog',)), - 'PropertiesLexer': ('pygments.lexers.text', 'Properties', ('properties', 'jproperties'), ('*.properties',), ('text/x-java-properties',)), - 'ProtoBufLexer': ('pygments.lexers.other', 'Protocol Buffer', ('protobuf', 'proto'), ('*.proto',), ()), - 'PuppetLexer': ('pygments.lexers.other', 'Puppet', ('puppet',), ('*.pp',), ()), - 'PyPyLogLexer': ('pygments.lexers.text', 'PyPy Log', ('pypylog', 'pypy'), ('*.pypylog',), ('application/x-pypylog',)), - 'Python3Lexer': ('pygments.lexers.agile', 'Python 3', ('python3', 'py3'), (), ('text/x-python3', 'application/x-python3')), - 'Python3TracebackLexer': ('pygments.lexers.agile', 'Python 3.0 Traceback', ('py3tb',), ('*.py3tb',), ('text/x-python3-traceback',)), - 'PythonConsoleLexer': ('pygments.lexers.agile', 'Python console session', ('pycon',), (), ('text/x-python-doctest',)), - 'PythonLexer': ('pygments.lexers.agile', 'Python', ('python', 'py', 'sage'), ('*.py', '*.pyw', '*.sc', 'SConstruct', 'SConscript', '*.tac', '*.sage'), ('text/x-python', 'application/x-python')), - 'PythonTracebackLexer': ('pygments.lexers.agile', 'Python Traceback', ('pytb',), ('*.pytb',), ('text/x-python-traceback',)), - 'QmlLexer': ('pygments.lexers.web', 'QML', ('qml', 'Qt Meta Language', 'Qt modeling Language'), ('*.qml',), ('application/x-qml',)), - 'RConsoleLexer': ('pygments.lexers.math', 'RConsole', ('rconsole', 
'rout'), ('*.Rout',), ()), - 'RPMSpecLexer': ('pygments.lexers.other', 'RPMSpec', ('spec',), ('*.spec',), ('text/x-rpm-spec',)), - 'RacketLexer': ('pygments.lexers.functional', 'Racket', ('racket', 'rkt'), ('*.rkt', '*.rktl'), ('text/x-racket', 'application/x-racket')), - 'RagelCLexer': ('pygments.lexers.parsers', 'Ragel in C Host', ('ragel-c',), ('*.rl',), ()), - 'RagelCppLexer': ('pygments.lexers.parsers', 'Ragel in CPP Host', ('ragel-cpp',), ('*.rl',), ()), - 'RagelDLexer': ('pygments.lexers.parsers', 'Ragel in D Host', ('ragel-d',), ('*.rl',), ()), - 'RagelEmbeddedLexer': ('pygments.lexers.parsers', 'Embedded Ragel', ('ragel-em',), ('*.rl',), ()), - 'RagelJavaLexer': ('pygments.lexers.parsers', 'Ragel in Java Host', ('ragel-java',), ('*.rl',), ()), - 'RagelLexer': ('pygments.lexers.parsers', 'Ragel', ('ragel',), (), ()), - 'RagelObjectiveCLexer': ('pygments.lexers.parsers', 'Ragel in Objective C Host', ('ragel-objc',), ('*.rl',), ()), - 'RagelRubyLexer': ('pygments.lexers.parsers', 'Ragel in Ruby Host', ('ragel-ruby', 'ragel-rb'), ('*.rl',), ()), - 'RawTokenLexer': ('pygments.lexers.special', 'Raw token data', ('raw',), (), ('application/x-pygments-tokens',)), - 'RdLexer': ('pygments.lexers.math', 'Rd', ('rd',), ('*.Rd',), ('text/x-r-doc',)), - 'RebolLexer': ('pygments.lexers.other', 'REBOL', ('rebol',), ('*.r', '*.r3'), ('text/x-rebol',)), - 'RedcodeLexer': ('pygments.lexers.other', 'Redcode', ('redcode',), ('*.cw',), ()), - 'RegeditLexer': ('pygments.lexers.text', 'reg', ('registry',), ('*.reg',), ('text/x-windows-registry',)), - 'RexxLexer': ('pygments.lexers.other', 'Rexx', ('rexx', 'ARexx', 'arexx'), ('*.rexx', '*.rex', '*.rx', '*.arexx'), ('text/x-rexx',)), - 'RhtmlLexer': ('pygments.lexers.templates', 'RHTML', ('rhtml', 'html+erb', 'html+ruby'), ('*.rhtml',), ('text/html+ruby',)), - 'RobotFrameworkLexer': ('pygments.lexers.other', 'RobotFramework', ('RobotFramework', 'robotframework'), ('*.txt', '*.robot'), ('text/x-robotframework',)), - 'RstLexer': 
('pygments.lexers.text', 'reStructuredText', ('rst', 'rest', 'restructuredtext'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')), - 'RubyConsoleLexer': ('pygments.lexers.agile', 'Ruby irb session', ('rbcon', 'irb'), (), ('text/x-ruby-shellsession',)), - 'RubyLexer': ('pygments.lexers.agile', 'Ruby', ('rb', 'ruby', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby'), ('text/x-ruby', 'application/x-ruby')), - 'RustLexer': ('pygments.lexers.compiled', 'Rust', ('rust',), ('*.rs', '*.rc'), ('text/x-rustsrc',)), - 'SLexer': ('pygments.lexers.math', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')), - 'SMLLexer': ('pygments.lexers.functional', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')), - 'SassLexer': ('pygments.lexers.web', 'Sass', ('sass', 'SASS'), ('*.sass',), ('text/x-sass',)), - 'ScalaLexer': ('pygments.lexers.jvm', 'Scala', ('scala',), ('*.scala',), ('text/x-scala',)), - 'ScamlLexer': ('pygments.lexers.web', 'Scaml', ('scaml', 'SCAML'), ('*.scaml',), ('text/x-scaml',)), - 'SchemeLexer': ('pygments.lexers.functional', 'Scheme', ('scheme', 'scm'), ('*.scm', '*.ss'), ('text/x-scheme', 'application/x-scheme')), - 'ScilabLexer': ('pygments.lexers.math', 'Scilab', ('scilab',), ('*.sci', '*.sce', '*.tst'), ('text/scilab',)), - 'ScssLexer': ('pygments.lexers.web', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)), - 'ShellSessionLexer': ('pygments.lexers.shell', 'Shell Session', ('shell-session',), ('*.shell-session',), ('application/x-sh-session',)), - 'SmaliLexer': ('pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)), - 'SmalltalkLexer': ('pygments.lexers.other', 'Smalltalk', ('smalltalk', 'squeak', 'st'), ('*.st',), ('text/x-smalltalk',)), - 'SmartyLexer': ('pygments.lexers.templates', 'Smarty', ('smarty',), 
('*.tpl',), ('application/x-smarty',)), - 'SnobolLexer': ('pygments.lexers.other', 'Snobol', ('snobol',), ('*.snobol',), ('text/x-snobol',)), - 'SourcePawnLexer': ('pygments.lexers.other', 'SourcePawn', ('sp',), ('*.sp',), ('text/x-sourcepawn',)), - 'SourcesListLexer': ('pygments.lexers.text', 'Debian Sourcelist', ('sourceslist', 'sources.list', 'debsources'), ('sources.list',), ()), - 'SqlLexer': ('pygments.lexers.sql', 'SQL', ('sql',), ('*.sql',), ('text/x-sql',)), - 'SqliteConsoleLexer': ('pygments.lexers.sql', 'sqlite3con', ('sqlite3',), ('*.sqlite3-console',), ('text/x-sqlite3-console',)), - 'SquidConfLexer': ('pygments.lexers.text', 'SquidConf', ('squidconf', 'squid.conf', 'squid'), ('squid.conf',), ('text/x-squidconf',)), - 'SspLexer': ('pygments.lexers.templates', 'Scalate Server Page', ('ssp',), ('*.ssp',), ('application/x-ssp',)), - 'StanLexer': ('pygments.lexers.math', 'Stan', ('stan',), ('*.stan',), ()), - 'SwigLexer': ('pygments.lexers.compiled', 'SWIG', ('Swig', 'swig'), ('*.swg', '*.i'), ('text/swig',)), - 'SystemVerilogLexer': ('pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)), - 'TclLexer': ('pygments.lexers.agile', 'Tcl', ('tcl',), ('*.tcl',), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')), - 'TcshLexer': ('pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)), - 'TeaTemplateLexer': ('pygments.lexers.templates', 'Tea', ('tea',), ('*.tea',), ('text/x-tea',)), - 'TexLexer': ('pygments.lexers.text', 'TeX', ('tex', 'latex'), ('*.tex', '*.aux', '*.toc'), ('text/x-tex', 'text/x-latex')), - 'TextLexer': ('pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)), - 'TreetopLexer': ('pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()), - 'TypeScriptLexer': ('pygments.lexers.web', 'TypeScript', ('ts',), ('*.ts',), ('text/x-typescript',)), - 'UrbiscriptLexer': ('pygments.lexers.other', 'UrbiScript', 
('urbiscript',), ('*.u',), ('application/x-urbiscript',)), - 'VGLLexer': ('pygments.lexers.other', 'VGL', ('vgl',), ('*.rpf',), ()), - 'ValaLexer': ('pygments.lexers.compiled', 'Vala', ('vala', 'vapi'), ('*.vala', '*.vapi'), ('text/x-vala',)), - 'VbNetAspxLexer': ('pygments.lexers.dotnet', 'aspx-vb', ('aspx-vb',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()), - 'VbNetLexer': ('pygments.lexers.dotnet', 'VB.net', ('vb.net', 'vbnet'), ('*.vb', '*.bas'), ('text/x-vbnet', 'text/x-vba')), - 'VelocityHtmlLexer': ('pygments.lexers.templates', 'HTML+Velocity', ('html+velocity',), (), ('text/html+velocity',)), - 'VelocityLexer': ('pygments.lexers.templates', 'Velocity', ('velocity',), ('*.vm', '*.fhtml'), ()), - 'VelocityXmlLexer': ('pygments.lexers.templates', 'XML+Velocity', ('xml+velocity',), (), ('application/xml+velocity',)), - 'VerilogLexer': ('pygments.lexers.hdl', 'verilog', ('verilog', 'v'), ('*.v',), ('text/x-verilog',)), - 'VhdlLexer': ('pygments.lexers.hdl', 'vhdl', ('vhdl',), ('*.vhdl', '*.vhd'), ('text/x-vhdl',)), - 'VimLexer': ('pygments.lexers.text', 'VimL', ('vim',), ('*.vim', '.vimrc', '.exrc', '.gvimrc', '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'), ('text/x-vim',)), - 'XQueryLexer': ('pygments.lexers.web', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')), - 'XmlDjangoLexer': ('pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), (), ('application/xml+django', 'application/xml+jinja')), - 'XmlErbLexer': ('pygments.lexers.templates', 'XML+Ruby', ('xml+erb', 'xml+ruby'), (), ('application/xml+ruby',)), - 'XmlLexer': ('pygments.lexers.web', 'XML', ('xml',), ('*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl', '*.wsf'), ('text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml')), - 'XmlPhpLexer': ('pygments.lexers.templates', 'XML+PHP', ('xml+php',), (), ('application/xml+php',)), - 
'XmlSmartyLexer': ('pygments.lexers.templates', 'XML+Smarty', ('xml+smarty',), (), ('application/xml+smarty',)), - 'XsltLexer': ('pygments.lexers.web', 'XSLT', ('xslt',), ('*.xsl', '*.xslt', '*.xpl'), ('application/xsl+xml', 'application/xslt+xml')), - 'XtendLexer': ('pygments.lexers.jvm', 'Xtend', ('xtend',), ('*.xtend',), ('text/x-xtend',)), - 'YamlLexer': ('pygments.lexers.text', 'YAML', ('yaml',), ('*.yaml', '*.yml'), ('text/x-yaml',)), -} - -if __name__ == '__main__': - import sys - import os - - # lookup lexers - found_lexers = [] - sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..')) - for filename in os.listdir('.'): - if filename.endswith('.py') and not filename.startswith('_'): - module_name = 'pygments.lexers.%s' % filename[:-3] - print module_name - module = __import__(module_name, None, None, ['']) - for lexer_name in module.__all__: - lexer = getattr(module, lexer_name) - found_lexers.append( - '%r: %r' % (lexer_name, - (module_name, - lexer.name, - tuple(lexer.aliases), - tuple(lexer.filenames), - tuple(lexer.mimetypes)))) - # sort them, that should make the diff files for svn smaller - found_lexers.sort() - - # extract useful sourcecode from this file - f = open(__file__) - try: - content = f.read() - finally: - f.close() - header = content[:content.find('LEXERS = {')] - footer = content[content.find("if __name__ == '__main__':"):] - - # write new file - f = open(__file__, 'wb') - f.write(header) - f.write('LEXERS = {\n %s,\n}\n\n' % ',\n '.join(found_lexers)) - f.write(footer) - f.close() diff --git a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/_openedgebuiltins.py b/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/_openedgebuiltins.py deleted file mode 100644 index 4561b07..0000000 --- a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/_openedgebuiltins.py +++ /dev/null @@ -1,562 +0,0 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers._openedgebuiltins - 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - Builtin list for the OpenEdgeLexer. - - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -OPENEDGEKEYWORDS = [ - 'ABSOLUTE', 'ABS', 'ABSO', 'ABSOL', 'ABSOLU', 'ABSOLUT', 'ACCELERATOR', - 'ACCUM', 'ACCUMULATE', 'ACCUM', 'ACCUMU', 'ACCUMUL', 'ACCUMULA', - 'ACCUMULAT', 'ACTIVE-FORM', 'ACTIVE-WINDOW', 'ADD', 'ADD-BUFFER', - 'ADD-CALC-COLUMN', 'ADD-COLUMNS-FROM', 'ADD-EVENTS-PROCEDURE', - 'ADD-FIELDS-FROM', 'ADD-FIRST', 'ADD-INDEX-FIELD', 'ADD-LAST', - 'ADD-LIKE-COLUMN', 'ADD-LIKE-FIELD', 'ADD-LIKE-INDEX', 'ADD-NEW-FIELD', - 'ADD-NEW-INDEX', 'ADD-SCHEMA-LOCATION', 'ADD-SUPER-PROCEDURE', 'ADM-DATA', - 'ADVISE', 'ALERT-BOX', 'ALIAS', 'ALL', 'ALLOW-COLUMN-SEARCHING', - 'ALLOW-REPLICATION', 'ALTER', 'ALWAYS-ON-TOP', 'AMBIGUOUS', 'AMBIG', - 'AMBIGU', 'AMBIGUO', 'AMBIGUOU', 'ANALYZE', 'ANALYZ', 'AND', 'ANSI-ONLY', - 'ANY', 'ANYWHERE', 'APPEND', 'APPL-ALERT-BOXES', 'APPL-ALERT', - 'APPL-ALERT-', 'APPL-ALERT-B', 'APPL-ALERT-BO', 'APPL-ALERT-BOX', - 'APPL-ALERT-BOXE', 'APPL-CONTEXT-ID', 'APPLICATION', 'APPLY', - 'APPSERVER-INFO', 'APPSERVER-PASSWORD', 'APPSERVER-USERID', 'ARRAY-MESSAGE', - 'AS', 'ASC', 'ASCENDING', 'ASCE', 'ASCEN', 'ASCEND', 'ASCENDI', 'ASCENDIN', - 'ASK-OVERWRITE', 'ASSEMBLY', 'ASSIGN', 'ASYNCHRONOUS', - 'ASYNC-REQUEST-COUNT', 'ASYNC-REQUEST-HANDLE', 'AT', 'ATTACHED-PAIRLIST', - 'ATTR-SPACE', 'ATTR', 'ATTRI', 'ATTRIB', 'ATTRIBU', 'ATTRIBUT', - 'AUDIT-CONTROL', 'AUDIT-ENABLED', 'AUDIT-EVENT-CONTEXT', 'AUDIT-POLICY', - 'AUTHENTICATION-FAILED', 'AUTHORIZATION', 'AUTO-COMPLETION', 'AUTO-COMP', - 'AUTO-COMPL', 'AUTO-COMPLE', 'AUTO-COMPLET', 'AUTO-COMPLETI', - 'AUTO-COMPLETIO', 'AUTO-ENDKEY', 'AUTO-END-KEY', 'AUTO-GO', 'AUTO-INDENT', - 'AUTO-IND', 'AUTO-INDE', 'AUTO-INDEN', 'AUTOMATIC', 'AUTO-RESIZE', - 'AUTO-RETURN', 'AUTO-RET', 'AUTO-RETU', 'AUTO-RETUR', 'AUTO-SYNCHRONIZE', - 'AUTO-ZAP', 'AUTO-Z', 'AUTO-ZA', 'AVAILABLE', 'AVAIL', 'AVAILA', 'AVAILAB', - 
'AVAILABL', 'AVAILABLE-FORMATS', 'AVERAGE', 'AVE', 'AVER', 'AVERA', - 'AVERAG', 'AVG', 'BACKGROUND', 'BACK', 'BACKG', 'BACKGR', 'BACKGRO', - 'BACKGROU', 'BACKGROUN', 'BACKWARDS', 'BACKWARD', 'BASE64-DECODE', - 'BASE64-ENCODE', 'BASE-ADE', 'BASE-KEY', 'BATCH-MODE', 'BATCH', 'BATCH-', - 'BATCH-M', 'BATCH-MO', 'BATCH-MOD', 'BATCH-SIZE', 'BEFORE-HIDE', 'BEFORE-H', - 'BEFORE-HI', 'BEFORE-HID', 'BEGIN-EVENT-GROUP', 'BEGINS', 'BELL', 'BETWEEN', - 'BGCOLOR', 'BGC', 'BGCO', 'BGCOL', 'BGCOLO', 'BIG-ENDIAN', 'BINARY', 'BIND', - 'BIND-WHERE', 'BLANK', 'BLOCK-ITERATION-DISPLAY', 'BORDER-BOTTOM-CHARS', - 'BORDER-B', 'BORDER-BO', 'BORDER-BOT', 'BORDER-BOTT', 'BORDER-BOTTO', - 'BORDER-BOTTOM-PIXELS', 'BORDER-BOTTOM-P', 'BORDER-BOTTOM-PI', - 'BORDER-BOTTOM-PIX', 'BORDER-BOTTOM-PIXE', 'BORDER-BOTTOM-PIXEL', - 'BORDER-LEFT-CHARS', 'BORDER-L', 'BORDER-LE', 'BORDER-LEF', 'BORDER-LEFT', - 'BORDER-LEFT-', 'BORDER-LEFT-C', 'BORDER-LEFT-CH', 'BORDER-LEFT-CHA', - 'BORDER-LEFT-CHAR', 'BORDER-LEFT-PIXELS', 'BORDER-LEFT-P', 'BORDER-LEFT-PI', - 'BORDER-LEFT-PIX', 'BORDER-LEFT-PIXE', 'BORDER-LEFT-PIXEL', - 'BORDER-RIGHT-CHARS', 'BORDER-R', 'BORDER-RI', 'BORDER-RIG', 'BORDER-RIGH', - 'BORDER-RIGHT', 'BORDER-RIGHT-', 'BORDER-RIGHT-C', 'BORDER-RIGHT-CH', - 'BORDER-RIGHT-CHA', 'BORDER-RIGHT-CHAR', 'BORDER-RIGHT-PIXELS', - 'BORDER-RIGHT-P', 'BORDER-RIGHT-PI', 'BORDER-RIGHT-PIX', - 'BORDER-RIGHT-PIXE', 'BORDER-RIGHT-PIXEL', 'BORDER-TOP-CHARS', 'BORDER-T', - 'BORDER-TO', 'BORDER-TOP', 'BORDER-TOP-', 'BORDER-TOP-C', 'BORDER-TOP-CH', - 'BORDER-TOP-CHA', 'BORDER-TOP-CHAR', 'BORDER-TOP-PIXELS', 'BORDER-TOP-P', - 'BORDER-TOP-PI', 'BORDER-TOP-PIX', 'BORDER-TOP-PIXE', 'BORDER-TOP-PIXEL', - 'BOX', 'BOX-SELECTABLE', 'BOX-SELECT', 'BOX-SELECTA', 'BOX-SELECTAB', - 'BOX-SELECTABL', 'BREAK', 'BROWSE', 'BUFFER', 'BUFFER-CHARS', - 'BUFFER-COMPARE', 'BUFFER-COPY', 'BUFFER-CREATE', 'BUFFER-DELETE', - 'BUFFER-FIELD', 'BUFFER-HANDLE', 'BUFFER-LINES', 'BUFFER-NAME', - 'BUFFER-RELEASE', 'BUFFER-VALUE', 'BUTTON', 'BUTTONS', 
'BUTTON', 'BY', - 'BY-POINTER', 'BY-VARIANT-POINTER', 'CACHE', 'CACHE-SIZE', 'CALL', - 'CALL-NAME', 'CALL-TYPE', 'CANCEL-BREAK', 'CANCEL-BUTTON', 'CAN-CREATE', - 'CAN-DELETE', 'CAN-DO', 'CAN-FIND', 'CAN-QUERY', 'CAN-READ', 'CAN-SET', - 'CAN-WRITE', 'CAPS', 'CAREFUL-PAINT', 'CASE', 'CASE-SENSITIVE', 'CASE-SEN', - 'CASE-SENS', 'CASE-SENSI', 'CASE-SENSIT', 'CASE-SENSITI', 'CASE-SENSITIV', - 'CAST', 'CATCH', 'CDECL', 'CENTERED', 'CENTER', 'CENTERE', 'CHAINED', - 'CHARACTER_LENGTH', 'CHARSET', 'CHECK', 'CHECKED', 'CHOOSE', 'CHR', 'CLASS', - 'CLASS-TYPE', 'CLEAR', 'CLEAR-APPL-CONTEXT', 'CLEAR-LOG', 'CLEAR-SELECTION', - 'CLEAR-SELECT', 'CLEAR-SELECTI', 'CLEAR-SELECTIO', 'CLEAR-SORT-ARROWS', - 'CLEAR-SORT-ARROW', 'CLIENT-CONNECTION-ID', 'CLIENT-PRINCIPAL', - 'CLIENT-TTY', 'CLIENT-TYPE', 'CLIENT-WORKSTATION', 'CLIPBOARD', 'CLOSE', - 'CLOSE-LOG', 'CODE', 'CODEBASE-LOCATOR', 'CODEPAGE', 'CODEPAGE-CONVERT', - 'COLLATE', 'COL-OF', 'COLON', 'COLON-ALIGNED', 'COLON-ALIGN', - 'COLON-ALIGNE', 'COLOR', 'COLOR-TABLE', 'COLUMN', 'COL', 'COLU', 'COLUM', - 'COLUMN-BGCOLOR', 'COLUMN-DCOLOR', 'COLUMN-FGCOLOR', 'COLUMN-FONT', - 'COLUMN-LABEL', 'COLUMN-LAB', 'COLUMN-LABE', 'COLUMN-MOVABLE', 'COLUMN-OF', - 'COLUMN-PFCOLOR', 'COLUMN-READ-ONLY', 'COLUMN-RESIZABLE', 'COLUMNS', - 'COLUMN-SCROLLING', 'COMBO-BOX', 'COMMAND', 'COMPARES', 'COMPILE', - 'COMPILER', 'COMPLETE', 'COM-SELF', 'CONFIG-NAME', 'CONNECT', 'CONNECTED', - 'CONSTRUCTOR', 'CONTAINS', 'CONTENTS', 'CONTEXT', 'CONTEXT-HELP', - 'CONTEXT-HELP-FILE', 'CONTEXT-HELP-ID', 'CONTEXT-POPUP', 'CONTROL', - 'CONTROL-BOX', 'CONTROL-FRAME', 'CONVERT', 'CONVERT-3D-COLORS', - 'CONVERT-TO-OFFSET', 'CONVERT-TO-OFFS', 'CONVERT-TO-OFFSE', 'COPY-DATASET', - 'COPY-LOB', 'COPY-SAX-ATTRIBUTES', 'COPY-TEMP-TABLE', 'COUNT', 'COUNT-OF', - 'CPCASE', 'CPCOLL', 'CPINTERNAL', 'CPLOG', 'CPPRINT', 'CPRCODEIN', - 'CPRCODEOUT', 'CPSTREAM', 'CPTERM', 'CRC-VALUE', 'CREATE', 'CREATE-LIKE', - 'CREATE-LIKE-SEQUENTIAL', 'CREATE-NODE-NAMESPACE', - 
'CREATE-RESULT-LIST-ENTRY', 'CREATE-TEST-FILE', 'CURRENT', 'CURRENT_DATE', - 'CURRENT_DATE', 'CURRENT-CHANGED', 'CURRENT-COLUMN', 'CURRENT-ENVIRONMENT', - 'CURRENT-ENV', 'CURRENT-ENVI', 'CURRENT-ENVIR', 'CURRENT-ENVIRO', - 'CURRENT-ENVIRON', 'CURRENT-ENVIRONM', 'CURRENT-ENVIRONME', - 'CURRENT-ENVIRONMEN', 'CURRENT-ITERATION', 'CURRENT-LANGUAGE', - 'CURRENT-LANG', 'CURRENT-LANGU', 'CURRENT-LANGUA', 'CURRENT-LANGUAG', - 'CURRENT-QUERY', 'CURRENT-RESULT-ROW', 'CURRENT-ROW-MODIFIED', - 'CURRENT-VALUE', 'CURRENT-WINDOW', 'CURSOR', 'CURS', 'CURSO', 'CURSOR-CHAR', - 'CURSOR-LINE', 'CURSOR-OFFSET', 'DATABASE', 'DATA-BIND', - 'DATA-ENTRY-RETURN', 'DATA-ENTRY-RET', 'DATA-ENTRY-RETU', - 'DATA-ENTRY-RETUR', 'DATA-RELATION', 'DATA-REL', 'DATA-RELA', 'DATA-RELAT', - 'DATA-RELATI', 'DATA-RELATIO', 'DATASERVERS', 'DATASET', 'DATASET-HANDLE', - 'DATA-SOURCE', 'DATA-SOURCE-COMPLETE-MAP', 'DATA-SOURCE-MODIFIED', - 'DATA-SOURCE-ROWID', 'DATA-TYPE', 'DATA-T', 'DATA-TY', 'DATA-TYP', - 'DATE-FORMAT', 'DATE-F', 'DATE-FO', 'DATE-FOR', 'DATE-FORM', 'DATE-FORMA', - 'DAY', 'DBCODEPAGE', 'DBCOLLATION', 'DBNAME', 'DBPARAM', 'DB-REFERENCES', - 'DBRESTRICTIONS', 'DBREST', 'DBRESTR', 'DBRESTRI', 'DBRESTRIC', - 'DBRESTRICT', 'DBRESTRICTI', 'DBRESTRICTIO', 'DBRESTRICTION', 'DBTASKID', - 'DBTYPE', 'DBVERSION', 'DBVERS', 'DBVERSI', 'DBVERSIO', 'DCOLOR', 'DDE', - 'DDE-ERROR', 'DDE-ID', 'DDE-I', 'DDE-ITEM', 'DDE-NAME', 'DDE-TOPIC', - 'DEBLANK', 'DEBUG', 'DEBU', 'DEBUG-ALERT', 'DEBUGGER', 'DEBUG-LIST', - 'DECIMALS', 'DECLARE', 'DECLARE-NAMESPACE', 'DECRYPT', 'DEFAULT', - 'DEFAULT-BUFFER-HANDLE', 'DEFAULT-BUTTON', 'DEFAUT-B', 'DEFAUT-BU', - 'DEFAUT-BUT', 'DEFAUT-BUTT', 'DEFAUT-BUTTO', 'DEFAULT-COMMIT', - 'DEFAULT-EXTENSION', 'DEFAULT-EX', 'DEFAULT-EXT', 'DEFAULT-EXTE', - 'DEFAULT-EXTEN', 'DEFAULT-EXTENS', 'DEFAULT-EXTENSI', 'DEFAULT-EXTENSIO', - 'DEFAULT-NOXLATE', 'DEFAULT-NOXL', 'DEFAULT-NOXLA', 'DEFAULT-NOXLAT', - 'DEFAULT-VALUE', 'DEFAULT-WINDOW', 'DEFINED', 'DEFINE-USER-EVENT-MANAGER', - 'DELETE', 
'DEL', 'DELE', 'DELET', 'DELETE-CHARACTER', 'DELETE-CHAR', - 'DELETE-CHARA', 'DELETE-CHARAC', 'DELETE-CHARACT', 'DELETE-CHARACTE', - 'DELETE-CURRENT-ROW', 'DELETE-LINE', 'DELETE-RESULT-LIST-ENTRY', - 'DELETE-SELECTED-ROW', 'DELETE-SELECTED-ROWS', 'DELIMITER', 'DESC', - 'DESCENDING', 'DESC', 'DESCE', 'DESCEN', 'DESCEND', 'DESCENDI', 'DESCENDIN', - 'DESELECT-FOCUSED-ROW', 'DESELECTION', 'DESELECT-ROWS', - 'DESELECT-SELECTED-ROW', 'DESTRUCTOR', 'DIALOG-BOX', 'DICTIONARY', 'DICT', - 'DICTI', 'DICTIO', 'DICTION', 'DICTIONA', 'DICTIONAR', 'DIR', 'DISABLE', - 'DISABLE-AUTO-ZAP', 'DISABLED', 'DISABLE-DUMP-TRIGGERS', - 'DISABLE-LOAD-TRIGGERS', 'DISCONNECT', 'DISCON', 'DISCONN', 'DISCONNE', - 'DISCONNEC', 'DISP', 'DISPLAY', 'DISP', 'DISPL', 'DISPLA', - 'DISPLAY-MESSAGE', 'DISPLAY-TYPE', 'DISPLAY-T', 'DISPLAY-TY', 'DISPLAY-TYP', - 'DISTINCT', 'DO', 'DOMAIN-DESCRIPTION', 'DOMAIN-NAME', 'DOMAIN-TYPE', 'DOS', - 'DOUBLE', 'DOWN', 'DRAG-ENABLED', 'DROP', 'DROP-DOWN', 'DROP-DOWN-LIST', - 'DROP-FILE-NOTIFY', 'DROP-TARGET', 'DUMP', 'DYNAMIC', 'DYNAMIC-FUNCTION', - 'EACH', 'ECHO', 'EDGE-CHARS', 'EDGE', 'EDGE-', 'EDGE-C', 'EDGE-CH', - 'EDGE-CHA', 'EDGE-CHAR', 'EDGE-PIXELS', 'EDGE-P', 'EDGE-PI', 'EDGE-PIX', - 'EDGE-PIXE', 'EDGE-PIXEL', 'EDIT-CAN-PASTE', 'EDIT-CAN-UNDO', 'EDIT-CLEAR', - 'EDIT-COPY', 'EDIT-CUT', 'EDITING', 'EDITOR', 'EDIT-PASTE', 'EDIT-UNDO', - 'ELSE', 'EMPTY', 'EMPTY-TEMP-TABLE', 'ENABLE', 'ENABLED-FIELDS', 'ENCODE', - 'ENCRYPT', 'ENCRYPT-AUDIT-MAC-KEY', 'ENCRYPTION-SALT', 'END', - 'END-DOCUMENT', 'END-ELEMENT', 'END-EVENT-GROUP', 'END-FILE-DROP', 'ENDKEY', - 'END-KEY', 'END-MOVE', 'END-RESIZE', 'END-ROW-RESIZE', 'END-USER-PROMPT', - 'ENTERED', 'ENTRY', 'EQ', 'ERROR', 'ERROR-COLUMN', 'ERROR-COL', - 'ERROR-COLU', 'ERROR-COLUM', 'ERROR-ROW', 'ERROR-STACK-TRACE', - 'ERROR-STATUS', 'ERROR-STAT', 'ERROR-STATU', 'ESCAPE', 'ETIME', - 'EVENT-GROUP-ID', 'EVENT-PROCEDURE', 'EVENT-PROCEDURE-CONTEXT', 'EVENTS', - 'EVENT', 'EVENT-TYPE', 'EVENT-T', 'EVENT-TY', 'EVENT-TYP', 'EXCEPT', - 
'EXCLUSIVE-ID', 'EXCLUSIVE-LOCK', 'EXCLUSIVE', 'EXCLUSIVE-', 'EXCLUSIVE-L', - 'EXCLUSIVE-LO', 'EXCLUSIVE-LOC', 'EXCLUSIVE-WEB-USER', 'EXECUTE', 'EXISTS', - 'EXP', 'EXPAND', 'EXPANDABLE', 'EXPLICIT', 'EXPORT', 'EXPORT-PRINCIPAL', - 'EXTENDED', 'EXTENT', 'EXTERNAL', 'FALSE', 'FETCH', 'FETCH-SELECTED-ROW', - 'FGCOLOR', 'FGC', 'FGCO', 'FGCOL', 'FGCOLO', 'FIELD', 'FIELDS', 'FIELD', - 'FILE', 'FILE-CREATE-DATE', 'FILE-CREATE-TIME', 'FILE-INFORMATION', - 'FILE-INFO', 'FILE-INFOR', 'FILE-INFORM', 'FILE-INFORMA', 'FILE-INFORMAT', - 'FILE-INFORMATI', 'FILE-INFORMATIO', 'FILE-MOD-DATE', 'FILE-MOD-TIME', - 'FILENAME', 'FILE-NAME', 'FILE-OFFSET', 'FILE-OFF', 'FILE-OFFS', - 'FILE-OFFSE', 'FILE-SIZE', 'FILE-TYPE', 'FILL', 'FILLED', 'FILL-IN', - 'FILTERS', 'FINAL', 'FINALLY', 'FIND', 'FIND-BY-ROWID', - 'FIND-CASE-SENSITIVE', 'FIND-CURRENT', 'FINDER', 'FIND-FIRST', - 'FIND-GLOBAL', 'FIND-LAST', 'FIND-NEXT-OCCURRENCE', 'FIND-PREV-OCCURRENCE', - 'FIND-SELECT', 'FIND-UNIQUE', 'FIND-WRAP-AROUND', 'FIRST', - 'FIRST-ASYNCH-REQUEST', 'FIRST-CHILD', 'FIRST-COLUMN', 'FIRST-FORM', - 'FIRST-OBJECT', 'FIRST-OF', 'FIRST-PROCEDURE', 'FIRST-PROC', 'FIRST-PROCE', - 'FIRST-PROCED', 'FIRST-PROCEDU', 'FIRST-PROCEDUR', 'FIRST-SERVER', - 'FIRST-TAB-ITEM', 'FIRST-TAB-I', 'FIRST-TAB-IT', 'FIRST-TAB-ITE', - 'FIT-LAST-COLUMN', 'FIXED-ONLY', 'FLAT-BUTTON', 'FLOAT', 'FOCUS', - 'FOCUSED-ROW', 'FOCUSED-ROW-SELECTED', 'FONT', 'FONT-TABLE', 'FOR', - 'FORCE-FILE', 'FOREGROUND', 'FORE', 'FOREG', 'FOREGR', 'FOREGRO', - 'FOREGROU', 'FOREGROUN', 'FORM', 'FORMAT', 'FORM', 'FORMA', 'FORMATTED', - 'FORMATTE', 'FORM-LONG-INPUT', 'FORWARD', 'FORWARDS', 'FORWARD', 'FRAGMENT', - 'FRAGMEN', 'FRAME', 'FRAM', 'FRAME-COL', 'FRAME-DB', 'FRAME-DOWN', - 'FRAME-FIELD', 'FRAME-FILE', 'FRAME-INDEX', 'FRAME-INDE', 'FRAME-LINE', - 'FRAME-NAME', 'FRAME-ROW', 'FRAME-SPACING', 'FRAME-SPA', 'FRAME-SPAC', - 'FRAME-SPACI', 'FRAME-SPACIN', 'FRAME-VALUE', 'FRAME-VAL', 'FRAME-VALU', - 'FRAME-X', 'FRAME-Y', 'FREQUENCY', 'FROM', 'FROM-CHARS', 
'FROM-C', - 'FROM-CH', 'FROM-CHA', 'FROM-CHAR', 'FROM-CURRENT', 'FROM-CUR', 'FROM-CURR', - 'FROM-CURRE', 'FROM-CURREN', 'FROM-PIXELS', 'FROM-P', 'FROM-PI', 'FROM-PIX', - 'FROM-PIXE', 'FROM-PIXEL', 'FULL-HEIGHT-CHARS', 'FULL-HEIGHT', - 'FULL-HEIGHT-', 'FULL-HEIGHT-C', 'FULL-HEIGHT-CH', 'FULL-HEIGHT-CHA', - 'FULL-HEIGHT-CHAR', 'FULL-HEIGHT-PIXELS', 'FULL-HEIGHT-P', 'FULL-HEIGHT-PI', - 'FULL-HEIGHT-PIX', 'FULL-HEIGHT-PIXE', 'FULL-HEIGHT-PIXEL', 'FULL-PATHNAME', - 'FULL-PATHN', 'FULL-PATHNA', 'FULL-PATHNAM', 'FULL-WIDTH-CHARS', - 'FULL-WIDTH', 'FULL-WIDTH-', 'FULL-WIDTH-C', 'FULL-WIDTH-CH', - 'FULL-WIDTH-CHA', 'FULL-WIDTH-CHAR', 'FULL-WIDTH-PIXELS', 'FULL-WIDTH-P', - 'FULL-WIDTH-PI', 'FULL-WIDTH-PIX', 'FULL-WIDTH-PIXE', 'FULL-WIDTH-PIXEL', - 'FUNCTION', 'FUNCTION-CALL-TYPE', 'GATEWAYS', 'GATEWAY', 'GE', - 'GENERATE-MD5', 'GENERATE-PBE-KEY', 'GENERATE-PBE-SALT', - 'GENERATE-RANDOM-KEY', 'GENERATE-UUID', 'GET', 'GET-ATTR-CALL-TYPE', - 'GET-ATTRIBUTE-NODE', 'GET-BINARY-DATA', 'GET-BLUE-VALUE', 'GET-BLUE', - 'GET-BLUE-', 'GET-BLUE-V', 'GET-BLUE-VA', 'GET-BLUE-VAL', 'GET-BLUE-VALU', - 'GET-BROWSE-COLUMN', 'GET-BUFFER-HANDLEGETBYTE', 'GET-BYTE', - 'GET-CALLBACK-PROC-CONTEXT', 'GET-CALLBACK-PROC-NAME', 'GET-CGI-LIST', - 'GET-CGI-LONG-VALUE', 'GET-CGI-VALUE', 'GET-CODEPAGES', 'GET-COLLATIONS', - 'GET-CONFIG-VALUE', 'GET-CURRENT', 'GET-DOUBLE', 'GET-DROPPED-FILE', - 'GET-DYNAMIC', 'GET-ERROR-COLUMN', 'GET-ERROR-ROW', 'GET-FILE', - 'GET-FILE-NAME', 'GET-FILE-OFFSET', 'GET-FILE-OFFSE', 'GET-FIRST', - 'GET-FLOAT', 'GET-GREEN-VALUE', 'GET-GREEN', 'GET-GREEN-', 'GET-GREEN-V', - 'GET-GREEN-VA', 'GET-GREEN-VAL', 'GET-GREEN-VALU', - 'GET-INDEX-BY-NAMESPACE-NAME', 'GET-INDEX-BY-QNAME', 'GET-INT64', - 'GET-ITERATION', 'GET-KEY-VALUE', 'GET-KEY-VAL', 'GET-KEY-VALU', 'GET-LAST', - 'GET-LOCALNAME-BY-INDEX', 'GET-LONG', 'GET-MESSAGE', 'GET-NEXT', - 'GET-NUMBER', 'GET-POINTER-VALUE', 'GET-PREV', 'GET-PRINTERS', - 'GET-PROPERTY', 'GET-QNAME-BY-INDEX', 'GET-RED-VALUE', 'GET-RED', - 'GET-RED-', 
'GET-RED-V', 'GET-RED-VA', 'GET-RED-VAL', 'GET-RED-VALU', - 'GET-REPOSITIONED-ROW', 'GET-RGB-VALUE', 'GET-SELECTED-WIDGET', - 'GET-SELECTED', 'GET-SELECTED-', 'GET-SELECTED-W', 'GET-SELECTED-WI', - 'GET-SELECTED-WID', 'GET-SELECTED-WIDG', 'GET-SELECTED-WIDGE', 'GET-SHORT', - 'GET-SIGNATURE', 'GET-SIZE', 'GET-STRING', 'GET-TAB-ITEM', - 'GET-TEXT-HEIGHT-CHARS', 'GET-TEXT-HEIGHT', 'GET-TEXT-HEIGHT-', - 'GET-TEXT-HEIGHT-C', 'GET-TEXT-HEIGHT-CH', 'GET-TEXT-HEIGHT-CHA', - 'GET-TEXT-HEIGHT-CHAR', 'GET-TEXT-HEIGHT-PIXELS', 'GET-TEXT-HEIGHT-P', - 'GET-TEXT-HEIGHT-PI', 'GET-TEXT-HEIGHT-PIX', 'GET-TEXT-HEIGHT-PIXE', - 'GET-TEXT-HEIGHT-PIXEL', 'GET-TEXT-WIDTH-CHARS', 'GET-TEXT-WIDTH', - 'GET-TEXT-WIDTH-', 'GET-TEXT-WIDTH-C', 'GET-TEXT-WIDTH-CH', - 'GET-TEXT-WIDTH-CHA', 'GET-TEXT-WIDTH-CHAR', 'GET-TEXT-WIDTH-PIXELS', - 'GET-TEXT-WIDTH-P', 'GET-TEXT-WIDTH-PI', 'GET-TEXT-WIDTH-PIX', - 'GET-TEXT-WIDTH-PIXE', 'GET-TEXT-WIDTH-PIXEL', 'GET-TYPE-BY-INDEX', - 'GET-TYPE-BY-NAMESPACE-NAME', 'GET-TYPE-BY-QNAME', 'GET-UNSIGNED-LONG', - 'GET-UNSIGNED-SHORT', 'GET-URI-BY-INDEX', 'GET-VALUE-BY-INDEX', - 'GET-VALUE-BY-NAMESPACE-NAME', 'GET-VALUE-BY-QNAME', 'GET-WAIT-STATE', - 'GLOBAL', 'GO-ON', 'GO-PENDING', 'GO-PEND', 'GO-PENDI', 'GO-PENDIN', - 'GRANT', 'GRAPHIC-EDGE', 'GRAPHIC-E', 'GRAPHIC-ED', 'GRAPHIC-EDG', - 'GRID-FACTOR-HORIZONTAL', 'GRID-FACTOR-H', 'GRID-FACTOR-HO', - 'GRID-FACTOR-HOR', 'GRID-FACTOR-HORI', 'GRID-FACTOR-HORIZ', - 'GRID-FACTOR-HORIZO', 'GRID-FACTOR-HORIZON', 'GRID-FACTOR-HORIZONT', - 'GRID-FACTOR-HORIZONTA', 'GRID-FACTOR-VERTICAL', 'GRID-FACTOR-V', - 'GRID-FACTOR-VE', 'GRID-FACTOR-VER', 'GRID-FACTOR-VERT', 'GRID-FACTOR-VERT', - 'GRID-FACTOR-VERTI', 'GRID-FACTOR-VERTIC', 'GRID-FACTOR-VERTICA', - 'GRID-SNAP', 'GRID-UNIT-HEIGHT-CHARS', 'GRID-UNIT-HEIGHT', - 'GRID-UNIT-HEIGHT-', 'GRID-UNIT-HEIGHT-C', 'GRID-UNIT-HEIGHT-CH', - 'GRID-UNIT-HEIGHT-CHA', 'GRID-UNIT-HEIGHT-PIXELS', 'GRID-UNIT-HEIGHT-P', - 'GRID-UNIT-HEIGHT-PI', 'GRID-UNIT-HEIGHT-PIX', 'GRID-UNIT-HEIGHT-PIXE', - 
'GRID-UNIT-HEIGHT-PIXEL', 'GRID-UNIT-WIDTH-CHARS', 'GRID-UNIT-WIDTH', - 'GRID-UNIT-WIDTH-', 'GRID-UNIT-WIDTH-C', 'GRID-UNIT-WIDTH-CH', - 'GRID-UNIT-WIDTH-CHA', 'GRID-UNIT-WIDTH-CHAR', 'GRID-UNIT-WIDTH-PIXELS', - 'GRID-UNIT-WIDTH-P', 'GRID-UNIT-WIDTH-PI', 'GRID-UNIT-WIDTH-PIX', - 'GRID-UNIT-WIDTH-PIXE', 'GRID-UNIT-WIDTH-PIXEL', 'GRID-VISIBLE', 'GROUP', - 'GT', 'GUID', 'HANDLER', 'HAS-RECORDS', 'HAVING', 'HEADER', 'HEIGHT-CHARS', - 'HEIGHT', 'HEIGHT-', 'HEIGHT-C', 'HEIGHT-CH', 'HEIGHT-CHA', 'HEIGHT-CHAR', - 'HEIGHT-PIXELS', 'HEIGHT-P', 'HEIGHT-PI', 'HEIGHT-PIX', 'HEIGHT-PIXE', - 'HEIGHT-PIXEL', 'HELP', 'HEX-DECODE', 'HEX-ENCODE', 'HIDDEN', 'HIDE', - 'HORIZONTAL', 'HORI', 'HORIZ', 'HORIZO', 'HORIZON', 'HORIZONT', 'HORIZONTA', - 'HOST-BYTE-ORDER', 'HTML-CHARSET', 'HTML-END-OF-LINE', 'HTML-END-OF-PAGE', - 'HTML-FRAME-BEGIN', 'HTML-FRAME-END', 'HTML-HEADER-BEGIN', - 'HTML-HEADER-END', 'HTML-TITLE-BEGIN', 'HTML-TITLE-END', 'HWND', 'ICON', - 'IF', 'IMAGE', 'IMAGE-DOWN', 'IMAGE-INSENSITIVE', 'IMAGE-SIZE', - 'IMAGE-SIZE-CHARS', 'IMAGE-SIZE-C', 'IMAGE-SIZE-CH', 'IMAGE-SIZE-CHA', - 'IMAGE-SIZE-CHAR', 'IMAGE-SIZE-PIXELS', 'IMAGE-SIZE-P', 'IMAGE-SIZE-PI', - 'IMAGE-SIZE-PIX', 'IMAGE-SIZE-PIXE', 'IMAGE-SIZE-PIXEL', 'IMAGE-UP', - 'IMMEDIATE-DISPLAY', 'IMPLEMENTS', 'IMPORT', 'IMPORT-PRINCIPAL', 'IN', - 'INCREMENT-EXCLUSIVE-ID', 'INDEX', 'INDEXED-REPOSITION', 'INDEX-HINT', - 'INDEX-INFORMATION', 'INDICATOR', 'INFORMATION', 'INFO', 'INFOR', 'INFORM', - 'INFORMA', 'INFORMAT', 'INFORMATI', 'INFORMATIO', 'IN-HANDLE', - 'INHERIT-BGCOLOR', 'INHERIT-BGC', 'INHERIT-BGCO', 'INHERIT-BGCOL', - 'INHERIT-BGCOLO', 'INHERIT-FGCOLOR', 'INHERIT-FGC', 'INHERIT-FGCO', - 'INHERIT-FGCOL', 'INHERIT-FGCOLO', 'INHERITS', 'INITIAL', 'INIT', 'INITI', - 'INITIA', 'INITIAL-DIR', 'INITIAL-FILTER', 'INITIALIZE-DOCUMENT-TYPE', - 'INITIATE', 'INNER-CHARS', 'INNER-LINES', 'INPUT', 'INPUT-OUTPUT', - 'INPUT-O', 'INPUT-OU', 'INPUT-OUT', 'INPUT-OUTP', 'INPUT-OUTPU', - 'INPUT-VALUE', 'INSERT', 'INSERT-ATTRIBUTE', 
'INSERT-BACKTAB', 'INSERT-B', - 'INSERT-BA', 'INSERT-BAC', 'INSERT-BACK', 'INSERT-BACKT', 'INSERT-BACKTA', - 'INSERT-FILE', 'INSERT-ROW', 'INSERT-STRING', 'INSERT-TAB', 'INSERT-T', - 'INSERT-TA', 'INTERFACE', 'INTERNAL-ENTRIES', 'INTO', 'INVOKE', 'IS', - 'IS-ATTR-SPACE', 'IS-ATTR', 'IS-ATTR-', 'IS-ATTR-S', 'IS-ATTR-SP', - 'IS-ATTR-SPA', 'IS-ATTR-SPAC', 'IS-CLASS', 'IS-CLAS', 'IS-LEAD-BYTE', - 'IS-ATTR', 'IS-OPEN', 'IS-PARAMETER-SET', 'IS-ROW-SELECTED', 'IS-SELECTED', - 'ITEM', 'ITEMS-PER-ROW', 'JOIN', 'JOIN-BY-SQLDB', 'KBLABEL', - 'KEEP-CONNECTION-OPEN', 'KEEP-FRAME-Z-ORDER', 'KEEP-FRAME-Z', - 'KEEP-FRAME-Z-', 'KEEP-FRAME-Z-O', 'KEEP-FRAME-Z-OR', 'KEEP-FRAME-Z-ORD', - 'KEEP-FRAME-Z-ORDE', 'KEEP-MESSAGES', 'KEEP-SECURITY-CACHE', - 'KEEP-TAB-ORDER', 'KEY', 'KEYCODE', 'KEY-CODE', 'KEYFUNCTION', 'KEYFUNC', - 'KEYFUNCT', 'KEYFUNCTI', 'KEYFUNCTIO', 'KEY-FUNCTION', 'KEY-FUNC', - 'KEY-FUNCT', 'KEY-FUNCTI', 'KEY-FUNCTIO', 'KEYLABEL', 'KEY-LABEL', 'KEYS', - 'KEYWORD', 'KEYWORD-ALL', 'LABEL', 'LABEL-BGCOLOR', 'LABEL-BGC', - 'LABEL-BGCO', 'LABEL-BGCOL', 'LABEL-BGCOLO', 'LABEL-DCOLOR', 'LABEL-DC', - 'LABEL-DCO', 'LABEL-DCOL', 'LABEL-DCOLO', 'LABEL-FGCOLOR', 'LABEL-FGC', - 'LABEL-FGCO', 'LABEL-FGCOL', 'LABEL-FGCOLO', 'LABEL-FONT', 'LABEL-PFCOLOR', - 'LABEL-PFC', 'LABEL-PFCO', 'LABEL-PFCOL', 'LABEL-PFCOLO', 'LABELS', - 'LANDSCAPE', 'LANGUAGES', 'LANGUAGE', 'LARGE', 'LARGE-TO-SMALL', 'LAST', - 'LAST-ASYNCH-REQUEST', 'LAST-BATCH', 'LAST-CHILD', 'LAST-EVENT', - 'LAST-EVEN', 'LAST-FORM', 'LASTKEY', 'LAST-KEY', 'LAST-OBJECT', 'LAST-OF', - 'LAST-PROCEDURE', 'LAST-PROCE', 'LAST-PROCED', 'LAST-PROCEDU', - 'LAST-PROCEDUR', 'LAST-SERVER', 'LAST-TAB-ITEM', 'LAST-TAB-I', - 'LAST-TAB-IT', 'LAST-TAB-ITE', 'LC', 'LDBNAME', 'LE', 'LEAVE', - 'LEFT-ALIGNED', 'LEFT-ALIGN', 'LEFT-ALIGNE', 'LEFT-TRIM', 'LENGTH', - 'LIBRARY', 'LIKE', 'LIKE-SEQUENTIAL', 'LINE', 'LINE-COUNTER', 'LINE-COUNT', - 'LINE-COUNTE', 'LIST-EVENTS', 'LISTING', 'LISTI', 'LISTIN', - 'LIST-ITEM-PAIRS', 'LIST-ITEMS', 
'LIST-PROPERTY-NAMES', 'LIST-QUERY-ATTRS', - 'LIST-SET-ATTRS', 'LIST-WIDGETS', 'LITERAL-QUESTION', 'LITTLE-ENDIAN', - 'LOAD', 'LOAD-DOMAINS', 'LOAD-ICON', 'LOAD-IMAGE', 'LOAD-IMAGE-DOWN', - 'LOAD-IMAGE-INSENSITIVE', 'LOAD-IMAGE-UP', 'LOAD-MOUSE-POINTER', - 'LOAD-MOUSE-P', 'LOAD-MOUSE-PO', 'LOAD-MOUSE-POI', 'LOAD-MOUSE-POIN', - 'LOAD-MOUSE-POINT', 'LOAD-MOUSE-POINTE', 'LOAD-PICTURE', 'LOAD-SMALL-ICON', - 'LOCAL-NAME', 'LOCATOR-COLUMN-NUMBER', 'LOCATOR-LINE-NUMBER', - 'LOCATOR-PUBLIC-ID', 'LOCATOR-SYSTEM-ID', 'LOCATOR-TYPE', 'LOCKED', - 'LOCK-REGISTRATION', 'LOG', 'LOG-AUDIT-EVENT', 'LOGIN-EXPIRATION-TIMESTAMP', - 'LOGIN-HOST', 'LOGIN-STATE', 'LOG-MANAGER', 'LOGOUT', 'LOOKAHEAD', 'LOOKUP', - 'LT', 'MACHINE-CLASS', 'MANDATORY', 'MANUAL-HIGHLIGHT', 'MAP', - 'MARGIN-EXTRA', 'MARGIN-HEIGHT-CHARS', 'MARGIN-HEIGHT', 'MARGIN-HEIGHT-', - 'MARGIN-HEIGHT-C', 'MARGIN-HEIGHT-CH', 'MARGIN-HEIGHT-CHA', - 'MARGIN-HEIGHT-CHAR', 'MARGIN-HEIGHT-PIXELS', 'MARGIN-HEIGHT-P', - 'MARGIN-HEIGHT-PI', 'MARGIN-HEIGHT-PIX', 'MARGIN-HEIGHT-PIXE', - 'MARGIN-HEIGHT-PIXEL', 'MARGIN-WIDTH-CHARS', 'MARGIN-WIDTH', - 'MARGIN-WIDTH-', 'MARGIN-WIDTH-C', 'MARGIN-WIDTH-CH', 'MARGIN-WIDTH-CHA', - 'MARGIN-WIDTH-CHAR', 'MARGIN-WIDTH-PIXELS', 'MARGIN-WIDTH-P', - 'MARGIN-WIDTH-PI', 'MARGIN-WIDTH-PIX', 'MARGIN-WIDTH-PIXE', - 'MARGIN-WIDTH-PIXEL', 'MARK-NEW', 'MARK-ROW-STATE', 'MATCHES', 'MAX', - 'MAX-BUTTON', 'MAX-CHARS', 'MAX-DATA-GUESS', 'MAX-HEIGHT', - 'MAX-HEIGHT-CHARS', 'MAX-HEIGHT-C', 'MAX-HEIGHT-CH', 'MAX-HEIGHT-CHA', - 'MAX-HEIGHT-CHAR', 'MAX-HEIGHT-PIXELS', 'MAX-HEIGHT-P', 'MAX-HEIGHT-PI', - 'MAX-HEIGHT-PIX', 'MAX-HEIGHT-PIXE', 'MAX-HEIGHT-PIXEL', 'MAXIMIZE', - 'MAXIMUM', 'MAX', 'MAXI', 'MAXIM', 'MAXIMU', 'MAXIMUM-LEVEL', 'MAX-ROWS', - 'MAX-SIZE', 'MAX-VALUE', 'MAX-VAL', 'MAX-VALU', 'MAX-WIDTH', - 'MAX-WIDTH-CHARS', 'MAX-WIDTH', 'MAX-WIDTH-', 'MAX-WIDTH-C', 'MAX-WIDTH-CH', - 'MAX-WIDTH-CHA', 'MAX-WIDTH-CHAR', 'MAX-WIDTH-PIXELS', 'MAX-WIDTH-P', - 'MAX-WIDTH-PI', 'MAX-WIDTH-PIX', 'MAX-WIDTH-PIXE', 
'MAX-WIDTH-PIXEL', - 'MD5-DIGEST', 'MEMBER', 'MEMPTR-TO-NODE-VALUE', 'MENU', 'MENUBAR', - 'MENU-BAR', 'MENU-ITEM', 'MENU-KEY', 'MENU-K', 'MENU-KE', 'MENU-MOUSE', - 'MENU-M', 'MENU-MO', 'MENU-MOU', 'MENU-MOUS', 'MERGE-BY-FIELD', 'MESSAGE', - 'MESSAGE-AREA', 'MESSAGE-AREA-FONT', 'MESSAGE-LINES', 'METHOD', 'MIN', - 'MIN-BUTTON', 'MIN-COLUMN-WIDTH-CHARS', 'MIN-COLUMN-WIDTH-C', - 'MIN-COLUMN-WIDTH-CH', 'MIN-COLUMN-WIDTH-CHA', 'MIN-COLUMN-WIDTH-CHAR', - 'MIN-COLUMN-WIDTH-PIXELS', 'MIN-COLUMN-WIDTH-P', 'MIN-COLUMN-WIDTH-PI', - 'MIN-COLUMN-WIDTH-PIX', 'MIN-COLUMN-WIDTH-PIXE', 'MIN-COLUMN-WIDTH-PIXEL', - 'MIN-HEIGHT-CHARS', 'MIN-HEIGHT', 'MIN-HEIGHT-', 'MIN-HEIGHT-C', - 'MIN-HEIGHT-CH', 'MIN-HEIGHT-CHA', 'MIN-HEIGHT-CHAR', 'MIN-HEIGHT-PIXELS', - 'MIN-HEIGHT-P', 'MIN-HEIGHT-PI', 'MIN-HEIGHT-PIX', 'MIN-HEIGHT-PIXE', - 'MIN-HEIGHT-PIXEL', 'MINIMUM', 'MIN', 'MINI', 'MINIM', 'MINIMU', 'MIN-SIZE', - 'MIN-VALUE', 'MIN-VAL', 'MIN-VALU', 'MIN-WIDTH-CHARS', 'MIN-WIDTH', - 'MIN-WIDTH-', 'MIN-WIDTH-C', 'MIN-WIDTH-CH', 'MIN-WIDTH-CHA', - 'MIN-WIDTH-CHAR', 'MIN-WIDTH-PIXELS', 'MIN-WIDTH-P', 'MIN-WIDTH-PI', - 'MIN-WIDTH-PIX', 'MIN-WIDTH-PIXE', 'MIN-WIDTH-PIXEL', 'MODIFIED', 'MODULO', - 'MOD', 'MODU', 'MODUL', 'MONTH', 'MOUSE', 'MOUSE-POINTER', 'MOUSE-P', - 'MOUSE-PO', 'MOUSE-POI', 'MOUSE-POIN', 'MOUSE-POINT', 'MOUSE-POINTE', - 'MOVABLE', 'MOVE-AFTER-TAB-ITEM', 'MOVE-AFTER', 'MOVE-AFTER-', - 'MOVE-AFTER-T', 'MOVE-AFTER-TA', 'MOVE-AFTER-TAB', 'MOVE-AFTER-TAB-', - 'MOVE-AFTER-TAB-I', 'MOVE-AFTER-TAB-IT', 'MOVE-AFTER-TAB-ITE', - 'MOVE-BEFORE-TAB-ITEM', 'MOVE-BEFOR', 'MOVE-BEFORE', 'MOVE-BEFORE-', - 'MOVE-BEFORE-T', 'MOVE-BEFORE-TA', 'MOVE-BEFORE-TAB', 'MOVE-BEFORE-TAB-', - 'MOVE-BEFORE-TAB-I', 'MOVE-BEFORE-TAB-IT', 'MOVE-BEFORE-TAB-ITE', - 'MOVE-COLUMN', 'MOVE-COL', 'MOVE-COLU', 'MOVE-COLUM', 'MOVE-TO-BOTTOM', - 'MOVE-TO-B', 'MOVE-TO-BO', 'MOVE-TO-BOT', 'MOVE-TO-BOTT', 'MOVE-TO-BOTTO', - 'MOVE-TO-EOF', 'MOVE-TO-TOP', 'MOVE-TO-T', 'MOVE-TO-TO', 'MPE', - 'MULTI-COMPILE', 'MULTIPLE', 
'MULTIPLE-KEY', 'MULTITASKING-INTERVAL', - 'MUST-EXIST', 'NAME', 'NAMESPACE-PREFIX', 'NAMESPACE-URI', 'NATIVE', 'NE', - 'NEEDS-APPSERVER-PROMPT', 'NEEDS-PROMPT', 'NEW', 'NEW-INSTANCE', 'NEW-ROW', - 'NEXT', 'NEXT-COLUMN', 'NEXT-PROMPT', 'NEXT-ROWID', 'NEXT-SIBLING', - 'NEXT-TAB-ITEM', 'NEXT-TAB-I', 'NEXT-TAB-IT', 'NEXT-TAB-ITE', 'NEXT-VALUE', - 'NO', 'NO-APPLY', 'NO-ARRAY-MESSAGE', 'NO-ASSIGN', 'NO-ATTR-LIST', - 'NO-ATTR', 'NO-ATTR-', 'NO-ATTR-L', 'NO-ATTR-LI', 'NO-ATTR-LIS', - 'NO-ATTR-SPACE', 'NO-ATTR', 'NO-ATTR-', 'NO-ATTR-S', 'NO-ATTR-SP', - 'NO-ATTR-SPA', 'NO-ATTR-SPAC', 'NO-AUTO-VALIDATE', 'NO-BIND-WHERE', - 'NO-BOX', 'NO-CONSOLE', 'NO-CONVERT', 'NO-CONVERT-3D-COLORS', - 'NO-CURRENT-VALUE', 'NO-DEBUG', 'NODE-VALUE-TO-MEMPTR', 'NO-DRAG', - 'NO-ECHO', 'NO-EMPTY-SPACE', 'NO-ERROR', 'NO-FILL', 'NO-F', 'NO-FI', - 'NO-FIL', 'NO-FOCUS', 'NO-HELP', 'NO-HIDE', 'NO-INDEX-HINT', - 'NO-INHERIT-BGCOLOR', 'NO-INHERIT-BGC', 'NO-INHERIT-BGCO', 'LABEL-BGCOL', - 'LABEL-BGCOLO', 'NO-INHERIT-FGCOLOR', 'NO-INHERIT-FGC', 'NO-INHERIT-FGCO', - 'NO-INHERIT-FGCOL', 'NO-INHERIT-FGCOLO', 'NO-JOIN-BY-SQLDB', 'NO-LABELS', - 'NO-LABE', 'NO-LOBS', 'NO-LOCK', 'NO-LOOKAHEAD', 'NO-MAP', 'NO-MESSAGE', - 'NO-MES', 'NO-MESS', 'NO-MESSA', 'NO-MESSAG', 'NONAMESPACE-SCHEMA-LOCATION', - 'NONE', 'NO-PAUSE', 'NO-PREFETCH', 'NO-PREFE', 'NO-PREFET', 'NO-PREFETC', - 'NORMALIZE', 'NO-ROW-MARKERS', 'NO-SCROLLBAR-VERTICAL', - 'NO-SEPARATE-CONNECTION', 'NO-SEPARATORS', 'NOT', 'NO-TAB-STOP', - 'NOT-ACTIVE', 'NO-UNDERLINE', 'NO-UND', 'NO-UNDE', 'NO-UNDER', 'NO-UNDERL', - 'NO-UNDERLI', 'NO-UNDERLIN', 'NO-UNDO', 'NO-VALIDATE', 'NO-VAL', 'NO-VALI', - 'NO-VALID', 'NO-VALIDA', 'NO-VALIDAT', 'NOW', 'NO-WAIT', 'NO-WORD-WRAP', - 'NULL', 'NUM-ALIASES', 'NUM-ALI', 'NUM-ALIA', 'NUM-ALIAS', 'NUM-ALIASE', - 'NUM-BUFFERS', 'NUM-BUTTONS', 'NUM-BUT', 'NUM-BUTT', 'NUM-BUTTO', - 'NUM-BUTTON', 'NUM-COLUMNS', 'NUM-COL', 'NUM-COLU', 'NUM-COLUM', - 'NUM-COLUMN', 'NUM-COPIES', 'NUM-DBS', 'NUM-DROPPED-FILES', 'NUM-ENTRIES', - 
'NUMERIC', 'NUMERIC-FORMAT', 'NUMERIC-F', 'NUMERIC-FO', 'NUMERIC-FOR', - 'NUMERIC-FORM', 'NUMERIC-FORMA', 'NUM-FIELDS', 'NUM-FORMATS', 'NUM-ITEMS', - 'NUM-ITERATIONS', 'NUM-LINES', 'NUM-LOCKED-COLUMNS', 'NUM-LOCKED-COL', - 'NUM-LOCKED-COLU', 'NUM-LOCKED-COLUM', 'NUM-LOCKED-COLUMN', 'NUM-MESSAGES', - 'NUM-PARAMETERS', 'NUM-REFERENCES', 'NUM-REPLACED', 'NUM-RESULTS', - 'NUM-SELECTED-ROWS', 'NUM-SELECTED-WIDGETS', 'NUM-SELECTED', - 'NUM-SELECTED-', 'NUM-SELECTED-W', 'NUM-SELECTED-WI', 'NUM-SELECTED-WID', - 'NUM-SELECTED-WIDG', 'NUM-SELECTED-WIDGE', 'NUM-SELECTED-WIDGET', - 'NUM-TABS', 'NUM-TO-RETAIN', 'NUM-VISIBLE-COLUMNS', 'OCTET-LENGTH', 'OF', - 'OFF', 'OK', 'OK-CANCEL', 'OLD', 'ON', 'ON-FRAME-BORDER', 'ON-FRAME', - 'ON-FRAME-', 'ON-FRAME-B', 'ON-FRAME-BO', 'ON-FRAME-BOR', 'ON-FRAME-BORD', - 'ON-FRAME-BORDE', 'OPEN', 'OPSYS', 'OPTION', 'OR', 'ORDERED-JOIN', - 'ORDINAL', 'OS-APPEND', 'OS-COMMAND', 'OS-COPY', 'OS-CREATE-DIR', - 'OS-DELETE', 'OS-DIR', 'OS-DRIVES', 'OS-DRIVE', 'OS-ERROR', 'OS-GETENV', - 'OS-RENAME', 'OTHERWISE', 'OUTPUT', 'OVERLAY', 'OVERRIDE', 'OWNER', 'PAGE', - 'PAGE-BOTTOM', 'PAGE-BOT', 'PAGE-BOTT', 'PAGE-BOTTO', 'PAGED', - 'PAGE-NUMBER', 'PAGE-NUM', 'PAGE-NUMB', 'PAGE-NUMBE', 'PAGE-SIZE', - 'PAGE-TOP', 'PAGE-WIDTH', 'PAGE-WID', 'PAGE-WIDT', 'PARAMETER', 'PARAM', - 'PARAME', 'PARAMET', 'PARAMETE', 'PARENT', 'PARSE-STATUS', 'PARTIAL-KEY', - 'PASCAL', 'PASSWORD-FIELD', 'PATHNAME', 'PAUSE', 'PBE-HASH-ALGORITHM', - 'PBE-HASH-ALG', 'PBE-HASH-ALGO', 'PBE-HASH-ALGOR', 'PBE-HASH-ALGORI', - 'PBE-HASH-ALGORIT', 'PBE-HASH-ALGORITH', 'PBE-KEY-ROUNDS', 'PDBNAME', - 'PERSISTENT', 'PERSIST', 'PERSISTE', 'PERSISTEN', - 'PERSISTENT-CACHE-DISABLED', 'PFCOLOR', 'PFC', 'PFCO', 'PFCOL', 'PFCOLO', - 'PIXELS', 'PIXELS-PER-COLUMN', 'PIXELS-PER-COL', 'PIXELS-PER-COLU', - 'PIXELS-PER-COLUM', 'PIXELS-PER-ROW', 'POPUP-MENU', 'POPUP-M', 'POPUP-ME', - 'POPUP-MEN', 'POPUP-ONLY', 'POPUP-O', 'POPUP-ON', 'POPUP-ONL', 'PORTRAIT', - 'POSITION', 'PRECISION', 'PREFER-DATASET', 
'PREPARED', 'PREPARE-STRING', - 'PREPROCESS', 'PREPROC', 'PREPROCE', 'PREPROCES', 'PRESELECT', 'PRESEL', - 'PRESELE', 'PRESELEC', 'PREV', 'PREV-COLUMN', 'PREV-SIBLING', - 'PREV-TAB-ITEM', 'PREV-TAB-I', 'PREV-TAB-IT', 'PREV-TAB-ITE', 'PRIMARY', - 'PRINTER', 'PRINTER-CONTROL-HANDLE', 'PRINTER-HDC', 'PRINTER-NAME', - 'PRINTER-PORT', 'PRINTER-SETUP', 'PRIVATE', 'PRIVATE-DATA', 'PRIVATE-D', - 'PRIVATE-DA', 'PRIVATE-DAT', 'PRIVILEGES', 'PROCEDURE', 'PROCE', 'PROCED', - 'PROCEDU', 'PROCEDUR', 'PROCEDURE-CALL-TYPE', 'PROCESS', 'PROC-HANDLE', - 'PROC-HA', 'PROC-HAN', 'PROC-HAND', 'PROC-HANDL', 'PROC-STATUS', 'PROC-ST', - 'PROC-STA', 'PROC-STAT', 'PROC-STATU', 'proc-text', 'proc-text-buffe', - 'PROFILER', 'PROGRAM-NAME', 'PROGRESS', 'PROGRESS-SOURCE', 'PROGRESS-S', - 'PROGRESS-SO', 'PROGRESS-SOU', 'PROGRESS-SOUR', 'PROGRESS-SOURC', 'PROMPT', - 'PROMPT-FOR', 'PROMPT-F', 'PROMPT-FO', 'PROMSGS', 'PROPATH', 'PROPERTY', - 'PROTECTED', 'PROVERSION', 'PROVERS', 'PROVERSI', 'PROVERSIO', 'PROXY', - 'PROXY-PASSWORD', 'PROXY-USERID', 'PUBLIC', 'PUBLIC-ID', 'PUBLISH', - 'PUBLISHED-EVENTS', 'PUT', 'PUTBYTE', 'PUT-BYTE', 'PUT-DOUBLE', 'PUT-FLOAT', - 'PUT-INT64', 'PUT-KEY-VALUE', 'PUT-KEY-VAL', 'PUT-KEY-VALU', 'PUT-LONG', - 'PUT-SHORT', 'PUT-STRING', 'PUT-UNSIGNED-LONG', 'QUERY', 'QUERY-CLOSE', - 'QUERY-OFF-END', 'QUERY-OPEN', 'QUERY-PREPARE', 'QUERY-TUNING', 'QUESTION', - 'QUIT', 'QUOTER', 'RADIO-BUTTONS', 'RADIO-SET', 'RANDOM', 'RAW-TRANSFER', - 'RCODE-INFORMATION', 'RCODE-INFO', 'RCODE-INFOR', 'RCODE-INFORM', - 'RCODE-INFORMA', 'RCODE-INFORMAT', 'RCODE-INFORMATI', 'RCODE-INFORMATIO', - 'READ-AVAILABLE', 'READ-EXACT-NUM', 'READ-FILE', 'READKEY', 'READ-ONLY', - 'READ-XML', 'READ-XMLSCHEMA', 'REAL', 'RECORD-LENGTH', 'RECTANGLE', 'RECT', - 'RECTA', 'RECTAN', 'RECTANG', 'RECTANGL', 'RECURSIVE', 'REFERENCE-ONLY', - 'REFRESH', 'REFRESHABLE', 'REFRESH-AUDIT-POLICY', 'REGISTER-DOMAIN', - 'RELEASE', 'REMOTE', 'REMOVE-EVENTS-PROCEDURE', 'REMOVE-SUPER-PROCEDURE', - 'REPEAT', 'REPLACE', 
'REPLACE-SELECTION-TEXT', 'REPOSITION', - 'REPOSITION-BACKWARD', 'REPOSITION-FORWARD', 'REPOSITION-MODE', - 'REPOSITION-TO-ROW', 'REPOSITION-TO-ROWID', 'REQUEST', 'RESET', 'RESIZABLE', - 'RESIZA', 'RESIZAB', 'RESIZABL', 'RESIZE', 'RESTART-ROW', 'RESTART-ROWID', - 'RETAIN', 'RETAIN-SHAPE', 'RETRY', 'RETRY-CANCEL', 'RETURN', - 'RETURN-INSERTED', 'RETURN-INS', 'RETURN-INSE', 'RETURN-INSER', - 'RETURN-INSERT', 'RETURN-INSERTE', 'RETURNS', 'RETURN-TO-START-DIR', - 'RETURN-TO-START-DI', 'RETURN-VALUE', 'RETURN-VAL', 'RETURN-VALU', - 'RETURN-VALUE-DATA-TYPE', 'REVERSE-FROM', 'REVERT', 'REVOKE', 'RGB-VALUE', - 'RIGHT-ALIGNED', 'RETURN-ALIGN', 'RETURN-ALIGNE', 'RIGHT-TRIM', 'R-INDEX', - 'ROLES', 'ROUND', 'ROUTINE-LEVEL', 'ROW', 'ROW-HEIGHT-CHARS', 'HEIGHT', - 'ROW-HEIGHT-PIXELS', 'HEIGHT-P', 'ROW-MARKERS', 'ROW-OF', 'ROW-RESIZABLE', - 'RULE', 'RUN', 'RUN-PROCEDURE', 'SAVE', 'SAVE-AS', 'SAVE-FILE', - 'SAX-COMPLETE', 'SAX-COMPLE', 'SAX-COMPLET', 'SAX-PARSE', 'SAX-PARSE-FIRST', - 'SAX-PARSE-NEXT', 'SAX-PARSER-ERROR', 'SAX-RUNNING', 'SAX-UNINITIALIZED', - 'SAX-WRITE-BEGIN', 'SAX-WRITE-COMPLETE', 'SAX-WRITE-CONTENT', - 'SAX-WRITE-ELEMENT', 'SAX-WRITE-ERROR', 'SAX-WRITE-IDLE', 'SAX-WRITER', - 'SAX-WRITE-TAG', 'SCHEMA', 'SCHEMA-LOCATION', 'SCHEMA-MARSHAL', - 'SCHEMA-PATH', 'SCREEN', 'SCREEN-IO', 'SCREEN-LINES', 'SCREEN-VALUE', - 'SCREEN-VAL', 'SCREEN-VALU', 'SCROLL', 'SCROLLABLE', 'SCROLLBAR-HORIZONTAL', - 'SCROLLBAR-H', 'SCROLLBAR-HO', 'SCROLLBAR-HOR', 'SCROLLBAR-HORI', - 'SCROLLBAR-HORIZ', 'SCROLLBAR-HORIZO', 'SCROLLBAR-HORIZON', - 'SCROLLBAR-HORIZONT', 'SCROLLBAR-HORIZONTA', 'SCROLL-BARS', - 'SCROLLBAR-VERTICAL', 'SCROLLBAR-V', 'SCROLLBAR-VE', 'SCROLLBAR-VER', - 'SCROLLBAR-VERT', 'SCROLLBAR-VERTI', 'SCROLLBAR-VERTIC', - 'SCROLLBAR-VERTICA', 'SCROLL-DELTA', 'SCROLLED-ROW-POSITION', - 'SCROLLED-ROW-POS', 'SCROLLED-ROW-POSI', 'SCROLLED-ROW-POSIT', - 'SCROLLED-ROW-POSITI', 'SCROLLED-ROW-POSITIO', 'SCROLLING', 'SCROLL-OFFSET', - 'SCROLL-TO-CURRENT-ROW', 'SCROLL-TO-ITEM', 
'SCROLL-TO-I', 'SCROLL-TO-IT', - 'SCROLL-TO-ITE', 'SCROLL-TO-SELECTED-ROW', 'SDBNAME', 'SEAL', - 'SEAL-TIMESTAMP', 'SEARCH', 'SEARCH-SELF', 'SEARCH-TARGET', 'SECTION', - 'SECURITY-POLICY', 'SEEK', 'SELECT', 'SELECTABLE', 'SELECT-ALL', 'SELECTED', - 'SELECT-FOCUSED-ROW', 'SELECTION', 'SELECTION-END', 'SELECTION-LIST', - 'SELECTION-START', 'SELECTION-TEXT', 'SELECT-NEXT-ROW', 'SELECT-PREV-ROW', - 'SELECT-ROW', 'SELF', 'SEND', 'send-sql-statement', 'send-sql', 'SENSITIVE', - 'SEPARATE-CONNECTION', 'SEPARATOR-FGCOLOR', 'SEPARATORS', 'SERVER', - 'SERVER-CONNECTION-BOUND', 'SERVER-CONNECTION-BOUND-REQUEST', - 'SERVER-CONNECTION-CONTEXT', 'SERVER-CONNECTION-ID', - 'SERVER-OPERATING-MODE', 'SESSION', 'SESSION-ID', 'SET', 'SET-APPL-CONTEXT', - 'SET-ATTR-CALL-TYPE', 'SET-ATTRIBUTE-NODE', 'SET-BLUE-VALUE', 'SET-BLUE', - 'SET-BLUE-', 'SET-BLUE-V', 'SET-BLUE-VA', 'SET-BLUE-VAL', 'SET-BLUE-VALU', - 'SET-BREAK', 'SET-BUFFERS', 'SET-CALLBACK', 'SET-CLIENT', 'SET-COMMIT', - 'SET-CONTENTS', 'SET-CURRENT-VALUE', 'SET-DB-CLIENT', 'SET-DYNAMIC', - 'SET-EVENT-MANAGER-OPTION', 'SET-GREEN-VALUE', 'SET-GREEN', 'SET-GREEN-', - 'SET-GREEN-V', 'SET-GREEN-VA', 'SET-GREEN-VAL', 'SET-GREEN-VALU', - 'SET-INPUT-SOURCE', 'SET-OPTION', 'SET-OUTPUT-DESTINATION', 'SET-PARAMETER', - 'SET-POINTER-VALUE', 'SET-PROPERTY', 'SET-RED-VALUE', 'SET-RED', 'SET-RED-', - 'SET-RED-V', 'SET-RED-VA', 'SET-RED-VAL', 'SET-RED-VALU', - 'SET-REPOSITIONED-ROW', 'SET-RGB-VALUE', 'SET-ROLLBACK', 'SET-SELECTION', - 'SET-SIZE', 'SET-SORT-ARROW', 'SETUSERID', 'SETUSER', 'SETUSERI', - 'SET-WAIT-STATE', 'SHA1-DIGEST', 'SHARED', 'SHARE-LOCK', 'SHARE', 'SHARE-', - 'SHARE-L', 'SHARE-LO', 'SHARE-LOC', 'SHOW-IN-TASKBAR', 'SHOW-STATS', - 'SHOW-STAT', 'SIDE-LABEL-HANDLE', 'SIDE-LABEL-H', 'SIDE-LABEL-HA', - 'SIDE-LABEL-HAN', 'SIDE-LABEL-HAND', 'SIDE-LABEL-HANDL', 'SIDE-LABELS', - 'SIDE-LAB', 'SIDE-LABE', 'SIDE-LABEL', 'SILENT', 'SIMPLE', 'SINGLE', 'SIZE', - 'SIZE-CHARS', 'SIZE-C', 'SIZE-CH', 'SIZE-CHA', 'SIZE-CHAR', 'SIZE-PIXELS', - 
'SIZE-P', 'SIZE-PI', 'SIZE-PIX', 'SIZE-PIXE', 'SIZE-PIXEL', 'SKIP', - 'SKIP-DELETED-RECORD', 'SLIDER', 'SMALL-ICON', 'SMALLINT', 'SMALL-TITLE', - 'SOME', 'SORT', 'SORT-ASCENDING', 'SORT-NUMBER', 'SOURCE', - 'SOURCE-PROCEDURE', 'SPACE', 'SQL', 'SQRT', 'SSL-SERVER-NAME', 'STANDALONE', - 'START', 'START-DOCUMENT', 'START-ELEMENT', 'START-MOVE', 'START-RESIZE', - 'START-ROW-RESIZE', 'STATE-DETAIL', 'STATIC', 'STATUS', 'STATUS-AREA', - 'STATUS-AREA-FONT', 'STDCALL', 'STOP', 'STOP-PARSING', 'STOPPED', 'STOPPE', - 'STORED-PROCEDURE', 'STORED-PROC', 'STORED-PROCE', 'STORED-PROCED', - 'STORED-PROCEDU', 'STORED-PROCEDUR', 'STREAM', 'STREAM-HANDLE', 'STREAM-IO', - 'STRETCH-TO-FIT', 'STRICT', 'STRING', 'STRING-VALUE', 'STRING-XREF', - 'SUB-AVERAGE', 'SUB-AVE', 'SUB-AVER', 'SUB-AVERA', 'SUB-AVERAG', - 'SUB-COUNT', 'SUB-MAXIMUM', 'SUM-MAX', 'SUM-MAXI', 'SUM-MAXIM', - 'SUM-MAXIMU', 'SUB-MENU', 'SUBSUB-', 'MINIMUM', 'SUB-MIN', 'SUBSCRIBE', - 'SUBSTITUTE', 'SUBST', 'SUBSTI', 'SUBSTIT', 'SUBSTITU', 'SUBSTITUT', - 'SUBSTRING', 'SUBSTR', 'SUBSTRI', 'SUBSTRIN', 'SUB-TOTAL', 'SUBTYPE', 'SUM', - 'SUPER', 'SUPER-PROCEDURES', 'SUPPRESS-NAMESPACE-PROCESSING', - 'SUPPRESS-WARNINGS', 'SUPPRESS-W', 'SUPPRESS-WA', 'SUPPRESS-WAR', - 'SUPPRESS-WARN', 'SUPPRESS-WARNI', 'SUPPRESS-WARNIN', 'SUPPRESS-WARNING', - 'SYMMETRIC-ENCRYPTION-ALGORITHM', 'SYMMETRIC-ENCRYPTION-IV', - 'SYMMETRIC-ENCRYPTION-KEY', 'SYMMETRIC-SUPPORT', 'SYSTEM-ALERT-BOXES', - 'SYSTEM-ALERT', 'SYSTEM-ALERT-', 'SYSTEM-ALERT-B', 'SYSTEM-ALERT-BO', - 'SYSTEM-ALERT-BOX', 'SYSTEM-ALERT-BOXE', 'SYSTEM-DIALOG', 'SYSTEM-HELP', - 'SYSTEM-ID', 'TABLE', 'TABLE-HANDLE', 'TABLE-NUMBER', 'TAB-POSITION', - 'TAB-STOP', 'TARGET', 'TARGET-PROCEDURE', 'TEMP-DIRECTORY', 'TEMP-DIR', - 'TEMP-DIRE', 'TEMP-DIREC', 'TEMP-DIRECT', 'TEMP-DIRECTO', 'TEMP-DIRECTOR', - 'TEMP-TABLE', 'TEMP-TABLE-PREPARE', 'TERM', 'TERMINAL', 'TERM', 'TERMI', - 'TERMIN', 'TERMINA', 'TERMINATE', 'TEXT', 'TEXT-CURSOR', 'TEXT-SEG-GROW', - 'TEXT-SELECTED', 'THEN', 'THIS-OBJECT', 
'THIS-PROCEDURE', 'THREE-D', - 'THROW', 'THROUGH', 'THRU', 'TIC-MARKS', 'TIME', 'TIME-SOURCE', 'TITLE', - 'TITLE-BGCOLOR', 'TITLE-BGC', 'TITLE-BGCO', 'TITLE-BGCOL', 'TITLE-BGCOLO', - 'TITLE-DCOLOR', 'TITLE-DC', 'TITLE-DCO', 'TITLE-DCOL', 'TITLE-DCOLO', - 'TITLE-FGCOLOR', 'TITLE-FGC', 'TITLE-FGCO', 'TITLE-FGCOL', 'TITLE-FGCOLO', - 'TITLE-FONT', 'TITLE-FO', 'TITLE-FON', 'TO', 'TODAY', 'TOGGLE-BOX', - 'TOOLTIP', 'TOOLTIPS', 'TOPIC', 'TOP-NAV-QUERY', 'TOP-ONLY', 'TO-ROWID', - 'TOTAL', 'TRAILING', 'TRANS', 'TRANSACTION', 'TRANSACTION-MODE', - 'TRANS-INIT-PROCEDURE', 'TRANSPARENT', 'TRIGGER', 'TRIGGERS', 'TRIM', - 'TRUE', 'TRUNCATE', 'TRUNC', 'TRUNCA', 'TRUNCAT', 'TYPE', 'TYPE-OF', - 'UNBOX', 'UNBUFFERED', 'UNBUFF', 'UNBUFFE', 'UNBUFFER', 'UNBUFFERE', - 'UNDERLINE', 'UNDERL', 'UNDERLI', 'UNDERLIN', 'UNDO', 'UNFORMATTED', - 'UNFORM', 'UNFORMA', 'UNFORMAT', 'UNFORMATT', 'UNFORMATTE', 'UNION', - 'UNIQUE', 'UNIQUE-ID', 'UNIQUE-MATCH', 'UNIX', 'UNLESS-HIDDEN', 'UNLOAD', - 'UNSIGNED-LONG', 'UNSUBSCRIBE', 'UP', 'UPDATE', 'UPDATE-ATTRIBUTE', 'URL', - 'URL-DECODE', 'URL-ENCODE', 'URL-PASSWORD', 'URL-USERID', 'USE', - 'USE-DICT-EXPS', 'USE-FILENAME', 'USE-INDEX', 'USER', 'USE-REVVIDEO', - 'USERID', 'USER-ID', 'USE-TEXT', 'USE-UNDERLINE', 'USE-WIDGET-POOL', - 'USING', 'V6DISPLAY', 'V6FRAME', 'VALIDATE', 'VALIDATE-EXPRESSION', - 'VALIDATE-MESSAGE', 'VALIDATE-SEAL', 'VALIDATION-ENABLED', 'VALID-EVENT', - 'VALID-HANDLE', 'VALID-OBJECT', 'VALUE', 'VALUE-CHANGED', 'VALUES', - 'VARIABLE', 'VAR', 'VARI', 'VARIA', 'VARIAB', 'VARIABL', 'VERBOSE', - 'VERSION', 'VERTICAL', 'VERT', 'VERTI', 'VERTIC', 'VERTICA', 'VIEW', - 'VIEW-AS', 'VIEW-FIRST-COLUMN-ON-REOPEN', 'VIRTUAL-HEIGHT-CHARS', - 'VIRTUAL-HEIGHT', 'VIRTUAL-HEIGHT-', 'VIRTUAL-HEIGHT-C', - 'VIRTUAL-HEIGHT-CH', 'VIRTUAL-HEIGHT-CHA', 'VIRTUAL-HEIGHT-CHAR', - 'VIRTUAL-HEIGHT-PIXELS', 'VIRTUAL-HEIGHT-P', 'VIRTUAL-HEIGHT-PI', - 'VIRTUAL-HEIGHT-PIX', 'VIRTUAL-HEIGHT-PIXE', 'VIRTUAL-HEIGHT-PIXEL', - 'VIRTUAL-WIDTH-CHARS', 'VIRTUAL-WIDTH', 
'VIRTUAL-WIDTH-', 'VIRTUAL-WIDTH-C', - 'VIRTUAL-WIDTH-CH', 'VIRTUAL-WIDTH-CHA', 'VIRTUAL-WIDTH-CHAR', - 'VIRTUAL-WIDTH-PIXELS', 'VIRTUAL-WIDTH-P', 'VIRTUAL-WIDTH-PI', - 'VIRTUAL-WIDTH-PIX', 'VIRTUAL-WIDTH-PIXE', 'VIRTUAL-WIDTH-PIXEL', 'VISIBLE', - 'VOID', 'WAIT', 'WAIT-FOR', 'WARNING', 'WEB-CONTEXT', 'WEEKDAY', 'WHEN', - 'WHERE', 'WHILE', 'WIDGET', 'WIDGET-ENTER', 'WIDGET-E', 'WIDGET-EN', - 'WIDGET-ENT', 'WIDGET-ENTE', 'WIDGET-ID', 'WIDGET-LEAVE', 'WIDGET-L', - 'WIDGET-LE', 'WIDGET-LEA', 'WIDGET-LEAV', 'WIDGET-POOL', 'WIDTH', - 'WIDTH-CHARS', 'WIDTH', 'WIDTH-', 'WIDTH-C', 'WIDTH-CH', 'WIDTH-CHA', - 'WIDTH-CHAR', 'WIDTH-PIXELS', 'WIDTH-P', 'WIDTH-PI', 'WIDTH-PIX', - 'WIDTH-PIXE', 'WIDTH-PIXEL', 'WINDOW', 'WINDOW-MAXIMIZED', 'WINDOW-MAXIM', - 'WINDOW-MAXIMI', 'WINDOW-MAXIMIZ', 'WINDOW-MAXIMIZE', 'WINDOW-MINIMIZED', - 'WINDOW-MINIM', 'WINDOW-MINIMI', 'WINDOW-MINIMIZ', 'WINDOW-MINIMIZE', - 'WINDOW-NAME', 'WINDOW-NORMAL', 'WINDOW-STATE', 'WINDOW-STA', 'WINDOW-STAT', - 'WINDOW-SYSTEM', 'WITH', 'WORD-INDEX', 'WORD-WRAP', - 'WORK-AREA-HEIGHT-PIXELS', 'WORK-AREA-WIDTH-PIXELS', 'WORK-AREA-X', - 'WORK-AREA-Y', 'WORKFILE', 'WORK-TABLE', 'WORK-TAB', 'WORK-TABL', 'WRITE', - 'WRITE-CDATA', 'WRITE-CHARACTERS', 'WRITE-COMMENT', 'WRITE-DATA-ELEMENT', - 'WRITE-EMPTY-ELEMENT', 'WRITE-ENTITY-REF', 'WRITE-EXTERNAL-DTD', - 'WRITE-FRAGMENT', 'WRITE-MESSAGE', 'WRITE-PROCESSING-INSTRUCTION', - 'WRITE-STATUS', 'WRITE-XML', 'WRITE-XMLSCHEMA', 'X', 'XCODE', - 'XML-DATA-TYPE', 'XML-NODE-TYPE', 'XML-SCHEMA-PATH', - 'XML-SUPPRESS-NAMESPACE-PROCESSING', 'X-OF', 'XREF', 'XREF-XML', 'Y', - 'YEAR', 'YEAR-OFFSET', 'YES', 'YES-NO', 'YES-NO-CANCEL', 'Y-OF' -] diff --git a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/_postgres_builtins.py b/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/_postgres_builtins.py deleted file mode 100644 index b232213..0000000 --- a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/_postgres_builtins.py +++ 
/dev/null @@ -1,233 +0,0 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers._postgres_builtins - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - Self-updating data files for PostgreSQL lexer. - - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import re -import urllib - -# One man's constant is another man's variable. -SOURCE_URL = 'https://github.com/postgres/postgres/raw/master' -KEYWORDS_URL = SOURCE_URL + '/doc/src/sgml/keywords.sgml' -DATATYPES_URL = SOURCE_URL + '/doc/src/sgml/datatype.sgml' - -def update_myself(): - data_file = list(fetch(DATATYPES_URL)) - datatypes = parse_datatypes(data_file) - pseudos = parse_pseudos(data_file) - - keywords = parse_keywords(fetch(KEYWORDS_URL)) - update_consts(__file__, 'DATATYPES', datatypes) - update_consts(__file__, 'PSEUDO_TYPES', pseudos) - update_consts(__file__, 'KEYWORDS', keywords) - -def parse_keywords(f): - kw = [] - for m in re.finditer( - r'\s*([^<]+)\s*' - r'([^<]+)', f.read()): - kw.append(m.group(1)) - - if not kw: - raise ValueError('no keyword found') - - kw.sort() - return kw - -def parse_datatypes(f): - dt = set() - for line in f: - if '' not in line: - continue - - # Parse a string such as - # time [ (p) ] [ without time zone ] - # into types "time" and "without time zone" - - # remove all the tags - line = re.sub("[^<]+", "", line) - line = re.sub("<[^>]+>", "", line) - - # Drop the parts containing braces - for tmp in [t for tmp in line.split('[') - for t in tmp.split(']') if "(" not in t]: - for t in tmp.split(','): - t = t.strip() - if not t: continue - dt.add(" ".join(t.split())) - - dt = list(dt) - dt.sort() - return dt - -def parse_pseudos(f): - dt = [] - re_start = re.compile(r'\s*') - re_entry = re.compile(r'\s*([^<]+)') - re_end = re.compile(r'\s*
') - - f = iter(f) - for line in f: - if re_start.match(line) is not None: - break - else: - raise ValueError('pseudo datatypes table not found') - - for line in f: - m = re_entry.match(line) - if m is not None: - dt.append(m.group(1)) - - if re_end.match(line) is not None: - break - else: - raise ValueError('end of pseudo datatypes table not found') - - if not dt: - raise ValueError('pseudo datatypes not found') - - return dt - -def fetch(url): - return urllib.urlopen(url) - -def update_consts(filename, constname, content): - f = open(filename) - lines = f.readlines() - f.close() - - # Line to start/end inserting - re_start = re.compile(r'^%s\s*=\s*\[\s*$' % constname) - re_end = re.compile(r'^\s*\]\s*$') - start = [ n for n, l in enumerate(lines) if re_start.match(l) ] - if not start: - raise ValueError("couldn't find line containing '%s = ['" % constname) - if len(start) > 1: - raise ValueError("too many lines containing '%s = ['" % constname) - start = start[0] + 1 - - end = [ n for n, l in enumerate(lines) if n >= start and re_end.match(l) ] - if not end: - raise ValueError("couldn't find line containing ']' after %s " % constname) - end = end[0] - - # Pack the new content in lines not too long - content = [repr(item) for item in content ] - new_lines = [[]] - for item in content: - if sum(map(len, new_lines[-1])) + 2 * len(new_lines[-1]) + len(item) + 4 > 75: - new_lines.append([]) - new_lines[-1].append(item) - - lines[start:end] = [ " %s,\n" % ", ".join(items) for items in new_lines ] - - f = open(filename, 'w') - f.write(''.join(lines)) - f.close() - - -# Autogenerated: please edit them if you like wasting your time. 
- -KEYWORDS = [ - 'ABORT', 'ABSOLUTE', 'ACCESS', 'ACTION', 'ADD', 'ADMIN', 'AFTER', - 'AGGREGATE', 'ALL', 'ALSO', 'ALTER', 'ALWAYS', 'ANALYSE', 'ANALYZE', - 'AND', 'ANY', 'ARRAY', 'AS', 'ASC', 'ASSERTION', 'ASSIGNMENT', - 'ASYMMETRIC', 'AT', 'ATTRIBUTE', 'AUTHORIZATION', 'BACKWARD', 'BEFORE', - 'BEGIN', 'BETWEEN', 'BIGINT', 'BINARY', 'BIT', 'BOOLEAN', 'BOTH', 'BY', - 'CACHE', 'CALLED', 'CASCADE', 'CASCADED', 'CASE', 'CAST', 'CATALOG', - 'CHAIN', 'CHAR', 'CHARACTER', 'CHARACTERISTICS', 'CHECK', 'CHECKPOINT', - 'CLASS', 'CLOSE', 'CLUSTER', 'COALESCE', 'COLLATE', 'COLLATION', - 'COLUMN', 'COMMENT', 'COMMENTS', 'COMMIT', 'COMMITTED', 'CONCURRENTLY', - 'CONFIGURATION', 'CONNECTION', 'CONSTRAINT', 'CONSTRAINTS', 'CONTENT', - 'CONTINUE', 'CONVERSION', 'COPY', 'COST', 'CREATE', 'CROSS', 'CSV', - 'CURRENT', 'CURRENT_CATALOG', 'CURRENT_DATE', 'CURRENT_ROLE', - 'CURRENT_SCHEMA', 'CURRENT_TIME', 'CURRENT_TIMESTAMP', 'CURRENT_USER', - 'CURSOR', 'CYCLE', 'DATA', 'DATABASE', 'DAY', 'DEALLOCATE', 'DEC', - 'DECIMAL', 'DECLARE', 'DEFAULT', 'DEFAULTS', 'DEFERRABLE', 'DEFERRED', - 'DEFINER', 'DELETE', 'DELIMITER', 'DELIMITERS', 'DESC', 'DICTIONARY', - 'DISABLE', 'DISCARD', 'DISTINCT', 'DO', 'DOCUMENT', 'DOMAIN', 'DOUBLE', - 'DROP', 'EACH', 'ELSE', 'ENABLE', 'ENCODING', 'ENCRYPTED', 'END', - 'ENUM', 'ESCAPE', 'EXCEPT', 'EXCLUDE', 'EXCLUDING', 'EXCLUSIVE', - 'EXECUTE', 'EXISTS', 'EXPLAIN', 'EXTENSION', 'EXTERNAL', 'EXTRACT', - 'FALSE', 'FAMILY', 'FETCH', 'FIRST', 'FLOAT', 'FOLLOWING', 'FOR', - 'FORCE', 'FOREIGN', 'FORWARD', 'FREEZE', 'FROM', 'FULL', 'FUNCTION', - 'FUNCTIONS', 'GLOBAL', 'GRANT', 'GRANTED', 'GREATEST', 'GROUP', - 'HANDLER', 'HAVING', 'HEADER', 'HOLD', 'HOUR', 'IDENTITY', 'IF', - 'ILIKE', 'IMMEDIATE', 'IMMUTABLE', 'IMPLICIT', 'IN', 'INCLUDING', - 'INCREMENT', 'INDEX', 'INDEXES', 'INHERIT', 'INHERITS', 'INITIALLY', - 'INLINE', 'INNER', 'INOUT', 'INPUT', 'INSENSITIVE', 'INSERT', 'INSTEAD', - 'INT', 'INTEGER', 'INTERSECT', 'INTERVAL', 'INTO', 'INVOKER', 'IS', - 'ISNULL', 
'ISOLATION', 'JOIN', 'KEY', 'LABEL', 'LANGUAGE', 'LARGE', - 'LAST', 'LC_COLLATE', 'LC_CTYPE', 'LEADING', 'LEAST', 'LEFT', 'LEVEL', - 'LIKE', 'LIMIT', 'LISTEN', 'LOAD', 'LOCAL', 'LOCALTIME', - 'LOCALTIMESTAMP', 'LOCATION', 'LOCK', 'MAPPING', 'MATCH', 'MAXVALUE', - 'MINUTE', 'MINVALUE', 'MODE', 'MONTH', 'MOVE', 'NAME', 'NAMES', - 'NATIONAL', 'NATURAL', 'NCHAR', 'NEXT', 'NO', 'NONE', 'NOT', 'NOTHING', - 'NOTIFY', 'NOTNULL', 'NOWAIT', 'NULL', 'NULLIF', 'NULLS', 'NUMERIC', - 'OBJECT', 'OF', 'OFF', 'OFFSET', 'OIDS', 'ON', 'ONLY', 'OPERATOR', - 'OPTION', 'OPTIONS', 'OR', 'ORDER', 'OUT', 'OUTER', 'OVER', 'OVERLAPS', - 'OVERLAY', 'OWNED', 'OWNER', 'PARSER', 'PARTIAL', 'PARTITION', - 'PASSING', 'PASSWORD', 'PLACING', 'PLANS', 'POSITION', 'PRECEDING', - 'PRECISION', 'PREPARE', 'PREPARED', 'PRESERVE', 'PRIMARY', 'PRIOR', - 'PRIVILEGES', 'PROCEDURAL', 'PROCEDURE', 'QUOTE', 'RANGE', 'READ', - 'REAL', 'REASSIGN', 'RECHECK', 'RECURSIVE', 'REF', 'REFERENCES', - 'REINDEX', 'RELATIVE', 'RELEASE', 'RENAME', 'REPEATABLE', 'REPLACE', - 'REPLICA', 'RESET', 'RESTART', 'RESTRICT', 'RETURNING', 'RETURNS', - 'REVOKE', 'RIGHT', 'ROLE', 'ROLLBACK', 'ROW', 'ROWS', 'RULE', - 'SAVEPOINT', 'SCHEMA', 'SCROLL', 'SEARCH', 'SECOND', 'SECURITY', - 'SELECT', 'SEQUENCE', 'SEQUENCES', 'SERIALIZABLE', 'SERVER', 'SESSION', - 'SESSION_USER', 'SET', 'SETOF', 'SHARE', 'SHOW', 'SIMILAR', 'SIMPLE', - 'SMALLINT', 'SOME', 'STABLE', 'STANDALONE', 'START', 'STATEMENT', - 'STATISTICS', 'STDIN', 'STDOUT', 'STORAGE', 'STRICT', 'STRIP', - 'SUBSTRING', 'SYMMETRIC', 'SYSID', 'SYSTEM', 'TABLE', 'TABLES', - 'TABLESPACE', 'TEMP', 'TEMPLATE', 'TEMPORARY', 'TEXT', 'THEN', 'TIME', - 'TIMESTAMP', 'TO', 'TRAILING', 'TRANSACTION', 'TREAT', 'TRIGGER', - 'TRIM', 'TRUE', 'TRUNCATE', 'TRUSTED', 'TYPE', 'UNBOUNDED', - 'UNCOMMITTED', 'UNENCRYPTED', 'UNION', 'UNIQUE', 'UNKNOWN', 'UNLISTEN', - 'UNLOGGED', 'UNTIL', 'UPDATE', 'USER', 'USING', 'VACUUM', 'VALID', - 'VALIDATE', 'VALIDATOR', 'VALUE', 'VALUES', 'VARCHAR', 'VARIADIC', - 
'VARYING', 'VERBOSE', 'VERSION', 'VIEW', 'VOLATILE', 'WHEN', 'WHERE', - 'WHITESPACE', 'WINDOW', 'WITH', 'WITHOUT', 'WORK', 'WRAPPER', 'WRITE', - 'XML', 'XMLATTRIBUTES', 'XMLCONCAT', 'XMLELEMENT', 'XMLEXISTS', - 'XMLFOREST', 'XMLPARSE', 'XMLPI', 'XMLROOT', 'XMLSERIALIZE', 'YEAR', - 'YES', 'ZONE', - ] - -DATATYPES = [ - 'bigint', 'bigserial', 'bit', 'bit varying', 'bool', 'boolean', 'box', - 'bytea', 'char', 'character', 'character varying', 'cidr', 'circle', - 'date', 'decimal', 'double precision', 'float4', 'float8', 'inet', - 'int', 'int2', 'int4', 'int8', 'integer', 'interval', 'json', 'line', - 'lseg', 'macaddr', 'money', 'numeric', 'path', 'point', 'polygon', - 'real', 'serial', 'serial2', 'serial4', 'serial8', 'smallint', - 'smallserial', 'text', 'time', 'timestamp', 'timestamptz', 'timetz', - 'tsquery', 'tsvector', 'txid_snapshot', 'uuid', 'varbit', 'varchar', - 'with time zone', 'without time zone', 'xml', - ] - -PSEUDO_TYPES = [ - 'any', 'anyelement', 'anyarray', 'anynonarray', 'anyenum', 'anyrange', - 'cstring', 'internal', 'language_handler', 'fdw_handler', 'record', - 'trigger', 'void', 'opaque', - ] - -# Remove 'trigger' from types -PSEUDO_TYPES = sorted(set(PSEUDO_TYPES) - set(map(str.lower, KEYWORDS))) - -PLPGSQL_KEYWORDS = [ - 'ALIAS', 'CONSTANT', 'DIAGNOSTICS', 'ELSIF', 'EXCEPTION', 'EXIT', - 'FOREACH', 'GET', 'LOOP', 'NOTICE', 'OPEN', 'PERFORM', 'QUERY', 'RAISE', - 'RETURN', 'REVERSE', 'SQLSTATE', 'WHILE', - ] - -if __name__ == '__main__': - update_myself() - diff --git a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/_scilab_builtins.py b/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/_scilab_builtins.py deleted file mode 100644 index ed0dc81..0000000 --- a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/_scilab_builtins.py +++ /dev/null @@ -1,40 +0,0 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers._scilab_builtins - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - Builtin list for the 
ScilabLexer. - - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -# These lists are generated automatically. -# Run the following in a Scilab script: -# -# varType=["functions", "commands", "macros", "variables" ]; -# fd = mopen('list.txt','wt'); -# -# for j=1:size(varType,"*") -# myStr=""; -# a=completion("",varType(j)); -# myStr=varType(j)+"_kw = ["; -# for i=1:size(a,"*") -# myStr = myStr + """" + a(i) + """"; -# if size(a,"*") <> i then -# myStr = myStr + ","; end -# end -# myStr = myStr + "]"; -# mputl(myStr,fd); -# end -# mclose(fd); -# -# Then replace "$" by "\\$" manually. - -functions_kw = ["%XMLAttr_6","%XMLAttr_e","%XMLAttr_i_XMLElem","%XMLAttr_length","%XMLAttr_p","%XMLAttr_size","%XMLDoc_6","%XMLDoc_e","%XMLDoc_i_XMLList","%XMLDoc_p","%XMLElem_6","%XMLElem_e","%XMLElem_i_XMLDoc","%XMLElem_i_XMLElem","%XMLElem_i_XMLList","%XMLElem_p","%XMLList_6","%XMLList_e","%XMLList_i_XMLElem","%XMLList_i_XMLList","%XMLList_length","%XMLList_p","%XMLList_size","%XMLNs_6","%XMLNs_e","%XMLNs_i_XMLElem","%XMLNs_p","%XMLSet_6","%XMLSet_e","%XMLSet_length","%XMLSet_p","%XMLSet_size","%XMLValid_p","%b_i_XMLList","%c_i_XMLAttr","%c_i_XMLDoc","%c_i_XMLElem","%c_i_XMLList","%ce_i_XMLList","%fptr_i_XMLList","%h_i_XMLList","%hm_i_XMLList","%i_abs","%i_cumprod","%i_cumsum","%i_diag","%i_i_XMLList","%i_matrix","%i_max","%i_maxi","%i_min","%i_mini","%i_mput","%i_p","%i_prod","%i_sum","%i_tril","%i_triu","%ip_i_XMLList","%l_i_XMLList","%lss_i_XMLList","%mc_i_XMLList","%msp_full","%msp_i_XMLList","%msp_spget","%p_i_XMLList","%ptr_i_XMLList","%r_i_XMLList","%s_i_XMLList","%sp_i_XMLList","%spb_i_XMLList","%st_i_XMLList","Calendar","ClipBoard","Matplot","Matplot1","PlaySound","TCL_DeleteInterp","TCL_DoOneEvent","TCL_EvalFile","TCL_EvalStr","TCL_ExistArray","TCL_ExistInterp","TCL_ExistVar","TCL_GetVar","TCL_GetVersion","TCL_SetVar","TCL_UnsetVar","TCL_UpVar","_","_code2str","_str2code","about","abs","acos","addcb","addf"
,"addhistory","addinter","amell","and","argn","arl2_ius","ascii","asin","atan","backslash","balanc","banner","base2dec","basename","bdiag","beep","besselh","besseli","besselj","besselk","bessely","beta","bezout","bfinit","blkfc1i","blkslvi","bool2s","browsehistory","browsevar","bsplin3val","buildDocv2","buildouttb","bvode","c_link","calerf","call","callblk","captions","cd","cdfbet","cdfbin","cdfchi","cdfchn","cdff","cdffnc","cdfgam","cdfnbn","cdfnor","cdfpoi","cdft","ceil","champ","champ1","chdir","chol","clc","clean","clear","clear_pixmap","clearfun","clearglobal","closeEditor","closeXcos","code2str","coeff","comp","completion","conj","contour2di","contr","conv2","convstr","copy","copyfile","corr","cos","coserror","createdir","cshep2d","ctree2","ctree3","ctree4","cumprod","cumsum","curblock","curblockc","dasrt","dassl","data2sig","debug","dec2base","deff","definedfields","degree","delbpt","delete","deletefile","delip","delmenu","det","dgettext","dhinf","diag","diary","diffobjs","disp","dispbpt","displayhistory","disposefftwlibrary","dlgamma","dnaupd","dneupd","double","draw","drawaxis","drawlater","drawnow","dsaupd","dsearch","dseupd","duplicate","editor","editvar","emptystr","end_scicosim","ereduc","errcatch","errclear","error","eval_cshep2d","exec","execstr","exists","exit","exp","expm","exportUI","export_to_hdf5","eye","fadj2sp","fec","feval","fft","fftw","fftw_flags","fftw_forget_wisdom","fftwlibraryisloaded","file","filebrowser","fileext","fileinfo","fileparts","filesep","find","findBD","findfiles","floor","format","fort","fprintfMat","freq","frexp","fromc","fromjava","fscanfMat","fsolve","fstair","full","fullpath","funcprot","funptr","gamma","gammaln","geom3d","get","get_absolute_file_path","get_fftw_wisdom","getblocklabel","getcallbackobject","getdate","getdebuginfo","getdefaultlanguage","getdrives","getdynlibext","getenv","getfield","gethistory","gethistoryfile","getinstalledlookandfeels","getio","getlanguage","getlongpathname","getlookandfeel","getmd5","ge
tmemory","getmodules","getos","getpid","getrelativefilename","getscicosvars","getscilabmode","getshortpathname","gettext","getvariablesonstack","getversion","glist","global","glue","grand","grayplot","grep","gsort","gstacksize","havewindow","helpbrowser","hess","hinf","historymanager","historysize","host","iconvert","iconvert","ieee","ilib_verbose","imag","impl","import_from_hdf5","imult","inpnvi","int","int16","int2d","int32","int3d","int8","interp","interp2d","interp3d","intg","intppty","inttype","inv","is_handle_valid","isalphanum","isascii","isdef","isdigit","isdir","isequal","isequalbitwise","iserror","isfile","isglobal","isletter","isreal","iswaitingforinput","javaclasspath","javalibrarypath","kron","lasterror","ldiv","ldivf","legendre","length","lib","librarieslist","libraryinfo","linear_interpn","lines","link","linmeq","list","load","loadScicos","loadfftwlibrary","loadhistory","log","log1p","lsq","lsq_splin","lsqrsolve","lsslist","lstcat","lstsize","ltitr","lu","ludel","lufact","luget","lusolve","macr2lst","macr2tree","matfile_close","matfile_listvar","matfile_open","matfile_varreadnext","matfile_varwrite","matrix","max","maxfiles","mclearerr","mclose","meof","merror","messagebox","mfprintf","mfscanf","mget","mgeti","mgetl","mgetstr","min","mlist","mode","model2blk","mopen","move","movefile","mprintf","mput","mputl","mputstr","mscanf","mseek","msprintf","msscanf","mtell","mtlb_mode","mtlb_sparse","mucomp","mulf","nearfloat","newaxes","newest","newfun","nnz","notify","number_properties","ode","odedc","ones","opentk","optim","or","ordmmd","parallel_concurrency","parallel_run","param3d","param3d1","part","pathconvert","pathsep","phase_simulation","plot2d","plot2d1","plot2d2","plot2d3","plot2d4","plot3d","plot3d1","pointer_xproperty","poly","ppol","pppdiv","predef","print","printf","printfigure","printsetupbox","prod","progressionbar","prompt","pwd","qld","qp_solve","qr","raise_window","rand","rankqr","rat","rcond","rdivf","read","read4b","readb","readgateway","
readmps","real","realtime","realtimeinit","regexp","relocate_handle","remez","removedir","removelinehistory","res_with_prec","resethistory","residu","resume","return","ricc","ricc_old","rlist","roots","rotate_axes","round","rpem","rtitr","rubberbox","save","saveafterncommands","saveconsecutivecommands","savehistory","schur","sci_haltscicos","sci_tree2","sci_tree3","sci_tree4","sciargs","scicos_debug","scicos_debug_count","scicos_time","scicosim","scinotes","sctree","semidef","set","set_blockerror","set_fftw_wisdom","set_xproperty","setbpt","setdefaultlanguage","setenv","setfield","sethistoryfile","setlanguage","setlookandfeel","setmenu","sfact","sfinit","show_pixmap","show_window","showalluimenushandles","sident","sig2data","sign","simp","simp_mode","sin","size","slash","sleep","sorder","sparse","spchol","spcompack","spec","spget","splin","splin2d","splin3d","spones","sprintf","sqrt","stacksize","str2code","strcat","strchr","strcmp","strcspn","strindex","string","stringbox","stripblanks","strncpy","strrchr","strrev","strsplit","strspn","strstr","strsubst","strtod","strtok","subf","sum","svd","swap_handles","symfcti","syredi","system_getproperty","system_setproperty","ta2lpd","tan","taucs_chdel","taucs_chfact","taucs_chget","taucs_chinfo","taucs_chsolve","tempname","testmatrix","timer","tlist","tohome","tokens","toolbar","toprint","tr_zer","tril","triu","type","typename","uiDisplayTree","uicontextmenu","uicontrol","uigetcolor","uigetdir","uigetfile","uigetfont","uimenu","uint16","uint32","uint8","uipopup","uiputfile","uiwait","ulink","umf_ludel","umf_lufact","umf_luget","umf_luinfo","umf_lusolve","umfpack","unglue","unix","unsetmenu","unzoom","updatebrowsevar","usecanvas","user","var2vec","varn","vec2var","waitbar","warnBlockByUID","warning","what","where","whereis","who","winsid","with_embedded_jre","with_module","writb","write","write4b","x_choose","x_choose_modeless","x_dialog","x_mdialog","xarc","xarcs","xarrows","xchange","xchoicesi","xclick","xcos","xcosAddTool
sMenu","xcosConfigureXmlFile","xcosDiagramToScilab","xcosPalCategoryAdd","xcosPalDelete","xcosPalDisable","xcosPalEnable","xcosPalGenerateIcon","xcosPalLoad","xcosPalMove","xcosUpdateBlock","xdel","xfarc","xfarcs","xfpoly","xfpolys","xfrect","xget","xgetech","xgetmouse","xgraduate","xgrid","xlfont","xls_open","xls_read","xmlAddNs","xmlAsNumber","xmlAsText","xmlDTD","xmlDelete","xmlDocument","xmlDump","xmlElement","xmlFormat","xmlGetNsByHref","xmlGetNsByPrefix","xmlGetOpenDocs","xmlIsValidObject","xmlNs","xmlRead","xmlReadStr","xmlRelaxNG","xmlRemove","xmlSchema","xmlSetAttributes","xmlValidate","xmlWrite","xmlXPath","xname","xpause","xpoly","xpolys","xrect","xrects","xs2bmp","xs2eps","xs2gif","xs2jpg","xs2pdf","xs2png","xs2ppm","xs2ps","xs2svg","xsegs","xset","xsetech","xstring","xstringb","xtitle","zeros","znaupd","zneupd","zoom_rect"] - -commands_kw = ["abort","apropos","break","case","catch","clc","clear","continue","do","else","elseif","end","endfunction","exit","for","function","help","if","pause","pwd","quit","resume","return","select","then","try","what","while","who"] - -macros_kw = 
["%0_i_st","%3d_i_h","%Block_xcosUpdateBlock","%TNELDER_p","%TNELDER_string","%TNMPLOT_p","%TNMPLOT_string","%TOPTIM_p","%TOPTIM_string","%TSIMPLEX_p","%TSIMPLEX_string","%_gsort","%_strsplit","%ar_p","%asn","%b_a_b","%b_a_s","%b_c_s","%b_c_spb","%b_cumprod","%b_cumsum","%b_d_s","%b_diag","%b_e","%b_f_s","%b_f_spb","%b_g_s","%b_g_spb","%b_h_s","%b_h_spb","%b_i_b","%b_i_ce","%b_i_h","%b_i_hm","%b_i_s","%b_i_sp","%b_i_spb","%b_i_st","%b_iconvert","%b_l_b","%b_l_s","%b_m_b","%b_m_s","%b_matrix","%b_n_hm","%b_o_hm","%b_p_s","%b_prod","%b_r_b","%b_r_s","%b_s_b","%b_s_s","%b_string","%b_sum","%b_tril","%b_triu","%b_x_b","%b_x_s","%c_a_c","%c_b_c","%c_b_s","%c_diag","%c_e","%c_eye","%c_f_s","%c_i_c","%c_i_ce","%c_i_h","%c_i_hm","%c_i_lss","%c_i_r","%c_i_s","%c_i_st","%c_matrix","%c_n_l","%c_n_st","%c_o_l","%c_o_st","%c_ones","%c_rand","%c_tril","%c_triu","%cblock_c_cblock","%cblock_c_s","%cblock_e","%cblock_f_cblock","%cblock_p","%cblock_size","%ce_6","%ce_c_ce","%ce_e","%ce_f_ce","%ce_i_ce","%ce_i_s","%ce_i_st","%ce_matrix","%ce_p","%ce_size","%ce_string","%ce_t","%champdat_i_h","%choose","%diagram_xcos","%dir_p","%fptr_i_st","%grayplot_i_h","%h_i_st","%hm_1_hm","%hm_1_s","%hm_2_hm","%hm_2_s","%hm_3_hm","%hm_3_s","%hm_4_hm","%hm_4_s","%hm_5","%hm_a_hm","%hm_a_r","%hm_a_s","%hm_abs","%hm_and","%hm_bool2s","%hm_c_hm","%hm_ceil","%hm_conj","%hm_cos","%hm_cumprod","%hm_cumsum","%hm_d_hm","%hm_d_s","%hm_degree","%hm_e","%hm_exp","%hm_f_hm","%hm_fft","%hm_find","%hm_floor","%hm_g_hm","%hm_h_hm","%hm_i_b","%hm_i_ce","%hm_i_hm","%hm_i_i","%hm_i_p","%hm_i_r","%hm_i_s","%hm_i_st","%hm_iconvert","%hm_imag","%hm_int","%hm_isnan","%hm_isreal","%hm_j_hm","%hm_j_s","%hm_k_hm","%hm_k_s","%hm_log","%hm_m_p","%hm_m_r","%hm_m_s","%hm_matrix","%hm_maxi","%hm_mean","%hm_median","%hm_mini","%hm_n_b","%hm_n_c","%hm_n_hm","%hm_n_i","%hm_n_p","%hm_n_s","%hm_o_b","%hm_o_c","%hm_o_hm","%hm_o_i","%hm_o_p","%hm_o_s","%hm_ones","%hm_or","%hm_p","%hm_prod","%hm_q_hm","%hm_r_s","%hm_rand","%hm_real","%h
m_round","%hm_s","%hm_s_hm","%hm_s_r","%hm_s_s","%hm_sign","%hm_sin","%hm_size","%hm_sqrt","%hm_st_deviation","%hm_string","%hm_sum","%hm_x_hm","%hm_x_p","%hm_x_s","%hm_zeros","%i_1_s","%i_2_s","%i_3_s","%i_4_s","%i_Matplot","%i_a_i","%i_a_s","%i_and","%i_ascii","%i_b_s","%i_bezout","%i_champ","%i_champ1","%i_contour","%i_contour2d","%i_d_i","%i_d_s","%i_e","%i_fft","%i_g_i","%i_gcd","%i_h_i","%i_i_ce","%i_i_h","%i_i_hm","%i_i_i","%i_i_s","%i_i_st","%i_j_i","%i_j_s","%i_l_s","%i_lcm","%i_length","%i_m_i","%i_m_s","%i_mfprintf","%i_mprintf","%i_msprintf","%i_n_s","%i_o_s","%i_or","%i_p_i","%i_p_s","%i_plot2d","%i_plot2d1","%i_plot2d2","%i_q_s","%i_r_i","%i_r_s","%i_round","%i_s_i","%i_s_s","%i_sign","%i_string","%i_x_i","%i_x_s","%ip_a_s","%ip_i_st","%ip_m_s","%ip_n_ip","%ip_o_ip","%ip_p","%ip_s_s","%ip_string","%k","%l_i_h","%l_i_s","%l_i_st","%l_isequal","%l_n_c","%l_n_l","%l_n_m","%l_n_p","%l_n_s","%l_n_st","%l_o_c","%l_o_l","%l_o_m","%l_o_p","%l_o_s","%l_o_st","%lss_a_lss","%lss_a_p","%lss_a_r","%lss_a_s","%lss_c_lss","%lss_c_p","%lss_c_r","%lss_c_s","%lss_e","%lss_eye","%lss_f_lss","%lss_f_p","%lss_f_r","%lss_f_s","%lss_i_ce","%lss_i_lss","%lss_i_p","%lss_i_r","%lss_i_s","%lss_i_st","%lss_inv","%lss_l_lss","%lss_l_p","%lss_l_r","%lss_l_s","%lss_m_lss","%lss_m_p","%lss_m_r","%lss_m_s","%lss_n_lss","%lss_n_p","%lss_n_r","%lss_n_s","%lss_norm","%lss_o_lss","%lss_o_p","%lss_o_r","%lss_o_s","%lss_ones","%lss_r_lss","%lss_r_p","%lss_r_r","%lss_r_s","%lss_rand","%lss_s","%lss_s_lss","%lss_s_p","%lss_s_r","%lss_s_s","%lss_size","%lss_t","%lss_v_lss","%lss_v_p","%lss_v_r","%lss_v_s","%lt_i_s","%m_n_l","%m_o_l","%mc_i_h","%mc_i_s","%mc_i_st","%mc_n_st","%mc_o_st","%mc_string","%mps_p","%mps_string","%msp_a_s","%msp_abs","%msp_e","%msp_find","%msp_i_s","%msp_i_st","%msp_length","%msp_m_s","%msp_maxi","%msp_n_msp","%msp_nnz","%msp_o_msp","%msp_p","%msp_sparse","%msp_spones","%msp_t","%p_a_lss","%p_a_r","%p_c_lss","%p_c_r","%p_cumprod","%p_cumsum","%p_d_p","%p_d_r","%p_d_s",
"%p_det","%p_e","%p_f_lss","%p_f_r","%p_i_ce","%p_i_h","%p_i_hm","%p_i_lss","%p_i_p","%p_i_r","%p_i_s","%p_i_st","%p_inv","%p_j_s","%p_k_p","%p_k_r","%p_k_s","%p_l_lss","%p_l_p","%p_l_r","%p_l_s","%p_m_hm","%p_m_lss","%p_m_r","%p_matrix","%p_n_l","%p_n_lss","%p_n_r","%p_o_l","%p_o_lss","%p_o_r","%p_o_sp","%p_p_s","%p_prod","%p_q_p","%p_q_r","%p_q_s","%p_r_lss","%p_r_p","%p_r_r","%p_r_s","%p_s_lss","%p_s_r","%p_simp","%p_string","%p_sum","%p_v_lss","%p_v_p","%p_v_r","%p_v_s","%p_x_hm","%p_x_r","%p_y_p","%p_y_r","%p_y_s","%p_z_p","%p_z_r","%p_z_s","%r_a_hm","%r_a_lss","%r_a_p","%r_a_r","%r_a_s","%r_c_lss","%r_c_p","%r_c_r","%r_c_s","%r_clean","%r_cumprod","%r_d_p","%r_d_r","%r_d_s","%r_det","%r_diag","%r_e","%r_eye","%r_f_lss","%r_f_p","%r_f_r","%r_f_s","%r_i_ce","%r_i_hm","%r_i_lss","%r_i_p","%r_i_r","%r_i_s","%r_i_st","%r_inv","%r_j_s","%r_k_p","%r_k_r","%r_k_s","%r_l_lss","%r_l_p","%r_l_r","%r_l_s","%r_m_hm","%r_m_lss","%r_m_p","%r_m_r","%r_m_s","%r_matrix","%r_n_lss","%r_n_p","%r_n_r","%r_n_s","%r_norm","%r_o_lss","%r_o_p","%r_o_r","%r_o_s","%r_ones","%r_p","%r_p_s","%r_prod","%r_q_p","%r_q_r","%r_q_s","%r_r_lss","%r_r_p","%r_r_r","%r_r_s","%r_rand","%r_s","%r_s_hm","%r_s_lss","%r_s_p","%r_s_r","%r_s_s","%r_simp","%r_size","%r_string","%r_sum","%r_t","%r_tril","%r_triu","%r_v_lss","%r_v_p","%r_v_r","%r_v_s","%r_x_p","%r_x_r","%r_x_s","%r_y_p","%r_y_r","%r_y_s","%r_z_p","%r_z_r","%r_z_s","%s_1_hm","%s_1_i","%s_2_hm","%s_2_i","%s_3_hm","%s_3_i","%s_4_hm","%s_4_i","%s_5","%s_a_b","%s_a_hm","%s_a_i","%s_a_ip","%s_a_lss","%s_a_msp","%s_a_r","%s_a_sp","%s_and","%s_b_i","%s_b_s","%s_c_b","%s_c_cblock","%s_c_lss","%s_c_r","%s_c_sp","%s_d_b","%s_d_i","%s_d_p","%s_d_r","%s_d_sp","%s_e","%s_f_b","%s_f_cblock","%s_f_lss","%s_f_r","%s_f_sp","%s_g_b","%s_g_s","%s_h_b","%s_h_s","%s_i_b","%s_i_c","%s_i_ce","%s_i_h","%s_i_hm","%s_i_i","%s_i_lss","%s_i_p","%s_i_r","%s_i_s","%s_i_sp","%s_i_spb","%s_i_st","%s_j_i","%s_k_hm","%s_k_p","%s_k_r","%s_k_sp","%s_l_b","%s_l_hm","%s_l_i","%s_
l_lss","%s_l_p","%s_l_r","%s_l_s","%s_l_sp","%s_m_b","%s_m_hm","%s_m_i","%s_m_ip","%s_m_lss","%s_m_msp","%s_m_r","%s_matrix","%s_n_hm","%s_n_i","%s_n_l","%s_n_lss","%s_n_r","%s_n_st","%s_o_hm","%s_o_i","%s_o_l","%s_o_lss","%s_o_r","%s_o_st","%s_or","%s_p_b","%s_p_i","%s_pow","%s_q_hm","%s_q_i","%s_q_p","%s_q_r","%s_q_sp","%s_r_b","%s_r_i","%s_r_lss","%s_r_p","%s_r_r","%s_r_s","%s_r_sp","%s_s_b","%s_s_hm","%s_s_i","%s_s_ip","%s_s_lss","%s_s_r","%s_s_sp","%s_simp","%s_v_lss","%s_v_p","%s_v_r","%s_v_s","%s_x_b","%s_x_hm","%s_x_i","%s_x_r","%s_y_p","%s_y_r","%s_y_sp","%s_z_p","%s_z_r","%s_z_sp","%sn","%sp_a_s","%sp_a_sp","%sp_and","%sp_c_s","%sp_ceil","%sp_cos","%sp_cumprod","%sp_cumsum","%sp_d_s","%sp_d_sp","%sp_diag","%sp_e","%sp_exp","%sp_f_s","%sp_floor","%sp_gsort","%sp_i_ce","%sp_i_h","%sp_i_s","%sp_i_sp","%sp_i_st","%sp_int","%sp_inv","%sp_k_s","%sp_k_sp","%sp_l_s","%sp_l_sp","%sp_length","%sp_norm","%sp_or","%sp_p_s","%sp_prod","%sp_q_s","%sp_q_sp","%sp_r_s","%sp_r_sp","%sp_round","%sp_s_s","%sp_s_sp","%sp_sin","%sp_sqrt","%sp_string","%sp_sum","%sp_tril","%sp_triu","%sp_y_s","%sp_y_sp","%sp_z_s","%sp_z_sp","%spb_and","%spb_c_b","%spb_cumprod","%spb_cumsum","%spb_diag","%spb_e","%spb_f_b","%spb_g_b","%spb_g_spb","%spb_h_b","%spb_h_spb","%spb_i_b","%spb_i_ce","%spb_i_h","%spb_i_st","%spb_or","%spb_prod","%spb_sum","%spb_tril","%spb_triu","%st_6","%st_c_st","%st_e","%st_f_st","%st_i_b","%st_i_c","%st_i_fptr","%st_i_h","%st_i_i","%st_i_ip","%st_i_lss","%st_i_msp","%st_i_p","%st_i_r","%st_i_s","%st_i_sp","%st_i_spb","%st_i_st","%st_matrix","%st_n_c","%st_n_l","%st_n_mc","%st_n_p","%st_n_s","%st_o_c","%st_o_l","%st_o_mc","%st_o_p","%st_o_s","%st_o_tl","%st_p","%st_size","%st_string","%st_t","%ticks_i_h","%xls_e","%xls_p","%xlssheet_e","%xlssheet_p","%xlssheet_size","%xlssheet_string","DominationRank","G_make","IsAScalar","NDcost","OS_Version","PlotSparse","ReadHBSparse","ReadmiMatrix","TCL_CreateSlave","WritemiMatrix","abcd","abinv","accept_func_default","accept_func
_vfsa","acf","acosd","acosh","acoshm","acosm","acot","acotd","acoth","acsc","acscd","acsch","add_demo","add_help_chapter","add_module_help_chapter","add_param","add_profiling","adj2sp","aff2ab","ana_style","analpf","analyze","aplat","apropos","arhnk","arl2","arma2p","armac","armax","armax1","arobasestring2strings","arsimul","ascii2string","asciimat","asec","asecd","asech","asind","asinh","asinhm","asinm","assert_checkalmostequal","assert_checkequal","assert_checkerror","assert_checkfalse","assert_checkfilesequal","assert_checktrue","assert_comparecomplex","assert_computedigits","assert_cond2reltol","assert_cond2reqdigits","assert_generror","atand","atanh","atanhm","atanm","atomsAutoload","atomsAutoloadAdd","atomsAutoloadDel","atomsAutoloadList","atomsCategoryList","atomsCheckModule","atomsDepTreeShow","atomsGetConfig","atomsGetInstalled","atomsGetLoaded","atomsGetLoadedPath","atomsInstall","atomsIsInstalled","atomsIsLoaded","atomsList","atomsLoad","atomsRemove","atomsRepositoryAdd","atomsRepositoryDel","atomsRepositoryList","atomsRestoreConfig","atomsSaveConfig","atomsSearch","atomsSetConfig","atomsShow","atomsSystemInit","atomsSystemUpdate","atomsTest","atomsUpdate","atomsVersion","augment","auread","auwrite","balreal","bench_run","bilin","bilt","bin2dec","binomial","bitand","bitcmp","bitget","bitor","bitset","bitxor","black","blanks","bloc2exp","bloc2ss","block_parameter_error","bode","bstap","buttmag","bvodeS","bytecode","bytecodewalk","cainv","calendar","calfrq","canon","casc","cat","cat_code","cb_m2sci_gui","ccontrg","cell","cell2mat","cellstr","center","cepstrum","cfspec","char","chart","cheb1mag","cheb2mag","check_gateways","check_help","check_modules_xml","check_versions","chepol","chfact","chsolve","classmarkov","clean_help","clock","cls2dls","cmb_lin","cmndred","cmoment","coding_ga_binary","coding_ga_identity","coff","coffg","colcomp","colcompr","colinout","colregul","companion","complex","compute_initial_temp","cond","cond2sp","condestsp","config","config
ure_msifort","configure_msvc","cont_frm","cont_mat","contrss","conv","convert_to_float","convertindex","convol","convol2d","copfac","correl","cosd","cosh","coshm","cosm","cotd","cotg","coth","cothm","covar","createfun","createstruct","crossover_ga_binary","crossover_ga_default","csc","cscd","csch","csgn","csim","cspect","ctr_gram","czt","dae","daeoptions","damp","datafit","date","datenum","datevec","dbphi","dcf","ddp","dec2bin","dec2hex","dec2oct","del_help_chapter","del_module_help_chapter","demo_begin","demo_choose","demo_compiler","demo_end","demo_file_choice","demo_folder_choice","demo_function_choice","demo_gui","demo_mdialog","demo_message","demo_run","demo_viewCode","denom","derivat","derivative","des2ss","des2tf","detectmsifort64tools","detectmsvc64tools","determ","detr","detrend","devtools_run_builder","dft","dhnorm","diff","diophant","dir","dirname","dispfiles","dllinfo","dscr","dsimul","dt_ility","dtsi","edit","edit_error","eigenmarkov","ell1mag","enlarge_shape","entropy","eomday","epred","eqfir","eqiir","equil","equil1","erf","erfc","erfcx","erfinv","etime","eval","evans","evstr","expression2code","extract_help_examples","factor","factorial","factors","faurre","ffilt","fft2","fftshift","fieldnames","filt_sinc","filter","findABCD","findAC","findBDK","findR","find_freq","find_links","find_scicos_version","findm","findmsifortcompiler","findmsvccompiler","findx0BD","firstnonsingleton","fit_dat","fix","fixedpointgcd","flipdim","flts","fminsearch","format_txt","fourplan","fprintf","frep2tf","freson","frfit","frmag","fscanf","fseek_origin","fsfirlin","fspec","fspecg","fstabst","ftest","ftuneq","fullfile","fullrf","fullrfk","fun2string","g_margin","gainplot","gamitg","gcare","gcd","gencompilationflags_unix","generateBlockImage","generateBlockImages","generic_i_ce","generic_i_h","generic_i_hm","generic_i_s","generic_i_st","genlib","genlib_old","genmarkov","geomean","getDiagramVersion","getModelicaPath","get_file_path","get_function_path","get_param","get_profile"
,"get_scicos_version","getd","getscilabkeywords","getshell","gettklib","gfare","gfrancis","givens","glever","gmres","group","gschur","gspec","gtild","h2norm","h_cl","h_inf","h_inf_st","h_norm","hallchart","halt","hank","hankelsv","harmean","haveacompiler","head_comments","help","help_from_sci","help_skeleton","hermit","hex2dec","hilb","hilbert","horner","householder","hrmt","htrianr","hypermat","ifft","iir","iirgroup","iirlp","iirmod","ilib_build","ilib_compile","ilib_for_link","ilib_gen_Make","ilib_gen_Make_unix","ilib_gen_cleaner","ilib_gen_gateway","ilib_gen_loader","ilib_include_flag","ilib_mex_build","im_inv","importScicosDiagram","importScicosPal","importXcosDiagram","imrep2ss","ind2sub","inistate","init_ga_default","init_param","initial_scicos_tables","input","instruction2code","intc","intdec","integrate","interp1","interpln","intersect","intl","intsplin","inttrap","inv_coeff","invr","invrs","invsyslin","iqr","isLeapYear","is_absolute_path","is_param","iscell","iscellstr","isempty","isfield","isinf","isnan","isnum","issparse","isstruct","isvector","jmat","justify","kalm","karmarkar","kernel","kpure","krac2","kroneck","lattn","launchtest","lcf","lcm","lcmdiag","leastsq","leqe","leqr","lev","levin","lex_sort","lft","lin","lin2mu","lincos","lindquist","linf","linfn","linsolve","linspace","list2vec","list_param","listfiles","listfunctions","listvarinfile","lmisolver","lmitool","loadXcosLibs","loadmatfile","loadwave","log10","log2","logm","logspace","lqe","lqg","lqg2stan","lqg_ltr","lqr","ls","lyap","m2sci_gui","m_circle","macglov","macrovar","mad","makecell","manedit","mapsound","markp2ss","matfile2sci","mdelete","mean","meanf","median","mese","meshgrid","mfft","mfile2sci","minreal","minss","mkdir","modulo","moment","mrfit","msd","mstr2sci","mtlb","mtlb_0","mtlb_a","mtlb_all","mtlb_any","mtlb_axes","mtlb_axis","mtlb_beta","mtlb_box","mtlb_choices","mtlb_close","mtlb_colordef","mtlb_cond","mtlb_conv","mtlb_cov","mtlb_cumprod","mtlb_cumsum","mtlb_dec2hex","mtlb_del
ete","mtlb_diag","mtlb_diff","mtlb_dir","mtlb_double","mtlb_e","mtlb_echo","mtlb_error","mtlb_eval","mtlb_exist","mtlb_eye","mtlb_false","mtlb_fft","mtlb_fftshift","mtlb_filter","mtlb_find","mtlb_findstr","mtlb_fliplr","mtlb_fopen","mtlb_format","mtlb_fprintf","mtlb_fread","mtlb_fscanf","mtlb_full","mtlb_fwrite","mtlb_get","mtlb_grid","mtlb_hold","mtlb_i","mtlb_ifft","mtlb_image","mtlb_imp","mtlb_int16","mtlb_int32","mtlb_int8","mtlb_is","mtlb_isa","mtlb_isfield","mtlb_isletter","mtlb_isspace","mtlb_l","mtlb_legendre","mtlb_linspace","mtlb_logic","mtlb_logical","mtlb_loglog","mtlb_lower","mtlb_max","mtlb_mean","mtlb_median","mtlb_mesh","mtlb_meshdom","mtlb_min","mtlb_more","mtlb_num2str","mtlb_ones","mtlb_pcolor","mtlb_plot","mtlb_prod","mtlb_qr","mtlb_qz","mtlb_rand","mtlb_randn","mtlb_rcond","mtlb_realmax","mtlb_realmin","mtlb_repmat","mtlb_s","mtlb_semilogx","mtlb_semilogy","mtlb_setstr","mtlb_size","mtlb_sort","mtlb_sortrows","mtlb_sprintf","mtlb_sscanf","mtlb_std","mtlb_strcmp","mtlb_strcmpi","mtlb_strfind","mtlb_strrep","mtlb_subplot","mtlb_sum","mtlb_t","mtlb_toeplitz","mtlb_tril","mtlb_triu","mtlb_true","mtlb_type","mtlb_uint16","mtlb_uint32","mtlb_uint8","mtlb_upper","mtlb_var","mtlb_zeros","mu2lin","mutation_ga_binary","mutation_ga_default","mvcorrel","mvvacov","nancumsum","nand2mean","nanmax","nanmean","nanmeanf","nanmedian","nanmin","nanstdev","nansum","narsimul","ndgrid","ndims","nehari","neigh_func_csa","neigh_func_default","neigh_func_fsa","neigh_func_vfsa","neldermead_cget","neldermead_configure","neldermead_costf","neldermead_defaultoutput","neldermead_destroy","neldermead_display","neldermead_function","neldermead_get","neldermead_log","neldermead_new","neldermead_restart","neldermead_search","neldermead_updatesimp","nextpow2","nfreq","nicholschart","nlev","nmplot_cget","nmplot_configure","nmplot_contour","nmplot_destroy","nmplot_display","nmplot_function","nmplot_get","nmplot_historyplot","nmplot_log","nmplot_new","nmplot_outputcmd","nmplot_restar
t","nmplot_search","nmplot_simplexhistory","noisegen","nonreg_test_run","norm","now","null","num2cell","numdiff","numer","nyquist","nyquistfrequencybounds","obs_gram","obscont","observer","obsv_mat","obsvss","oct2dec","odeoptions","optim_ga","optim_moga","optim_nsga","optim_nsga2","optim_sa","optimbase_cget","optimbase_checkbounds","optimbase_checkcostfun","optimbase_checkx0","optimbase_configure","optimbase_destroy","optimbase_display","optimbase_function","optimbase_get","optimbase_hasbounds","optimbase_hasconstraints","optimbase_hasnlcons","optimbase_histget","optimbase_histset","optimbase_incriter","optimbase_isfeasible","optimbase_isinbounds","optimbase_isinnonlincons","optimbase_log","optimbase_logshutdown","optimbase_logstartup","optimbase_new","optimbase_outputcmd","optimbase_outstruct","optimbase_proj2bnds","optimbase_set","optimbase_stoplog","optimbase_terminate","optimget","optimplotfunccount","optimplotfval","optimplotx","optimset","optimsimplex_center","optimsimplex_check","optimsimplex_compsomefv","optimsimplex_computefv","optimsimplex_deltafv","optimsimplex_deltafvmax","optimsimplex_destroy","optimsimplex_dirmat","optimsimplex_fvmean","optimsimplex_fvstdev","optimsimplex_fvvariance","optimsimplex_getall","optimsimplex_getallfv","optimsimplex_getallx","optimsimplex_getfv","optimsimplex_getn","optimsimplex_getnbve","optimsimplex_getve","optimsimplex_getx","optimsimplex_gradientfv","optimsimplex_log","optimsimplex_new","optimsimplex_print","optimsimplex_reflect","optimsimplex_setall","optimsimplex_setallfv","optimsimplex_setallx","optimsimplex_setfv","optimsimplex_setn","optimsimplex_setnbve","optimsimplex_setve","optimsimplex_setx","optimsimplex_shrink","optimsimplex_size","optimsimplex_sort","optimsimplex_tostring","optimsimplex_xbar","orth","p_margin","pack","pareto_filter","parrot","pbig","pca","pcg","pdiv","pen2ea","pencan","pencost","penlaur","perctl","perl","perms","permute","pertrans","pfactors","pfss","phasemag","phaseplot","phc","pinv","playsnd
","plotprofile","plzr","pmodulo","pol2des","pol2str","polar","polfact","prbs_a","prettyprint","primes","princomp","profile","proj","projsl","projspec","psmall","pspect","qmr","qpsolve","quart","quaskro","rafiter","randpencil","range","rank","read_csv","readxls","recompilefunction","recons","reglin","regress","remezb","remove_param","remove_profiling","repfreq","replace_Ix_by_Fx","repmat","reset_profiling","resize_matrix","returntoscilab","rhs2code","ric_desc","riccati","rmdir","routh_t","rowcomp","rowcompr","rowinout","rowregul","rowshuff","rref","sample","samplef","samwr","savematfile","savewave","scanf","sci2exp","sciGUI_init","sci_sparse","scicos_getvalue","scicos_simulate","scicos_workspace_init","scisptdemo","scitest","sdiff","sec","secd","sech","selection_ga_elitist","selection_ga_random","sensi","set_param","setdiff","sgrid","show_margins","show_pca","showprofile","signm","sinc","sincd","sind","sinh","sinhm","sinm","sm2des","sm2ss","smga","smooth","solve","sound","soundsec","sp2adj","spaninter","spanplus","spantwo","specfact","speye","sprand","spzeros","sqroot","sqrtm","squarewave","squeeze","srfaur","srkf","ss2des","ss2ss","ss2tf","sscanf","sskf","ssprint","ssrand","st_deviation","st_i_generic","st_ility","stabil","statgain","stdev","stdevf","steadycos","strange","strcmpi","struct","sub2ind","sva","svplot","sylm","sylv","sysconv","sysdiag","sysfact","syslin","syssize","system","systmat","tabul","tand","tanh","tanhm","tanm","tbx_build_blocks","tbx_build_cleaner","tbx_build_gateway","tbx_build_gateway_clean","tbx_build_gateway_loader","tbx_build_help","tbx_build_help_loader","tbx_build_loader","tbx_build_macros","tbx_build_src","tbx_builder","tbx_builder_gateway","tbx_builder_gateway_lang","tbx_builder_help","tbx_builder_help_lang","tbx_builder_macros","tbx_builder_src","tbx_builder_src_lang","temp_law_csa","temp_law_default","temp_law_fsa","temp_law_huang","temp_law_vfsa","test_clean","test_on_columns","test_run","test_run_level","testexamples","tf2des","tf2s
s","thrownan","tic","time_id","toc","toeplitz","tokenpos","toolboxes","trace","trans","translatepaths","tree2code","trfmod","trianfml","trimmean","trisolve","trzeros","typeof","ui_observer","union","unique","unit_test_run","unix_g","unix_s","unix_w","unix_x","unobs","unpack","variance","variancef","vec2list","vectorfind","ver","warnobsolete","wavread","wavwrite","wcenter","weekday","wfir","wfir_gui","whereami","who_user","whos","wiener","wigner","winclose","window","winlist","with_javasci","with_macros_source","with_modelica_compiler","with_pvm","with_texmacs","with_tk","write_csv","xcosBlockEval","xcosBlockInterface","xcosCodeGeneration","xcosConfigureModelica","xcosPal","xcosPalAdd","xcosPalAddBlock","xcosPalExport","xcosShowBlockWarning","xcosValidateBlockSet","xcosValidateCompareBlock","xcos_compile","xcos_run","xcos_simulate","xcos_workspace_init","xmltochm","xmltoformat","xmltohtml","xmltojar","xmltopdf","xmltops","xmltoweb","yulewalk","zeropen","zgrid","zpbutt","zpch1","zpch2","zpell"] - -builtin_consts = ["\\$","%F","%T","%e","%eps","%f","%fftw","%gui","%i","%inf","%io","%modalWarning","%nan","%pi","%s","%t","%tk","%toolboxes","%toolboxes_dir","%z","PWD","SCI","SCIHOME","TMPDIR","a","ans","assertlib","atomslib","cacsdlib","compatibility_functilib","corelib","data_structureslib","demo_toolslib","development_toolslib","differential_equationlib","dynamic_linklib","elementary_functionslib","fd","fileiolib","functionslib","genetic_algorithmslib","helptoolslib","home","i","integerlib","interpolationlib","iolib","j","linear_algebralib","m2scilib","matiolib","modules_managerlib","myStr","neldermeadlib","optimbaselib","optimizationlib","optimsimplexlib","output_streamlib","overloadinglib","parameterslib","polynomialslib","scicos_autolib","scicos_utilslib","scinoteslib","signal_processinglib","simulated_annealinglib","soundlib","sparselib","special_functionslib","spreadsheetlib","statisticslib","stringlib","tclscilib","timelib","umfpacklib","varType","xcoslib"] diff 
--git a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/_sourcemodbuiltins.py b/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/_sourcemodbuiltins.py deleted file mode 100644 index 0f6b477..0000000 --- a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/_sourcemodbuiltins.py +++ /dev/null @@ -1,1072 +0,0 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers._sourcemodbuiltins - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - This file contains the names of SourceMod functions. - It is able to re-generate itself. - - Do not edit the FUNCTIONS list by hand. - - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -FUNCTIONS = ['TopMenuHandler', - 'CreateTopMenu', - 'LoadTopMenuConfig', - 'AddToTopMenu', - 'GetTopMenuInfoString', - 'GetTopMenuObjName', - 'RemoveFromTopMenu', - 'DisplayTopMenu', - 'FindTopMenuCategory', - 'OnAdminMenuCreated', - 'OnAdminMenuReady', - 'GetAdminTopMenu', - 'AddTargetsToMenu', - 'AddTargetsToMenu2', - 'RedisplayAdminMenu', - 'TEHook', - 'AddTempEntHook', - 'RemoveTempEntHook', - 'TE_Start', - 'TE_IsValidProp', - 'TE_WriteNum', - 'TE_ReadNum', - 'TE_WriteFloat', - 'TE_ReadFloat', - 'TE_WriteVector', - 'TE_ReadVector', - 'TE_WriteAngles', - 'TE_WriteFloatArray', - 'TE_Send', - 'TE_WriteEncodedEnt', - 'TE_SendToAll', - 'TE_SendToClient', - 'CreateKeyValues', - 'KvSetString', - 'KvSetNum', - 'KvSetUInt64', - 'KvSetFloat', - 'KvSetColor', - 'KvSetVector', - 'KvGetString', - 'KvGetNum', - 'KvGetFloat', - 'KvGetColor', - 'KvGetUInt64', - 'KvGetVector', - 'KvJumpToKey', - 'KvJumpToKeySymbol', - 'KvGotoFirstSubKey', - 'KvGotoNextKey', - 'KvSavePosition', - 'KvDeleteKey', - 'KvDeleteThis', - 'KvGoBack', - 'KvRewind', - 'KvGetSectionName', - 'KvSetSectionName', - 'KvGetDataType', - 'KeyValuesToFile', - 'FileToKeyValues', - 'KvSetEscapeSequences', - 'KvNodesInStack', - 'KvCopySubkeys', - 'KvFindKeyById', - 'KvGetNameSymbol', - 
'KvGetSectionSymbol', - 'TE_SetupSparks', - 'TE_SetupSmoke', - 'TE_SetupDust', - 'TE_SetupMuzzleFlash', - 'TE_SetupMetalSparks', - 'TE_SetupEnergySplash', - 'TE_SetupArmorRicochet', - 'TE_SetupGlowSprite', - 'TE_SetupExplosion', - 'TE_SetupBloodSprite', - 'TE_SetupBeamRingPoint', - 'TE_SetupBeamPoints', - 'TE_SetupBeamLaser', - 'TE_SetupBeamRing', - 'TE_SetupBeamFollow', - 'HookEvent', - 'HookEventEx', - 'UnhookEvent', - 'CreateEvent', - 'FireEvent', - 'CancelCreatedEvent', - 'GetEventBool', - 'SetEventBool', - 'GetEventInt', - 'SetEventInt', - 'GetEventFloat', - 'SetEventFloat', - 'GetEventString', - 'SetEventString', - 'GetEventName', - 'SetEventBroadcast', - 'GetUserMessageId', - 'GetUserMessageName', - 'StartMessage', - 'StartMessageEx', - 'EndMessage', - 'MsgHook', - 'MsgPostHook', - 'HookUserMessage', - 'UnhookUserMessage', - 'StartMessageAll', - 'StartMessageOne', - 'InactivateClient', - 'ReconnectClient', - 'GetMaxEntities', - 'GetEntityCount', - 'IsValidEntity', - 'IsValidEdict', - 'IsEntNetworkable', - 'CreateEdict', - 'RemoveEdict', - 'GetEdictFlags', - 'SetEdictFlags', - 'GetEdictClassname', - 'GetEntityNetClass', - 'ChangeEdictState', - 'GetEntData', - 'SetEntData', - 'GetEntDataFloat', - 'SetEntDataFloat', - 'GetEntDataEnt2', - 'SetEntDataEnt2', - 'GetEntDataVector', - 'SetEntDataVector', - 'GetEntDataString', - 'SetEntDataString', - 'FindSendPropOffs', - 'FindSendPropInfo', - 'FindDataMapOffs', - 'GetEntSendPropOffs', - 'GetEntProp', - 'SetEntProp', - 'GetEntPropFloat', - 'SetEntPropFloat', - 'GetEntPropEnt', - 'SetEntPropEnt', - 'GetEntPropVector', - 'SetEntPropVector', - 'GetEntPropString', - 'SetEntPropString', - 'GetEntPropArraySize', - 'GetEntDataArray', - 'SetEntDataArray', - 'GetEntityClassname', - 'float', - 'FloatMul', - 'FloatDiv', - 'FloatAdd', - 'FloatSub', - 'FloatFraction', - 'RoundToZero', - 'RoundToCeil', - 'RoundToFloor', - 'RoundToNearest', - 'FloatCompare', - 'SquareRoot', - 'Pow', - 'Exponential', - 'Logarithm', - 'Sine', - 
'Cosine', - 'Tangent', - 'FloatAbs', - 'ArcTangent', - 'ArcCosine', - 'ArcSine', - 'ArcTangent2', - 'RoundFloat', - 'operator%', - 'DegToRad', - 'RadToDeg', - 'GetURandomInt', - 'GetURandomFloat', - 'SetURandomSeed', - 'SetURandomSeedSimple', - 'RemovePlayerItem', - 'GivePlayerItem', - 'GetPlayerWeaponSlot', - 'IgniteEntity', - 'ExtinguishEntity', - 'TeleportEntity', - 'ForcePlayerSuicide', - 'SlapPlayer', - 'FindEntityByClassname', - 'GetClientEyeAngles', - 'CreateEntityByName', - 'DispatchSpawn', - 'DispatchKeyValue', - 'DispatchKeyValueFloat', - 'DispatchKeyValueVector', - 'GetClientAimTarget', - 'GetTeamCount', - 'GetTeamName', - 'GetTeamScore', - 'SetTeamScore', - 'GetTeamClientCount', - 'SetEntityModel', - 'GetPlayerDecalFile', - 'GetServerNetStats', - 'EquipPlayerWeapon', - 'ActivateEntity', - 'SetClientInfo', - 'SetClientListeningFlags', - 'GetClientListeningFlags', - 'SetListenOverride', - 'GetListenOverride', - 'IsClientMuted', - 'TR_GetPointContents', - 'TR_GetPointContentsEnt', - 'TR_TraceRay', - 'TR_TraceHull', - 'TR_TraceRayFilter', - 'TR_TraceHullFilter', - 'TR_TraceRayEx', - 'TR_TraceHullEx', - 'TR_TraceRayFilterEx', - 'TR_TraceHullFilterEx', - 'TR_GetFraction', - 'TR_GetEndPosition', - 'TR_GetEntityIndex', - 'TR_DidHit', - 'TR_GetHitGroup', - 'TR_GetPlaneNormal', - 'TR_PointOutsideWorld', - 'SortIntegers', - 'SortFloats', - 'SortStrings', - 'SortFunc1D', - 'SortCustom1D', - 'SortCustom2D', - 'SortADTArray', - 'SortFuncADTArray', - 'SortADTArrayCustom', - 'CompileRegex', - 'MatchRegex', - 'GetRegexSubString', - 'SimpleRegexMatch', - 'TF2_GetPlayerClass', - 'TF2_SetPlayerClass', - 'TF2_GetPlayerResourceData', - 'TF2_SetPlayerResourceData', - 'TF2_RemoveWeaponSlot', - 'TF2_RemoveAllWeapons', - 'TF2_IsPlayerInCondition', - 'TF2_GetObjectType', - 'TF2_GetObjectMode', - 'NominateMap', - 'RemoveNominationByMap', - 'RemoveNominationByOwner', - 'GetExcludeMapList', - 'GetNominatedMapList', - 'CanMapChooserStartVote', - 'InitiateMapChooserVote', - 
'HasEndOfMapVoteFinished', - 'EndOfMapVoteEnabled', - 'OnNominationRemoved', - 'OnMapVoteStarted', - 'CreateTimer', - 'KillTimer', - 'TriggerTimer', - 'GetTickedTime', - 'GetMapTimeLeft', - 'GetMapTimeLimit', - 'ExtendMapTimeLimit', - 'GetTickInterval', - 'OnMapTimeLeftChanged', - 'IsServerProcessing', - 'CreateDataTimer', - 'ByteCountToCells', - 'CreateArray', - 'ClearArray', - 'CloneArray', - 'ResizeArray', - 'GetArraySize', - 'PushArrayCell', - 'PushArrayString', - 'PushArrayArray', - 'GetArrayCell', - 'GetArrayString', - 'GetArrayArray', - 'SetArrayCell', - 'SetArrayString', - 'SetArrayArray', - 'ShiftArrayUp', - 'RemoveFromArray', - 'SwapArrayItems', - 'FindStringInArray', - 'FindValueInArray', - 'ProcessTargetString', - 'ReplyToTargetError', - 'MultiTargetFilter', - 'AddMultiTargetFilter', - 'RemoveMultiTargetFilter', - 'OnBanClient', - 'OnBanIdentity', - 'OnRemoveBan', - 'BanClient', - 'BanIdentity', - 'RemoveBan', - 'CreateTrie', - 'SetTrieValue', - 'SetTrieArray', - 'SetTrieString', - 'GetTrieValue', - 'GetTrieArray', - 'GetTrieString', - 'RemoveFromTrie', - 'ClearTrie', - 'GetTrieSize', - 'GetFunctionByName', - 'CreateGlobalForward', - 'CreateForward', - 'GetForwardFunctionCount', - 'AddToForward', - 'RemoveFromForward', - 'RemoveAllFromForward', - 'Call_StartForward', - 'Call_StartFunction', - 'Call_PushCell', - 'Call_PushCellRef', - 'Call_PushFloat', - 'Call_PushFloatRef', - 'Call_PushArray', - 'Call_PushArrayEx', - 'Call_PushString', - 'Call_PushStringEx', - 'Call_Finish', - 'Call_Cancel', - 'NativeCall', - 'CreateNative', - 'ThrowNativeError', - 'GetNativeStringLength', - 'GetNativeString', - 'SetNativeString', - 'GetNativeCell', - 'GetNativeCellRef', - 'SetNativeCellRef', - 'GetNativeArray', - 'SetNativeArray', - 'FormatNativeString', - 'OnRebuildAdminCache', - 'DumpAdminCache', - 'AddCommandOverride', - 'GetCommandOverride', - 'UnsetCommandOverride', - 'CreateAdmGroup', - 'FindAdmGroup', - 'SetAdmGroupAddFlag', - 'GetAdmGroupAddFlag', - 
'GetAdmGroupAddFlags', - 'SetAdmGroupImmuneFrom', - 'GetAdmGroupImmuneCount', - 'GetAdmGroupImmuneFrom', - 'AddAdmGroupCmdOverride', - 'GetAdmGroupCmdOverride', - 'RegisterAuthIdentType', - 'CreateAdmin', - 'GetAdminUsername', - 'BindAdminIdentity', - 'SetAdminFlag', - 'GetAdminFlag', - 'GetAdminFlags', - 'AdminInheritGroup', - 'GetAdminGroupCount', - 'GetAdminGroup', - 'SetAdminPassword', - 'GetAdminPassword', - 'FindAdminByIdentity', - 'RemoveAdmin', - 'FlagBitsToBitArray', - 'FlagBitArrayToBits', - 'FlagArrayToBits', - 'FlagBitsToArray', - 'FindFlagByName', - 'FindFlagByChar', - 'FindFlagChar', - 'ReadFlagString', - 'CanAdminTarget', - 'CreateAuthMethod', - 'SetAdmGroupImmunityLevel', - 'GetAdmGroupImmunityLevel', - 'SetAdminImmunityLevel', - 'GetAdminImmunityLevel', - 'FlagToBit', - 'BitToFlag', - 'ServerCommand', - 'ServerCommandEx', - 'InsertServerCommand', - 'ServerExecute', - 'ClientCommand', - 'FakeClientCommand', - 'FakeClientCommandEx', - 'PrintToServer', - 'PrintToConsole', - 'ReplyToCommand', - 'GetCmdReplySource', - 'SetCmdReplySource', - 'IsChatTrigger', - 'ShowActivity2', - 'ShowActivity', - 'ShowActivityEx', - 'FormatActivitySource', - 'SrvCmd', - 'RegServerCmd', - 'ConCmd', - 'RegConsoleCmd', - 'RegAdminCmd', - 'GetCmdArgs', - 'GetCmdArg', - 'GetCmdArgString', - 'CreateConVar', - 'FindConVar', - 'ConVarChanged', - 'HookConVarChange', - 'UnhookConVarChange', - 'GetConVarBool', - 'SetConVarBool', - 'GetConVarInt', - 'SetConVarInt', - 'GetConVarFloat', - 'SetConVarFloat', - 'GetConVarString', - 'SetConVarString', - 'ResetConVar', - 'GetConVarDefault', - 'GetConVarFlags', - 'SetConVarFlags', - 'GetConVarBounds', - 'SetConVarBounds', - 'GetConVarName', - 'QueryClientConVar', - 'GetCommandIterator', - 'ReadCommandIterator', - 'CheckCommandAccess', - 'CheckAccess', - 'IsValidConVarChar', - 'GetCommandFlags', - 'SetCommandFlags', - 'FindFirstConCommand', - 'FindNextConCommand', - 'SendConVarValue', - 'AddServerTag', - 'RemoveServerTag', - 
'CommandListener', - 'AddCommandListener', - 'RemoveCommandListener', - 'TF2_IgnitePlayer', - 'TF2_RespawnPlayer', - 'TF2_RegeneratePlayer', - 'TF2_AddCondition', - 'TF2_RemoveCondition', - 'TF2_SetPlayerPowerPlay', - 'TF2_DisguisePlayer', - 'TF2_RemovePlayerDisguise', - 'TF2_StunPlayer', - 'TF2_MakeBleed', - 'TF2_GetResourceEntity', - 'TF2_GetClass', - 'TF2_CalcIsAttackCritical', - 'TF2_OnIsHolidayActive', - 'TF2_IsPlayerInDuel', - 'TF2_OnConditionAdded', - 'TF2_OnConditionRemoved', - 'TF2_OnWaitingForPlayersStart', - 'TF2_OnWaitingForPlayersEnd', - 'SQL_Connect', - 'SQL_DefConnect', - 'SQL_ConnectCustom', - 'SQLite_UseDatabase', - 'SQL_CheckConfig', - 'SQL_GetDriver', - 'SQL_ReadDriver', - 'SQL_GetDriverIdent', - 'SQL_GetDriverProduct', - 'SQL_GetAffectedRows', - 'SQL_GetInsertId', - 'SQL_GetError', - 'SQL_EscapeString', - 'SQL_QuoteString', - 'SQL_FastQuery', - 'SQL_Query', - 'SQL_PrepareQuery', - 'SQL_FetchMoreResults', - 'SQL_HasResultSet', - 'SQL_GetRowCount', - 'SQL_GetFieldCount', - 'SQL_FieldNumToName', - 'SQL_FieldNameToNum', - 'SQL_FetchRow', - 'SQL_MoreRows', - 'SQL_Rewind', - 'SQL_FetchString', - 'SQL_FetchFloat', - 'SQL_FetchInt', - 'SQL_IsFieldNull', - 'SQL_FetchSize', - 'SQL_BindParamInt', - 'SQL_BindParamFloat', - 'SQL_BindParamString', - 'SQL_Execute', - 'SQL_LockDatabase', - 'SQL_UnlockDatabase', - 'SQLTCallback', - 'SQL_IsSameConnection', - 'SQL_TConnect', - 'SQL_TQuery', - 'CloseHandle', - 'CloneHandle', - 'MenuHandler', - 'CreateMenu', - 'DisplayMenu', - 'DisplayMenuAtItem', - 'AddMenuItem', - 'InsertMenuItem', - 'RemoveMenuItem', - 'RemoveAllMenuItems', - 'GetMenuItem', - 'GetMenuSelectionPosition', - 'GetMenuItemCount', - 'SetMenuPagination', - 'GetMenuPagination', - 'GetMenuStyle', - 'SetMenuTitle', - 'GetMenuTitle', - 'CreatePanelFromMenu', - 'GetMenuExitButton', - 'SetMenuExitButton', - 'GetMenuExitBackButton', - 'SetMenuExitBackButton', - 'SetMenuNoVoteButton', - 'CancelMenu', - 'GetMenuOptionFlags', - 'SetMenuOptionFlags', - 
'IsVoteInProgress', - 'CancelVote', - 'VoteMenu', - 'VoteMenuToAll', - 'VoteHandler', - 'SetVoteResultCallback', - 'CheckVoteDelay', - 'IsClientInVotePool', - 'RedrawClientVoteMenu', - 'GetMenuStyleHandle', - 'CreatePanel', - 'CreateMenuEx', - 'GetClientMenu', - 'CancelClientMenu', - 'GetMaxPageItems', - 'GetPanelStyle', - 'SetPanelTitle', - 'DrawPanelItem', - 'DrawPanelText', - 'CanPanelDrawFlags', - 'SetPanelKeys', - 'SendPanelToClient', - 'GetPanelTextRemaining', - 'GetPanelCurrentKey', - 'SetPanelCurrentKey', - 'RedrawMenuItem', - 'InternalShowMenu', - 'GetMenuVoteInfo', - 'IsNewVoteAllowed', - 'PrefetchSound', - 'EmitAmbientSound', - 'FadeClientVolume', - 'StopSound', - 'EmitSound', - 'EmitSentence', - 'GetDistGainFromSoundLevel', - 'AmbientSHook', - 'NormalSHook', - 'AddAmbientSoundHook', - 'AddNormalSoundHook', - 'RemoveAmbientSoundHook', - 'RemoveNormalSoundHook', - 'EmitSoundToClient', - 'EmitSoundToAll', - 'ATTN_TO_SNDLEVEL', - 'strlen', - 'StrContains', - 'strcmp', - 'strncmp', - 'StrEqual', - 'strcopy', - 'Format', - 'FormatEx', - 'VFormat', - 'StringToInt', - 'StringToIntEx', - 'IntToString', - 'StringToFloat', - 'StringToFloatEx', - 'FloatToString', - 'BreakString', - 'TrimString', - 'SplitString', - 'ReplaceString', - 'ReplaceStringEx', - 'GetCharBytes', - 'IsCharAlpha', - 'IsCharNumeric', - 'IsCharSpace', - 'IsCharMB', - 'IsCharUpper', - 'IsCharLower', - 'StripQuotes', - 'CharToUpper', - 'CharToLower', - 'FindCharInString', - 'StrCat', - 'ExplodeString', - 'ImplodeStrings', - 'GetVectorLength', - 'GetVectorDistance', - 'GetVectorDotProduct', - 'GetVectorCrossProduct', - 'NormalizeVector', - 'GetAngleVectors', - 'GetVectorAngles', - 'GetVectorVectors', - 'AddVectors', - 'SubtractVectors', - 'ScaleVector', - 'NegateVector', - 'MakeVectorFromPoints', - 'BaseComm_IsClientGagged', - 'BaseComm_IsClientMuted', - 'BaseComm_SetClientGag', - 'BaseComm_SetClientMute', - 'FormatUserLogText', - 'FindPluginByFile', - 'FindTarget', - 'AcceptEntityInput', - 
'SetVariantBool', - 'SetVariantString', - 'SetVariantInt', - 'SetVariantFloat', - 'SetVariantVector3D', - 'SetVariantPosVector3D', - 'SetVariantColor', - 'SetVariantEntity', - 'GameRules_GetProp', - 'GameRules_SetProp', - 'GameRules_GetPropFloat', - 'GameRules_SetPropFloat', - 'GameRules_GetPropEnt', - 'GameRules_SetPropEnt', - 'GameRules_GetPropVector', - 'GameRules_SetPropVector', - 'GameRules_GetPropString', - 'GameRules_SetPropString', - 'GameRules_GetRoundState', - 'OnClientConnect', - 'OnClientConnected', - 'OnClientPutInServer', - 'OnClientDisconnect', - 'OnClientDisconnect_Post', - 'OnClientCommand', - 'OnClientSettingsChanged', - 'OnClientAuthorized', - 'OnClientPreAdminCheck', - 'OnClientPostAdminFilter', - 'OnClientPostAdminCheck', - 'GetMaxClients', - 'GetClientCount', - 'GetClientName', - 'GetClientIP', - 'GetClientAuthString', - 'GetClientUserId', - 'IsClientConnected', - 'IsClientInGame', - 'IsClientInKickQueue', - 'IsClientAuthorized', - 'IsFakeClient', - 'IsClientSourceTV', - 'IsClientReplay', - 'IsClientObserver', - 'IsPlayerAlive', - 'GetClientInfo', - 'GetClientTeam', - 'SetUserAdmin', - 'GetUserAdmin', - 'AddUserFlags', - 'RemoveUserFlags', - 'SetUserFlagBits', - 'GetUserFlagBits', - 'CanUserTarget', - 'RunAdminCacheChecks', - 'NotifyPostAdminCheck', - 'CreateFakeClient', - 'SetFakeClientConVar', - 'GetClientHealth', - 'GetClientModel', - 'GetClientWeapon', - 'GetClientMaxs', - 'GetClientMins', - 'GetClientAbsAngles', - 'GetClientAbsOrigin', - 'GetClientArmor', - 'GetClientDeaths', - 'GetClientFrags', - 'GetClientDataRate', - 'IsClientTimingOut', - 'GetClientTime', - 'GetClientLatency', - 'GetClientAvgLatency', - 'GetClientAvgLoss', - 'GetClientAvgChoke', - 'GetClientAvgData', - 'GetClientAvgPackets', - 'GetClientOfUserId', - 'KickClient', - 'KickClientEx', - 'ChangeClientTeam', - 'GetClientSerial', - 'GetClientFromSerial', - 'FindStringTable', - 'GetNumStringTables', - 'GetStringTableNumStrings', - 'GetStringTableMaxStrings', - 
'GetStringTableName', - 'FindStringIndex', - 'ReadStringTable', - 'GetStringTableDataLength', - 'GetStringTableData', - 'SetStringTableData', - 'AddToStringTable', - 'LockStringTables', - 'AddFileToDownloadsTable', - 'GetEntityFlags', - 'SetEntityFlags', - 'GetEntityMoveType', - 'SetEntityMoveType', - 'GetEntityRenderMode', - 'SetEntityRenderMode', - 'GetEntityRenderFx', - 'SetEntityRenderFx', - 'SetEntityRenderColor', - 'GetEntityGravity', - 'SetEntityGravity', - 'SetEntityHealth', - 'GetClientButtons', - 'EntityOutput', - 'HookEntityOutput', - 'UnhookEntityOutput', - 'HookSingleEntityOutput', - 'UnhookSingleEntityOutput', - 'SMC_CreateParser', - 'SMC_ParseFile', - 'SMC_GetErrorString', - 'SMC_ParseStart', - 'SMC_SetParseStart', - 'SMC_ParseEnd', - 'SMC_SetParseEnd', - 'SMC_NewSection', - 'SMC_KeyValue', - 'SMC_EndSection', - 'SMC_SetReaders', - 'SMC_RawLine', - 'SMC_SetRawLine', - 'BfWriteBool', - 'BfWriteByte', - 'BfWriteChar', - 'BfWriteShort', - 'BfWriteWord', - 'BfWriteNum', - 'BfWriteFloat', - 'BfWriteString', - 'BfWriteEntity', - 'BfWriteAngle', - 'BfWriteCoord', - 'BfWriteVecCoord', - 'BfWriteVecNormal', - 'BfWriteAngles', - 'BfReadBool', - 'BfReadByte', - 'BfReadChar', - 'BfReadShort', - 'BfReadWord', - 'BfReadNum', - 'BfReadFloat', - 'BfReadString', - 'BfReadEntity', - 'BfReadAngle', - 'BfReadCoord', - 'BfReadVecCoord', - 'BfReadVecNormal', - 'BfReadAngles', - 'BfGetNumBytesLeft', - 'CreateProfiler', - 'StartProfiling', - 'StopProfiling', - 'GetProfilerTime', - 'OnPluginStart', - 'AskPluginLoad2', - 'OnPluginEnd', - 'OnPluginPauseChange', - 'OnGameFrame', - 'OnMapStart', - 'OnMapEnd', - 'OnConfigsExecuted', - 'OnAutoConfigsBuffered', - 'OnAllPluginsLoaded', - 'GetMyHandle', - 'GetPluginIterator', - 'MorePlugins', - 'ReadPlugin', - 'GetPluginStatus', - 'GetPluginFilename', - 'IsPluginDebugging', - 'GetPluginInfo', - 'FindPluginByNumber', - 'SetFailState', - 'ThrowError', - 'GetTime', - 'FormatTime', - 'LoadGameConfigFile', - 'GameConfGetOffset', - 
'GameConfGetKeyValue', - 'GetSysTickCount', - 'AutoExecConfig', - 'RegPluginLibrary', - 'LibraryExists', - 'GetExtensionFileStatus', - 'OnLibraryAdded', - 'OnLibraryRemoved', - 'ReadMapList', - 'SetMapListCompatBind', - 'OnClientFloodCheck', - 'OnClientFloodResult', - 'CanTestFeatures', - 'GetFeatureStatus', - 'RequireFeature', - 'LoadFromAddress', - 'StoreToAddress', - 'CreateStack', - 'PushStackCell', - 'PushStackString', - 'PushStackArray', - 'PopStackCell', - 'PopStackString', - 'PopStackArray', - 'IsStackEmpty', - 'PopStack', - 'OnPlayerRunCmd', - 'BuildPath', - 'OpenDirectory', - 'ReadDirEntry', - 'OpenFile', - 'DeleteFile', - 'ReadFileLine', - 'ReadFile', - 'ReadFileString', - 'WriteFile', - 'WriteFileString', - 'WriteFileLine', - 'ReadFileCell', - 'WriteFileCell', - 'IsEndOfFile', - 'FileSeek', - 'FilePosition', - 'FileExists', - 'RenameFile', - 'DirExists', - 'FileSize', - 'FlushFile', - 'RemoveDir', - 'CreateDirectory', - 'GetFileTime', - 'LogToOpenFile', - 'LogToOpenFileEx', - 'SetNextMap', - 'GetNextMap', - 'ForceChangeLevel', - 'GetMapHistorySize', - 'GetMapHistory', - 'GeoipCode2', - 'GeoipCode3', - 'GeoipCountry', - 'MarkNativeAsOptional', - 'RegClientCookie', - 'FindClientCookie', - 'SetClientCookie', - 'GetClientCookie', - 'SetAuthIdCookie', - 'AreClientCookiesCached', - 'OnClientCookiesCached', - 'CookieMenuHandler', - 'SetCookiePrefabMenu', - 'SetCookieMenuItem', - 'ShowCookieMenu', - 'GetCookieIterator', - 'ReadCookieIterator', - 'GetCookieAccess', - 'GetClientCookieTime', - 'LoadTranslations', - 'SetGlobalTransTarget', - 'GetClientLanguage', - 'GetServerLanguage', - 'GetLanguageCount', - 'GetLanguageInfo', - 'SetClientLanguage', - 'GetLanguageByCode', - 'GetLanguageByName', - 'CS_OnBuyCommand', - 'CS_OnCSWeaponDrop', - 'CS_OnGetWeaponPrice', - 'CS_OnTerminateRound', - 'CS_RespawnPlayer', - 'CS_SwitchTeam', - 'CS_DropWeapon', - 'CS_TerminateRound', - 'CS_GetTranslatedWeaponAlias', - 'CS_GetWeaponPrice', - 'CS_GetClientClanTag', - 
'CS_SetClientClanTag', - 'LogToGame', - 'SetRandomSeed', - 'GetRandomFloat', - 'GetRandomInt', - 'IsMapValid', - 'IsDedicatedServer', - 'GetEngineTime', - 'GetGameTime', - 'GetGameTickCount', - 'GetGameDescription', - 'GetGameFolderName', - 'GetCurrentMap', - 'PrecacheModel', - 'PrecacheSentenceFile', - 'PrecacheDecal', - 'PrecacheGeneric', - 'IsModelPrecached', - 'IsDecalPrecached', - 'IsGenericPrecached', - 'PrecacheSound', - 'IsSoundPrecached', - 'CreateDialog', - 'GuessSDKVersion', - 'PrintToChat', - 'PrintToChatAll', - 'PrintCenterText', - 'PrintCenterTextAll', - 'PrintHintText', - 'PrintHintTextToAll', - 'ShowVGUIPanel', - 'CreateHudSynchronizer', - 'SetHudTextParams', - 'SetHudTextParamsEx', - 'ShowSyncHudText', - 'ClearSyncHud', - 'ShowHudText', - 'ShowMOTDPanel', - 'DisplayAskConnectBox', - 'EntIndexToEntRef', - 'EntRefToEntIndex', - 'MakeCompatEntRef', - 'SetClientViewEntity', - 'SetLightStyle', - 'GetClientEyePosition', - 'CreateDataPack', - 'WritePackCell', - 'WritePackFloat', - 'WritePackString', - 'ReadPackCell', - 'ReadPackFloat', - 'ReadPackString', - 'ResetPack', - 'GetPackPosition', - 'SetPackPosition', - 'IsPackReadable', - 'LogMessage', - 'LogMessageEx', - 'LogToFile', - 'LogToFileEx', - 'LogAction', - 'LogError', - 'OnLogAction', - 'GameLogHook', - 'AddGameLogHook', - 'RemoveGameLogHook', - 'FindTeamByName', - 'StartPrepSDKCall', - 'PrepSDKCall_SetVirtual', - 'PrepSDKCall_SetSignature', - 'PrepSDKCall_SetFromConf', - 'PrepSDKCall_SetReturnInfo', - 'PrepSDKCall_AddParameter', - 'EndPrepSDKCall', - 'SDKCall'] - -if __name__ == '__main__': - import pprint - import re - import sys - import urllib - - # urllib ends up wanting to import a module called 'math' -- if - # pygments/lexers is in the path, this ends badly. 
- for i in range(len(sys.path)-1, -1, -1): - if sys.path[i].endswith('/lexers'): - del sys.path[i] - - def get_version(): - f = urllib.urlopen('http://docs.sourcemod.net/api/index.php') - r = re.compile(r'SourceMod v\.([\d\.]+)') - for line in f: - m = r.search(line) - if m is not None: - return m.groups()[0] - - def get_sm_functions(): - f = urllib.urlopen('http://docs.sourcemod.net/api/SMfuncs.js') - r = re.compile(r'SMfunctions\[\d+\] = Array \("(?:public )?([^,]+)",".+"\);') - functions = [] - for line in f: - m = r.match(line) - if m is not None: - functions.append(m.groups()[0]) - return functions - - def regenerate(filename, natives): - f = open(filename) - try: - content = f.read() - finally: - f.close() - - header = content[:content.find('FUNCTIONS = [')] - footer = content[content.find("if __name__ == '__main__':"):] - - - f = open(filename, 'w') - f.write(header) - f.write('FUNCTIONS = %s\n\n' % pprint.pformat(natives)) - f.write(footer) - f.close() - - def run(): - version = get_version() - print '> Downloading function index for SourceMod %s' % version - functions = get_sm_functions() - print '> %d functions found:' % len(functions) - - functionlist = [] - for full_function_name in functions: - print '>> %s' % full_function_name - functionlist.append(full_function_name) - - regenerate(__file__, functionlist) - - - run() diff --git a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/_vimbuiltins.py b/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/_vimbuiltins.py deleted file mode 100644 index 9fc1b15..0000000 --- a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/_vimbuiltins.py +++ /dev/null @@ -1,13 +0,0 @@ -# Split up in multiple functions so it's importable by jython, which has a -# per-method size limit. 
- -def _getauto(): - return [('BufAdd','BufAdd'),('BufCreate','BufCreate'),('BufDelete','BufDelete'),('BufEnter','BufEnter'),('BufFilePost','BufFilePost'),('BufFilePre','BufFilePre'),('BufHidden','BufHidden'),('BufLeave','BufLeave'),('BufNew','BufNew'),('BufNewFile','BufNewFile'),('BufRead','BufRead'),('BufReadCmd','BufReadCmd'),('BufReadPost','BufReadPost'),('BufReadPre','BufReadPre'),('BufUnload','BufUnload'),('BufWinEnter','BufWinEnter'),('BufWinLeave','BufWinLeave'),('BufWipeout','BufWipeout'),('BufWrite','BufWrite'),('BufWriteCmd','BufWriteCmd'),('BufWritePost','BufWritePost'),('BufWritePre','BufWritePre'),('Cmd','Cmd'),('CmdwinEnter','CmdwinEnter'),('CmdwinLeave','CmdwinLeave'),('ColorScheme','ColorScheme'),('CursorHold','CursorHold'),('CursorHoldI','CursorHoldI'),('CursorMoved','CursorMoved'),('CursorMovedI','CursorMovedI'),('EncodingChanged','EncodingChanged'),('FileAppendCmd','FileAppendCmd'),('FileAppendPost','FileAppendPost'),('FileAppendPre','FileAppendPre'),('FileChangedRO','FileChangedRO'),('FileChangedShell','FileChangedShell'),('FileChangedShellPost','FileChangedShellPost'),('FileEncoding','FileEncoding'),('FileReadCmd','FileReadCmd'),('FileReadPost','FileReadPost'),('FileReadPre','FileReadPre'),('FileType','FileType'),('FileWriteCmd','FileWriteCmd'),('FileWritePost','FileWritePost'),('FileWritePre','FileWritePre'),('FilterReadPost','FilterReadPost'),('FilterReadPre','FilterReadPre'),('FilterWritePost','FilterWritePost'),('FilterWritePre','FilterWritePre'),('FocusGained','FocusGained'),('FocusLost','FocusLost'),('FuncUndefined','FuncUndefined'),('GUIEnter','GUIEnter'),('GUIFailed','GUIFailed'),('InsertChange','InsertChange'),('InsertCharPre','InsertCharPre'),('InsertEnter','InsertEnter'),('InsertLeave','InsertLeave'),('MenuPopup','MenuPopup'),('QuickFixCmdPost','QuickFixCmdPost'),('QuickFixCmdPre','QuickFixCmdPre'),('RemoteReply','RemoteReply'),('SessionLoadPost','SessionLoadPost'),('ShellCmdPost','ShellCmdPost'),('ShellFilterPost','ShellFilterPost')
,('SourceCmd','SourceCmd'),('SourcePre','SourcePre'),('SpellFileMissing','SpellFileMissing'),('StdinReadPost','StdinReadPost'),('StdinReadPre','StdinReadPre'),('SwapExists','SwapExists'),('Syntax','Syntax'),('TabEnter','TabEnter'),('TabLeave','TabLeave'),('TermChanged','TermChanged'),('TermResponse','TermResponse'),('User','User'),('UserGettingBored','UserGettingBored'),('VimEnter','VimEnter'),('VimLeave','VimLeave'),('VimLeavePre','VimLeavePre'),('VimResized','VimResized'),('WinEnter','WinEnter'),('WinLeave','WinLeave'),('event','event')] -def _getcommand(): - return [('Allargs','Allargs'),('DiffOrig','DiffOrig'),('Error','Error'),('Man','Man'),('MyCommand','MyCommand'),('Mycmd','Mycmd'),('N','N'),('N','Next'),('P','P'),('P','Print'),('Ren','Ren'),('Rena','Rena'),('Renu','Renu'),('TOhtml','TOhtml'),('X','X'),('XMLent','XMLent'),('XMLns','XMLns'),('a','a'),('ab','ab'),('abc','abclear'),('abo','aboveleft'),('al','all'),('ar','ar'),('ar','args'),('arga','argadd'),('argd','argdelete'),('argdo','argdo'),('arge','argedit'),('argg','argglobal'),('argl','arglocal'),('argu','argument'),('as','ascii'),('au','au'),('b','buffer'),('bN','bNext'),('ba','ball'),('bad','badd'),('bar','bar'),('bd','bdelete'),('bel','belowright'),('bf','bfirst'),('bl','blast'),('bm','bmodified'),('bn','bnext'),('bo','botright'),('bp','bprevious'),('br','br'),('br','brewind'),('brea','break'),('breaka','breakadd'),('breakd','breakdel'),('breakl','breaklist'),('bro','browse'),('browseset','browseset'),('bu','bu'),('buf','buf'),('bufdo','bufdo'),('buffers','buffers'),('bun','bunload'),('bw','bwipeout'),('c','c'),('c','change'),('cN','cN'),('cN','cNext'),('cNf','cNf'),('cNf','cNfile'),('cabc','cabclear'),('cad','cad'),('cad','caddexpr'),('caddb','caddbuffer'),('caddf','caddfile'),('cal','call'),('cat','catch'),('cb','cbuffer'),('cc','cc'),('ccl','cclose'),('cd','cd'),('ce','center'),('cex','cexpr'),('cf','cfile'),('cfir','cfirst'),('cg','cgetfile'),('cgetb','cgetbuffer'),('cgete','cgetexpr'),('changes',
'changes'),('chd','chdir'),('che','checkpath'),('checkt','checktime'),('cl','cl'),('cl','clist'),('cla','clast'),('clo','close'),('cmapc','cmapclear'),('cmdname','cmdname'),('cn','cn'),('cn','cnext'),('cnew','cnewer'),('cnf','cnf'),('cnf','cnfile'),('co','copy'),('col','colder'),('colo','colorscheme'),('com','com'),('comc','comclear'),('comment','comment'),('comp','compiler'),('con','con'),('con','continue'),('conf','confirm'),('cope','copen'),('count','count'),('cp','cprevious'),('cpf','cpfile'),('cq','cquit'),('cr','crewind'),('cs','cs'),('cscope','cscope'),('cstag','cstag'),('cuna','cunabbrev'),('cw','cwindow'),('d','d'),('d','delete'),('de','de'),('debug','debug'),('debugg','debuggreedy'),('del','del'),('delc','delcommand'),('delf','delf'),('delf','delfunction'),('delm','delmarks'),('di','di'),('di','display'),('diffg','diffget'),('diffo','diffo'),('diffoff','diffoff'),('diffp','diffp'),('diffpatch','diffpatch'),('diffpu','diffput'),('diffsplit','diffsplit'),('difft','difft'),('diffthis','diffthis'),('diffu','diffupdate'),('dig','dig'),('dig','digraphs'),('dj','djump'),('dl','dlist'),('do','do'),('doau','doau'),('dr','drop'),('ds','dsearch'),('dsp','dsplit'),('dwim','dwim'),('e','e'),('e','e'),('e','e'),('e','e'),('e','e'),('e','e'),('e','e'),('e','e'),('e','e'),('e','edit'),('ea','ea'),('earlier','earlier'),('ec','ec'),('echoe','echoerr'),('echom','echomsg'),('echon','echon'),('el','else'),('elsei','elseif'),('em','emenu'),('emenu','emenu'),('en','en'),('en','endif'),('endf','endf'),('endf','endfunction'),('endfo','endfor'),('endfun','endfun'),('endt','endtry'),('endw','endwhile'),('ene','enew'),('ex','ex'),('exi','exit'),('exu','exusage'),('f','f'),('f','file'),('filename','filename'),('files','files'),('filet','filet'),('filetype','filetype'),('fin','fin'),('fin','find'),('fina','finally'),('fini','finish'),('fir','first'),('fix','fixdel'),('fo','fold'),('foldc','foldclose'),('foldd','folddoopen'),('folddoc','folddoclosed'),('foldo','foldopen'),('for','for'),
('fu','fu'),('fu','function'),('fun','fun'),('g','g'),('get','get'),('go','goto'),('gr','grep'),('grepa','grepadd'),('gs','gs'),('gs','gs'),('gui','gui'),('gvim','gvim'),('h','h'),('h','h'),('h','h'),('h','h'),('h','help'),('ha','hardcopy'),('helpf','helpfind'),('helpg','helpgrep'),('helpt','helptags'),('hi','hi'),('hid','hide'),('his','history'),('i','i'),('ia','ia'),('iabc','iabclear'),('if','if'),('ij','ijump'),('il','ilist'),('imapc','imapclear'),('in','in'),('index','index'),('intro','intro'),('is','isearch'),('isp','isplit'),('iuna','iunabbrev'),('j','join'),('ju','jumps'),('k','k'),('kee','keepmarks'),('keepa','keepa'),('keepalt','keepalt'),('keepj','keepjumps'),('l','l'),('l','list'),('lN','lN'),('lN','lNext'),('lNf','lNf'),('lNf','lNfile'),('la','la'),('la','last'),('lad','lad'),('lad','laddexpr'),('laddb','laddbuffer'),('laddf','laddfile'),('lan','lan'),('lan','language'),('lat','lat'),('later','later'),('lb','lbuffer'),('lc','lcd'),('lch','lchdir'),('lcl','lclose'),('lcs','lcs'),('lcscope','lcscope'),('le','left'),('lefta','leftabove'),('let','let'),('lex','lexpr'),('lf','lfile'),('lfir','lfirst'),('lg','lgetfile'),('lgetb','lgetbuffer'),('lgete','lgetexpr'),('lgr','lgrep'),('lgrepa','lgrepadd'),('lh','lhelpgrep'),('ll','ll'),('lla','llast'),('lli','llist'),('lmak','lmake'),('lmapc','lmapclear'),('lne','lne'),('lne','lnext'),('lnew','lnewer'),('lnf','lnf'),('lnf','lnfile'),('lo','lo'),('lo','loadview'),('loadk','loadk'),('loadkeymap','loadkeymap'),('loc','lockmarks'),('locale','locale'),('lockv','lockvar'),('lol','lolder'),('lop','lopen'),('lp','lprevious'),('lpf','lpfile'),('lr','lrewind'),('ls','ls'),('lt','ltag'),('lua','lua'),('luado','luado'),('luafile','luafile'),('lv','lvimgrep'),('lvimgrepa','lvimgrepadd'),('lw','lwindow'),('m','move'),('ma','ma'),('ma','mark'),('main','main'),('main','main'),('mak','make'),('marks','marks'),('mat','match'),('menut','menut'),('menut','menutranslate'),('mes','mes'),('messages','messages'),('mk','mk'),('mk','mkexrc'
),('mkdir','mkdir'),('mks','mksession'),('mksp','mkspell'),('mkv','mkv'),('mkv','mkvimrc'),('mkvie','mkview'),('mo','mo'),('mod','mode'),('mv','mv'),('mz','mz'),('mz','mzscheme'),('mzf','mzfile'),('n','n'),('n','n'),('n','next'),('nb','nbkey'),('nbc','nbclose'),('nbs','nbstart'),('ne','ne'),('new','new'),('nkf','nkf'),('nmapc','nmapclear'),('noa','noa'),('noautocmd','noautocmd'),('noh','nohlsearch'),('nu','number'),('o','o'),('o','open'),('ol','oldfiles'),('omapc','omapclear'),('on','only'),('opt','options'),('ownsyntax','ownsyntax'),('p','p'),('p','p'),('p','p'),('p','p'),('p','p'),('p','p'),('p','p'),('p','p'),('p','p'),('p','print'),('pat','pat'),('pat','pat'),('pc','pclose'),('pe','pe'),('pe','perl'),('ped','pedit'),('perld','perldo'),('po','pop'),('popu','popu'),('popu','popup'),('pp','ppop'),('pr','pr'),('pre','preserve'),('prev','previous'),('pro','pro'),('prof','profile'),('profd','profdel'),('promptf','promptfind'),('promptr','promptrepl'),('ps','psearch'),('ptN','ptN'),('ptN','ptNext'),('pta','ptag'),('ptf','ptfirst'),('ptj','ptjump'),('ptl','ptlast'),('ptn','ptn'),('ptn','ptnext'),('ptp','ptprevious'),('ptr','ptrewind'),('pts','ptselect'),('pu','put'),('pw','pwd'),('py','py'),('py','python'),('py3','py3'),('py3','py3'),('py3file','py3file'),('pyf','pyfile'),('python3','python3'),('q','q'),('q','quit'),('qa','qall'),('quita','quitall'),('quote','quote'),('r','r'),('r','r'),('r','r'),('r','r'),('r','r'),('r','r'),('r','r'),('r','r'),('r','r'),('r','r'),('r','r'),('r','read'),('re','re'),('rec','recover'),('red','red'),('red','redo'),('redi','redir'),('redr','redraw'),('redraws','redrawstatus'),('reg','registers'),('res','resize'),('ret','retab'),('retu','return'),('rew','rewind'),('ri','right'),('rightb','rightbelow'),('ru','ru'),('ru','runtime'),('rub','ruby'),('rubyd','rubydo'),('rubyf','rubyfile'),('rundo','rundo'),('rv','rviminfo'),('s','s'),('s','s'),('s','s'),('s','s'),('sN','sNext'),('sa','sargument'),('sal','sall'),('san','sandbox'),('sav','saveas')
,('sb','sbuffer'),('sbN','sbNext'),('sba','sball'),('sbf','sbfirst'),('sbl','sblast'),('sbm','sbmodified'),('sbn','sbnext'),('sbp','sbprevious'),('sbr','sbrewind'),('scrip','scrip'),('scrip','scriptnames'),('scripte','scriptencoding'),('scs','scs'),('scscope','scscope'),('se','set'),('setf','setfiletype'),('setg','setglobal'),('setl','setlocal'),('sf','sfind'),('sfir','sfirst'),('sh','shell'),('si','si'),('sig','sig'),('sign','sign'),('sil','silent'),('sim','simalt'),('sl','sl'),('sl','sleep'),('sla','slast'),('sm','smagic'),('sm','smap'),('sme','sme'),('smenu','smenu'),('sn','snext'),('sni','sniff'),('sno','snomagic'),('snoreme','snoreme'),('snoremenu','snoremenu'),('so','so'),('so','source'),('sor','sort'),('sp','split'),('spe','spe'),('spe','spellgood'),('spelld','spelldump'),('spelli','spellinfo'),('spellr','spellrepall'),('spellu','spellundo'),('spellw','spellwrong'),('spr','sprevious'),('sre','srewind'),('st','st'),('st','stop'),('sta','stag'),('star','star'),('star','startinsert'),('start','start'),('startg','startgreplace'),('startr','startreplace'),('stj','stjump'),('stopi','stopinsert'),('sts','stselect'),('sub','sub'),('sub','sub'),('sun','sunhide'),('sunme','sunme'),('sunmenu','sunmenu'),('sus','suspend'),('sv','sview'),('sw','swapname'),('sy','sy'),('syn','syn'),('sync','sync'),('syncbind','syncbind'),('synlist','synlist'),('t','t'),('t','t'),('t','t'),('tN','tN'),('tN','tNext'),('ta','ta'),('ta','tag'),('tab','tab'),('tabN','tabN'),('tabN','tabNext'),('tabc','tabclose'),('tabd','tabdo'),('tabe','tabedit'),('tabf','tabfind'),('tabfir','tabfirst'),('tabl','tablast'),('tabm','tabmove'),('tabn','tabnext'),('tabnew','tabnew'),('tabo','tabonly'),('tabp','tabprevious'),('tabr','tabrewind'),('tabs','tabs'),('tags','tags'),('tc','tcl'),('tcld','tcldo'),('tclf','tclfile'),('te','tearoff'),('tf','tfirst'),('th','throw'),('tj','tjump'),('tl','tlast'),('tm','tm'),('tm','tmenu'),('tn','tn'),('tn','tnext'),('to','topleft'),('tp','tprevious'),('tr','tr'),('tr','trewin
d'),('try','try'),('ts','tselect'),('tu','tu'),('tu','tunmenu'),('u','u'),('u','undo'),('un','un'),('una','unabbreviate'),('undoj','undojoin'),('undol','undolist'),('unh','unhide'),('unl','unl'),('unlo','unlockvar'),('uns','unsilent'),('up','update'),('v','v'),('ve','ve'),('ve','version'),('verb','verbose'),('version','version'),('version','version'),('vert','vertical'),('vi','vi'),('vi','visual'),('vie','view'),('vim','vimgrep'),('vimgrepa','vimgrepadd'),('viu','viusage'),('vmapc','vmapclear'),('vne','vnew'),('vs','vsplit'),('w','w'),('w','write'),('wN','wNext'),('wa','wall'),('wh','while'),('win','win'),('win','winsize'),('winc','wincmd'),('windo','windo'),('winp','winpos'),('wn','wnext'),('wp','wprevious'),('wq','wq'),('wqa','wqall'),('ws','wsverb'),('wundo','wundo'),('wv','wviminfo'),('x','x'),('x','xit'),('xa','xall'),('xmapc','xmapclear'),('xme','xme'),('xmenu','xmenu'),('xnoreme','xnoreme'),('xnoremenu','xnoremenu'),('xterm','xterm'),('xunme','xunme'),('xunmenu','xunmenu'),('xwininfo','xwininfo'),('y','yank')] -def _getoption(): - return 
[('acd','acd'),('ai','ai'),('akm','akm'),('al','al'),('aleph','aleph'),('allowrevins','allowrevins'),('altkeymap','altkeymap'),('ambiwidth','ambiwidth'),('ambw','ambw'),('anti','anti'),('antialias','antialias'),('ar','ar'),('arab','arab'),('arabic','arabic'),('arabicshape','arabicshape'),('ari','ari'),('arshape','arshape'),('autochdir','autochdir'),('autoindent','autoindent'),('autoread','autoread'),('autowrite','autowrite'),('autowriteall','autowriteall'),('aw','aw'),('awa','awa'),('background','background'),('backspace','backspace'),('backup','backup'),('backupcopy','backupcopy'),('backupdir','backupdir'),('backupext','backupext'),('backupskip','backupskip'),('balloondelay','balloondelay'),('ballooneval','ballooneval'),('balloonexpr','balloonexpr'),('bdir','bdir'),('bdlay','bdlay'),('beval','beval'),('bex','bex'),('bexpr','bexpr'),('bg','bg'),('bh','bh'),('bin','bin'),('binary','binary'),('biosk','biosk'),('bioskey','bioskey'),('bk','bk'),('bkc','bkc'),('bl','bl'),('bomb','bomb'),('breakat','breakat'),('brk','brk'),('browsedir','browsedir'),('bs','bs'),('bsdir','bsdir'),('bsk','bsk'),('bt','bt'),('bufhidden','bufhidden'),('buflisted','buflisted'),('buftype','buftype'),('casemap','casemap'),('cb','cb'),('cc','cc'),('ccv','ccv'),('cd','cd'),('cdpath','cdpath'),('cedit','cedit'),('cf','cf'),('cfu','cfu'),('ch','ch'),('charconvert','charconvert'),('ci','ci'),('cin','cin'),('cindent','cindent'),('cink','cink'),('cinkeys','cinkeys'),('cino','cino'),('cinoptions','cinoptions'),('cinw','cinw'),('cinwords','cinwords'),('clipboard','clipboard'),('cmdheight','cmdheight'),('cmdwinheight','cmdwinheight'),('cmp','cmp'),('cms','cms'),('co','co'),('cocu','cocu'),('cole','cole'),('colorcolumn','colorcolumn'),('columns','columns'),('com','com'),('comments','comments'),('commentstring','commentstring'),('compatible','compatible'),('complete','complete'),('completefunc','completefunc'),('completeopt','completeopt'),('concealcursor','concealcursor'),('conceallevel','conceallevel'),('c
onfirm','confirm'),('consk','consk'),('conskey','conskey'),('copyindent','copyindent'),('cot','cot'),('cp','cp'),('cpo','cpo'),('cpoptions','cpoptions'),('cpt','cpt'),('crb','crb'),('cryptmethod','cryptmethod'),('cscopepathcomp','cscopepathcomp'),('cscopeprg','cscopeprg'),('cscopequickfix','cscopequickfix'),('cscoperelative','cscoperelative'),('cscopetag','cscopetag'),('cscopetagorder','cscopetagorder'),('cscopeverbose','cscopeverbose'),('cspc','cspc'),('csprg','csprg'),('csqf','csqf'),('csre','csre'),('cst','cst'),('csto','csto'),('csverb','csverb'),('cuc','cuc'),('cul','cul'),('cursorbind','cursorbind'),('cursorcolumn','cursorcolumn'),('cursorline','cursorline'),('cwh','cwh'),('debug','debug'),('deco','deco'),('def','def'),('define','define'),('delcombine','delcombine'),('dex','dex'),('dg','dg'),('dict','dict'),('dictionary','dictionary'),('diff','diff'),('diffexpr','diffexpr'),('diffopt','diffopt'),('digraph','digraph'),('dip','dip'),('dir','dir'),('directory','directory'),('display','display'),('dy','dy'),('ea','ea'),('ead','ead'),('eadirection','eadirection'),('eb','eb'),('ed','ed'),('edcompatible','edcompatible'),('ef','ef'),('efm','efm'),('ei','ei'),('ek','ek'),('enc','enc'),('encoding','encoding'),('endofline','endofline'),('eol','eol'),('ep','ep'),('equalalways','equalalways'),('equalprg','equalprg'),('errorbells','errorbells'),('errorfile','errorfile'),('errorformat','errorformat'),('esckeys','esckeys'),('et','et'),('eventignore','eventignore'),('ex','ex'),('expandtab','expandtab'),('exrc','exrc'),('fcl','fcl'),('fcs','fcs'),('fdc','fdc'),('fde','fde'),('fdi','fdi'),('fdl','fdl'),('fdls','fdls'),('fdm','fdm'),('fdn','fdn'),('fdo','fdo'),('fdt','fdt'),('fen','fen'),('fenc','fenc'),('fencs','fencs'),('fex','fex'),('ff','ff'),('ffs','ffs'),('fileencoding','fileencoding'),('fileencodings','fileencodings'),('fileformat','fileformat'),('fileformats','fileformats'),('filetype','filetype'),('fillchars','fillchars'),('fk','fk'),('fkmap','fkmap'),('flp','flp'),('fml
','fml'),('fmr','fmr'),('fo','fo'),('foldclose','foldclose'),('foldcolumn','foldcolumn'),('foldenable','foldenable'),('foldexpr','foldexpr'),('foldignore','foldignore'),('foldlevel','foldlevel'),('foldlevelstart','foldlevelstart'),('foldmarker','foldmarker'),('foldmethod','foldmethod'),('foldminlines','foldminlines'),('foldnestmax','foldnestmax'),('foldopen','foldopen'),('foldtext','foldtext'),('formatexpr','formatexpr'),('formatlistpat','formatlistpat'),('formatoptions','formatoptions'),('formatprg','formatprg'),('fp','fp'),('fs','fs'),('fsync','fsync'),('ft','ft'),('gcr','gcr'),('gd','gd'),('gdefault','gdefault'),('gfm','gfm'),('gfn','gfn'),('gfs','gfs'),('gfw','gfw'),('ghr','ghr'),('go','go'),('gp','gp'),('grepformat','grepformat'),('grepprg','grepprg'),('gtl','gtl'),('gtt','gtt'),('guicursor','guicursor'),('guifont','guifont'),('guifontset','guifontset'),('guifontwide','guifontwide'),('guiheadroom','guiheadroom'),('guioptions','guioptions'),('guipty','guipty'),('guitablabel','guitablabel'),('guitabtooltip','guitabtooltip'),('helpfile','helpfile'),('helpheight','helpheight'),('helplang','helplang'),('hf','hf'),('hh','hh'),('hi','hi'),('hid','hid'),('hidden','hidden'),('highlight','highlight'),('history','history'),('hk','hk'),('hkmap','hkmap'),('hkmapp','hkmapp'),('hkp','hkp'),('hl','hl'),('hlg','hlg'),('hls','hls'),('hlsearch','hlsearch'),('ic','ic'),('icon','icon'),('iconstring','iconstring'),('ignorecase','ignorecase'),('im','im'),('imactivatekey','imactivatekey'),('imak','imak'),('imc','imc'),('imcmdline','imcmdline'),('imd','imd'),('imdisable','imdisable'),('imi','imi'),('iminsert','iminsert'),('ims','ims'),('imsearch','imsearch'),('inc','inc'),('include','include'),('includeexpr','includeexpr'),('incsearch','incsearch'),('inde','inde'),('indentexpr','indentexpr'),('indentkeys','indentkeys'),('indk','indk'),('inex','inex'),('inf','inf'),('infercase','infercase'),('inoremap','inoremap'),('insertmode','insertmode'),('invacd','invacd'),('invai','invai'),('invak
m','invakm'),('invallowrevins','invallowrevins'),('invaltkeymap','invaltkeymap'),('invanti','invanti'),('invantialias','invantialias'),('invar','invar'),('invarab','invarab'),('invarabic','invarabic'),('invarabicshape','invarabicshape'),('invari','invari'),('invarshape','invarshape'),('invautochdir','invautochdir'),('invautoindent','invautoindent'),('invautoread','invautoread'),('invautowrite','invautowrite'),('invautowriteall','invautowriteall'),('invaw','invaw'),('invawa','invawa'),('invbackup','invbackup'),('invballooneval','invballooneval'),('invbeval','invbeval'),('invbin','invbin'),('invbinary','invbinary'),('invbiosk','invbiosk'),('invbioskey','invbioskey'),('invbk','invbk'),('invbl','invbl'),('invbomb','invbomb'),('invbuflisted','invbuflisted'),('invcf','invcf'),('invci','invci'),('invcin','invcin'),('invcindent','invcindent'),('invcompatible','invcompatible'),('invconfirm','invconfirm'),('invconsk','invconsk'),('invconskey','invconskey'),('invcopyindent','invcopyindent'),('invcp','invcp'),('invcrb','invcrb'),('invcscopetag','invcscopetag'),('invcscopeverbose','invcscopeverbose'),('invcst','invcst'),('invcsverb','invcsverb'),('invcuc','invcuc'),('invcul','invcul'),('invcursorbind','invcursorbind'),('invcursorcolumn','invcursorcolumn'),('invcursorline','invcursorline'),('invdeco','invdeco'),('invdelcombine','invdelcombine'),('invdg','invdg'),('invdiff','invdiff'),('invdigraph','invdigraph'),('invea','invea'),('inveb','inveb'),('inved','inved'),('invedcompatible','invedcompatible'),('invek','invek'),('invendofline','invendofline'),('inveol','inveol'),('invequalalways','invequalalways'),('inverrorbells','inverrorbells'),('invesckeys','invesckeys'),('invet','invet'),('invex','invex'),('invexpandtab','invexpandtab'),('invexrc','invexrc'),('invfen','invfen'),('invfk','invfk'),('invfkmap','invfkmap'),('invfoldenable','invfoldenable'),('invgd','invgd'),('invgdefault','invgdefault'),('invguipty','invguipty'),('invhid','invhid'),('invhidden','invhidden'),('invhk','inv
hk'),('invhkmap','invhkmap'),('invhkmapp','invhkmapp'),('invhkp','invhkp'),('invhls','invhls'),('invhlsearch','invhlsearch'),('invic','invic'),('invicon','invicon'),('invignorecase','invignorecase'),('invim','invim'),('invimc','invimc'),('invimcmdline','invimcmdline'),('invimd','invimd'),('invimdisable','invimdisable'),('invincsearch','invincsearch'),('invinf','invinf'),('invinfercase','invinfercase'),('invinsertmode','invinsertmode'),('invis','invis'),('invjoinspaces','invjoinspaces'),('invjs','invjs'),('invlazyredraw','invlazyredraw'),('invlbr','invlbr'),('invlinebreak','invlinebreak'),('invlisp','invlisp'),('invlist','invlist'),('invloadplugins','invloadplugins'),('invlpl','invlpl'),('invlz','invlz'),('invma','invma'),('invmacatsui','invmacatsui'),('invmagic','invmagic'),('invmh','invmh'),('invml','invml'),('invmod','invmod'),('invmodeline','invmodeline'),('invmodifiable','invmodifiable'),('invmodified','invmodified'),('invmore','invmore'),('invmousef','invmousef'),('invmousefocus','invmousefocus'),('invmousehide','invmousehide'),('invnu','invnu'),('invnumber','invnumber'),('invodev','invodev'),('invopendevice','invopendevice'),('invpaste','invpaste'),('invpi','invpi'),('invpreserveindent','invpreserveindent'),('invpreviewwindow','invpreviewwindow'),('invprompt','invprompt'),('invpvw','invpvw'),('invreadonly','invreadonly'),('invrelativenumber','invrelativenumber'),('invremap','invremap'),('invrestorescreen','invrestorescreen'),('invrevins','invrevins'),('invri','invri'),('invrightleft','invrightleft'),('invrl','invrl'),('invrnu','invrnu'),('invro','invro'),('invrs','invrs'),('invru','invru'),('invruler','invruler'),('invsb','invsb'),('invsc','invsc'),('invscb','invscb'),('invscrollbind','invscrollbind'),('invscs','invscs'),('invsecure','invsecure'),('invsft','invsft'),('invshellslash','invshellslash'),('invshelltemp','invshelltemp'),('invshiftround','invshiftround'),('invshortname','invshortname'),('invshowcmd','invshowcmd'),('invshowfulltag','invshowfulltag'),(
'invshowmatch','invshowmatch'),('invshowmode','invshowmode'),('invsi','invsi'),('invsm','invsm'),('invsmartcase','invsmartcase'),('invsmartindent','invsmartindent'),('invsmarttab','invsmarttab'),('invsmd','invsmd'),('invsn','invsn'),('invsol','invsol'),('invspell','invspell'),('invsplitbelow','invsplitbelow'),('invsplitright','invsplitright'),('invspr','invspr'),('invsr','invsr'),('invssl','invssl'),('invsta','invsta'),('invstartofline','invstartofline'),('invstmp','invstmp'),('invswapfile','invswapfile'),('invswf','invswf'),('invta','invta'),('invtagbsearch','invtagbsearch'),('invtagrelative','invtagrelative'),('invtagstack','invtagstack'),('invtbi','invtbi'),('invtbidi','invtbidi'),('invtbs','invtbs'),('invtermbidi','invtermbidi'),('invterse','invterse'),('invtextauto','invtextauto'),('invtextmode','invtextmode'),('invtf','invtf'),('invtgst','invtgst'),('invtildeop','invtildeop'),('invtimeout','invtimeout'),('invtitle','invtitle'),('invto','invto'),('invtop','invtop'),('invtr','invtr'),('invttimeout','invttimeout'),('invttybuiltin','invttybuiltin'),('invttyfast','invttyfast'),('invtx','invtx'),('invvb','invvb'),('invvisualbell','invvisualbell'),('invwa','invwa'),('invwarn','invwarn'),('invwb','invwb'),('invweirdinvert','invweirdinvert'),('invwfh','invwfh'),('invwfw','invwfw'),('invwildignorecase','invwildignorecase'),('invwildmenu','invwildmenu'),('invwinfixheight','invwinfixheight'),('invwinfixwidth','invwinfixwidth'),('invwiv','invwiv'),('invwmnu','invwmnu'),('invwrap','invwrap'),('invwrapscan','invwrapscan'),('invwrite','invwrite'),('invwriteany','invwriteany'),('invwritebackup','invwritebackup'),('invws','invws'),('is','is'),('isf','isf'),('isfname','isfname'),('isi','isi'),('isident','isident'),('isk','isk'),('iskeyword','iskeyword'),('isp','isp'),('isprint','isprint'),('joinspaces','joinspaces'),('js','js'),('key','key'),('keymap','keymap'),('keymodel','keymodel'),('keywordprg','keywordprg'),('km','km'),('kmp','kmp'),('kp','kp'),('langmap','langmap'),('langm
enu','langmenu'),('laststatus','laststatus'),('lazyredraw','lazyredraw'),('lbr','lbr'),('lcs','lcs'),('linebreak','linebreak'),('lines','lines'),('linespace','linespace'),('lisp','lisp'),('lispwords','lispwords'),('list','list'),('listchars','listchars'),('lm','lm'),('lmap','lmap'),('loadplugins','loadplugins'),('lpl','lpl'),('ls','ls'),('lsp','lsp'),('lw','lw'),('lz','lz'),('ma','ma'),('macatsui','macatsui'),('magic','magic'),('makeef','makeef'),('makeprg','makeprg'),('mat','mat'),('matchpairs','matchpairs'),('matchtime','matchtime'),('maxcombine','maxcombine'),('maxfuncdepth','maxfuncdepth'),('maxmapdepth','maxmapdepth'),('maxmem','maxmem'),('maxmempattern','maxmempattern'),('maxmemtot','maxmemtot'),('mco','mco'),('mef','mef'),('menuitems','menuitems'),('mfd','mfd'),('mh','mh'),('mis','mis'),('mkspellmem','mkspellmem'),('ml','ml'),('mls','mls'),('mm','mm'),('mmd','mmd'),('mmp','mmp'),('mmt','mmt'),('mod','mod'),('modeline','modeline'),('modelines','modelines'),('modifiable','modifiable'),('modified','modified'),('more','more'),('mouse','mouse'),('mousef','mousef'),('mousefocus','mousefocus'),('mousehide','mousehide'),('mousem','mousem'),('mousemodel','mousemodel'),('mouses','mouses'),('mouseshape','mouseshape'),('mouset','mouset'),('mousetime','mousetime'),('mp','mp'),('mps','mps'),('msm','msm'),('mzq','mzq'),('mzquantum','mzquantum'),('nf','nf'),('nnoremap','nnoremap'),('noacd','noacd'),('noai','noai'),('noakm','noakm'),('noallowrevins','noallowrevins'),('noaltkeymap','noaltkeymap'),('noanti','noanti'),('noantialias','noantialias'),('noar','noar'),('noarab','noarab'),('noarabic','noarabic'),('noarabicshape','noarabicshape'),('noari','noari'),('noarshape','noarshape'),('noautochdir','noautochdir'),('noautoindent','noautoindent'),('noautoread','noautoread'),('noautowrite','noautowrite'),('noautowriteall','noautowriteall'),('noaw','noaw'),('noawa','noawa'),('nobackup','nobackup'),('noballooneval','noballooneval'),('nobeval','nobeval'),('nobin','nobin'),('nobinary','
nobinary'),('nobiosk','nobiosk'),('nobioskey','nobioskey'),('nobk','nobk'),('nobl','nobl'),('nobomb','nobomb'),('nobuflisted','nobuflisted'),('nocf','nocf'),('noci','noci'),('nocin','nocin'),('nocindent','nocindent'),('nocompatible','nocompatible'),('noconfirm','noconfirm'),('noconsk','noconsk'),('noconskey','noconskey'),('nocopyindent','nocopyindent'),('nocp','nocp'),('nocrb','nocrb'),('nocscopetag','nocscopetag'),('nocscopeverbose','nocscopeverbose'),('nocst','nocst'),('nocsverb','nocsverb'),('nocuc','nocuc'),('nocul','nocul'),('nocursorbind','nocursorbind'),('nocursorcolumn','nocursorcolumn'),('nocursorline','nocursorline'),('nodeco','nodeco'),('nodelcombine','nodelcombine'),('nodg','nodg'),('nodiff','nodiff'),('nodigraph','nodigraph'),('noea','noea'),('noeb','noeb'),('noed','noed'),('noedcompatible','noedcompatible'),('noek','noek'),('noendofline','noendofline'),('noeol','noeol'),('noequalalways','noequalalways'),('noerrorbells','noerrorbells'),('noesckeys','noesckeys'),('noet','noet'),('noex','noex'),('noexpandtab','noexpandtab'),('noexrc','noexrc'),('nofen','nofen'),('nofk','nofk'),('nofkmap','nofkmap'),('nofoldenable','nofoldenable'),('nogd','nogd'),('nogdefault','nogdefault'),('noguipty','noguipty'),('nohid','nohid'),('nohidden','nohidden'),('nohk','nohk'),('nohkmap','nohkmap'),('nohkmapp','nohkmapp'),('nohkp','nohkp'),('nohls','nohls'),('nohlsearch','nohlsearch'),('noic','noic'),('noicon','noicon'),('noignorecase','noignorecase'),('noim','noim'),('noimc','noimc'),('noimcmdline','noimcmdline'),('noimd','noimd'),('noimdisable','noimdisable'),('noincsearch','noincsearch'),('noinf','noinf'),('noinfercase','noinfercase'),('noinsertmode','noinsertmode'),('nois','nois'),('nojoinspaces','nojoinspaces'),('nojs','nojs'),('nolazyredraw','nolazyredraw'),('nolbr','nolbr'),('nolinebreak','nolinebreak'),('nolisp','nolisp'),('nolist','nolist'),('noloadplugins','noloadplugins'),('nolpl','nolpl'),('nolz','nolz'),('noma','noma'),('nomacatsui','nomacatsui'),('nomagic','nomagic
'),('nomh','nomh'),('noml','noml'),('nomod','nomod'),('nomodeline','nomodeline'),('nomodifiable','nomodifiable'),('nomodified','nomodified'),('nomore','nomore'),('nomousef','nomousef'),('nomousefocus','nomousefocus'),('nomousehide','nomousehide'),('nonu','nonu'),('nonumber','nonumber'),('noodev','noodev'),('noopendevice','noopendevice'),('nopaste','nopaste'),('nopi','nopi'),('nopreserveindent','nopreserveindent'),('nopreviewwindow','nopreviewwindow'),('noprompt','noprompt'),('nopvw','nopvw'),('noreadonly','noreadonly'),('norelativenumber','norelativenumber'),('noremap','noremap'),('norestorescreen','norestorescreen'),('norevins','norevins'),('nori','nori'),('norightleft','norightleft'),('norl','norl'),('nornu','nornu'),('noro','noro'),('nors','nors'),('noru','noru'),('noruler','noruler'),('nosb','nosb'),('nosc','nosc'),('noscb','noscb'),('noscrollbind','noscrollbind'),('noscs','noscs'),('nosecure','nosecure'),('nosft','nosft'),('noshellslash','noshellslash'),('noshelltemp','noshelltemp'),('noshiftround','noshiftround'),('noshortname','noshortname'),('noshowcmd','noshowcmd'),('noshowfulltag','noshowfulltag'),('noshowmatch','noshowmatch'),('noshowmode','noshowmode'),('nosi','nosi'),('nosm','nosm'),('nosmartcase','nosmartcase'),('nosmartindent','nosmartindent'),('nosmarttab','nosmarttab'),('nosmd','nosmd'),('nosn','nosn'),('nosol','nosol'),('nospell','nospell'),('nosplitbelow','nosplitbelow'),('nosplitright','nosplitright'),('nospr','nospr'),('nosr','nosr'),('nossl','nossl'),('nosta','nosta'),('nostartofline','nostartofline'),('nostmp','nostmp'),('noswapfile','noswapfile'),('noswf','noswf'),('nota','nota'),('notagbsearch','notagbsearch'),('notagrelative','notagrelative'),('notagstack','notagstack'),('notbi','notbi'),('notbidi','notbidi'),('notbs','notbs'),('notermbidi','notermbidi'),('noterse','noterse'),('notextauto','notextauto'),('notextmode','notextmode'),('notf','notf'),('notgst','notgst'),('notildeop','notildeop'),('notimeout','notimeout'),('notitle','notitle'),(
'noto','noto'),('notop','notop'),('notr','notr'),('nottimeout','nottimeout'),('nottybuiltin','nottybuiltin'),('nottyfast','nottyfast'),('notx','notx'),('novb','novb'),('novisualbell','novisualbell'),('nowa','nowa'),('nowarn','nowarn'),('nowb','nowb'),('noweirdinvert','noweirdinvert'),('nowfh','nowfh'),('nowfw','nowfw'),('nowildignorecase','nowildignorecase'),('nowildmenu','nowildmenu'),('nowinfixheight','nowinfixheight'),('nowinfixwidth','nowinfixwidth'),('nowiv','nowiv'),('nowmnu','nowmnu'),('nowrap','nowrap'),('nowrapscan','nowrapscan'),('nowrite','nowrite'),('nowriteany','nowriteany'),('nowritebackup','nowritebackup'),('nows','nows'),('nrformats','nrformats'),('nu','nu'),('number','number'),('numberwidth','numberwidth'),('nuw','nuw'),('odev','odev'),('oft','oft'),('ofu','ofu'),('omnifunc','omnifunc'),('opendevice','opendevice'),('operatorfunc','operatorfunc'),('opfunc','opfunc'),('osfiletype','osfiletype'),('pa','pa'),('para','para'),('paragraphs','paragraphs'),('paste','paste'),('pastetoggle','pastetoggle'),('patchexpr','patchexpr'),('patchmode','patchmode'),('path','path'),('pdev','pdev'),('penc','penc'),('pex','pex'),('pexpr','pexpr'),('pfn','pfn'),('ph','ph'),('pheader','pheader'),('pi','pi'),('pm','pm'),('pmbcs','pmbcs'),('pmbfn','pmbfn'),('popt','popt'),('preserveindent','preserveindent'),('previewheight','previewheight'),('previewwindow','previewwindow'),('printdevice','printdevice'),('printencoding','printencoding'),('printexpr','printexpr'),('printfont','printfont'),('printheader','printheader'),('printmbcharset','printmbcharset'),('printmbfont','printmbfont'),('printoptions','printoptions'),('prompt','prompt'),('pt','pt'),('pumheight','pumheight'),('pvh','pvh'),('pvw','pvw'),('qe','qe'),('quoteescape','quoteescape'),('rdt','rdt'),('readonly','readonly'),('redrawtime','redrawtime'),('relativenumber','relativenumber'),('remap','remap'),('report','report'),('restorescreen','restorescreen'),('revins','revins'),('ri','ri'),('rightleft','rightleft'),('rightle
ftcmd','rightleftcmd'),('rl','rl'),('rlc','rlc'),('rnu','rnu'),('ro','ro'),('rs','rs'),('rtp','rtp'),('ru','ru'),('ruf','ruf'),('ruler','ruler'),('rulerformat','rulerformat'),('runtimepath','runtimepath'),('sb','sb'),('sbo','sbo'),('sbr','sbr'),('sc','sc'),('scb','scb'),('scr','scr'),('scroll','scroll'),('scrollbind','scrollbind'),('scrolljump','scrolljump'),('scrolloff','scrolloff'),('scrollopt','scrollopt'),('scs','scs'),('sect','sect'),('sections','sections'),('secure','secure'),('sel','sel'),('selection','selection'),('selectmode','selectmode'),('sessionoptions','sessionoptions'),('sft','sft'),('sh','sh'),('shcf','shcf'),('shell','shell'),('shellcmdflag','shellcmdflag'),('shellpipe','shellpipe'),('shellquote','shellquote'),('shellredir','shellredir'),('shellslash','shellslash'),('shelltemp','shelltemp'),('shelltype','shelltype'),('shellxquote','shellxquote'),('shiftround','shiftround'),('shiftwidth','shiftwidth'),('shm','shm'),('shortmess','shortmess'),('shortname','shortname'),('showbreak','showbreak'),('showcmd','showcmd'),('showfulltag','showfulltag'),('showmatch','showmatch'),('showmode','showmode'),('showtabline','showtabline'),('shq','shq'),('si','si'),('sidescroll','sidescroll'),('sidescrolloff','sidescrolloff'),('siso','siso'),('sj','sj'),('slm','slm'),('sm','sm'),('smartcase','smartcase'),('smartindent','smartindent'),('smarttab','smarttab'),('smc','smc'),('smd','smd'),('sn','sn'),('so','so'),('softtabstop','softtabstop'),('sol','sol'),('sp','sp'),('spc','spc'),('spell','spell'),('spellcapcheck','spellcapcheck'),('spellfile','spellfile'),('spelllang','spelllang'),('spellsuggest','spellsuggest'),('spf','spf'),('spl','spl'),('splitbelow','splitbelow'),('splitright','splitright'),('spr','spr'),('sps','sps'),('sr','sr'),('srr','srr'),('ss','ss'),('ssl','ssl'),('ssop','ssop'),('st','st'),('sta','sta'),('stal','stal'),('startofline','startofline'),('statusline','statusline'),('stl','stl'),('stmp','stmp'),('sts','sts'),('su','su'),('sua','sua'),('suffixes','su
ffixes'),('suffixesadd','suffixesadd'),('sw','sw'),('swapfile','swapfile'),('swapsync','swapsync'),('swb','swb'),('swf','swf'),('switchbuf','switchbuf'),('sws','sws'),('sxq','sxq'),('syn','syn'),('synmaxcol','synmaxcol'),('syntax','syntax'),('t_AB','t_AB'),('t_AF','t_AF'),('t_AL','t_AL'),('t_CS','t_CS'),('t_CV','t_CV'),('t_Ce','t_Ce'),('t_Co','t_Co'),('t_Cs','t_Cs'),('t_DL','t_DL'),('t_EI','t_EI'),('t_F1','t_F1'),('t_F2','t_F2'),('t_F3','t_F3'),('t_F4','t_F4'),('t_F5','t_F5'),('t_F6','t_F6'),('t_F7','t_F7'),('t_F8','t_F8'),('t_F9','t_F9'),('t_IE','t_IE'),('t_IS','t_IS'),('t_K1','t_K1'),('t_K3','t_K3'),('t_K4','t_K4'),('t_K5','t_K5'),('t_K6','t_K6'),('t_K7','t_K7'),('t_K8','t_K8'),('t_K9','t_K9'),('t_KA','t_KA'),('t_KB','t_KB'),('t_KC','t_KC'),('t_KD','t_KD'),('t_KE','t_KE'),('t_KF','t_KF'),('t_KG','t_KG'),('t_KH','t_KH'),('t_KI','t_KI'),('t_KJ','t_KJ'),('t_KK','t_KK'),('t_KL','t_KL'),('t_RI','t_RI'),('t_RV','t_RV'),('t_SI','t_SI'),('t_Sb','t_Sb'),('t_Sf','t_Sf'),('t_WP','t_WP'),('t_WS','t_WS'),('t_ZH','t_ZH'),('t_ZR','t_ZR'),('t_al','t_al'),('t_bc','t_bc'),('t_cd','t_cd'),('t_ce','t_ce'),('t_cl','t_cl'),('t_cm','t_cm'),('t_cs','t_cs'),('t_da','t_da'),('t_db','t_db'),('t_dl','t_dl'),('t_fs','t_fs'),('t_k1','t_k1'),('t_k2','t_k2'),('t_k3','t_k3'),('t_k4','t_k4'),('t_k5','t_k5'),('t_k6','t_k6'),('t_k7','t_k7'),('t_k8','t_k8'),('t_k9','t_k9'),('t_kB','t_kB'),('t_kD','t_kD'),('t_kI','t_kI'),('t_kN','t_kN'),('t_kP','t_kP'),('t_kb','t_kb'),('t_kd','t_kd'),('t_ke','t_ke'),('t_kh','t_kh'),('t_kl','t_kl'),('t_kr','t_kr'),('t_ks','t_ks'),('t_ku','t_ku'),('t_le','t_le'),('t_mb','t_mb'),('t_md','t_md'),('t_me','t_me'),('t_mr','t_mr'),('t_ms','t_ms'),('t_nd','t_nd'),('t_op','t_op'),('t_se','t_se'),('t_so','t_so'),('t_sr','t_sr'),('t_te','t_te'),('t_ti','t_ti'),('t_ts','t_ts'),('t_ue','t_ue'),('t_us','t_us'),('t_ut','t_ut'),('t_vb','t_vb'),('t_ve','t_ve'),('t_vi','t_vi'),('t_vs','t_vs'),('t_xs','t_xs'),('ta','ta'),('tabline','tabline'),('tabpagemax','tabpagemax'),('tabstop','tabst
op'),('tag','tag'),('tagbsearch','tagbsearch'),('taglength','taglength'),('tagrelative','tagrelative'),('tags','tags'),('tagstack','tagstack'),('tal','tal'),('tb','tb'),('tbi','tbi'),('tbidi','tbidi'),('tbis','tbis'),('tbs','tbs'),('tenc','tenc'),('term','term'),('termbidi','termbidi'),('termencoding','termencoding'),('terse','terse'),('textauto','textauto'),('textmode','textmode'),('textwidth','textwidth'),('tf','tf'),('tgst','tgst'),('thesaurus','thesaurus'),('tildeop','tildeop'),('timeout','timeout'),('timeoutlen','timeoutlen'),('title','title'),('titlelen','titlelen'),('titleold','titleold'),('titlestring','titlestring'),('tl','tl'),('tm','tm'),('to','to'),('toolbar','toolbar'),('toolbariconsize','toolbariconsize'),('top','top'),('tpm','tpm'),('tr','tr'),('ts','ts'),('tsl','tsl'),('tsr','tsr'),('ttimeout','ttimeout'),('ttimeoutlen','ttimeoutlen'),('ttm','ttm'),('tty','tty'),('ttybuiltin','ttybuiltin'),('ttyfast','ttyfast'),('ttym','ttym'),('ttymouse','ttymouse'),('ttyscroll','ttyscroll'),('ttytype','ttytype'),('tw','tw'),('tx','tx'),('uc','uc'),('udf','udf'),('udir','udir'),('ul','ul'),('undodir','undodir'),('undofile','undofile'),('undolevels','undolevels'),('undoreload','undoreload'),('updatecount','updatecount'),('updatetime','updatetime'),('ur','ur'),('ut','ut'),('vb','vb'),('vbs','vbs'),('vdir','vdir'),('ve','ve'),('verbose','verbose'),('verbosefile','verbosefile'),('vfile','vfile'),('vi','vi'),('viewdir','viewdir'),('viewoptions','viewoptions'),('viminfo','viminfo'),('virtualedit','virtualedit'),('visualbell','visualbell'),('vnoremap','vnoremap'),('vop','vop'),('wa','wa'),('wak','wak'),('warn','warn'),('wb','wb'),('wc','wc'),('wcm','wcm'),('wd','wd'),('weirdinvert','weirdinvert'),('wfh','wfh'),('wfw','wfw'),('wh','wh'),('whichwrap','whichwrap'),('wi','wi'),('wic','wic'),('wig','wig'),('wildchar','wildchar'),('wildcharm','wildcharm'),('wildignore','wildignore'),('wildignorecase','wildignorecase'),('wildmenu','wildmenu'),('wildmode','wildmode'),('wildoptions
','wildoptions'),('wim','wim'),('winaltkeys','winaltkeys'),('window','window'),('winfixheight','winfixheight'),('winfixwidth','winfixwidth'),('winheight','winheight'),('winminheight','winminheight'),('winminwidth','winminwidth'),('winwidth','winwidth'),('wiv','wiv'),('wiw','wiw'),('wm','wm'),('wmh','wmh'),('wmnu','wmnu'),('wmw','wmw'),('wop','wop'),('wrap','wrap'),('wrapmargin','wrapmargin'),('wrapscan','wrapscan'),('write','write'),('writeany','writeany'),('writebackup','writebackup'),('writedelay','writedelay'),('ws','ws'),('ww','ww')] - -option = _getoption() -command = _getcommand() -auto = _getauto() diff --git a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/agile.py b/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/agile.py deleted file mode 100644 index 1f81365..0000000 --- a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/agile.py +++ /dev/null @@ -1,2290 +0,0 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.agile - ~~~~~~~~~~~~~~~~~~~~~ - - Lexers for agile languages. - - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. 
-""" - -import re - -from pygments.lexer import Lexer, RegexLexer, ExtendedRegexLexer, \ - LexerContext, include, combined, do_insertions, bygroups, using, this -from pygments.token import Error, Text, Other, \ - Comment, Operator, Keyword, Name, String, Number, Generic, Punctuation -from pygments.util import get_bool_opt, get_list_opt, shebang_matches -from pygments import unistring as uni - - -__all__ = ['PythonLexer', 'PythonConsoleLexer', 'PythonTracebackLexer', - 'Python3Lexer', 'Python3TracebackLexer', 'RubyLexer', - 'RubyConsoleLexer', 'PerlLexer', 'LuaLexer', 'MoonScriptLexer', - 'CrocLexer', 'MiniDLexer', 'IoLexer', 'TclLexer', 'FactorLexer', - 'FancyLexer', 'DgLexer', 'Perl6Lexer'] - -# b/w compatibility -from pygments.lexers.functional import SchemeLexer -from pygments.lexers.jvm import IokeLexer, ClojureLexer - -line_re = re.compile('.*?\n') - - -class PythonLexer(RegexLexer): - """ - For `Python `_ source code. - """ - - name = 'Python' - aliases = ['python', 'py', 'sage'] - filenames = ['*.py', '*.pyw', '*.sc', 'SConstruct', 'SConscript', '*.tac', '*.sage'] - mimetypes = ['text/x-python', 'application/x-python'] - - tokens = { - 'root': [ - (r'\n', Text), - (r'^(\s*)([rRuU]{,2}"""(?:.|\n)*?""")', bygroups(Text, String.Doc)), - (r"^(\s*)([rRuU]{,2}'''(?:.|\n)*?''')", bygroups(Text, String.Doc)), - (r'[^\S\n]+', Text), - (r'#.*$', Comment), - (r'[]{}:(),;[]', Punctuation), - (r'\\\n', Text), - (r'\\', Text), - (r'(in|is|and|or|not)\b', Operator.Word), - (r'!=|==|<<|>>|[-~+/*%=<>&^|.]', Operator), - include('keywords'), - (r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'), - (r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'), - (r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), - 'fromimport'), - (r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), - 'import'), - include('builtins'), - include('backtick'), - ('(?:[rR]|[uU][rR]|[rR][uU])"""', String, 'tdqs'), - ("(?:[rR]|[uU][rR]|[rR][uU])'''", String, 'tsqs'), 
- ('(?:[rR]|[uU][rR]|[rR][uU])"', String, 'dqs'), - ("(?:[rR]|[uU][rR]|[rR][uU])'", String, 'sqs'), - ('[uU]?"""', String, combined('stringescape', 'tdqs')), - ("[uU]?'''", String, combined('stringescape', 'tsqs')), - ('[uU]?"', String, combined('stringescape', 'dqs')), - ("[uU]?'", String, combined('stringescape', 'sqs')), - include('name'), - include('numbers'), - ], - 'keywords': [ - (r'(assert|break|continue|del|elif|else|except|exec|' - r'finally|for|global|if|lambda|pass|print|raise|' - r'return|try|while|yield(\s+from)?|as|with)\b', Keyword), - ], - 'builtins': [ - (r'(?`_ source code (version 3.0). - - *New in Pygments 0.10.* - """ - - name = 'Python 3' - aliases = ['python3', 'py3'] - filenames = [] # Nothing until Python 3 gets widespread - mimetypes = ['text/x-python3', 'application/x-python3'] - - flags = re.MULTILINE | re.UNICODE - - uni_name = "[%s][%s]*" % (uni.xid_start, uni.xid_continue) - - tokens = PythonLexer.tokens.copy() - tokens['keywords'] = [ - (r'(assert|break|continue|del|elif|else|except|' - r'finally|for|global|if|lambda|pass|raise|nonlocal|' - r'return|try|while|yield(\s+from)?|as|with|True|False|None)\b', - Keyword), - ] - tokens['builtins'] = [ - (r'(?>> a = 'foo' - >>> print a - foo - >>> 1 / 0 - Traceback (most recent call last): - File "", line 1, in - ZeroDivisionError: integer division or modulo by zero - - Additional options: - - `python3` - Use Python 3 lexer for code. Default is ``False``. 
- *New in Pygments 1.0.* - """ - name = 'Python console session' - aliases = ['pycon'] - mimetypes = ['text/x-python-doctest'] - - def __init__(self, **options): - self.python3 = get_bool_opt(options, 'python3', False) - Lexer.__init__(self, **options) - - def get_tokens_unprocessed(self, text): - if self.python3: - pylexer = Python3Lexer(**self.options) - tblexer = Python3TracebackLexer(**self.options) - else: - pylexer = PythonLexer(**self.options) - tblexer = PythonTracebackLexer(**self.options) - - curcode = '' - insertions = [] - curtb = '' - tbindex = 0 - tb = 0 - for match in line_re.finditer(text): - line = match.group() - if line.startswith(u'>>> ') or line.startswith(u'... '): - tb = 0 - insertions.append((len(curcode), - [(0, Generic.Prompt, line[:4])])) - curcode += line[4:] - elif line.rstrip() == u'...' and not tb: - # only a new >>> prompt can end an exception block - # otherwise an ellipsis in place of the traceback frames - # will be mishandled - insertions.append((len(curcode), - [(0, Generic.Prompt, u'...')])) - curcode += line[3:] - else: - if curcode: - for item in do_insertions(insertions, - pylexer.get_tokens_unprocessed(curcode)): - yield item - curcode = '' - insertions = [] - if (line.startswith(u'Traceback (most recent call last):') or - re.match(ur' File "[^"]+", line \d+\n$', line)): - tb = 1 - curtb = line - tbindex = match.start() - elif line == 'KeyboardInterrupt\n': - yield match.start(), Name.Class, line - elif tb: - curtb += line - if not (line.startswith(' ') or line.strip() == u'...'): - tb = 0 - for i, t, v in tblexer.get_tokens_unprocessed(curtb): - yield tbindex+i, t, v - else: - yield match.start(), Generic.Output, line - if curcode: - for item in do_insertions(insertions, - pylexer.get_tokens_unprocessed(curcode)): - yield item - - -class PythonTracebackLexer(RegexLexer): - """ - For Python tracebacks. 
- - *New in Pygments 0.7.* - """ - - name = 'Python Traceback' - aliases = ['pytb'] - filenames = ['*.pytb'] - mimetypes = ['text/x-python-traceback'] - - tokens = { - 'root': [ - (r'^Traceback \(most recent call last\):\n', - Generic.Traceback, 'intb'), - # SyntaxError starts with this. - (r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'), - (r'^.*\n', Other), - ], - 'intb': [ - (r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)', - bygroups(Text, Name.Builtin, Text, Number, Text, Name, Text)), - (r'^( File )("[^"]+")(, line )(\d+)(\n)', - bygroups(Text, Name.Builtin, Text, Number, Text)), - (r'^( )(.+)(\n)', - bygroups(Text, using(PythonLexer), Text)), - (r'^([ \t]*)(\.\.\.)(\n)', - bygroups(Text, Comment, Text)), # for doctests... - (r'^([^:]+)(: )(.+)(\n)', - bygroups(Generic.Error, Text, Name, Text), '#pop'), - (r'^([a-zA-Z_][a-zA-Z0-9_]*)(:?\n)', - bygroups(Generic.Error, Text), '#pop') - ], - } - - -class Python3TracebackLexer(RegexLexer): - """ - For Python 3.0 tracebacks, with support for chained exceptions. - - *New in Pygments 1.0.* - """ - - name = 'Python 3.0 Traceback' - aliases = ['py3tb'] - filenames = ['*.py3tb'] - mimetypes = ['text/x-python3-traceback'] - - tokens = { - 'root': [ - (r'\n', Text), - (r'^Traceback \(most recent call last\):\n', Generic.Traceback, 'intb'), - (r'^During handling of the above exception, another ' - r'exception occurred:\n\n', Generic.Traceback), - (r'^The above exception was the direct cause of the ' - r'following exception:\n\n', Generic.Traceback), - (r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'), - ], - 'intb': [ - (r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)', - bygroups(Text, Name.Builtin, Text, Number, Text, Name, Text)), - (r'^( File )("[^"]+")(, line )(\d+)(\n)', - bygroups(Text, Name.Builtin, Text, Number, Text)), - (r'^( )(.+)(\n)', - bygroups(Text, using(Python3Lexer), Text)), - (r'^([ \t]*)(\.\.\.)(\n)', - bygroups(Text, Comment, Text)), # for doctests... 
- (r'^([^:]+)(: )(.+)(\n)', - bygroups(Generic.Error, Text, Name, Text), '#pop'), - (r'^([a-zA-Z_][a-zA-Z0-9_]*)(:?\n)', - bygroups(Generic.Error, Text), '#pop') - ], - } - - -class RubyLexer(ExtendedRegexLexer): - """ - For `Ruby `_ source code. - """ - - name = 'Ruby' - aliases = ['rb', 'ruby', 'duby'] - filenames = ['*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', - '*.rbx', '*.duby'] - mimetypes = ['text/x-ruby', 'application/x-ruby'] - - flags = re.DOTALL | re.MULTILINE - - def heredoc_callback(self, match, ctx): - # okay, this is the hardest part of parsing Ruby... - # match: 1 = <<-?, 2 = quote? 3 = name 4 = quote? 5 = rest of line - - start = match.start(1) - yield start, Operator, match.group(1) # <<-? - yield match.start(2), String.Heredoc, match.group(2) # quote ", ', ` - yield match.start(3), Name.Constant, match.group(3) # heredoc name - yield match.start(4), String.Heredoc, match.group(4) # quote again - - heredocstack = ctx.__dict__.setdefault('heredocstack', []) - outermost = not bool(heredocstack) - heredocstack.append((match.group(1) == '<<-', match.group(3))) - - ctx.pos = match.start(5) - ctx.end = match.end(5) - # this may find other heredocs - for i, t, v in self.get_tokens_unprocessed(context=ctx): - yield i, t, v - ctx.pos = match.end() - - if outermost: - # this is the outer heredoc again, now we can process them all - for tolerant, hdname in heredocstack: - lines = [] - for match in line_re.finditer(ctx.text, ctx.pos): - if tolerant: - check = match.group().strip() - else: - check = match.group().rstrip() - if check == hdname: - for amatch in lines: - yield amatch.start(), String.Heredoc, amatch.group() - yield match.start(), Name.Constant, match.group() - ctx.pos = match.end() - break - else: - lines.append(match) - else: - # end of heredoc not found -- error! 
- for amatch in lines: - yield amatch.start(), Error, amatch.group() - ctx.end = len(ctx.text) - del heredocstack[:] - - - def gen_rubystrings_rules(): - def intp_regex_callback(self, match, ctx): - yield match.start(1), String.Regex, match.group(1) # begin - nctx = LexerContext(match.group(3), 0, ['interpolated-regex']) - for i, t, v in self.get_tokens_unprocessed(context=nctx): - yield match.start(3)+i, t, v - yield match.start(4), String.Regex, match.group(4) # end[mixounse]* - ctx.pos = match.end() - - def intp_string_callback(self, match, ctx): - yield match.start(1), String.Other, match.group(1) - nctx = LexerContext(match.group(3), 0, ['interpolated-string']) - for i, t, v in self.get_tokens_unprocessed(context=nctx): - yield match.start(3)+i, t, v - yield match.start(4), String.Other, match.group(4) # end - ctx.pos = match.end() - - states = {} - states['strings'] = [ - # easy ones - (r'\:@{0,2}([a-zA-Z_]\w*[\!\?]?|\*\*?|[-+]@?|' - r'[/%&|^`~]|\[\]=?|<<|>>|<=?>|>=?|===?)', String.Symbol), - (r":'(\\\\|\\'|[^'])*'", String.Symbol), - (r"'(\\\\|\\'|[^'])*'", String.Single), - (r':"', String.Symbol, 'simple-sym'), - (r'([a-zA-Z_][a-zA-Z0-9]*)(:)(?!:)', - bygroups(String.Symbol, Punctuation)), # Since Ruby 1.9 - (r'"', String.Double, 'simple-string'), - (r'(?', 'ab'): - states[name+'-intp-string'] = [ - (r'\\[\\' + lbrace + rbrace + ']', String.Other), - (r'(?! - states['strings'] += [ - # %r regex - (r'(%r([^a-zA-Z0-9]))((?:\\\2|(?!\2).)*)(\2[mixounse]*)', - intp_regex_callback), - # regular fancy strings with qsw - (r'%[qsw]([^a-zA-Z0-9])((?:\\\1|(?!\1).)*)\1', String.Other), - (r'(%[QWx]([^a-zA-Z0-9]))((?:\\\2|(?!\2).)*)(\2)', - intp_string_callback), - # special forms of fancy strings after operators or - # in method calls with braces - (r'(?<=[-+/*%=<>&!^|~,(])(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)', - bygroups(Text, String.Other, None)), - # and because of fixed width lookbehinds the whole thing a - # second time for line startings... 
- (r'^(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)', - bygroups(Text, String.Other, None)), - # all regular fancy strings without qsw - (r'(%([^a-zA-Z0-9\s]))((?:\\\2|(?!\2).)*)(\2)', - intp_string_callback), - ] - - return states - - tokens = { - 'root': [ - (r'#.*?$', Comment.Single), - (r'=begin\s.*?\n=end.*?$', Comment.Multiline), - # keywords - (r'(BEGIN|END|alias|begin|break|case|defined\?|' - r'do|else|elsif|end|ensure|for|if|in|next|redo|' - r'rescue|raise|retry|return|super|then|undef|unless|until|when|' - r'while|yield)\b', Keyword), - # start of function, class and module names - (r'(module)(\s+)([a-zA-Z_][a-zA-Z0-9_]*' - r'(?:::[a-zA-Z_][a-zA-Z0-9_]*)*)', - bygroups(Keyword, Text, Name.Namespace)), - (r'(def)(\s+)', bygroups(Keyword, Text), 'funcname'), - (r'def(?=[*%&^`~+-/\[<>=])', Keyword, 'funcname'), - (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'), - # special methods - (r'(initialize|new|loop|include|extend|raise|attr_reader|' - r'attr_writer|attr_accessor|attr|catch|throw|private|' - r'module_function|public|protected|true|false|nil)\b', - Keyword.Pseudo), - (r'(not|and|or)\b', Operator.Word), - (r'(autoload|block_given|const_defined|eql|equal|frozen|include|' - r'instance_of|is_a|iterator|kind_of|method_defined|nil|' - r'private_method_defined|protected_method_defined|' - r'public_method_defined|respond_to|tainted)\?', Name.Builtin), - (r'(chomp|chop|exit|gsub|sub)!', Name.Builtin), - (r'(?~!:])|' - r'(?<=(?:\s|;)when\s)|' - r'(?<=(?:\s|;)or\s)|' - r'(?<=(?:\s|;)and\s)|' - r'(?<=(?:\s|;|\.)index\s)|' - r'(?<=(?:\s|;|\.)scan\s)|' - r'(?<=(?:\s|;|\.)sub\s)|' - r'(?<=(?:\s|;|\.)sub!\s)|' - r'(?<=(?:\s|;|\.)gsub\s)|' - r'(?<=(?:\s|;|\.)gsub!\s)|' - r'(?<=(?:\s|;|\.)match\s)|' - r'(?<=(?:\s|;)if\s)|' - r'(?<=(?:\s|;)elsif\s)|' - r'(?<=^when\s)|' - r'(?<=^index\s)|' - r'(?<=^scan\s)|' - r'(?<=^sub\s)|' - r'(?<=^gsub\s)|' - r'(?<=^sub!\s)|' - r'(?<=^gsub!\s)|' - r'(?<=^match\s)|' - r'(?<=^if\s)|' - r'(?<=^elsif\s)' - r')(\s*)(/)', bygroups(Text, 
String.Regex), 'multiline-regex'), - # multiline regex (in method calls or subscripts) - (r'(?<=\(|,|\[)/', String.Regex, 'multiline-regex'), - # multiline regex (this time the funny no whitespace rule) - (r'(\s+)(/)(?![\s=])', bygroups(Text, String.Regex), - 'multiline-regex'), - # lex numbers and ignore following regular expressions which - # are division operators in fact (grrrr. i hate that. any - # better ideas?) - # since pygments 0.7 we also eat a "?" operator after numbers - # so that the char operator does not work. Chars are not allowed - # there so that you can use the ternary operator. - # stupid example: - # x>=0?n[x]:"" - (r'(0_?[0-7]+(?:_[0-7]+)*)(\s*)([/?])?', - bygroups(Number.Oct, Text, Operator)), - (r'(0x[0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*)(\s*)([/?])?', - bygroups(Number.Hex, Text, Operator)), - (r'(0b[01]+(?:_[01]+)*)(\s*)([/?])?', - bygroups(Number.Bin, Text, Operator)), - (r'([\d]+(?:_\d+)*)(\s*)([/?])?', - bygroups(Number.Integer, Text, Operator)), - # Names - (r'@@[a-zA-Z_][a-zA-Z0-9_]*', Name.Variable.Class), - (r'@[a-zA-Z_][a-zA-Z0-9_]*', Name.Variable.Instance), - (r'\$[a-zA-Z0-9_]+', Name.Variable.Global), - (r'\$[!@&`\'+~=/\\,;.<>_*$?:"]', Name.Variable.Global), - (r'\$-[0adFiIlpvw]', Name.Variable.Global), - (r'::', Operator), - include('strings'), - # chars - (r'\?(\\[MC]-)*' # modifiers - r'(\\([\\abefnrstv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})|\S)' - r'(?!\w)', - String.Char), - (r'[A-Z][a-zA-Z0-9_]+', Name.Constant), - # this is needed because ruby attributes can look - # like keywords (class) or like this: ` ?!? - (r'(\.|::)([a-zA-Z_]\w*[\!\?]?|[*%&^`~+-/\[<>=])', - bygroups(Operator, Name)), - (r'[a-zA-Z_]\w*[\!\?]?', Name), - (r'(\[|\]|\*\*|<>?|>=|<=|<=>|=~|={3}|' - r'!~|&&?|\|\||\.{1,3})', Operator), - (r'[-+/*%=<>&!^|~]=?', Operator), - (r'[(){};,/?:\\]', Punctuation), - (r'\s+', Text) - ], - 'funcname': [ - (r'\(', Punctuation, 'defexpr'), - (r'(?:([a-zA-Z_][a-zA-Z0-9_]*)(\.))?' 
- r'([a-zA-Z_]\w*[\!\?]?|\*\*?|[-+]@?|' - r'[/%&|^`~]|\[\]=?|<<|>>|<=?>|>=?|===?)', - bygroups(Name.Class, Operator, Name.Function), '#pop'), - (r'', Text, '#pop') - ], - 'classname': [ - (r'\(', Punctuation, 'defexpr'), - (r'<<', Operator, '#pop'), - (r'[A-Z_]\w*', Name.Class, '#pop'), - (r'', Text, '#pop') - ], - 'defexpr': [ - (r'(\))(\.|::)?', bygroups(Punctuation, Operator), '#pop'), - (r'\(', Operator, '#push'), - include('root') - ], - 'in-intp': [ - ('}', String.Interpol, '#pop'), - include('root'), - ], - 'string-intp': [ - (r'#{', String.Interpol, 'in-intp'), - (r'#@@?[a-zA-Z_][a-zA-Z0-9_]*', String.Interpol), - (r'#\$[a-zA-Z_][a-zA-Z0-9_]*', String.Interpol) - ], - 'string-intp-escaped': [ - include('string-intp'), - (r'\\([\\abefnrstv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})', - String.Escape) - ], - 'interpolated-regex': [ - include('string-intp'), - (r'[\\#]', String.Regex), - (r'[^\\#]+', String.Regex), - ], - 'interpolated-string': [ - include('string-intp'), - (r'[\\#]', String.Other), - (r'[^\\#]+', String.Other), - ], - 'multiline-regex': [ - include('string-intp'), - (r'\\\\', String.Regex), - (r'\\/', String.Regex), - (r'[\\#]', String.Regex), - (r'[^\\/#]+', String.Regex), - (r'/[mixounse]*', String.Regex, '#pop'), - ], - 'end-part': [ - (r'.+', Comment.Preproc, '#pop') - ] - } - tokens.update(gen_rubystrings_rules()) - - def analyse_text(text): - return shebang_matches(text, r'ruby(1\.\d)?') - - -class RubyConsoleLexer(Lexer): - """ - For Ruby interactive console (**irb**) output like: - - .. 
sourcecode:: rbcon - - irb(main):001:0> a = 1 - => 1 - irb(main):002:0> puts a - 1 - => nil - """ - name = 'Ruby irb session' - aliases = ['rbcon', 'irb'] - mimetypes = ['text/x-ruby-shellsession'] - - _prompt_re = re.compile('irb\([a-zA-Z_][a-zA-Z0-9_]*\):\d{3}:\d+[>*"\'] ' - '|>> |\?> ') - - def get_tokens_unprocessed(self, text): - rblexer = RubyLexer(**self.options) - - curcode = '' - insertions = [] - for match in line_re.finditer(text): - line = match.group() - m = self._prompt_re.match(line) - if m is not None: - end = m.end() - insertions.append((len(curcode), - [(0, Generic.Prompt, line[:end])])) - curcode += line[end:] - else: - if curcode: - for item in do_insertions(insertions, - rblexer.get_tokens_unprocessed(curcode)): - yield item - curcode = '' - insertions = [] - yield match.start(), Generic.Output, line - if curcode: - for item in do_insertions(insertions, - rblexer.get_tokens_unprocessed(curcode)): - yield item - - -class PerlLexer(RegexLexer): - """ - For `Perl `_ source code. - """ - - name = 'Perl' - aliases = ['perl', 'pl'] - filenames = ['*.pl', '*.pm'] - mimetypes = ['text/x-perl', 'application/x-perl'] - - flags = re.DOTALL | re.MULTILINE - # TODO: give this to a perl guy who knows how to parse perl... 
- tokens = { - 'balanced-regex': [ - (r'/(\\\\|\\[^\\]|[^\\/])*/[egimosx]*', String.Regex, '#pop'), - (r'!(\\\\|\\[^\\]|[^\\!])*![egimosx]*', String.Regex, '#pop'), - (r'\\(\\\\|[^\\])*\\[egimosx]*', String.Regex, '#pop'), - (r'{(\\\\|\\[^\\]|[^\\}])*}[egimosx]*', String.Regex, '#pop'), - (r'<(\\\\|\\[^\\]|[^\\>])*>[egimosx]*', String.Regex, '#pop'), - (r'\[(\\\\|\\[^\\]|[^\\\]])*\][egimosx]*', String.Regex, '#pop'), - (r'\((\\\\|\\[^\\]|[^\\\)])*\)[egimosx]*', String.Regex, '#pop'), - (r'@(\\\\|\\[^\\]|[^\\\@])*@[egimosx]*', String.Regex, '#pop'), - (r'%(\\\\|\\[^\\]|[^\\\%])*%[egimosx]*', String.Regex, '#pop'), - (r'\$(\\\\|\\[^\\]|[^\\\$])*\$[egimosx]*', String.Regex, '#pop'), - ], - 'root': [ - (r'\#.*?$', Comment.Single), - (r'^=[a-zA-Z0-9]+\s+.*?\n=cut', Comment.Multiline), - (r'(case|continue|do|else|elsif|for|foreach|if|last|my|' - r'next|our|redo|reset|then|unless|until|while|use|' - r'print|new|BEGIN|CHECK|INIT|END|return)\b', Keyword), - (r'(format)(\s+)([a-zA-Z0-9_]+)(\s*)(=)(\s*\n)', - bygroups(Keyword, Text, Name, Text, Punctuation, Text), 'format'), - (r'(eq|lt|gt|le|ge|ne|not|and|or|cmp)\b', Operator.Word), - # common delimiters - (r's/(\\\\|\\[^\\]|[^\\/])*/(\\\\|\\[^\\]|[^\\/])*/[egimosx]*', - String.Regex), - (r's!(\\\\|\\!|[^!])*!(\\\\|\\!|[^!])*![egimosx]*', String.Regex), - (r's\\(\\\\|[^\\])*\\(\\\\|[^\\])*\\[egimosx]*', String.Regex), - (r's@(\\\\|\\[^\\]|[^\\@])*@(\\\\|\\[^\\]|[^\\@])*@[egimosx]*', - String.Regex), - (r's%(\\\\|\\[^\\]|[^\\%])*%(\\\\|\\[^\\]|[^\\%])*%[egimosx]*', - String.Regex), - # balanced delimiters - (r's{(\\\\|\\[^\\]|[^\\}])*}\s*', String.Regex, 'balanced-regex'), - (r's<(\\\\|\\[^\\]|[^\\>])*>\s*', String.Regex, 'balanced-regex'), - (r's\[(\\\\|\\[^\\]|[^\\\]])*\]\s*', String.Regex, - 'balanced-regex'), - (r's\((\\\\|\\[^\\]|[^\\\)])*\)\s*', String.Regex, - 'balanced-regex'), - - (r'm?/(\\\\|\\[^\\]|[^\\/\n])*/[gcimosx]*', String.Regex), - (r'm(?=[/!\\{<\[\(@%\$])', String.Regex, 'balanced-regex'), - 
(r'((?<==~)|(?<=\())\s*/(\\\\|\\[^\\]|[^\\/])*/[gcimosx]*', - String.Regex), - (r'\s+', Text), - (r'(abs|accept|alarm|atan2|bind|binmode|bless|caller|chdir|' - r'chmod|chomp|chop|chown|chr|chroot|close|closedir|connect|' - r'continue|cos|crypt|dbmclose|dbmopen|defined|delete|die|' - r'dump|each|endgrent|endhostent|endnetent|endprotoent|' - r'endpwent|endservent|eof|eval|exec|exists|exit|exp|fcntl|' - r'fileno|flock|fork|format|formline|getc|getgrent|getgrgid|' - r'getgrnam|gethostbyaddr|gethostbyname|gethostent|getlogin|' - r'getnetbyaddr|getnetbyname|getnetent|getpeername|getpgrp|' - r'getppid|getpriority|getprotobyname|getprotobynumber|' - r'getprotoent|getpwent|getpwnam|getpwuid|getservbyname|' - r'getservbyport|getservent|getsockname|getsockopt|glob|gmtime|' - r'goto|grep|hex|import|index|int|ioctl|join|keys|kill|last|' - r'lc|lcfirst|length|link|listen|local|localtime|log|lstat|' - r'map|mkdir|msgctl|msgget|msgrcv|msgsnd|my|next|no|oct|open|' - r'opendir|ord|our|pack|package|pipe|pop|pos|printf|' - r'prototype|push|quotemeta|rand|read|readdir|' - r'readline|readlink|readpipe|recv|redo|ref|rename|require|' - r'reverse|rewinddir|rindex|rmdir|scalar|seek|seekdir|' - r'select|semctl|semget|semop|send|setgrent|sethostent|setnetent|' - r'setpgrp|setpriority|setprotoent|setpwent|setservent|' - r'setsockopt|shift|shmctl|shmget|shmread|shmwrite|shutdown|' - r'sin|sleep|socket|socketpair|sort|splice|split|sprintf|sqrt|' - r'srand|stat|study|substr|symlink|syscall|sysopen|sysread|' - r'sysseek|system|syswrite|tell|telldir|tie|tied|time|times|tr|' - r'truncate|uc|ucfirst|umask|undef|unlink|unpack|unshift|untie|' - r'utime|values|vec|wait|waitpid|wantarray|warn|write' - r')\b', Name.Builtin), - (r'((__(DATA|DIE|WARN)__)|(STD(IN|OUT|ERR)))\b', Name.Builtin.Pseudo), - (r'<<([\'"]?)([a-zA-Z_][a-zA-Z0-9_]*)\1;?\n.*?\n\2\n', String), - (r'__END__', Comment.Preproc, 'end-part'), - (r'\$\^[ADEFHILMOPSTWX]', Name.Variable.Global), - (r"\$[\\\"\[\]'&`+*.,;=%~?@$!<>(^|/-](?!\w)", 
Name.Variable.Global), - (r'[$@%#]+', Name.Variable, 'varname'), - (r'0_?[0-7]+(_[0-7]+)*', Number.Oct), - (r'0x[0-9A-Fa-f]+(_[0-9A-Fa-f]+)*', Number.Hex), - (r'0b[01]+(_[01]+)*', Number.Bin), - (r'(?i)(\d*(_\d*)*\.\d+(_\d*)*|\d+(_\d*)*\.\d+(_\d*)*)(e[+-]?\d+)?', - Number.Float), - (r'(?i)\d+(_\d*)*e[+-]?\d+(_\d*)*', Number.Float), - (r'\d+(_\d+)*', Number.Integer), - (r"'(\\\\|\\[^\\]|[^'\\])*'", String), - (r'"(\\\\|\\[^\\]|[^"\\])*"', String), - (r'`(\\\\|\\[^\\]|[^`\\])*`', String.Backtick), - (r'<([^\s>]+)>', String.Regex), - (r'(q|qq|qw|qr|qx)\{', String.Other, 'cb-string'), - (r'(q|qq|qw|qr|qx)\(', String.Other, 'rb-string'), - (r'(q|qq|qw|qr|qx)\[', String.Other, 'sb-string'), - (r'(q|qq|qw|qr|qx)\<', String.Other, 'lt-string'), - (r'(q|qq|qw|qr|qx)([^a-zA-Z0-9])(.|\n)*?\2', String.Other), - (r'package\s+', Keyword, 'modulename'), - (r'sub\s+', Keyword, 'funcname'), - (r'(\[\]|\*\*|::|<<|>>|>=|<=>|<=|={3}|!=|=~|' - r'!~|&&?|\|\||\.{1,3})', Operator), - (r'[-+/*%=<>&^|!\\~]=?', Operator), - (r'[\(\)\[\]:;,<>/\?\{\}]', Punctuation), # yes, there's no shortage - # of punctuation in Perl! - (r'(?=\w)', Name, 'name'), - ], - 'format': [ - (r'\.\n', String.Interpol, '#pop'), - (r'[^\n]*\n', String.Interpol), - ], - 'varname': [ - (r'\s+', Text), - (r'\{', Punctuation, '#pop'), # hash syntax? 
- (r'\)|,', Punctuation, '#pop'), # argument specifier - (r'[a-zA-Z0-9_]+::', Name.Namespace), - (r'[a-zA-Z0-9_:]+', Name.Variable, '#pop'), - ], - 'name': [ - (r'[a-zA-Z0-9_]+::', Name.Namespace), - (r'[a-zA-Z0-9_:]+', Name, '#pop'), - (r'[A-Z_]+(?=[^a-zA-Z0-9_])', Name.Constant, '#pop'), - (r'(?=[^a-zA-Z0-9_])', Text, '#pop'), - ], - 'modulename': [ - (r'[a-zA-Z_]\w*', Name.Namespace, '#pop') - ], - 'funcname': [ - (r'[a-zA-Z_]\w*[\!\?]?', Name.Function), - (r'\s+', Text), - # argument declaration - (r'(\([$@%]*\))(\s*)', bygroups(Punctuation, Text)), - (r'.*?{', Punctuation, '#pop'), - (r';', Punctuation, '#pop'), - ], - 'cb-string': [ - (r'\\[\{\}\\]', String.Other), - (r'\\', String.Other), - (r'\{', String.Other, 'cb-string'), - (r'\}', String.Other, '#pop'), - (r'[^\{\}\\]+', String.Other) - ], - 'rb-string': [ - (r'\\[\(\)\\]', String.Other), - (r'\\', String.Other), - (r'\(', String.Other, 'rb-string'), - (r'\)', String.Other, '#pop'), - (r'[^\(\)]+', String.Other) - ], - 'sb-string': [ - (r'\\[\[\]\\]', String.Other), - (r'\\', String.Other), - (r'\[', String.Other, 'sb-string'), - (r'\]', String.Other, '#pop'), - (r'[^\[\]]+', String.Other) - ], - 'lt-string': [ - (r'\\[\<\>\\]', String.Other), - (r'\\', String.Other), - (r'\<', String.Other, 'lt-string'), - (r'\>', String.Other, '#pop'), - (r'[^\<\>]+', String.Other) - ], - 'end-part': [ - (r'.+', Comment.Preproc, '#pop') - ] - } - - def analyse_text(text): - if shebang_matches(text, r'perl'): - return True - if 'my $' in text: - return 0.9 - return 0.1 # who knows, might still be perl! - - -class LuaLexer(RegexLexer): - """ - For `Lua `_ source code. - - Additional options accepted: - - `func_name_highlighting` - If given and ``True``, highlight builtin function names - (default: ``True``). - `disabled_modules` - If given, must be a list of module names whose function names - should not be highlighted. By default all modules are highlighted. 
- - To get a list of allowed modules have a look into the - `_luabuiltins` module: - - .. sourcecode:: pycon - - >>> from pygments.lexers._luabuiltins import MODULES - >>> MODULES.keys() - ['string', 'coroutine', 'modules', 'io', 'basic', ...] - """ - - name = 'Lua' - aliases = ['lua'] - filenames = ['*.lua', '*.wlua'] - mimetypes = ['text/x-lua', 'application/x-lua'] - - tokens = { - 'root': [ - # lua allows a file to start with a shebang - (r'#!(.*?)$', Comment.Preproc), - (r'', Text, 'base'), - ], - 'base': [ - (r'(?s)--\[(=*)\[.*?\]\1\]', Comment.Multiline), - ('--.*$', Comment.Single), - - (r'(?i)(\d*\.\d+|\d+\.\d*)(e[+-]?\d+)?', Number.Float), - (r'(?i)\d+e[+-]?\d+', Number.Float), - ('(?i)0x[0-9a-f]*', Number.Hex), - (r'\d+', Number.Integer), - - (r'\n', Text), - (r'[^\S\n]', Text), - # multiline strings - (r'(?s)\[(=*)\[.*?\]\1\]', String), - - (r'(==|~=|<=|>=|\.\.\.|\.\.|[=+\-*/%^<>#])', Operator), - (r'[\[\]\{\}\(\)\.,:;]', Punctuation), - (r'(and|or|not)\b', Operator.Word), - - ('(break|do|else|elseif|end|for|if|in|repeat|return|then|until|' - r'while)\b', Keyword), - (r'(local)\b', Keyword.Declaration), - (r'(true|false|nil)\b', Keyword.Constant), - - (r'(function)\b', Keyword, 'funcname'), - - (r'[A-Za-z_][A-Za-z0-9_]*(\.[A-Za-z_][A-Za-z0-9_]*)?', Name), - - ("'", String.Single, combined('stringescape', 'sqs')), - ('"', String.Double, combined('stringescape', 'dqs')) - ], - - 'funcname': [ - (r'\s+', Text), - ('(?:([A-Za-z_][A-Za-z0-9_]*)(\.))?([A-Za-z_][A-Za-z0-9_]*)', - bygroups(Name.Class, Punctuation, Name.Function), '#pop'), - # inline function - ('\(', Punctuation, '#pop'), - ], - - # if I understand correctly, every character is valid in a lua string, - # so this state is only for later corrections - 'string': [ - ('.', String) - ], - - 'stringescape': [ - (r'''\\([abfnrtv\\"']|\d{1,3})''', String.Escape) - ], - - 'sqs': [ - ("'", String, '#pop'), - include('string') - ], - - 'dqs': [ - ('"', String, '#pop'), - include('string') - ] - } - - def 
__init__(self, **options): - self.func_name_highlighting = get_bool_opt( - options, 'func_name_highlighting', True) - self.disabled_modules = get_list_opt(options, 'disabled_modules', []) - - self._functions = set() - if self.func_name_highlighting: - from pygments.lexers._luabuiltins import MODULES - for mod, func in MODULES.iteritems(): - if mod not in self.disabled_modules: - self._functions.update(func) - RegexLexer.__init__(self, **options) - - def get_tokens_unprocessed(self, text): - for index, token, value in \ - RegexLexer.get_tokens_unprocessed(self, text): - if token is Name: - if value in self._functions: - yield index, Name.Builtin, value - continue - elif '.' in value: - a, b = value.split('.') - yield index, Name, a - yield index + len(a), Punctuation, u'.' - yield index + len(a) + 1, Name, b - continue - yield index, token, value - - -class MoonScriptLexer(LuaLexer): - """ - For `MoonScript `_ source code. - - *New in Pygments 1.5.* - """ - - name = "MoonScript" - aliases = ["moon", "moonscript"] - filenames = ["*.moon"] - mimetypes = ['text/x-moonscript', 'application/x-moonscript'] - - tokens = { - 'root': [ - (r'#!(.*?)$', Comment.Preproc), - (r'', Text, 'base'), - ], - 'base': [ - ('--.*$', Comment.Single), - (r'(?i)(\d*\.\d+|\d+\.\d*)(e[+-]?\d+)?', Number.Float), - (r'(?i)\d+e[+-]?\d+', Number.Float), - (r'(?i)0x[0-9a-f]*', Number.Hex), - (r'\d+', Number.Integer), - (r'\n', Text), - (r'[^\S\n]+', Text), - (r'(?s)\[(=*)\[.*?\]\1\]', String), - (r'(->|=>)', Name.Function), - (r':[a-zA-Z_][a-zA-Z0-9_]*', Name.Variable), - (r'(==|!=|~=|<=|>=|\.\.\.|\.\.|[=+\-*/%^<>#!.\\:])', Operator), - (r'[;,]', Punctuation), - (r'[\[\]\{\}\(\)]', Keyword.Type), - (r'[a-zA-Z_][a-zA-Z0-9_]*:', Name.Variable), - (r"(class|extends|if|then|super|do|with|import|export|" - r"while|elseif|return|for|in|from|when|using|else|" - r"and|or|not|switch|break)\b", Keyword), - (r'(true|false|nil)\b', Keyword.Constant), - (r'(and|or|not)\b', Operator.Word), - (r'(self)\b', 
Name.Builtin.Pseudo), - (r'@@?([a-zA-Z_][a-zA-Z0-9_]*)?', Name.Variable.Class), - (r'[A-Z]\w*', Name.Class), # proper name - (r'[A-Za-z_][A-Za-z0-9_]*(\.[A-Za-z_][A-Za-z0-9_]*)?', Name), - ("'", String.Single, combined('stringescape', 'sqs')), - ('"', String.Double, combined('stringescape', 'dqs')) - ], - 'stringescape': [ - (r'''\\([abfnrtv\\"']|\d{1,3})''', String.Escape) - ], - 'sqs': [ - ("'", String.Single, '#pop'), - (".", String) - ], - 'dqs': [ - ('"', String.Double, '#pop'), - (".", String) - ] - } - - def get_tokens_unprocessed(self, text): - # set . as Operator instead of Punctuation - for index, token, value in \ - LuaLexer.get_tokens_unprocessed(self, text): - if token == Punctuation and value == ".": - token = Operator - yield index, token, value - - -class CrocLexer(RegexLexer): - """ - For `Croc `_ source. - """ - name = 'Croc' - filenames = ['*.croc'] - aliases = ['croc'] - mimetypes = ['text/x-crocsrc'] - - tokens = { - 'root': [ - (r'\n', Text), - (r'\s+', Text), - # Comments - (r'//(.*?)\n', Comment.Single), - (r'/\*', Comment.Multiline, 'nestedcomment'), - # Keywords - (r'(as|assert|break|case|catch|class|continue|default' - r'|do|else|finally|for|foreach|function|global|namespace' - r'|if|import|in|is|local|module|return|scope|super|switch' - r'|this|throw|try|vararg|while|with|yield)\b', Keyword), - (r'(false|true|null)\b', Keyword.Constant), - # FloatLiteral - (r'([0-9][0-9_]*)(?=[.eE])(\.[0-9][0-9_]*)?([eE][+\-]?[0-9_]+)?', - Number.Float), - # IntegerLiteral - # -- Binary - (r'0[bB][01][01_]*', Number), - # -- Hexadecimal - (r'0[xX][0-9a-fA-F][0-9a-fA-F_]*', Number.Hex), - # -- Decimal - (r'([0-9][0-9_]*)(?![.eE])', Number.Integer), - # CharacterLiteral - (r"""'(\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\[0-9]{1,3}""" - r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|.)'""", - String.Char - ), - # StringLiteral - # -- WysiwygString - (r'@"(""|[^"])*"', String), - (r'@`(``|[^`])*`', String), - (r"@'(''|[^'])*'", String), - # -- DoubleQuotedString - 
(r'"(\\\\|\\"|[^"])*"', String), - # Tokens - ( - r'(~=|\^=|%=|\*=|==|!=|>>>=|>>>|>>=|>>|>=|<=>|\?=|-\>' - r'|<<=|<<|<=|\+\+|\+=|--|-=|\|\||\|=|&&|&=|\.\.|/=)' - r'|[-/.&$@|\+<>!()\[\]{}?,;:=*%^~#\\]', Punctuation - ), - # Identifier - (r'[a-zA-Z_]\w*', Name), - ], - 'nestedcomment': [ - (r'[^*/]+', Comment.Multiline), - (r'/\*', Comment.Multiline, '#push'), - (r'\*/', Comment.Multiline, '#pop'), - (r'[*/]', Comment.Multiline), - ], - } - - -class MiniDLexer(CrocLexer): - """ - For MiniD source. MiniD is now known as Croc. - """ - name = 'MiniD' - filenames = ['*.md'] - aliases = ['minid'] - mimetypes = ['text/x-minidsrc'] - - -class IoLexer(RegexLexer): - """ - For `Io `_ (a small, prototype-based - programming language) source. - - *New in Pygments 0.10.* - """ - name = 'Io' - filenames = ['*.io'] - aliases = ['io'] - mimetypes = ['text/x-iosrc'] - tokens = { - 'root': [ - (r'\n', Text), - (r'\s+', Text), - # Comments - (r'//(.*?)\n', Comment.Single), - (r'#(.*?)\n', Comment.Single), - (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), - (r'/\+', Comment.Multiline, 'nestedcomment'), - # DoubleQuotedString - (r'"(\\\\|\\"|[^"])*"', String), - # Operators - (r'::=|:=|=|\(|\)|;|,|\*|-|\+|>|<|@|!|/|\||\^|\.|%|&|\[|\]|\{|\}', - Operator), - # keywords - (r'(clone|do|doFile|doString|method|for|if|else|elseif|then)\b', - Keyword), - # constants - (r'(nil|false|true)\b', Name.Constant), - # names - (r'(Object|list|List|Map|args|Sequence|Coroutine|File)\b', - Name.Builtin), - ('[a-zA-Z_][a-zA-Z0-9_]*', Name), - # numbers - (r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float), - (r'\d+', Number.Integer) - ], - 'nestedcomment': [ - (r'[^+/]+', Comment.Multiline), - (r'/\+', Comment.Multiline, '#push'), - (r'\+/', Comment.Multiline, '#pop'), - (r'[+/]', Comment.Multiline), - ] - } - - -class TclLexer(RegexLexer): - """ - For Tcl source code. 
- - *New in Pygments 0.10.* - """ - - keyword_cmds_re = ( - r'\b(after|apply|array|break|catch|continue|elseif|else|error|' - r'eval|expr|for|foreach|global|if|namespace|proc|rename|return|' - r'set|switch|then|trace|unset|update|uplevel|upvar|variable|' - r'vwait|while)\b' - ) - - builtin_cmds_re = ( - r'\b(append|bgerror|binary|cd|chan|clock|close|concat|dde|dict|' - r'encoding|eof|exec|exit|fblocked|fconfigure|fcopy|file|' - r'fileevent|flush|format|gets|glob|history|http|incr|info|interp|' - r'join|lappend|lassign|lindex|linsert|list|llength|load|loadTk|' - r'lrange|lrepeat|lreplace|lreverse|lsearch|lset|lsort|mathfunc|' - r'mathop|memory|msgcat|open|package|pid|pkg::create|pkg_mkIndex|' - r'platform|platform::shell|puts|pwd|re_syntax|read|refchan|' - r'regexp|registry|regsub|scan|seek|socket|source|split|string|' - r'subst|tell|time|tm|unknown|unload)\b' - ) - - name = 'Tcl' - aliases = ['tcl'] - filenames = ['*.tcl'] - mimetypes = ['text/x-tcl', 'text/x-script.tcl', 'application/x-tcl'] - - def _gen_command_rules(keyword_cmds_re, builtin_cmds_re, context=""): - return [ - (keyword_cmds_re, Keyword, 'params' + context), - (builtin_cmds_re, Name.Builtin, 'params' + context), - (r'([\w\.\-]+)', Name.Variable, 'params' + context), - (r'#', Comment, 'comment'), - ] - - tokens = { - 'root': [ - include('command'), - include('basic'), - include('data'), - (r'}', Keyword), # HACK: somehow we miscounted our braces - ], - 'command': _gen_command_rules(keyword_cmds_re, builtin_cmds_re), - 'command-in-brace': _gen_command_rules(keyword_cmds_re, - builtin_cmds_re, - "-in-brace"), - 'command-in-bracket': _gen_command_rules(keyword_cmds_re, - builtin_cmds_re, - "-in-bracket"), - 'command-in-paren': _gen_command_rules(keyword_cmds_re, - builtin_cmds_re, - "-in-paren"), - 'basic': [ - (r'\(', Keyword, 'paren'), - (r'\[', Keyword, 'bracket'), - (r'\{', Keyword, 'brace'), - (r'"', String.Double, 'string'), - (r'(eq|ne|in|ni)\b', Operator.Word), - 
(r'!=|==|<<|>>|<=|>=|&&|\|\||\*\*|[-+~!*/%<>&^|?:]', Operator), - ], - 'data': [ - (r'\s+', Text), - (r'0x[a-fA-F0-9]+', Number.Hex), - (r'0[0-7]+', Number.Oct), - (r'\d+\.\d+', Number.Float), - (r'\d+', Number.Integer), - (r'\$([\w\.\-\:]+)', Name.Variable), - (r'([\w\.\-\:]+)', Text), - ], - 'params': [ - (r';', Keyword, '#pop'), - (r'\n', Text, '#pop'), - (r'(else|elseif|then)\b', Keyword), - include('basic'), - include('data'), - ], - 'params-in-brace': [ - (r'}', Keyword, ('#pop', '#pop')), - include('params') - ], - 'params-in-paren': [ - (r'\)', Keyword, ('#pop', '#pop')), - include('params') - ], - 'params-in-bracket': [ - (r'\]', Keyword, ('#pop', '#pop')), - include('params') - ], - 'string': [ - (r'\[', String.Double, 'string-square'), - (r'(?s)(\\\\|\\[0-7]+|\\.|[^"\\])', String.Double), - (r'"', String.Double, '#pop') - ], - 'string-square': [ - (r'\[', String.Double, 'string-square'), - (r'(?s)(\\\\|\\[0-7]+|\\.|\\\n|[^\]\\])', String.Double), - (r'\]', String.Double, '#pop') - ], - 'brace': [ - (r'}', Keyword, '#pop'), - include('command-in-brace'), - include('basic'), - include('data'), - ], - 'paren': [ - (r'\)', Keyword, '#pop'), - include('command-in-paren'), - include('basic'), - include('data'), - ], - 'bracket': [ - (r'\]', Keyword, '#pop'), - include('command-in-bracket'), - include('basic'), - include('data'), - ], - 'comment': [ - (r'.*[^\\]\n', Comment, '#pop'), - (r'.*\\\n', Comment), - ], - } - - def analyse_text(text): - return shebang_matches(text, r'(tcl)') - - -class FactorLexer(RegexLexer): - """ - Lexer for the `Factor `_ language. 
- - *New in Pygments 1.4.* - """ - name = 'Factor' - aliases = ['factor'] - filenames = ['*.factor'] - mimetypes = ['text/x-factor'] - - flags = re.MULTILINE | re.UNICODE - - builtin_kernel = ( - r'(?:or|2bi|2tri|while|wrapper|nip|4dip|wrapper\\?|bi\\*|' - r'callstack>array|both\\?|hashcode|die|dupd|callstack|' - r'callstack\\?|3dup|tri@|pick|curry|build|\\?execute|3bi|' - r'prepose|>boolean|\\?if|clone|eq\\?|tri\\*|\\?|=|swapd|' - r'2over|2keep|3keep|clear|2dup|when|not|tuple\\?|dup|2bi\\*|' - r'2tri\\*|call|tri-curry|object|bi@|do|unless\\*|if\\*|loop|' - r'bi-curry\\*|drop|when\\*|assert=|retainstack|assert\\?|-rot|' - r'execute|2bi@|2tri@|boa|with|either\\?|3drop|bi|curry\\?|' - r'datastack|until|3dip|over|3curry|tri-curry\\*|tri-curry@|swap|' - r'and|2nip|throw|bi-curry|\\(clone\\)|hashcode\\*|compose|2dip|if|3tri|' - r'unless|compose\\?|tuple|keep|2curry|equal\\?|assert|tri|2drop|' - r'most||boolean\\?|identity-hashcode|identity-tuple\\?|' - r'null|new|dip|bi-curry@|rot|xor|identity-tuple|boolean)\s' - ) - - builtin_assocs = ( - r'(?:\\?at|assoc\\?|assoc-clone-like|assoc=|delete-at\\*|' - r'assoc-partition|extract-keys|new-assoc|value\\?|assoc-size|' - r'map>assoc|push-at|assoc-like|key\\?|assoc-intersect|' - r'assoc-refine|update|assoc-union|assoc-combine|at\\*|' - r'assoc-empty\\?|at\\+|set-at|assoc-all\\?|assoc-subset\\?|' - r'assoc-hashcode|change-at|assoc-each|assoc-diff|zip|values|' - r'value-at|rename-at|inc-at|enum\\?|at|cache|assoc>map||' - r'assoc|assoc-map|enum|value-at\\*|assoc-map-as|>alist|' - r'assoc-filter-as|clear-assoc|assoc-stack|maybe-set-at|' - r'substitute|assoc-filter|2cache|delete-at|assoc-find|keys|' - r'assoc-any\\?|unzip)\s' - ) - - builtin_combinators = ( - r'(?:case|execute-effect|no-cond|no-case\\?|3cleave>quot|2cleave|' - r'cond>quot|wrong-values\\?|no-cond\\?|cleave>quot|no-case|' - r'case>quot|3cleave|wrong-values|to-fixed-point|alist>quot|' - r'case-find|cond|cleave|call-effect|2cleave>quot|recursive-hashcode|' - 
r'linear-case-quot|spread|spread>quot)\s' - ) - - builtin_math = ( - r'(?:number=|if-zero|next-power-of-2|each-integer|\\?1\\+|' - r'fp-special\\?|imaginary-part|unless-zero|float>bits|number\\?|' - r'fp-infinity\\?|bignum\\?|fp-snan\\?|denominator|fp-bitwise=|\\*|' - r'\\+|power-of-2\\?|-|u>=|/|>=|bitand|log2-expects-positive|<|' - r'log2|>|integer\\?|number|bits>double|2/|zero\\?|(find-integer)|' - r'bits>float|float\\?|shift|ratio\\?|even\\?|ratio|fp-sign|bitnot|' - r'>fixnum|complex\\?|/i|/f|byte-array>bignum|when-zero|sgn|>bignum|' - r'next-float|u<|u>|mod|recip|rational|find-last-integer|>float|' - r'(all-integers\\?)|2^|times|integer|fixnum\\?|neg|fixnum|sq|' - r'bignum|(each-integer)|bit\\?|fp-qnan\\?|find-integer|complex|' - r'|real|double>bits|bitor|rem|fp-nan-payload|all-integers\\?|' - r'real-part|log2-expects-positive\\?|prev-float|align|unordered\\?|' - r'float|fp-nan\\?|abs|bitxor|u<=|odd\\?|<=|/mod|rational\\?|>integer|' - r'real\\?|numerator)\s' - ) - - builtin_sequences = ( - r'(?:member-eq\\?|append|assert-sequence=|find-last-from|trim-head-slice|' - r'clone-like|3sequence|assert-sequence\\?|map-as|last-index-from|' - r'reversed|index-from|cut\\*|pad-tail|remove-eq!|concat-as|' - r'but-last|snip|trim-tail|nths|nth|2selector|sequence|slice\\?|' - r'|partition|remove-nth|tail-slice|empty\\?|tail\\*|' - r'if-empty|find-from|virtual-sequence\\?|member\\?|set-length|' - r'drop-prefix|unclip|unclip-last-slice|iota|map-sum|' - r'bounds-error\\?|sequence-hashcode-step|selector-for|' - r'accumulate-as|map|start|midpoint@|\\(accumulate\\)|rest-slice|' - r'prepend|fourth|sift|accumulate!|new-sequence|follow|map!|' - r'like|first4|1sequence|reverse|slice|unless-empty|padding|' - r'virtual@|repetition\\?|set-last|index|4sequence|max-length|' - r'set-second|immutable-sequence|first2|first3|replicate-as|' - r'reduce-index|unclip-slice|supremum|suffix!|insert-nth|' - r'trim-tail-slice|tail|3append|short|count|suffix|concat|' - 
r'flip|filter|sum|immutable\\?|reverse!|2sequence|map-integers|' - r'delete-all|start\\*|indices|snip-slice|check-slice|sequence\\?|' - r'head|map-find|filter!|append-as|reduce|sequence=|halves|' - r'collapse-slice|interleave|2map|filter-as|binary-reduce|' - r'slice-error\\?|product|bounds-check\\?|bounds-check|harvest|' - r'immutable|virtual-exemplar|find|produce|remove|pad-head|last|' - r'replicate|set-fourth|remove-eq|shorten|reversed\\?|' - r'map-find-last|3map-as|2unclip-slice|shorter\\?|3map|find-last|' - r'head-slice|pop\\*|2map-as|tail-slice\\*|but-last-slice|' - r'2map-reduce|iota\\?|collector-for|accumulate|each|selector|' - r'append!|new-resizable|cut-slice|each-index|head-slice\\*|' - r'2reverse-each|sequence-hashcode|pop|set-nth|\\?nth|' - r'|second|join|when-empty|collector|' - r'immutable-sequence\\?||all\\?|3append-as|' - r'virtual-sequence|subseq\\?|remove-nth!|push-either|new-like|' - r'length|last-index|push-if|2all\\?|lengthen|assert-sequence|' - r'copy|map-reduce|move|third|first|3each|tail\\?|set-first|' - r'prefix|bounds-error|any\\?||trim-slice|exchange|' - r'surround|2reduce|cut|change-nth|min-length|set-third|produce-as|' - r'push-all|head\\?|delete-slice|rest|sum-lengths|2each|head\\*|' - r'infimum|remove!|glue|slice-error|subseq|trim|replace-slice|' - r'push|repetition|map-index|trim-head|unclip-last|mismatch)\s' - ) - - builtin_namespaces = ( - r'(?:global|\\+@|change|set-namestack|change-global|init-namespaces|' - r'on|off|set-global|namespace|set|with-scope|bind|with-variable|' - r'inc|dec|counter|initialize|namestack|get|get-global|make-assoc)\s' - ) - - builtin_arrays = ( - r'(?:|2array|3array|pair|>array|1array|4array|pair\\?|' - r'array|resize-array|array\\?)\s' - ) - - builtin_io = ( - r'(?:\\+character\\+|bad-seek-type\\?|readln|each-morsel|stream-seek|' - r'read|print|with-output-stream|contents|write1|stream-write1|' - r'stream-copy|stream-element-type|with-input-stream|' - 
r'stream-print|stream-read|stream-contents|stream-tell|' - r'tell-output|bl|seek-output|bad-seek-type|nl|stream-nl|write|' - r'flush|stream-lines|\\+byte\\+|stream-flush|read1|' - r'seek-absolute\\?|stream-read1|lines|stream-readln|' - r'stream-read-until|each-line|seek-end|with-output-stream\\*|' - r'seek-absolute|with-streams|seek-input|seek-relative\\?|' - r'input-stream|stream-write|read-partial|seek-end\\?|' - r'seek-relative|error-stream|read-until|with-input-stream\\*|' - r'with-streams\\*|tell-input|each-block|output-stream|' - r'stream-read-partial|each-stream-block|each-stream-line)\s' - ) - - builtin_strings = ( - r'(?:resize-string|>string||1string|string|string\\?)\s' - ) - - builtin_vectors = ( - r'(?:vector\\?||\\?push|vector|>vector|1vector)\s' - ) - - builtin_continuations = ( - r'(?:with-return|restarts|return-continuation|with-datastack|' - r'recover|rethrow-restarts||ifcc|set-catchstack|' - r'>continuation<|cleanup|ignore-errors|restart\\?|' - r'compute-restarts|attempt-all-error|error-thread|continue|' - r'|attempt-all-error\\?|condition\\?|' - r'|throw-restarts|error|catchstack|continue-with|' - r'thread-error-hook|continuation|rethrow|callcc1|' - r'error-continuation|callcc0|attempt-all|condition|' - r'continuation\\?|restart|return)\s' - ) - - tokens = { - 'root': [ - # TODO: (( inputs -- outputs )) - # TODO: << ... 
>> - - # defining words - (r'(\s*)(:|::|MACRO:|MEMO:)(\s+)(\S+)', - bygroups(Text, Keyword, Text, Name.Function)), - (r'(\s*)(M:)(\s+)(\S+)(\s+)(\S+)', - bygroups(Text, Keyword, Text, Name.Class, Text, Name.Function)), - (r'(\s*)(GENERIC:)(\s+)(\S+)', - bygroups(Text, Keyword, Text, Name.Function)), - (r'(\s*)(HOOK:|GENERIC#)(\s+)(\S+)(\s+)(\S+)', - bygroups(Text, Keyword, Text, Name.Function, Text, Name.Function)), - (r'(\()(\s+)', bygroups(Name.Function, Text), 'stackeffect'), - (r'\;\s', Keyword), - - # imports and namespaces - (r'(USING:)((?:\s|\\\s)+)', - bygroups(Keyword.Namespace, Text), 'import'), - (r'(USE:)(\s+)(\S+)', - bygroups(Keyword.Namespace, Text, Name.Namespace)), - (r'(UNUSE:)(\s+)(\S+)', - bygroups(Keyword.Namespace, Text, Name.Namespace)), - (r'(QUALIFIED:)(\s+)(\S+)', - bygroups(Keyword.Namespace, Text, Name.Namespace)), - (r'(QUALIFIED-WITH:)(\s+)(\S+)', - bygroups(Keyword.Namespace, Text, Name.Namespace)), - (r'(FROM:|EXCLUDE:)(\s+)(\S+)(\s+)(=>)', - bygroups(Keyword.Namespace, Text, Name.Namespace, Text, Text)), - (r'(IN:)(\s+)(\S+)', - bygroups(Keyword.Namespace, Text, Name.Namespace)), - (r'(?:ALIAS|DEFER|FORGET|POSTPONE):', Keyword.Namespace), - - # tuples and classes - (r'(TUPLE:)(\s+)(\S+)(\s+<\s+)(\S+)', - bygroups(Keyword, Text, Name.Class, Text, Name.Class), 'slots'), - (r'(TUPLE:)(\s+)(\S+)', - bygroups(Keyword, Text, Name.Class), 'slots'), - (r'(UNION:)(\s+)(\S+)', bygroups(Keyword, Text, Name.Class)), - (r'(INTERSECTION:)(\s+)(\S+)', bygroups(Keyword, Text, Name.Class)), - (r'(PREDICATE:)(\s+)(\S+)(\s+<\s+)(\S+)', - bygroups(Keyword, Text, Name.Class, Text, Name.Class)), - (r'(C:)(\s+)(\S+)(\s+)(\S+)', - bygroups(Keyword, Text, Name.Function, Text, Name.Class)), - (r'INSTANCE:', Keyword), - (r'SLOT:', Keyword), - (r'MIXIN:', Keyword), - (r'(?:SINGLETON|SINGLETONS):', Keyword), - - # other syntax - (r'CONSTANT:', Keyword), - (r'(?:SYMBOL|SYMBOLS):', Keyword), - (r'ERROR:', Keyword), - (r'SYNTAX:', Keyword), - 
(r'(HELP:)(\s+)(\S+)', bygroups(Keyword, Text, Name.Function)), - (r'(MAIN:)(\s+)(\S+)', - bygroups(Keyword.Namespace, Text, Name.Function)), - (r'(?:ALIEN|TYPEDEF|FUNCTION|STRUCT):', Keyword), - - # vocab.private - # TODO: words inside vocab.private should have red names? - (r'(?:)', Keyword.Namespace), - - # strings - (r'"""\s+(?:.|\n)*?\s+"""', String), - (r'"(?:\\\\|\\"|[^"])*"', String), - (r'CHAR:\s+(\\[\\abfnrstv]*|\S)\s', String.Char), - - # comments - (r'\!\s+.*$', Comment), - (r'#\!\s+.*$', Comment), - - # boolean constants - (r'(t|f)\s', Name.Constant), - - # numbers - (r'-?\d+\.\d+\s', Number.Float), - (r'-?\d+\s', Number.Integer), - (r'HEX:\s+[a-fA-F\d]+\s', Number.Hex), - (r'BIN:\s+[01]+\s', Number.Integer), - (r'OCT:\s+[0-7]+\s', Number.Oct), - - # operators - (r'[-+/*=<>^]\s', Operator), - - # keywords - (r'(?:deprecated|final|foldable|flushable|inline|recursive)\s', - Keyword), - - # builtins - (builtin_kernel, Name.Builtin), - (builtin_assocs, Name.Builtin), - (builtin_combinators, Name.Builtin), - (builtin_math, Name.Builtin), - (builtin_sequences, Name.Builtin), - (builtin_namespaces, Name.Builtin), - (builtin_arrays, Name.Builtin), - (builtin_io, Name.Builtin), - (builtin_strings, Name.Builtin), - (builtin_vectors, Name.Builtin), - (builtin_continuations, Name.Builtin), - - # whitespaces - usually not relevant - (r'\s+', Text), - - # everything else is text - (r'\S+', Text), - ], - - 'stackeffect': [ - (r'\s*\(', Name.Function, 'stackeffect'), - (r'\)', Name.Function, '#pop'), - (r'\-\-', Name.Function), - (r'\s+', Text), - (r'\S+', Name.Variable), - ], - - 'slots': [ - (r'\s+', Text), - (r';\s', Keyword, '#pop'), - (r'\S+', Name.Variable), - ], - - 'import': [ - (r';', Keyword, '#pop'), - (r'\S+', Name.Namespace), - (r'\s+', Text), - ], - } - - -class FancyLexer(RegexLexer): - """ - Pygments Lexer For `Fancy `_. 
- - Fancy is a self-hosted, pure object-oriented, dynamic, - class-based, concurrent general-purpose programming language - running on Rubinius, the Ruby VM. - - *New in Pygments 1.5.* - """ - name = 'Fancy' - filenames = ['*.fy', '*.fancypack'] - aliases = ['fancy', 'fy'] - mimetypes = ['text/x-fancysrc'] - - tokens = { - # copied from PerlLexer: - 'balanced-regex': [ - (r'/(\\\\|\\/|[^/])*/[egimosx]*', String.Regex, '#pop'), - (r'!(\\\\|\\!|[^!])*![egimosx]*', String.Regex, '#pop'), - (r'\\(\\\\|[^\\])*\\[egimosx]*', String.Regex, '#pop'), - (r'{(\\\\|\\}|[^}])*}[egimosx]*', String.Regex, '#pop'), - (r'<(\\\\|\\>|[^>])*>[egimosx]*', String.Regex, '#pop'), - (r'\[(\\\\|\\\]|[^\]])*\][egimosx]*', String.Regex, '#pop'), - (r'\((\\\\|\\\)|[^\)])*\)[egimosx]*', String.Regex, '#pop'), - (r'@(\\\\|\\\@|[^\@])*@[egimosx]*', String.Regex, '#pop'), - (r'%(\\\\|\\\%|[^\%])*%[egimosx]*', String.Regex, '#pop'), - (r'\$(\\\\|\\\$|[^\$])*\$[egimosx]*', String.Regex, '#pop'), - ], - 'root': [ - (r'\s+', Text), - - # balanced delimiters (copied from PerlLexer): - (r's{(\\\\|\\}|[^}])*}\s*', String.Regex, 'balanced-regex'), - (r's<(\\\\|\\>|[^>])*>\s*', String.Regex, 'balanced-regex'), - (r's\[(\\\\|\\\]|[^\]])*\]\s*', String.Regex, 'balanced-regex'), - (r's\((\\\\|\\\)|[^\)])*\)\s*', String.Regex, 'balanced-regex'), - (r'm?/(\\\\|\\/|[^/\n])*/[gcimosx]*', String.Regex), - (r'm(?=[/!\\{<\[\(@%\$])', String.Regex, 'balanced-regex'), - - # Comments - (r'#(.*?)\n', Comment.Single), - # Symbols - (r'\'([^\'\s\[\]\(\)\{\}]+|\[\])', String.Symbol), - # Multi-line DoubleQuotedString - (r'"""(\\\\|\\"|[^"])*"""', String), - # DoubleQuotedString - (r'"(\\\\|\\"|[^"])*"', String), - # keywords - (r'(def|class|try|catch|finally|retry|return|return_local|match|' - r'case|->|=>)\b', Keyword), - # constants - (r'(self|super|nil|false|true)\b', Name.Constant), - (r'[(){};,/?\|:\\]', Punctuation), - # names - (r'(Object|Array|Hash|Directory|File|Class|String|Number|' - 
r'Enumerable|FancyEnumerable|Block|TrueClass|NilClass|' - r'FalseClass|Tuple|Symbol|Stack|Set|FancySpec|Method|Package|' - r'Range)\b', Name.Builtin), - # functions - (r'[a-zA-Z]([a-zA-Z0-9_]|[-+?!=*/^><%])*:', Name.Function), - # operators, must be below functions - (r'[-+*/~,<>=&!?%^\[\]\.$]+', Operator), - ('[A-Z][a-zA-Z0-9_]*', Name.Constant), - ('@[a-zA-Z_][a-zA-Z0-9_]*', Name.Variable.Instance), - ('@@[a-zA-Z_][a-zA-Z0-9_]*', Name.Variable.Class), - ('@@?', Operator), - ('[a-zA-Z_][a-zA-Z0-9_]*', Name), - # numbers - / checks are necessary to avoid mismarking regexes, - # see comment in RubyLexer - (r'(0[oO]?[0-7]+(?:_[0-7]+)*)(\s*)([/?])?', - bygroups(Number.Oct, Text, Operator)), - (r'(0[xX][0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*)(\s*)([/?])?', - bygroups(Number.Hex, Text, Operator)), - (r'(0[bB][01]+(?:_[01]+)*)(\s*)([/?])?', - bygroups(Number.Bin, Text, Operator)), - (r'([\d]+(?:_\d+)*)(\s*)([/?])?', - bygroups(Number.Integer, Text, Operator)), - (r'\d+([eE][+-]?[0-9]+)|\d+\.\d+([eE][+-]?[0-9]+)?', Number.Float), - (r'\d+', Number.Integer) - ] - } - - -class DgLexer(RegexLexer): - """ - Lexer for `dg `_, - a functional and object-oriented programming language - running on the CPython 3 VM. 
- - *New in Pygments 1.6.* - """ - name = 'dg' - aliases = ['dg'] - filenames = ['*.dg'] - mimetypes = ['text/x-dg'] - - tokens = { - 'root': [ - # Whitespace: - (r'\s+', Text), - (r'#.*?$', Comment.Single), - # Lexemes: - # Numbers - (r'0[bB][01]+', Number.Bin), - (r'0[oO][0-7]+', Number.Oct), - (r'0[xX][\da-fA-F]+', Number.Hex), - (r'[+-]?\d+\.\d+([eE][+-]?\d+)?[jJ]?', Number.Float), - (r'[+-]?\d+[eE][+-]?\d+[jJ]?', Number.Float), - (r'[+-]?\d+[jJ]?', Number.Integer), - # Character/String Literals - (r"[br]*'''", String, combined('stringescape', 'tsqs', 'string')), - (r'[br]*"""', String, combined('stringescape', 'tdqs', 'string')), - (r"[br]*'", String, combined('stringescape', 'sqs', 'string')), - (r'[br]*"', String, combined('stringescape', 'dqs', 'string')), - # Operators - (r"`\w+'*`", Operator), # Infix links - # Reserved infix links - (r'\b(or|and|if|else|where|is|in)\b', Operator.Word), - (r'[!$%&*+\-./:<-@\\^|~;,]+', Operator), - # Identifiers - # Python 3 types - (r"(?`_ source code. 
- - *New in Pygments 1.7.* - """ - - name = 'Perl6' - aliases = ['perl6', 'pl6'] - filenames = ['*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', - '*.6pm', '*.p6m', '*.pm6'] - mimetypes = ['text/x-perl6', 'application/x-perl6'] - flags = re.MULTILINE | re.DOTALL | re.UNICODE - - PERL6_IDENTIFIER_RANGE = "['a-zA-Z0-9_:-]" - - PERL6_KEYWORDS = ( - 'BEGIN', 'CATCH', 'CHECK', 'CONTROL', 'END', 'ENTER', 'FIRST', 'INIT', - 'KEEP', 'LAST', 'LEAVE', 'NEXT', 'POST', 'PRE', 'START', 'TEMP', - 'UNDO', 'as', 'assoc', 'async', 'augment', 'binary', 'break', 'but', - 'cached', 'category', 'class', 'constant', 'contend', 'continue', - 'copy', 'deep', 'default', 'defequiv', 'defer', 'die', 'do', 'else', - 'elsif', 'enum', 'equiv', 'exit', 'export', 'fail', 'fatal', 'for', - 'gather', 'given', 'goto', 'grammar', 'handles', 'has', 'if', 'inline', - 'irs', 'is', 'last', 'leave', 'let', 'lift', 'loop', 'looser', 'macro', - 'make', 'maybe', 'method', 'module', 'multi', 'my', 'next', 'of', - 'ofs', 'only', 'oo', 'ors', 'our', 'package', 'parsed', 'prec', - 'proto', 'readonly', 'redo', 'ref', 'regex', 'reparsed', 'repeat', - 'require', 'required', 'return', 'returns', 'role', 'rule', 'rw', - 'self', 'slang', 'state', 'sub', 'submethod', 'subset', 'supersede', - 'take', 'temp', 'tighter', 'token', 'trusts', 'try', 'unary', - 'unless', 'until', 'use', 'warn', 'when', 'where', 'while', 'will', - ) - - PERL6_BUILTINS = ( - 'ACCEPTS', 'HOW', 'REJECTS', 'VAR', 'WHAT', 'WHENCE', 'WHERE', 'WHICH', - 'WHO', 'abs', 'acos', 'acosec', 'acosech', 'acosh', 'acotan', 'acotanh', - 'all', 'any', 'approx', 'arity', 'asec', 'asech', 'asin', 'asinh' - 'assuming', 'atan', 'atan2', 'atanh', 'attr', 'bless', 'body', 'by' - 'bytes', 'caller', 'callsame', 'callwith', 'can', 'capitalize', 'cat', - 'ceiling', 'chars', 'chmod', 'chomp', 'chop', 'chr', 'chroot', - 'circumfix', 'cis', 'classify', 'clone', 'close', 'cmp_ok', 'codes', - 'comb', 'connect', 'contains', 'context', 'cos', 'cosec', 'cosech', - 
'cosh', 'cotan', 'cotanh', 'count', 'defined', 'delete', 'diag', - 'dies_ok', 'does', 'e', 'each', 'eager', 'elems', 'end', 'eof', 'eval', - 'eval_dies_ok', 'eval_elsewhere', 'eval_lives_ok', 'evalfile', 'exists', - 'exp', 'first', 'flip', 'floor', 'flunk', 'flush', 'fmt', 'force_todo', - 'fork', 'from', 'getc', 'gethost', 'getlogin', 'getpeername', 'getpw', - 'gmtime', 'graphs', 'grep', 'hints', 'hyper', 'im', 'index', 'infix', - 'invert', 'is_approx', 'is_deeply', 'isa', 'isa_ok', 'isnt', 'iterator', - 'join', 'key', 'keys', 'kill', 'kv', 'lastcall', 'lazy', 'lc', 'lcfirst', - 'like', 'lines', 'link', 'lives_ok', 'localtime', 'log', 'log10', 'map', - 'max', 'min', 'minmax', 'name', 'new', 'nextsame', 'nextwith', 'nfc', - 'nfd', 'nfkc', 'nfkd', 'nok_error', 'nonce', 'none', 'normalize', 'not', - 'nothing', 'ok', 'once', 'one', 'open', 'opendir', 'operator', 'ord', - 'p5chomp', 'p5chop', 'pack', 'pair', 'pairs', 'pass', 'perl', 'pi', - 'pick', 'plan', 'plan_ok', 'polar', 'pop', 'pos', 'postcircumfix', - 'postfix', 'pred', 'prefix', 'print', 'printf', 'push', 'quasi', - 'quotemeta', 'rand', 're', 'read', 'readdir', 'readline', 'reduce', - 'reverse', 'rewind', 'rewinddir', 'rindex', 'roots', 'round', - 'roundrobin', 'run', 'runinstead', 'sameaccent', 'samecase', 'say', - 'sec', 'sech', 'sech', 'seek', 'shape', 'shift', 'sign', 'signature', - 'sin', 'sinh', 'skip', 'skip_rest', 'sleep', 'slurp', 'sort', 'splice', - 'split', 'sprintf', 'sqrt', 'srand', 'strand', 'subst', 'substr', 'succ', - 'sum', 'symlink', 'tan', 'tanh', 'throws_ok', 'time', 'times', 'to', - 'todo', 'trim', 'trim_end', 'trim_start', 'true', 'truncate', 'uc', - 'ucfirst', 'undef', 'undefine', 'uniq', 'unlike', 'unlink', 'unpack', - 'unpolar', 'unshift', 'unwrap', 'use_ok', 'value', 'values', 'vec', - 'version_lt', 'void', 'wait', 'want', 'wrap', 'write', 'zip', - ) - - PERL6_BUILTIN_CLASSES = ( - 'Abstraction', 'Any', 'AnyChar', 'Array', 'Associative', 'Bag', 'Bit', - 'Blob', 'Block', 'Bool', 'Buf', 
'Byte', 'Callable', 'Capture', 'Char', 'Class', - 'Code', 'Codepoint', 'Comparator', 'Complex', 'Decreasing', 'Exception', - 'Failure', 'False', 'Grammar', 'Grapheme', 'Hash', 'IO', 'Increasing', - 'Int', 'Junction', 'KeyBag', 'KeyExtractor', 'KeyHash', 'KeySet', - 'KitchenSink', 'List', 'Macro', 'Mapping', 'Match', 'Matcher', 'Method', - 'Module', 'Num', 'Object', 'Ordered', 'Ordering', 'OrderingPair', - 'Package', 'Pair', 'Positional', 'Proxy', 'Range', 'Rat', 'Regex', - 'Role', 'Routine', 'Scalar', 'Seq', 'Set', 'Signature', 'Str', 'StrLen', - 'StrPos', 'Sub', 'Submethod', 'True', 'UInt', 'Undef', 'Version', 'Void', - 'Whatever', 'bit', 'bool', 'buf', 'buf1', 'buf16', 'buf2', 'buf32', - 'buf4', 'buf64', 'buf8', 'complex', 'int', 'int1', 'int16', 'int2', - 'int32', 'int4', 'int64', 'int8', 'num', 'rat', 'rat1', 'rat16', 'rat2', - 'rat32', 'rat4', 'rat64', 'rat8', 'uint', 'uint1', 'uint16', 'uint2', - 'uint32', 'uint4', 'uint64', 'uint8', 'utf16', 'utf32', 'utf8', - ) - - PERL6_OPERATORS = ( - 'X', 'Z', 'after', 'also', 'and', 'andthen', 'before', 'cmp', 'div', - 'eq', 'eqv', 'extra', 'ff', 'fff', 'ge', 'gt', 'le', 'leg', 'lt', 'm', - 'mm', 'mod', 'ne', 'or', 'orelse', 'rx', 's', 'tr', 'x', 'xor', 'xx', - '++', '--', '**', '!', '+', '-', '~', '?', '|', '||', '+^', '~^', '?^', - '^', '*', '/', '%', '%%', '+&', '+<', '+>', '~&', '~<', '~>', '?&', - 'gcd', 'lcm', '+', '-', '+|', '+^', '~|', '~^', '?|', '?^', - '~', '&', '^', 'but', 'does', '<=>', '..', '..^', '^..', '^..^', - '!=', '==', '<', '<=', '>', '>=', '~~', '===', '!eqv', - '&&', '||', '^^', '//', 'min', 'max', '??', '!!', 'ff', 'fff', 'so', - 'not', '<==', '==>', '<<==', '==>>', - ) - - # Perl 6 has a *lot* of possible bracketing characters - # this list was lifted from STD.pm6 (https://github.com/perl6/std) - PERL6_BRACKETS = { - u'\u0028' : u'\u0029', u'\u003c' : u'\u003e', u'\u005b' : u'\u005d', u'\u007b' : u'\u007d', - u'\u00ab' : u'\u00bb', u'\u0f3a' : u'\u0f3b', u'\u0f3c' : u'\u0f3d', u'\u169b' : 
u'\u169c', - u'\u2018' : u'\u2019', u'\u201a' : u'\u2019', u'\u201b' : u'\u2019', u'\u201c' : u'\u201d', - u'\u201e' : u'\u201d', u'\u201f' : u'\u201d', u'\u2039' : u'\u203a', u'\u2045' : u'\u2046', - u'\u207d' : u'\u207e', u'\u208d' : u'\u208e', u'\u2208' : u'\u220b', u'\u2209' : u'\u220c', - u'\u220a' : u'\u220d', u'\u2215' : u'\u29f5', u'\u223c' : u'\u223d', u'\u2243' : u'\u22cd', - u'\u2252' : u'\u2253', u'\u2254' : u'\u2255', u'\u2264' : u'\u2265', u'\u2266' : u'\u2267', - u'\u2268' : u'\u2269', u'\u226a' : u'\u226b', u'\u226e' : u'\u226f', u'\u2270' : u'\u2271', - u'\u2272' : u'\u2273', u'\u2274' : u'\u2275', u'\u2276' : u'\u2277', u'\u2278' : u'\u2279', - u'\u227a' : u'\u227b', u'\u227c' : u'\u227d', u'\u227e' : u'\u227f', u'\u2280' : u'\u2281', - u'\u2282' : u'\u2283', u'\u2284' : u'\u2285', u'\u2286' : u'\u2287', u'\u2288' : u'\u2289', - u'\u228a' : u'\u228b', u'\u228f' : u'\u2290', u'\u2291' : u'\u2292', u'\u2298' : u'\u29b8', - u'\u22a2' : u'\u22a3', u'\u22a6' : u'\u2ade', u'\u22a8' : u'\u2ae4', u'\u22a9' : u'\u2ae3', - u'\u22ab' : u'\u2ae5', u'\u22b0' : u'\u22b1', u'\u22b2' : u'\u22b3', u'\u22b4' : u'\u22b5', - u'\u22b6' : u'\u22b7', u'\u22c9' : u'\u22ca', u'\u22cb' : u'\u22cc', u'\u22d0' : u'\u22d1', - u'\u22d6' : u'\u22d7', u'\u22d8' : u'\u22d9', u'\u22da' : u'\u22db', u'\u22dc' : u'\u22dd', - u'\u22de' : u'\u22df', u'\u22e0' : u'\u22e1', u'\u22e2' : u'\u22e3', u'\u22e4' : u'\u22e5', - u'\u22e6' : u'\u22e7', u'\u22e8' : u'\u22e9', u'\u22ea' : u'\u22eb', u'\u22ec' : u'\u22ed', - u'\u22f0' : u'\u22f1', u'\u22f2' : u'\u22fa', u'\u22f3' : u'\u22fb', u'\u22f4' : u'\u22fc', - u'\u22f6' : u'\u22fd', u'\u22f7' : u'\u22fe', u'\u2308' : u'\u2309', u'\u230a' : u'\u230b', - u'\u2329' : u'\u232a', u'\u23b4' : u'\u23b5', u'\u2768' : u'\u2769', u'\u276a' : u'\u276b', - u'\u276c' : u'\u276d', u'\u276e' : u'\u276f', u'\u2770' : u'\u2771', u'\u2772' : u'\u2773', - u'\u2774' : u'\u2775', u'\u27c3' : u'\u27c4', u'\u27c5' : u'\u27c6', u'\u27d5' : u'\u27d6', - u'\u27dd' : 
u'\u27de', u'\u27e2' : u'\u27e3', u'\u27e4' : u'\u27e5', u'\u27e6' : u'\u27e7', - u'\u27e8' : u'\u27e9', u'\u27ea' : u'\u27eb', u'\u2983' : u'\u2984', u'\u2985' : u'\u2986', - u'\u2987' : u'\u2988', u'\u2989' : u'\u298a', u'\u298b' : u'\u298c', u'\u298d' : u'\u298e', - u'\u298f' : u'\u2990', u'\u2991' : u'\u2992', u'\u2993' : u'\u2994', u'\u2995' : u'\u2996', - u'\u2997' : u'\u2998', u'\u29c0' : u'\u29c1', u'\u29c4' : u'\u29c5', u'\u29cf' : u'\u29d0', - u'\u29d1' : u'\u29d2', u'\u29d4' : u'\u29d5', u'\u29d8' : u'\u29d9', u'\u29da' : u'\u29db', - u'\u29f8' : u'\u29f9', u'\u29fc' : u'\u29fd', u'\u2a2b' : u'\u2a2c', u'\u2a2d' : u'\u2a2e', - u'\u2a34' : u'\u2a35', u'\u2a3c' : u'\u2a3d', u'\u2a64' : u'\u2a65', u'\u2a79' : u'\u2a7a', - u'\u2a7d' : u'\u2a7e', u'\u2a7f' : u'\u2a80', u'\u2a81' : u'\u2a82', u'\u2a83' : u'\u2a84', - u'\u2a8b' : u'\u2a8c', u'\u2a91' : u'\u2a92', u'\u2a93' : u'\u2a94', u'\u2a95' : u'\u2a96', - u'\u2a97' : u'\u2a98', u'\u2a99' : u'\u2a9a', u'\u2a9b' : u'\u2a9c', u'\u2aa1' : u'\u2aa2', - u'\u2aa6' : u'\u2aa7', u'\u2aa8' : u'\u2aa9', u'\u2aaa' : u'\u2aab', u'\u2aac' : u'\u2aad', - u'\u2aaf' : u'\u2ab0', u'\u2ab3' : u'\u2ab4', u'\u2abb' : u'\u2abc', u'\u2abd' : u'\u2abe', - u'\u2abf' : u'\u2ac0', u'\u2ac1' : u'\u2ac2', u'\u2ac3' : u'\u2ac4', u'\u2ac5' : u'\u2ac6', - u'\u2acd' : u'\u2ace', u'\u2acf' : u'\u2ad0', u'\u2ad1' : u'\u2ad2', u'\u2ad3' : u'\u2ad4', - u'\u2ad5' : u'\u2ad6', u'\u2aec' : u'\u2aed', u'\u2af7' : u'\u2af8', u'\u2af9' : u'\u2afa', - u'\u2e02' : u'\u2e03', u'\u2e04' : u'\u2e05', u'\u2e09' : u'\u2e0a', u'\u2e0c' : u'\u2e0d', - u'\u2e1c' : u'\u2e1d', u'\u2e20' : u'\u2e21', u'\u3008' : u'\u3009', u'\u300a' : u'\u300b', - u'\u300c' : u'\u300d', u'\u300e' : u'\u300f', u'\u3010' : u'\u3011', u'\u3014' : u'\u3015', - u'\u3016' : u'\u3017', u'\u3018' : u'\u3019', u'\u301a' : u'\u301b', u'\u301d' : u'\u301e', - u'\ufd3e' : u'\ufd3f', u'\ufe17' : u'\ufe18', u'\ufe35' : u'\ufe36', u'\ufe37' : u'\ufe38', - u'\ufe39' : u'\ufe3a', u'\ufe3b' : 
u'\ufe3c', u'\ufe3d' : u'\ufe3e', u'\ufe3f' : u'\ufe40', - u'\ufe41' : u'\ufe42', u'\ufe43' : u'\ufe44', u'\ufe47' : u'\ufe48', u'\ufe59' : u'\ufe5a', - u'\ufe5b' : u'\ufe5c', u'\ufe5d' : u'\ufe5e', u'\uff08' : u'\uff09', u'\uff1c' : u'\uff1e', - u'\uff3b' : u'\uff3d', u'\uff5b' : u'\uff5d', u'\uff5f' : u'\uff60', u'\uff62' : u'\uff63', - } - - def _build_word_match(words, boundary_regex_fragment = None, prefix = '', suffix = ''): - if boundary_regex_fragment is None: - return r'\b(' + prefix + r'|'.join([ re.escape(x) for x in words]) + suffix + r')\b' - else: - return r'(? 0: - next_open_pos = text.find(opening_chars, search_pos + n_chars) - next_close_pos = text.find(closing_chars, search_pos + n_chars) - - if next_close_pos == -1: - next_close_pos = len(text) - nesting_level = 0 - elif next_open_pos != -1 and next_open_pos < next_close_pos: - nesting_level += 1 - search_pos = next_open_pos - else: # next_close_pos < next_open_pos - nesting_level -= 1 - search_pos = next_close_pos - - end_pos = next_close_pos - - if adverbs is not None and re.search(r':to\b', adverbs): - heredoc_terminator = text[match.start('delimiter') + n_chars : end_pos] - end_heredoc = re.search(r'^\s*' + re.escape(heredoc_terminator) + r'\s*$', text[ match.end('delimiter') : ], re.MULTILINE) - - if end_heredoc: - end_pos = match.end('delimiter') + end_heredoc.end() - else: - end_pos = len(text) - - yield match.start(), token_class, text[match.start() : end_pos + n_chars] - context.pos = end_pos + n_chars - - return callback - - def opening_brace_callback(lexer, match, context): - stack = context.stack - - yield match.start(), Text, context.text[match.start() : match.end()] - context.pos = match.end() - - # if we encounter an opening brace and we're one level - # below a token state, it means we need to increment - # the nesting level for braces so we know later when - # we should return to the token rules. 
- if len(stack) > 2 and stack[-2] == 'token': - context.perl6_token_nesting_level += 1 - - def closing_brace_callback(lexer, match, context): - stack = context.stack - - yield match.start(), Text, context.text[match.start() : match.end()] - context.pos = match.end() - - # if we encounter a free closing brace and we're one level - # below a token state, it means we need to check the nesting - # level to see if we need to return to the token state. - if len(stack) > 2 and stack[-2] == 'token': - context.perl6_token_nesting_level -= 1 - if context.perl6_token_nesting_level == 0: - stack.pop() - - def embedded_perl6_callback(lexer, match, context): - context.perl6_token_nesting_level = 1 - yield match.start(), Text, context.text[match.start() : match.end()] - context.pos = match.end() - context.stack.append('root') - - # If you're modifying these rules, be careful if you need to process '{' or '}' characters. - # We have special logic for processing these characters (due to the fact that you can nest - # Perl 6 code in regex blocks), so if you need to process one of them, make sure you also - # process the corresponding one! - tokens = { - 'common' : [ - (r'#[`|=](?P(?P[' + ''.join(PERL6_BRACKETS.keys()) + r'])(?P=first_char)*)', brackets_callback(Comment.Multiline)), - (r'#[^\n]*$', Comment.Singleline), - (r'^(\s*)=begin\s+(\w+)\b.*?^\1=end\s+\2', Comment.Multiline), - (r'^(\s*)=for.*?\n\s*?\n', Comment.Multiline), - (r'^=.*?\n\s*?\n', Comment.Multiline), - (r'(regex|token|rule)(\s*' + PERL6_IDENTIFIER_RANGE + '+:sym)', bygroups(Keyword, Name), 'token-sym-brackets'), - (r'(regex|token|rule)(?!' + PERL6_IDENTIFIER_RANGE + ')(\s*' + PERL6_IDENTIFIER_RANGE + '+)?', bygroups(Keyword, Name), 'pre-token'), - # deal with a special case in the Perl 6 grammar (role q { ... 
}) - (r'(role)(\s+)(q)(\s*)', bygroups(Keyword, Text, Name, Text)), - (_build_word_match(PERL6_KEYWORDS, PERL6_IDENTIFIER_RANGE), Keyword), - (_build_word_match(PERL6_BUILTIN_CLASSES, PERL6_IDENTIFIER_RANGE, suffix = '(?::[UD])?'), Name.Builtin), - (_build_word_match(PERL6_BUILTINS, PERL6_IDENTIFIER_RANGE), Name.Builtin), - # copied from PerlLexer - (r'[$@%&][.^:?=!~]?' + PERL6_IDENTIFIER_RANGE + u'+(?:<<.*?>>|<.*?>|«.*?»)*', Name.Variable), - (r'\$[!/](?:<<.*?>>|<.*?>|«.*?»)*', Name.Variable.Global), - (r'::\?\w+', Name.Variable.Global), - (r'[$@%&]\*' + PERL6_IDENTIFIER_RANGE + u'+(?:<<.*?>>|<.*?>|«.*?»)*', Name.Variable.Global), - (r'\$(?:<.*?>)+', Name.Variable), - (r'(?:q|qq|Q)[a-zA-Z]?\s*(?P:[\w\s:]+)?\s*(?P(?P[^0-9a-zA-Z:\s])(?P=first_char)*)', brackets_callback(String)), - # copied from PerlLexer - (r'0_?[0-7]+(_[0-7]+)*', Number.Oct), - (r'0x[0-9A-Fa-f]+(_[0-9A-Fa-f]+)*', Number.Hex), - (r'0b[01]+(_[01]+)*', Number.Bin), - (r'(?i)(\d*(_\d*)*\.\d+(_\d*)*|\d+(_\d*)*\.\d+(_\d*)*)(e[+-]?\d+)?', Number.Float), - (r'(?i)\d+(_\d*)*e[+-]?\d+(_\d*)*', Number.Float), - (r'\d+(_\d+)*', Number.Integer), - (r'(?<=~~)\s*/(?:\\\\|\\/|.)*?/', String.Regex), - (r'(?<=[=(,])\s*/(?:\\\\|\\/|.)*?/', String.Regex), - (r'm\w+(?=\()', Name), - (r'(?:m|ms|rx)\s*(?P:[\w\s:]+)?\s*(?P(?P[^0-9a-zA-Z:\s])(?P=first_char)*)', brackets_callback(String.Regex)), - (r'(?:s|ss|tr)\s*(?::[\w\s:]+)?\s*/(?:\\\\|\\/|.)*?/(?:\\\\|\\/|.)*?/', String.Regex), - (r'<[^\s=].*?\S>', String), - (_build_word_match(PERL6_OPERATORS), Operator), - (r'[0-9a-zA-Z_]' + PERL6_IDENTIFIER_RANGE + '*', Name), - (r"'(\\\\|\\[^\\]|[^'\\])*'", String), - (r'"(\\\\|\\[^\\]|[^"\\])*"', String), - ], - 'root' : [ - include('common'), - (r'\{', opening_brace_callback), - (r'\}', closing_brace_callback), - (r'.+?', Text), - ], - 'pre-token' : [ - include('common'), - (r'\{', Text, ('#pop', 'token')), - (r'.+?', Text), - ], - 'token-sym-brackets' : [ - (r'(?P(?P[' + ''.join(PERL6_BRACKETS.keys()) + '])(?P=first_char)*)', 
brackets_callback(Name), ('#pop', 'pre-token')), - (r'', Name, ('#pop', 'pre-token')), - ], - 'token': [ - (r'}', Text, '#pop'), - (r'(?<=:)(?:my|our|state|constant|temp|let).*?;', using(this)), - # make sure that quotes in character classes aren't treated as strings - (r'<(?:[-!?+.]\s*)?\[.*?\]>', String.Regex), - # make sure that '#' characters in quotes aren't treated as comments - (r"(?/-]', Operator), - (r'[()\[\],.]', Punctuation), - (r'\b(case)(.+?)(:)', bygroups(Keyword, using(this), Text)), - (r'(auto|break|case|const|continue|default|do|else|enum|extern|' - r'for|goto|if|register|restricted|return|sizeof|static|struct|' - r'switch|typedef|union|volatile|while)\b', Keyword), - (r'(bool|int|long|float|short|double|char|unsigned|signed|void|' - r'[a-z_][a-z0-9_]*_t)\b', - Keyword.Type), - (r'(_{0,2}inline|naked|restrict|thread|typename)\b', Keyword.Reserved), - # Vector intrinsics - (r'(__(m128i|m128d|m128|m64))\b', Keyword.Reserved), - # Microsoft-isms - (r'__(asm|int8|based|except|int16|stdcall|cdecl|fastcall|int32|' - r'declspec|finally|int64|try|leave|wchar_t|w64|unaligned|' - r'raise|noop|identifier|forceinline|assume)\b', Keyword.Reserved), - (r'(true|false|NULL)\b', Name.Builtin), - ('[a-zA-Z_][a-zA-Z0-9_]*', Name), - ], - 'root': [ - include('whitespace'), - # functions - (r'((?:[a-zA-Z0-9_*\s])+?(?:\s|[*]))' # return arguments - r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name - r'(\s*\([^;]*?\))' # signature - r'(' + _ws + r')?({)', - bygroups(using(this), Name.Function, using(this), using(this), - Punctuation), - 'function'), - # function declarations - (r'((?:[a-zA-Z0-9_*\s])+?(?:\s|[*]))' # return arguments - r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name - r'(\s*\([^;]*?\))' # signature - r'(' + _ws + r')?(;)', - bygroups(using(this), Name.Function, using(this), using(this), - Punctuation)), - ('', Text, 'statement'), - ], - 'statement' : [ - include('whitespace'), - include('statements'), - ('[{}]', Punctuation), - (';', Punctuation, '#pop'), - ], - 
'function': [ - include('whitespace'), - include('statements'), - (';', Punctuation), - ('{', Punctuation, '#push'), - ('}', Punctuation, '#pop'), - ], - 'string': [ - (r'"', String, '#pop'), - (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|' - r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape), - (r'[^\\"\n]+', String), # all other characters - (r'\\\n', String), # line continuation - (r'\\', String), # stray backslash - ], - 'macro': [ - (r'[^/\n]+', Comment.Preproc), - (r'/[*](.|\n)*?[*]/', Comment.Multiline), - (r'//.*?\n', Comment.Single, '#pop'), - (r'/', Comment.Preproc), - (r'(?<=\\)\n', Comment.Preproc), - (r'\n', Comment.Preproc, '#pop'), - ], - 'if0': [ - (r'^\s*#if.*?(?)', Text, '#pop'), - ], - } - - def analyse_text(text): - return 0.1 - - -class SwigLexer(CppLexer): - """ - For `SWIG `_ source code. - - *New in Pygments 1.7.* - """ - name = 'SWIG' - aliases = ['Swig', 'swig'] - filenames = ['*.swg', '*.i'] - mimetypes = ['text/swig'] - priority = 0.04 # Lower than C/C++ and Objective C/C++ - - tokens = { - 'statements': [ - (r'(%[a-z_][a-z0-9_]*)', Name.Function), # SWIG directives - ('\$\**\&?[a-zA-Z0-9_]+', Name), # Special variables - (r'##*[a-zA-Z_][a-zA-Z0-9_]*', Comment.Preproc), # Stringification / additional preprocessor directives - inherit, - ], - } - - # This is a far from complete set of SWIG directives - swig_directives = ( - # Most common directives - '%apply', '%define', '%director', '%enddef', '%exception', '%extend', - '%feature', '%fragment', '%ignore', '%immutable', '%import', '%include', - '%inline', '%insert', '%module', '%newobject', '%nspace', '%pragma', - '%rename', '%shared_ptr', '%template', '%typecheck', '%typemap', - # Less common directives - '%arg', '%attribute', '%bang', '%begin', '%callback', '%catches', '%clear', - '%constant', '%copyctor', '%csconst', '%csconstvalue', '%csenum', - '%csmethodmodifiers', '%csnothrowexception', '%default', '%defaultctor', - '%defaultdtor', '%defined', '%delete', '%delobject', 
'%descriptor', - '%exceptionclass', '%exceptionvar', '%extend_smart_pointer', '%fragments', - '%header', '%ifcplusplus', '%ignorewarn', '%implicit', '%implicitconv', - '%init', '%javaconst', '%javaconstvalue', '%javaenum', '%javaexception', - '%javamethodmodifiers', '%kwargs', '%luacode', '%mutable', '%naturalvar', - '%nestedworkaround', '%perlcode', '%pythonabc', '%pythonappend', - '%pythoncallback', '%pythoncode', '%pythondynamic', '%pythonmaybecall', - '%pythonnondynamic', '%pythonprepend', '%refobject', '%shadow', '%sizeof', - '%trackobjects', '%types', '%unrefobject', '%varargs', '%warn', '%warnfilter') - - def analyse_text(text): - rv = 0.1 # Same as C/C++ - # Search for SWIG directives, which are conventionally at the beginning of - # a line. The probability of them being within a line is low, so let another - # lexer win in this case. - matches = re.findall(r'^\s*(%[a-z_][a-z0-9_]*)', text, re.M) - for m in matches: - if m in SwigLexer.swig_directives: - rv = 0.98 - break - else: - rv = 0.91 # Fraction higher than MatlabLexer - return rv - - -class ECLexer(CLexer): - """ - For eC source code with preprocessor directives. 
- - *New in Pygments 1.5.* - """ - name = 'eC' - aliases = ['ec'] - filenames = ['*.ec', '*.eh'] - mimetypes = ['text/x-echdr', 'text/x-ecsrc'] - - tokens = { - 'statements': [ - (r'(virtual|class|private|public|property|import|delete|new|new0|' - r'renew|renew0|define|get|set|remote|dllexport|dllimport|stdcall|' - r'subclass|__on_register_module|namespace|using|typed_object|' - r'any_object|incref|register|watch|stopwatching|firewatchers|' - r'watchable|class_designer|class_fixed|class_no_expansion|isset|' - r'class_default_property|property_category|class_data|' - r'class_property|virtual|thisclass|' - r'dbtable|dbindex|database_open|dbfield)\b', Keyword), - (r'(uint|uint16|uint32|uint64|bool|byte|unichar|int64)\b', - Keyword.Type), - (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'), - (r'(null|value|this)\b', Name.Builtin), - inherit, - ], - 'classname': [ - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop'), - # template specification - (r'\s*(?=>)', Text, '#pop'), - ], - } - - -class NesCLexer(CLexer): - """ - For `nesC `_ source code with preprocessor - directives. - - *New in Pygments 1.7.* - """ - name = 'nesC' - aliases = ['nesc'] - filenames = ['*.nc'] - mimetypes = ['text/x-nescsrc'] - - tokens = { - 'statements': [ - (r'(abstract|as|async|atomic|call|command|component|components|' - r'configuration|event|extends|generic|implementation|includes|' - r'interface|module|new|norace|post|provides|signal|task|uses)\b', - Keyword), - (r'(nx_struct|nx_union|nx_int8_t|nx_int16_t|nx_int32_t|nx_int64_t|' - r'nx_uint8_t|nx_uint16_t|nx_uint32_t|nx_uint64_t)\b', - Keyword.Type), - inherit, - ], - } - - -class ClayLexer(RegexLexer): - """ - For `Clay `_ source. 
- - *New in Pygments 1.7.* - """ - name = 'Clay' - filenames = ['*.clay'] - aliases = ['clay'] - mimetypes = ['text/x-clay'] - tokens = { - 'root': [ - (r'\s', Text), - (r'//.*?$', Comment.Singleline), - (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), - (r'\b(public|private|import|as|record|variant|instance' - r'|define|overload|default|external|alias' - r'|rvalue|ref|forward|inline|noinline|forceinline' - r'|enum|var|and|or|not|if|else|goto|return|while' - r'|switch|case|break|continue|for|in|true|false|try|catch|throw' - r'|finally|onerror|staticassert|eval|when|newtype' - r'|__FILE__|__LINE__|__COLUMN__|__ARG__' - r')\b', Keyword), - (r'[~!%^&*+=|:<>/-]', Operator), - (r'[#(){}\[\],;.]', Punctuation), - (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex), - (r'\d+[LlUu]*', Number.Integer), - (r'\b(true|false)\b', Name.Builtin), - (r'(?i)[a-z_?][a-z_?0-9]*', Name), - (r'"""', String, 'tdqs'), - (r'"', String, 'dqs'), - ], - 'strings': [ - (r'(?i)\\(x[0-9a-f]{2}|.)', String.Escape), - (r'.', String), - ], - 'nl': [ - (r'\n', String), - ], - 'dqs': [ - (r'"', String, '#pop'), - include('strings'), - ], - 'tdqs': [ - (r'"""', String, '#pop'), - include('strings'), - include('nl'), - ], - } - - -class DLexer(RegexLexer): - """ - For D source. 
- - *New in Pygments 1.2.* - """ - name = 'D' - filenames = ['*.d', '*.di'] - aliases = ['d'] - mimetypes = ['text/x-dsrc'] - - tokens = { - 'root': [ - (r'\n', Text), - (r'\s+', Text), - #(r'\\\n', Text), # line continuations - # Comments - (r'//(.*?)\n', Comment.Single), - (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), - (r'/\+', Comment.Multiline, 'nested_comment'), - # Keywords - (r'(abstract|alias|align|asm|assert|auto|body|break|case|cast' - r'|catch|class|const|continue|debug|default|delegate|delete' - r'|deprecated|do|else|enum|export|extern|finally|final' - r'|foreach_reverse|foreach|for|function|goto|if|import|inout' - r'|interface|invariant|in|is|lazy|mixin|module|new|nothrow|out' - r'|override|package|pragma|private|protected|public|pure|ref|return' - r'|scope|static|struct|super|switch|synchronized|template|this' - r'|throw|try|typedef|typeid|typeof|union|unittest|version|volatile' - r'|while|with|__traits)\b', Keyword - ), - (r'(bool|byte|cdouble|cent|cfloat|char|creal|dchar|double|float' - r'|idouble|ifloat|int|ireal|long|real|short|ubyte|ucent|uint|ulong' - r'|ushort|void|wchar)\b', Keyword.Type - ), - (r'(false|true|null)\b', Keyword.Constant), - (r'macro\b', Keyword.Reserved), - (r'(string|wstring|dstring)\b', Name.Builtin), - # FloatLiteral - # -- HexFloat - (r'0[xX]([0-9a-fA-F_]*\.[0-9a-fA-F_]+|[0-9a-fA-F_]+)' - r'[pP][+\-]?[0-9_]+[fFL]?[i]?', Number.Float), - # -- DecimalFloat - (r'[0-9_]+(\.[0-9_]+[eE][+\-]?[0-9_]+|' - r'\.[0-9_]*|[eE][+\-]?[0-9_]+)[fFL]?[i]?', Number.Float), - (r'\.(0|[1-9][0-9_]*)([eE][+\-]?[0-9_]+)?[fFL]?[i]?', Number.Float), - # IntegerLiteral - # -- Binary - (r'0[Bb][01_]+', Number), - # -- Octal - (r'0[0-7_]+', Number.Oct), - # -- Hexadecimal - (r'0[xX][0-9a-fA-F_]+', Number.Hex), - # -- Decimal - (r'(0|[1-9][0-9_]*)([LUu]|Lu|LU|uL|UL)?', Number.Integer), - # CharacterLiteral - (r"""'(\\['"?\\abfnrtv]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}""" - r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\&\w+;|.)'""", - String.Char - ), 
- # StringLiteral - # -- WysiwygString - (r'r"[^"]*"[cwd]?', String), - # -- AlternateWysiwygString - (r'`[^`]*`[cwd]?', String), - # -- DoubleQuotedString - (r'"(\\\\|\\"|[^"])*"[cwd]?', String), - # -- EscapeSequence - (r"\\(['\"?\\abfnrtv]|x[0-9a-fA-F]{2}|[0-7]{1,3}" - r"|u[0-9a-fA-F]{4}|U[0-9a-fA-F]{8}|&\w+;)", - String - ), - # -- HexString - (r'x"[0-9a-fA-F_\s]*"[cwd]?', String), - # -- DelimitedString - (r'q"\[', String, 'delimited_bracket'), - (r'q"\(', String, 'delimited_parenthesis'), - (r'q"<', String, 'delimited_angle'), - (r'q"{', String, 'delimited_curly'), - (r'q"([a-zA-Z_]\w*)\n.*?\n\1"', String), - (r'q"(.).*?\1"', String), - # -- TokenString - (r'q{', String, 'token_string'), - # Tokens - (r'(~=|\^=|%=|\*=|==|!>=|!<=|!<>=|!<>|!<|!>|!=|>>>=|>>>|>>=|>>|>=' - r'|<>=|<>|<<=|<<|<=|\+\+|\+=|--|-=|\|\||\|=|&&|&=|\.\.\.|\.\.|/=)' - r'|[/.&|\-+<>!()\[\]{}?,;:$=*%^~]', Punctuation - ), - # Identifier - (r'[a-zA-Z_]\w*', Name), - ], - 'nested_comment': [ - (r'[^+/]+', Comment.Multiline), - (r'/\+', Comment.Multiline, '#push'), - (r'\+/', Comment.Multiline, '#pop'), - (r'[+/]', Comment.Multiline), - ], - 'token_string': [ - (r'{', Punctuation, 'token_string_nest'), - (r'}', String, '#pop'), - include('root'), - ], - 'token_string_nest': [ - (r'{', Punctuation, '#push'), - (r'}', Punctuation, '#pop'), - include('root'), - ], - 'delimited_bracket': [ - (r'[^\[\]]+', String), - (r'\[', String, 'delimited_inside_bracket'), - (r'\]"', String, '#pop'), - ], - 'delimited_inside_bracket': [ - (r'[^\[\]]+', String), - (r'\[', String, '#push'), - (r'\]', String, '#pop'), - ], - 'delimited_parenthesis': [ - (r'[^\(\)]+', String), - (r'\(', String, 'delimited_inside_parenthesis'), - (r'\)"', String, '#pop'), - ], - 'delimited_inside_parenthesis': [ - (r'[^\(\)]+', String), - (r'\(', String, '#push'), - (r'\)', String, '#pop'), - ], - 'delimited_angle': [ - (r'[^<>]+', String), - (r'<', String, 'delimited_inside_angle'), - (r'>"', String, '#pop'), - ], - 
'delimited_inside_angle': [ - (r'[^<>]+', String), - (r'<', String, '#push'), - (r'>', String, '#pop'), - ], - 'delimited_curly': [ - (r'[^{}]+', String), - (r'{', String, 'delimited_inside_curly'), - (r'}"', String, '#pop'), - ], - 'delimited_inside_curly': [ - (r'[^{}]+', String), - (r'{', String, '#push'), - (r'}', String, '#pop'), - ], - } - - -class DelphiLexer(Lexer): - """ - For `Delphi `_ (Borland Object Pascal), - Turbo Pascal and Free Pascal source code. - - Additional options accepted: - - `turbopascal` - Highlight Turbo Pascal specific keywords (default: ``True``). - `delphi` - Highlight Borland Delphi specific keywords (default: ``True``). - `freepascal` - Highlight Free Pascal specific keywords (default: ``True``). - `units` - A list of units that should be considered builtin, supported are - ``System``, ``SysUtils``, ``Classes`` and ``Math``. - Default is to consider all of them builtin. - """ - name = 'Delphi' - aliases = ['delphi', 'pas', 'pascal', 'objectpascal'] - filenames = ['*.pas'] - mimetypes = ['text/x-pascal'] - - TURBO_PASCAL_KEYWORDS = [ - 'absolute', 'and', 'array', 'asm', 'begin', 'break', 'case', - 'const', 'constructor', 'continue', 'destructor', 'div', 'do', - 'downto', 'else', 'end', 'file', 'for', 'function', 'goto', - 'if', 'implementation', 'in', 'inherited', 'inline', 'interface', - 'label', 'mod', 'nil', 'not', 'object', 'of', 'on', 'operator', - 'or', 'packed', 'procedure', 'program', 'record', 'reintroduce', - 'repeat', 'self', 'set', 'shl', 'shr', 'string', 'then', 'to', - 'type', 'unit', 'until', 'uses', 'var', 'while', 'with', 'xor' - ] - - DELPHI_KEYWORDS = [ - 'as', 'class', 'except', 'exports', 'finalization', 'finally', - 'initialization', 'is', 'library', 'on', 'property', 'raise', - 'threadvar', 'try' - ] - - FREE_PASCAL_KEYWORDS = [ - 'dispose', 'exit', 'false', 'new', 'true' - ] - - BLOCK_KEYWORDS = set([ - 'begin', 'class', 'const', 'constructor', 'destructor', 'end', - 'finalization', 'function', 
'implementation', 'initialization', - 'label', 'library', 'operator', 'procedure', 'program', 'property', - 'record', 'threadvar', 'type', 'unit', 'uses', 'var' - ]) - - FUNCTION_MODIFIERS = set([ - 'alias', 'cdecl', 'export', 'inline', 'interrupt', 'nostackframe', - 'pascal', 'register', 'safecall', 'softfloat', 'stdcall', - 'varargs', 'name', 'dynamic', 'near', 'virtual', 'external', - 'override', 'assembler' - ]) - - # XXX: those aren't global. but currently we know no way for defining - # them just for the type context. - DIRECTIVES = set([ - 'absolute', 'abstract', 'assembler', 'cppdecl', 'default', 'far', - 'far16', 'forward', 'index', 'oldfpccall', 'private', 'protected', - 'published', 'public' - ]) - - BUILTIN_TYPES = set([ - 'ansichar', 'ansistring', 'bool', 'boolean', 'byte', 'bytebool', - 'cardinal', 'char', 'comp', 'currency', 'double', 'dword', - 'extended', 'int64', 'integer', 'iunknown', 'longbool', 'longint', - 'longword', 'pansichar', 'pansistring', 'pbool', 'pboolean', - 'pbyte', 'pbytearray', 'pcardinal', 'pchar', 'pcomp', 'pcurrency', - 'pdate', 'pdatetime', 'pdouble', 'pdword', 'pextended', 'phandle', - 'pint64', 'pinteger', 'plongint', 'plongword', 'pointer', - 'ppointer', 'pshortint', 'pshortstring', 'psingle', 'psmallint', - 'pstring', 'pvariant', 'pwidechar', 'pwidestring', 'pword', - 'pwordarray', 'pwordbool', 'real', 'real48', 'shortint', - 'shortstring', 'single', 'smallint', 'string', 'tclass', 'tdate', - 'tdatetime', 'textfile', 'thandle', 'tobject', 'ttime', 'variant', - 'widechar', 'widestring', 'word', 'wordbool' - ]) - - BUILTIN_UNITS = { - 'System': [ - 'abs', 'acquireexceptionobject', 'addr', 'ansitoutf8', - 'append', 'arctan', 'assert', 'assigned', 'assignfile', - 'beginthread', 'blockread', 'blockwrite', 'break', 'chdir', - 'chr', 'close', 'closefile', 'comptocurrency', 'comptodouble', - 'concat', 'continue', 'copy', 'cos', 'dec', 'delete', - 'dispose', 'doubletocomp', 'endthread', 'enummodules', - 'enumresourcemodules', 
'eof', 'eoln', 'erase', 'exceptaddr', - 'exceptobject', 'exclude', 'exit', 'exp', 'filepos', 'filesize', - 'fillchar', 'finalize', 'findclasshinstance', 'findhinstance', - 'findresourcehinstance', 'flush', 'frac', 'freemem', - 'get8087cw', 'getdir', 'getlasterror', 'getmem', - 'getmemorymanager', 'getmodulefilename', 'getvariantmanager', - 'halt', 'hi', 'high', 'inc', 'include', 'initialize', 'insert', - 'int', 'ioresult', 'ismemorymanagerset', 'isvariantmanagerset', - 'length', 'ln', 'lo', 'low', 'mkdir', 'move', 'new', 'odd', - 'olestrtostring', 'olestrtostrvar', 'ord', 'paramcount', - 'paramstr', 'pi', 'pos', 'pred', 'ptr', 'pucs4chars', 'random', - 'randomize', 'read', 'readln', 'reallocmem', - 'releaseexceptionobject', 'rename', 'reset', 'rewrite', 'rmdir', - 'round', 'runerror', 'seek', 'seekeof', 'seekeoln', - 'set8087cw', 'setlength', 'setlinebreakstyle', - 'setmemorymanager', 'setstring', 'settextbuf', - 'setvariantmanager', 'sin', 'sizeof', 'slice', 'sqr', 'sqrt', - 'str', 'stringofchar', 'stringtoolestr', 'stringtowidechar', - 'succ', 'swap', 'trunc', 'truncate', 'typeinfo', - 'ucs4stringtowidestring', 'unicodetoutf8', 'uniquestring', - 'upcase', 'utf8decode', 'utf8encode', 'utf8toansi', - 'utf8tounicode', 'val', 'vararrayredim', 'varclear', - 'widecharlentostring', 'widecharlentostrvar', - 'widechartostring', 'widechartostrvar', - 'widestringtoucs4string', 'write', 'writeln' - ], - 'SysUtils': [ - 'abort', 'addexitproc', 'addterminateproc', 'adjustlinebreaks', - 'allocmem', 'ansicomparefilename', 'ansicomparestr', - 'ansicomparetext', 'ansidequotedstr', 'ansiextractquotedstr', - 'ansilastchar', 'ansilowercase', 'ansilowercasefilename', - 'ansipos', 'ansiquotedstr', 'ansisamestr', 'ansisametext', - 'ansistrcomp', 'ansistricomp', 'ansistrlastchar', 'ansistrlcomp', - 'ansistrlicomp', 'ansistrlower', 'ansistrpos', 'ansistrrscan', - 'ansistrscan', 'ansistrupper', 'ansiuppercase', - 'ansiuppercasefilename', 'appendstr', 'assignstr', 'beep', - 'booltostr', 
'bytetocharindex', 'bytetocharlen', 'bytetype', - 'callterminateprocs', 'changefileext', 'charlength', - 'chartobyteindex', 'chartobytelen', 'comparemem', 'comparestr', - 'comparetext', 'createdir', 'createguid', 'currentyear', - 'currtostr', 'currtostrf', 'date', 'datetimetofiledate', - 'datetimetostr', 'datetimetostring', 'datetimetosystemtime', - 'datetimetotimestamp', 'datetostr', 'dayofweek', 'decodedate', - 'decodedatefully', 'decodetime', 'deletefile', 'directoryexists', - 'diskfree', 'disksize', 'disposestr', 'encodedate', 'encodetime', - 'exceptionerrormessage', 'excludetrailingbackslash', - 'excludetrailingpathdelimiter', 'expandfilename', - 'expandfilenamecase', 'expanduncfilename', 'extractfiledir', - 'extractfiledrive', 'extractfileext', 'extractfilename', - 'extractfilepath', 'extractrelativepath', 'extractshortpathname', - 'fileage', 'fileclose', 'filecreate', 'filedatetodatetime', - 'fileexists', 'filegetattr', 'filegetdate', 'fileisreadonly', - 'fileopen', 'fileread', 'filesearch', 'fileseek', 'filesetattr', - 'filesetdate', 'filesetreadonly', 'filewrite', 'finalizepackage', - 'findclose', 'findcmdlineswitch', 'findfirst', 'findnext', - 'floattocurr', 'floattodatetime', 'floattodecimal', 'floattostr', - 'floattostrf', 'floattotext', 'floattotextfmt', 'fmtloadstr', - 'fmtstr', 'forcedirectories', 'format', 'formatbuf', 'formatcurr', - 'formatdatetime', 'formatfloat', 'freeandnil', 'getcurrentdir', - 'getenvironmentvariable', 'getfileversion', 'getformatsettings', - 'getlocaleformatsettings', 'getmodulename', 'getpackagedescription', - 'getpackageinfo', 'gettime', 'guidtostring', 'incamonth', - 'includetrailingbackslash', 'includetrailingpathdelimiter', - 'incmonth', 'initializepackage', 'interlockeddecrement', - 'interlockedexchange', 'interlockedexchangeadd', - 'interlockedincrement', 'inttohex', 'inttostr', 'isdelimiter', - 'isequalguid', 'isleapyear', 'ispathdelimiter', 'isvalidident', - 'languages', 'lastdelimiter', 'loadpackage', 'loadstr', - 
'lowercase', 'msecstotimestamp', 'newstr', 'nextcharindex', 'now', - 'outofmemoryerror', 'quotedstr', 'raiselastoserror', - 'raiselastwin32error', 'removedir', 'renamefile', 'replacedate', - 'replacetime', 'safeloadlibrary', 'samefilename', 'sametext', - 'setcurrentdir', 'showexception', 'sleep', 'stralloc', 'strbufsize', - 'strbytetype', 'strcat', 'strcharlength', 'strcomp', 'strcopy', - 'strdispose', 'strecopy', 'strend', 'strfmt', 'stricomp', - 'stringreplace', 'stringtoguid', 'strlcat', 'strlcomp', 'strlcopy', - 'strlen', 'strlfmt', 'strlicomp', 'strlower', 'strmove', 'strnew', - 'strnextchar', 'strpas', 'strpcopy', 'strplcopy', 'strpos', - 'strrscan', 'strscan', 'strtobool', 'strtobooldef', 'strtocurr', - 'strtocurrdef', 'strtodate', 'strtodatedef', 'strtodatetime', - 'strtodatetimedef', 'strtofloat', 'strtofloatdef', 'strtoint', - 'strtoint64', 'strtoint64def', 'strtointdef', 'strtotime', - 'strtotimedef', 'strupper', 'supports', 'syserrormessage', - 'systemtimetodatetime', 'texttofloat', 'time', 'timestamptodatetime', - 'timestamptomsecs', 'timetostr', 'trim', 'trimleft', 'trimright', - 'tryencodedate', 'tryencodetime', 'tryfloattocurr', 'tryfloattodatetime', - 'trystrtobool', 'trystrtocurr', 'trystrtodate', 'trystrtodatetime', - 'trystrtofloat', 'trystrtoint', 'trystrtoint64', 'trystrtotime', - 'unloadpackage', 'uppercase', 'widecomparestr', 'widecomparetext', - 'widefmtstr', 'wideformat', 'wideformatbuf', 'widelowercase', - 'widesamestr', 'widesametext', 'wideuppercase', 'win32check', - 'wraptext' - ], - 'Classes': [ - 'activateclassgroup', 'allocatehwnd', 'bintohex', 'checksynchronize', - 'collectionsequal', 'countgenerations', 'deallocatehwnd', 'equalrect', - 'extractstrings', 'findclass', 'findglobalcomponent', 'getclass', - 'groupdescendantswith', 'hextobin', 'identtoint', - 'initinheritedcomponent', 'inttoident', 'invalidpoint', - 'isuniqueglobalcomponentname', 'linestart', 'objectbinarytotext', - 'objectresourcetotext', 'objecttexttobinary', 
'objecttexttoresource', - 'pointsequal', 'readcomponentres', 'readcomponentresex', - 'readcomponentresfile', 'rect', 'registerclass', 'registerclassalias', - 'registerclasses', 'registercomponents', 'registerintegerconsts', - 'registernoicon', 'registernonactivex', 'smallpoint', 'startclassgroup', - 'teststreamformat', 'unregisterclass', 'unregisterclasses', - 'unregisterintegerconsts', 'unregistermoduleclasses', - 'writecomponentresfile' - ], - 'Math': [ - 'arccos', 'arccosh', 'arccot', 'arccoth', 'arccsc', 'arccsch', 'arcsec', - 'arcsech', 'arcsin', 'arcsinh', 'arctan2', 'arctanh', 'ceil', - 'comparevalue', 'cosecant', 'cosh', 'cot', 'cotan', 'coth', 'csc', - 'csch', 'cycletodeg', 'cycletograd', 'cycletorad', 'degtocycle', - 'degtograd', 'degtorad', 'divmod', 'doubledecliningbalance', - 'ensurerange', 'floor', 'frexp', 'futurevalue', 'getexceptionmask', - 'getprecisionmode', 'getroundmode', 'gradtocycle', 'gradtodeg', - 'gradtorad', 'hypot', 'inrange', 'interestpayment', 'interestrate', - 'internalrateofreturn', 'intpower', 'isinfinite', 'isnan', 'iszero', - 'ldexp', 'lnxp1', 'log10', 'log2', 'logn', 'max', 'maxintvalue', - 'maxvalue', 'mean', 'meanandstddev', 'min', 'minintvalue', 'minvalue', - 'momentskewkurtosis', 'netpresentvalue', 'norm', 'numberofperiods', - 'payment', 'periodpayment', 'poly', 'popnstddev', 'popnvariance', - 'power', 'presentvalue', 'radtocycle', 'radtodeg', 'radtograd', - 'randg', 'randomrange', 'roundto', 'samevalue', 'sec', 'secant', - 'sech', 'setexceptionmask', 'setprecisionmode', 'setroundmode', - 'sign', 'simpleroundto', 'sincos', 'sinh', 'slndepreciation', 'stddev', - 'sum', 'sumint', 'sumofsquares', 'sumsandsquares', 'syddepreciation', - 'tan', 'tanh', 'totalvariance', 'variance' - ] - } - - ASM_REGISTERS = set([ - 'ah', 'al', 'ax', 'bh', 'bl', 'bp', 'bx', 'ch', 'cl', 'cr0', - 'cr1', 'cr2', 'cr3', 'cr4', 'cs', 'cx', 'dh', 'di', 'dl', 'dr0', - 'dr1', 'dr2', 'dr3', 'dr4', 'dr5', 'dr6', 'dr7', 'ds', 'dx', - 'eax', 'ebp', 'ebx', 'ecx', 
'edi', 'edx', 'es', 'esi', 'esp', - 'fs', 'gs', 'mm0', 'mm1', 'mm2', 'mm3', 'mm4', 'mm5', 'mm6', - 'mm7', 'si', 'sp', 'ss', 'st0', 'st1', 'st2', 'st3', 'st4', 'st5', - 'st6', 'st7', 'xmm0', 'xmm1', 'xmm2', 'xmm3', 'xmm4', 'xmm5', - 'xmm6', 'xmm7' - ]) - - ASM_INSTRUCTIONS = set([ - 'aaa', 'aad', 'aam', 'aas', 'adc', 'add', 'and', 'arpl', 'bound', - 'bsf', 'bsr', 'bswap', 'bt', 'btc', 'btr', 'bts', 'call', 'cbw', - 'cdq', 'clc', 'cld', 'cli', 'clts', 'cmc', 'cmova', 'cmovae', - 'cmovb', 'cmovbe', 'cmovc', 'cmovcxz', 'cmove', 'cmovg', - 'cmovge', 'cmovl', 'cmovle', 'cmovna', 'cmovnae', 'cmovnb', - 'cmovnbe', 'cmovnc', 'cmovne', 'cmovng', 'cmovnge', 'cmovnl', - 'cmovnle', 'cmovno', 'cmovnp', 'cmovns', 'cmovnz', 'cmovo', - 'cmovp', 'cmovpe', 'cmovpo', 'cmovs', 'cmovz', 'cmp', 'cmpsb', - 'cmpsd', 'cmpsw', 'cmpxchg', 'cmpxchg486', 'cmpxchg8b', 'cpuid', - 'cwd', 'cwde', 'daa', 'das', 'dec', 'div', 'emms', 'enter', 'hlt', - 'ibts', 'icebp', 'idiv', 'imul', 'in', 'inc', 'insb', 'insd', - 'insw', 'int', 'int01', 'int03', 'int1', 'int3', 'into', 'invd', - 'invlpg', 'iret', 'iretd', 'iretw', 'ja', 'jae', 'jb', 'jbe', - 'jc', 'jcxz', 'jcxz', 'je', 'jecxz', 'jg', 'jge', 'jl', 'jle', - 'jmp', 'jna', 'jnae', 'jnb', 'jnbe', 'jnc', 'jne', 'jng', 'jnge', - 'jnl', 'jnle', 'jno', 'jnp', 'jns', 'jnz', 'jo', 'jp', 'jpe', - 'jpo', 'js', 'jz', 'lahf', 'lar', 'lcall', 'lds', 'lea', 'leave', - 'les', 'lfs', 'lgdt', 'lgs', 'lidt', 'ljmp', 'lldt', 'lmsw', - 'loadall', 'loadall286', 'lock', 'lodsb', 'lodsd', 'lodsw', - 'loop', 'loope', 'loopne', 'loopnz', 'loopz', 'lsl', 'lss', 'ltr', - 'mov', 'movd', 'movq', 'movsb', 'movsd', 'movsw', 'movsx', - 'movzx', 'mul', 'neg', 'nop', 'not', 'or', 'out', 'outsb', 'outsd', - 'outsw', 'pop', 'popa', 'popad', 'popaw', 'popf', 'popfd', 'popfw', - 'push', 'pusha', 'pushad', 'pushaw', 'pushf', 'pushfd', 'pushfw', - 'rcl', 'rcr', 'rdmsr', 'rdpmc', 'rdshr', 'rdtsc', 'rep', 'repe', - 'repne', 'repnz', 'repz', 'ret', 'retf', 'retn', 'rol', 'ror', - 'rsdc', 
'rsldt', 'rsm', 'sahf', 'sal', 'salc', 'sar', 'sbb', - 'scasb', 'scasd', 'scasw', 'seta', 'setae', 'setb', 'setbe', - 'setc', 'setcxz', 'sete', 'setg', 'setge', 'setl', 'setle', - 'setna', 'setnae', 'setnb', 'setnbe', 'setnc', 'setne', 'setng', - 'setnge', 'setnl', 'setnle', 'setno', 'setnp', 'setns', 'setnz', - 'seto', 'setp', 'setpe', 'setpo', 'sets', 'setz', 'sgdt', 'shl', - 'shld', 'shr', 'shrd', 'sidt', 'sldt', 'smi', 'smint', 'smintold', - 'smsw', 'stc', 'std', 'sti', 'stosb', 'stosd', 'stosw', 'str', - 'sub', 'svdc', 'svldt', 'svts', 'syscall', 'sysenter', 'sysexit', - 'sysret', 'test', 'ud1', 'ud2', 'umov', 'verr', 'verw', 'wait', - 'wbinvd', 'wrmsr', 'wrshr', 'xadd', 'xbts', 'xchg', 'xlat', - 'xlatb', 'xor' - ]) - - def __init__(self, **options): - Lexer.__init__(self, **options) - self.keywords = set() - if get_bool_opt(options, 'turbopascal', True): - self.keywords.update(self.TURBO_PASCAL_KEYWORDS) - if get_bool_opt(options, 'delphi', True): - self.keywords.update(self.DELPHI_KEYWORDS) - if get_bool_opt(options, 'freepascal', True): - self.keywords.update(self.FREE_PASCAL_KEYWORDS) - self.builtins = set() - for unit in get_list_opt(options, 'units', self.BUILTIN_UNITS.keys()): - self.builtins.update(self.BUILTIN_UNITS[unit]) - - def get_tokens_unprocessed(self, text): - scanner = Scanner(text, re.DOTALL | re.MULTILINE | re.IGNORECASE) - stack = ['initial'] - in_function_block = False - in_property_block = False - was_dot = False - next_token_is_function = False - next_token_is_property = False - collect_labels = False - block_labels = set() - brace_balance = [0, 0] - - while not scanner.eos: - token = Error - - if stack[-1] == 'initial': - if scanner.scan(r'\s+'): - token = Text - elif scanner.scan(r'\{.*?\}|\(\*.*?\*\)'): - if scanner.match.startswith('$'): - token = Comment.Preproc - else: - token = Comment.Multiline - elif scanner.scan(r'//.*?$'): - token = Comment.Single - elif scanner.scan(r'[-+*\/=<>:;,.@\^]'): - token = Operator - # stop label 
highlighting on next ";" - if collect_labels and scanner.match == ';': - collect_labels = False - elif scanner.scan(r'[\(\)\[\]]+'): - token = Punctuation - # abort function naming ``foo = Function(...)`` - next_token_is_function = False - # if we are in a function block we count the open - # braces because ootherwise it's impossible to - # determine the end of the modifier context - if in_function_block or in_property_block: - if scanner.match == '(': - brace_balance[0] += 1 - elif scanner.match == ')': - brace_balance[0] -= 1 - elif scanner.match == '[': - brace_balance[1] += 1 - elif scanner.match == ']': - brace_balance[1] -= 1 - elif scanner.scan(r'[A-Za-z_][A-Za-z_0-9]*'): - lowercase_name = scanner.match.lower() - if lowercase_name == 'result': - token = Name.Builtin.Pseudo - elif lowercase_name in self.keywords: - token = Keyword - # if we are in a special block and a - # block ending keyword occours (and the parenthesis - # is balanced) we end the current block context - if (in_function_block or in_property_block) and \ - lowercase_name in self.BLOCK_KEYWORDS and \ - brace_balance[0] <= 0 and \ - brace_balance[1] <= 0: - in_function_block = False - in_property_block = False - brace_balance = [0, 0] - block_labels = set() - if lowercase_name in ('label', 'goto'): - collect_labels = True - elif lowercase_name == 'asm': - stack.append('asm') - elif lowercase_name == 'property': - in_property_block = True - next_token_is_property = True - elif lowercase_name in ('procedure', 'operator', - 'function', 'constructor', - 'destructor'): - in_function_block = True - next_token_is_function = True - # we are in a function block and the current name - # is in the set of registered modifiers. 
highlight - # it as pseudo keyword - elif in_function_block and \ - lowercase_name in self.FUNCTION_MODIFIERS: - token = Keyword.Pseudo - # if we are in a property highlight some more - # modifiers - elif in_property_block and \ - lowercase_name in ('read', 'write'): - token = Keyword.Pseudo - next_token_is_function = True - # if the last iteration set next_token_is_function - # to true we now want this name highlighted as - # function. so do that and reset the state - elif next_token_is_function: - # Look if the next token is a dot. If yes it's - # not a function, but a class name and the - # part after the dot a function name - if scanner.test(r'\s*\.\s*'): - token = Name.Class - # it's not a dot, our job is done - else: - token = Name.Function - next_token_is_function = False - # same for properties - elif next_token_is_property: - token = Name.Property - next_token_is_property = False - # Highlight this token as label and add it - # to the list of known labels - elif collect_labels: - token = Name.Label - block_labels.add(scanner.match.lower()) - # name is in list of known labels - elif lowercase_name in block_labels: - token = Name.Label - elif lowercase_name in self.BUILTIN_TYPES: - token = Keyword.Type - elif lowercase_name in self.DIRECTIVES: - token = Keyword.Pseudo - # builtins are just builtins if the token - # before isn't a dot - elif not was_dot and lowercase_name in self.builtins: - token = Name.Builtin - else: - token = Name - elif scanner.scan(r"'"): - token = String - stack.append('string') - elif scanner.scan(r'\#(\d+|\$[0-9A-Fa-f]+)'): - token = String.Char - elif scanner.scan(r'\$[0-9A-Fa-f]+'): - token = Number.Hex - elif scanner.scan(r'\d+(?![eE]|\.[^.])'): - token = Number.Integer - elif scanner.scan(r'\d+(\.\d+([eE][+-]?\d+)?|[eE][+-]?\d+)'): - token = Number.Float - else: - # if the stack depth is deeper than once, pop - if len(stack) > 1: - stack.pop() - scanner.get_char() - - elif stack[-1] == 'string': - if scanner.scan(r"''"): - token 
= String.Escape - elif scanner.scan(r"'"): - token = String - stack.pop() - elif scanner.scan(r"[^']*"): - token = String - else: - scanner.get_char() - stack.pop() - - elif stack[-1] == 'asm': - if scanner.scan(r'\s+'): - token = Text - elif scanner.scan(r'end'): - token = Keyword - stack.pop() - elif scanner.scan(r'\{.*?\}|\(\*.*?\*\)'): - if scanner.match.startswith('$'): - token = Comment.Preproc - else: - token = Comment.Multiline - elif scanner.scan(r'//.*?$'): - token = Comment.Single - elif scanner.scan(r"'"): - token = String - stack.append('string') - elif scanner.scan(r'@@[A-Za-z_][A-Za-z_0-9]*'): - token = Name.Label - elif scanner.scan(r'[A-Za-z_][A-Za-z_0-9]*'): - lowercase_name = scanner.match.lower() - if lowercase_name in self.ASM_INSTRUCTIONS: - token = Keyword - elif lowercase_name in self.ASM_REGISTERS: - token = Name.Builtin - else: - token = Name - elif scanner.scan(r'[-+*\/=<>:;,.@\^]+'): - token = Operator - elif scanner.scan(r'[\(\)\[\]]+'): - token = Punctuation - elif scanner.scan(r'\$[0-9A-Fa-f]+'): - token = Number.Hex - elif scanner.scan(r'\d+(?![eE]|\.[^.])'): - token = Number.Integer - elif scanner.scan(r'\d+(\.\d+([eE][+-]?\d+)?|[eE][+-]?\d+)'): - token = Number.Float - else: - scanner.get_char() - stack.pop() - - # save the dot!!!11 - if scanner.match.strip(): - was_dot = scanner.match == '.' - yield scanner.start_pos, token, scanner.match or '' - - -class DylanLexer(RegexLexer): - """ - For the `Dylan `_ language. 
- - *New in Pygments 0.7.* - """ - - name = 'Dylan' - aliases = ['dylan'] - filenames = ['*.dylan', '*.dyl', '*.intr'] - mimetypes = ['text/x-dylan'] - - flags = re.IGNORECASE - - builtins = set([ - 'subclass', 'abstract', 'block', 'concrete', 'constant', 'class', - 'compiler-open', 'compiler-sideways', 'domain', 'dynamic', - 'each-subclass', 'exception', 'exclude', 'function', 'generic', - 'handler', 'inherited', 'inline', 'inline-only', 'instance', - 'interface', 'import', 'keyword', 'library', 'macro', 'method', - 'module', 'open', 'primary', 'required', 'sealed', 'sideways', - 'singleton', 'slot', 'thread', 'variable', 'virtual']) - - keywords = set([ - 'above', 'afterwards', 'begin', 'below', 'by', 'case', 'cleanup', - 'create', 'define', 'else', 'elseif', 'end', 'export', 'finally', - 'for', 'from', 'if', 'in', 'let', 'local', 'otherwise', 'rename', - 'select', 'signal', 'then', 'to', 'unless', 'until', 'use', 'when', - 'while']) - - operators = set([ - '~', '+', '-', '*', '|', '^', '=', '==', '~=', '~==', '<', '<=', - '>', '>=', '&', '|']) - - functions = set([ - 'abort', 'abs', 'add', 'add!', 'add-method', 'add-new', 'add-new!', - 'all-superclasses', 'always', 'any?', 'applicable-method?', 'apply', - 'aref', 'aref-setter', 'as', 'as-lowercase', 'as-lowercase!', - 'as-uppercase', 'as-uppercase!', 'ash', 'backward-iteration-protocol', - 'break', 'ceiling', 'ceiling/', 'cerror', 'check-type', 'choose', - 'choose-by', 'complement', 'compose', 'concatenate', 'concatenate-as', - 'condition-format-arguments', 'condition-format-string', 'conjoin', - 'copy-sequence', 'curry', 'default-handler', 'dimension', 'dimensions', - 'direct-subclasses', 'direct-superclasses', 'disjoin', 'do', - 'do-handlers', 'element', 'element-setter', 'empty?', 'error', 'even?', - 'every?', 'false-or', 'fill!', 'find-key', 'find-method', 'first', - 'first-setter', 'floor', 'floor/', 'forward-iteration-protocol', - 'function-arguments', 'function-return-values', - 'function-specializers', 
'gcd', 'generic-function-mandatory-keywords', - 'generic-function-methods', 'head', 'head-setter', 'identity', - 'initialize', 'instance?', 'integral?', 'intersection', - 'key-sequence', 'key-test', 'last', 'last-setter', 'lcm', 'limited', - 'list', 'logand', 'logbit?', 'logior', 'lognot', 'logxor', 'make', - 'map', 'map-as', 'map-into', 'max', 'member?', 'merge-hash-codes', - 'min', 'modulo', 'negative', 'negative?', 'next-method', - 'object-class', 'object-hash', 'odd?', 'one-of', 'pair', 'pop', - 'pop-last', 'positive?', 'push', 'push-last', 'range', 'rank', - 'rcurry', 'reduce', 'reduce1', 'remainder', 'remove', 'remove!', - 'remove-duplicates', 'remove-duplicates!', 'remove-key!', - 'remove-method', 'replace-elements!', 'replace-subsequence!', - 'restart-query', 'return-allowed?', 'return-description', - 'return-query', 'reverse', 'reverse!', 'round', 'round/', - 'row-major-index', 'second', 'second-setter', 'shallow-copy', - 'signal', 'singleton', 'size', 'size-setter', 'slot-initialized?', - 'sort', 'sort!', 'sorted-applicable-methods', 'subsequence-position', - 'subtype?', 'table-protocol', 'tail', 'tail-setter', 'third', - 'third-setter', 'truncate', 'truncate/', 'type-error-expected-type', - 'type-error-value', 'type-for-copy', 'type-union', 'union', 'values', - 'vector', 'zero?']) - - valid_name = '\\\\?[a-zA-Z0-9' + re.escape('!&*<>|^$%@_-+~?/=') + ']+' - - def get_tokens_unprocessed(self, text): - for index, token, value in RegexLexer.get_tokens_unprocessed(self, text): - if token is Name: - lowercase_value = value.lower() - if lowercase_value in self.builtins: - yield index, Name.Builtin, value - continue - if lowercase_value in self.keywords: - yield index, Keyword, value - continue - if lowercase_value in self.functions: - yield index, Name.Builtin, value - continue - if lowercase_value in self.operators: - yield index, Operator, value - continue - yield index, token, value - - tokens = { - 'root': [ - # Whitespace - (r'\s+', Text), - - # single 
line comment - (r'//.*?\n', Comment.Single), - - # lid header - (r'([A-Za-z0-9-]+)(:)([ \t]*)(.*(?:\n[ \t].+)*)', - bygroups(Name.Attribute, Operator, Text, String)), - - ('', Text, 'code') # no header match, switch to code - ], - 'code': [ - # Whitespace - (r'\s+', Text), - - # single line comment - (r'//.*?\n', Comment.Single), - - # multi-line comment - (r'/\*', Comment.Multiline, 'comment'), - - # strings and characters - (r'"', String, 'string'), - (r"'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char), - - # binary integer - (r'#[bB][01]+', Number), - - # octal integer - (r'#[oO][0-7]+', Number.Oct), - - # floating point - (r'[-+]?(\d*\.\d+(e[-+]?\d+)?|\d+(\.\d*)?e[-+]?\d+)', Number.Float), - - # decimal integer - (r'[-+]?\d+', Number.Integer), - - # hex integer - (r'#[xX][0-9a-fA-F]+', Number.Hex), - - # Macro parameters - (r'(\?' + valid_name + ')(:)' - r'(token|name|variable|expression|body|case-body|\*)', - bygroups(Name.Tag, Operator, Name.Builtin)), - (r'(\?)(:)(token|name|variable|expression|body|case-body|\*)', - bygroups(Name.Tag, Operator, Name.Builtin)), - (r'\?' + valid_name, Name.Tag), - - # Punctuation - (r'(=>|::|#\(|#\[|##|\?|\?\?|\?=|[(){}\[\],\.;])', Punctuation), - - # Most operators are picked up as names and then re-flagged. - # This one isn't valid in a name though, so we pick it up now. - (r':=', Operator), - - # Pick up #t / #f before we match other stuff with #. - (r'#[tf]', Literal), - - # #"foo" style keywords - (r'#"', String.Symbol, 'keyword'), - - # #rest, #key, #all-keys, etc. - (r'#[a-zA-Z0-9-]+', Keyword), - - # required-init-keyword: style keywords. - (valid_name + ':', Keyword), - - # class names - (r'<' + valid_name + '>', Name.Class), - - # define variable forms. - (r'\*' + valid_name + '\*', Name.Variable.Global), - - # define constant forms. - (r'\$' + valid_name, Name.Constant), - - # everything else. We re-flag some of these in the method above. 
- (valid_name, Name), - ], - 'comment': [ - (r'[^*/]', Comment.Multiline), - (r'/\*', Comment.Multiline, '#push'), - (r'\*/', Comment.Multiline, '#pop'), - (r'[*/]', Comment.Multiline) - ], - 'keyword': [ - (r'"', String.Symbol, '#pop'), - (r'[^\\"]+', String.Symbol), # all other characters - ], - 'string': [ - (r'"', String, '#pop'), - (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), - (r'[^\\"\n]+', String), # all other characters - (r'\\\n', String), # line continuation - (r'\\', String), # stray backslash - ] - } - - -class DylanLidLexer(RegexLexer): - """ - For Dylan LID (Library Interchange Definition) files. - - *New in Pygments 1.6.* - """ - - name = 'DylanLID' - aliases = ['dylan-lid', 'lid'] - filenames = ['*.lid', '*.hdp'] - mimetypes = ['text/x-dylan-lid'] - - flags = re.IGNORECASE - - tokens = { - 'root': [ - # Whitespace - (r'\s+', Text), - - # single line comment - (r'//.*?\n', Comment.Single), - - # lid header - (r'(.*?)(:)([ \t]*)(.*(?:\n[ \t].+)*)', - bygroups(Name.Attribute, Operator, Text, String)), - ] - } - - -class DylanConsoleLexer(Lexer): - """ - For Dylan interactive console output like: - - .. sourcecode:: dylan-console - - ? let a = 1; - => 1 - ? a - => 1 - - This is based on a copy of the RubyConsoleLexer. 
- - *New in Pygments 1.6.* - """ - name = 'Dylan session' - aliases = ['dylan-console', 'dylan-repl'] - filenames = ['*.dylan-console'] - mimetypes = ['text/x-dylan-console'] - - _line_re = re.compile('.*?\n') - _prompt_re = re.compile('\?| ') - - def get_tokens_unprocessed(self, text): - dylexer = DylanLexer(**self.options) - - curcode = '' - insertions = [] - for match in self._line_re.finditer(text): - line = match.group() - m = self._prompt_re.match(line) - if m is not None: - end = m.end() - insertions.append((len(curcode), - [(0, Generic.Prompt, line[:end])])) - curcode += line[end:] - else: - if curcode: - for item in do_insertions(insertions, - dylexer.get_tokens_unprocessed(curcode)): - yield item - curcode = '' - insertions = [] - yield match.start(), Generic.Output, line - if curcode: - for item in do_insertions(insertions, - dylexer.get_tokens_unprocessed(curcode)): - yield item - - -def objective(baselexer): - """ - Generate a subclass of baselexer that accepts the Objective-C syntax - extensions. - """ - - # Have to be careful not to accidentally match JavaDoc/Doxygen syntax here, - # since that's quite common in ordinary C/C++ files. It's OK to match - # JavaDoc/Doxygen keywords that only apply to Objective-C, mind. - # - # The upshot of this is that we CANNOT match @class or @interface - _oc_keywords = re.compile(r'@(?:end|implementation|protocol)') - - # Matches [ ? identifier ( identifier ? ] | identifier? : ) - # (note the identifier is *optional* when there is a ':'!) - _oc_message = re.compile(r'\[\s*[a-zA-Z_][a-zA-Z0-9_]*\s+' - r'(?:[a-zA-Z_][a-zA-Z0-9_]*\s*\]|' - r'(?:[a-zA-Z_][a-zA-Z0-9_]*)?:)') - - class GeneratedObjectiveCVariant(baselexer): - """ - Implements Objective-C syntax on top of an existing C family lexer. 
- """ - - tokens = { - 'statements': [ - (r'@"', String, 'string'), - (r"@'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", - String.Char), - (r'@(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float), - (r'@(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), - (r'@0x[0-9a-fA-F]+[Ll]?', Number.Hex), - (r'@0[0-7]+[Ll]?', Number.Oct), - (r'@\d+[Ll]?', Number.Integer), - (r'(in|@selector|@private|@protected|@public|@encode|' - r'@synchronized|@try|@throw|@catch|@finally|@end|@property|' - r'@synthesize|@dynamic|@optional)\b', Keyword), - (r'(id|Class|IMP|SEL|BOOL|IBOutlet|IBAction|unichar)\b', - Keyword.Type), - (r'@(true|false|YES|NO)\n', Name.Builtin), - (r'(YES|NO|nil)\b', Name.Builtin), - (r'(@interface|@implementation)(\s+)', bygroups(Keyword, Text), - ('#pop', 'oc_classname')), - (r'(@class|@protocol)(\s+)', bygroups(Keyword, Text), - ('#pop', 'oc_forward_classname')), - # @ can also prefix other expressions like @{...} or @(...) - (r'@', Punctuation), - inherit, - ], - 'oc_classname' : [ - # interface definition that inherits - ('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*:\s*)([a-zA-Z$_][a-zA-Z0-9$_]*)?', - bygroups(Name.Class, Text, Name.Class), '#pop'), - # interface definition for a category - ('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*)(\([a-zA-Z$_][a-zA-Z0-9$_]*\))', - bygroups(Name.Class, Text, Name.Label), '#pop'), - # simple interface / implementation - ('([a-zA-Z$_][a-zA-Z0-9$_]*)', Name.Class, '#pop') - ], - 'oc_forward_classname' : [ - ('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*,\s*)', - bygroups(Name.Class, Text), 'oc_forward_classname'), - ('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*;?)', - bygroups(Name.Class, Text), '#pop') - ], - 'root': [ - # methods - (r'^([-+])(\s*)' # method marker - r'(\(.*?\))?(\s*)' # return type - r'([a-zA-Z$_][a-zA-Z0-9$_]*:?)', # begin of method name - bygroups(Keyword, Text, using(this), - Text, Name.Function), - 'method'), - inherit, - ], - 'method': [ - include('whitespace'), - # TODO unsure if ellipses are allowed elsewhere, see - # discussion in Issue 789 - 
(r',', Punctuation), - (r'\.\.\.', Punctuation), - (r'(\(.*?\))([a-zA-Z$_][a-zA-Z0-9$_]*)', bygroups(using(this), - Name.Variable)), - (r'[a-zA-Z$_][a-zA-Z0-9$_]*:', Name.Function), - (';', Punctuation, '#pop'), - ('{', Punctuation, 'function'), - ('', Text, '#pop'), - ], - } - - def analyse_text(text): - if _oc_keywords.search(text): - return 1.0 - elif '@"' in text: # strings - return 0.8 - elif _oc_message.search(text): - return 0.8 - return 0 - - return GeneratedObjectiveCVariant - - -class ObjectiveCLexer(objective(CLexer)): - """ - For Objective-C source code with preprocessor directives. - """ - - name = 'Objective-C' - aliases = ['objective-c', 'objectivec', 'obj-c', 'objc'] - filenames = ['*.m', '*.h'] - mimetypes = ['text/x-objective-c'] - priority = 0.05 # Lower than C - - -class ObjectiveCppLexer(objective(CppLexer)): - """ - For Objective-C++ source code with preprocessor directives. - """ - - name = 'Objective-C++' - aliases = ['objective-c++', 'objectivec++', 'obj-c++', 'objc++'] - filenames = ['*.mm', '*.hh'] - mimetypes = ['text/x-objective-c++'] - priority = 0.05 # Lower than C++ - - -class FortranLexer(RegexLexer): - """ - Lexer for FORTRAN 90 code. 
- - *New in Pygments 0.10.* - """ - name = 'Fortran' - aliases = ['fortran'] - filenames = ['*.f', '*.f90', '*.F', '*.F90'] - mimetypes = ['text/x-fortran'] - flags = re.IGNORECASE - - # Data Types: INTEGER, REAL, COMPLEX, LOGICAL, CHARACTER and DOUBLE PRECISION - # Operators: **, *, +, -, /, <, >, <=, >=, ==, /= - # Logical (?): NOT, AND, OR, EQV, NEQV - - # Builtins: - # http://gcc.gnu.org/onlinedocs/gcc-3.4.6/g77/Table-of-Intrinsic-Functions.html - - tokens = { - 'root': [ - (r'!.*\n', Comment), - include('strings'), - include('core'), - (r'[a-z][a-z0-9_]*', Name.Variable), - include('nums'), - (r'[\s]+', Text), - ], - 'core': [ - # Statements - (r'\b(ABSTRACT|ACCEPT|ALLOCATABLE|ALLOCATE|ARRAY|ASSIGN|ASYNCHRONOUS|' - r'BACKSPACE|BIND|BLOCK( DATA)?|BYTE|CALL|CASE|CLASS|CLOSE|COMMON|CONTAINS|' - r'CONTINUE|CYCLE|DATA|DEALLOCATE|DECODE|DEFERRED|DIMENSION|DO|' - r'ELEMENTAL|ELSE|ENCODE|END( FILE)?|ENDIF|ENTRY|ENUMERATOR|EQUIVALENCE|' - r'EXIT|EXTERNAL|EXTRINSIC|FINAL|FORALL|FORMAT|FUNCTION|GENERIC|' - r'GOTO|IF|IMPLICIT|IMPORT|INCLUDE|INQUIRE|INTENT|INTERFACE|' - r'INTRINSIC|MODULE|NAMELIST|NULLIFY|NONE|NON_INTRINSIC|' - r'NON_OVERRIDABLE|NOPASS|OPEN|OPTIONAL|OPTIONS|PARAMETER|PASS|' - r'PAUSE|POINTER|PRINT|PRIVATE|PROGRAM|PROTECTED|PUBLIC|PURE|READ|' - r'RECURSIVE|RESULT|RETURN|REWIND|SAVE|SELECT|SEQUENCE|STOP|SUBROUTINE|' - r'TARGET|THEN|TYPE|USE|VALUE|VOLATILE|WHERE|WRITE|WHILE)\s*\b', - Keyword), - - # Data Types - (r'\b(CHARACTER|COMPLEX|DOUBLE PRECISION|DOUBLE COMPLEX|INTEGER|' - r'LOGICAL|REAL|C_INT|C_SHORT|C_LONG|C_LONG_LONG|C_SIGNED_CHAR|' - r'C_SIZE_T|C_INT8_T|C_INT16_T|C_INT32_T|C_INT64_T|C_INT_LEAST8_T|' - r'C_INT_LEAST16_T|C_INT_LEAST32_T|C_INT_LEAST64_T|C_INT_FAST8_T|' - r'C_INT_FAST16_T|C_INT_FAST32_T|C_INT_FAST64_T|C_INTMAX_T|' - r'C_INTPTR_T|C_FLOAT|C_DOUBLE|C_LONG_DOUBLE|C_FLOAT_COMPLEX|' - r'C_DOUBLE_COMPLEX|C_LONG_DOUBLE_COMPLEX|C_BOOL|C_CHAR|C_PTR|' - r'C_FUNPTR)\s*\b', - Keyword.Type), - - # Operators - (r'(\*\*|\*|\+|-|\/|<|>|<=|>=|==|\/=|=)', 
Operator), - - (r'(::)', Keyword.Declaration), - - (r'[(),:&%;]', Punctuation), - - # Intrinsics - (r'\b(Abort|Abs|Access|AChar|ACos|AdjustL|AdjustR|AImag|AInt|Alarm|' - r'All|Allocated|ALog|AMax|AMin|AMod|And|ANInt|Any|ASin|Associated|' - r'ATan|BesJ|BesJN|BesY|BesYN|Bit_Size|BTest|CAbs|CCos|Ceiling|' - r'CExp|Char|ChDir|ChMod|CLog|Cmplx|Command_Argument_Count|Complex|' - r'Conjg|Cos|CosH|Count|CPU_Time|CShift|CSin|CSqRt|CTime|C_Funloc|' - r'C_Loc|C_Associated|C_Null_Ptr|C_Null_Funptr|C_F_Pointer|' - r'C_Null_Char|C_Alert|C_Backspace|C_Form_Feed|C_New_Line|' - r'C_Carriage_Return|C_Horizontal_Tab|C_Vertical_Tab|' - r'DAbs|DACos|DASin|DATan|Date_and_Time|DbesJ|' - r'DbesJ|DbesJN|DbesY|DbesY|DbesYN|Dble|DCos|DCosH|DDiM|DErF|DErFC|' - r'DExp|Digits|DiM|DInt|DLog|DLog|DMax|DMin|DMod|DNInt|Dot_Product|' - r'DProd|DSign|DSinH|DSin|DSqRt|DTanH|DTan|DTime|EOShift|Epsilon|' - r'ErF|ErFC|ETime|Exit|Exp|Exponent|Extends_Type_Of|FDate|FGet|' - r'FGetC|Float|Floor|Flush|FNum|FPutC|FPut|Fraction|FSeek|FStat|' - r'FTell|GError|GetArg|Get_Command|Get_Command_Argument|' - r'Get_Environment_Variable|GetCWD|GetEnv|GetGId|GetLog|GetPId|' - r'GetUId|GMTime|HostNm|Huge|IAbs|IAChar|IAnd|IArgC|IBClr|IBits|' - r'IBSet|IChar|IDate|IDiM|IDInt|IDNInt|IEOr|IErrNo|IFix|Imag|' - r'ImagPart|Index|Int|IOr|IRand|IsaTty|IShft|IShftC|ISign|' - r'Iso_C_Binding|Is_Iostat_End|Is_Iostat_Eor|ITime|Kill|Kind|' - r'LBound|Len|Len_Trim|LGe|LGt|Link|LLe|LLt|LnBlnk|Loc|Log|' - r'Logical|Long|LShift|LStat|LTime|MatMul|Max|MaxExponent|MaxLoc|' - r'MaxVal|MClock|Merge|Move_Alloc|Min|MinExponent|MinLoc|MinVal|' - r'Mod|Modulo|MvBits|Nearest|New_Line|NInt|Not|Or|Pack|PError|' - r'Precision|Present|Product|Radix|Rand|Random_Number|Random_Seed|' - r'Range|Real|RealPart|Rename|Repeat|Reshape|RRSpacing|RShift|' - r'Same_Type_As|Scale|Scan|Second|Selected_Int_Kind|' - r'Selected_Real_Kind|Set_Exponent|Shape|Short|Sign|Signal|SinH|' - r'Sin|Sleep|Sngl|Spacing|Spread|SqRt|SRand|Stat|Sum|SymLnk|' - 
r'System|System_Clock|Tan|TanH|Time|Tiny|Transfer|Transpose|Trim|' - r'TtyNam|UBound|UMask|Unlink|Unpack|Verify|XOr|ZAbs|ZCos|ZExp|' - r'ZLog|ZSin|ZSqRt)\s*\b', - Name.Builtin), - - # Booleans - (r'\.(true|false)\.', Name.Builtin), - # Comparing Operators - (r'\.(eq|ne|lt|le|gt|ge|not|and|or|eqv|neqv)\.', Operator.Word), - ], - - 'strings': [ - (r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double), - (r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single), - ], - - 'nums': [ - (r'\d+(?![.Ee])', Number.Integer), - (r'[+-]?\d*\.\d+([eE][-+]?\d+)?', Number.Float), - (r'[+-]?\d+\.\d*([eE][-+]?\d+)?', Number.Float), - ], - } - - -class GLShaderLexer(RegexLexer): - """ - GLSL (OpenGL Shader) lexer. - - *New in Pygments 1.1.* - """ - name = 'GLSL' - aliases = ['glsl'] - filenames = ['*.vert', '*.frag', '*.geo'] - mimetypes = ['text/x-glslsrc'] - - tokens = { - 'root': [ - (r'^#.*', Comment.Preproc), - (r'//.*', Comment.Single), - (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), - (r'\+|-|~|!=?|\*|/|%|<<|>>|<=?|>=?|==?|&&?|\^|\|\|?', - Operator), - (r'[?:]', Operator), # quick hack for ternary - (r'\bdefined\b', Operator), - (r'[;{}(),\[\]]', Punctuation), - #FIXME when e is present, no decimal point needed - (r'[+-]?\d*\.\d+([eE][-+]?\d+)?', Number.Float), - (r'[+-]?\d+\.\d*([eE][-+]?\d+)?', Number.Float), - (r'0[xX][0-9a-fA-F]*', Number.Hex), - (r'0[0-7]*', Number.Oct), - (r'[1-9][0-9]*', Number.Integer), - (r'\b(attribute|const|uniform|varying|centroid|break|continue|' - r'do|for|while|if|else|in|out|inout|float|int|void|bool|true|' - r'false|invariant|discard|return|mat[234]|mat[234]x[234]|' - r'vec[234]|[ib]vec[234]|sampler[123]D|samplerCube|' - r'sampler[12]DShadow|struct)\b', Keyword), - (r'\b(asm|class|union|enum|typedef|template|this|packed|goto|' - r'switch|default|inline|noinline|volatile|public|static|extern|' - r'external|interface|long|short|double|half|fixed|unsigned|' - r'lowp|mediump|highp|precision|input|output|hvec[234]|' - 
r'[df]vec[234]|sampler[23]DRect|sampler2DRectShadow|sizeof|' - r'cast|namespace|using)\b', Keyword), #future use - (r'[a-zA-Z_][a-zA-Z_0-9]*', Name), - (r'\.', Punctuation), - (r'\s+', Text), - ], - } - - -class PrologLexer(RegexLexer): - """ - Lexer for Prolog files. - """ - name = 'Prolog' - aliases = ['prolog'] - filenames = ['*.prolog', '*.pro', '*.pl'] - mimetypes = ['text/x-prolog'] - - flags = re.UNICODE - - tokens = { - 'root': [ - (r'^#.*', Comment.Single), - (r'/\*', Comment.Multiline, 'nested-comment'), - (r'%.*', Comment.Single), - # character literal - (r'0\'.', String.Char), - (r'0b[01]+', Number.Bin), - (r'0o[0-7]+', Number.Oct), - (r'0x[0-9a-fA-F]+', Number.Hex), - # literal with prepended base - (r'\d\d?\'[a-zA-Z0-9]+', Number.Integer), - (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float), - (r'\d+', Number.Integer), - (r'[\[\](){}|.,;!]', Punctuation), - (r':-|-->', Punctuation), - (r'"(?:\\x[0-9a-fA-F]+\\|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|' - r'\\[0-7]+\\|\\[\w\W]|[^"])*"', String.Double), - (r"'(?:''|[^'])*'", String.Atom), # quoted atom - # Needs to not be followed by an atom. - #(r'=(?=\s|[a-zA-Z\[])', Operator), - (r'is\b', Operator), - (r'(<|>|=<|>=|==|=:=|=|/|//|\*|\+|-)(?=\s|[a-zA-Z0-9\[])', - Operator), - (r'(mod|div|not)\b', Operator), - (r'_', Keyword), # The don't-care variable - (r'([a-z]+)(:)', bygroups(Name.Namespace, Punctuation)), - (u'([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]' - u'[a-zA-Z0-9_$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)' - u'(\\s*)(:-|-->)', - bygroups(Name.Function, Text, Operator)), # function defn - (u'([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]' - u'[a-zA-Z0-9_$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)' - u'(\\s*)(\\()', - bygroups(Name.Function, Text, Punctuation)), - (u'[a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]' - u'[a-zA-Z0-9_$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*', - String.Atom), # atom, characters - # This one includes ! 
- (u'[#&*+\\-./:<=>?@\\\\^~\u00a1-\u00bf\u2010-\u303f]+', - String.Atom), # atom, graphics - (r'[A-Z_][A-Za-z0-9_]*', Name.Variable), - (u'\\s+|[\u2000-\u200f\ufff0-\ufffe\uffef]', Text), - ], - 'nested-comment': [ - (r'\*/', Comment.Multiline, '#pop'), - (r'/\*', Comment.Multiline, '#push'), - (r'[^*/]+', Comment.Multiline), - (r'[*/]', Comment.Multiline), - ], - } - - def analyse_text(text): - return ':-' in text - - -class CythonLexer(RegexLexer): - """ - For Pyrex and `Cython `_ source code. - - *New in Pygments 1.1.* - """ - - name = 'Cython' - aliases = ['cython', 'pyx', 'pyrex'] - filenames = ['*.pyx', '*.pxd', '*.pxi'] - mimetypes = ['text/x-cython', 'application/x-cython'] - - tokens = { - 'root': [ - (r'\n', Text), - (r'^(\s*)("""(?:.|\n)*?""")', bygroups(Text, String.Doc)), - (r"^(\s*)('''(?:.|\n)*?''')", bygroups(Text, String.Doc)), - (r'[^\S\n]+', Text), - (r'#.*$', Comment), - (r'[]{}:(),;[]', Punctuation), - (r'\\\n', Text), - (r'\\', Text), - (r'(in|is|and|or|not)\b', Operator.Word), - (r'(<)([a-zA-Z0-9.?]+)(>)', - bygroups(Punctuation, Keyword.Type, Punctuation)), - (r'!=|==|<<|>>|[-~+/*%=<>&^|.?]', Operator), - (r'(from)(\d+)(<=)(\s+)(<)(\d+)(:)', - bygroups(Keyword, Number.Integer, Operator, Name, Operator, - Name, Punctuation)), - include('keywords'), - (r'(def|property)(\s+)', bygroups(Keyword, Text), 'funcname'), - (r'(cp?def)(\s+)', bygroups(Keyword, Text), 'cdef'), - (r'(class|struct)(\s+)', bygroups(Keyword, Text), 'classname'), - (r'(from)(\s+)', bygroups(Keyword, Text), 'fromimport'), - (r'(c?import)(\s+)', bygroups(Keyword, Text), 'import'), - include('builtins'), - include('backtick'), - ('(?:[rR]|[uU][rR]|[rR][uU])"""', String, 'tdqs'), - ("(?:[rR]|[uU][rR]|[rR][uU])'''", String, 'tsqs'), - ('(?:[rR]|[uU][rR]|[rR][uU])"', String, 'dqs'), - ("(?:[rR]|[uU][rR]|[rR][uU])'", String, 'sqs'), - ('[uU]?"""', String, combined('stringescape', 'tdqs')), - ("[uU]?'''", String, combined('stringescape', 'tsqs')), - ('[uU]?"', String, 
combined('stringescape', 'dqs')), - ("[uU]?'", String, combined('stringescape', 'sqs')), - include('name'), - include('numbers'), - ], - 'keywords': [ - (r'(assert|break|by|continue|ctypedef|del|elif|else|except\??|exec|' - r'finally|for|gil|global|if|include|lambda|nogil|pass|print|raise|' - r'return|try|while|yield|as|with)\b', Keyword), - (r'(DEF|IF|ELIF|ELSE)\b', Comment.Preproc), - ], - 'builtins': [ - (r'(?/-]', Operator), - (r'(\[)(Compact|Immutable|(?:Boolean|Simple)Type)(\])', - bygroups(Punctuation, Name.Decorator, Punctuation)), - # TODO: "correctly" parse complex code attributes - (r'(\[)(CCode|(?:Integer|Floating)Type)', - bygroups(Punctuation, Name.Decorator)), - (r'[()\[\],.]', Punctuation), - (r'(as|base|break|case|catch|construct|continue|default|delete|do|' - r'else|enum|finally|for|foreach|get|if|in|is|lock|new|out|params|' - r'return|set|sizeof|switch|this|throw|try|typeof|while|yield)\b', - Keyword), - (r'(abstract|const|delegate|dynamic|ensures|extern|inline|internal|' - r'override|owned|private|protected|public|ref|requires|signal|' - r'static|throws|unowned|var|virtual|volatile|weak|yields)\b', - Keyword.Declaration), - (r'(namespace|using)(\s+)', bygroups(Keyword.Namespace, Text), - 'namespace'), - (r'(class|errordomain|interface|struct)(\s+)', - bygroups(Keyword.Declaration, Text), 'class'), - (r'(\.)([a-zA-Z_][a-zA-Z0-9_]*)', - bygroups(Operator, Name.Attribute)), - # void is an actual keyword, others are in glib-2.0.vapi - (r'(void|bool|char|double|float|int|int8|int16|int32|int64|long|' - r'short|size_t|ssize_t|string|time_t|uchar|uint|uint8|uint16|' - r'uint32|uint64|ulong|unichar|ushort)\b', Keyword.Type), - (r'(true|false|null)\b', Name.Builtin), - ('[a-zA-Z_][a-zA-Z0-9_]*', Name), - ], - 'root': [ - include('whitespace'), - ('', Text, 'statement'), - ], - 'statement' : [ - include('whitespace'), - include('statements'), - ('[{}]', Punctuation), - (';', Punctuation, '#pop'), - ], - 'string': [ - (r'"', String, '#pop'), - 
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), - (r'[^\\"\n]+', String), # all other characters - (r'\\\n', String), # line continuation - (r'\\', String), # stray backslash - ], - 'if0': [ - (r'^\s*#if.*?(?`_ source code - - *New in Pygments 1.2.* - """ - name = 'Ooc' - aliases = ['ooc'] - filenames = ['*.ooc'] - mimetypes = ['text/x-ooc'] - - tokens = { - 'root': [ - (r'\b(class|interface|implement|abstract|extends|from|' - r'this|super|new|const|final|static|import|use|extern|' - r'inline|proto|break|continue|fallthrough|operator|if|else|for|' - r'while|do|switch|case|as|in|version|return|true|false|null)\b', - Keyword), - (r'include\b', Keyword, 'include'), - (r'(cover)([ \t]+)(from)([ \t]+)([a-zA-Z0-9_]+[*@]?)', - bygroups(Keyword, Text, Keyword, Text, Name.Class)), - (r'(func)((?:[ \t]|\\\n)+)(~[a-z_][a-zA-Z0-9_]*)', - bygroups(Keyword, Text, Name.Function)), - (r'\bfunc\b', Keyword), - # Note: %= and ^= not listed on http://ooc-lang.org/syntax - (r'//.*', Comment), - (r'(?s)/\*.*?\*/', Comment.Multiline), - (r'(==?|\+=?|-[=>]?|\*=?|/=?|:=|!=?|%=?|\?|>{1,3}=?|<{1,3}=?|\.\.|' - r'&&?|\|\|?|\^=?)', Operator), - (r'(\.)([ \t]*)([a-z]\w*)', bygroups(Operator, Text, - Name.Function)), - (r'[A-Z][A-Z0-9_]+', Name.Constant), - (r'[A-Z][a-zA-Z0-9_]*([@*]|\[[ \t]*\])?', Name.Class), - - (r'([a-z][a-zA-Z0-9_]*(?:~[a-z][a-zA-Z0-9_]*)?)((?:[ \t]|\\\n)*)(?=\()', - bygroups(Name.Function, Text)), - (r'[a-z][a-zA-Z0-9_]*', Name.Variable), - - # : introduces types - (r'[:(){}\[\];,]', Punctuation), - - (r'0x[0-9a-fA-F]+', Number.Hex), - (r'0c[0-9]+', Number.Oct), - (r'0b[01]+', Number.Binary), - (r'[0-9_]\.[0-9_]*(?!\.)', Number.Float), - (r'[0-9_]+', Number.Decimal), - - (r'"(?:\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\"])*"', - String.Double), - (r"'(?:\\.|\\[0-9]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", - String.Char), - (r'@', Punctuation), # pointer dereference - (r'\.', Punctuation), # imports or chain operator - - (r'\\[ \t\n]', Text), - (r'[ 
\t]+', Text), - ], - 'include': [ - (r'[\w/]+', Name), - (r',', Punctuation), - (r'[ \t]', Text), - (r'[;\n]', Text, '#pop'), - ], - } - - -class GoLexer(RegexLexer): - """ - For `Go `_ source. - """ - name = 'Go' - filenames = ['*.go'] - aliases = ['go'] - mimetypes = ['text/x-gosrc'] - - tokens = { - 'root': [ - (r'\n', Text), - (r'\s+', Text), - (r'\\\n', Text), # line continuations - (r'//(.*?)\n', Comment.Single), - (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), - (r'(import|package)\b', Keyword.Namespace), - (r'(var|func|struct|map|chan|type|interface|const)\b', Keyword.Declaration), - (r'(break|default|select|case|defer|go' - r'|else|goto|switch|fallthrough|if|range' - r'|continue|for|return)\b', Keyword), - (r'(true|false|iota|nil)\b', Keyword.Constant), - # It seems the builtin types aren't actually keywords, but - # can be used as functions. So we need two declarations. - (r'(uint|uint8|uint16|uint32|uint64' - r'|int|int8|int16|int32|int64' - r'|float|float32|float64' - r'|complex64|complex128|byte|rune' - r'|string|bool|error|uintptr' - r'|print|println|panic|recover|close|complex|real|imag' - r'|len|cap|append|copy|delete|new|make)\b(\()', - bygroups(Name.Builtin, Punctuation)), - (r'(uint|uint8|uint16|uint32|uint64' - r'|int|int8|int16|int32|int64' - r'|float|float32|float64' - r'|complex64|complex128|byte|rune' - r'|string|bool|error|uintptr)\b', Keyword.Type), - # imaginary_lit - (r'\d+i', Number), - (r'\d+\.\d*([Ee][-+]\d+)?i', Number), - (r'\.\d+([Ee][-+]\d+)?i', Number), - (r'\d+[Ee][-+]\d+i', Number), - # float_lit - (r'\d+(\.\d+[eE][+\-]?\d+|' - r'\.\d*|[eE][+\-]?\d+)', Number.Float), - (r'\.\d+([eE][+\-]?\d+)?', Number.Float), - # int_lit - # -- octal_lit - (r'0[0-7]+', Number.Oct), - # -- hex_lit - (r'0[xX][0-9a-fA-F]+', Number.Hex), - # -- decimal_lit - (r'(0|[1-9][0-9]*)', Number.Integer), - # char_lit - (r"""'(\\['"\\abfnrtv]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}""" - r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|[^\\])'""", - String.Char - ), 
- # StringLiteral - # -- raw_string_lit - (r'`[^`]*`', String), - # -- interpreted_string_lit - (r'"(\\\\|\\"|[^"])*"', String), - # Tokens - (r'(<<=|>>=|<<|>>|<=|>=|&\^=|&\^|\+=|-=|\*=|/=|%=|&=|\|=|&&|\|\|' - r'|<-|\+\+|--|==|!=|:=|\.\.\.|[+\-*/%&])', Operator), - (r'[|^<>=!()\[\]{}.,;:]', Punctuation), - # identifier - (r'[a-zA-Z_]\w*', Name.Other), - ] - } - - -class FelixLexer(RegexLexer): - """ - For `Felix `_ source code. - - *New in Pygments 1.2.* - """ - - name = 'Felix' - aliases = ['felix', 'flx'] - filenames = ['*.flx', '*.flxh'] - mimetypes = ['text/x-felix'] - - preproc = [ - 'elif', 'else', 'endif', 'if', 'ifdef', 'ifndef', - ] - - keywords = [ - '_', '_deref', 'all', 'as', - 'assert', 'attempt', 'call', 'callback', 'case', 'caseno', 'cclass', - 'code', 'compound', 'ctypes', 'do', 'done', 'downto', 'elif', 'else', - 'endattempt', 'endcase', 'endif', 'endmatch', 'enum', 'except', - 'exceptions', 'expect', 'finally', 'for', 'forall', 'forget', 'fork', - 'functor', 'goto', 'ident', 'if', 'incomplete', 'inherit', 'instance', - 'interface', 'jump', 'lambda', 'loop', 'match', 'module', 'namespace', - 'new', 'noexpand', 'nonterm', 'obj', 'of', 'open', 'parse', 'raise', - 'regexp', 'reglex', 'regmatch', 'rename', 'return', 'the', 'then', - 'to', 'type', 'typecase', 'typedef', 'typematch', 'typeof', 'upto', - 'when', 'whilst', 'with', 'yield', - ] - - keyword_directives = [ - '_gc_pointer', '_gc_type', 'body', 'comment', 'const', 'export', - 'header', 'inline', 'lval', 'macro', 'noinline', 'noreturn', - 'package', 'private', 'pod', 'property', 'public', 'publish', - 'requires', 'todo', 'virtual', 'use', - ] - - keyword_declarations = [ - 'def', 'let', 'ref', 'val', 'var', - ] - - keyword_types = [ - 'unit', 'void', 'any', 'bool', - 'byte', 'offset', - 'address', 'caddress', 'cvaddress', 'vaddress', - 'tiny', 'short', 'int', 'long', 'vlong', - 'utiny', 'ushort', 'vshort', 'uint', 'ulong', 'uvlong', - 'int8', 'int16', 'int32', 'int64', - 'uint8', 'uint16', 
'uint32', 'uint64', - 'float', 'double', 'ldouble', - 'complex', 'dcomplex', 'lcomplex', - 'imaginary', 'dimaginary', 'limaginary', - 'char', 'wchar', 'uchar', - 'charp', 'charcp', 'ucharp', 'ucharcp', - 'string', 'wstring', 'ustring', - 'cont', - 'array', 'varray', 'list', - 'lvalue', 'opt', 'slice', - ] - - keyword_constants = [ - 'false', 'true', - ] - - operator_words = [ - 'and', 'not', 'in', 'is', 'isin', 'or', 'xor', - ] - - name_builtins = [ - '_svc', 'while', - ] - - name_pseudo = [ - 'root', 'self', 'this', - ] - - decimal_suffixes = '([tTsSiIlLvV]|ll|LL|([iIuU])(8|16|32|64))?' - - tokens = { - 'root': [ - include('whitespace'), - - # Keywords - (r'(axiom|ctor|fun|gen|proc|reduce|union)\b', Keyword, - 'funcname'), - (r'(class|cclass|cstruct|obj|struct)\b', Keyword, 'classname'), - (r'(instance|module|typeclass)\b', Keyword, 'modulename'), - - (r'(%s)\b' % '|'.join(keywords), Keyword), - (r'(%s)\b' % '|'.join(keyword_directives), Name.Decorator), - (r'(%s)\b' % '|'.join(keyword_declarations), Keyword.Declaration), - (r'(%s)\b' % '|'.join(keyword_types), Keyword.Type), - (r'(%s)\b' % '|'.join(keyword_constants), Keyword.Constant), - - # Operators - include('operators'), - - # Float Literal - # -- Hex Float - (r'0[xX]([0-9a-fA-F_]*\.[0-9a-fA-F_]+|[0-9a-fA-F_]+)' - r'[pP][+\-]?[0-9_]+[lLfFdD]?', Number.Float), - # -- DecimalFloat - (r'[0-9_]+(\.[0-9_]+[eE][+\-]?[0-9_]+|' - r'\.[0-9_]*|[eE][+\-]?[0-9_]+)[lLfFdD]?', Number.Float), - (r'\.(0|[1-9][0-9_]*)([eE][+\-]?[0-9_]+)?[lLfFdD]?', - Number.Float), - - # IntegerLiteral - # -- Binary - (r'0[Bb][01_]+%s' % decimal_suffixes, Number), - # -- Octal - (r'0[0-7_]+%s' % decimal_suffixes, Number.Oct), - # -- Hexadecimal - (r'0[xX][0-9a-fA-F_]+%s' % decimal_suffixes, Number.Hex), - # -- Decimal - (r'(0|[1-9][0-9_]*)%s' % decimal_suffixes, Number.Integer), - - # Strings - ('([rR][cC]?|[cC][rR])"""', String, 'tdqs'), - ("([rR][cC]?|[cC][rR])'''", String, 'tsqs'), - ('([rR][cC]?|[cC][rR])"', String, 'dqs'), - 
("([rR][cC]?|[cC][rR])'", String, 'sqs'), - ('[cCfFqQwWuU]?"""', String, combined('stringescape', 'tdqs')), - ("[cCfFqQwWuU]?'''", String, combined('stringescape', 'tsqs')), - ('[cCfFqQwWuU]?"', String, combined('stringescape', 'dqs')), - ("[cCfFqQwWuU]?'", String, combined('stringescape', 'sqs')), - - # Punctuation - (r'[\[\]{}:(),;?]', Punctuation), - - # Labels - (r'[a-zA-Z_]\w*:>', Name.Label), - - # Identifiers - (r'(%s)\b' % '|'.join(name_builtins), Name.Builtin), - (r'(%s)\b' % '|'.join(name_pseudo), Name.Builtin.Pseudo), - (r'[a-zA-Z_]\w*', Name), - ], - 'whitespace': [ - (r'\n', Text), - (r'\s+', Text), - - include('comment'), - - # Preprocessor - (r'#\s*if\s+0', Comment.Preproc, 'if0'), - (r'#', Comment.Preproc, 'macro'), - ], - 'operators': [ - (r'(%s)\b' % '|'.join(operator_words), Operator.Word), - (r'!=|==|<<|>>|\|\||&&|[-~+/*%=<>&^|.$]', Operator), - ], - 'comment': [ - (r'//(.*?)\n', Comment.Single), - (r'/[*]', Comment.Multiline, 'comment2'), - ], - 'comment2': [ - (r'[^\/*]', Comment.Multiline), - (r'/[*]', Comment.Multiline, '#push'), - (r'[*]/', Comment.Multiline, '#pop'), - (r'[\/*]', Comment.Multiline), - ], - 'if0': [ - (r'^\s*#if.*?(?]*?>)', - bygroups(Comment.Preproc, Text, String), '#pop'), - (r'(import|include)(\s+)("[^"]*?")', - bygroups(Comment.Preproc, Text, String), '#pop'), - (r"(import|include)(\s+)('[^']*?')", - bygroups(Comment.Preproc, Text, String), '#pop'), - (r'[^/\n]+', Comment.Preproc), - ##(r'/[*](.|\n)*?[*]/', Comment), - ##(r'//.*?\n', Comment, '#pop'), - (r'/', Comment.Preproc), - (r'(?<=\\)\n', Comment.Preproc), - (r'\n', Comment.Preproc, '#pop'), - ], - 'funcname': [ - include('whitespace'), - (r'[a-zA-Z_]\w*', Name.Function, '#pop'), - # anonymous functions - (r'(?=\()', Text, '#pop'), - ], - 'classname': [ - include('whitespace'), - (r'[a-zA-Z_]\w*', Name.Class, '#pop'), - # anonymous classes - (r'(?=\{)', Text, '#pop'), - ], - 'modulename': [ - include('whitespace'), - (r'\[', Punctuation, ('modulename2', 
'tvarlist')), - (r'', Error, 'modulename2'), - ], - 'modulename2': [ - include('whitespace'), - (r'([a-zA-Z_]\w*)', Name.Namespace, '#pop:2'), - ], - 'tvarlist': [ - include('whitespace'), - include('operators'), - (r'\[', Punctuation, '#push'), - (r'\]', Punctuation, '#pop'), - (r',', Punctuation), - (r'(with|where)\b', Keyword), - (r'[a-zA-Z_]\w*', Name), - ], - 'stringescape': [ - (r'\\([\\abfnrtv"\']|\n|N{.*?}|u[a-fA-F0-9]{4}|' - r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape) - ], - 'strings': [ - (r'%(\([a-zA-Z0-9]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?' - '[hlL]?[diouxXeEfFgGcrs%]', String.Interpol), - (r'[^\\\'"%\n]+', String), - # quotes, percents and backslashes must be parsed one at a time - (r'[\'"\\]', String), - # unhandled string formatting sign - (r'%', String) - # newlines are an error (use "nl" state) - ], - 'nl': [ - (r'\n', String) - ], - 'dqs': [ - (r'"', String, '#pop'), - # included here again for raw strings - (r'\\\\|\\"|\\\n', String.Escape), - include('strings') - ], - 'sqs': [ - (r"'", String, '#pop'), - # included here again for raw strings - (r"\\\\|\\'|\\\n", String.Escape), - include('strings') - ], - 'tdqs': [ - (r'"""', String, '#pop'), - include('strings'), - include('nl') - ], - 'tsqs': [ - (r"'''", String, '#pop'), - include('strings'), - include('nl') - ], - } - - -class AdaLexer(RegexLexer): - """ - For Ada source code. 
- - *New in Pygments 1.3.* - """ - - name = 'Ada' - aliases = ['ada', 'ada95' 'ada2005'] - filenames = ['*.adb', '*.ads', '*.ada'] - mimetypes = ['text/x-ada'] - - flags = re.MULTILINE | re.I # Ignore case - - tokens = { - 'root': [ - (r'[^\S\n]+', Text), - (r'--.*?\n', Comment.Single), - (r'[^\S\n]+', Text), - (r'function|procedure|entry', Keyword.Declaration, 'subprogram'), - (r'(subtype|type)(\s+)([a-z0-9_]+)', - bygroups(Keyword.Declaration, Text, Keyword.Type), 'type_def'), - (r'task|protected', Keyword.Declaration), - (r'(subtype)(\s+)', bygroups(Keyword.Declaration, Text)), - (r'(end)(\s+)', bygroups(Keyword.Reserved, Text), 'end'), - (r'(pragma)(\s+)([a-zA-Z0-9_]+)', bygroups(Keyword.Reserved, Text, - Comment.Preproc)), - (r'(true|false|null)\b', Keyword.Constant), - (r'(Address|Byte|Boolean|Character|Controlled|Count|Cursor|' - r'Duration|File_Mode|File_Type|Float|Generator|Integer|Long_Float|' - r'Long_Integer|Long_Long_Float|Long_Long_Integer|Natural|Positive|' - r'Reference_Type|Short_Float|Short_Integer|Short_Short_Float|' - r'Short_Short_Integer|String|Wide_Character|Wide_String)\b', - Keyword.Type), - (r'(and(\s+then)?|in|mod|not|or(\s+else)|rem)\b', Operator.Word), - (r'generic|private', Keyword.Declaration), - (r'package', Keyword.Declaration, 'package'), - (r'array\b', Keyword.Reserved, 'array_def'), - (r'(with|use)(\s+)', bygroups(Keyword.Namespace, Text), 'import'), - (r'([a-z0-9_]+)(\s*)(:)(\s*)(constant)', - bygroups(Name.Constant, Text, Punctuation, Text, - Keyword.Reserved)), - (r'<<[a-z0-9_]+>>', Name.Label), - (r'([a-z0-9_]+)(\s*)(:)(\s*)(declare|begin|loop|for|while)', - bygroups(Name.Label, Text, Punctuation, Text, Keyword.Reserved)), - (r'\b(abort|abs|abstract|accept|access|aliased|all|array|at|begin|' - r'body|case|constant|declare|delay|delta|digits|do|else|elsif|end|' - r'entry|exception|exit|interface|for|goto|if|is|limited|loop|new|' - r'null|of|or|others|out|overriding|pragma|protected|raise|range|' - 
r'record|renames|requeue|return|reverse|select|separate|subtype|' - r'synchronized|task|tagged|terminate|then|type|until|when|while|' - r'xor)\b', - Keyword.Reserved), - (r'"[^"]*"', String), - include('attribute'), - include('numbers'), - (r"'[^']'", String.Character), - (r'([a-z0-9_]+)(\s*|[(,])', bygroups(Name, using(this))), - (r"(<>|=>|:=|[()|:;,.'])", Punctuation), - (r'[*<>+=/&-]', Operator), - (r'\n+', Text), - ], - 'numbers' : [ - (r'[0-9_]+#[0-9a-f]+#', Number.Hex), - (r'[0-9_]+\.[0-9_]*', Number.Float), - (r'[0-9_]+', Number.Integer), - ], - 'attribute' : [ - (r"(')([a-zA-Z0-9_]+)", bygroups(Punctuation, Name.Attribute)), - ], - 'subprogram' : [ - (r'\(', Punctuation, ('#pop', 'formal_part')), - (r';', Punctuation, '#pop'), - (r'is\b', Keyword.Reserved, '#pop'), - (r'"[^"]+"|[a-z0-9_]+', Name.Function), - include('root'), - ], - 'end' : [ - ('(if|case|record|loop|select)', Keyword.Reserved), - ('"[^"]+"|[a-zA-Z0-9_.]+', Name.Function), - ('\s+', Text), - (';', Punctuation, '#pop'), - ], - 'type_def': [ - (r';', Punctuation, '#pop'), - (r'\(', Punctuation, 'formal_part'), - (r'with|and|use', Keyword.Reserved), - (r'array\b', Keyword.Reserved, ('#pop', 'array_def')), - (r'record\b', Keyword.Reserved, ('record_def')), - (r'(null record)(;)', bygroups(Keyword.Reserved, Punctuation), '#pop'), - include('root'), - ], - 'array_def' : [ - (r';', Punctuation, '#pop'), - (r'([a-z0-9_]+)(\s+)(range)', bygroups(Keyword.Type, Text, - Keyword.Reserved)), - include('root'), - ], - 'record_def' : [ - (r'end record', Keyword.Reserved, '#pop'), - include('root'), - ], - 'import': [ - (r'[a-z0-9_.]+', Name.Namespace, '#pop'), - (r'', Text, '#pop'), - ], - 'formal_part' : [ - (r'\)', Punctuation, '#pop'), - (r'[a-z0-9_]+', Name.Variable), - (r',|:[^=]', Punctuation), - (r'(in|not|null|out|access)\b', Keyword.Reserved), - include('root'), - ], - 'package': [ - ('body', Keyword.Declaration), - ('is\s+new|renames', Keyword.Reserved), - ('is', Keyword.Reserved, '#pop'), - (';', 
Punctuation, '#pop'), - ('\(', Punctuation, 'package_instantiation'), - ('([a-zA-Z0-9_.]+)', Name.Class), - include('root'), - ], - 'package_instantiation': [ - (r'("[^"]+"|[a-z0-9_]+)(\s+)(=>)', bygroups(Name.Variable, - Text, Punctuation)), - (r'[a-z0-9._\'"]', Text), - (r'\)', Punctuation, '#pop'), - include('root'), - ], - } - - -class Modula2Lexer(RegexLexer): - """ - For `Modula-2 `_ source code. - - Additional options that determine which keywords are highlighted: - - `pim` - Select PIM Modula-2 dialect (default: True). - `iso` - Select ISO Modula-2 dialect (default: False). - `objm2` - Select Objective Modula-2 dialect (default: False). - `gm2ext` - Also highlight GNU extensions (default: False). - - *New in Pygments 1.3.* - """ - name = 'Modula-2' - aliases = ['modula2', 'm2'] - filenames = ['*.def', '*.mod'] - mimetypes = ['text/x-modula2'] - - flags = re.MULTILINE | re.DOTALL - - tokens = { - 'whitespace': [ - (r'\n+', Text), # blank lines - (r'\s+', Text), # whitespace - ], - 'identifiers': [ - (r'([a-zA-Z_\$][a-zA-Z0-9_\$]*)', Name), - ], - 'numliterals': [ - (r'[01]+B', Number.Binary), # binary number (ObjM2) - (r'[0-7]+B', Number.Oct), # octal number (PIM + ISO) - (r'[0-7]+C', Number.Oct), # char code (PIM + ISO) - (r'[0-9A-F]+C', Number.Hex), # char code (ObjM2) - (r'[0-9A-F]+H', Number.Hex), # hexadecimal number - (r'[0-9]+\.[0-9]+E[+-][0-9]+', Number.Float), # real number - (r'[0-9]+\.[0-9]+', Number.Float), # real number - (r'[0-9]+', Number.Integer), # decimal whole number - ], - 'strings': [ - (r"'(\\\\|\\'|[^'])*'", String), # single quoted string - (r'"(\\\\|\\"|[^"])*"', String), # double quoted string - ], - 'operators': [ - (r'[*/+=#~&<>\^-]', Operator), - (r':=', Operator), # assignment - (r'@', Operator), # pointer deref (ISO) - (r'\.\.', Operator), # ellipsis or range - (r'`', Operator), # Smalltalk message (ObjM2) - (r'::', Operator), # type conversion (ObjM2) - ], - 'punctuation': [ - (r'[\(\)\[\]{},.:;|]', Punctuation), - ], - 
'comments': [ - (r'//.*?\n', Comment.Single), # ObjM2 - (r'/\*(.*?)\*/', Comment.Multiline), # ObjM2 - (r'\(\*([^\$].*?)\*\)', Comment.Multiline), - # TO DO: nesting of (* ... *) comments - ], - 'pragmas': [ - (r'\(\*\$(.*?)\*\)', Comment.Preproc), # PIM - (r'<\*(.*?)\*>', Comment.Preproc), # ISO + ObjM2 - ], - 'root': [ - include('whitespace'), - include('comments'), - include('pragmas'), - include('identifiers'), - include('numliterals'), - include('strings'), - include('operators'), - include('punctuation'), - ] - } - - pim_reserved_words = [ - # 40 reserved words - 'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', - 'DIV', 'DO', 'ELSE', 'ELSIF', 'END', 'EXIT', 'EXPORT', 'FOR', - 'FROM', 'IF', 'IMPLEMENTATION', 'IMPORT', 'IN', 'LOOP', 'MOD', - 'MODULE', 'NOT', 'OF', 'OR', 'POINTER', 'PROCEDURE', 'QUALIFIED', - 'RECORD', 'REPEAT', 'RETURN', 'SET', 'THEN', 'TO', 'TYPE', - 'UNTIL', 'VAR', 'WHILE', 'WITH', - ] - - pim_pervasives = [ - # 31 pervasives - 'ABS', 'BITSET', 'BOOLEAN', 'CAP', 'CARDINAL', 'CHAR', 'CHR', 'DEC', - 'DISPOSE', 'EXCL', 'FALSE', 'FLOAT', 'HALT', 'HIGH', 'INC', 'INCL', - 'INTEGER', 'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NEW', 'NIL', 'ODD', - 'ORD', 'PROC', 'REAL', 'SIZE', 'TRUE', 'TRUNC', 'VAL', - ] - - iso_reserved_words = [ - # 46 reserved words - 'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', 'DIV', - 'DO', 'ELSE', 'ELSIF', 'END', 'EXCEPT', 'EXIT', 'EXPORT', 'FINALLY', - 'FOR', 'FORWARD', 'FROM', 'IF', 'IMPLEMENTATION', 'IMPORT', 'IN', - 'LOOP', 'MOD', 'MODULE', 'NOT', 'OF', 'OR', 'PACKEDSET', 'POINTER', - 'PROCEDURE', 'QUALIFIED', 'RECORD', 'REPEAT', 'REM', 'RETRY', - 'RETURN', 'SET', 'THEN', 'TO', 'TYPE', 'UNTIL', 'VAR', 'WHILE', - 'WITH', - ] - - iso_pervasives = [ - # 42 pervasives - 'ABS', 'BITSET', 'BOOLEAN', 'CAP', 'CARDINAL', 'CHAR', 'CHR', 'CMPLX', - 'COMPLEX', 'DEC', 'DISPOSE', 'EXCL', 'FALSE', 'FLOAT', 'HALT', 'HIGH', - 'IM', 'INC', 'INCL', 'INT', 'INTEGER', 'INTERRUPTIBLE', 'LENGTH', - 'LFLOAT', 
'LONGCOMPLEX', 'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NEW', - 'NIL', 'ODD', 'ORD', 'PROC', 'PROTECTION', 'RE', 'REAL', 'SIZE', - 'TRUE', 'TRUNC', 'UNINTERRUBTIBLE', 'VAL', - ] - - objm2_reserved_words = [ - # base language, 42 reserved words - 'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', 'DIV', - 'DO', 'ELSE', 'ELSIF', 'END', 'ENUM', 'EXIT', 'FOR', 'FROM', 'IF', - 'IMMUTABLE', 'IMPLEMENTATION', 'IMPORT', 'IN', 'IS', 'LOOP', 'MOD', - 'MODULE', 'NOT', 'OF', 'OPAQUE', 'OR', 'POINTER', 'PROCEDURE', - 'RECORD', 'REPEAT', 'RETURN', 'SET', 'THEN', 'TO', 'TYPE', - 'UNTIL', 'VAR', 'VARIADIC', 'WHILE', - # OO extensions, 16 reserved words - 'BYCOPY', 'BYREF', 'CLASS', 'CONTINUE', 'CRITICAL', 'INOUT', 'METHOD', - 'ON', 'OPTIONAL', 'OUT', 'PRIVATE', 'PROTECTED', 'PROTOCOL', 'PUBLIC', - 'SUPER', 'TRY', - ] - - objm2_pervasives = [ - # base language, 38 pervasives - 'ABS', 'BITSET', 'BOOLEAN', 'CARDINAL', 'CHAR', 'CHR', 'DISPOSE', - 'FALSE', 'HALT', 'HIGH', 'INTEGER', 'INRANGE', 'LENGTH', 'LONGCARD', - 'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NEG', 'NEW', 'NEXTV', 'NIL', - 'OCTET', 'ODD', 'ORD', 'PRED', 'PROC', 'READ', 'REAL', 'SUCC', 'TMAX', - 'TMIN', 'TRUE', 'TSIZE', 'UNICHAR', 'VAL', 'WRITE', 'WRITEF', - # OO extensions, 3 pervasives - 'OBJECT', 'NO', 'YES', - ] - - gnu_reserved_words = [ - # 10 additional reserved words - 'ASM', '__ATTRIBUTE__', '__BUILTIN__', '__COLUMN__', '__DATE__', - '__FILE__', '__FUNCTION__', '__LINE__', '__MODULE__', 'VOLATILE', - ] - - gnu_pervasives = [ - # 21 identifiers, actually from pseudo-module SYSTEM - # but we will highlight them as if they were pervasives - 'BITSET8', 'BITSET16', 'BITSET32', 'CARDINAL8', 'CARDINAL16', - 'CARDINAL32', 'CARDINAL64', 'COMPLEX32', 'COMPLEX64', 'COMPLEX96', - 'COMPLEX128', 'INTEGER8', 'INTEGER16', 'INTEGER32', 'INTEGER64', - 'REAL8', 'REAL16', 'REAL32', 'REAL96', 'REAL128', 'THROW', - ] - - def __init__(self, **options): - self.reserved_words = set() - self.pervasives = set() - # ISO Modula-2 - if 
get_bool_opt(options, 'iso', False): - self.reserved_words.update(self.iso_reserved_words) - self.pervasives.update(self.iso_pervasives) - # Objective Modula-2 - elif get_bool_opt(options, 'objm2', False): - self.reserved_words.update(self.objm2_reserved_words) - self.pervasives.update(self.objm2_pervasives) - # PIM Modula-2 (DEFAULT) - else: - self.reserved_words.update(self.pim_reserved_words) - self.pervasives.update(self.pim_pervasives) - # GNU extensions - if get_bool_opt(options, 'gm2ext', False): - self.reserved_words.update(self.gnu_reserved_words) - self.pervasives.update(self.gnu_pervasives) - # initialise - RegexLexer.__init__(self, **options) - - def get_tokens_unprocessed(self, text): - for index, token, value in \ - RegexLexer.get_tokens_unprocessed(self, text): - # check for reserved words and pervasives - if token is Name: - if value in self.reserved_words: - token = Keyword.Reserved - elif value in self.pervasives: - token = Keyword.Pervasive - # return result - yield index, token, value - - -class BlitzMaxLexer(RegexLexer): - """ - For `BlitzMax `_ source code. 
- - *New in Pygments 1.4.* - """ - - name = 'BlitzMax' - aliases = ['blitzmax', 'bmax'] - filenames = ['*.bmx'] - mimetypes = ['text/x-bmx'] - - bmax_vopwords = r'\b(Shl|Shr|Sar|Mod)\b' - bmax_sktypes = r'@{1,2}|[!#$%]' - bmax_lktypes = r'\b(Int|Byte|Short|Float|Double|Long)\b' - bmax_name = r'[a-z_][a-z0-9_]*' - bmax_var = (r'(%s)(?:(?:([ \t]*)(%s)|([ \t]*:[ \t]*\b(?:Shl|Shr|Sar|Mod)\b)' - r'|([ \t]*)([:])([ \t]*)(?:%s|(%s)))(?:([ \t]*)(Ptr))?)') % \ - (bmax_name, bmax_sktypes, bmax_lktypes, bmax_name) - bmax_func = bmax_var + r'?((?:[ \t]|\.\.\n)*)([(])' - - flags = re.MULTILINE | re.IGNORECASE - tokens = { - 'root': [ - # Text - (r'[ \t]+', Text), - (r'\.\.\n', Text), # Line continuation - # Comments - (r"'.*?\n", Comment.Single), - (r'([ \t]*)\bRem\n(\n|.)*?\s*\bEnd([ \t]*)Rem', Comment.Multiline), - # Data types - ('"', String.Double, 'string'), - # Numbers - (r'[0-9]+\.[0-9]*(?!\.)', Number.Float), - (r'\.[0-9]*(?!\.)', Number.Float), - (r'[0-9]+', Number.Integer), - (r'\$[0-9a-f]+', Number.Hex), - (r'\%[10]+', Number), # Binary - # Other - (r'(?:(?:(:)?([ \t]*)(:?%s|([+\-*/&|~]))|Or|And|Not|[=<>^]))' % - (bmax_vopwords), Operator), - (r'[(),.:\[\]]', Punctuation), - (r'(?:#[\w \t]*)', Name.Label), - (r'(?:\?[\w \t]*)', Comment.Preproc), - # Identifiers - (r'\b(New)\b([ \t]?)([(]?)(%s)' % (bmax_name), - bygroups(Keyword.Reserved, Text, Punctuation, Name.Class)), - (r'\b(Import|Framework|Module)([ \t]+)(%s\.%s)' % - (bmax_name, bmax_name), - bygroups(Keyword.Reserved, Text, Keyword.Namespace)), - (bmax_func, bygroups(Name.Function, Text, Keyword.Type, - Operator, Text, Punctuation, Text, - Keyword.Type, Name.Class, Text, - Keyword.Type, Text, Punctuation)), - (bmax_var, bygroups(Name.Variable, Text, Keyword.Type, Operator, - Text, Punctuation, Text, Keyword.Type, - Name.Class, Text, Keyword.Type)), - (r'\b(Type|Extends)([ \t]+)(%s)' % (bmax_name), - bygroups(Keyword.Reserved, Text, Name.Class)), - # Keywords - (r'\b(Ptr)\b', Keyword.Type), - 
(r'\b(Pi|True|False|Null|Self|Super)\b', Keyword.Constant), - (r'\b(Local|Global|Const|Field)\b', Keyword.Declaration), - (r'\b(TNullMethodException|TNullFunctionException|' - r'TNullObjectException|TArrayBoundsException|' - r'TRuntimeException)\b', Name.Exception), - (r'\b(Strict|SuperStrict|Module|ModuleInfo|' - r'End|Return|Continue|Exit|Public|Private|' - r'Var|VarPtr|Chr|Len|Asc|SizeOf|Sgn|Abs|Min|Max|' - r'New|Release|Delete|' - r'Incbin|IncbinPtr|IncbinLen|' - r'Framework|Include|Import|Extern|EndExtern|' - r'Function|EndFunction|' - r'Type|EndType|Extends|' - r'Method|EndMethod|' - r'Abstract|Final|' - r'If|Then|Else|ElseIf|EndIf|' - r'For|To|Next|Step|EachIn|' - r'While|Wend|EndWhile|' - r'Repeat|Until|Forever|' - r'Select|Case|Default|EndSelect|' - r'Try|Catch|EndTry|Throw|Assert|' - r'Goto|DefData|ReadData|RestoreData)\b', Keyword.Reserved), - # Final resolve (for variable names and such) - (r'(%s)' % (bmax_name), Name.Variable), - ], - 'string': [ - (r'""', String.Double), - (r'"C?', String.Double, '#pop'), - (r'[^"]+', String.Double), - ], - } - - -class BlitzBasicLexer(RegexLexer): - """ - For `BlitzBasic `_ source code. 
- - *New in Pygments 1.7.* - """ - - name = 'BlitzBasic' - aliases = ['blitzbasic', 'b3d', 'bplus'] - filenames = ['*.bb', '*.decls'] - mimetypes = ['text/x-bb'] - - bb_vopwords = (r'\b(Shl|Shr|Sar|Mod|Or|And|Not|' - r'Abs|Sgn|Handle|Int|Float|Str|' - r'First|Last|Before|After)\b') - bb_sktypes = r'@{1,2}|[#$%]' - bb_name = r'[a-z][a-z0-9_]*' - bb_var = (r'(%s)(?:([ \t]*)(%s)|([ \t]*)([.])([ \t]*)(?:(%s)))?') % \ - (bb_name, bb_sktypes, bb_name) - - flags = re.MULTILINE | re.IGNORECASE - tokens = { - 'root': [ - # Text - (r'[ \t]+', Text), - # Comments - (r";.*?\n", Comment.Single), - # Data types - ('"', String.Double, 'string'), - # Numbers - (r'[0-9]+\.[0-9]*(?!\.)', Number.Float), - (r'\.[0-9]+(?!\.)', Number.Float), - (r'[0-9]+', Number.Integer), - (r'\$[0-9a-f]+', Number.Hex), - (r'\%[10]+', Number), # Binary - # Other - (r'(?:%s|([+\-*/~=<>^]))' % (bb_vopwords), Operator), - (r'[(),:\[\]\\]', Punctuation), - (r'\.([ \t]*)(%s)' % bb_name, Name.Label), - # Identifiers - (r'\b(New)\b([ \t]+)(%s)' % (bb_name), - bygroups(Keyword.Reserved, Text, Name.Class)), - (r'\b(Gosub|Goto)\b([ \t]+)(%s)' % (bb_name), - bygroups(Keyword.Reserved, Text, Name.Label)), - (r'\b(Object)\b([ \t]*)([.])([ \t]*)(%s)\b' % (bb_name), - bygroups(Operator, Text, Punctuation, Text, Name.Class)), - (r'\b%s\b([ \t]*)(\()' % bb_var, - bygroups(Name.Function, Text, Keyword.Type,Text, Punctuation, - Text, Name.Class, Text, Punctuation)), - (r'\b(Function)\b([ \t]+)%s' % bb_var, - bygroups(Keyword.Reserved, Text, Name.Function, Text, Keyword.Type, - Text, Punctuation, Text, Name.Class)), - (r'\b(Type)([ \t]+)(%s)' % (bb_name), - bygroups(Keyword.Reserved, Text, Name.Class)), - # Keywords - (r'\b(Pi|True|False|Null)\b', Keyword.Constant), - (r'\b(Local|Global|Const|Field|Dim)\b', Keyword.Declaration), - (r'\b(End|Return|Exit|' - r'Chr|Len|Asc|' - r'New|Delete|Insert|' - r'Include|' - r'Function|' - r'Type|' - r'If|Then|Else|ElseIf|EndIf|' - r'For|To|Next|Step|Each|' - r'While|Wend|' - 
r'Repeat|Until|Forever|' - r'Select|Case|Default|' - r'Goto|Gosub|Data|Read|Restore)\b', Keyword.Reserved), - # Final resolve (for variable names and such) -# (r'(%s)' % (bb_name), Name.Variable), - (bb_var, bygroups(Name.Variable, Text, Keyword.Type, - Text, Punctuation, Text, Name.Class)), - ], - 'string': [ - (r'""', String.Double), - (r'"C?', String.Double, '#pop'), - (r'[^"]+', String.Double), - ], - } - - -class NimrodLexer(RegexLexer): - """ - For `Nimrod `_ source code. - - *New in Pygments 1.5.* - """ - - name = 'Nimrod' - aliases = ['nimrod', 'nim'] - filenames = ['*.nim', '*.nimrod'] - mimetypes = ['text/x-nimrod'] - - flags = re.MULTILINE | re.IGNORECASE | re.UNICODE - - def underscorize(words): - newWords = [] - new = "" - for word in words: - for ch in word: - new += (ch + "_?") - newWords.append(new) - new = "" - return "|".join(newWords) - - keywords = [ - 'addr', 'and', 'as', 'asm', 'atomic', 'bind', 'block', 'break', - 'case', 'cast', 'const', 'continue', 'converter', 'discard', - 'distinct', 'div', 'elif', 'else', 'end', 'enum', 'except', 'finally', - 'for', 'generic', 'if', 'implies', 'in', 'yield', - 'is', 'isnot', 'iterator', 'lambda', 'let', 'macro', 'method', - 'mod', 'not', 'notin', 'object', 'of', 'or', 'out', 'proc', - 'ptr', 'raise', 'ref', 'return', 'shl', 'shr', 'template', 'try', - 'tuple', 'type' , 'when', 'while', 'with', 'without', 'xor' - ] - - keywordsPseudo = [ - 'nil', 'true', 'false' - ] - - opWords = [ - 'and', 'or', 'not', 'xor', 'shl', 'shr', 'div', 'mod', 'in', - 'notin', 'is', 'isnot' - ] - - types = [ - 'int', 'int8', 'int16', 'int32', 'int64', 'float', 'float32', 'float64', - 'bool', 'char', 'range', 'array', 'seq', 'set', 'string' - ] - - tokens = { - 'root': [ - (r'##.*$', String.Doc), - (r'#.*$', Comment), - (r'\*|=|>|<|\+|-|/|@|\$|~|&|%|\!|\?|\||\\|\[|\]', Operator), - (r'\.\.|\.|,|\[\.|\.\]|{\.|\.}|\(\.|\.\)|{|}|\(|\)|:|\^|`|;', - Punctuation), - - # Strings - (r'(?:[\w]+)"', String, 'rdqs'), - (r'"""', String, 
'tdqs'), - ('"', String, 'dqs'), - - # Char - ("'", String.Char, 'chars'), - - # Keywords - (r'(%s)\b' % underscorize(opWords), Operator.Word), - (r'(p_?r_?o_?c_?\s)(?![\(\[\]])', Keyword, 'funcname'), - (r'(%s)\b' % underscorize(keywords), Keyword), - (r'(%s)\b' % underscorize(['from', 'import', 'include']), - Keyword.Namespace), - (r'(v_?a_?r)\b', Keyword.Declaration), - (r'(%s)\b' % underscorize(types), Keyword.Type), - (r'(%s)\b' % underscorize(keywordsPseudo), Keyword.Pseudo), - # Identifiers - (r'\b((?![_\d])\w)(((?!_)\w)|(_(?!_)\w))*', Name), - # Numbers - (r'[0-9][0-9_]*(?=([eE.]|\'[fF](32|64)))', - Number.Float, ('float-suffix', 'float-number')), - (r'0[xX][a-fA-F0-9][a-fA-F0-9_]*', Number.Hex, 'int-suffix'), - (r'0[bB][01][01_]*', Number, 'int-suffix'), - (r'0o[0-7][0-7_]*', Number.Oct, 'int-suffix'), - (r'[0-9][0-9_]*', Number.Integer, 'int-suffix'), - # Whitespace - (r'\s+', Text), - (r'.+$', Error), - ], - 'chars': [ - (r'\\([\\abcefnrtvl"\']|x[a-fA-F0-9]{2}|[0-9]{1,3})', String.Escape), - (r"'", String.Char, '#pop'), - (r".", String.Char) - ], - 'strings': [ - (r'(?\?]*?', - ) - ) - - - tokens = { - 'comments': [ - (r'(?s)/\*.*?\*/', Comment.Multiline), #Multiline - (r'//.*?\n', Comment.Single), #Single line - #todo: highlight references in fandocs - (r'\*\*.*?\n', Comment.Special), #Fandoc - (r'#.*\n', Comment.Single) #Shell-style - ], - 'literals': [ - (r'\b-?[\d_]+(ns|ms|sec|min|hr|day)', Number), #Duration - (r'\b-?[\d_]*\.[\d_]+(ns|ms|sec|min|hr|day)', Number), - #Duration with dot - (r'\b-?(\d+)?\.\d+(f|F|d|D)?', Number.Float), #Float/Decimal - (r'\b-?0x[0-9a-fA-F_]+', Number.Hex), #Hex - (r'\b-?[\d_]+', Number.Integer), #Int - (r"'\\.'|'[^\\]'|'\\u[0-9a-f]{4}'", String.Char), #Char - (r'"', Punctuation, 'insideStr'), #Opening quote - (r'`', Punctuation, 'insideUri'), #Opening accent - (r'\b(true|false|null)\b', Keyword.Constant), #Bool & null - (r'(?:(\w+)(::))?(\w+)(<\|)(.*?)(\|>)', #DSL - bygroups(Name.Namespace, Punctuation, Name.Class, - 
Punctuation, String, Punctuation)), - (r'(?:(\w+)(::))?(\w+)?(#)(\w+)?', #Type/slot literal - bygroups(Name.Namespace, Punctuation, Name.Class, - Punctuation, Name.Function)), - (r'\[,\]', Literal), # Empty list - (s(r'($type)(\[,\])'), # Typed empty list - bygroups(using(this, state = 'inType'), Literal)), - (r'\[:\]', Literal), # Empty Map - (s(r'($type)(\[:\])'), - bygroups(using(this, state = 'inType'), Literal)), - ], - 'insideStr': [ - (r'\\\\', String.Escape), #Escaped backslash - (r'\\"', String.Escape), #Escaped " - (r'\\`', String.Escape), #Escaped ` - (r'\$\w+', String.Interpol), #Subst var - (r'\${.*?}', String.Interpol), #Subst expr - (r'"', Punctuation, '#pop'), #Closing quot - (r'.', String) #String content - ], - 'insideUri': [ #TODO: remove copy/paste str/uri - (r'\\\\', String.Escape), #Escaped backslash - (r'\\"', String.Escape), #Escaped " - (r'\\`', String.Escape), #Escaped ` - (r'\$\w+', String.Interpol), #Subst var - (r'\${.*?}', String.Interpol), #Subst expr - (r'`', Punctuation, '#pop'), #Closing tick - (r'.', String.Backtick) #URI content - ], - 'protectionKeywords': [ - (r'\b(public|protected|private|internal)\b', Keyword), - ], - 'typeKeywords': [ - (r'\b(abstract|final|const|native|facet|enum)\b', Keyword), - ], - 'methodKeywords': [ - (r'\b(abstract|native|once|override|static|virtual|final)\b', - Keyword), - ], - 'fieldKeywords': [ - (r'\b(abstract|const|final|native|override|static|virtual|' - r'readonly)\b', Keyword) - ], - 'otherKeywords': [ - (r'\b(try|catch|throw|finally|for|if|else|while|as|is|isnot|' - r'switch|case|default|continue|break|do|return|get|set)\b', - Keyword), - (r'\b(it|this|super)\b', Name.Builtin.Pseudo), - ], - 'operators': [ - (r'\+\+|\-\-|\+|\-|\*|/|\|\||&&|<=>|<=|<|>=|>|=|!|\[|\]', Operator) - ], - 'inType': [ - (r'[\[\]\|\->:\?]', Punctuation), - (s(r'$id'), Name.Class), - (r'', Text, '#pop'), - - ], - 'root': [ - include('comments'), - include('protectionKeywords'), - include('typeKeywords'), - 
include('methodKeywords'), - include('fieldKeywords'), - include('literals'), - include('otherKeywords'), - include('operators'), - (r'using\b', Keyword.Namespace, 'using'), # Using stmt - (r'@\w+', Name.Decorator, 'facet'), # Symbol - (r'(class|mixin)(\s+)(\w+)', bygroups(Keyword, Text, Name.Class), - 'inheritance'), # Inheritance list - - - ### Type var := val - (s(r'($type)([ \t]+)($id)(\s*)(:=)'), - bygroups(using(this, state = 'inType'), Text, - Name.Variable, Text, Operator)), - - ### var := val - (s(r'($id)(\s*)(:=)'), - bygroups(Name.Variable, Text, Operator)), - - ### .someId( or ->someId( ### - (s(r'(\.|(?:\->))($id)(\s*)(\()'), - bygroups(Operator, Name.Function, Text, Punctuation), - 'insideParen'), - - ### .someId or ->someId - (s(r'(\.|(?:\->))($id)'), - bygroups(Operator, Name.Function)), - - ### new makeXXX ( #### - (r'(new)(\s+)(make\w*)(\s*)(\()', - bygroups(Keyword, Text, Name.Function, Text, Punctuation), - 'insideMethodDeclArgs'), - - ### Type name ( #### - (s(r'($type)([ \t]+)' #Return type and whitespace - r'($id)(\s*)(\()'), #method name + open brace - bygroups(using(this, state = 'inType'), Text, - Name.Function, Text, Punctuation), - 'insideMethodDeclArgs'), - - ### ArgType argName, ##### - (s(r'($type)(\s+)($id)(\s*)(,)'), - bygroups(using(this, state='inType'), Text, Name.Variable, - Text, Punctuation)), - - #### ArgType argName) #### - ## Covered in 'insideParen' state - - ### ArgType argName -> ArgType| ### - (s(r'($type)(\s+)($id)(\s*)(\->)(\s*)($type)(\|)'), - bygroups(using(this, state='inType'), Text, Name.Variable, - Text, Punctuation, Text, using(this, state = 'inType'), - Punctuation)), - - ### ArgType argName| ### - (s(r'($type)(\s+)($id)(\s*)(\|)'), - bygroups(using(this, state='inType'), Text, Name.Variable, - Text, Punctuation)), - - ### Type var - (s(r'($type)([ \t]+)($id)'), - bygroups(using(this, state='inType'), Text, - Name.Variable)), - - (r'\(', Punctuation, 'insideParen'), - (r'\{', Punctuation, 'insideBrace'), - 
(r'.', Text) - ], - 'insideParen': [ - (r'\)', Punctuation, '#pop'), - include('root'), - ], - 'insideMethodDeclArgs': [ - (r'\)', Punctuation, '#pop'), - (s(r'($type)(\s+)($id)(\s*)(\))'), - bygroups(using(this, state='inType'), Text, Name.Variable, - Text, Punctuation), '#pop'), - include('root'), - ], - 'insideBrace': [ - (r'\}', Punctuation, '#pop'), - include('root'), - ], - 'inheritance': [ - (r'\s+', Text), #Whitespace - (r':|,', Punctuation), - (r'(?:(\w+)(::))?(\w+)', - bygroups(Name.Namespace, Punctuation, Name.Class)), - (r'{', Punctuation, '#pop') - ], - 'using': [ - (r'[ \t]+', Text), # consume whitespaces - (r'(\[)(\w+)(\])', - bygroups(Punctuation, Comment.Special, Punctuation)), #ffi - (r'(\")?([\w\.]+)(\")?', - bygroups(Punctuation, Name.Namespace, Punctuation)), #podname - (r'::', Punctuation, 'usingClass'), - (r'', Text, '#pop') - ], - 'usingClass': [ - (r'[ \t]+', Text), # consume whitespaces - (r'(as)(\s+)(\w+)', - bygroups(Keyword.Declaration, Text, Name.Class), '#pop:2'), - (r'[\w\$]+', Name.Class), - (r'', Text, '#pop:2') # jump out to root state - ], - 'facet': [ - (r'\s+', Text), - (r'{', Punctuation, 'facetFields'), - (r'', Text, '#pop') - ], - 'facetFields': [ - include('comments'), - include('literals'), - include('operators'), - (r'\s+', Text), - (r'(\s*)(\w+)(\s*)(=)', bygroups(Text, Name, Text, Operator)), - (r'}', Punctuation, '#pop'), - (r'.', Text) - ], - } - - -class RustLexer(RegexLexer): - """ - Lexer for Mozilla's Rust programming language. 
- - *New in Pygments 1.6.* - """ - name = 'Rust' - filenames = ['*.rs', '*.rc'] - aliases = ['rust'] - mimetypes = ['text/x-rustsrc'] - - tokens = { - 'root': [ - # Whitespace and Comments - (r'\n', Text), - (r'\s+', Text), - (r'//(.*?)\n', Comment.Single), - (r'/[*](.|\n)*?[*]/', Comment.Multiline), - - # Keywords - (r'(as|assert|break|const' - r'|copy|do|else|enum|extern|fail' - r'|false|fn|for|if|impl|let|log' - r'|loop|match|mod|move|mut|once|priv|pub|pure' - r'|ref|return|static|struct|trait|true|type|unsafe|use|while' - r'|u8|u16|u32|u64|i8|i16|i32|i64|uint' - r'|int|float|f32|f64|str)\b', Keyword), - - # Character Literal - (r"""'(\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}""" - r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|.)'""", - String.Char), - # Binary Literal - (r'0[Bb][01_]+', Number, 'number_lit'), - # Octal Literal - (r'0[0-7_]+', Number.Oct, 'number_lit'), - # Hexadecimal Literal - (r'0[xX][0-9a-fA-F_]+', Number.Hex, 'number_lit'), - # Decimal Literal - (r'[0-9][0-9_]*(\.[0-9_]+[eE][+\-]?' 
- r'[0-9_]+|\.[0-9_]*|[eE][+\-]?[0-9_]+)?', Number, 'number_lit'), - # String Literal - (r'"', String, 'string'), - - # Operators and Punctuation - (r'[{}()\[\],.;]', Punctuation), - (r'[+\-*/%&|<>^!~@=:?]', Operator), - - # Identifier - (r'[a-zA-Z_$][a-zA-Z0-9_]*', Name), - - # Attributes - (r'#\[', Comment.Preproc, 'attribute['), - (r'#\(', Comment.Preproc, 'attribute('), - # Macros - (r'[A-Za-z_][A-Za-z0-9_]*!\[', Comment.Preproc, 'attribute['), - (r'[A-Za-z_][A-Za-z0-9_]*!\(', Comment.Preproc, 'attribute('), - ], - 'number_lit': [ - (r'(([ui](8|16|32|64)?)|(f(32|64)?))?', Keyword, '#pop'), - ], - 'string': [ - (r'"', String, '#pop'), - (r"""\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}""" - r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}""", String.Escape), - (r'[^\\"]+', String), - (r'\\', String), - ], - 'attribute_common': [ - (r'"', String, 'string'), - (r'\[', Comment.Preproc, 'attribute['), - (r'\(', Comment.Preproc, 'attribute('), - ], - 'attribute[': [ - include('attribute_common'), - (r'\];?', Comment.Preproc, '#pop'), - (r'[^"\]]+', Comment.Preproc), - ], - 'attribute(': [ - include('attribute_common'), - (r'\);?', Comment.Preproc, '#pop'), - (r'[^"\)]+', Comment.Preproc), - ], - } - - -class CudaLexer(CLexer): - """ - For NVIDIA `CUDA™ `_ - source. 
- - *New in Pygments 1.6.* - """ - name = 'CUDA' - filenames = ['*.cu', '*.cuh'] - aliases = ['cuda', 'cu'] - mimetypes = ['text/x-cuda'] - - function_qualifiers = ['__device__', '__global__', '__host__', - '__noinline__', '__forceinline__'] - variable_qualifiers = ['__device__', '__constant__', '__shared__', - '__restrict__'] - vector_types = ['char1', 'uchar1', 'char2', 'uchar2', 'char3', 'uchar3', - 'char4', 'uchar4', 'short1', 'ushort1', 'short2', 'ushort2', - 'short3', 'ushort3', 'short4', 'ushort4', 'int1', 'uint1', - 'int2', 'uint2', 'int3', 'uint3', 'int4', 'uint4', 'long1', - 'ulong1', 'long2', 'ulong2', 'long3', 'ulong3', 'long4', - 'ulong4', 'longlong1', 'ulonglong1', 'longlong2', - 'ulonglong2', 'float1', 'float2', 'float3', 'float4', - 'double1', 'double2', 'dim3'] - variables = ['gridDim', 'blockIdx', 'blockDim', 'threadIdx', 'warpSize'] - functions = ['__threadfence_block', '__threadfence', '__threadfence_system', - '__syncthreads', '__syncthreads_count', '__syncthreads_and', - '__syncthreads_or'] - execution_confs = ['<<<', '>>>'] - - def get_tokens_unprocessed(self, text): - for index, token, value in \ - CLexer.get_tokens_unprocessed(self, text): - if token is Name: - if value in self.variable_qualifiers: - token = Keyword.Type - elif value in self.vector_types: - token = Keyword.Type - elif value in self.variables: - token = Name.Builtin - elif value in self.execution_confs: - token = Keyword.Pseudo - elif value in self.function_qualifiers: - token = Keyword.Reserved - elif value in self.functions: - token = Name.Function - yield index, token, value - - -class MonkeyLexer(RegexLexer): - """ - For - `Monkey `_ - source code. 
- - *New in Pygments 1.6.* - """ - - name = 'Monkey' - aliases = ['monkey'] - filenames = ['*.monkey'] - mimetypes = ['text/x-monkey'] - - name_variable = r'[a-z_][a-zA-Z0-9_]*' - name_function = r'[A-Z][a-zA-Z0-9_]*' - name_constant = r'[A-Z_][A-Z0-9_]*' - name_class = r'[A-Z][a-zA-Z0-9_]*' - name_module = r'[a-z0-9_]*' - - keyword_type = r'(?:Int|Float|String|Bool|Object|Array|Void)' - # ? == Bool // % == Int // # == Float // $ == String - keyword_type_special = r'[?%#$]' - - flags = re.MULTILINE - - tokens = { - 'root': [ - #Text - (r'\s+', Text), - # Comments - (r"'.*", Comment), - (r'(?i)^#rem\b', Comment.Multiline, 'comment'), - # preprocessor directives - (r'(?i)^(?:#If|#ElseIf|#Else|#EndIf|#End|#Print|#Error)\b', Comment.Preproc), - # preprocessor variable (any line starting with '#' that is not a directive) - (r'^#', Comment.Preproc, 'variables'), - # String - ('"', String.Double, 'string'), - # Numbers - (r'[0-9]+\.[0-9]*(?!\.)', Number.Float), - (r'\.[0-9]+(?!\.)', Number.Float), - (r'[0-9]+', Number.Integer), - (r'\$[0-9a-fA-Z]+', Number.Hex), - (r'\%[10]+', Number), # Binary - # Native data types - (r'\b%s\b' % keyword_type, Keyword.Type), - # Exception handling - (r'(?i)\b(?:Try|Catch|Throw)\b', Keyword.Reserved), - (r'Throwable', Name.Exception), - # Builtins - (r'(?i)\b(?:Null|True|False)\b', Name.Builtin), - (r'(?i)\b(?:Self|Super)\b', Name.Builtin.Pseudo), - (r'\b(?:HOST|LANG|TARGET|CONFIG)\b', Name.Constant), - # Keywords - (r'(?i)^(Import)(\s+)(.*)(\n)', - bygroups(Keyword.Namespace, Text, Name.Namespace, Text)), - (r'(?i)^Strict\b.*\n', Keyword.Reserved), - (r'(?i)(Const|Local|Global|Field)(\s+)', - bygroups(Keyword.Declaration, Text), 'variables'), - (r'(?i)(New|Class|Interface|Extends|Implements)(\s+)', - bygroups(Keyword.Reserved, Text), 'classname'), - (r'(?i)(Function|Method)(\s+)', - bygroups(Keyword.Reserved, Text), 'funcname'), - (r'(?i)(?:End|Return|Public|Private|Extern|Property|' - r'Final|Abstract)\b', Keyword.Reserved), - # Flow 
Control stuff - (r'(?i)(?:If|Then|Else|ElseIf|EndIf|' - r'Select|Case|Default|' - r'While|Wend|' - r'Repeat|Until|Forever|' - r'For|To|Until|Step|EachIn|Next|' - r'Exit|Continue)\s+', Keyword.Reserved), - # not used yet - (r'(?i)\b(?:Module|Inline)\b', Keyword.Reserved), - # Array - (r'[\[\]]', Punctuation), - # Other - (r'<=|>=|<>|\*=|/=|\+=|-=|&=|~=|\|=|[-&*/^+=<>|~]', Operator), - (r'(?i)(?:Not|Mod|Shl|Shr|And|Or)', Operator.Word), - (r'[\(\){}!#,.:]', Punctuation), - # catch the rest - (r'%s\b' % name_constant, Name.Constant), - (r'%s\b' % name_function, Name.Function), - (r'%s\b' % name_variable, Name.Variable), - ], - 'funcname': [ - (r'(?i)%s\b' % name_function, Name.Function), - (r':', Punctuation, 'classname'), - (r'\s+', Text), - (r'\(', Punctuation, 'variables'), - (r'\)', Punctuation, '#pop') - ], - 'classname': [ - (r'%s\.' % name_module, Name.Namespace), - (r'%s\b' % keyword_type, Keyword.Type), - (r'%s\b' % name_class, Name.Class), - # array (of given size) - (r'(\[)(\s*)(\d*)(\s*)(\])', - bygroups(Punctuation, Text, Number.Integer, Text, Punctuation)), - # generics - (r'\s+(?!<)', Text, '#pop'), - (r'<', Punctuation, '#push'), - (r'>', Punctuation, '#pop'), - (r'\n', Text, '#pop'), - (r'', Text, '#pop') - ], - 'variables': [ - (r'%s\b' % name_constant, Name.Constant), - (r'%s\b' % name_variable, Name.Variable), - (r'%s' % keyword_type_special, Keyword.Type), - (r'\s+', Text), - (r':', Punctuation, 'classname'), - (r',', Punctuation, '#push'), - (r'', Text, '#pop') - ], - 'string': [ - (r'[^"~]+', String.Double), - (r'~q|~n|~r|~t|~z|~~', String.Escape), - (r'"', String.Double, '#pop'), - ], - 'comment' : [ - (r'(?i)^#rem.*?', Comment.Multiline, "#push"), - (r'(?i)^#end.*?', Comment.Multiline, "#pop"), - (r'\n', Comment.Multiline), - (r'.+', Comment.Multiline), - ], - } - - -class CobolLexer(RegexLexer): - """ - Lexer for OpenCOBOL code. 
- - *New in Pygments 1.6.* - """ - name = 'COBOL' - aliases = ['cobol'] - filenames = ['*.cob', '*.COB', '*.cpy', '*.CPY'] - mimetypes = ['text/x-cobol'] - flags = re.IGNORECASE | re.MULTILINE - - # Data Types: by PICTURE and USAGE - # Operators: **, *, +, -, /, <, >, <=, >=, =, <> - # Logical (?): NOT, AND, OR - - # Reserved words: - # http://opencobol.add1tocobol.com/#reserved-words - # Intrinsics: - # http://opencobol.add1tocobol.com/#does-opencobol-implement-any-intrinsic-functions - - tokens = { - 'root': [ - include('comment'), - include('strings'), - include('core'), - include('nums'), - (r'[a-z0-9]([_a-z0-9\-]*[a-z0-9]+)?', Name.Variable), - # (r'[\s]+', Text), - (r'[ \t]+', Text), - ], - 'comment': [ - (r'(^.{6}[*/].*\n|^.{6}|\*>.*\n)', Comment), - ], - 'core': [ - # Figurative constants - (r'(^|(?<=[^0-9a-z_\-]))(ALL\s+)?' - r'((ZEROES)|(HIGH-VALUE|LOW-VALUE|QUOTE|SPACE|ZERO)(S)?)' - r'\s*($|(?=[^0-9a-z_\-]))', - Name.Constant), - - # Reserved words STATEMENTS and other bolds - (r'(^|(?<=[^0-9a-z_\-]))' - r'(ACCEPT|ADD|ALLOCATE|CALL|CANCEL|CLOSE|COMPUTE|' - r'CONFIGURATION|CONTINUE|' - r'DATA|DELETE|DISPLAY|DIVIDE|DIVISION|ELSE|END|END-ACCEPT|' - r'END-ADD|END-CALL|END-COMPUTE|END-DELETE|END-DISPLAY|' - r'END-DIVIDE|END-EVALUATE|END-IF|END-MULTIPLY|END-OF-PAGE|' - r'END-PERFORM|END-READ|END-RETURN|END-REWRITE|END-SEARCH|' - r'END-START|END-STRING|END-SUBTRACT|END-UNSTRING|END-WRITE|' - r'ENVIRONMENT|EVALUATE|EXIT|FD|FILE|FILE-CONTROL|FOREVER|' - r'FREE|GENERATE|GO|GOBACK|' - r'IDENTIFICATION|IF|INITIALIZE|' - r'INITIATE|INPUT-OUTPUT|INSPECT|INVOKE|I-O-CONTROL|LINKAGE|' - r'LOCAL-STORAGE|MERGE|MOVE|MULTIPLY|OPEN|' - r'PERFORM|PROCEDURE|PROGRAM-ID|RAISE|READ|RELEASE|RESUME|' - r'RETURN|REWRITE|SCREEN|' - r'SD|SEARCH|SECTION|SET|SORT|START|STOP|STRING|SUBTRACT|' - r'SUPPRESS|TERMINATE|THEN|UNLOCK|UNSTRING|USE|VALIDATE|' - r'WORKING-STORAGE|WRITE)' - r'\s*($|(?=[^0-9a-z_\-]))', Keyword.Reserved), - - # Reserved words - (r'(^|(?<=[^0-9a-z_\-]))' - 
r'(ACCESS|ADDRESS|ADVANCING|AFTER|ALL|' - r'ALPHABET|ALPHABETIC|ALPHABETIC-LOWER|ALPHABETIC-UPPER|' - r'ALPHANUMERIC|ALPHANUMERIC-EDITED|ALSO|ALTER|ALTERNATE' - r'ANY|ARE|AREA|AREAS|ARGUMENT-NUMBER|ARGUMENT-VALUE|AS|' - r'ASCENDING|ASSIGN|AT|AUTO|AUTO-SKIP|AUTOMATIC|AUTOTERMINATE|' - r'BACKGROUND-COLOR|BASED|BEEP|BEFORE|BELL|' - r'BLANK|' - r'BLINK|BLOCK|BOTTOM|BY|BYTE-LENGTH|CHAINING|' - r'CHARACTER|CHARACTERS|CLASS|CODE|CODE-SET|COL|COLLATING|' - r'COLS|COLUMN|COLUMNS|COMMA|COMMAND-LINE|COMMIT|COMMON|' - r'CONSTANT|CONTAINS|CONTENT|CONTROL|' - r'CONTROLS|CONVERTING|COPY|CORR|CORRESPONDING|COUNT|CRT|' - r'CURRENCY|CURSOR|CYCLE|DATE|DAY|DAY-OF-WEEK|DE|DEBUGGING|' - r'DECIMAL-POINT|DECLARATIVES|DEFAULT|DELIMITED|' - r'DELIMITER|DEPENDING|DESCENDING|DETAIL|DISK|' - r'DOWN|DUPLICATES|DYNAMIC|EBCDIC|' - r'ENTRY|ENVIRONMENT-NAME|ENVIRONMENT-VALUE|EOL|EOP|' - r'EOS|ERASE|ERROR|ESCAPE|EXCEPTION|' - r'EXCLUSIVE|EXTEND|EXTERNAL|' - r'FILE-ID|FILLER|FINAL|FIRST|FIXED|FLOAT-LONG|FLOAT-SHORT|' - r'FOOTING|FOR|FOREGROUND-COLOR|FORMAT|FROM|FULL|FUNCTION|' - r'FUNCTION-ID|GIVING|GLOBAL|GROUP|' - r'HEADING|HIGHLIGHT|I-O|ID|' - r'IGNORE|IGNORING|IN|INDEX|INDEXED|INDICATE|' - r'INITIAL|INITIALIZED|INPUT|' - r'INTO|INTRINSIC|INVALID|IS|JUST|JUSTIFIED|KEY|LABEL|' - r'LAST|LEADING|LEFT|LENGTH|LIMIT|LIMITS|LINAGE|' - r'LINAGE-COUNTER|LINE|LINES|LOCALE|LOCK|' - r'LOWLIGHT|MANUAL|MEMORY|MINUS|MODE|' - r'MULTIPLE|NATIONAL|NATIONAL-EDITED|NATIVE|' - r'NEGATIVE|NEXT|NO|NULL|NULLS|NUMBER|NUMBERS|NUMERIC|' - r'NUMERIC-EDITED|OBJECT-COMPUTER|OCCURS|OF|OFF|OMITTED|ON|ONLY|' - r'OPTIONAL|ORDER|ORGANIZATION|OTHER|OUTPUT|OVERFLOW|' - r'OVERLINE|PACKED-DECIMAL|PADDING|PAGE|PARAGRAPH|' - r'PLUS|POINTER|POSITION|POSITIVE|PRESENT|PREVIOUS|' - r'PRINTER|PRINTING|PROCEDURE-POINTER|PROCEDURES|' - r'PROCEED|PROGRAM|PROGRAM-POINTER|PROMPT|QUOTE|' - r'QUOTES|RANDOM|RD|RECORD|RECORDING|RECORDS|RECURSIVE|' - r'REDEFINES|REEL|REFERENCE|RELATIVE|REMAINDER|REMOVAL|' - 
r'RENAMES|REPLACING|REPORT|REPORTING|REPORTS|REPOSITORY|' - r'REQUIRED|RESERVE|RETURNING|REVERSE-VIDEO|REWIND|' - r'RIGHT|ROLLBACK|ROUNDED|RUN|SAME|SCROLL|' - r'SECURE|SEGMENT-LIMIT|SELECT|SENTENCE|SEPARATE|' - r'SEQUENCE|SEQUENTIAL|SHARING|SIGN|SIGNED|SIGNED-INT|' - r'SIGNED-LONG|SIGNED-SHORT|SIZE|SORT-MERGE|SOURCE|' - r'SOURCE-COMPUTER|SPECIAL-NAMES|STANDARD|' - r'STANDARD-1|STANDARD-2|STATUS|SUM|' - r'SYMBOLIC|SYNC|SYNCHRONIZED|TALLYING|TAPE|' - r'TEST|THROUGH|THRU|TIME|TIMES|TO|TOP|TRAILING|' - r'TRANSFORM|TYPE|UNDERLINE|UNIT|UNSIGNED|' - r'UNSIGNED-INT|UNSIGNED-LONG|UNSIGNED-SHORT|UNTIL|UP|' - r'UPDATE|UPON|USAGE|USING|VALUE|VALUES|VARYING|WAIT|WHEN|' - r'WITH|WORDS|YYYYDDD|YYYYMMDD)' - r'\s*($|(?=[^0-9a-z_\-]))', Keyword.Pseudo), - - # inactive reserved words - (r'(^|(?<=[^0-9a-z_\-]))' - r'(ACTIVE-CLASS|ALIGNED|ANYCASE|ARITHMETIC|ATTRIBUTE|B-AND|' - r'B-NOT|B-OR|B-XOR|BIT|BOOLEAN|CD|CENTER|CF|CH|CHAIN|CLASS-ID|' - r'CLASSIFICATION|COMMUNICATION|CONDITION|DATA-POINTER|' - r'DESTINATION|DISABLE|EC|EGI|EMI|ENABLE|END-RECEIVE|' - r'ENTRY-CONVENTION|EO|ESI|EXCEPTION-OBJECT|EXPANDS|FACTORY|' - r'FLOAT-BINARY-16|FLOAT-BINARY-34|FLOAT-BINARY-7|' - r'FLOAT-DECIMAL-16|FLOAT-DECIMAL-34|FLOAT-EXTENDED|FORMAT|' - r'FUNCTION-POINTER|GET|GROUP-USAGE|IMPLEMENTS|INFINITY|' - r'INHERITS|INTERFACE|INTERFACE-ID|INVOKE|LC_ALL|LC_COLLATE|' - r'LC_CTYPE|LC_MESSAGES|LC_MONETARY|LC_NUMERIC|LC_TIME|' - r'LINE-COUNTER|MESSAGE|METHOD|METHOD-ID|NESTED|NONE|NORMAL|' - r'OBJECT|OBJECT-REFERENCE|OPTIONS|OVERRIDE|PAGE-COUNTER|PF|PH|' - r'PROPERTY|PROTOTYPE|PURGE|QUEUE|RAISE|RAISING|RECEIVE|' - r'RELATION|REPLACE|REPRESENTS-NOT-A-NUMBER|RESET|RESUME|RETRY|' - r'RF|RH|SECONDS|SEGMENT|SELF|SEND|SOURCES|STATEMENT|STEP|' - r'STRONG|SUB-QUEUE-1|SUB-QUEUE-2|SUB-QUEUE-3|SUPER|SYMBOL|' - r'SYSTEM-DEFAULT|TABLE|TERMINAL|TEXT|TYPEDEF|UCS-4|UNIVERSAL|' - r'USER-DEFAULT|UTF-16|UTF-8|VAL-STATUS|VALID|VALIDATE|' - r'VALIDATE-STATUS)\s*($|(?=[^0-9a-z_\-]))', Error), - - # Data Types - 
(r'(^|(?<=[^0-9a-z_\-]))' - r'(PIC\s+.+?(?=(\s|\.\s))|PICTURE\s+.+?(?=(\s|\.\s))|' - r'(COMPUTATIONAL)(-[1-5X])?|(COMP)(-[1-5X])?|' - r'BINARY-C-LONG|' - r'BINARY-CHAR|BINARY-DOUBLE|BINARY-LONG|BINARY-SHORT|' - r'BINARY)\s*($|(?=[^0-9a-z_\-]))', Keyword.Type), - - # Operators - (r'(\*\*|\*|\+|-|/|<=|>=|<|>|==|/=|=)', Operator), - - # (r'(::)', Keyword.Declaration), - - (r'([(),;:&%.])', Punctuation), - - # Intrinsics - (r'(^|(?<=[^0-9a-z_\-]))(ABS|ACOS|ANNUITY|ASIN|ATAN|BYTE-LENGTH|' - r'CHAR|COMBINED-DATETIME|CONCATENATE|COS|CURRENT-DATE|' - r'DATE-OF-INTEGER|DATE-TO-YYYYMMDD|DAY-OF-INTEGER|DAY-TO-YYYYDDD|' - r'EXCEPTION-(?:FILE|LOCATION|STATEMENT|STATUS)|EXP10|EXP|E|' - r'FACTORIAL|FRACTION-PART|INTEGER-OF-(?:DATE|DAY|PART)|INTEGER|' - r'LENGTH|LOCALE-(?:DATE|TIME(?:-FROM-SECONDS)?)|LOG10|LOG|' - r'LOWER-CASE|MAX|MEAN|MEDIAN|MIDRANGE|MIN|MOD|NUMVAL(?:-C)?|' - r'ORD(?:-MAX|-MIN)?|PI|PRESENT-VALUE|RANDOM|RANGE|REM|REVERSE|' - r'SECONDS-FROM-FORMATTED-TIME|SECONDS-PAST-MIDNIGHT|SIGN|SIN|SQRT|' - r'STANDARD-DEVIATION|STORED-CHAR-LENGTH|SUBSTITUTE(?:-CASE)?|' - r'SUM|TAN|TEST-DATE-YYYYMMDD|TEST-DAY-YYYYDDD|TRIM|' - r'UPPER-CASE|VARIANCE|WHEN-COMPILED|YEAR-TO-YYYY)\s*' - r'($|(?=[^0-9a-z_\-]))', Name.Function), - - # Booleans - (r'(^|(?<=[^0-9a-z_\-]))(true|false)\s*($|(?=[^0-9a-z_\-]))', Name.Builtin), - # Comparing Operators - (r'(^|(?<=[^0-9a-z_\-]))(equal|equals|ne|lt|le|gt|ge|' - r'greater|less|than|not|and|or)\s*($|(?=[^0-9a-z_\-]))', Operator.Word), - ], - - # \"[^\"\n]*\"|\'[^\'\n]*\' - 'strings': [ - # apparently strings can be delimited by EOL if they are continued - # in the next line - (r'"[^"\n]*("|\n)', String.Double), - (r"'[^'\n]*('|\n)", String.Single), - ], - - 'nums': [ - (r'\d+(\s*|\.$|$)', Number.Integer), - (r'[+-]?\d*\.\d+([eE][-+]?\d+)?', Number.Float), - (r'[+-]?\d+\.\d*([eE][-+]?\d+)?', Number.Float), - ], - } - - -class CobolFreeformatLexer(CobolLexer): - """ - Lexer for Free format OpenCOBOL code. 
- - *New in Pygments 1.6.* - """ - name = 'COBOLFree' - aliases = ['cobolfree'] - filenames = ['*.cbl', '*.CBL'] - mimetypes = [] - flags = re.IGNORECASE | re.MULTILINE - - tokens = { - 'comment': [ - (r'(\*>.*\n|^\w*\*.*$)', Comment), - ], - } - - -class LogosLexer(ObjectiveCppLexer): - """ - For Logos + Objective-C source code with preprocessor directives. - - *New in Pygments 1.6.* - """ - - name = 'Logos' - aliases = ['logos'] - filenames = ['*.x', '*.xi', '*.xm', '*.xmi'] - mimetypes = ['text/x-logos'] - priority = 0.25 - - tokens = { - 'statements': [ - (r'(%orig|%log)\b', Keyword), - (r'(%c)\b(\()(\s*)([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*)(\))', - bygroups(Keyword, Punctuation, Text, Name.Class, Text, Punctuation)), - (r'(%init)\b(\()', - bygroups(Keyword, Punctuation), 'logos_init_directive'), - (r'(%init)(?=\s*;)', bygroups(Keyword)), - (r'(%hook|%group)(\s+)([a-zA-Z$_][a-zA-Z0-9$_]+)', - bygroups(Keyword, Text, Name.Class), '#pop'), - (r'(%subclass)(\s+)', bygroups(Keyword, Text), - ('#pop', 'logos_classname')), - inherit, - ], - 'logos_init_directive' : [ - ('\s+', Text), - (',', Punctuation, ('logos_init_directive', '#pop')), - ('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*)(=)(\s*)([^);]*)', - bygroups(Name.Class, Text, Punctuation, Text, Text)), - ('([a-zA-Z$_][a-zA-Z0-9$_]*)', Name.Class), - ('\)', Punctuation, '#pop'), - ], - 'logos_classname' : [ - ('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*:\s*)([a-zA-Z$_][a-zA-Z0-9$_]*)?', - bygroups(Name.Class, Text, Name.Class), '#pop'), - ('([a-zA-Z$_][a-zA-Z0-9$_]*)', Name.Class, '#pop') - ], - 'root': [ - (r'(%subclass)(\s+)', bygroups(Keyword, Text), - 'logos_classname'), - (r'(%hook|%group)(\s+)([a-zA-Z$_][a-zA-Z0-9$_]+)', - bygroups(Keyword, Text, Name.Class)), - (r'(%config)(\s*\(\s*)(\w+)(\s*=\s*)(.*?)(\s*\)\s*)', - bygroups(Keyword, Text, Name.Variable, Text, String, Text)), - (r'(%ctor)(\s*)({)', bygroups(Keyword, Text, Punctuation), - 'function'), - (r'(%new)(\s*)(\()(\s*.*?\s*)(\))', - bygroups(Keyword, Text, Keyword, String, 
Keyword)), - (r'(\s*)(%end)(\s*)', bygroups(Text, Keyword, Text)), - inherit, - ], - } - - _logos_keywords = re.compile(r'%(?:hook|ctor|init|c\()') - - def analyse_text(text): - if LogosLexer._logos_keywords.search(text): - return 1.0 - return 0 diff --git a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/functional.py b/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/functional.py deleted file mode 100644 index 770e6bd..0000000 --- a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/functional.py +++ /dev/null @@ -1,2731 +0,0 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.functional - ~~~~~~~~~~~~~~~~~~~~~~~~~~ - - Lexers for functional languages. - - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import re - -from pygments.lexer import Lexer, RegexLexer, bygroups, include, do_insertions -from pygments.token import Text, Comment, Operator, Keyword, Name, \ - String, Number, Punctuation, Literal, Generic, Error - -__all__ = ['RacketLexer', 'SchemeLexer', 'CommonLispLexer', 'HaskellLexer', - 'AgdaLexer', 'LiterateHaskellLexer', 'LiterateAgdaLexer', - 'SMLLexer', 'OcamlLexer', 'ErlangLexer', 'ErlangShellLexer', - 'OpaLexer', 'CoqLexer', 'NewLispLexer', 'ElixirLexer', - 'ElixirConsoleLexer', 'KokaLexer'] - - -line_re = re.compile('.*?\n') - - -class RacketLexer(RegexLexer): - """ - Lexer for `Racket `_ source code (formerly known as - PLT Scheme). 
- - *New in Pygments 1.6.* - """ - - name = 'Racket' - aliases = ['racket', 'rkt'] - filenames = ['*.rkt', '*.rktl'] - mimetypes = ['text/x-racket', 'application/x-racket'] - - # From namespace-mapped-symbols - keywords = [ - '#%app', '#%datum', '#%expression', '#%module-begin', - '#%plain-app', '#%plain-lambda', '#%plain-module-begin', - '#%provide', '#%require', '#%stratified-body', '#%top', - '#%top-interaction', '#%variable-reference', '...', 'and', 'begin', - 'begin-for-syntax', 'begin0', 'case', 'case-lambda', 'cond', - 'datum->syntax-object', 'define', 'define-for-syntax', - 'define-struct', 'define-syntax', 'define-syntax-rule', - 'define-syntaxes', 'define-values', 'define-values-for-syntax', - 'delay', 'do', 'expand-path', 'fluid-let', 'hash-table-copy', - 'hash-table-count', 'hash-table-for-each', 'hash-table-get', - 'hash-table-iterate-first', 'hash-table-iterate-key', - 'hash-table-iterate-next', 'hash-table-iterate-value', - 'hash-table-map', 'hash-table-put!', 'hash-table-remove!', - 'hash-table?', 'if', 'lambda', 'let', 'let*', 'let*-values', - 'let-struct', 'let-syntax', 'let-syntaxes', 'let-values', 'let/cc', - 'let/ec', 'letrec', 'letrec-syntax', 'letrec-syntaxes', - 'letrec-syntaxes+values', 'letrec-values', 'list-immutable', - 'make-hash-table', 'make-immutable-hash-table', 'make-namespace', - 'module', 'module-identifier=?', 'module-label-identifier=?', - 'module-template-identifier=?', 'module-transformer-identifier=?', - 'namespace-transformer-require', 'or', 'parameterize', - 'parameterize*', 'parameterize-break', 'provide', - 'provide-for-label', 'provide-for-syntax', 'quasiquote', - 'quasisyntax', 'quasisyntax/loc', 'quote', 'quote-syntax', - 'quote-syntax/prune', 'require', 'require-for-label', - 'require-for-syntax', 'require-for-template', 'set!', - 'set!-values', 'syntax', 'syntax-case', 'syntax-case*', - 'syntax-id-rules', 'syntax-object->datum', 'syntax-rules', - 'syntax/loc', 'time', 'transcript-off', 'transcript-on', 'unless', - 
'unquote', 'unquote-splicing', 'unsyntax', 'unsyntax-splicing', - 'when', 'with-continuation-mark', 'with-handlers', - 'with-handlers*', 'with-syntax', 'λ' - ] - - # From namespace-mapped-symbols - builtins = [ - '*', '+', '-', '/', '<', '<=', '=', '>', '>=', - 'abort-current-continuation', 'abs', 'absolute-path?', 'acos', - 'add1', 'alarm-evt', 'always-evt', 'andmap', 'angle', 'append', - 'apply', 'arithmetic-shift', 'arity-at-least', - 'arity-at-least-value', 'arity-at-least?', 'asin', 'assoc', 'assq', - 'assv', 'atan', 'banner', 'bitwise-and', 'bitwise-bit-field', - 'bitwise-bit-set?', 'bitwise-ior', 'bitwise-not', 'bitwise-xor', - 'boolean?', 'bound-identifier=?', 'box', 'box-immutable', 'box?', - 'break-enabled', 'break-thread', 'build-path', - 'build-path/convention-type', 'byte-pregexp', 'byte-pregexp?', - 'byte-ready?', 'byte-regexp', 'byte-regexp?', 'byte?', 'bytes', - 'bytes->immutable-bytes', 'bytes->list', 'bytes->path', - 'bytes->path-element', 'bytes->string/latin-1', - 'bytes->string/locale', 'bytes->string/utf-8', 'bytes-append', - 'bytes-close-converter', 'bytes-convert', 'bytes-convert-end', - 'bytes-converter?', 'bytes-copy', 'bytes-copy!', 'bytes-fill!', - 'bytes-length', 'bytes-open-converter', 'bytes-ref', 'bytes-set!', - 'bytes-utf-8-index', 'bytes-utf-8-length', 'bytes-utf-8-ref', - 'bytes?', 'bytes?', 'caaaar', 'caaadr', - 'caaar', 'caadar', 'caaddr', 'caadr', 'caar', 'cadaar', 'cadadr', - 'cadar', 'caddar', 'cadddr', 'caddr', 'cadr', - 'call-in-nested-thread', 'call-with-break-parameterization', - 'call-with-composable-continuation', - 'call-with-continuation-barrier', 'call-with-continuation-prompt', - 'call-with-current-continuation', 'call-with-escape-continuation', - 'call-with-exception-handler', - 'call-with-immediate-continuation-mark', 'call-with-input-file', - 'call-with-output-file', 'call-with-parameterization', - 'call-with-semaphore', 'call-with-semaphore/enable-break', - 'call-with-values', 'call/cc', 'call/ec', 'car', 
'cdaaar', - 'cdaadr', 'cdaar', 'cdadar', 'cdaddr', 'cdadr', 'cdar', 'cddaar', - 'cddadr', 'cddar', 'cdddar', 'cddddr', 'cdddr', 'cddr', 'cdr', - 'ceiling', 'channel-get', 'channel-put', 'channel-put-evt', - 'channel-try-get', 'channel?', 'chaperone-box', 'chaperone-evt', - 'chaperone-hash', 'chaperone-of?', 'chaperone-procedure', - 'chaperone-struct', 'chaperone-struct-type', 'chaperone-vector', - 'chaperone?', 'char->integer', 'char-alphabetic?', 'char-blank?', - 'char-ci<=?', 'char-ci=?', 'char-ci>?', - 'char-downcase', 'char-foldcase', 'char-general-category', - 'char-graphic?', 'char-iso-control?', 'char-lower-case?', - 'char-numeric?', 'char-punctuation?', 'char-ready?', - 'char-symbolic?', 'char-title-case?', 'char-titlecase', - 'char-upcase', 'char-upper-case?', 'char-utf-8-length', - 'char-whitespace?', 'char<=?', 'char=?', - 'char>?', 'char?', 'check-duplicate-identifier', - 'checked-procedure-check-and-extract', 'choice-evt', - 'cleanse-path', 'close-input-port', 'close-output-port', - 'collect-garbage', 'collection-file-path', 'collection-path', - 'compile', 'compile-allow-set!-undefined', - 'compile-context-preservation-enabled', - 'compile-enforce-module-constants', 'compile-syntax', - 'compiled-expression?', 'compiled-module-expression?', - 'complete-path?', 'complex?', 'cons', - 'continuation-mark-set->context', 'continuation-mark-set->list', - 'continuation-mark-set->list*', 'continuation-mark-set-first', - 'continuation-mark-set?', 'continuation-marks', - 'continuation-prompt-available?', 'continuation-prompt-tag?', - 'continuation?', 'copy-file', 'cos', - 'current-break-parameterization', 'current-code-inspector', - 'current-command-line-arguments', 'current-compile', - 'current-continuation-marks', 'current-custodian', - 'current-directory', 'current-drive', 'current-error-port', - 'current-eval', 'current-evt-pseudo-random-generator', - 'current-gc-milliseconds', 'current-get-interaction-input-port', - 'current-inexact-milliseconds', 
'current-input-port', - 'current-inspector', 'current-library-collection-paths', - 'current-load', 'current-load-extension', - 'current-load-relative-directory', 'current-load/use-compiled', - 'current-locale', 'current-memory-use', 'current-milliseconds', - 'current-module-declare-name', 'current-module-declare-source', - 'current-module-name-resolver', 'current-namespace', - 'current-output-port', 'current-parameterization', - 'current-preserved-thread-cell-values', 'current-print', - 'current-process-milliseconds', 'current-prompt-read', - 'current-pseudo-random-generator', 'current-read-interaction', - 'current-reader-guard', 'current-readtable', 'current-seconds', - 'current-security-guard', 'current-subprocess-custodian-mode', - 'current-thread', 'current-thread-group', - 'current-thread-initial-stack-size', - 'current-write-relative-directory', 'custodian-box-value', - 'custodian-box?', 'custodian-limit-memory', - 'custodian-managed-list', 'custodian-memory-accounting-available?', - 'custodian-require-memory', 'custodian-shutdown-all', 'custodian?', - 'custom-print-quotable-accessor', 'custom-print-quotable?', - 'custom-write-accessor', 'custom-write?', 'date', 'date*', - 'date*-nanosecond', 'date*-time-zone-name', 'date*?', 'date-day', - 'date-dst?', 'date-hour', 'date-minute', 'date-month', - 'date-second', 'date-time-zone-offset', 'date-week-day', - 'date-year', 'date-year-day', 'date?', 'datum-intern-literal', - 'default-continuation-prompt-tag', 'delete-directory', - 'delete-file', 'denominator', 'directory-exists?', - 'directory-list', 'display', 'displayln', 'dump-memory-stats', - 'dynamic-require', 'dynamic-require-for-syntax', 'dynamic-wind', - 'eof', 'eof-object?', 'ephemeron-value', 'ephemeron?', 'eprintf', - 'eq-hash-code', 'eq?', 'equal-hash-code', - 'equal-secondary-hash-code', 'equal?', 'equal?/recur', - 'eqv-hash-code', 'eqv?', 'error', 'error-display-handler', - 'error-escape-handler', 'error-print-context-length', - 
'error-print-source-location', 'error-print-width', - 'error-value->string-handler', 'eval', 'eval-jit-enabled', - 'eval-syntax', 'even?', 'evt?', 'exact->inexact', 'exact-integer?', - 'exact-nonnegative-integer?', 'exact-positive-integer?', 'exact?', - 'executable-yield-handler', 'exit', 'exit-handler', 'exn', - 'exn-continuation-marks', 'exn-message', 'exn:break', - 'exn:break-continuation', 'exn:break?', 'exn:fail', - 'exn:fail:contract', 'exn:fail:contract:arity', - 'exn:fail:contract:arity?', 'exn:fail:contract:continuation', - 'exn:fail:contract:continuation?', - 'exn:fail:contract:divide-by-zero', - 'exn:fail:contract:divide-by-zero?', - 'exn:fail:contract:non-fixnum-result', - 'exn:fail:contract:non-fixnum-result?', - 'exn:fail:contract:variable', 'exn:fail:contract:variable-id', - 'exn:fail:contract:variable?', 'exn:fail:contract?', - 'exn:fail:filesystem', 'exn:fail:filesystem:exists', - 'exn:fail:filesystem:exists?', 'exn:fail:filesystem:version', - 'exn:fail:filesystem:version?', 'exn:fail:filesystem?', - 'exn:fail:network', 'exn:fail:network?', 'exn:fail:out-of-memory', - 'exn:fail:out-of-memory?', 'exn:fail:read', - 'exn:fail:read-srclocs', 'exn:fail:read:eof', 'exn:fail:read:eof?', - 'exn:fail:read:non-char', 'exn:fail:read:non-char?', - 'exn:fail:read?', 'exn:fail:syntax', 'exn:fail:syntax-exprs', - 'exn:fail:syntax:unbound', 'exn:fail:syntax:unbound?', - 'exn:fail:syntax?', 'exn:fail:unsupported', - 'exn:fail:unsupported?', 'exn:fail:user', 'exn:fail:user?', - 'exn:fail?', 'exn:srclocs-accessor', 'exn:srclocs?', 'exn?', 'exp', - 'expand', 'expand-once', 'expand-syntax', 'expand-syntax-once', - 'expand-syntax-to-top-form', 'expand-to-top-form', - 'expand-user-path', 'expt', 'file-exists?', - 'file-or-directory-identity', 'file-or-directory-modify-seconds', - 'file-or-directory-permissions', 'file-position', 'file-size', - 'file-stream-buffer-mode', 'file-stream-port?', - 'filesystem-root-list', 'find-executable-path', - 
'find-library-collection-paths', 'find-system-path', 'fixnum?', - 'floating-point-bytes->real', 'flonum?', 'floor', 'flush-output', - 'for-each', 'force', 'format', 'fprintf', 'free-identifier=?', - 'gcd', 'generate-temporaries', 'gensym', 'get-output-bytes', - 'get-output-string', 'getenv', 'global-port-print-handler', - 'guard-evt', 'handle-evt', 'handle-evt?', 'hash', 'hash-equal?', - 'hash-eqv?', 'hash-has-key?', 'hash-placeholder?', 'hash-ref!', - 'hasheq', 'hasheqv', 'identifier-binding', - 'identifier-label-binding', 'identifier-prune-lexical-context', - 'identifier-prune-to-source-module', - 'identifier-remove-from-definition-context', - 'identifier-template-binding', 'identifier-transformer-binding', - 'identifier?', 'imag-part', 'immutable?', 'impersonate-box', - 'impersonate-hash', 'impersonate-procedure', 'impersonate-struct', - 'impersonate-vector', 'impersonator-of?', - 'impersonator-prop:application-mark', - 'impersonator-property-accessor-procedure?', - 'impersonator-property?', 'impersonator?', 'inexact->exact', - 'inexact-real?', 'inexact?', 'input-port?', 'inspector?', - 'integer->char', 'integer->integer-bytes', - 'integer-bytes->integer', 'integer-length', 'integer-sqrt', - 'integer-sqrt/remainder', 'integer?', - 'internal-definition-context-seal', 'internal-definition-context?', - 'keyword->string', 'keywordbytes', 'list->string', 'list->vector', 'list-ref', - 'list-tail', 'list?', 'load', 'load-extension', - 'load-on-demand-enabled', 'load-relative', - 'load-relative-extension', 'load/cd', 'load/use-compiled', - 'local-expand', 'local-expand/capture-lifts', - 'local-transformer-expand', - 'local-transformer-expand/capture-lifts', 'locale-string-encoding', - 'log', 'magnitude', 'make-arity-at-least', 'make-bytes', - 'make-channel', 'make-continuation-prompt-tag', 'make-custodian', - 'make-custodian-box', 'make-date', 'make-date*', - 'make-derived-parameter', 'make-directory', 'make-ephemeron', - 'make-exn', 'make-exn:break', 'make-exn:fail', - 
'make-exn:fail:contract', 'make-exn:fail:contract:arity', - 'make-exn:fail:contract:continuation', - 'make-exn:fail:contract:divide-by-zero', - 'make-exn:fail:contract:non-fixnum-result', - 'make-exn:fail:contract:variable', 'make-exn:fail:filesystem', - 'make-exn:fail:filesystem:exists', - 'make-exn:fail:filesystem:version', 'make-exn:fail:network', - 'make-exn:fail:out-of-memory', 'make-exn:fail:read', - 'make-exn:fail:read:eof', 'make-exn:fail:read:non-char', - 'make-exn:fail:syntax', 'make-exn:fail:syntax:unbound', - 'make-exn:fail:unsupported', 'make-exn:fail:user', - 'make-file-or-directory-link', 'make-hash-placeholder', - 'make-hasheq-placeholder', 'make-hasheqv', - 'make-hasheqv-placeholder', 'make-immutable-hasheqv', - 'make-impersonator-property', 'make-input-port', 'make-inspector', - 'make-known-char-range-list', 'make-output-port', 'make-parameter', - 'make-pipe', 'make-placeholder', 'make-polar', - 'make-prefab-struct', 'make-pseudo-random-generator', - 'make-reader-graph', 'make-readtable', 'make-rectangular', - 'make-rename-transformer', 'make-resolved-module-path', - 'make-security-guard', 'make-semaphore', 'make-set!-transformer', - 'make-shared-bytes', 'make-sibling-inspector', - 'make-special-comment', 'make-srcloc', 'make-string', - 'make-struct-field-accessor', 'make-struct-field-mutator', - 'make-struct-type', 'make-struct-type-property', - 'make-syntax-delta-introducer', 'make-syntax-introducer', - 'make-thread-cell', 'make-thread-group', 'make-vector', - 'make-weak-box', 'make-weak-hasheqv', 'make-will-executor', 'map', - 'max', 'mcar', 'mcdr', 'mcons', 'member', 'memq', 'memv', 'min', - 'module->exports', 'module->imports', 'module->language-info', - 'module->namespace', 'module-compiled-exports', - 'module-compiled-imports', 'module-compiled-language-info', - 'module-compiled-name', 'module-path-index-join', - 'module-path-index-resolve', 'module-path-index-split', - 'module-path-index?', 'module-path?', 'module-predefined?', - 
'module-provide-protected?', 'modulo', 'mpair?', 'nack-guard-evt', - 'namespace-attach-module', 'namespace-attach-module-declaration', - 'namespace-base-phase', 'namespace-mapped-symbols', - 'namespace-module-identifier', 'namespace-module-registry', - 'namespace-require', 'namespace-require/constant', - 'namespace-require/copy', 'namespace-require/expansion-time', - 'namespace-set-variable-value!', 'namespace-symbol->identifier', - 'namespace-syntax-introduce', 'namespace-undefine-variable!', - 'namespace-unprotect-module', 'namespace-variable-value', - 'namespace?', 'negative?', 'never-evt', 'newline', - 'normal-case-path', 'not', 'null', 'null?', 'number->string', - 'number?', 'numerator', 'object-name', 'odd?', 'open-input-bytes', - 'open-input-file', 'open-input-output-file', 'open-input-string', - 'open-output-bytes', 'open-output-file', 'open-output-string', - 'ormap', 'output-port?', 'pair?', 'parameter-procedure=?', - 'parameter?', 'parameterization?', 'path->bytes', - 'path->complete-path', 'path->directory-path', 'path->string', - 'path-add-suffix', 'path-convention-type', 'path-element->bytes', - 'path-element->string', 'path-for-some-system?', - 'path-list-string->path-list', 'path-replace-suffix', - 'path-string?', 'path?', 'peek-byte', 'peek-byte-or-special', - 'peek-bytes', 'peek-bytes!', 'peek-bytes-avail!', - 'peek-bytes-avail!*', 'peek-bytes-avail!/enable-break', - 'peek-char', 'peek-char-or-special', 'peek-string', 'peek-string!', - 'pipe-content-length', 'placeholder-get', 'placeholder-set!', - 'placeholder?', 'poll-guard-evt', 'port-closed-evt', - 'port-closed?', 'port-commit-peeked', 'port-count-lines!', - 'port-count-lines-enabled', 'port-display-handler', - 'port-file-identity', 'port-file-unlock', 'port-next-location', - 'port-print-handler', 'port-progress-evt', - 'port-provides-progress-evts?', 'port-read-handler', - 'port-try-file-lock?', 'port-write-handler', 'port-writes-atomic?', - 'port-writes-special?', 'port?', 'positive?', - 
'prefab-key->struct-type', 'prefab-struct-key', 'pregexp', - 'pregexp?', 'primitive-closure?', 'primitive-result-arity', - 'primitive?', 'print', 'print-as-expression', - 'print-boolean-long-form', 'print-box', 'print-graph', - 'print-hash-table', 'print-mpair-curly-braces', - 'print-pair-curly-braces', 'print-reader-abbreviations', - 'print-struct', 'print-syntax-width', 'print-unreadable', - 'print-vector-length', 'printf', 'procedure->method', - 'procedure-arity', 'procedure-arity-includes?', 'procedure-arity?', - 'procedure-closure-contents-eq?', 'procedure-extract-target', - 'procedure-reduce-arity', 'procedure-rename', - 'procedure-struct-type?', 'procedure?', 'promise?', - 'prop:arity-string', 'prop:checked-procedure', - 'prop:custom-print-quotable', 'prop:custom-write', - 'prop:equal+hash', 'prop:evt', 'prop:exn:srclocs', - 'prop:impersonator-of', 'prop:input-port', - 'prop:liberal-define-context', 'prop:output-port', - 'prop:procedure', 'prop:rename-transformer', - 'prop:set!-transformer', 'pseudo-random-generator->vector', - 'pseudo-random-generator-vector?', 'pseudo-random-generator?', - 'putenv', 'quotient', 'quotient/remainder', 'raise', - 'raise-arity-error', 'raise-mismatch-error', 'raise-syntax-error', - 'raise-type-error', 'raise-user-error', 'random', 'random-seed', - 'rational?', 'rationalize', 'read', 'read-accept-bar-quote', - 'read-accept-box', 'read-accept-compiled', 'read-accept-dot', - 'read-accept-graph', 'read-accept-infix-dot', 'read-accept-lang', - 'read-accept-quasiquote', 'read-accept-reader', 'read-byte', - 'read-byte-or-special', 'read-bytes', 'read-bytes!', - 'read-bytes-avail!', 'read-bytes-avail!*', - 'read-bytes-avail!/enable-break', 'read-bytes-line', - 'read-case-sensitive', 'read-char', 'read-char-or-special', - 'read-curly-brace-as-paren', 'read-decimal-as-inexact', - 'read-eval-print-loop', 'read-language', 'read-line', - 'read-on-demand-source', 'read-square-bracket-as-paren', - 'read-string', 'read-string!', 
'read-syntax', - 'read-syntax/recursive', 'read/recursive', 'readtable-mapping', - 'readtable?', 'real->double-flonum', 'real->floating-point-bytes', - 'real->single-flonum', 'real-part', 'real?', 'regexp', - 'regexp-match', 'regexp-match-peek', 'regexp-match-peek-immediate', - 'regexp-match-peek-positions', - 'regexp-match-peek-positions-immediate', - 'regexp-match-peek-positions-immediate/end', - 'regexp-match-peek-positions/end', 'regexp-match-positions', - 'regexp-match-positions/end', 'regexp-match/end', 'regexp-match?', - 'regexp-max-lookbehind', 'regexp-replace', 'regexp-replace*', - 'regexp?', 'relative-path?', 'remainder', - 'rename-file-or-directory', 'rename-transformer-target', - 'rename-transformer?', 'resolve-path', 'resolved-module-path-name', - 'resolved-module-path?', 'reverse', 'round', 'seconds->date', - 'security-guard?', 'semaphore-peek-evt', 'semaphore-post', - 'semaphore-try-wait?', 'semaphore-wait', - 'semaphore-wait/enable-break', 'semaphore?', - 'set!-transformer-procedure', 'set!-transformer?', 'set-box!', - 'set-mcar!', 'set-mcdr!', 'set-port-next-location!', - 'shared-bytes', 'shell-execute', 'simplify-path', 'sin', - 'single-flonum?', 'sleep', 'special-comment-value', - 'special-comment?', 'split-path', 'sqrt', 'srcloc', - 'srcloc-column', 'srcloc-line', 'srcloc-position', 'srcloc-source', - 'srcloc-span', 'srcloc?', 'string', 'string->bytes/latin-1', - 'string->bytes/locale', 'string->bytes/utf-8', - 'string->immutable-string', 'string->keyword', 'string->list', - 'string->number', 'string->path', 'string->path-element', - 'string->symbol', 'string->uninterned-symbol', - 'string->unreadable-symbol', 'string-append', 'string-ci<=?', - 'string-ci=?', 'string-ci>?', - 'string-copy', 'string-copy!', 'string-downcase', 'string-fill!', - 'string-foldcase', 'string-length', 'string-locale-ci?', - 'string-locale-downcase', 'string-locale-upcase', - 'string-locale?', - 'string-normalize-nfc', 'string-normalize-nfd', - 'string-normalize-nfkc', 
'string-normalize-nfkd', 'string-ref', - 'string-set!', 'string-titlecase', 'string-upcase', - 'string-utf-8-length', 'string<=?', 'string=?', 'string>?', 'string?', 'struct->vector', - 'struct-accessor-procedure?', 'struct-constructor-procedure?', - 'struct-info', 'struct-mutator-procedure?', - 'struct-predicate-procedure?', 'struct-type-info', - 'struct-type-make-constructor', 'struct-type-make-predicate', - 'struct-type-property-accessor-procedure?', - 'struct-type-property?', 'struct-type?', 'struct:arity-at-least', - 'struct:date', 'struct:date*', 'struct:exn', 'struct:exn:break', - 'struct:exn:fail', 'struct:exn:fail:contract', - 'struct:exn:fail:contract:arity', - 'struct:exn:fail:contract:continuation', - 'struct:exn:fail:contract:divide-by-zero', - 'struct:exn:fail:contract:non-fixnum-result', - 'struct:exn:fail:contract:variable', 'struct:exn:fail:filesystem', - 'struct:exn:fail:filesystem:exists', - 'struct:exn:fail:filesystem:version', 'struct:exn:fail:network', - 'struct:exn:fail:out-of-memory', 'struct:exn:fail:read', - 'struct:exn:fail:read:eof', 'struct:exn:fail:read:non-char', - 'struct:exn:fail:syntax', 'struct:exn:fail:syntax:unbound', - 'struct:exn:fail:unsupported', 'struct:exn:fail:user', - 'struct:srcloc', 'struct?', 'sub1', 'subbytes', 'subprocess', - 'subprocess-group-enabled', 'subprocess-kill', 'subprocess-pid', - 'subprocess-status', 'subprocess-wait', 'subprocess?', 'substring', - 'symbol->string', 'symbol-interned?', 'symbol-unreadable?', - 'symbol?', 'sync', 'sync/enable-break', 'sync/timeout', - 'sync/timeout/enable-break', 'syntax->list', 'syntax-arm', - 'syntax-column', 'syntax-disarm', 'syntax-e', 'syntax-line', - 'syntax-local-bind-syntaxes', 'syntax-local-certifier', - 'syntax-local-context', 'syntax-local-expand-expression', - 'syntax-local-get-shadower', 'syntax-local-introduce', - 'syntax-local-lift-context', 'syntax-local-lift-expression', - 'syntax-local-lift-module-end-declaration', - 'syntax-local-lift-provide', 
'syntax-local-lift-require', - 'syntax-local-lift-values-expression', - 'syntax-local-make-definition-context', - 'syntax-local-make-delta-introducer', - 'syntax-local-module-defined-identifiers', - 'syntax-local-module-exports', - 'syntax-local-module-required-identifiers', 'syntax-local-name', - 'syntax-local-phase-level', - 'syntax-local-transforming-module-provides?', 'syntax-local-value', - 'syntax-local-value/immediate', 'syntax-original?', - 'syntax-position', 'syntax-property', - 'syntax-property-symbol-keys', 'syntax-protect', 'syntax-rearm', - 'syntax-recertify', 'syntax-shift-phase-level', 'syntax-source', - 'syntax-source-module', 'syntax-span', 'syntax-taint', - 'syntax-tainted?', 'syntax-track-origin', - 'syntax-transforming-module-expression?', 'syntax-transforming?', - 'syntax?', 'system-big-endian?', 'system-idle-evt', - 'system-language+country', 'system-library-subpath', - 'system-path-convention-type', 'system-type', 'tan', - 'tcp-abandon-port', 'tcp-accept', 'tcp-accept-evt', - 'tcp-accept-ready?', 'tcp-accept/enable-break', 'tcp-addresses', - 'tcp-close', 'tcp-connect', 'tcp-connect/enable-break', - 'tcp-listen', 'tcp-listener?', 'tcp-port?', 'terminal-port?', - 'thread', 'thread-cell-ref', 'thread-cell-set!', 'thread-cell?', - 'thread-dead-evt', 'thread-dead?', 'thread-group?', - 'thread-resume', 'thread-resume-evt', 'thread-rewind-receive', - 'thread-running?', 'thread-suspend', 'thread-suspend-evt', - 'thread-wait', 'thread/suspend-to-kill', 'thread?', 'time-apply', - 'truncate', 'udp-addresses', 'udp-bind!', 'udp-bound?', - 'udp-close', 'udp-connect!', 'udp-connected?', 'udp-open-socket', - 'udp-receive!', 'udp-receive!*', 'udp-receive!-evt', - 'udp-receive!/enable-break', 'udp-receive-ready-evt', 'udp-send', - 'udp-send*', 'udp-send-evt', 'udp-send-ready-evt', 'udp-send-to', - 'udp-send-to*', 'udp-send-to-evt', 'udp-send-to/enable-break', - 'udp-send/enable-break', 'udp?', 'unbox', - 'uncaught-exception-handler', 
'use-collection-link-paths', - 'use-compiled-file-paths', 'use-user-specific-search-paths', - 'values', 'variable-reference->empty-namespace', - 'variable-reference->module-base-phase', - 'variable-reference->module-declaration-inspector', - 'variable-reference->module-source', - 'variable-reference->namespace', 'variable-reference->phase', - 'variable-reference->resolved-module-path', - 'variable-reference-constant?', 'variable-reference?', 'vector', - 'vector->immutable-vector', 'vector->list', - 'vector->pseudo-random-generator', - 'vector->pseudo-random-generator!', 'vector->values', - 'vector-fill!', 'vector-immutable', 'vector-length', 'vector-ref', - 'vector-set!', 'vector-set-performance-stats!', 'vector?', - 'version', 'void', 'void?', 'weak-box-value', 'weak-box?', - 'will-execute', 'will-executor?', 'will-register', - 'will-try-execute', 'with-input-from-file', 'with-output-to-file', - 'wrap-evt', 'write', 'write-byte', 'write-bytes', - 'write-bytes-avail', 'write-bytes-avail*', 'write-bytes-avail-evt', - 'write-bytes-avail/enable-break', 'write-char', 'write-special', - 'write-special-avail*', 'write-special-evt', 'write-string', 'zero?' - ] - - # From SchemeLexer - valid_name = r'[a-zA-Z0-9!$%&*+,/:<=>?@^_~|-]+' - - tokens = { - 'root' : [ - (r';.*$', Comment.Single), - (r'#\|[^|]+\|#', Comment.Multiline), - - # whitespaces - usually not relevant - (r'\s+', Text), - - ## numbers: Keep in mind Racket reader hash prefixes, - ## which can denote the base or the type. These don't map - ## neatly onto pygments token types; some judgment calls - ## here. Note that none of these regexps attempt to - ## exclude identifiers that start with a number, such as a - ## variable named "100-Continue". 
- - # #b - (r'#b[-+]?[01]+\.[01]+', Number.Float), - (r'#b[01]+e[-+]?[01]+', Number.Float), - (r'#b[-+]?[01]/[01]+', Number), - (r'#b[-+]?[01]+', Number.Integer), - (r'#b\S*', Error), - - # #d OR no hash prefix - (r'(#d)?[-+]?\d+\.\d+', Number.Float), - (r'(#d)?\d+e[-+]?\d+', Number.Float), - (r'(#d)?[-+]?\d+/\d+', Number), - (r'(#d)?[-+]?\d+', Number.Integer), - (r'#d\S*', Error), - - # #e - (r'#e[-+]?\d+\.\d+', Number.Float), - (r'#e\d+e[-+]?\d+', Number.Float), - (r'#e[-+]?\d+/\d+', Number), - (r'#e[-+]?\d+', Number), - (r'#e\S*', Error), - - # #i is always inexact-real, i.e. float - (r'#i[-+]?\d+\.\d+', Number.Float), - (r'#i\d+e[-+]?\d+', Number.Float), - (r'#i[-+]?\d+/\d+', Number.Float), - (r'#i[-+]?\d+', Number.Float), - (r'#i\S*', Error), - - # #o - (r'#o[-+]?[0-7]+\.[0-7]+', Number.Oct), - (r'#o[0-7]+e[-+]?[0-7]+', Number.Oct), - (r'#o[-+]?[0-7]+/[0-7]+', Number.Oct), - (r'#o[-+]?[0-7]+', Number.Oct), - (r'#o\S*', Error), - - # #x - (r'#x[-+]?[0-9a-fA-F]+\.[0-9a-fA-F]+', Number.Hex), - # the exponent variation (e.g. #x1e1) is N/A - (r'#x[-+]?[0-9a-fA-F]+/[0-9a-fA-F]+', Number.Hex), - (r'#x[-+]?[0-9a-fA-F]+', Number.Hex), - (r'#x\S*', Error), - - - # strings, symbols and characters - (r'"(\\\\|\\"|[^"])*"', String), - (r"'" + valid_name, String.Symbol), - (r"#\\([()/'\"._!§$%& ?=+-]{1}|[a-zA-Z0-9]+)", String.Char), - (r'#rx".+"', String.Regex), - (r'#px".+"', String.Regex), - - # constants - (r'(#t|#f)', Name.Constant), - - # keyword argument names (e.g. 
#:keyword) - (r'#:\S+', Keyword.Declaration), - - # #lang - (r'#lang \S+', Keyword.Namespace), - - # special operators - (r"('|#|`|,@|,|\.)", Operator), - - # highlight the keywords - ('(%s)' % '|'.join([ - re.escape(entry) + ' ' for entry in keywords]), - Keyword - ), - - # first variable in a quoted string like - # '(this is syntactic sugar) - (r"(?<='\()" + valid_name, Name.Variable), - (r"(?<=#\()" + valid_name, Name.Variable), - - # highlight the builtins - ("(?<=\()(%s)" % '|'.join([ - re.escape(entry) + ' ' for entry in builtins]), - Name.Builtin - ), - - # the remaining functions; handle both ( and [ - (r'(?<=(\(|\[|\{))' + valid_name, Name.Function), - - # find the remaining variables - (valid_name, Name.Variable), - - # the famous parentheses! - (r'(\(|\)|\[|\]|\{|\})', Punctuation), - ], - } - - -class SchemeLexer(RegexLexer): - """ - A Scheme lexer, parsing a stream and outputting the tokens - needed to highlight scheme code. - This lexer could be most probably easily subclassed to parse - other LISP-Dialects like Common Lisp, Emacs Lisp or AutoLisp. - - This parser is checked with pastes from the LISP pastebin - at http://paste.lisp.org/ to cover as much syntax as possible. - - It supports the full Scheme syntax as defined in R5RS. - - *New in Pygments 0.6.* - """ - name = 'Scheme' - aliases = ['scheme', 'scm'] - filenames = ['*.scm', '*.ss'] - mimetypes = ['text/x-scheme', 'application/x-scheme'] - - # list of known keywords and builtins taken form vim 6.4 scheme.vim - # syntax file. 
- keywords = [ - 'lambda', 'define', 'if', 'else', 'cond', 'and', 'or', 'case', 'let', - 'let*', 'letrec', 'begin', 'do', 'delay', 'set!', '=>', 'quote', - 'quasiquote', 'unquote', 'unquote-splicing', 'define-syntax', - 'let-syntax', 'letrec-syntax', 'syntax-rules' - ] - builtins = [ - '*', '+', '-', '/', '<', '<=', '=', '>', '>=', 'abs', 'acos', 'angle', - 'append', 'apply', 'asin', 'assoc', 'assq', 'assv', 'atan', - 'boolean?', 'caaaar', 'caaadr', 'caaar', 'caadar', 'caaddr', 'caadr', - 'caar', 'cadaar', 'cadadr', 'cadar', 'caddar', 'cadddr', 'caddr', - 'cadr', 'call-with-current-continuation', 'call-with-input-file', - 'call-with-output-file', 'call-with-values', 'call/cc', 'car', - 'cdaaar', 'cdaadr', 'cdaar', 'cdadar', 'cdaddr', 'cdadr', 'cdar', - 'cddaar', 'cddadr', 'cddar', 'cdddar', 'cddddr', 'cdddr', 'cddr', - 'cdr', 'ceiling', 'char->integer', 'char-alphabetic?', 'char-ci<=?', - 'char-ci=?', 'char-ci>?', 'char-downcase', - 'char-lower-case?', 'char-numeric?', 'char-ready?', 'char-upcase', - 'char-upper-case?', 'char-whitespace?', 'char<=?', 'char=?', 'char>?', 'char?', 'close-input-port', 'close-output-port', - 'complex?', 'cons', 'cos', 'current-input-port', 'current-output-port', - 'denominator', 'display', 'dynamic-wind', 'eof-object?', 'eq?', - 'equal?', 'eqv?', 'eval', 'even?', 'exact->inexact', 'exact?', 'exp', - 'expt', 'floor', 'for-each', 'force', 'gcd', 'imag-part', - 'inexact->exact', 'inexact?', 'input-port?', 'integer->char', - 'integer?', 'interaction-environment', 'lcm', 'length', 'list', - 'list->string', 'list->vector', 'list-ref', 'list-tail', 'list?', - 'load', 'log', 'magnitude', 'make-polar', 'make-rectangular', - 'make-string', 'make-vector', 'map', 'max', 'member', 'memq', 'memv', - 'min', 'modulo', 'negative?', 'newline', 'not', 'null-environment', - 'null?', 'number->string', 'number?', 'numerator', 'odd?', - 'open-input-file', 'open-output-file', 'output-port?', 'pair?', - 'peek-char', 'port?', 'positive?', 'procedure?', 
'quotient', - 'rational?', 'rationalize', 'read', 'read-char', 'real-part', 'real?', - 'remainder', 'reverse', 'round', 'scheme-report-environment', - 'set-car!', 'set-cdr!', 'sin', 'sqrt', 'string', 'string->list', - 'string->number', 'string->symbol', 'string-append', 'string-ci<=?', - 'string-ci=?', 'string-ci>?', - 'string-copy', 'string-fill!', 'string-length', 'string-ref', - 'string-set!', 'string<=?', 'string=?', - 'string>?', 'string?', 'substring', 'symbol->string', 'symbol?', - 'tan', 'transcript-off', 'transcript-on', 'truncate', 'values', - 'vector', 'vector->list', 'vector-fill!', 'vector-length', - 'vector-ref', 'vector-set!', 'vector?', 'with-input-from-file', - 'with-output-to-file', 'write', 'write-char', 'zero?' - ] - - # valid names for identifiers - # well, names can only not consist fully of numbers - # but this should be good enough for now - valid_name = r'[a-zA-Z0-9!$%&*+,/:<=>?@^_~|-]+' - - tokens = { - 'root' : [ - # the comments - always starting with semicolon - # and going to the end of the line - (r';.*$', Comment.Single), - - # whitespaces - usually not relevant - (r'\s+', Text), - - # numbers - (r'-?\d+\.\d+', Number.Float), - (r'-?\d+', Number.Integer), - # support for uncommon kinds of numbers - - # have to figure out what the characters mean - #(r'(#e|#i|#b|#o|#d|#x)[\d.]+', Number), - - # strings, symbols and characters - (r'"(\\\\|\\"|[^"])*"', String), - (r"'" + valid_name, String.Symbol), - (r"#\\([()/'\"._!§$%& ?=+-]{1}|[a-zA-Z0-9]+)", String.Char), - - # constants - (r'(#t|#f)', Name.Constant), - - # special operators - (r"('|#|`|,@|,|\.)", Operator), - - # highlight the keywords - ('(%s)' % '|'.join([ - re.escape(entry) + ' ' for entry in keywords]), - Keyword - ), - - # first variable in a quoted string like - # '(this is syntactic sugar) - (r"(?<='\()" + valid_name, Name.Variable), - (r"(?<=#\()" + valid_name, Name.Variable), - - # highlight the builtins - ("(?<=\()(%s)" % '|'.join([ - re.escape(entry) + ' ' for entry in 
builtins]), - Name.Builtin - ), - - # the remaining functions - (r'(?<=\()' + valid_name, Name.Function), - # find the remaining variables - (valid_name, Name.Variable), - - # the famous parentheses! - (r'(\(|\))', Punctuation), - (r'(\[|\])', Punctuation), - ], - } - - -class CommonLispLexer(RegexLexer): - """ - A Common Lisp lexer. - - *New in Pygments 0.9.* - """ - name = 'Common Lisp' - aliases = ['common-lisp', 'cl', 'lisp'] - filenames = ['*.cl', '*.lisp', '*.el'] # use for Elisp too - mimetypes = ['text/x-common-lisp'] - - flags = re.IGNORECASE | re.MULTILINE - - ### couple of useful regexes - - # characters that are not macro-characters and can be used to begin a symbol - nonmacro = r'\\.|[a-zA-Z0-9!$%&*+-/<=>?@\[\]^_{}~]' - constituent = nonmacro + '|[#.:]' - terminated = r'(?=[ "()\'\n,;`])' # whitespace or terminating macro characters - - ### symbol token, reverse-engineered from hyperspec - # Take a deep breath... - symbol = r'(\|[^|]+\||(?:%s)(?:%s)*)' % (nonmacro, constituent) - - def __init__(self, **options): - from pygments.lexers._clbuiltins import BUILTIN_FUNCTIONS, \ - SPECIAL_FORMS, MACROS, LAMBDA_LIST_KEYWORDS, DECLARATIONS, \ - BUILTIN_TYPES, BUILTIN_CLASSES - self.builtin_function = BUILTIN_FUNCTIONS - self.special_forms = SPECIAL_FORMS - self.macros = MACROS - self.lambda_list_keywords = LAMBDA_LIST_KEYWORDS - self.declarations = DECLARATIONS - self.builtin_types = BUILTIN_TYPES - self.builtin_classes = BUILTIN_CLASSES - RegexLexer.__init__(self, **options) - - def get_tokens_unprocessed(self, text): - stack = ['root'] - for index, token, value in RegexLexer.get_tokens_unprocessed(self, text, stack): - if token is Name.Variable: - if value in self.builtin_function: - yield index, Name.Builtin, value - continue - if value in self.special_forms: - yield index, Keyword, value - continue - if value in self.macros: - yield index, Name.Builtin, value - continue - if value in self.lambda_list_keywords: - yield index, Keyword, value - continue - if 
value in self.declarations: - yield index, Keyword, value - continue - if value in self.builtin_types: - yield index, Keyword.Type, value - continue - if value in self.builtin_classes: - yield index, Name.Class, value - continue - yield index, token, value - - tokens = { - 'root' : [ - ('', Text, 'body'), - ], - 'multiline-comment' : [ - (r'#\|', Comment.Multiline, '#push'), # (cf. Hyperspec 2.4.8.19) - (r'\|#', Comment.Multiline, '#pop'), - (r'[^|#]+', Comment.Multiline), - (r'[|#]', Comment.Multiline), - ], - 'commented-form' : [ - (r'\(', Comment.Preproc, '#push'), - (r'\)', Comment.Preproc, '#pop'), - (r'[^()]+', Comment.Preproc), - ], - 'body' : [ - # whitespace - (r'\s+', Text), - - # single-line comment - (r';.*$', Comment.Single), - - # multi-line comment - (r'#\|', Comment.Multiline, 'multiline-comment'), - - # encoding comment (?) - (r'#\d*Y.*$', Comment.Special), - - # strings and characters - (r'"(\\.|\\\n|[^"\\])*"', String), - # quoting - (r":" + symbol, String.Symbol), - (r"::" + symbol, String.Symbol), - (r":#" + symbol, String.Symbol), - (r"'" + symbol, String.Symbol), - (r"'", Operator), - (r"`", Operator), - - # decimal numbers - (r'[-+]?\d+\.?' + terminated, Number.Integer), - (r'[-+]?\d+/\d+' + terminated, Number), - (r'[-+]?(\d*\.\d+([defls][-+]?\d+)?|\d+(\.\d*)?[defls][-+]?\d+)' \ - + terminated, Number.Float), - - # sharpsign strings and characters - (r"#\\." 
+ terminated, String.Char), - (r"#\\" + symbol, String.Char), - - # vector - (r'#\(', Operator, 'body'), - - # bitstring - (r'#\d*\*[01]*', Literal.Other), - - # uninterned symbol - (r'#:' + symbol, String.Symbol), - - # read-time and load-time evaluation - (r'#[.,]', Operator), - - # function shorthand - (r'#\'', Name.Function), - - # binary rational - (r'#[bB][+-]?[01]+(/[01]+)?', Number), - - # octal rational - (r'#[oO][+-]?[0-7]+(/[0-7]+)?', Number.Oct), - - # hex rational - (r'#[xX][+-]?[0-9a-fA-F]+(/[0-9a-fA-F]+)?', Number.Hex), - - # radix rational - (r'#\d+[rR][+-]?[0-9a-zA-Z]+(/[0-9a-zA-Z]+)?', Number), - - # complex - (r'(#[cC])(\()', bygroups(Number, Punctuation), 'body'), - - # array - (r'(#\d+[aA])(\()', bygroups(Literal.Other, Punctuation), 'body'), - - # structure - (r'(#[sS])(\()', bygroups(Literal.Other, Punctuation), 'body'), - - # path - (r'#[pP]?"(\\.|[^"])*"', Literal.Other), - - # reference - (r'#\d+=', Operator), - (r'#\d+#', Operator), - - # read-time comment - (r'#+nil' + terminated + '\s*\(', Comment.Preproc, 'commented-form'), - - # read-time conditional - (r'#[+-]', Operator), - - # special operators that should have been parsed already - (r'(,@|,|\.)', Operator), - - # special constants - (r'(t|nil)' + terminated, Name.Constant), - - # functions and variables - (r'\*' + symbol + '\*', Name.Variable.Global), - (symbol, Name.Variable), - - # parentheses - (r'\(', Punctuation, 'body'), - (r'\)', Punctuation, '#pop'), - ], - } - - -class HaskellLexer(RegexLexer): - """ - A Haskell lexer based on the lexemes defined in the Haskell 98 Report. 
- - *New in Pygments 0.8.* - """ - name = 'Haskell' - aliases = ['haskell', 'hs'] - filenames = ['*.hs'] - mimetypes = ['text/x-haskell'] - - reserved = ['case','class','data','default','deriving','do','else', - 'if','in','infix[lr]?','instance', - 'let','newtype','of','then','type','where','_'] - ascii = ['NUL','SOH','[SE]TX','EOT','ENQ','ACK', - 'BEL','BS','HT','LF','VT','FF','CR','S[OI]','DLE', - 'DC[1-4]','NAK','SYN','ETB','CAN', - 'EM','SUB','ESC','[FGRU]S','SP','DEL'] - - tokens = { - 'root': [ - # Whitespace: - (r'\s+', Text), - #(r'--\s*|.*$', Comment.Doc), - (r'--(?![!#$%&*+./<=>?@\^|_~:\\]).*?$', Comment.Single), - (r'{-', Comment.Multiline, 'comment'), - # Lexemes: - # Identifiers - (r'\bimport\b', Keyword.Reserved, 'import'), - (r'\bmodule\b', Keyword.Reserved, 'module'), - (r'\berror\b', Name.Exception), - (r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved), - (r'^[_a-z][\w\']*', Name.Function), - (r"'?[_a-z][\w']*", Name), - (r"('')?[A-Z][\w\']*", Keyword.Type), - # Operators - (r'\\(?![:!#$%&*+.\\/<=>?@^|~-]+)', Name.Function), # lambda operator - (r'(<-|::|->|=>|=)(?![:!#$%&*+.\\/<=>?@^|~-]+)', Operator.Word), # specials - (r':[:!#$%&*+.\\/<=>?@^|~-]*', Keyword.Type), # Constructor operators - (r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator), # Other operators - # Numbers - (r'\d+[eE][+-]?\d+', Number.Float), - (r'\d+\.\d+([eE][+-]?\d+)?', Number.Float), - (r'0[oO][0-7]+', Number.Oct), - (r'0[xX][\da-fA-F]+', Number.Hex), - (r'\d+', Number.Integer), - # Character/String Literals - (r"'", String.Char, 'character'), - (r'"', String, 'string'), - # Special - (r'\[\]', Keyword.Type), - (r'\(\)', Name.Builtin), - (r'[][(),;`{}]', Punctuation), - ], - 'import': [ - # Import statements - (r'\s+', Text), - (r'"', String, 'string'), - # after "funclist" state - (r'\)', Punctuation, '#pop'), - (r'qualified\b', Keyword), - # import X as Y - (r'([A-Z][a-zA-Z0-9_.]*)(\s+)(as)(\s+)([A-Z][a-zA-Z0-9_.]*)', - bygroups(Name.Namespace, Text, Keyword, Text, Name), 
'#pop'), - # import X hiding (functions) - (r'([A-Z][a-zA-Z0-9_.]*)(\s+)(hiding)(\s+)(\()', - bygroups(Name.Namespace, Text, Keyword, Text, Punctuation), 'funclist'), - # import X (functions) - (r'([A-Z][a-zA-Z0-9_.]*)(\s+)(\()', - bygroups(Name.Namespace, Text, Punctuation), 'funclist'), - # import X - (r'[a-zA-Z0-9_.]+', Name.Namespace, '#pop'), - ], - 'module': [ - (r'\s+', Text), - (r'([A-Z][a-zA-Z0-9_.]*)(\s+)(\()', - bygroups(Name.Namespace, Text, Punctuation), 'funclist'), - (r'[A-Z][a-zA-Z0-9_.]*', Name.Namespace, '#pop'), - ], - 'funclist': [ - (r'\s+', Text), - (r'[A-Z][a-zA-Z0-9_]*', Keyword.Type), - (r'(_[\w\']+|[a-z][\w\']*)', Name.Function), - (r'--.*$', Comment.Single), - (r'{-', Comment.Multiline, 'comment'), - (r',', Punctuation), - (r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator), - # (HACK, but it makes sense to push two instances, believe me) - (r'\(', Punctuation, ('funclist', 'funclist')), - (r'\)', Punctuation, '#pop:2'), - ], - # NOTE: the next four states are shared in the AgdaLexer; make sure - # any change is compatible with Agda as well or copy over and change - 'comment': [ - # Multiline Comments - (r'[^-{}]+', Comment.Multiline), - (r'{-', Comment.Multiline, '#push'), - (r'-}', Comment.Multiline, '#pop'), - (r'[-{}]', Comment.Multiline), - ], - 'character': [ - # Allows multi-chars, incorrectly. - (r"[^\\']", String.Char), - (r"\\", String.Escape, 'escape'), - ("'", String.Char, '#pop'), - ], - 'string': [ - (r'[^\\"]+', String), - (r"\\", String.Escape, 'escape'), - ('"', String, '#pop'), - ], - 'escape': [ - (r'[abfnrtv"\'&\\]', String.Escape, '#pop'), - (r'\^[][A-Z@\^_]', String.Escape, '#pop'), - ('|'.join(ascii), String.Escape, '#pop'), - (r'o[0-7]+', String.Escape, '#pop'), - (r'x[\da-fA-F]+', String.Escape, '#pop'), - (r'\d+', String.Escape, '#pop'), - (r'\s+\\', String.Escape, '#pop'), - ], - } - - -class AgdaLexer(RegexLexer): - """ - For the `Agda `_ - dependently typed functional programming language and proof assistant. 
- - *New in Pygments 1.7.* - """ - - name = 'Agda' - aliases = ['agda'] - filenames = ['*.agda'] - mimetypes = ['text/x-agda'] - - reserved = ['abstract', 'codata', 'coinductive', 'constructor', 'data', - 'field', 'forall', 'hiding', 'in', 'inductive', 'infix', - 'infixl', 'infixr', 'let', 'open', 'pattern', 'primitive', - 'private', 'mutual', 'quote', 'quoteGoal', 'quoteTerm', - 'record', 'syntax', 'rewrite', 'unquote', 'using', 'where', - 'with'] - - tokens = { - 'root': [ - # Declaration - (r'^(\s*)([^\s\(\)\{\}]+)(\s*)(:)(\s*)', - bygroups(Text, Name.Function, Text, Operator.Word, Text)), - # Comments - (r'--(?![!#$%&*+./<=>?@\^|_~:\\]).*?$', Comment.Single), - (r'{-', Comment.Multiline, 'comment'), - # Holes - (r'{!', Comment.Directive, 'hole'), - # Lexemes: - # Identifiers - (ur'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved), - (r'(import|module)(\s+)', bygroups(Keyword.Reserved, Text), 'module'), - (r'\b(Set|Prop)\b', Keyword.Type), - # Special Symbols - (r'(\(|\)|\{|\})', Operator), - (ur'(\.{1,3}|\||[\u039B]|[\u2200]|[\u2192]|:|=|->)', Operator.Word), - # Numbers - (r'\d+[eE][+-]?\d+', Number.Float), - (r'\d+\.\d+([eE][+-]?\d+)?', Number.Float), - (r'0[xX][\da-fA-F]+', Number.Hex), - (r'\d+', Number.Integer), - # Strings - (r"'", String.Char, 'character'), - (r'"', String, 'string'), - (r'[^\s\(\)\{\}]+', Text), - (r'\s+?', Text), # Whitespace - ], - 'hole': [ - # Holes - (r'[^!{}]+', Comment.Directive), - (r'{!', Comment.Directive, '#push'), - (r'!}', Comment.Directive, '#pop'), - (r'[!{}]', Comment.Directive), - ], - 'module': [ - (r'{-', Comment.Multiline, 'comment'), - (r'[a-zA-Z][a-zA-Z0-9_.]*', Name, '#pop'), - (r'[^a-zA-Z]*', Text) - ], - 'comment': HaskellLexer.tokens['comment'], - 'character': HaskellLexer.tokens['character'], - 'string': HaskellLexer.tokens['string'], - 'escape': HaskellLexer.tokens['escape'] - } - - -class LiterateLexer(Lexer): - """ - Base class for lexers of literate file formats based on LaTeX or Bird-style - 
(prefixing each code line with ">"). - - Additional options accepted: - - `litstyle` - If given, must be ``"bird"`` or ``"latex"``. If not given, the style - is autodetected: if the first non-whitespace character in the source - is a backslash or percent character, LaTeX is assumed, else Bird. - """ - - bird_re = re.compile(r'(>[ \t]*)(.*\n)') - - def __init__(self, baselexer, **options): - self.baselexer = baselexer - Lexer.__init__(self, **options) - - def get_tokens_unprocessed(self, text): - style = self.options.get('litstyle') - if style is None: - style = (text.lstrip()[0:1] in '%\\') and 'latex' or 'bird' - - code = '' - insertions = [] - if style == 'bird': - # bird-style - for match in line_re.finditer(text): - line = match.group() - m = self.bird_re.match(line) - if m: - insertions.append((len(code), - [(0, Comment.Special, m.group(1))])) - code += m.group(2) - else: - insertions.append((len(code), [(0, Text, line)])) - else: - # latex-style - from pygments.lexers.text import TexLexer - lxlexer = TexLexer(**self.options) - codelines = 0 - latex = '' - for match in line_re.finditer(text): - line = match.group() - if codelines: - if line.lstrip().startswith('\\end{code}'): - codelines = 0 - latex += line - else: - code += line - elif line.lstrip().startswith('\\begin{code}'): - codelines = 1 - latex += line - insertions.append((len(code), - list(lxlexer.get_tokens_unprocessed(latex)))) - latex = '' - else: - latex += line - insertions.append((len(code), - list(lxlexer.get_tokens_unprocessed(latex)))) - for item in do_insertions(insertions, self.baselexer.get_tokens_unprocessed(code)): - yield item - - -class LiterateHaskellLexer(LiterateLexer): - """ - For Literate Haskell (Bird-style or LaTeX) source. - - Additional options accepted: - - `litstyle` - If given, must be ``"bird"`` or ``"latex"``. If not given, the style - is autodetected: if the first non-whitespace character in the source - is a backslash or percent character, LaTeX is assumed, else Bird. 
- - *New in Pygments 0.9.* - """ - name = 'Literate Haskell' - aliases = ['lhs', 'literate-haskell', 'lhaskell'] - filenames = ['*.lhs'] - mimetypes = ['text/x-literate-haskell'] - - def __init__(self, **options): - hslexer = HaskellLexer(**options) - LiterateLexer.__init__(self, hslexer, **options) - - -class LiterateAgdaLexer(LiterateLexer): - """ - For Literate Agda source. - - Additional options accepted: - - `litstyle` - If given, must be ``"bird"`` or ``"latex"``. If not given, the style - is autodetected: if the first non-whitespace character in the source - is a backslash or percent character, LaTeX is assumed, else Bird. - - *New in Pygments 1.7.* - """ - name = 'Literate Agda' - aliases = ['lagda', 'literate-agda'] - filenames = ['*.lagda'] - mimetypes = ['text/x-literate-agda'] - - def __init__(self, **options): - agdalexer = AgdaLexer(**options) - LiterateLexer.__init__(self, agdalexer, litstyle='latex', **options) - - -class SMLLexer(RegexLexer): - """ - For the Standard ML language. 
- - *New in Pygments 1.5.* - """ - - name = 'Standard ML' - aliases = ['sml'] - filenames = ['*.sml', '*.sig', '*.fun',] - mimetypes = ['text/x-standardml', 'application/x-standardml'] - - alphanumid_reserved = [ - # Core - 'abstype', 'and', 'andalso', 'as', 'case', 'datatype', 'do', 'else', - 'end', 'exception', 'fn', 'fun', 'handle', 'if', 'in', 'infix', - 'infixr', 'let', 'local', 'nonfix', 'of', 'op', 'open', 'orelse', - 'raise', 'rec', 'then', 'type', 'val', 'with', 'withtype', 'while', - # Modules - 'eqtype', 'functor', 'include', 'sharing', 'sig', 'signature', - 'struct', 'structure', 'where', - ] - - symbolicid_reserved = [ - # Core - ':', '\|', '=', '=>', '->', '#', - # Modules - ':>', - ] - - nonid_reserved = [ '(', ')', '[', ']', '{', '}', ',', ';', '...', '_' ] - - alphanumid_re = r"[a-zA-Z][a-zA-Z0-9_']*" - symbolicid_re = r"[!%&$#+\-/:<=>?@\\~`^|*]+" - - # A character constant is a sequence of the form #s, where s is a string - # constant denoting a string of size one character. This setup just parses - # the entire string as either a String.Double or a String.Char (depending - # on the argument), even if the String.Char is an erronous - # multiple-character string. 
- def stringy (whatkind): - return [ - (r'[^"\\]', whatkind), - (r'\\[\\\"abtnvfr]', String.Escape), - # Control-character notation is used for codes < 32, - # where \^@ == \000 - (r'\\\^[\x40-\x5e]', String.Escape), - # Docs say 'decimal digits' - (r'\\[0-9]{3}', String.Escape), - (r'\\u[0-9a-fA-F]{4}', String.Escape), - (r'\\\s+\\', String.Interpol), - (r'"', whatkind, '#pop'), - ] - - # Callbacks for distinguishing tokens and reserved words - def long_id_callback(self, match): - if match.group(1) in self.alphanumid_reserved: token = Error - else: token = Name.Namespace - yield match.start(1), token, match.group(1) - yield match.start(2), Punctuation, match.group(2) - - def end_id_callback(self, match): - if match.group(1) in self.alphanumid_reserved: token = Error - elif match.group(1) in self.symbolicid_reserved: token = Error - else: token = Name - yield match.start(1), token, match.group(1) - - def id_callback(self, match): - str = match.group(1) - if str in self.alphanumid_reserved: token = Keyword.Reserved - elif str in self.symbolicid_reserved: token = Punctuation - else: token = Name - yield match.start(1), token, str - - tokens = { - # Whitespace and comments are (almost) everywhere - 'whitespace': [ - (r'\s+', Text), - (r'\(\*', Comment.Multiline, 'comment'), - ], - - 'delimiters': [ - # This lexer treats these delimiters specially: - # Delimiters define scopes, and the scope is how the meaning of - # the `|' is resolved - is it a case/handle expression, or function - # definition by cases? 
(This is not how the Definition works, but - # it's how MLton behaves, see http://mlton.org/SMLNJDeviations) - (r'\(|\[|{', Punctuation, 'main'), - (r'\)|\]|}', Punctuation, '#pop'), - (r'\b(let|if|local)\b(?!\')', Keyword.Reserved, ('main', 'main')), - (r'\b(struct|sig|while)\b(?!\')', Keyword.Reserved, 'main'), - (r'\b(do|else|end|in|then)\b(?!\')', Keyword.Reserved, '#pop'), - ], - - 'core': [ - # Punctuation that doesn't overlap symbolic identifiers - (r'(%s)' % '|'.join([re.escape(z) for z in nonid_reserved]), - Punctuation), - - # Special constants: strings, floats, numbers in decimal and hex - (r'#"', String.Char, 'char'), - (r'"', String.Double, 'string'), - (r'~?0x[0-9a-fA-F]+', Number.Hex), - (r'0wx[0-9a-fA-F]+', Number.Hex), - (r'0w\d+', Number.Integer), - (r'~?\d+\.\d+[eE]~?\d+', Number.Float), - (r'~?\d+\.\d+', Number.Float), - (r'~?\d+[eE]~?\d+', Number.Float), - (r'~?\d+', Number.Integer), - - # Labels - (r'#\s*[1-9][0-9]*', Name.Label), - (r'#\s*(%s)' % alphanumid_re, Name.Label), - (r'#\s+(%s)' % symbolicid_re, Name.Label), - # Some reserved words trigger a special, local lexer state change - (r'\b(datatype|abstype)\b(?!\')', Keyword.Reserved, 'dname'), - (r'(?=\b(exception)\b(?!\'))', Text, ('ename')), - (r'\b(functor|include|open|signature|structure)\b(?!\')', - Keyword.Reserved, 'sname'), - (r'\b(type|eqtype)\b(?!\')', Keyword.Reserved, 'tname'), - - # Regular identifiers, long and otherwise - (r'\'[0-9a-zA-Z_\']*', Name.Decorator), - (r'(%s)(\.)' % alphanumid_re, long_id_callback, "dotted"), - (r'(%s)' % alphanumid_re, id_callback), - (r'(%s)' % symbolicid_re, id_callback), - ], - 'dotted': [ - (r'(%s)(\.)' % alphanumid_re, long_id_callback), - (r'(%s)' % alphanumid_re, end_id_callback, "#pop"), - (r'(%s)' % symbolicid_re, end_id_callback, "#pop"), - (r'\s+', Error), - (r'\S+', Error), - ], - - - # Main parser (prevents errors in files that have scoping errors) - 'root': [ (r'', Text, 'main') ], - - # In this scope, I expect '|' to not be 
followed by a function name, - # and I expect 'and' to be followed by a binding site - 'main': [ - include('whitespace'), - - # Special behavior of val/and/fun - (r'\b(val|and)\b(?!\')', Keyword.Reserved, 'vname'), - (r'\b(fun)\b(?!\')', Keyword.Reserved, - ('#pop', 'main-fun', 'fname')), - - include('delimiters'), - include('core'), - (r'\S+', Error), - ], - - # In this scope, I expect '|' and 'and' to be followed by a function - 'main-fun': [ - include('whitespace'), - - (r'\s', Text), - (r'\(\*', Comment.Multiline, 'comment'), - - # Special behavior of val/and/fun - (r'\b(fun|and)\b(?!\')', Keyword.Reserved, 'fname'), - (r'\b(val)\b(?!\')', Keyword.Reserved, - ('#pop', 'main', 'vname')), - - # Special behavior of '|' and '|'-manipulating keywords - (r'\|', Punctuation, 'fname'), - (r'\b(case|handle)\b(?!\')', Keyword.Reserved, - ('#pop', 'main')), - - include('delimiters'), - include('core'), - (r'\S+', Error), - ], - - # Character and string parsers - 'char': stringy(String.Char), - 'string': stringy(String.Double), - - 'breakout': [ - (r'(?=\b(%s)\b(?!\'))' % '|'.join(alphanumid_reserved), Text, '#pop'), - ], - - # Dealing with what comes after module system keywords - 'sname': [ - include('whitespace'), - include('breakout'), - - (r'(%s)' % alphanumid_re, Name.Namespace), - (r'', Text, '#pop'), - ], - - # Dealing with what comes after the 'fun' (or 'and' or '|') keyword - 'fname': [ - include('whitespace'), - (r'\'[0-9a-zA-Z_\']*', Name.Decorator), - (r'\(', Punctuation, 'tyvarseq'), - - (r'(%s)' % alphanumid_re, Name.Function, '#pop'), - (r'(%s)' % symbolicid_re, Name.Function, '#pop'), - - # Ignore interesting function declarations like "fun (x + y) = ..." 
- (r'', Text, '#pop'), - ], - - # Dealing with what comes after the 'val' (or 'and') keyword - 'vname': [ - include('whitespace'), - (r'\'[0-9a-zA-Z_\']*', Name.Decorator), - (r'\(', Punctuation, 'tyvarseq'), - - (r'(%s)(\s*)(=(?!%s))' % (alphanumid_re, symbolicid_re), - bygroups(Name.Variable, Text, Punctuation), '#pop'), - (r'(%s)(\s*)(=(?!%s))' % (symbolicid_re, symbolicid_re), - bygroups(Name.Variable, Text, Punctuation), '#pop'), - (r'(%s)' % alphanumid_re, Name.Variable, '#pop'), - (r'(%s)' % symbolicid_re, Name.Variable, '#pop'), - - # Ignore interesting patterns like 'val (x, y)' - (r'', Text, '#pop'), - ], - - # Dealing with what comes after the 'type' (or 'and') keyword - 'tname': [ - include('whitespace'), - include('breakout'), - - (r'\'[0-9a-zA-Z_\']*', Name.Decorator), - (r'\(', Punctuation, 'tyvarseq'), - (r'=(?!%s)' % symbolicid_re, Punctuation, ('#pop', 'typbind')), - - (r'(%s)' % alphanumid_re, Keyword.Type), - (r'(%s)' % symbolicid_re, Keyword.Type), - (r'\S+', Error, '#pop'), - ], - - # A type binding includes most identifiers - 'typbind': [ - include('whitespace'), - - (r'\b(and)\b(?!\')', Keyword.Reserved, ('#pop', 'tname')), - - include('breakout'), - include('core'), - (r'\S+', Error, '#pop'), - ], - - # Dealing with what comes after the 'datatype' (or 'and') keyword - 'dname': [ - include('whitespace'), - include('breakout'), - - (r'\'[0-9a-zA-Z_\']*', Name.Decorator), - (r'\(', Punctuation, 'tyvarseq'), - (r'(=)(\s*)(datatype)', - bygroups(Punctuation, Text, Keyword.Reserved), '#pop'), - (r'=(?!%s)' % symbolicid_re, Punctuation, - ('#pop', 'datbind', 'datcon')), - - (r'(%s)' % alphanumid_re, Keyword.Type), - (r'(%s)' % symbolicid_re, Keyword.Type), - (r'\S+', Error, '#pop'), - ], - - # common case - A | B | C of int - 'datbind': [ - include('whitespace'), - - (r'\b(and)\b(?!\')', Keyword.Reserved, ('#pop', 'dname')), - (r'\b(withtype)\b(?!\')', Keyword.Reserved, ('#pop', 'tname')), - (r'\b(of)\b(?!\')', Keyword.Reserved), - - 
(r'(\|)(\s*)(%s)' % alphanumid_re, - bygroups(Punctuation, Text, Name.Class)), - (r'(\|)(\s+)(%s)' % symbolicid_re, - bygroups(Punctuation, Text, Name.Class)), - - include('breakout'), - include('core'), - (r'\S+', Error), - ], - - # Dealing with what comes after an exception - 'ename': [ - include('whitespace'), - - (r'(exception|and)\b(\s+)(%s)' % alphanumid_re, - bygroups(Keyword.Reserved, Text, Name.Class)), - (r'(exception|and)\b(\s*)(%s)' % symbolicid_re, - bygroups(Keyword.Reserved, Text, Name.Class)), - (r'\b(of)\b(?!\')', Keyword.Reserved), - - include('breakout'), - include('core'), - (r'\S+', Error), - ], - - 'datcon': [ - include('whitespace'), - (r'(%s)' % alphanumid_re, Name.Class, '#pop'), - (r'(%s)' % symbolicid_re, Name.Class, '#pop'), - (r'\S+', Error, '#pop'), - ], - - # Series of type variables - 'tyvarseq': [ - (r'\s', Text), - (r'\(\*', Comment.Multiline, 'comment'), - - (r'\'[0-9a-zA-Z_\']*', Name.Decorator), - (alphanumid_re, Name), - (r',', Punctuation), - (r'\)', Punctuation, '#pop'), - (symbolicid_re, Name), - ], - - 'comment': [ - (r'[^(*)]', Comment.Multiline), - (r'\(\*', Comment.Multiline, '#push'), - (r'\*\)', Comment.Multiline, '#pop'), - (r'[(*)]', Comment.Multiline), - ], - } - - -class OcamlLexer(RegexLexer): - """ - For the OCaml language. 
- - *New in Pygments 0.7.* - """ - - name = 'OCaml' - aliases = ['ocaml'] - filenames = ['*.ml', '*.mli', '*.mll', '*.mly'] - mimetypes = ['text/x-ocaml'] - - keywords = [ - 'as', 'assert', 'begin', 'class', 'constraint', 'do', 'done', - 'downto', 'else', 'end', 'exception', 'external', 'false', - 'for', 'fun', 'function', 'functor', 'if', 'in', 'include', - 'inherit', 'initializer', 'lazy', 'let', 'match', 'method', - 'module', 'mutable', 'new', 'object', 'of', 'open', 'private', - 'raise', 'rec', 'sig', 'struct', 'then', 'to', 'true', 'try', - 'type', 'value', 'val', 'virtual', 'when', 'while', 'with', - ] - keyopts = [ - '!=','#','&','&&','\(','\)','\*','\+',',','-', - '-\.','->','\.','\.\.',':','::',':=',':>',';',';;','<', - '<-','=','>','>]','>}','\?','\?\?','\[','\[<','\[>','\[\|', - ']','_','`','{','{<','\|','\|]','}','~' - ] - - operators = r'[!$%&*+\./:<=>?@^|~-]' - word_operators = ['and', 'asr', 'land', 'lor', 'lsl', 'lxor', 'mod', 'or'] - prefix_syms = r'[!?~]' - infix_syms = r'[=<>@^|&+\*/$%-]' - primitives = ['unit', 'int', 'float', 'bool', 'string', 'char', 'list', 'array'] - - tokens = { - 'escape-sequence': [ - (r'\\[\\\"\'ntbr]', String.Escape), - (r'\\[0-9]{3}', String.Escape), - (r'\\x[0-9a-fA-F]{2}', String.Escape), - ], - 'root': [ - (r'\s+', Text), - (r'false|true|\(\)|\[\]', Name.Builtin.Pseudo), - (r'\b([A-Z][A-Za-z0-9_\']*)(?=\s*\.)', - Name.Namespace, 'dotted'), - (r'\b([A-Z][A-Za-z0-9_\']*)', Name.Class), - (r'\(\*(?![)])', Comment, 'comment'), - (r'\b(%s)\b' % '|'.join(keywords), Keyword), - (r'(%s)' % '|'.join(keyopts[::-1]), Operator), - (r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator), - (r'\b(%s)\b' % '|'.join(word_operators), Operator.Word), - (r'\b(%s)\b' % '|'.join(primitives), Keyword.Type), - - (r"[^\W\d][\w']*", Name), - - (r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float), - (r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex), - (r'0[oO][0-7][0-7_]*', Number.Oct), - (r'0[bB][01][01_]*', Number.Binary), - 
(r'\d[\d_]*', Number.Integer), - - (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'", - String.Char), - (r"'.'", String.Char), - (r"'", Keyword), # a stray quote is another syntax element - - (r'"', String.Double, 'string'), - - (r'[~?][a-z][\w\']*:', Name.Variable), - ], - 'comment': [ - (r'[^(*)]+', Comment), - (r'\(\*', Comment, '#push'), - (r'\*\)', Comment, '#pop'), - (r'[(*)]', Comment), - ], - 'string': [ - (r'[^\\"]+', String.Double), - include('escape-sequence'), - (r'\\\n', String.Double), - (r'"', String.Double, '#pop'), - ], - 'dotted': [ - (r'\s+', Text), - (r'\.', Punctuation), - (r'[A-Z][A-Za-z0-9_\']*(?=\s*\.)', Name.Namespace), - (r'[A-Z][A-Za-z0-9_\']*', Name.Class, '#pop'), - (r'[a-z_][A-Za-z0-9_\']*', Name, '#pop'), - ], - } - - -class ErlangLexer(RegexLexer): - """ - For the Erlang functional programming language. - - Blame Jeremy Thurgood (http://jerith.za.net/). - - *New in Pygments 0.9.* - """ - - name = 'Erlang' - aliases = ['erlang'] - filenames = ['*.erl', '*.hrl', '*.es', '*.escript'] - mimetypes = ['text/x-erlang'] - - keywords = [ - 'after', 'begin', 'case', 'catch', 'cond', 'end', 'fun', 'if', - 'let', 'of', 'query', 'receive', 'try', 'when', - ] - - builtins = [ # See erlang(3) man page - 'abs', 'append_element', 'apply', 'atom_to_list', 'binary_to_list', - 'bitstring_to_list', 'binary_to_term', 'bit_size', 'bump_reductions', - 'byte_size', 'cancel_timer', 'check_process_code', 'delete_module', - 'demonitor', 'disconnect_node', 'display', 'element', 'erase', 'exit', - 'float', 'float_to_list', 'fun_info', 'fun_to_list', - 'function_exported', 'garbage_collect', 'get', 'get_keys', - 'group_leader', 'hash', 'hd', 'integer_to_list', 'iolist_to_binary', - 'iolist_size', 'is_atom', 'is_binary', 'is_bitstring', 'is_boolean', - 'is_builtin', 'is_float', 'is_function', 'is_integer', 'is_list', - 'is_number', 'is_pid', 'is_port', 'is_process_alive', 'is_record', - 'is_reference', 'is_tuple', 'length', 'link', 'list_to_atom', - 
'list_to_binary', 'list_to_bitstring', 'list_to_existing_atom', - 'list_to_float', 'list_to_integer', 'list_to_pid', 'list_to_tuple', - 'load_module', 'localtime_to_universaltime', 'make_tuple', 'md5', - 'md5_final', 'md5_update', 'memory', 'module_loaded', 'monitor', - 'monitor_node', 'node', 'nodes', 'open_port', 'phash', 'phash2', - 'pid_to_list', 'port_close', 'port_command', 'port_connect', - 'port_control', 'port_call', 'port_info', 'port_to_list', - 'process_display', 'process_flag', 'process_info', 'purge_module', - 'put', 'read_timer', 'ref_to_list', 'register', 'resume_process', - 'round', 'send', 'send_after', 'send_nosuspend', 'set_cookie', - 'setelement', 'size', 'spawn', 'spawn_link', 'spawn_monitor', - 'spawn_opt', 'split_binary', 'start_timer', 'statistics', - 'suspend_process', 'system_flag', 'system_info', 'system_monitor', - 'system_profile', 'term_to_binary', 'tl', 'trace', 'trace_delivered', - 'trace_info', 'trace_pattern', 'trunc', 'tuple_size', 'tuple_to_list', - 'universaltime_to_localtime', 'unlink', 'unregister', 'whereis' - ] - - operators = r'(\+\+?|--?|\*|/|<|>|/=|=:=|=/=|=<|>=|==?|<-|!|\?)' - word_operators = [ - 'and', 'andalso', 'band', 'bnot', 'bor', 'bsl', 'bsr', 'bxor', - 'div', 'not', 'or', 'orelse', 'rem', 'xor' - ] - - atom_re = r"(?:[a-z][a-zA-Z0-9_]*|'[^\n']*[^\\]')" - - variable_re = r'(?:[A-Z_][a-zA-Z0-9_]*)' - - escape_re = r'(?:\\(?:[bdefnrstv\'"\\/]|[0-7][0-7]?[0-7]?|\^[a-zA-Z]))' - - macro_re = r'(?:'+variable_re+r'|'+atom_re+r')' - - base_re = r'(?:[2-9]|[12][0-9]|3[0-6])' - - tokens = { - 'root': [ - (r'\s+', Text), - (r'%.*\n', Comment), - ('(' + '|'.join(keywords) + r')\b', Keyword), - ('(' + '|'.join(builtins) + r')\b', Name.Builtin), - ('(' + '|'.join(word_operators) + r')\b', Operator.Word), - (r'^-', Punctuation, 'directive'), - (operators, Operator), - (r'"', String, 'string'), - (r'<<', Name.Label), - (r'>>', Name.Label), - ('(' + atom_re + ')(:)', bygroups(Name.Namespace, Punctuation)), - ('(?:^|(?<=:))(' + 
atom_re + r')(\s*)(\()', - bygroups(Name.Function, Text, Punctuation)), - (r'[+-]?'+base_re+r'#[0-9a-zA-Z]+', Number.Integer), - (r'[+-]?\d+', Number.Integer), - (r'[+-]?\d+.\d+', Number.Float), - (r'[]\[:_@\".{}()|;,]', Punctuation), - (variable_re, Name.Variable), - (atom_re, Name), - (r'\?'+macro_re, Name.Constant), - (r'\$(?:'+escape_re+r'|\\[ %]|[^\\])', String.Char), - (r'#'+atom_re+r'(:?\.'+atom_re+r')?', Name.Label), - ], - 'string': [ - (escape_re, String.Escape), - (r'"', String, '#pop'), - (r'~[0-9.*]*[~#+bBcdefginpPswWxX]', String.Interpol), - (r'[^"\\~]+', String), - (r'~', String), - ], - 'directive': [ - (r'(define)(\s*)(\()('+macro_re+r')', - bygroups(Name.Entity, Text, Punctuation, Name.Constant), '#pop'), - (r'(record)(\s*)(\()('+macro_re+r')', - bygroups(Name.Entity, Text, Punctuation, Name.Label), '#pop'), - (atom_re, Name.Entity, '#pop'), - ], - } - - -class ErlangShellLexer(Lexer): - """ - Shell sessions in erl (for Erlang code). - - *New in Pygments 1.1.* - """ - name = 'Erlang erl session' - aliases = ['erl'] - filenames = ['*.erl-sh'] - mimetypes = ['text/x-erl-shellsession'] - - _prompt_re = re.compile(r'\d+>(?=\s|\Z)') - - def get_tokens_unprocessed(self, text): - erlexer = ErlangLexer(**self.options) - - curcode = '' - insertions = [] - for match in line_re.finditer(text): - line = match.group() - m = self._prompt_re.match(line) - if m is not None: - end = m.end() - insertions.append((len(curcode), - [(0, Generic.Prompt, line[:end])])) - curcode += line[end:] - else: - if curcode: - for item in do_insertions(insertions, - erlexer.get_tokens_unprocessed(curcode)): - yield item - curcode = '' - insertions = [] - if line.startswith('*'): - yield match.start(), Generic.Traceback, line - else: - yield match.start(), Generic.Output, line - if curcode: - for item in do_insertions(insertions, - erlexer.get_tokens_unprocessed(curcode)): - yield item - - -class OpaLexer(RegexLexer): - """ - Lexer for the Opa language (http://opalang.org). 
- - *New in Pygments 1.5.* - """ - - name = 'Opa' - aliases = ['opa'] - filenames = ['*.opa'] - mimetypes = ['text/x-opa'] - - # most of these aren't strictly keywords - # but if you color only real keywords, you might just - # as well not color anything - keywords = [ - 'and', 'as', 'begin', 'case', 'client', 'css', 'database', 'db', 'do', - 'else', 'end', 'external', 'forall', 'function', 'if', 'import', - 'match', 'module', 'or', 'package', 'parser', 'rec', 'server', 'then', - 'type', 'val', 'with', 'xml_parser', - ] - - # matches both stuff and `stuff` - ident_re = r'(([a-zA-Z_]\w*)|(`[^`]*`))' - - op_re = r'[.=\-<>,@~%/+?*&^!]' - punc_re = r'[()\[\],;|]' # '{' and '}' are treated elsewhere - # because they are also used for inserts - - tokens = { - # copied from the caml lexer, should be adapted - 'escape-sequence': [ - (r'\\[\\\"\'ntr}]', String.Escape), - (r'\\[0-9]{3}', String.Escape), - (r'\\x[0-9a-fA-F]{2}', String.Escape), - ], - - # factorizing these rules, because they are inserted many times - 'comments': [ - (r'/\*', Comment, 'nested-comment'), - (r'//.*?$', Comment), - ], - 'comments-and-spaces': [ - include('comments'), - (r'\s+', Text), - ], - - 'root': [ - include('comments-and-spaces'), - # keywords - (r'\b(%s)\b' % '|'.join(keywords), Keyword), - # directives - # we could parse the actual set of directives instead of anything - # starting with @, but this is troublesome - # because it needs to be adjusted all the time - # and assuming we parse only sources that compile, it is useless - (r'@'+ident_re+r'\b', Name.Builtin.Pseudo), - - # number literals - (r'-?.[\d]+([eE][+\-]?\d+)', Number.Float), - (r'-?\d+.\d*([eE][+\-]?\d+)', Number.Float), - (r'-?\d+[eE][+\-]?\d+', Number.Float), - (r'0[xX][\da-fA-F]+', Number.Hex), - (r'0[oO][0-7]+', Number.Oct), - (r'0[bB][01]+', Number.Binary), - (r'\d+', Number.Integer), - # color literals - (r'#[\da-fA-F]{3,6}', Number.Integer), - - # string literals - (r'"', String.Double, 'string'), - # char literal, 
should be checked because this is the regexp from - # the caml lexer - (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2})|.)'", - String.Char), - - # this is meant to deal with embedded exprs in strings - # every time we find a '}' we pop a state so that if we were - # inside a string, we are back in the string state - # as a consequence, we must also push a state every time we find a - # '{' or else we will have errors when parsing {} for instance - (r'{', Operator, '#push'), - (r'}', Operator, '#pop'), - - # html literals - # this is a much more strict that the actual parser, - # since a])', String.Single, 'html-open-tag'), - - # db path - # matching the '[_]' in '/a[_]' because it is a part - # of the syntax of the db path definition - # unfortunately, i don't know how to match the ']' in - # /a[1], so this is somewhat inconsistent - (r'[@?!]?(/\w+)+(\[_\])?', Name.Variable), - # putting the same color on <- as on db path, since - # it can be used only to mean Db.write - (r'<-(?!'+op_re+r')', Name.Variable), - - # 'modules' - # although modules are not distinguished by their names as in caml - # the standard library seems to follow the convention that modules - # only area capitalized - (r'\b([A-Z]\w*)(?=\.)', Name.Namespace), - - # operators - # = has a special role because this is the only - # way to syntactic distinguish binding constructions - # unfortunately, this colors the equal in {x=2} too - (r'=(?!'+op_re+r')', Keyword), - (r'(%s)+' % op_re, Operator), - (r'(%s)+' % punc_re, Operator), - - # coercions - (r':', Operator, 'type'), - # type variables - # we need this rule because we don't parse specially type - # definitions so in "type t('a) = ...", "'a" is parsed by 'root' - ("'"+ident_re, Keyword.Type), - - # id literal, #something, or #{expr} - (r'#'+ident_re, String.Single), - (r'#(?={)', String.Single), - - # identifiers - # this avoids to color '2' in 'a2' as an integer - (ident_re, Text), - - # default, not sure if that is needed or not - # 
(r'.', Text), - ], - - # it is quite painful to have to parse types to know where they end - # this is the general rule for a type - # a type is either: - # * -> ty - # * type-with-slash - # * type-with-slash -> ty - # * type-with-slash (, type-with-slash)+ -> ty - # - # the code is pretty funky in here, but this code would roughly - # translate in caml to: - # let rec type stream = - # match stream with - # | [< "->"; stream >] -> type stream - # | [< ""; stream >] -> - # type_with_slash stream - # type_lhs_1 stream; - # and type_1 stream = ... - 'type': [ - include('comments-and-spaces'), - (r'->', Keyword.Type), - (r'', Keyword.Type, ('#pop', 'type-lhs-1', 'type-with-slash')), - ], - - # parses all the atomic or closed constructions in the syntax of type - # expressions: record types, tuple types, type constructors, basic type - # and type variables - 'type-1': [ - include('comments-and-spaces'), - (r'\(', Keyword.Type, ('#pop', 'type-tuple')), - (r'~?{', Keyword.Type, ('#pop', 'type-record')), - (ident_re+r'\(', Keyword.Type, ('#pop', 'type-tuple')), - (ident_re, Keyword.Type, '#pop'), - ("'"+ident_re, Keyword.Type), - # this case is not in the syntax but sometimes - # we think we are parsing types when in fact we are parsing - # some css, so we just pop the states until we get back into - # the root state - (r'', Keyword.Type, '#pop'), - ], - - # type-with-slash is either: - # * type-1 - # * type-1 (/ type-1)+ - 'type-with-slash': [ - include('comments-and-spaces'), - (r'', Keyword.Type, ('#pop', 'slash-type-1', 'type-1')), - ], - 'slash-type-1': [ - include('comments-and-spaces'), - ('/', Keyword.Type, ('#pop', 'type-1')), - # same remark as above - (r'', Keyword.Type, '#pop'), - ], - - # we go in this state after having parsed a type-with-slash - # while trying to parse a type - # and at this point we must determine if we are parsing an arrow - # type (in which case we must continue parsing) or not (in which - # case we stop) - 'type-lhs-1': [ - 
include('comments-and-spaces'), - (r'->', Keyword.Type, ('#pop', 'type')), - (r'(?=,)', Keyword.Type, ('#pop', 'type-arrow')), - (r'', Keyword.Type, '#pop'), - ], - 'type-arrow': [ - include('comments-and-spaces'), - # the look ahead here allows to parse f(x : int, y : float -> truc) - # correctly - (r',(?=[^:]*?->)', Keyword.Type, 'type-with-slash'), - (r'->', Keyword.Type, ('#pop', 'type')), - # same remark as above - (r'', Keyword.Type, '#pop'), - ], - - # no need to do precise parsing for tuples and records - # because they are closed constructions, so we can simply - # find the closing delimiter - # note that this function would be not work if the source - # contained identifiers like `{)` (although it could be patched - # to support it) - 'type-tuple': [ - include('comments-and-spaces'), - (r'[^\(\)/*]+', Keyword.Type), - (r'[/*]', Keyword.Type), - (r'\(', Keyword.Type, '#push'), - (r'\)', Keyword.Type, '#pop'), - ], - 'type-record': [ - include('comments-and-spaces'), - (r'[^{}/*]+', Keyword.Type), - (r'[/*]', Keyword.Type), - (r'{', Keyword.Type, '#push'), - (r'}', Keyword.Type, '#pop'), - ], - -# 'type-tuple': [ -# include('comments-and-spaces'), -# (r'\)', Keyword.Type, '#pop'), -# (r'', Keyword.Type, ('#pop', 'type-tuple-1', 'type-1')), -# ], -# 'type-tuple-1': [ -# include('comments-and-spaces'), -# (r',?\s*\)', Keyword.Type, '#pop'), # ,) is a valid end of tuple, in (1,) -# (r',', Keyword.Type, 'type-1'), -# ], -# 'type-record':[ -# include('comments-and-spaces'), -# (r'}', Keyword.Type, '#pop'), -# (r'~?(?:\w+|`[^`]*`)', Keyword.Type, 'type-record-field-expr'), -# ], -# 'type-record-field-expr': [ -# -# ], - - 'nested-comment': [ - (r'[^/*]+', Comment), - (r'/\*', Comment, '#push'), - (r'\*/', Comment, '#pop'), - (r'[/*]', Comment), - ], - - # the copy pasting between string and single-string - # is kinda sad. Is there a way to avoid that?? 
- 'string': [ - (r'[^\\"{]+', String.Double), - (r'"', String.Double, '#pop'), - (r'{', Operator, 'root'), - include('escape-sequence'), - ], - 'single-string': [ - (r'[^\\\'{]+', String.Double), - (r'\'', String.Double, '#pop'), - (r'{', Operator, 'root'), - include('escape-sequence'), - ], - - # all the html stuff - # can't really reuse some existing html parser - # because we must be able to parse embedded expressions - - # we are in this state after someone parsed the '<' that - # started the html literal - 'html-open-tag': [ - (r'[\w\-:]+', String.Single, ('#pop', 'html-attr')), - (r'>', String.Single, ('#pop', 'html-content')), - ], - - # we are in this state after someone parsed the ' is allowed - (r'[\w\-:]*>', String.Single, '#pop'), - ], - - # we are in this state after having parsed '', String.Single, '#pop'), - (r'>', String.Single, ('#pop', 'html-content')), - ], - - 'html-attr-value': [ - (r"'", String.Single, ('#pop', 'single-string')), - (r'"', String.Single, ('#pop', 'string')), - (r'#'+ident_re, String.Single, '#pop'), - (r'#(?={)', String.Single, ('#pop', 'root')), - (r'[^"\'{`=<>]+', String.Single, '#pop'), - (r'{', Operator, ('#pop', 'root')), # this is a tail call! - ], - - # we should probably deal with '\' escapes here - 'html-content': [ - (r'', Comment, '#pop'), - (r'[^\-]+|-', Comment), - ], - } - - -class CoqLexer(RegexLexer): - """ - For the `Coq `_ theorem prover. 
- - *New in Pygments 1.5.* - """ - - name = 'Coq' - aliases = ['coq'] - filenames = ['*.v'] - mimetypes = ['text/x-coq'] - - keywords1 = [ - # Vernacular commands - 'Section', 'Module', 'End', 'Require', 'Import', 'Export', 'Variable', - 'Variables', 'Parameter', 'Parameters', 'Axiom', 'Hypothesis', - 'Hypotheses', 'Notation', 'Local', 'Tactic', 'Reserved', 'Scope', - 'Open', 'Close', 'Bind', 'Delimit', 'Definition', 'Let', 'Ltac', - 'Fixpoint', 'CoFixpoint', 'Morphism', 'Relation', 'Implicit', - 'Arguments', 'Set', 'Unset', 'Contextual', 'Strict', 'Prenex', - 'Implicits', 'Inductive', 'CoInductive', 'Record', 'Structure', - 'Canonical', 'Coercion', 'Theorem', 'Lemma', 'Corollary', - 'Proposition', 'Fact', 'Remark', 'Example', 'Proof', 'Goal', 'Save', - 'Qed', 'Defined', 'Hint', 'Resolve', 'Rewrite', 'View', 'Search', - 'Show', 'Print', 'Printing', 'All', 'Graph', 'Projections', 'inside', - 'outside', - ] - keywords2 = [ - # Gallina - 'forall', 'exists', 'exists2', 'fun', 'fix', 'cofix', 'struct', - 'match', 'end', 'in', 'return', 'let', 'if', 'is', 'then', 'else', - 'for', 'of', 'nosimpl', 'with', 'as', - ] - keywords3 = [ - # Sorts - 'Type', 'Prop', - ] - keywords4 = [ - # Tactics - 'pose', 'set', 'move', 'case', 'elim', 'apply', 'clear', 'hnf', 'intro', - 'intros', 'generalize', 'rename', 'pattern', 'after', 'destruct', - 'induction', 'using', 'refine', 'inversion', 'injection', 'rewrite', - 'congr', 'unlock', 'compute', 'ring', 'field', 'replace', 'fold', - 'unfold', 'change', 'cutrewrite', 'simpl', 'have', 'suff', 'wlog', - 'suffices', 'without', 'loss', 'nat_norm', 'assert', 'cut', 'trivial', - 'revert', 'bool_congr', 'nat_congr', 'symmetry', 'transitivity', 'auto', - 'split', 'left', 'right', 'autorewrite', - ] - keywords5 = [ - # Terminators - 'by', 'done', 'exact', 'reflexivity', 'tauto', 'romega', 'omega', - 'assumption', 'solve', 'contradiction', 'discriminate', - ] - keywords6 = [ - # Control - 'do', 'last', 'first', 'try', 'idtac', 'repeat', - ] - # 
'as', 'assert', 'begin', 'class', 'constraint', 'do', 'done', - # 'downto', 'else', 'end', 'exception', 'external', 'false', - # 'for', 'fun', 'function', 'functor', 'if', 'in', 'include', - # 'inherit', 'initializer', 'lazy', 'let', 'match', 'method', - # 'module', 'mutable', 'new', 'object', 'of', 'open', 'private', - # 'raise', 'rec', 'sig', 'struct', 'then', 'to', 'true', 'try', - # 'type', 'val', 'virtual', 'when', 'while', 'with' - keyopts = [ - '!=', '#', '&', '&&', r'\(', r'\)', r'\*', r'\+', ',', '-', - r'-\.', '->', r'\.', r'\.\.', ':', '::', ':=', ':>', ';', ';;', '<', - '<-', '=', '>', '>]', '>}', r'\?', r'\?\?', r'\[', r'\[<', r'\[>', - r'\[\|', ']', '_', '`', '{', '{<', r'\|', r'\|]', '}', '~', '=>', - r'/\\', r'\\/', - u'Π', u'λ', - ] - operators = r'[!$%&*+\./:<=>?@^|~-]' - word_operators = ['and', 'asr', 'land', 'lor', 'lsl', 'lxor', 'mod', 'or'] - prefix_syms = r'[!?~]' - infix_syms = r'[=<>@^|&+\*/$%-]' - primitives = ['unit', 'int', 'float', 'bool', 'string', 'char', 'list', - 'array'] - - tokens = { - 'root': [ - (r'\s+', Text), - (r'false|true|\(\)|\[\]', Name.Builtin.Pseudo), - (r'\(\*', Comment, 'comment'), - (r'\b(%s)\b' % '|'.join(keywords1), Keyword.Namespace), - (r'\b(%s)\b' % '|'.join(keywords2), Keyword), - (r'\b(%s)\b' % '|'.join(keywords3), Keyword.Type), - (r'\b(%s)\b' % '|'.join(keywords4), Keyword), - (r'\b(%s)\b' % '|'.join(keywords5), Keyword.Pseudo), - (r'\b(%s)\b' % '|'.join(keywords6), Keyword.Reserved), - (r'\b([A-Z][A-Za-z0-9_\']*)(?=\s*\.)', - Name.Namespace, 'dotted'), - (r'\b([A-Z][A-Za-z0-9_\']*)', Name.Class), - (r'(%s)' % '|'.join(keyopts[::-1]), Operator), - (r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator), - (r'\b(%s)\b' % '|'.join(word_operators), Operator.Word), - (r'\b(%s)\b' % '|'.join(primitives), Keyword.Type), - - (r"[^\W\d][\w']*", Name), - - (r'\d[\d_]*', Number.Integer), - (r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex), - (r'0[oO][0-7][0-7_]*', Number.Oct), - (r'0[bB][01][01_]*', 
Number.Binary), - (r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float), - - (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'", - String.Char), - (r"'.'", String.Char), - (r"'", Keyword), # a stray quote is another syntax element - - (r'"', String.Double, 'string'), - - (r'[~?][a-z][\w\']*:', Name.Variable), - ], - 'comment': [ - (r'[^(*)]+', Comment), - (r'\(\*', Comment, '#push'), - (r'\*\)', Comment, '#pop'), - (r'[(*)]', Comment), - ], - 'string': [ - (r'[^"]+', String.Double), - (r'""', String.Double), - (r'"', String.Double, '#pop'), - ], - 'dotted': [ - (r'\s+', Text), - (r'\.', Punctuation), - (r'[A-Z][A-Za-z0-9_\']*(?=\s*\.)', Name.Namespace), - (r'[A-Z][A-Za-z0-9_\']*', Name.Class, '#pop'), - (r'[a-z][a-z0-9_\']*', Name, '#pop'), - (r'', Text, '#pop') - ], - } - - def analyse_text(text): - if text.startswith('(*'): - return True - - -class NewLispLexer(RegexLexer): - """ - For `newLISP. `_ source code (version 10.3.0). - - *New in Pygments 1.5.* - """ - - name = 'NewLisp' - aliases = ['newlisp'] - filenames = ['*.lsp', '*.nl'] - mimetypes = ['text/x-newlisp', 'application/x-newlisp'] - - flags = re.IGNORECASE | re.MULTILINE | re.UNICODE - - # list of built-in functions for newLISP version 10.3 - builtins = [ - '^', '--', '-', ':', '!', '!=', '?', '@', '*', '/', '&', '%', '+', '++', - '<', '<<', '<=', '=', '>', '>=', '>>', '|', '~', '$', '$0', '$1', '$10', - '$11', '$12', '$13', '$14', '$15', '$2', '$3', '$4', '$5', '$6', '$7', - '$8', '$9', '$args', '$idx', '$it', '$main-args', 'abort', 'abs', - 'acos', 'acosh', 'add', 'address', 'amb', 'and', 'and', 'append-file', - 'append', 'apply', 'args', 'array-list', 'array?', 'array', 'asin', - 'asinh', 'assoc', 'atan', 'atan2', 'atanh', 'atom?', 'base64-dec', - 'base64-enc', 'bayes-query', 'bayes-train', 'begin', 'begin', 'begin', - 'beta', 'betai', 'bind', 'binomial', 'bits', 'callback', 'case', 'case', - 'case', 'catch', 'ceil', 'change-dir', 'char', 'chop', 'Class', 'clean', - 'close', 
'command-event', 'cond', 'cond', 'cond', 'cons', 'constant', - 'context?', 'context', 'copy-file', 'copy', 'cos', 'cosh', 'count', - 'cpymem', 'crc32', 'crit-chi2', 'crit-z', 'current-line', 'curry', - 'date-list', 'date-parse', 'date-value', 'date', 'debug', 'dec', - 'def-new', 'default', 'define-macro', 'define-macro', 'define', - 'delete-file', 'delete-url', 'delete', 'destroy', 'det', 'device', - 'difference', 'directory?', 'directory', 'div', 'do-until', 'do-while', - 'doargs', 'dolist', 'dostring', 'dotimes', 'dotree', 'dump', 'dup', - 'empty?', 'encrypt', 'ends-with', 'env', 'erf', 'error-event', - 'eval-string', 'eval', 'exec', 'exists', 'exit', 'exp', 'expand', - 'explode', 'extend', 'factor', 'fft', 'file-info', 'file?', 'filter', - 'find-all', 'find', 'first', 'flat', 'float?', 'float', 'floor', 'flt', - 'fn', 'for-all', 'for', 'fork', 'format', 'fv', 'gammai', 'gammaln', - 'gcd', 'get-char', 'get-float', 'get-int', 'get-long', 'get-string', - 'get-url', 'global?', 'global', 'if-not', 'if', 'ifft', 'import', 'inc', - 'index', 'inf?', 'int', 'integer?', 'integer', 'intersect', 'invert', - 'irr', 'join', 'lambda-macro', 'lambda?', 'lambda', 'last-error', - 'last', 'legal?', 'length', 'let', 'let', 'let', 'letex', 'letn', - 'letn', 'letn', 'list?', 'list', 'load', 'local', 'log', 'lookup', - 'lower-case', 'macro?', 'main-args', 'MAIN', 'make-dir', 'map', 'mat', - 'match', 'max', 'member', 'min', 'mod', 'module', 'mul', 'multiply', - 'NaN?', 'net-accept', 'net-close', 'net-connect', 'net-error', - 'net-eval', 'net-interface', 'net-ipv', 'net-listen', 'net-local', - 'net-lookup', 'net-packet', 'net-peek', 'net-peer', 'net-ping', - 'net-receive-from', 'net-receive-udp', 'net-receive', 'net-select', - 'net-send-to', 'net-send-udp', 'net-send', 'net-service', - 'net-sessions', 'new', 'nil?', 'nil', 'normal', 'not', 'now', 'nper', - 'npv', 'nth', 'null?', 'number?', 'open', 'or', 'ostype', 'pack', - 'parse-date', 'parse', 'peek', 'pipe', 'pmt', 'pop-assoc', 
'pop', - 'post-url', 'pow', 'prefix', 'pretty-print', 'primitive?', 'print', - 'println', 'prob-chi2', 'prob-z', 'process', 'prompt-event', - 'protected?', 'push', 'put-url', 'pv', 'quote?', 'quote', 'rand', - 'random', 'randomize', 'read', 'read-char', 'read-expr', 'read-file', - 'read-key', 'read-line', 'read-utf8', 'read', 'reader-event', - 'real-path', 'receive', 'ref-all', 'ref', 'regex-comp', 'regex', - 'remove-dir', 'rename-file', 'replace', 'reset', 'rest', 'reverse', - 'rotate', 'round', 'save', 'search', 'seed', 'seek', 'select', 'self', - 'semaphore', 'send', 'sequence', 'series', 'set-locale', 'set-ref-all', - 'set-ref', 'set', 'setf', 'setq', 'sgn', 'share', 'signal', 'silent', - 'sin', 'sinh', 'sleep', 'slice', 'sort', 'source', 'spawn', 'sqrt', - 'starts-with', 'string?', 'string', 'sub', 'swap', 'sym', 'symbol?', - 'symbols', 'sync', 'sys-error', 'sys-info', 'tan', 'tanh', 'term', - 'throw-error', 'throw', 'time-of-day', 'time', 'timer', 'title-case', - 'trace-highlight', 'trace', 'transpose', 'Tree', 'trim', 'true?', - 'true', 'unicode', 'unify', 'unique', 'unless', 'unpack', 'until', - 'upper-case', 'utf8', 'utf8len', 'uuid', 'wait-pid', 'when', 'while', - 'write', 'write-char', 'write-file', 'write-line', 'write', - 'xfer-event', 'xml-error', 'xml-parse', 'xml-type-tags', 'zero?', - ] - - # valid names - valid_name = r'([a-zA-Z0-9!$%&*+.,/<=>?@^_~|-])+|(\[.*?\])+' - - tokens = { - 'root': [ - # shebang - (r'#!(.*?)$', Comment.Preproc), - # comments starting with semicolon - (r';.*$', Comment.Single), - # comments starting with # - (r'#.*$', Comment.Single), - - # whitespace - (r'\s+', Text), - - # strings, symbols and characters - (r'"(\\\\|\\"|[^"])*"', String), - - # braces - (r"{", String, "bracestring"), - - # [text] ... [/text] delimited strings - (r'\[text\]*', String, "tagstring"), - - # 'special' operators... 
- (r"('|:)", Operator), - - # highlight the builtins - ('(%s)' % '|'.join(re.escape(entry) + '\\b' for entry in builtins), - Keyword), - - # the remaining functions - (r'(?<=\()' + valid_name, Name.Variable), - - # the remaining variables - (valid_name, String.Symbol), - - # parentheses - (r'(\(|\))', Punctuation), - ], - - # braced strings... - 'bracestring': [ - ("{", String, "#push"), - ("}", String, "#pop"), - ("[^{}]+", String), - ], - - # tagged [text]...[/text] delimited strings... - 'tagstring': [ - (r'(?s)(.*?)(\[/text\])', String, '#pop'), - ], - } - - -class ElixirLexer(RegexLexer): - """ - For the `Elixir language `_. - - *New in Pygments 1.5.* - """ - - name = 'Elixir' - aliases = ['elixir', 'ex', 'exs'] - filenames = ['*.ex', '*.exs'] - mimetypes = ['text/x-elixir'] - - def gen_elixir_sigil_rules(): - states = {} - - states['strings'] = [ - (r'(%[A-Ba-z])?"""(?:.|\n)*?"""', String.Doc), - (r"'''(?:.|\n)*?'''", String.Doc), - (r'"', String.Double, 'dqs'), - (r"'.*'", String.Single), - (r'(?', 'lt'): - - states['strings'] += [ - (r'%[a-z]' + lbrace, String.Double, name + 'intp'), - (r'%[A-Z]' + lbrace, String.Double, name + 'no-intp') - ] - - states[name +'intp'] = [ - (r'' + rbrace + '[a-z]*', String.Double, "#pop"), - include('enddoublestr') - ] - - states[name +'no-intp'] = [ - (r'.*' + rbrace + '[a-z]*', String.Double , "#pop") - ] - - return states - - tokens = { - 'root': [ - (r'\s+', Text), - (r'#.*$', Comment.Single), - (r'\b(case|cond|end|bc|lc|if|unless|try|loop|receive|fn|defmodule|' - r'defp?|defprotocol|defimpl|defrecord|defmacrop?|defdelegate|' - r'defexception|exit|raise|throw|unless|after|rescue|catch|else)\b(?![?!])|' - r'(?)\b\s*', Keyword), - (r'\b(import|require|use|recur|quote|unquote|super|refer)\b(?![?!])', - Keyword.Namespace), - (r'(?|<(?!<|=)|>(?!<|=|>)|<=|>=|===|==|=~|!=|!~|(?=[ \t])\?|' - r'(?<=[ \t])!+|&&|\|\||\^|\*|\+|\-|/|' - r'\||\+\+|\-\-|\*\*|\/\/|\<\-|\<\>|<<|>>|=|\.', Operator), - (r'(?=]))?|\<\>|===?|>=?|<=?|' - 
r'<=>|&&?|%\(\)|%\[\]|%\{\}|\+\+?|\-\-?|\|\|?|\!|//|[%&`/\|]|' - r'\*\*?|=?~|<\-)|([a-zA-Z_]\w*([?!])?)(:)(?!:)', String.Symbol), - (r':"', String.Symbol, 'interpoling_symbol'), - (r'\b(nil|true|false)\b(?![?!])|\b[A-Z]\w*\b', Name.Constant), - (r'\b(__(FILE|LINE|MODULE|MAIN|FUNCTION)__)\b(?![?!])', Name.Builtin.Pseudo), - (r'[a-zA-Z_!][\w_]*[!\?]?', Name), - (r'[(){};,/\|:\\\[\]]', Punctuation), - (r'@[a-zA-Z_]\w*|&\d', Name.Variable), - (r'\b(0[xX][0-9A-Fa-f]+|\d(_?\d)*(\.(?![^\d\s])' - r'(_?\d)*)?([eE][-+]?\d(_?\d)*)?|0[bB][01]+)\b', Number), - (r'%r\/.*\/', String.Regex), - include('strings'), - ], - 'dqs': [ - (r'"', String.Double, "#pop"), - include('enddoublestr') - ], - 'interpoling': [ - (r'#{', String.Interpol, 'interpoling_string'), - ], - 'interpoling_string' : [ - (r'}', String.Interpol, "#pop"), - include('root') - ], - 'interpoling_symbol': [ - (r'"', String.Symbol, "#pop"), - include('interpoling'), - (r'[^#"]+', String.Symbol), - ], - 'enddoublestr' : [ - include('interpoling'), - (r'[^#"]+', String.Double), - ] - } - tokens.update(gen_elixir_sigil_rules()) - - -class ElixirConsoleLexer(Lexer): - """ - For Elixir interactive console (iex) output like: - - .. 
sourcecode:: iex - - iex> [head | tail] = [1,2,3] - [1,2,3] - iex> head - 1 - iex> tail - [2,3] - iex> [head | tail] - [1,2,3] - iex> length [head | tail] - 3 - - *New in Pygments 1.5.* - """ - - name = 'Elixir iex session' - aliases = ['iex'] - mimetypes = ['text/x-elixir-shellsession'] - - _prompt_re = re.compile('(iex|\.{3})> ') - - def get_tokens_unprocessed(self, text): - exlexer = ElixirLexer(**self.options) - - curcode = '' - insertions = [] - for match in line_re.finditer(text): - line = match.group() - if line.startswith(u'** '): - insertions.append((len(curcode), - [(0, Generic.Error, line[:-1])])) - curcode += line[-1:] - else: - m = self._prompt_re.match(line) - if m is not None: - end = m.end() - insertions.append((len(curcode), - [(0, Generic.Prompt, line[:end])])) - curcode += line[end:] - else: - if curcode: - for item in do_insertions(insertions, - exlexer.get_tokens_unprocessed(curcode)): - yield item - curcode = '' - insertions = [] - yield match.start(), Generic.Output, line - if curcode: - for item in do_insertions(insertions, - exlexer.get_tokens_unprocessed(curcode)): - yield item - - -class KokaLexer(RegexLexer): - """ - Lexer for the `Koka `_ - language. 
- - *New in Pygments 1.6.* - """ - - name = 'Koka' - aliases = ['koka'] - filenames = ['*.kk', '*.kki'] - mimetypes = ['text/x-koka'] - - keywords = [ - 'infix', 'infixr', 'infixl', - 'type', 'cotype', 'rectype', 'alias', - 'struct', 'con', - 'fun', 'function', 'val', 'var', - 'external', - 'if', 'then', 'else', 'elif', 'return', 'match', - 'private', 'public', 'private', - 'module', 'import', 'as', - 'include', 'inline', - 'rec', - 'try', 'yield', 'enum', - 'interface', 'instance', - ] - - # keywords that are followed by a type - typeStartKeywords = [ - 'type', 'cotype', 'rectype', 'alias', 'struct', 'enum', - ] - - # keywords valid in a type - typekeywords = [ - 'forall', 'exists', 'some', 'with', - ] - - # builtin names and special names - builtin = [ - 'for', 'while', 'repeat', - 'foreach', 'foreach-indexed', - 'error', 'catch', 'finally', - 'cs', 'js', 'file', 'ref', 'assigned', - ] - - # symbols that can be in an operator - symbols = '[\$%&\*\+@!/\\\^~=\.:\-\?\|<>]+' - - # symbol boundary: an operator keyword should not be followed by any of these - sboundary = '(?!'+symbols+')' - - # name boundary: a keyword should not be followed by any of these - boundary = '(?![\w/])' - - # koka token abstractions - tokenType = Name.Attribute - tokenTypeDef = Name.Class - tokenConstructor = Generic.Emph - - # main lexer - tokens = { - 'root': [ - include('whitespace'), - - # go into type mode - (r'::?' 
+ sboundary, tokenType, 'type'), - (r'(alias)(\s+)([a-z]\w*)?', bygroups(Keyword, Text, tokenTypeDef), - 'alias-type'), - (r'(struct)(\s+)([a-z]\w*)?', bygroups(Keyword, Text, tokenTypeDef), - 'struct-type'), - ((r'(%s)' % '|'.join(typeStartKeywords)) + - r'(\s+)([a-z]\w*)?', bygroups(Keyword, Text, tokenTypeDef), - 'type'), - - # special sequences of tokens (we use ?: for non-capturing group as - # required by 'bygroups') - (r'(module)(\s+)(interface\s+)?((?:[a-z]\w*/)*[a-z]\w*)', - bygroups(Keyword, Text, Keyword, Name.Namespace)), - (r'(import)(\s+)((?:[a-z]\w*/)*[a-z]\w*)' - r'(?:(\s*)(=)(\s*)((?:qualified\s*)?)' - r'((?:[a-z]\w*/)*[a-z]\w*))?', - bygroups(Keyword, Text, Name.Namespace, Text, Keyword, Text, - Keyword, Name.Namespace)), - - (r'(^(?:(?:public|private)\s*)?(?:function|fun|val))' - r'(\s+)([a-z]\w*|\((?:' + symbols + r'|/)\))', - bygroups(Keyword, Text, Name.Function)), - (r'(^(?:(?:public|private)\s*)?external)(\s+)(inline\s+)?' - r'([a-z]\w*|\((?:' + symbols + r'|/)\))', - bygroups(Keyword, Text, Keyword, Name.Function)), - - # keywords - (r'(%s)' % '|'.join(typekeywords) + boundary, Keyword.Type), - (r'(%s)' % '|'.join(keywords) + boundary, Keyword), - (r'(%s)' % '|'.join(builtin) + boundary, Keyword.Pseudo), - (r'::?|:=|\->|[=\.]' + sboundary, Keyword), - - # names - (r'((?:[a-z]\w*/)*)([A-Z]\w*)', - bygroups(Name.Namespace, tokenConstructor)), - (r'((?:[a-z]\w*/)*)([a-z]\w*)', bygroups(Name.Namespace, Name)), - (r'((?:[a-z]\w*/)*)(\((?:' + symbols + r'|/)\))', - bygroups(Name.Namespace, Name)), - (r'_\w*', Name.Variable), - - # literal string - (r'@"', String.Double, 'litstring'), - - # operators - (symbols + "|/(?![\*/])", Operator), - (r'`', Operator), - (r'[\{\}\(\)\[\];,]', Punctuation), - - # literals. 
No check for literal characters with len > 1 - (r'[0-9]+\.[0-9]+([eE][\-\+]?[0-9]+)?', Number.Float), - (r'0[xX][0-9a-fA-F]+', Number.Hex), - (r'[0-9]+', Number.Integer), - - (r"'", String.Char, 'char'), - (r'"', String.Double, 'string'), - ], - - # type started by alias - 'alias-type': [ - (r'=',Keyword), - include('type') - ], - - # type started by struct - 'struct-type': [ - (r'(?=\((?!,*\)))',Punctuation, '#pop'), - include('type') - ], - - # type started by colon - 'type': [ - (r'[\(\[<]', tokenType, 'type-nested'), - include('type-content') - ], - - # type nested in brackets: can contain parameters, comma etc. - 'type-nested': [ - (r'[\)\]>]', tokenType, '#pop'), - (r'[\(\[<]', tokenType, 'type-nested'), - (r',', tokenType), - (r'([a-z]\w*)(\s*)(:)(?!:)', - bygroups(Name, Text, tokenType)), # parameter name - include('type-content') - ], - - # shared contents of a type - 'type-content': [ - include('whitespace'), - - # keywords - (r'(%s)' % '|'.join(typekeywords) + boundary, Keyword), - (r'(?=((%s)' % '|'.join(keywords) + boundary + '))', - Keyword, '#pop'), # need to match because names overlap... 
- - # kinds - (r'[EPHVX]' + boundary, tokenType), - - # type names - (r'[a-z][0-9]*(?![\w/])', tokenType ), - (r'_\w*', tokenType.Variable), # Generic.Emph - (r'((?:[a-z]\w*/)*)([A-Z]\w*)', - bygroups(Name.Namespace, tokenType)), - (r'((?:[a-z]\w*/)*)([a-z]\w+)', - bygroups(Name.Namespace, tokenType)), - - # type keyword operators - (r'::|\->|[\.:|]', tokenType), - - #catchall - (r'', Text, '#pop') - ], - - # comments and literals - 'whitespace': [ - (r'\n\s*#.*$', Comment.Preproc), - (r'\s+', Text), - (r'/\*', Comment.Multiline, 'comment'), - (r'//.*$', Comment.Single) - ], - 'comment': [ - (r'[^/\*]+', Comment.Multiline), - (r'/\*', Comment.Multiline, '#push'), - (r'\*/', Comment.Multiline, '#pop'), - (r'[\*/]', Comment.Multiline), - ], - 'litstring': [ - (r'[^"]+', String.Double), - (r'""', String.Escape), - (r'"', String.Double, '#pop'), - ], - 'string': [ - (r'[^\\"\n]+', String.Double), - include('escape-sequence'), - (r'["\n]', String.Double, '#pop'), - ], - 'char': [ - (r'[^\\\'\n]+', String.Char), - include('escape-sequence'), - (r'[\'\n]', String.Char, '#pop'), - ], - 'escape-sequence': [ - (r'\\[nrt\\\"\']', String.Escape), - (r'\\x[0-9a-fA-F]{2}', String.Escape), - (r'\\u[0-9a-fA-F]{4}', String.Escape), - # Yes, \U literals are 6 hex digits. - (r'\\U[0-9a-fA-F]{6}', String.Escape) - ] - } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/hdl.py b/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/hdl.py deleted file mode 100644 index 57ffc34..0000000 --- a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/hdl.py +++ /dev/null @@ -1,356 +0,0 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.hdl - ~~~~~~~~~~~~~~~~~~~ - - Lexers for hardware descriptor languages. - - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. 
-""" - -import re -from pygments.lexer import RegexLexer, bygroups, include, using, this -from pygments.token import \ - Text, Comment, Operator, Keyword, Name, String, Number, Punctuation, \ - Error - -__all__ = ['VerilogLexer', 'SystemVerilogLexer', 'VhdlLexer'] - - -class VerilogLexer(RegexLexer): - """ - For verilog source code with preprocessor directives. - - *New in Pygments 1.4.* - """ - name = 'verilog' - aliases = ['verilog', 'v'] - filenames = ['*.v'] - mimetypes = ['text/x-verilog'] - - #: optional Comment or Whitespace - _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+' - - tokens = { - 'root': [ - (r'^\s*`define', Comment.Preproc, 'macro'), - (r'\n', Text), - (r'\s+', Text), - (r'\\\n', Text), # line continuation - (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single), - (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), - (r'[{}#@]', Punctuation), - (r'L?"', String, 'string'), - (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char), - (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float), - (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), - (r'([0-9]+)|(\'h)[0-9a-fA-F]+', Number.Hex), - (r'([0-9]+)|(\'b)[0-1]+', Number.Hex), # should be binary - (r'([0-9]+)|(\'d)[0-9]+', Number.Integer), - (r'([0-9]+)|(\'o)[0-7]+', Number.Oct), - (r'\'[01xz]', Number), - (r'\d+[Ll]?', Number.Integer), - (r'\*/', Error), - (r'[~!%^&*+=|?:<>/-]', Operator), - (r'[()\[\],.;\']', Punctuation), - (r'`[a-zA-Z_][a-zA-Z0-9_]*', Name.Constant), - - (r'^(\s*)(package)(\s+)', bygroups(Text, Keyword.Namespace, Text)), - (r'^(\s*)(import)(\s+)', bygroups(Text, Keyword.Namespace, Text), - 'import'), - - (r'(always|always_comb|always_ff|always_latch|and|assign|automatic|' - r'begin|break|buf|bufif0|bufif1|case|casex|casez|cmos|const|' - r'continue|deassign|default|defparam|disable|do|edge|else|end|endcase|' - r'endfunction|endgenerate|endmodule|endpackage|endprimitive|endspecify|' - r'endtable|endtask|enum|event|final|for|force|forever|fork|function|' - 
r'generate|genvar|highz0|highz1|if|initial|inout|input|' - r'integer|join|large|localparam|macromodule|medium|module|' - r'nand|negedge|nmos|nor|not|notif0|notif1|or|output|packed|' - r'parameter|pmos|posedge|primitive|pull0|pull1|pulldown|pullup|rcmos|' - r'ref|release|repeat|return|rnmos|rpmos|rtran|rtranif0|' - r'rtranif1|scalared|signed|small|specify|specparam|strength|' - r'string|strong0|strong1|struct|table|task|' - r'tran|tranif0|tranif1|type|typedef|' - r'unsigned|var|vectored|void|wait|weak0|weak1|while|' - r'xnor|xor)\b', Keyword), - - (r'`(accelerate|autoexpand_vectornets|celldefine|default_nettype|' - r'else|elsif|endcelldefine|endif|endprotect|endprotected|' - r'expand_vectornets|ifdef|ifndef|include|noaccelerate|noexpand_vectornets|' - r'noremove_gatenames|noremove_netnames|nounconnected_drive|' - r'protect|protected|remove_gatenames|remove_netnames|resetall|' - r'timescale|unconnected_drive|undef)\b', Comment.Preproc), - - (r'\$(bits|bitstoreal|bitstoshortreal|countdrivers|display|fclose|' - r'fdisplay|finish|floor|fmonitor|fopen|fstrobe|fwrite|' - r'getpattern|history|incsave|input|itor|key|list|log|' - r'monitor|monitoroff|monitoron|nokey|nolog|printtimescale|' - r'random|readmemb|readmemh|realtime|realtobits|reset|reset_count|' - r'reset_value|restart|rtoi|save|scale|scope|shortrealtobits|' - r'showscopes|showvariables|showvars|sreadmemb|sreadmemh|' - r'stime|stop|strobe|time|timeformat|write)\b', Name.Builtin), - - (r'(byte|shortint|int|longint|integer|time|' - r'bit|logic|reg|' - r'supply0|supply1|tri|triand|trior|tri0|tri1|trireg|uwire|wire|wand|wor' - r'shortreal|real|realtime)\b', Keyword.Type), - ('[a-zA-Z_][a-zA-Z0-9_]*:(?!:)', Name.Label), - ('[a-zA-Z_][a-zA-Z0-9_]*', Name), - ], - 'string': [ - (r'"', String, '#pop'), - (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), - (r'[^\\"\n]+', String), # all other characters - (r'\\\n', String), # line continuation - (r'\\', String), # stray backslash - ], - 'macro': [ - 
(r'[^/\n]+', Comment.Preproc), - (r'/[*](.|\n)*?[*]/', Comment.Multiline), - (r'//.*?\n', Comment.Single, '#pop'), - (r'/', Comment.Preproc), - (r'(?<=\\)\n', Comment.Preproc), - (r'\n', Comment.Preproc, '#pop'), - ], - 'import': [ - (r'[a-zA-Z0-9_:]+\*?', Name.Namespace, '#pop') - ] - } - - def get_tokens_unprocessed(self, text): - for index, token, value in \ - RegexLexer.get_tokens_unprocessed(self, text): - # Convention: mark all upper case names as constants - if token is Name: - if value.isupper(): - token = Name.Constant - yield index, token, value - - -class SystemVerilogLexer(RegexLexer): - """ - Extends verilog lexer to recognise all SystemVerilog keywords from IEEE - 1800-2009 standard. - - *New in Pygments 1.5.* - """ - name = 'systemverilog' - aliases = ['systemverilog', 'sv'] - filenames = ['*.sv', '*.svh'] - mimetypes = ['text/x-systemverilog'] - - #: optional Comment or Whitespace - _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+' - - tokens = { - 'root': [ - (r'^\s*`define', Comment.Preproc, 'macro'), - (r'^(\s*)(package)(\s+)', bygroups(Text, Keyword.Namespace, Text)), - (r'^(\s*)(import)(\s+)', bygroups(Text, Keyword.Namespace, Text), 'import'), - - (r'\n', Text), - (r'\s+', Text), - (r'\\\n', Text), # line continuation - (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single), - (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), - (r'[{}#@]', Punctuation), - (r'L?"', String, 'string'), - (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char), - (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float), - (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), - (r'([0-9]+)|(\'h)[0-9a-fA-F]+', Number.Hex), - (r'([0-9]+)|(\'b)[0-1]+', Number.Hex), # should be binary - (r'([0-9]+)|(\'d)[0-9]+', Number.Integer), - (r'([0-9]+)|(\'o)[0-7]+', Number.Oct), - (r'\'[01xz]', Number), - (r'\d+[Ll]?', Number.Integer), - (r'\*/', Error), - (r'[~!%^&*+=|?:<>/-]', Operator), - (r'[()\[\],.;\']', Punctuation), - (r'`[a-zA-Z_][a-zA-Z0-9_]*', Name.Constant), - - 
(r'(accept_on|alias|always|always_comb|always_ff|always_latch|' - r'and|assert|assign|assume|automatic|before|begin|bind|bins|' - r'binsof|bit|break|buf|bufif0|bufif1|byte|case|casex|casez|' - r'cell|chandle|checker|class|clocking|cmos|config|const|constraint|' - r'context|continue|cover|covergroup|coverpoint|cross|deassign|' - r'default|defparam|design|disable|dist|do|edge|else|end|endcase|' - r'endchecker|endclass|endclocking|endconfig|endfunction|endgenerate|' - r'endgroup|endinterface|endmodule|endpackage|endprimitive|' - r'endprogram|endproperty|endsequence|endspecify|endtable|' - r'endtask|enum|event|eventually|expect|export|extends|extern|' - r'final|first_match|for|force|foreach|forever|fork|forkjoin|' - r'function|generate|genvar|global|highz0|highz1|if|iff|ifnone|' - r'ignore_bins|illegal_bins|implies|import|incdir|include|' - r'initial|inout|input|inside|instance|int|integer|interface|' - r'intersect|join|join_any|join_none|large|let|liblist|library|' - r'local|localparam|logic|longint|macromodule|matches|medium|' - r'modport|module|nand|negedge|new|nexttime|nmos|nor|noshowcancelled|' - r'not|notif0|notif1|null|or|output|package|packed|parameter|' - r'pmos|posedge|primitive|priority|program|property|protected|' - r'pull0|pull1|pulldown|pullup|pulsestyle_ondetect|pulsestyle_onevent|' - r'pure|rand|randc|randcase|randsequence|rcmos|real|realtime|' - r'ref|reg|reject_on|release|repeat|restrict|return|rnmos|' - r'rpmos|rtran|rtranif0|rtranif1|s_always|s_eventually|s_nexttime|' - r's_until|s_until_with|scalared|sequence|shortint|shortreal|' - r'showcancelled|signed|small|solve|specify|specparam|static|' - r'string|strong|strong0|strong1|struct|super|supply0|supply1|' - r'sync_accept_on|sync_reject_on|table|tagged|task|this|throughout|' - r'time|timeprecision|timeunit|tran|tranif0|tranif1|tri|tri0|' - r'tri1|triand|trior|trireg|type|typedef|union|unique|unique0|' - r'unsigned|until|until_with|untyped|use|uwire|var|vectored|' - 
r'virtual|void|wait|wait_order|wand|weak|weak0|weak1|while|' - r'wildcard|wire|with|within|wor|xnor|xor)\b', Keyword ), - - (r'(`__FILE__|`__LINE__|`begin_keywords|`celldefine|`default_nettype|' - r'`define|`else|`elsif|`end_keywords|`endcelldefine|`endif|' - r'`ifdef|`ifndef|`include|`line|`nounconnected_drive|`pragma|' - r'`resetall|`timescale|`unconnected_drive|`undef|`undefineall)\b', - Comment.Preproc ), - - (r'(\$display|\$displayb|\$displayh|\$displayo|\$dumpall|\$dumpfile|' - r'\$dumpflush|\$dumplimit|\$dumpoff|\$dumpon|\$dumpports|' - r'\$dumpportsall|\$dumpportsflush|\$dumpportslimit|\$dumpportsoff|' - r'\$dumpportson|\$dumpvars|\$fclose|\$fdisplay|\$fdisplayb|' - r'\$fdisplayh|\$fdisplayo|\$feof|\$ferror|\$fflush|\$fgetc|' - r'\$fgets|\$fmonitor|\$fmonitorb|\$fmonitorh|\$fmonitoro|' - r'\$fopen|\$fread|\$fscanf|\$fseek|\$fstrobe|\$fstrobeb|\$fstrobeh|' - r'\$fstrobeo|\$ftell|\$fwrite|\$fwriteb|\$fwriteh|\$fwriteo|' - r'\$monitor|\$monitorb|\$monitorh|\$monitoro|\$monitoroff|' - r'\$monitoron|\$plusargs|\$readmemb|\$readmemh|\$rewind|\$sformat|' - r'\$sformatf|\$sscanf|\$strobe|\$strobeb|\$strobeh|\$strobeo|' - r'\$swrite|\$swriteb|\$swriteh|\$swriteo|\$test|\$ungetc|' - r'\$value\$plusargs|\$write|\$writeb|\$writeh|\$writememb|' - r'\$writememh|\$writeo)\b' , Name.Builtin ), - - (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'), - (r'(byte|shortint|int|longint|integer|time|' - r'bit|logic|reg|' - r'supply0|supply1|tri|triand|trior|tri0|tri1|trireg|uwire|wire|wand|wor' - r'shortreal|real|realtime)\b', Keyword.Type), - ('[a-zA-Z_][a-zA-Z0-9_]*:(?!:)', Name.Label), - ('[a-zA-Z_][a-zA-Z0-9_]*', Name), - ], - 'classname': [ - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop'), - ], - 'string': [ - (r'"', String, '#pop'), - (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), - (r'[^\\"\n]+', String), # all other characters - (r'\\\n', String), # line continuation - (r'\\', String), # stray backslash - ], - 'macro': [ - (r'[^/\n]+', 
Comment.Preproc), - (r'/[*](.|\n)*?[*]/', Comment.Multiline), - (r'//.*?\n', Comment.Single, '#pop'), - (r'/', Comment.Preproc), - (r'(?<=\\)\n', Comment.Preproc), - (r'\n', Comment.Preproc, '#pop'), - ], - 'import': [ - (r'[a-zA-Z0-9_:]+\*?', Name.Namespace, '#pop') - ] - } - - def get_tokens_unprocessed(self, text): - for index, token, value in \ - RegexLexer.get_tokens_unprocessed(self, text): - # Convention: mark all upper case names as constants - if token is Name: - if value.isupper(): - token = Name.Constant - yield index, token, value - - def analyse_text(text): - if text.startswith('//') or text.startswith('/*'): - return 0.5 - - -class VhdlLexer(RegexLexer): - """ - For VHDL source code. - - *New in Pygments 1.5.* - """ - name = 'vhdl' - aliases = ['vhdl'] - filenames = ['*.vhdl', '*.vhd'] - mimetypes = ['text/x-vhdl'] - flags = re.MULTILINE | re.IGNORECASE - - tokens = { - 'root': [ - (r'\n', Text), - (r'\s+', Text), - (r'\\\n', Text), # line continuation - (r'--(?![!#$%&*+./<=>?@\^|_~]).*?$', Comment.Single), - (r"'(U|X|0|1|Z|W|L|H|-)'", String.Char), - (r'[~!%^&*+=|?:<>/-]', Operator), - (r"'[a-zA-Z_][a-zA-Z0-9_]*", Name.Attribute), - (r'[()\[\],.;\']', Punctuation), - (r'"[^\n\\]*"', String), - - (r'(library)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)', - bygroups(Keyword, Text, Name.Namespace)), - (r'(use)(\s+)(entity)', bygroups(Keyword, Text, Keyword)), - (r'(use)(\s+)([a-zA-Z_][\.a-zA-Z0-9_]*)', - bygroups(Keyword, Text, Name.Namespace)), - (r'(entity|component)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)', - bygroups(Keyword, Text, Name.Class)), - (r'(architecture|configuration)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)(\s+)' - r'(of)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)(\s+)(is)', - bygroups(Keyword, Text, Name.Class, Text, Keyword, Text, - Name.Class, Text, Keyword)), - - (r'(end)(\s+)', bygroups(using(this), Text), 'endblock'), - - include('types'), - include('keywords'), - include('numbers'), - - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name), - ], - 'endblock': [ - include('keywords'), - 
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class), - (r'(\s+)', Text), - (r';', Punctuation, '#pop'), - ], - 'types': [ - (r'(boolean|bit|character|severity_level|integer|time|delay_length|' - r'natural|positive|string|bit_vector|file_open_kind|' - r'file_open_status|std_ulogic|std_ulogic_vector|std_logic|' - r'std_logic_vector)\b', Keyword.Type), - ], - 'keywords': [ - (r'(abs|access|after|alias|all|and|' - r'architecture|array|assert|attribute|begin|block|' - r'body|buffer|bus|case|component|configuration|' - r'constant|disconnect|downto|else|elsif|end|' - r'entity|exit|file|for|function|generate|' - r'generic|group|guarded|if|impure|in|' - r'inertial|inout|is|label|library|linkage|' - r'literal|loop|map|mod|nand|new|' - r'next|nor|not|null|of|on|' - r'open|or|others|out|package|port|' - r'postponed|procedure|process|pure|range|record|' - r'register|reject|return|rol|ror|select|' - r'severity|signal|shared|sla|sli|sra|' - r'srl|subtype|then|to|transport|type|' - r'units|until|use|variable|wait|when|' - r'while|with|xnor|xor)\b', Keyword), - ], - 'numbers': [ - (r'\d{1,2}#[0-9a-fA-F_]+#?', Number.Integer), - (r'[0-1_]+(\.[0-1_])', Number.Integer), - (r'\d+', Number.Integer), - (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float), - (r'H"[0-9a-fA-F_]+"', Number.Oct), - (r'O"[0-7_]+"', Number.Oct), - (r'B"[0-1_]+"', Number.Oct), - ], - } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/math.py b/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/math.py deleted file mode 100644 index f0e49fe..0000000 --- a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/math.py +++ /dev/null @@ -1,1918 +0,0 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.math - ~~~~~~~~~~~~~~~~~~~~ - - Lexers for math languages. - - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. 
-""" - -import re - -from pygments.util import shebang_matches -from pygments.lexer import Lexer, RegexLexer, bygroups, include, \ - combined, do_insertions -from pygments.token import Comment, String, Punctuation, Keyword, Name, \ - Operator, Number, Text, Generic - -from pygments.lexers.agile import PythonLexer -from pygments.lexers import _scilab_builtins -from pygments.lexers import _stan_builtins - -__all__ = ['JuliaLexer', 'JuliaConsoleLexer', 'MuPADLexer', 'MatlabLexer', - 'MatlabSessionLexer', 'OctaveLexer', 'ScilabLexer', 'NumPyLexer', - 'RConsoleLexer', 'SLexer', 'JagsLexer', 'BugsLexer', 'StanLexer', - 'IDLLexer', 'RdLexer', 'IgorLexer'] - - -class JuliaLexer(RegexLexer): - """ - For `Julia `_ source code. - - *New in Pygments 1.6.* - """ - name = 'Julia' - aliases = ['julia','jl'] - filenames = ['*.jl'] - mimetypes = ['text/x-julia','application/x-julia'] - - builtins = [ - 'exit','whos','edit','load','is','isa','isequal','typeof','tuple', - 'ntuple','uid','hash','finalizer','convert','promote','subtype', - 'typemin','typemax','realmin','realmax','sizeof','eps','promote_type', - 'method_exists','applicable','invoke','dlopen','dlsym','system', - 'error','throw','assert','new','Inf','Nan','pi','im', - ] - - tokens = { - 'root': [ - (r'\n', Text), - (r'[^\S\n]+', Text), - (r'#.*$', Comment), - (r'[]{}:(),;[@]', Punctuation), - (r'\\\n', Text), - (r'\\', Text), - - # keywords - (r'(begin|while|for|in|return|break|continue|' - r'macro|quote|let|if|elseif|else|try|catch|end|' - r'bitstype|ccall|do|using|module|import|export|' - r'importall|baremodule|immutable)\b', Keyword), - (r'(local|global|const)\b', Keyword.Declaration), - (r'(Bool|Int|Int8|Int16|Int32|Int64|Uint|Uint8|Uint16|Uint32|Uint64' - r'|Float32|Float64|Complex64|Complex128|Any|Nothing|None)\b', - Keyword.Type), - - # functions - (r'(function)((?:\s|\\\s)+)', - bygroups(Keyword,Name.Function), 'funcname'), - - # types - (r'(type|typealias|abstract)((?:\s|\\\s)+)', - bygroups(Keyword,Name.Class), 
'typename'), - - # operators - (r'==|!=|<=|>=|->|&&|\|\||::|<:|[-~+/*%=<>&^|.?!$]', Operator), - (r'\.\*|\.\^|\.\\|\.\/|\\', Operator), - - # builtins - ('(' + '|'.join(builtins) + r')\b', Name.Builtin), - - # backticks - (r'`(?s).*?`', String.Backtick), - - # chars - (r"'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,3}|\\u[a-fA-F0-9]{1,4}|" - r"\\U[a-fA-F0-9]{1,6}|[^\\\'\n])'", String.Char), - - # try to match trailing transpose - (r'(?<=[.\w\)\]])\'+', Operator), - - # strings - (r'(?:[IL])"', String, 'string'), - (r'[E]?"', String, combined('stringescape', 'string')), - - # names - (r'@[a-zA-Z0-9_.]+', Name.Decorator), - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name), - - # numbers - (r'(\d+(_\d+)+\.\d*|\d*\.\d+(_\d+)+)([eEf][+-]?[0-9]+)?', Number.Float), - (r'(\d+\.\d*|\d*\.\d+)([eEf][+-]?[0-9]+)?', Number.Float), - (r'\d+(_\d+)+[eEf][+-]?[0-9]+', Number.Float), - (r'\d+[eEf][+-]?[0-9]+', Number.Float), - (r'0b[01]+(_[01]+)+', Number.Binary), - (r'0b[01]+', Number.Binary), - (r'0o[0-7]+(_[0-7]+)+', Number.Oct), - (r'0o[0-7]+', Number.Oct), - (r'0x[a-fA-F0-9]+(_[a-fA-F0-9]+)+', Number.Hex), - (r'0x[a-fA-F0-9]+', Number.Hex), - (r'\d+(_\d+)+', Number.Integer), - (r'\d+', Number.Integer) - ], - - 'funcname': [ - ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Function, '#pop'), - ('\([^\s\w{]{1,2}\)', Operator, '#pop'), - ('[^\s\w{]{1,2}', Operator, '#pop'), - ], - - 'typename': [ - ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop') - ], - - 'stringescape': [ - (r'\\([\\abfnrtv"\']|\n|N{.*?}|u[a-fA-F0-9]{4}|' - r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape) - ], - - 'string': [ - (r'"', String, '#pop'), - (r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings - (r'\$(\([a-zA-Z0-9_]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?', - String.Interpol), - (r'[^\\"$]+', String), - # quotes, dollar signs, and backslashes must be parsed one at a time - (r'["\\]', String), - # unhandled string formatting sign - (r'\$', String) - ], - } - - def analyse_text(text): - return 
shebang_matches(text, r'julia') - - -line_re = re.compile('.*?\n') - -class JuliaConsoleLexer(Lexer): - """ - For Julia console sessions. Modeled after MatlabSessionLexer. - - *New in Pygments 1.6.* - """ - name = 'Julia console' - aliases = ['jlcon'] - - def get_tokens_unprocessed(self, text): - jllexer = JuliaLexer(**self.options) - - curcode = '' - insertions = [] - - for match in line_re.finditer(text): - line = match.group() - - if line.startswith('julia>'): - insertions.append((len(curcode), - [(0, Generic.Prompt, line[:3])])) - curcode += line[3:] - - elif line.startswith(' '): - - idx = len(curcode) - - # without is showing error on same line as before...? - line = "\n" + line - token = (0, Generic.Traceback, line) - insertions.append((idx, [token])) - - else: - if curcode: - for item in do_insertions( - insertions, jllexer.get_tokens_unprocessed(curcode)): - yield item - curcode = '' - insertions = [] - - yield match.start(), Generic.Output, line - - if curcode: # or item: - for item in do_insertions( - insertions, jllexer.get_tokens_unprocessed(curcode)): - yield item - - -class MuPADLexer(RegexLexer): - """ - A `MuPAD `_ lexer. - Contributed by Christopher Creutzig . 
- - *New in Pygments 0.8.* - """ - name = 'MuPAD' - aliases = ['mupad'] - filenames = ['*.mu'] - - tokens = { - 'root' : [ - (r'//.*?$', Comment.Single), - (r'/\*', Comment.Multiline, 'comment'), - (r'"(?:[^"\\]|\\.)*"', String), - (r'\(|\)|\[|\]|\{|\}', Punctuation), - (r'''(?x)\b(?: - next|break|end| - axiom|end_axiom|category|end_category|domain|end_domain|inherits| - if|%if|then|elif|else|end_if| - case|of|do|otherwise|end_case| - while|end_while| - repeat|until|end_repeat| - for|from|to|downto|step|end_for| - proc|local|option|save|begin|end_proc| - delete|frame - )\b''', Keyword), - (r'''(?x)\b(?: - DOM_ARRAY|DOM_BOOL|DOM_COMPLEX|DOM_DOMAIN|DOM_EXEC|DOM_EXPR| - DOM_FAIL|DOM_FLOAT|DOM_FRAME|DOM_FUNC_ENV|DOM_HFARRAY|DOM_IDENT| - DOM_INT|DOM_INTERVAL|DOM_LIST|DOM_NIL|DOM_NULL|DOM_POLY|DOM_PROC| - DOM_PROC_ENV|DOM_RAT|DOM_SET|DOM_STRING|DOM_TABLE|DOM_VAR - )\b''', Name.Class), - (r'''(?x)\b(?: - PI|EULER|E|CATALAN| - NIL|FAIL|undefined|infinity| - TRUE|FALSE|UNKNOWN - )\b''', - Name.Constant), - (r'\b(?:dom|procname)\b', Name.Builtin.Pseudo), - (r'\.|,|:|;|=|\+|-|\*|/|\^|@|>|<|\$|\||!|\'|%|~=', Operator), - (r'''(?x)\b(?: - and|or|not|xor| - assuming| - div|mod| - union|minus|intersect|in|subset - )\b''', - Operator.Word), - (r'\b(?:I|RDN_INF|RD_NINF|RD_NAN)\b', Number), - #(r'\b(?:adt|linalg|newDomain|hold)\b', Name.Builtin), - (r'''(?x) - ((?:[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`) - (?:::[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)*)(\s*)([(])''', - bygroups(Name.Function, Text, Punctuation)), - (r'''(?x) - (?:[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`) - (?:::[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)*''', Name.Variable), - (r'[0-9]+(?:\.[0-9]*)?(?:e[0-9]+)?', Number), - (r'\.[0-9]+(?:e[0-9]+)?', Number), - (r'.', Text) - ], - 'comment' : [ - (r'[^*/]', Comment.Multiline), - (r'/\*', Comment.Multiline, '#push'), - (r'\*/', Comment.Multiline, '#pop'), - (r'[*/]', Comment.Multiline) - ] - } - - -class MatlabLexer(RegexLexer): - """ - For Matlab source code. 
- - *New in Pygments 0.10.* - """ - name = 'Matlab' - aliases = ['matlab'] - filenames = ['*.m'] - mimetypes = ['text/matlab'] - - # - # These lists are generated automatically. - # Run the following in bash shell: - # - # for f in elfun specfun elmat; do - # echo -n "$f = " - # matlab -nojvm -r "help $f;exit;" | perl -ne \ - # 'push(@c,$1) if /^ (\w+)\s+-/; END {print q{["}.join(q{","},@c).qq{"]\n};}' - # done - # - # elfun: Elementary math functions - # specfun: Special Math functions - # elmat: Elementary matrices and matrix manipulation - # - # taken from Matlab version 7.4.0.336 (R2007a) - # - elfun = ["sin","sind","sinh","asin","asind","asinh","cos","cosd","cosh", - "acos","acosd","acosh","tan","tand","tanh","atan","atand","atan2", - "atanh","sec","secd","sech","asec","asecd","asech","csc","cscd", - "csch","acsc","acscd","acsch","cot","cotd","coth","acot","acotd", - "acoth","hypot","exp","expm1","log","log1p","log10","log2","pow2", - "realpow","reallog","realsqrt","sqrt","nthroot","nextpow2","abs", - "angle","complex","conj","imag","real","unwrap","isreal","cplxpair", - "fix","floor","ceil","round","mod","rem","sign"] - specfun = ["airy","besselj","bessely","besselh","besseli","besselk","beta", - "betainc","betaln","ellipj","ellipke","erf","erfc","erfcx", - "erfinv","expint","gamma","gammainc","gammaln","psi","legendre", - "cross","dot","factor","isprime","primes","gcd","lcm","rat", - "rats","perms","nchoosek","factorial","cart2sph","cart2pol", - "pol2cart","sph2cart","hsv2rgb","rgb2hsv"] - elmat = ["zeros","ones","eye","repmat","rand","randn","linspace","logspace", - "freqspace","meshgrid","accumarray","size","length","ndims","numel", - "disp","isempty","isequal","isequalwithequalnans","cat","reshape", - "diag","blkdiag","tril","triu","fliplr","flipud","flipdim","rot90", - "find","end","sub2ind","ind2sub","bsxfun","ndgrid","permute", - "ipermute","shiftdim","circshift","squeeze","isscalar","isvector", - 
"ans","eps","realmax","realmin","pi","i","inf","nan","isnan", - "isinf","isfinite","j","why","compan","gallery","hadamard","hankel", - "hilb","invhilb","magic","pascal","rosser","toeplitz","vander", - "wilkinson"] - - tokens = { - 'root': [ - # line starting with '!' is sent as a system command. not sure what - # label to use... - (r'^!.*', String.Other), - (r'%\{\s*\n', Comment.Multiline, 'blockcomment'), - (r'%.*$', Comment), - (r'^\s*function', Keyword, 'deffunc'), - - # from 'iskeyword' on version 7.11 (R2010): - (r'(break|case|catch|classdef|continue|else|elseif|end|enumerated|' - r'events|for|function|global|if|methods|otherwise|parfor|' - r'persistent|properties|return|spmd|switch|try|while)\b', Keyword), - - ("(" + "|".join(elfun+specfun+elmat) + r')\b', Name.Builtin), - - # line continuation with following comment: - (r'\.\.\..*$', Comment), - - # operators: - (r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator), - # operators requiring escape for re: - (r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator), - - # punctuation: - (r'\[|\]|\(|\)|\{|\}|:|@|\.|,', Punctuation), - (r'=|:|;', Punctuation), - - # quote can be transpose, instead of string: - # (not great, but handles common cases...) - (r'(?<=[\w\)\]])\'', Operator), - - (r'(\d+\.\d*|\d*\.\d+)([eEf][+-]?[0-9]+)?', Number.Float), - (r'\d+[eEf][+-]?[0-9]+', Number.Float), - (r'\d+', Number.Integer), - - (r'(?. - - *New in Pygments 0.10.* - """ - name = 'Matlab session' - aliases = ['matlabsession'] - - def get_tokens_unprocessed(self, text): - mlexer = MatlabLexer(**self.options) - - curcode = '' - insertions = [] - - for match in line_re.finditer(text): - line = match.group() - - if line.startswith('>>'): - insertions.append((len(curcode), - [(0, Generic.Prompt, line[:3])])) - curcode += line[3:] - - elif line.startswith('???'): - - idx = len(curcode) - - # without is showing error on same line as before...? 
- line = "\n" + line - token = (0, Generic.Traceback, line) - insertions.append((idx, [token])) - - else: - if curcode: - for item in do_insertions( - insertions, mlexer.get_tokens_unprocessed(curcode)): - yield item - curcode = '' - insertions = [] - - yield match.start(), Generic.Output, line - - if curcode: # or item: - for item in do_insertions( - insertions, mlexer.get_tokens_unprocessed(curcode)): - yield item - - -class OctaveLexer(RegexLexer): - """ - For GNU Octave source code. - - *New in Pygments 1.5.* - """ - name = 'Octave' - aliases = ['octave'] - filenames = ['*.m'] - mimetypes = ['text/octave'] - - # These lists are generated automatically. - # Run the following in bash shell: - # - # First dump all of the Octave manual into a plain text file: - # - # $ info octave --subnodes -o octave-manual - # - # Now grep through it: - - # for i in \ - # "Built-in Function" "Command" "Function File" \ - # "Loadable Function" "Mapping Function"; - # do - # perl -e '@name = qw('"$i"'); - # print lc($name[0]),"_kw = [\n"'; - # - # perl -n -e 'print "\"$1\",\n" if /-- '"$i"': .* (\w*) \(/;' \ - # octave-manual | sort | uniq ; - # echo "]" ; - # echo; - # done - - # taken from Octave Mercurial changeset 8cc154f45e37 (30-jan-2011) - - builtin_kw = [ "addlistener", "addpath", "addproperty", "all", - "and", "any", "argnames", "argv", "assignin", - "atexit", "autoload", - "available_graphics_toolkits", "beep_on_error", - "bitand", "bitmax", "bitor", "bitshift", "bitxor", - "cat", "cell", "cellstr", "char", "class", "clc", - "columns", "command_line_path", - "completion_append_char", "completion_matches", - "complex", "confirm_recursive_rmdir", "cputime", - "crash_dumps_octave_core", "ctranspose", "cumprod", - "cumsum", "debug_on_error", "debug_on_interrupt", - "debug_on_warning", "default_save_options", - "dellistener", "diag", "diff", "disp", - "doc_cache_file", "do_string_escapes", "double", - "drawnow", "e", "echo_executing_commands", "eps", - "eq", "errno", 
"errno_list", "error", "eval", - "evalin", "exec", "exist", "exit", "eye", "false", - "fclear", "fclose", "fcntl", "fdisp", "feof", - "ferror", "feval", "fflush", "fgetl", "fgets", - "fieldnames", "file_in_loadpath", "file_in_path", - "filemarker", "filesep", "find_dir_in_path", - "fixed_point_format", "fnmatch", "fopen", "fork", - "formula", "fprintf", "fputs", "fread", "freport", - "frewind", "fscanf", "fseek", "fskipl", "ftell", - "functions", "fwrite", "ge", "genpath", "get", - "getegid", "getenv", "geteuid", "getgid", - "getpgrp", "getpid", "getppid", "getuid", "glob", - "gt", "gui_mode", "history_control", - "history_file", "history_size", - "history_timestamp_format_string", "home", - "horzcat", "hypot", "ifelse", - "ignore_function_time_stamp", "inferiorto", - "info_file", "info_program", "inline", "input", - "intmax", "intmin", "ipermute", - "is_absolute_filename", "isargout", "isbool", - "iscell", "iscellstr", "ischar", "iscomplex", - "isempty", "isfield", "isfloat", "isglobal", - "ishandle", "isieee", "isindex", "isinteger", - "islogical", "ismatrix", "ismethod", "isnull", - "isnumeric", "isobject", "isreal", - "is_rooted_relative_filename", "issorted", - "isstruct", "isvarname", "kbhit", "keyboard", - "kill", "lasterr", "lasterror", "lastwarn", - "ldivide", "le", "length", "link", "linspace", - "logical", "lstat", "lt", "make_absolute_filename", - "makeinfo_program", "max_recursion_depth", "merge", - "methods", "mfilename", "minus", "mislocked", - "mkdir", "mkfifo", "mkstemp", "mldivide", "mlock", - "mouse_wheel_zoom", "mpower", "mrdivide", "mtimes", - "munlock", "nargin", "nargout", - "native_float_format", "ndims", "ne", "nfields", - "nnz", "norm", "not", "numel", "nzmax", - "octave_config_info", "octave_core_file_limit", - "octave_core_file_name", - "octave_core_file_options", "ones", "or", - "output_max_field_width", "output_precision", - "page_output_immediately", "page_screen_output", - "path", "pathsep", "pause", "pclose", "permute", - "pi", 
"pipe", "plus", "popen", "power", - "print_empty_dimensions", "printf", - "print_struct_array_contents", "prod", - "program_invocation_name", "program_name", - "putenv", "puts", "pwd", "quit", "rats", "rdivide", - "readdir", "readlink", "read_readline_init_file", - "realmax", "realmin", "rehash", "rename", - "repelems", "re_read_readline_init_file", "reset", - "reshape", "resize", "restoredefaultpath", - "rethrow", "rmdir", "rmfield", "rmpath", "rows", - "save_header_format_string", "save_precision", - "saving_history", "scanf", "set", "setenv", - "shell_cmd", "sighup_dumps_octave_core", - "sigterm_dumps_octave_core", "silent_functions", - "single", "size", "size_equal", "sizemax", - "sizeof", "sleep", "source", "sparse_auto_mutate", - "split_long_rows", "sprintf", "squeeze", "sscanf", - "stat", "stderr", "stdin", "stdout", "strcmp", - "strcmpi", "string_fill_char", "strncmp", - "strncmpi", "struct", "struct_levels_to_print", - "strvcat", "subsasgn", "subsref", "sum", "sumsq", - "superiorto", "suppress_verbose_help_message", - "symlink", "system", "tic", "tilde_expand", - "times", "tmpfile", "tmpnam", "toc", "toupper", - "transpose", "true", "typeinfo", "umask", "uminus", - "uname", "undo_string_escapes", "unlink", "uplus", - "upper", "usage", "usleep", "vec", "vectorize", - "vertcat", "waitpid", "warning", "warranty", - "whos_line_format", "yes_or_no", "zeros", - "inf", "Inf", "nan", "NaN"] - - command_kw = [ "close", "load", "who", "whos", ] - - function_kw = [ "accumarray", "accumdim", "acosd", "acotd", - "acscd", "addtodate", "allchild", "ancestor", - "anova", "arch_fit", "arch_rnd", "arch_test", - "area", "arma_rnd", "arrayfun", "ascii", "asctime", - "asecd", "asind", "assert", "atand", - "autoreg_matrix", "autumn", "axes", "axis", "bar", - "barh", "bartlett", "bartlett_test", "beep", - "betacdf", "betainv", "betapdf", "betarnd", - "bicgstab", "bicubic", "binary", "binocdf", - "binoinv", "binopdf", "binornd", "bitcmp", - "bitget", "bitset", "blackman", 
"blanks", - "blkdiag", "bone", "box", "brighten", "calendar", - "cast", "cauchy_cdf", "cauchy_inv", "cauchy_pdf", - "cauchy_rnd", "caxis", "celldisp", "center", "cgs", - "chisquare_test_homogeneity", - "chisquare_test_independence", "circshift", "cla", - "clabel", "clf", "clock", "cloglog", "closereq", - "colon", "colorbar", "colormap", "colperm", - "comet", "common_size", "commutation_matrix", - "compan", "compare_versions", "compass", - "computer", "cond", "condest", "contour", - "contourc", "contourf", "contrast", "conv", - "convhull", "cool", "copper", "copyfile", "cor", - "corrcoef", "cor_test", "cosd", "cotd", "cov", - "cplxpair", "cross", "cscd", "cstrcat", "csvread", - "csvwrite", "ctime", "cumtrapz", "curl", "cut", - "cylinder", "date", "datenum", "datestr", - "datetick", "datevec", "dblquad", "deal", - "deblank", "deconv", "delaunay", "delaunayn", - "delete", "demo", "detrend", "diffpara", "diffuse", - "dir", "discrete_cdf", "discrete_inv", - "discrete_pdf", "discrete_rnd", "display", - "divergence", "dlmwrite", "dos", "dsearch", - "dsearchn", "duplication_matrix", "durbinlevinson", - "ellipsoid", "empirical_cdf", "empirical_inv", - "empirical_pdf", "empirical_rnd", "eomday", - "errorbar", "etime", "etreeplot", "example", - "expcdf", "expinv", "expm", "exppdf", "exprnd", - "ezcontour", "ezcontourf", "ezmesh", "ezmeshc", - "ezplot", "ezpolar", "ezsurf", "ezsurfc", "factor", - "factorial", "fail", "fcdf", "feather", "fftconv", - "fftfilt", "fftshift", "figure", "fileattrib", - "fileparts", "fill", "findall", "findobj", - "findstr", "finv", "flag", "flipdim", "fliplr", - "flipud", "fpdf", "fplot", "fractdiff", "freqz", - "freqz_plot", "frnd", "fsolve", - "f_test_regression", "ftp", "fullfile", "fzero", - "gamcdf", "gaminv", "gampdf", "gamrnd", "gca", - "gcbf", "gcbo", "gcf", "genvarname", "geocdf", - "geoinv", "geopdf", "geornd", "getfield", "ginput", - "glpk", "gls", "gplot", "gradient", - "graphics_toolkit", "gray", "grid", "griddata", - "griddatan", 
"gtext", "gunzip", "gzip", "hadamard", - "hamming", "hankel", "hanning", "hggroup", - "hidden", "hilb", "hist", "histc", "hold", "hot", - "hotelling_test", "housh", "hsv", "hurst", - "hygecdf", "hygeinv", "hygepdf", "hygernd", - "idivide", "ifftshift", "image", "imagesc", - "imfinfo", "imread", "imshow", "imwrite", "index", - "info", "inpolygon", "inputname", "interpft", - "interpn", "intersect", "invhilb", "iqr", "isa", - "isdefinite", "isdir", "is_duplicate_entry", - "isequal", "isequalwithequalnans", "isfigure", - "ishermitian", "ishghandle", "is_leap_year", - "isletter", "ismac", "ismember", "ispc", "isprime", - "isprop", "isscalar", "issquare", "isstrprop", - "issymmetric", "isunix", "is_valid_file_id", - "isvector", "jet", "kendall", - "kolmogorov_smirnov_cdf", - "kolmogorov_smirnov_test", "kruskal_wallis_test", - "krylov", "kurtosis", "laplace_cdf", "laplace_inv", - "laplace_pdf", "laplace_rnd", "legend", "legendre", - "license", "line", "linkprop", "list_primes", - "loadaudio", "loadobj", "logistic_cdf", - "logistic_inv", "logistic_pdf", "logistic_rnd", - "logit", "loglog", "loglogerr", "logm", "logncdf", - "logninv", "lognpdf", "lognrnd", "logspace", - "lookfor", "ls_command", "lsqnonneg", "magic", - "mahalanobis", "manova", "matlabroot", - "mcnemar_test", "mean", "meansq", "median", "menu", - "mesh", "meshc", "meshgrid", "meshz", "mexext", - "mget", "mkpp", "mode", "moment", "movefile", - "mpoles", "mput", "namelengthmax", "nargchk", - "nargoutchk", "nbincdf", "nbininv", "nbinpdf", - "nbinrnd", "nchoosek", "ndgrid", "newplot", "news", - "nonzeros", "normcdf", "normest", "norminv", - "normpdf", "normrnd", "now", "nthroot", "null", - "ocean", "ols", "onenormest", "optimget", - "optimset", "orderfields", "orient", "orth", - "pack", "pareto", "parseparams", "pascal", "patch", - "pathdef", "pcg", "pchip", "pcolor", "pcr", - "peaks", "periodogram", "perl", "perms", "pie", - "pink", "planerot", "playaudio", "plot", - "plotmatrix", "plotyy", "poisscdf", 
"poissinv", - "poisspdf", "poissrnd", "polar", "poly", - "polyaffine", "polyarea", "polyderiv", "polyfit", - "polygcd", "polyint", "polyout", "polyreduce", - "polyval", "polyvalm", "postpad", "powerset", - "ppder", "ppint", "ppjumps", "ppplot", "ppval", - "pqpnonneg", "prepad", "primes", "print", - "print_usage", "prism", "probit", "qp", "qqplot", - "quadcc", "quadgk", "quadl", "quadv", "quiver", - "qzhess", "rainbow", "randi", "range", "rank", - "ranks", "rat", "reallog", "realpow", "realsqrt", - "record", "rectangle_lw", "rectangle_sw", - "rectint", "refresh", "refreshdata", - "regexptranslate", "repmat", "residue", "ribbon", - "rindex", "roots", "rose", "rosser", "rotdim", - "rref", "run", "run_count", "rundemos", "run_test", - "runtests", "saveas", "saveaudio", "saveobj", - "savepath", "scatter", "secd", "semilogx", - "semilogxerr", "semilogy", "semilogyerr", - "setaudio", "setdiff", "setfield", "setxor", - "shading", "shift", "shiftdim", "sign_test", - "sinc", "sind", "sinetone", "sinewave", "skewness", - "slice", "sombrero", "sortrows", "spaugment", - "spconvert", "spdiags", "spearman", "spectral_adf", - "spectral_xdf", "specular", "speed", "spencer", - "speye", "spfun", "sphere", "spinmap", "spline", - "spones", "sprand", "sprandn", "sprandsym", - "spring", "spstats", "spy", "sqp", "stairs", - "statistics", "std", "stdnormal_cdf", - "stdnormal_inv", "stdnormal_pdf", "stdnormal_rnd", - "stem", "stft", "strcat", "strchr", "strjust", - "strmatch", "strread", "strsplit", "strtok", - "strtrim", "strtrunc", "structfun", "studentize", - "subplot", "subsindex", "subspace", "substr", - "substruct", "summer", "surf", "surface", "surfc", - "surfl", "surfnorm", "svds", "swapbytes", - "sylvester_matrix", "symvar", "synthesis", "table", - "tand", "tar", "tcdf", "tempdir", "tempname", - "test", "text", "textread", "textscan", "tinv", - "title", "toeplitz", "tpdf", "trace", "trapz", - "treelayout", "treeplot", "triangle_lw", - "triangle_sw", "tril", "trimesh", "triplequad", 
- "triplot", "trisurf", "triu", "trnd", "tsearchn", - "t_test", "t_test_regression", "type", "unidcdf", - "unidinv", "unidpdf", "unidrnd", "unifcdf", - "unifinv", "unifpdf", "unifrnd", "union", "unique", - "unix", "unmkpp", "unpack", "untabify", "untar", - "unwrap", "unzip", "u_test", "validatestring", - "vander", "var", "var_test", "vech", "ver", - "version", "view", "voronoi", "voronoin", - "waitforbuttonpress", "wavread", "wavwrite", - "wblcdf", "wblinv", "wblpdf", "wblrnd", "weekday", - "welch_test", "what", "white", "whitebg", - "wienrnd", "wilcoxon_test", "wilkinson", "winter", - "xlabel", "xlim", "ylabel", "yulewalker", "zip", - "zlabel", "z_test", ] - - loadable_kw = [ "airy", "amd", "balance", "besselh", "besseli", - "besselj", "besselk", "bessely", "bitpack", - "bsxfun", "builtin", "ccolamd", "cellfun", - "cellslices", "chol", "choldelete", "cholinsert", - "cholinv", "cholshift", "cholupdate", "colamd", - "colloc", "convhulln", "convn", "csymamd", - "cummax", "cummin", "daspk", "daspk_options", - "dasrt", "dasrt_options", "dassl", "dassl_options", - "dbclear", "dbdown", "dbstack", "dbstatus", - "dbstop", "dbtype", "dbup", "dbwhere", "det", - "dlmread", "dmperm", "dot", "eig", "eigs", - "endgrent", "endpwent", "etree", "fft", "fftn", - "fftw", "filter", "find", "full", "gcd", - "getgrent", "getgrgid", "getgrnam", "getpwent", - "getpwnam", "getpwuid", "getrusage", "givens", - "gmtime", "gnuplot_binary", "hess", "ifft", - "ifftn", "inv", "isdebugmode", "issparse", "kron", - "localtime", "lookup", "lsode", "lsode_options", - "lu", "luinc", "luupdate", "matrix_type", "max", - "min", "mktime", "pinv", "qr", "qrdelete", - "qrinsert", "qrshift", "qrupdate", "quad", - "quad_options", "qz", "rand", "rande", "randg", - "randn", "randp", "randperm", "rcond", "regexp", - "regexpi", "regexprep", "schur", "setgrent", - "setpwent", "sort", "spalloc", "sparse", "spparms", - "sprank", "sqrtm", "strfind", "strftime", - "strptime", "strrep", "svd", "svd_driver", "syl", - 
"symamd", "symbfact", "symrcm", "time", "tsearch", - "typecast", "urlread", "urlwrite", ] - - mapping_kw = [ "abs", "acos", "acosh", "acot", "acoth", "acsc", - "acsch", "angle", "arg", "asec", "asech", "asin", - "asinh", "atan", "atanh", "beta", "betainc", - "betaln", "bincoeff", "cbrt", "ceil", "conj", "cos", - "cosh", "cot", "coth", "csc", "csch", "erf", "erfc", - "erfcx", "erfinv", "exp", "finite", "fix", "floor", - "fmod", "gamma", "gammainc", "gammaln", "imag", - "isalnum", "isalpha", "isascii", "iscntrl", - "isdigit", "isfinite", "isgraph", "isinf", - "islower", "isna", "isnan", "isprint", "ispunct", - "isspace", "isupper", "isxdigit", "lcm", "lgamma", - "log", "lower", "mod", "real", "rem", "round", - "roundb", "sec", "sech", "sign", "sin", "sinh", - "sqrt", "tan", "tanh", "toascii", "tolower", "xor", - ] - - builtin_consts = [ "EDITOR", "EXEC_PATH", "I", "IMAGE_PATH", "NA", - "OCTAVE_HOME", "OCTAVE_VERSION", "PAGER", - "PAGER_FLAGS", "SEEK_CUR", "SEEK_END", "SEEK_SET", - "SIG", "S_ISBLK", "S_ISCHR", "S_ISDIR", "S_ISFIFO", - "S_ISLNK", "S_ISREG", "S_ISSOCK", "WCONTINUE", - "WCOREDUMP", "WEXITSTATUS", "WIFCONTINUED", - "WIFEXITED", "WIFSIGNALED", "WIFSTOPPED", "WNOHANG", - "WSTOPSIG", "WTERMSIG", "WUNTRACED", ] - - tokens = { - 'root': [ - #We should look into multiline comments - (r'[%#].*$', Comment), - (r'^\s*function', Keyword, 'deffunc'), - - # from 'iskeyword' on hg changeset 8cc154f45e37 - (r'(__FILE__|__LINE__|break|case|catch|classdef|continue|do|else|' - r'elseif|end|end_try_catch|end_unwind_protect|endclassdef|' - r'endevents|endfor|endfunction|endif|endmethods|endproperties|' - r'endswitch|endwhile|events|for|function|get|global|if|methods|' - r'otherwise|persistent|properties|return|set|static|switch|try|' - r'until|unwind_protect|unwind_protect_cleanup|while)\b', Keyword), - - ("(" + "|".join( builtin_kw + command_kw - + function_kw + loadable_kw - + mapping_kw) + r')\b', Name.Builtin), - - ("(" + "|".join(builtin_consts) + r')\b', 
Name.Constant), - - # operators in Octave but not Matlab: - (r'-=|!=|!|/=|--', Operator), - # operators: - (r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator), - # operators in Octave but not Matlab requiring escape for re: - (r'\*=|\+=|\^=|\/=|\\=|\*\*|\+\+|\.\*\*',Operator), - # operators requiring escape for re: - (r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator), - - - # punctuation: - (r'\[|\]|\(|\)|\{|\}|:|@|\.|,', Punctuation), - (r'=|:|;', Punctuation), - - (r'"[^"]*"', String), - - (r'(\d+\.\d*|\d*\.\d+)([eEf][+-]?[0-9]+)?', Number.Float), - (r'\d+[eEf][+-]?[0-9]+', Number.Float), - (r'\d+', Number.Integer), - - # quote can be transpose, instead of string: - # (not great, but handles common cases...) - (r'(?<=[\w\)\]])\'', Operator), - (r'(?|<=|>=|&&|&|~|\|\|?', Operator), - # operators requiring escape for re: - (r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator), - - # punctuation: - (r'[\[\](){}@.,=:;]', Punctuation), - - (r'"[^"]*"', String), - - # quote can be transpose, instead of string: - # (not great, but handles common cases...) - (r'(?<=[\w\)\]])\'', Operator), - (r'(?') or line.startswith('+'): - # Colorize the prompt as such, - # then put rest of line into current_code_block - insertions.append((len(current_code_block), - [(0, Generic.Prompt, line[:2])])) - current_code_block += line[2:] - else: - # We have reached a non-prompt line! - # If we have stored prompt lines, need to process them first. - if current_code_block: - # Weave together the prompts and highlight code. - for item in do_insertions(insertions, - slexer.get_tokens_unprocessed(current_code_block)): - yield item - # Reset vars for next code block. - current_code_block = '' - insertions = [] - # Now process the actual line itself, this is output from R. - yield match.start(), Generic.Output, line - - # If we happen to end on a code block with nothing after it, need to - # process the last code block. This is neither elegant nor DRY so - # should be changed. 
- if current_code_block: - for item in do_insertions(insertions, - slexer.get_tokens_unprocessed(current_code_block)): - yield item - - -class SLexer(RegexLexer): - """ - For S, S-plus, and R source code. - - *New in Pygments 0.10.* - """ - - name = 'S' - aliases = ['splus', 's', 'r'] - filenames = ['*.S', '*.R', '.Rhistory', '.Rprofile'] - mimetypes = ['text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', - 'text/x-R', 'text/x-r-history', 'text/x-r-profile'] - - tokens = { - 'comments': [ - (r'#.*$', Comment.Single), - ], - 'valid_name': [ - (r'[a-zA-Z][0-9a-zA-Z\._]*', Text), - # can begin with ., but not if that is followed by a digit - (r'\.[a-zA-Z_][0-9a-zA-Z\._]*', Text), - ], - 'punctuation': [ - (r'\[{1,2}|\]{1,2}|\(|\)|;|,', Punctuation), - ], - 'keywords': [ - (r'(if|else|for|while|repeat|in|next|break|return|switch|function)' - r'(?![0-9a-zA-Z\._])', - Keyword.Reserved) - ], - 'operators': [ - (r'<>?|-|==|<=|>=|<|>|&&?|!=|\|\|?|\?', Operator), - (r'\*|\+|\^|/|!|%[^%]*%|=|~|\$|@|:{1,3}', Operator) - ], - 'builtin_symbols': [ - (r'(NULL|NA(_(integer|real|complex|character)_)?|' - r'Inf|TRUE|FALSE|NaN|\.\.(\.|[0-9]+))' - r'(?![0-9a-zA-Z\._])', - Keyword.Constant), - (r'(T|F)\b', Keyword.Variable), - ], - 'numbers': [ - # hex number - (r'0[xX][a-fA-F0-9]+([pP][0-9]+)?[Li]?', Number.Hex), - # decimal number - (r'[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+)([eE][+-]?[0-9]+)?[Li]?', - Number), - ], - 'statements': [ - include('comments'), - # whitespaces - (r'\s+', Text), - (r'`.*?`', String.Backtick), - (r'\'', String, 'string_squote'), - (r'\"', String, 'string_dquote'), - include('builtin_symbols'), - include('numbers'), - include('keywords'), - include('punctuation'), - include('operators'), - include('valid_name'), - ], - 'root': [ - include('statements'), - # blocks: - (r'\{|\}', Punctuation), - #(r'\{', Punctuation, 'block'), - (r'.', Text), - ], - #'block': [ - # include('statements'), - # ('\{', Punctuation, '#push'), - # ('\}', Punctuation, '#pop') - #], - 
'string_squote': [ - (r'([^\'\\]|\\.)*\'', String, '#pop'), - ], - 'string_dquote': [ - (r'([^"\\]|\\.)*"', String, '#pop'), - ], - } - - def analyse_text(text): - if re.search(r'[a-z0-9_\])\s]<-(?!-)', text): - return 0.11 - - -class BugsLexer(RegexLexer): - """ - Pygments Lexer for `OpenBugs `_ and WinBugs - models. - - *New in Pygments 1.6.* - """ - - name = 'BUGS' - aliases = ['bugs', 'winbugs', 'openbugs'] - filenames = ['*.bug'] - - _FUNCTIONS = [ - # Scalar functions - 'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh', - 'cloglog', 'cos', 'cosh', 'cumulative', 'cut', 'density', 'deviance', - 'equals', 'expr', 'gammap', 'ilogit', 'icloglog', 'integral', 'log', - 'logfact', 'loggam', 'logit', 'max', 'min', 'phi', 'post.p.value', - 'pow', 'prior.p.value', 'probit', 'replicate.post', 'replicate.prior', - 'round', 'sin', 'sinh', 'solution', 'sqrt', 'step', 'tan', 'tanh', - 'trunc', - # Vector functions - 'inprod', 'interp.lin', 'inverse', 'logdet', 'mean', 'eigen.vals', - 'ode', 'prod', 'p.valueM', 'rank', 'ranked', 'replicate.postM', - 'sd', 'sort', 'sum', - ## Special - 'D', 'I', 'F', 'T', 'C'] - """ OpenBUGS built-in functions - - From http://www.openbugs.info/Manuals/ModelSpecification.html#ContentsAII - - This also includes - - - T, C, I : Truncation and censoring. - ``T`` and ``C`` are in OpenBUGS. ``I`` in WinBUGS. 
- - D : ODE - - F : Functional http://www.openbugs.info/Examples/Functionals.html - - """ - - _DISTRIBUTIONS = ['dbern', 'dbin', 'dcat', 'dnegbin', 'dpois', - 'dhyper', 'dbeta', 'dchisqr', 'ddexp', 'dexp', - 'dflat', 'dgamma', 'dgev', 'df', 'dggamma', 'dgpar', - 'dloglik', 'dlnorm', 'dlogis', 'dnorm', 'dpar', - 'dt', 'dunif', 'dweib', 'dmulti', 'ddirch', 'dmnorm', - 'dmt', 'dwish'] - """ OpenBUGS built-in distributions - - Functions from - http://www.openbugs.info/Manuals/ModelSpecification.html#ContentsAI - """ - - - tokens = { - 'whitespace' : [ - (r"\s+", Text), - ], - 'comments' : [ - # Comments - (r'#.*$', Comment.Single), - ], - 'root': [ - # Comments - include('comments'), - include('whitespace'), - # Block start - (r'(model)(\s+)({)', - bygroups(Keyword.Namespace, Text, Punctuation)), - # Reserved Words - (r'(for|in)(?![0-9a-zA-Z\._])', Keyword.Reserved), - # Built-in Functions - (r'(%s)(?=\s*\()' - % r'|'.join(_FUNCTIONS + _DISTRIBUTIONS), - Name.Builtin), - # Regular variable names - (r'[A-Za-z][A-Za-z0-9_.]*', Name), - # Number Literals - (r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', Number), - # Punctuation - (r'\[|\]|\(|\)|:|,|;', Punctuation), - # Assignment operators - # SLexer makes these tokens Operators. - (r'<-|~', Operator), - # Infix and prefix operators - (r'\+|-|\*|/', Operator), - # Block - (r'[{}]', Punctuation), - ] - } - - def analyse_text(text): - if re.search(r"^\s*model\s*{", text, re.M): - return 0.7 - else: - return 0.0 - -class JagsLexer(RegexLexer): - """ - Pygments Lexer for JAGS. 
- - *New in Pygments 1.6.* - """ - - name = 'JAGS' - aliases = ['jags'] - filenames = ['*.jag', '*.bug'] - - ## JAGS - _FUNCTIONS = [ - 'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh', - 'cos', 'cosh', 'cloglog', - 'equals', 'exp', 'icloglog', 'ifelse', 'ilogit', 'log', 'logfact', - 'loggam', 'logit', 'phi', 'pow', 'probit', 'round', 'sin', 'sinh', - 'sqrt', 'step', 'tan', 'tanh', 'trunc', 'inprod', 'interp.lin', - 'logdet', 'max', 'mean', 'min', 'prod', 'sum', 'sd', 'inverse', - 'rank', 'sort', 't', 'acos', 'acosh', 'asin', 'asinh', 'atan', - # Truncation/Censoring (should I include) - 'T', 'I'] - # Distributions with density, probability and quartile functions - _DISTRIBUTIONS = ['[dpq]%s' % x for x in - ['bern', 'beta', 'dchiqsqr', 'ddexp', 'dexp', - 'df', 'gamma', 'gen.gamma', 'logis', 'lnorm', - 'negbin', 'nchisqr', 'norm', 'par', 'pois', 'weib']] - # Other distributions without density and probability - _OTHER_DISTRIBUTIONS = [ - 'dt', 'dunif', 'dbetabin', 'dbern', 'dbin', 'dcat', 'dhyper', - 'ddirch', 'dmnorm', 'dwish', 'dmt', 'dmulti', 'dbinom', 'dchisq', - 'dnbinom', 'dweibull', 'ddirich'] - - tokens = { - 'whitespace' : [ - (r"\s+", Text), - ], - 'names' : [ - # Regular variable names - (r'[a-zA-Z][a-zA-Z0-9_.]*\b', Name), - ], - 'comments' : [ - # do not use stateful comments - (r'(?s)/\*.*?\*/', Comment.Multiline), - # Comments - (r'#.*$', Comment.Single), - ], - 'root': [ - # Comments - include('comments'), - include('whitespace'), - # Block start - (r'(model|data)(\s+)({)', - bygroups(Keyword.Namespace, Text, Punctuation)), - (r'var(?![0-9a-zA-Z\._])', Keyword.Declaration), - # Reserved Words - (r'(for|in)(?![0-9a-zA-Z\._])', Keyword.Reserved), - # Builtins - # Need to use lookahead because . 
is a valid char - (r'(%s)(?=\s*\()' % r'|'.join(_FUNCTIONS - + _DISTRIBUTIONS - + _OTHER_DISTRIBUTIONS), - Name.Builtin), - # Names - include('names'), - # Number Literals - (r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', Number), - (r'\[|\]|\(|\)|:|,|;', Punctuation), - # Assignment operators - (r'<-|~', Operator), - # # JAGS includes many more than OpenBUGS - (r'\+|-|\*|\/|\|\|[&]{2}|[<>=]=?|\^|%.*?%', Operator), - (r'[{}]', Punctuation), - ] - } - - def analyse_text(text): - if re.search(r'^\s*model\s*\{', text, re.M): - if re.search(r'^\s*data\s*\{', text, re.M): - return 0.9 - elif re.search(r'^\s*var', text, re.M): - return 0.9 - else: - return 0.3 - else: - return 0 - -class StanLexer(RegexLexer): - """Pygments Lexer for Stan models. - - The Stan modeling language is specified in the *Stan 1.3.0 - Modeling Language Manual* `pdf - `_. - - *New in Pygments 1.6.* - """ - - name = 'Stan' - aliases = ['stan'] - filenames = ['*.stan'] - - tokens = { - 'whitespace' : [ - (r"\s+", Text), - ], - 'comments' : [ - (r'(?s)/\*.*?\*/', Comment.Multiline), - # Comments - (r'(//|#).*$', Comment.Single), - ], - 'root': [ - # Stan is more restrictive on strings than this regex - (r'"[^"]*"', String), - # Comments - include('comments'), - # block start - include('whitespace'), - # Block start - (r'(%s)(\s*)({)' % - r'|'.join(('data', r'transformed\s+?data', - 'parameters', r'transformed\s+parameters', - 'model', r'generated\s+quantities')), - bygroups(Keyword.Namespace, Text, Punctuation)), - # Reserved Words - (r'(%s)\b' % r'|'.join(_stan_builtins.KEYWORDS), Keyword), - # Truncation - (r'T(?=\s*\[)', Keyword), - # Data types - (r'(%s)\b' % r'|'.join(_stan_builtins.TYPES), Keyword.Type), - # Punctuation - (r"[;:,\[\]()]", Punctuation), - # Builtin - (r'(%s)(?=\s*\()' - % r'|'.join(_stan_builtins.FUNCTIONS - + _stan_builtins.DISTRIBUTIONS), - Name.Builtin), - # Special names ending in __, like lp__ - (r'[A-Za-z][A-Za-z0-9_]*__\b', Name.Builtin.Pseudo), - (r'(%s)\b' % 
r'|'.join(_stan_builtins.RESERVED), Keyword.Reserved), - # Regular variable names - (r'[A-Za-z][A-Za-z0-9_]*\b', Name), - # Real Literals - (r'-?[0-9]+(\.[0-9]+)?[eE]-?[0-9]+', Number.Float), - (r'-?[0-9]*\.[0-9]*', Number.Float), - # Integer Literals - (r'-?[0-9]+', Number.Integer), - # Assignment operators - # SLexer makes these tokens Operators. - (r'<-|~', Operator), - # Infix and prefix operators (and = ) - (r"\+|-|\.?\*|\.?/|\\|'|==?|!=?|<=?|>=?|\|\||&&", Operator), - # Block delimiters - (r'[{}]', Punctuation), - ] - } - - def analyse_text(text): - if re.search(r'^\s*parameters\s*\{', text, re.M): - return 1.0 - else: - return 0.0 - - -class IDLLexer(RegexLexer): - """ - Pygments Lexer for IDL (Interactive Data Language). - - *New in Pygments 1.6.* - """ - name = 'IDL' - aliases = ['idl'] - filenames = ['*.pro'] - mimetypes = ['text/idl'] - - _RESERVED = ['and', 'begin', 'break', 'case', 'common', 'compile_opt', - 'continue', 'do', 'else', 'end', 'endcase', 'elseelse', - 'endfor', 'endforeach', 'endif', 'endrep', 'endswitch', - 'endwhile', 'eq', 'for', 'foreach', 'forward_function', - 'function', 'ge', 'goto', 'gt', 'if', 'inherits', 'le', - 'lt', 'mod', 'ne', 'not', 'of', 'on_ioerror', 'or', 'pro', - 'repeat', 'switch', 'then', 'until', 'while', 'xor'] - """Reserved words from: http://www.exelisvis.com/docs/reswords.html""" - - _BUILTIN_LIB = ['abs', 'acos', 'adapt_hist_equal', 'alog', 'alog10', - 'amoeba', 'annotate', 'app_user_dir', 'app_user_dir_query', - 'arg_present', 'array_equal', 'array_indices', 'arrow', - 'ascii_template', 'asin', 'assoc', 'atan', 'axis', - 'a_correlate', 'bandpass_filter', 'bandreject_filter', - 'barplot', 'bar_plot', 'beseli', 'beselj', 'beselk', - 'besely', 'beta', 'bilinear', 'binary_template', 'bindgen', - 'binomial', 'bin_date', 'bit_ffs', 'bit_population', - 'blas_axpy', 'blk_con', 'box_cursor', 'breakpoint', - 'broyden', 'butterworth', 'bytarr', 'byte', 'byteorder', - 'bytscl', 'caldat', 'calendar', 'call_external', - 
'call_function', 'call_method', 'call_procedure', 'canny', - 'catch', 'cd', 'cdf_[0-9a-za-z_]*', 'ceil', 'chebyshev', - 'check_math', - 'chisqr_cvf', 'chisqr_pdf', 'choldc', 'cholsol', 'cindgen', - 'cir_3pnt', 'close', 'cluster', 'cluster_tree', 'clust_wts', - 'cmyk_convert', 'colorbar', 'colorize_sample', - 'colormap_applicable', 'colormap_gradient', - 'colormap_rotation', 'colortable', 'color_convert', - 'color_exchange', 'color_quan', 'color_range_map', 'comfit', - 'command_line_args', 'complex', 'complexarr', 'complexround', - 'compute_mesh_normals', 'cond', 'congrid', 'conj', - 'constrained_min', 'contour', 'convert_coord', 'convol', - 'convol_fft', 'coord2to3', 'copy_lun', 'correlate', 'cos', - 'cosh', 'cpu', 'cramer', 'create_cursor', 'create_struct', - 'create_view', 'crossp', 'crvlength', 'cti_test', - 'ct_luminance', 'cursor', 'curvefit', 'cvttobm', 'cv_coord', - 'cw_animate', 'cw_animate_getp', 'cw_animate_load', - 'cw_animate_run', 'cw_arcball', 'cw_bgroup', 'cw_clr_index', - 'cw_colorsel', 'cw_defroi', 'cw_field', 'cw_filesel', - 'cw_form', 'cw_fslider', 'cw_light_editor', - 'cw_light_editor_get', 'cw_light_editor_set', 'cw_orient', - 'cw_palette_editor', 'cw_palette_editor_get', - 'cw_palette_editor_set', 'cw_pdmenu', 'cw_rgbslider', - 'cw_tmpl', 'cw_zoom', 'c_correlate', 'dblarr', 'db_exists', - 'dcindgen', 'dcomplex', 'dcomplexarr', 'define_key', - 'define_msgblk', 'define_msgblk_from_file', 'defroi', - 'defsysv', 'delvar', 'dendrogram', 'dendro_plot', 'deriv', - 'derivsig', 'determ', 'device', 'dfpmin', 'diag_matrix', - 'dialog_dbconnect', 'dialog_message', 'dialog_pickfile', - 'dialog_printersetup', 'dialog_printjob', - 'dialog_read_image', 'dialog_write_image', 'digital_filter', - 'dilate', 'dindgen', 'dissolve', 'dist', 'distance_measure', - 'dlm_load', 'dlm_register', 'doc_library', 'double', - 'draw_roi', 'edge_dog', 'efont', 'eigenql', 'eigenvec', - 'ellipse', 'elmhes', 'emboss', 'empty', 'enable_sysrtn', - 'eof', 'eos_[0-9a-za-z_]*', 
'erase', 'erf', 'erfc', 'erfcx', - 'erode', 'errorplot', 'errplot', 'estimator_filter', - 'execute', 'exit', 'exp', 'expand', 'expand_path', 'expint', - 'extrac', 'extract_slice', 'factorial', 'fft', 'filepath', - 'file_basename', 'file_chmod', 'file_copy', 'file_delete', - 'file_dirname', 'file_expand_path', 'file_info', - 'file_lines', 'file_link', 'file_mkdir', 'file_move', - 'file_poll_input', 'file_readlink', 'file_same', - 'file_search', 'file_test', 'file_which', 'findgen', - 'finite', 'fix', 'flick', 'float', 'floor', 'flow3', - 'fltarr', 'flush', 'format_axis_values', 'free_lun', - 'fstat', 'fulstr', 'funct', 'fv_test', 'fx_root', - 'fz_roots', 'f_cvf', 'f_pdf', 'gamma', 'gamma_ct', - 'gauss2dfit', 'gaussfit', 'gaussian_function', 'gaussint', - 'gauss_cvf', 'gauss_pdf', 'gauss_smooth', 'getenv', - 'getwindows', 'get_drive_list', 'get_dxf_objects', - 'get_kbrd', 'get_login_info', 'get_lun', 'get_screen_size', - 'greg2jul', 'grib_[0-9a-za-z_]*', 'grid3', 'griddata', - 'grid_input', 'grid_tps', 'gs_iter', - 'h5[adfgirst]_[0-9a-za-z_]*', 'h5_browser', 'h5_close', - 'h5_create', 'h5_get_libversion', 'h5_open', 'h5_parse', - 'hanning', 'hash', 'hdf_[0-9a-za-z_]*', 'heap_free', - 'heap_gc', 'heap_nosave', 'heap_refcount', 'heap_save', - 'help', 'hilbert', 'histogram', 'hist_2d', 'hist_equal', - 'hls', 'hough', 'hqr', 'hsv', 'h_eq_ct', 'h_eq_int', - 'i18n_multibytetoutf8', 'i18n_multibytetowidechar', - 'i18n_utf8tomultibyte', 'i18n_widechartomultibyte', - 'ibeta', 'icontour', 'iconvertcoord', 'idelete', 'identity', - 'idlexbr_assistant', 'idlitsys_createtool', 'idl_base64', - 'idl_validname', 'iellipse', 'igamma', 'igetcurrent', - 'igetdata', 'igetid', 'igetproperty', 'iimage', 'image', - 'image_cont', 'image_statistics', 'imaginary', 'imap', - 'indgen', 'intarr', 'interpol', 'interpolate', - 'interval_volume', 'int_2d', 'int_3d', 'int_tabulated', - 'invert', 'ioctl', 'iopen', 'iplot', 'ipolygon', - 'ipolyline', 'iputdata', 'iregister', 'ireset', 'iresolve', - 
'irotate', 'ir_filter', 'isa', 'isave', 'iscale', - 'isetcurrent', 'isetproperty', 'ishft', 'isocontour', - 'isosurface', 'isurface', 'itext', 'itranslate', 'ivector', - 'ivolume', 'izoom', 'i_beta', 'journal', 'json_parse', - 'json_serialize', 'jul2greg', 'julday', 'keyword_set', - 'krig2d', 'kurtosis', 'kw_test', 'l64indgen', 'label_date', - 'label_region', 'ladfit', 'laguerre', 'laplacian', - 'la_choldc', 'la_cholmprove', 'la_cholsol', 'la_determ', - 'la_eigenproblem', 'la_eigenql', 'la_eigenvec', 'la_elmhes', - 'la_gm_linear_model', 'la_hqr', 'la_invert', - 'la_least_squares', 'la_least_square_equality', - 'la_linear_equation', 'la_ludc', 'la_lumprove', 'la_lusol', - 'la_svd', 'la_tridc', 'la_trimprove', 'la_triql', - 'la_trired', 'la_trisol', 'least_squares_filter', 'leefilt', - 'legend', 'legendre', 'linbcg', 'lindgen', 'linfit', - 'linkimage', 'list', 'll_arc_distance', 'lmfit', 'lmgr', - 'lngamma', 'lnp_test', 'loadct', 'locale_get', - 'logical_and', 'logical_or', 'logical_true', 'lon64arr', - 'lonarr', 'long', 'long64', 'lsode', 'ludc', 'lumprove', - 'lusol', 'lu_complex', 'machar', 'make_array', 'make_dll', - 'make_rt', 'map', 'mapcontinents', 'mapgrid', 'map_2points', - 'map_continents', 'map_grid', 'map_image', 'map_patch', - 'map_proj_forward', 'map_proj_image', 'map_proj_info', - 'map_proj_init', 'map_proj_inverse', 'map_set', - 'matrix_multiply', 'matrix_power', 'max', 'md_test', - 'mean', 'meanabsdev', 'mean_filter', 'median', 'memory', - 'mesh_clip', 'mesh_decimate', 'mesh_issolid', 'mesh_merge', - 'mesh_numtriangles', 'mesh_obj', 'mesh_smooth', - 'mesh_surfacearea', 'mesh_validate', 'mesh_volume', - 'message', 'min', 'min_curve_surf', 'mk_html_help', - 'modifyct', 'moment', 'morph_close', 'morph_distance', - 'morph_gradient', 'morph_hitormiss', 'morph_open', - 'morph_thin', 'morph_tophat', 'multi', 'm_correlate', - 'ncdf_[0-9a-za-z_]*', 'newton', 'noise_hurl', 'noise_pick', - 'noise_scatter', 'noise_slur', 'norm', 'n_elements', - 'n_params', 
'n_tags', 'objarr', 'obj_class', 'obj_destroy', - 'obj_hasmethod', 'obj_isa', 'obj_new', 'obj_valid', - 'online_help', 'on_error', 'open', 'oplot', 'oploterr', - 'parse_url', 'particle_trace', 'path_cache', 'path_sep', - 'pcomp', 'plot', 'plot3d', 'ploterr', 'plots', 'plot_3dbox', - 'plot_field', 'pnt_line', 'point_lun', 'polarplot', - 'polar_contour', 'polar_surface', 'poly', 'polyfill', - 'polyfillv', 'polygon', 'polyline', 'polyshade', 'polywarp', - 'poly_2d', 'poly_area', 'poly_fit', 'popd', 'powell', - 'pref_commit', 'pref_get', 'pref_set', 'prewitt', 'primes', - 'print', 'printd', 'product', 'profile', 'profiler', - 'profiles', 'project_vol', 'psafm', 'pseudo', - 'ps_show_fonts', 'ptrarr', 'ptr_free', 'ptr_new', - 'ptr_valid', 'pushd', 'p_correlate', 'qgrid3', 'qhull', - 'qromb', 'qromo', 'qsimp', 'query_ascii', 'query_bmp', - 'query_csv', 'query_dicom', 'query_gif', 'query_image', - 'query_jpeg', 'query_jpeg2000', 'query_mrsid', 'query_pict', - 'query_png', 'query_ppm', 'query_srf', 'query_tiff', - 'query_wav', 'radon', 'randomn', 'randomu', 'ranks', - 'rdpix', 'read', 'reads', 'readu', 'read_ascii', - 'read_binary', 'read_bmp', 'read_csv', 'read_dicom', - 'read_gif', 'read_image', 'read_interfile', 'read_jpeg', - 'read_jpeg2000', 'read_mrsid', 'read_pict', 'read_png', - 'read_ppm', 'read_spr', 'read_srf', 'read_sylk', - 'read_tiff', 'read_wav', 'read_wave', 'read_x11_bitmap', - 'read_xwd', 'real_part', 'rebin', 'recall_commands', - 'recon3', 'reduce_colors', 'reform', 'region_grow', - 'register_cursor', 'regress', 'replicate', - 'replicate_inplace', 'resolve_all', 'resolve_routine', - 'restore', 'retall', 'return', 'reverse', 'rk4', 'roberts', - 'rot', 'rotate', 'round', 'routine_filepath', - 'routine_info', 'rs_test', 'r_correlate', 'r_test', - 'save', 'savgol', 'scale3', 'scale3d', 'scope_level', - 'scope_traceback', 'scope_varfetch', 'scope_varname', - 'search2d', 'search3d', 'sem_create', 'sem_delete', - 'sem_lock', 'sem_release', 'setenv', 'set_plot', 
- 'set_shading', 'sfit', 'shade_surf', 'shade_surf_irr', - 'shade_volume', 'shift', 'shift_diff', 'shmdebug', 'shmmap', - 'shmunmap', 'shmvar', 'show3', 'showfont', 'simplex', 'sin', - 'sindgen', 'sinh', 'size', 'skewness', 'skip_lun', - 'slicer3', 'slide_image', 'smooth', 'sobel', 'socket', - 'sort', 'spawn', 'spher_harm', 'sph_4pnt', 'sph_scat', - 'spline', 'spline_p', 'spl_init', 'spl_interp', 'sprsab', - 'sprsax', 'sprsin', 'sprstp', 'sqrt', 'standardize', - 'stddev', 'stop', 'strarr', 'strcmp', 'strcompress', - 'streamline', 'stregex', 'stretch', 'string', 'strjoin', - 'strlen', 'strlowcase', 'strmatch', 'strmessage', 'strmid', - 'strpos', 'strput', 'strsplit', 'strtrim', 'struct_assign', - 'struct_hide', 'strupcase', 'surface', 'surfr', 'svdc', - 'svdfit', 'svsol', 'swap_endian', 'swap_endian_inplace', - 'symbol', 'systime', 's_test', 't3d', 'tag_names', 'tan', - 'tanh', 'tek_color', 'temporary', 'tetra_clip', - 'tetra_surface', 'tetra_volume', 'text', 'thin', 'threed', - 'timegen', 'time_test2', 'tm_test', 'total', 'trace', - 'transpose', 'triangulate', 'trigrid', 'triql', 'trired', - 'trisol', 'tri_surf', 'truncate_lun', 'ts_coef', 'ts_diff', - 'ts_fcast', 'ts_smooth', 'tv', 'tvcrs', 'tvlct', 'tvrd', - 'tvscl', 'typename', 't_cvt', 't_pdf', 'uindgen', 'uint', - 'uintarr', 'ul64indgen', 'ulindgen', 'ulon64arr', 'ulonarr', - 'ulong', 'ulong64', 'uniq', 'unsharp_mask', 'usersym', - 'value_locate', 'variance', 'vector', 'vector_field', 'vel', - 'velovect', 'vert_t3d', 'voigt', 'voronoi', 'voxel_proj', - 'wait', 'warp_tri', 'watershed', 'wdelete', 'wf_draw', - 'where', 'widget_base', 'widget_button', 'widget_combobox', - 'widget_control', 'widget_displaycontextmen', 'widget_draw', - 'widget_droplist', 'widget_event', 'widget_info', - 'widget_label', 'widget_list', 'widget_propertysheet', - 'widget_slider', 'widget_tab', 'widget_table', - 'widget_text', 'widget_tree', 'widget_tree_move', - 'widget_window', 'wiener_filter', 'window', 'writeu', - 'write_bmp', 
'write_csv', 'write_gif', 'write_image', - 'write_jpeg', 'write_jpeg2000', 'write_nrif', 'write_pict', - 'write_png', 'write_ppm', 'write_spr', 'write_srf', - 'write_sylk', 'write_tiff', 'write_wav', 'write_wave', - 'wset', 'wshow', 'wtn', 'wv_applet', 'wv_cwt', - 'wv_cw_wavelet', 'wv_denoise', 'wv_dwt', 'wv_fn_coiflet', - 'wv_fn_daubechies', 'wv_fn_gaussian', 'wv_fn_haar', - 'wv_fn_morlet', 'wv_fn_paul', 'wv_fn_symlet', - 'wv_import_data', 'wv_import_wavelet', 'wv_plot3d_wps', - 'wv_plot_multires', 'wv_pwt', 'wv_tool_denoise', - 'xbm_edit', 'xdisplayfile', 'xdxf', 'xfont', - 'xinteranimate', 'xloadct', 'xmanager', 'xmng_tmpl', - 'xmtool', 'xobjview', 'xobjview_rotate', - 'xobjview_write_image', 'xpalette', 'xpcolor', 'xplot3d', - 'xregistered', 'xroi', 'xsq_test', 'xsurface', 'xvaredit', - 'xvolume', 'xvolume_rotate', 'xvolume_write_image', - 'xyouts', 'zoom', 'zoom_24'] - """Functions from: http://www.exelisvis.com/docs/routines-1.html""" - - tokens = { - 'root': [ - (r'^\s*;.*?\n', Comment.Singleline), - (r'\b(' + '|'.join(_RESERVED) + r')\b', Keyword), - (r'\b(' + '|'.join(_BUILTIN_LIB) + r')\b', Name.Builtin), - (r'\+=|-=|\^=|\*=|/=|#=|##=|<=|>=|=', Operator), - (r'\+\+|--|->|\+|-|##|#|\*|/|<|>|&&|\^|~|\|\|\?|:', Operator), - (r'\b(mod=|lt=|le=|eq=|ne=|ge=|gt=|not=|and=|or=|xor=)', Operator), - (r'\b(mod|lt|le|eq|ne|ge|gt|not|and|or|xor)\b', Operator), - (r'\b[0-9](L|B|S|UL|ULL|LL)?\b', Number), - (r'.', Text), - ] - } - - -class RdLexer(RegexLexer): - """ - Pygments Lexer for R documentation (Rd) files - - This is a very minimal implementation, highlighting little more - than the macros. A description of Rd syntax is found in `Writing R - Extensions `_ - and `Parsing Rd files `_. - - *New in Pygments 1.6.* - """ - name = 'Rd' - aliases = ['rd'] - filenames = ['*.Rd'] - mimetypes = ['text/x-r-doc'] - - # To account for verbatim / LaTeX-like / and R-like areas - # would require parsing. 
- tokens = { - 'root' : [ - # catch escaped brackets and percent sign - (r'\\[\\{}%]', String.Escape), - # comments - (r'%.*$', Comment), - # special macros with no arguments - (r'\\(?:cr|l?dots|R|tab)\b', Keyword.Constant), - # macros - (r'\\[a-zA-Z]+\b', Keyword), - # special preprocessor macros - (r'^\s*#(?:ifn?def|endif).*\b', Comment.Preproc), - # non-escaped brackets - (r'[{}]', Name.Builtin), - # everything else - (r'[^\\%\n{}]+', Text), - (r'.', Text), - ] - } - - -class IgorLexer(RegexLexer): - """ - Pygments Lexer for Igor Pro procedure files (.ipf). - See http://www.wavemetrics.com/ and http://www.igorexchange.com/. - - *New in Pygments 1.7.* - """ - - name = 'Igor' - aliases = ['igor', 'igorpro'] - filenames = ['*.ipf'] - mimetypes = ['text/ipf'] - - flags = re.IGNORECASE - - flowControl = [ - 'if', 'else', 'elseif', 'endif', 'for', 'endfor', 'strswitch', 'switch', - 'case', 'endswitch', 'do', 'while', 'try', 'catch', 'endtry', 'break', - 'continue', 'return', - ] - types = [ - 'variable', 'string', 'constant', 'strconstant', 'NVAR', 'SVAR', 'WAVE', - 'STRUCT', 'ThreadSafe', 'function', 'end', 'static', 'macro', 'window', - 'graph', 'Structure', 'EndStructure', 'EndMacro', 'FuncFit', 'Proc', - 'Picture', 'Menu', 'SubMenu', 'Prompt', 'DoPrompt', - ] - operations = [ - 'Abort', 'AddFIFOData', 'AddFIFOVectData', 'AddMovieAudio', - 'AddMovieFrame', 'APMath', 'Append', 'AppendImage', - 'AppendLayoutObject', 'AppendMatrixContour', 'AppendText', - 'AppendToGraph', 'AppendToLayout', 'AppendToTable', 'AppendXYZContour', - 'AutoPositionWindow', 'BackgroundInfo', 'Beep', 'BoundingBall', - 'BrowseURL', 'BuildMenu', 'Button', 'cd', 'Chart', 'CheckBox', - 'CheckDisplayed', 'ChooseColor', 'Close', 'CloseMovie', 'CloseProc', - 'ColorScale', 'ColorTab2Wave', 'Concatenate', 'ControlBar', - 'ControlInfo', 'ControlUpdate', 'ConvexHull', 'Convolve', 'CopyFile', - 'CopyFolder', 'CopyScales', 'Correlate', 'CreateAliasShortcut', 'Cross', - 'CtrlBackground', 'CtrlFIFO', 
'CtrlNamedBackground', 'Cursor', - 'CurveFit', 'CustomControl', 'CWT', 'Debugger', 'DebuggerOptions', - 'DefaultFont', 'DefaultGuiControls', 'DefaultGuiFont', 'DefineGuide', - 'DelayUpdate', 'DeleteFile', 'DeleteFolder', 'DeletePoints', - 'Differentiate', 'dir', 'Display', 'DisplayHelpTopic', - 'DisplayProcedure', 'DoAlert', 'DoIgorMenu', 'DoUpdate', 'DoWindow', - 'DoXOPIdle', 'DrawAction', 'DrawArc', 'DrawBezier', 'DrawLine', - 'DrawOval', 'DrawPICT', 'DrawPoly', 'DrawRect', 'DrawRRect', 'DrawText', - 'DSPDetrend', 'DSPPeriodogram', 'Duplicate', 'DuplicateDataFolder', - 'DWT', 'EdgeStats', 'Edit', 'ErrorBars', 'Execute', 'ExecuteScriptText', - 'ExperimentModified', 'Extract', 'FastGaussTransform', 'FastOp', - 'FBinRead', 'FBinWrite', 'FFT', 'FIFO2Wave', 'FIFOStatus', 'FilterFIR', - 'FilterIIR', 'FindLevel', 'FindLevels', 'FindPeak', 'FindPointsInPoly', - 'FindRoots', 'FindSequence', 'FindValue', 'FPClustering', 'fprintf', - 'FReadLine', 'FSetPos', 'FStatus', 'FTPDelete', 'FTPDownload', - 'FTPUpload', 'FuncFit', 'FuncFitMD', 'GetAxis', 'GetFileFolderInfo', - 'GetLastUserMenuInfo', 'GetMarquee', 'GetSelection', 'GetWindow', - 'GraphNormal', 'GraphWaveDraw', 'GraphWaveEdit', 'Grep', 'GroupBox', - 'Hanning', 'HideIgorMenus', 'HideInfo', 'HideProcedures', 'HideTools', - 'HilbertTransform', 'Histogram', 'IFFT', 'ImageAnalyzeParticles', - 'ImageBlend', 'ImageBoundaryToMask', 'ImageEdgeDetection', - 'ImageFileInfo', 'ImageFilter', 'ImageFocus', 'ImageGenerateROIMask', - 'ImageHistModification', 'ImageHistogram', 'ImageInterpolate', - 'ImageLineProfile', 'ImageLoad', 'ImageMorphology', 'ImageRegistration', - 'ImageRemoveBackground', 'ImageRestore', 'ImageRotate', 'ImageSave', - 'ImageSeedFill', 'ImageSnake', 'ImageStats', 'ImageThreshold', - 'ImageTransform', 'ImageUnwrapPhase', 'ImageWindow', 'IndexSort', - 'InsertPoints', 'Integrate', 'IntegrateODE', 'Interp3DPath', - 'Interpolate3D', 'KillBackground', 'KillControl', 'KillDataFolder', - 'KillFIFO', 'KillFreeAxis', 
'KillPath', 'KillPICTs', 'KillStrings', - 'KillVariables', 'KillWaves', 'KillWindow', 'KMeans', 'Label', 'Layout', - 'Legend', 'LinearFeedbackShiftRegister', 'ListBox', 'LoadData', - 'LoadPackagePreferences', 'LoadPICT', 'LoadWave', 'Loess', - 'LombPeriodogram', 'Make', 'MakeIndex', 'MarkPerfTestTime', - 'MatrixConvolve', 'MatrixCorr', 'MatrixEigenV', 'MatrixFilter', - 'MatrixGaussJ', 'MatrixInverse', 'MatrixLinearSolve', - 'MatrixLinearSolveTD', 'MatrixLLS', 'MatrixLUBkSub', 'MatrixLUD', - 'MatrixMultiply', 'MatrixOP', 'MatrixSchur', 'MatrixSolve', - 'MatrixSVBkSub', 'MatrixSVD', 'MatrixTranspose', 'MeasureStyledText', - 'Modify', 'ModifyContour', 'ModifyControl', 'ModifyControlList', - 'ModifyFreeAxis', 'ModifyGraph', 'ModifyImage', 'ModifyLayout', - 'ModifyPanel', 'ModifyTable', 'ModifyWaterfall', 'MoveDataFolder', - 'MoveFile', 'MoveFolder', 'MoveString', 'MoveSubwindow', 'MoveVariable', - 'MoveWave', 'MoveWindow', 'NeuralNetworkRun', 'NeuralNetworkTrain', - 'NewDataFolder', 'NewFIFO', 'NewFIFOChan', 'NewFreeAxis', 'NewImage', - 'NewLayout', 'NewMovie', 'NewNotebook', 'NewPanel', 'NewPath', - 'NewWaterfall', 'Note', 'Notebook', 'NotebookAction', 'Open', - 'OpenNotebook', 'Optimize', 'ParseOperationTemplate', 'PathInfo', - 'PauseForUser', 'PauseUpdate', 'PCA', 'PlayMovie', 'PlayMovieAction', - 'PlaySnd', 'PlaySound', 'PopupContextualMenu', 'PopupMenu', - 'Preferences', 'PrimeFactors', 'Print', 'printf', 'PrintGraphs', - 'PrintLayout', 'PrintNotebook', 'PrintSettings', 'PrintTable', - 'Project', 'PulseStats', 'PutScrapText', 'pwd', 'Quit', - 'RatioFromNumber', 'Redimension', 'Remove', 'RemoveContour', - 'RemoveFromGraph', 'RemoveFromLayout', 'RemoveFromTable', 'RemoveImage', - 'RemoveLayoutObjects', 'RemovePath', 'Rename', 'RenameDataFolder', - 'RenamePath', 'RenamePICT', 'RenameWindow', 'ReorderImages', - 'ReorderTraces', 'ReplaceText', 'ReplaceWave', 'Resample', - 'ResumeUpdate', 'Reverse', 'Rotate', 'Save', 'SaveData', - 'SaveExperiment', 'SaveGraphCopy', 
'SaveNotebook', - 'SavePackagePreferences', 'SavePICT', 'SaveTableCopy', - 'SetActiveSubwindow', 'SetAxis', 'SetBackground', 'SetDashPattern', - 'SetDataFolder', 'SetDimLabel', 'SetDrawEnv', 'SetDrawLayer', - 'SetFileFolderInfo', 'SetFormula', 'SetIgorHook', 'SetIgorMenuMode', - 'SetIgorOption', 'SetMarquee', 'SetProcessSleep', 'SetRandomSeed', - 'SetScale', 'SetVariable', 'SetWaveLock', 'SetWindow', 'ShowIgorMenus', - 'ShowInfo', 'ShowTools', 'Silent', 'Sleep', 'Slider', 'Smooth', - 'SmoothCustom', 'Sort', 'SoundInRecord', 'SoundInSet', - 'SoundInStartChart', 'SoundInStatus', 'SoundInStopChart', - 'SphericalInterpolate', 'SphericalTriangulate', 'SplitString', - 'sprintf', 'sscanf', 'Stack', 'StackWindows', - 'StatsAngularDistanceTest', 'StatsANOVA1Test', 'StatsANOVA2NRTest', - 'StatsANOVA2RMTest', 'StatsANOVA2Test', 'StatsChiTest', - 'StatsCircularCorrelationTest', 'StatsCircularMeans', - 'StatsCircularMoments', 'StatsCircularTwoSampleTest', - 'StatsCochranTest', 'StatsContingencyTable', 'StatsDIPTest', - 'StatsDunnettTest', 'StatsFriedmanTest', 'StatsFTest', - 'StatsHodgesAjneTest', 'StatsJBTest', 'StatsKendallTauTest', - 'StatsKSTest', 'StatsKWTest', 'StatsLinearCorrelationTest', - 'StatsLinearRegression', 'StatsMultiCorrelationTest', - 'StatsNPMCTest', 'StatsNPNominalSRTest', 'StatsQuantiles', - 'StatsRankCorrelationTest', 'StatsResample', 'StatsSample', - 'StatsScheffeTest', 'StatsSignTest', 'StatsSRTest', 'StatsTTest', - 'StatsTukeyTest', 'StatsVariancesTest', 'StatsWatsonUSquaredTest', - 'StatsWatsonWilliamsTest', 'StatsWheelerWatsonTest', - 'StatsWilcoxonRankTest', 'StatsWRCorrelationTest', 'String', - 'StructGet', 'StructPut', 'TabControl', 'Tag', 'TextBox', 'Tile', - 'TileWindows', 'TitleBox', 'ToCommandLine', 'ToolsGrid', - 'Triangulate3d', 'Unwrap', 'ValDisplay', 'Variable', 'WaveMeanStdv', - 'WaveStats', 'WaveTransform', 'wfprintf', 'WignerTransform', - 'WindowFunction', - ] - functions = [ - 'abs', 'acos', 'acosh', 'AiryA', 'AiryAD', 'AiryB', 
'AiryBD', 'alog', - 'area', 'areaXY', 'asin', 'asinh', 'atan', 'atan2', 'atanh', - 'AxisValFromPixel', 'Besseli', 'Besselj', 'Besselk', 'Bessely', 'bessi', - 'bessj', 'bessk', 'bessy', 'beta', 'betai', 'BinarySearch', - 'BinarySearchInterp', 'binomial', 'binomialln', 'binomialNoise', 'cabs', - 'CaptureHistoryStart', 'ceil', 'cequal', 'char2num', 'chebyshev', - 'chebyshevU', 'CheckName', 'cmplx', 'cmpstr', 'conj', 'ContourZ', 'cos', - 'cosh', 'cot', 'CountObjects', 'CountObjectsDFR', 'cpowi', - 'CreationDate', 'csc', 'DataFolderExists', 'DataFolderRefsEqual', - 'DataFolderRefStatus', 'date2secs', 'datetime', 'DateToJulian', - 'Dawson', 'DDEExecute', 'DDEInitiate', 'DDEPokeString', 'DDEPokeWave', - 'DDERequestWave', 'DDEStatus', 'DDETerminate', 'deltax', 'digamma', - 'DimDelta', 'DimOffset', 'DimSize', 'ei', 'enoise', 'equalWaves', 'erf', - 'erfc', 'exists', 'exp', 'expInt', 'expNoise', 'factorial', 'fakedata', - 'faverage', 'faverageXY', 'FindDimLabel', 'FindListItem', 'floor', - 'FontSizeHeight', 'FontSizeStringWidth', 'FresnelCos', 'FresnelSin', - 'gamma', 'gammaInc', 'gammaNoise', 'gammln', 'gammp', 'gammq', 'Gauss', - 'Gauss1D', 'Gauss2D', 'gcd', 'GetDefaultFontSize', - 'GetDefaultFontStyle', 'GetKeyState', 'GetRTError', 'gnoise', - 'GrepString', 'hcsr', 'hermite', 'hermiteGauss', 'HyperG0F1', - 'HyperG1F1', 'HyperG2F1', 'HyperGNoise', 'HyperGPFQ', 'IgorVersion', - 'ilim', 'imag', 'Inf', 'Integrate1D', 'interp', 'Interp2D', 'Interp3D', - 'inverseERF', 'inverseERFC', 'ItemsInList', 'jlim', 'Laguerre', - 'LaguerreA', 'LaguerreGauss', 'leftx', 'LegendreA', 'limit', 'ln', - 'log', 'logNormalNoise', 'lorentzianNoise', 'magsqr', 'MandelbrotPoint', - 'MarcumQ', 'MatrixDet', 'MatrixDot', 'MatrixRank', 'MatrixTrace', 'max', - 'mean', 'min', 'mod', 'ModDate', 'NaN', 'norm', 'NumberByKey', - 'numpnts', 'numtype', 'NumVarOrDefault', 'NVAR_Exists', 'p2rect', - 'ParamIsDefault', 'pcsr', 'Pi', 'PixelFromAxisVal', 'pnt2x', - 'poissonNoise', 'poly', 'poly2D', 'PolygonArea', 
'qcsr', 'r2polar', - 'real', 'rightx', 'round', 'sawtooth', 'ScreenResolution', 'sec', - 'SelectNumber', 'sign', 'sin', 'sinc', 'sinh', 'SphericalBessJ', - 'SphericalBessJD', 'SphericalBessY', 'SphericalBessYD', - 'SphericalHarmonics', 'sqrt', 'StartMSTimer', 'StatsBetaCDF', - 'StatsBetaPDF', 'StatsBinomialCDF', 'StatsBinomialPDF', - 'StatsCauchyCDF', 'StatsCauchyPDF', 'StatsChiCDF', 'StatsChiPDF', - 'StatsCMSSDCDF', 'StatsCorrelation', 'StatsDExpCDF', 'StatsDExpPDF', - 'StatsErlangCDF', 'StatsErlangPDF', 'StatsErrorPDF', 'StatsEValueCDF', - 'StatsEValuePDF', 'StatsExpCDF', 'StatsExpPDF', 'StatsFCDF', - 'StatsFPDF', 'StatsFriedmanCDF', 'StatsGammaCDF', 'StatsGammaPDF', - 'StatsGeometricCDF', 'StatsGeometricPDF', 'StatsHyperGCDF', - 'StatsHyperGPDF', 'StatsInvBetaCDF', 'StatsInvBinomialCDF', - 'StatsInvCauchyCDF', 'StatsInvChiCDF', 'StatsInvCMSSDCDF', - 'StatsInvDExpCDF', 'StatsInvEValueCDF', 'StatsInvExpCDF', - 'StatsInvFCDF', 'StatsInvFriedmanCDF', 'StatsInvGammaCDF', - 'StatsInvGeometricCDF', 'StatsInvKuiperCDF', 'StatsInvLogisticCDF', - 'StatsInvLogNormalCDF', 'StatsInvMaxwellCDF', 'StatsInvMooreCDF', - 'StatsInvNBinomialCDF', 'StatsInvNCChiCDF', 'StatsInvNCFCDF', - 'StatsInvNormalCDF', 'StatsInvParetoCDF', 'StatsInvPoissonCDF', - 'StatsInvPowerCDF', 'StatsInvQCDF', 'StatsInvQpCDF', - 'StatsInvRayleighCDF', 'StatsInvRectangularCDF', 'StatsInvSpearmanCDF', - 'StatsInvStudentCDF', 'StatsInvTopDownCDF', 'StatsInvTriangularCDF', - 'StatsInvUsquaredCDF', 'StatsInvVonMisesCDF', 'StatsInvWeibullCDF', - 'StatsKuiperCDF', 'StatsLogisticCDF', 'StatsLogisticPDF', - 'StatsLogNormalCDF', 'StatsLogNormalPDF', 'StatsMaxwellCDF', - 'StatsMaxwellPDF', 'StatsMedian', 'StatsMooreCDF', 'StatsNBinomialCDF', - 'StatsNBinomialPDF', 'StatsNCChiCDF', 'StatsNCChiPDF', 'StatsNCFCDF', - 'StatsNCFPDF', 'StatsNCTCDF', 'StatsNCTPDF', 'StatsNormalCDF', - 'StatsNormalPDF', 'StatsParetoCDF', 'StatsParetoPDF', 'StatsPermute', - 'StatsPoissonCDF', 'StatsPoissonPDF', 'StatsPowerCDF', - 
'StatsPowerNoise', 'StatsPowerPDF', 'StatsQCDF', 'StatsQpCDF', - 'StatsRayleighCDF', 'StatsRayleighPDF', 'StatsRectangularCDF', - 'StatsRectangularPDF', 'StatsRunsCDF', 'StatsSpearmanRhoCDF', - 'StatsStudentCDF', 'StatsStudentPDF', 'StatsTopDownCDF', - 'StatsTriangularCDF', 'StatsTriangularPDF', 'StatsTrimmedMean', - 'StatsUSquaredCDF', 'StatsVonMisesCDF', 'StatsVonMisesNoise', - 'StatsVonMisesPDF', 'StatsWaldCDF', 'StatsWaldPDF', 'StatsWeibullCDF', - 'StatsWeibullPDF', 'StopMSTimer', 'str2num', 'stringCRC', 'stringmatch', - 'strlen', 'strsearch', 'StudentA', 'StudentT', 'sum', 'SVAR_Exists', - 'TagVal', 'tan', 'tanh', 'ThreadGroupCreate', 'ThreadGroupRelease', - 'ThreadGroupWait', 'ThreadProcessorCount', 'ThreadReturnValue', 'ticks', - 'trunc', 'Variance', 'vcsr', 'WaveCRC', 'WaveDims', 'WaveExists', - 'WaveMax', 'WaveMin', 'WaveRefsEqual', 'WaveType', 'WhichListItem', - 'WinType', 'WNoise', 'x', 'x2pnt', 'xcsr', 'y', 'z', 'zcsr', 'ZernikeR', - ] - functions += [ - 'AddListItem', 'AnnotationInfo', 'AnnotationList', 'AxisInfo', - 'AxisList', 'CaptureHistory', 'ChildWindowList', 'CleanupName', - 'ContourInfo', 'ContourNameList', 'ControlNameList', 'CsrInfo', - 'CsrWave', 'CsrXWave', 'CTabList', 'DataFolderDir', 'date', - 'DDERequestString', 'FontList', 'FuncRefInfo', 'FunctionInfo', - 'FunctionList', 'FunctionPath', 'GetDataFolder', 'GetDefaultFont', - 'GetDimLabel', 'GetErrMessage', 'GetFormula', - 'GetIndependentModuleName', 'GetIndexedObjName', 'GetIndexedObjNameDFR', - 'GetRTErrMessage', 'GetRTStackInfo', 'GetScrapText', 'GetUserData', - 'GetWavesDataFolder', 'GrepList', 'GuideInfo', 'GuideNameList', 'Hash', - 'IgorInfo', 'ImageInfo', 'ImageNameList', 'IndexedDir', 'IndexedFile', - 'JulianToDate', 'LayoutInfo', 'ListMatch', 'LowerStr', 'MacroList', - 'NameOfWave', 'note', 'num2char', 'num2istr', 'num2str', - 'OperationList', 'PadString', 'ParseFilePath', 'PathList', 'PICTInfo', - 'PICTList', 'PossiblyQuoteName', 'ProcedureText', 'RemoveByKey', - 'RemoveEnding', 
'RemoveFromList', 'RemoveListItem', - 'ReplaceNumberByKey', 'ReplaceString', 'ReplaceStringByKey', - 'Secs2Date', 'Secs2Time', 'SelectString', 'SortList', - 'SpecialCharacterInfo', 'SpecialCharacterList', 'SpecialDirPath', - 'StringByKey', 'StringFromList', 'StringList', 'StrVarOrDefault', - 'TableInfo', 'TextFile', 'ThreadGroupGetDF', 'time', 'TraceFromPixel', - 'TraceInfo', 'TraceNameList', 'UniqueName', 'UnPadString', 'UpperStr', - 'VariableList', 'WaveInfo', 'WaveList', 'WaveName', 'WaveUnits', - 'WinList', 'WinName', 'WinRecreation', 'XWaveName', - 'ContourNameToWaveRef', 'CsrWaveRef', 'CsrXWaveRef', - 'ImageNameToWaveRef', 'NewFreeWave', 'TagWaveRef', 'TraceNameToWaveRef', - 'WaveRefIndexed', 'XWaveRefFromTrace', 'GetDataFolderDFR', - 'GetWavesDataFolderDFR', 'NewFreeDataFolder', 'ThreadGroupGetDFR', - ] - - tokens = { - 'root': [ - (r'//.*$', Comment.Single), - (r'"([^"\\]|\\.)*"', String), - # Flow Control. - (r'\b(%s)\b' % '|'.join(flowControl), Keyword), - # Types. - (r'\b(%s)\b' % '|'.join(types), Keyword.Type), - # Built-in operations. - (r'\b(%s)\b' % '|'.join(operations), Name.Class), - # Built-in functions. - (r'\b(%s)\b' % '|'.join(functions), Name.Function), - # Compiler directives. - (r'^#(include|pragma|define|ifdef|ifndef|endif)', - Name.Decorator), - (r'[^a-zA-Z"/]+', Text), - (r'.', Text), - ], - } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/other.py b/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/other.py deleted file mode 100644 index 10598fb..0000000 --- a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/other.py +++ /dev/null @@ -1,3778 +0,0 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.other - ~~~~~~~~~~~~~~~~~~~~~ - - Lexers for other languages. - - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. 
-""" - -import re - -from pygments.lexer import RegexLexer, include, bygroups, using, \ - this, combined, ExtendedRegexLexer -from pygments.token import Error, Punctuation, Literal, Token, \ - Text, Comment, Operator, Keyword, Name, String, Number, Generic, \ - Whitespace -from pygments.util import get_bool_opt -from pygments.lexers.web import HtmlLexer - -from pygments.lexers._openedgebuiltins import OPENEDGEKEYWORDS -from pygments.lexers._robotframeworklexer import RobotFrameworkLexer - -# backwards compatibility -from pygments.lexers.sql import SqlLexer, MySqlLexer, SqliteConsoleLexer -from pygments.lexers.shell import BashLexer, BashSessionLexer, BatchLexer, \ - TcshLexer - -__all__ = ['BrainfuckLexer', 'BefungeLexer', 'RedcodeLexer', 'MOOCodeLexer', - 'SmalltalkLexer', 'LogtalkLexer', 'GnuplotLexer', 'PovrayLexer', - 'AppleScriptLexer', 'ModelicaLexer', 'RebolLexer', 'ABAPLexer', - 'NewspeakLexer', 'GherkinLexer', 'AsymptoteLexer', 'PostScriptLexer', - 'AutohotkeyLexer', 'GoodDataCLLexer', 'MaqlLexer', 'ProtoBufLexer', - 'HybrisLexer', 'AwkLexer', 'Cfengine3Lexer', 'SnobolLexer', - 'ECLLexer', 'UrbiscriptLexer', 'OpenEdgeLexer', 'BroLexer', - 'MscgenLexer', 'KconfigLexer', 'VGLLexer', 'SourcePawnLexer', - 'RobotFrameworkLexer', 'PuppetLexer', 'NSISLexer', 'RPMSpecLexer', - 'CbmBasicV2Lexer', 'AutoItLexer', 'RexxLexer'] - - -class ECLLexer(RegexLexer): - """ - Lexer for the declarative big-data `ECL - `_ - language. 
- - *New in Pygments 1.5.* - """ - - name = 'ECL' - aliases = ['ecl'] - filenames = ['*.ecl'] - mimetypes = ['application/x-ecl'] - - flags = re.IGNORECASE | re.MULTILINE - - tokens = { - 'root': [ - include('whitespace'), - include('statements'), - ], - 'whitespace': [ - (r'\s+', Text), - (r'\/\/.*', Comment.Single), - (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline), - ], - 'statements': [ - include('types'), - include('keywords'), - include('functions'), - include('hash'), - (r'"', String, 'string'), - (r'\'', String, 'string'), - (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float), - (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), - (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex), - (r'0[0-7]+[LlUu]*', Number.Oct), - (r'\d+[LlUu]*', Number.Integer), - (r'\*/', Error), - (r'[~!%^&*+=|?:<>/-]+', Operator), - (r'[{}()\[\],.;]', Punctuation), - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name), - ], - 'hash': [ - (r'^#.*$', Comment.Preproc), - ], - 'types': [ - (r'(RECORD|END)\D', Keyword.Declaration), - (r'((?:ASCII|BIG_ENDIAN|BOOLEAN|DATA|DECIMAL|EBCDIC|INTEGER|PATTERN|' - r'QSTRING|REAL|RECORD|RULE|SET OF|STRING|TOKEN|UDECIMAL|UNICODE|' - r'UNSIGNED|VARSTRING|VARUNICODE)\d*)(\s+)', - bygroups(Keyword.Type, Text)), - ], - 'keywords': [ - (r'(APPLY|ASSERT|BUILD|BUILDINDEX|EVALUATE|FAIL|KEYDIFF|KEYPATCH|' - r'LOADXML|NOTHOR|NOTIFY|OUTPUT|PARALLEL|SEQUENTIAL|SOAPCALL|WAIT' - r'CHECKPOINT|DEPRECATED|FAILCODE|FAILMESSAGE|FAILURE|GLOBAL|' - r'INDEPENDENT|ONWARNING|PERSIST|PRIORITY|RECOVERY|STORED|SUCCESS|' - r'WAIT|WHEN)\b', Keyword.Reserved), - # These are classed differently, check later - (r'(ALL|AND|ANY|AS|ATMOST|BEFORE|BEGINC\+\+|BEST|BETWEEN|CASE|CONST|' - r'COUNTER|CSV|DESCEND|ENCRYPT|ENDC\+\+|ENDMACRO|EXCEPT|EXCLUSIVE|' - r'EXPIRE|EXPORT|EXTEND|FALSE|FEW|FIRST|FLAT|FULL|FUNCTION|GROUP|' - r'HEADER|HEADING|HOLE|IFBLOCK|IMPORT|IN|JOINED|KEEP|KEYED|LAST|' - r'LEFT|LIMIT|LOAD|LOCAL|LOCALE|LOOKUP|MACRO|MANY|MAXCOUNT|' - r'MAXLENGTH|MIN 
SKEW|MODULE|INTERFACE|NAMED|NOCASE|NOROOT|NOSCAN|' - r'NOSORT|NOT|OF|ONLY|OPT|OR|OUTER|OVERWRITE|PACKED|PARTITION|' - r'PENALTY|PHYSICALLENGTH|PIPE|QUOTE|RELATIONSHIP|REPEAT|RETURN|' - r'RIGHT|SCAN|SELF|SEPARATOR|SERVICE|SHARED|SKEW|SKIP|SQL|STORE|' - r'TERMINATOR|THOR|THRESHOLD|TOKEN|TRANSFORM|TRIM|TRUE|TYPE|' - r'UNICODEORDER|UNSORTED|VALIDATE|VIRTUAL|WHOLE|WILD|WITHIN|XML|' - r'XPATH|__COMPRESSED__)\b', Keyword.Reserved), - ], - 'functions': [ - (r'(ABS|ACOS|ALLNODES|ASCII|ASIN|ASSTRING|ATAN|ATAN2|AVE|CASE|' - r'CHOOSE|CHOOSEN|CHOOSESETS|CLUSTERSIZE|COMBINE|CORRELATION|COS|' - r'COSH|COUNT|COVARIANCE|CRON|DATASET|DEDUP|DEFINE|DENORMALIZE|' - r'DISTRIBUTE|DISTRIBUTED|DISTRIBUTION|EBCDIC|ENTH|ERROR|EVALUATE|' - r'EVENT|EVENTEXTRA|EVENTNAME|EXISTS|EXP|FAILCODE|FAILMESSAGE|' - r'FETCH|FROMUNICODE|GETISVALID|GLOBAL|GRAPH|GROUP|HASH|HASH32|' - r'HASH64|HASHCRC|HASHMD5|HAVING|IF|INDEX|INTFORMAT|ISVALID|' - r'ITERATE|JOIN|KEYUNICODE|LENGTH|LIBRARY|LIMIT|LN|LOCAL|LOG|LOOP|' - r'MAP|MATCHED|MATCHLENGTH|MATCHPOSITION|MATCHTEXT|MATCHUNICODE|' - r'MAX|MERGE|MERGEJOIN|MIN|NOLOCAL|NONEMPTY|NORMALIZE|PARSE|PIPE|' - r'POWER|PRELOAD|PROCESS|PROJECT|PULL|RANDOM|RANGE|RANK|RANKED|' - r'REALFORMAT|RECORDOF|REGEXFIND|REGEXREPLACE|REGROUP|REJECTED|' - r'ROLLUP|ROUND|ROUNDUP|ROW|ROWDIFF|SAMPLE|SET|SIN|SINH|SIZEOF|' - r'SOAPCALL|SORT|SORTED|SQRT|STEPPED|STORED|SUM|TABLE|TAN|TANH|' - r'THISNODE|TOPN|TOUNICODE|TRANSFER|TRIM|TRUNCATE|TYPEOF|UNGROUP|' - r'UNICODEORDER|VARIANCE|WHICH|WORKUNIT|XMLDECODE|XMLENCODE|' - r'XMLTEXT|XMLUNICODE)\b', Name.Function), - ], - 'string': [ - (r'"', String, '#pop'), - (r'\'', String, '#pop'), - (r'[^"\']+', String), - ], - } - - -class BrainfuckLexer(RegexLexer): - """ - Lexer for the esoteric `BrainFuck `_ - language. 
- """ - - name = 'Brainfuck' - aliases = ['brainfuck', 'bf'] - filenames = ['*.bf', '*.b'] - mimetypes = ['application/x-brainfuck'] - - tokens = { - 'common': [ - # use different colors for different instruction types - (r'[.,]+', Name.Tag), - (r'[+-]+', Name.Builtin), - (r'[<>]+', Name.Variable), - (r'[^.,+\-<>\[\]]+', Comment), - ], - 'root': [ - (r'\[', Keyword, 'loop'), - (r'\]', Error), - include('common'), - ], - 'loop': [ - (r'\[', Keyword, '#push'), - (r'\]', Keyword, '#pop'), - include('common'), - ] - } - - -class BefungeLexer(RegexLexer): - """ - Lexer for the esoteric `Befunge `_ - language. - - *New in Pygments 0.7.* - """ - name = 'Befunge' - aliases = ['befunge'] - filenames = ['*.befunge'] - mimetypes = ['application/x-befunge'] - - tokens = { - 'root': [ - (r'[0-9a-f]', Number), - (r'[\+\*/%!`-]', Operator), # Traditional math - (r'[<>^v?\[\]rxjk]', Name.Variable), # Move, imperatives - (r'[:\\$.,n]', Name.Builtin), # Stack ops, imperatives - (r'[|_mw]', Keyword), - (r'[{}]', Name.Tag), # Befunge-98 stack ops - (r'".*?"', String.Double), # Strings don't appear to allow escapes - (r'\'.', String.Single), # Single character - (r'[#;]', Comment), # Trampoline... depends on direction hit - (r'[pg&~=@iotsy]', Keyword), # Misc - (r'[()A-Z]', Comment), # Fingerprints - (r'\s+', Text), # Whitespace doesn't matter - ], - } - - -class RedcodeLexer(RegexLexer): - """ - A simple Redcode lexer based on ICWS'94. - Contributed by Adam Blinkinsop . 
- - *New in Pygments 0.8.* - """ - name = 'Redcode' - aliases = ['redcode'] - filenames = ['*.cw'] - - opcodes = ['DAT','MOV','ADD','SUB','MUL','DIV','MOD', - 'JMP','JMZ','JMN','DJN','CMP','SLT','SPL', - 'ORG','EQU','END'] - modifiers = ['A','B','AB','BA','F','X','I'] - - tokens = { - 'root': [ - # Whitespace: - (r'\s+', Text), - (r';.*$', Comment.Single), - # Lexemes: - # Identifiers - (r'\b(%s)\b' % '|'.join(opcodes), Name.Function), - (r'\b(%s)\b' % '|'.join(modifiers), Name.Decorator), - (r'[A-Za-z_][A-Za-z_0-9]+', Name), - # Operators - (r'[-+*/%]', Operator), - (r'[#$@<>]', Operator), # mode - (r'[.,]', Punctuation), # mode - # Numbers - (r'[-+]?\d+', Number.Integer), - ], - } - - -class MOOCodeLexer(RegexLexer): - """ - For `MOOCode `_ (the MOO scripting - language). - - *New in Pygments 0.9.* - """ - name = 'MOOCode' - filenames = ['*.moo'] - aliases = ['moocode', 'moo'] - mimetypes = ['text/x-moocode'] - - tokens = { - 'root' : [ - # Numbers - (r'(0|[1-9][0-9_]*)', Number.Integer), - # Strings - (r'"(\\\\|\\"|[^"])*"', String), - # exceptions - (r'(E_PERM|E_DIV)', Name.Exception), - # db-refs - (r'((#[-0-9]+)|(\$[a-z_A-Z0-9]+))', Name.Entity), - # Keywords - (r'\b(if|else|elseif|endif|for|endfor|fork|endfork|while' - r'|endwhile|break|continue|return|try' - r'|except|endtry|finally|in)\b', Keyword), - # builtins - (r'(random|length)', Name.Builtin), - # special variables - (r'(player|caller|this|args)', Name.Variable.Instance), - # skip whitespace - (r'\s+', Text), - (r'\n', Text), - # other operators - (r'([!;=,{}&\|:\.\[\]@\(\)\<\>\?]+)', Operator), - # function call - (r'([a-z_A-Z0-9]+)(\()', bygroups(Name.Function, Operator)), - # variables - (r'([a-zA-Z_0-9]+)', Text), - ] - } - - -class SmalltalkLexer(RegexLexer): - """ - For `Smalltalk `_ syntax. - Contributed by Stefan Matthias Aust. - Rewritten by Nils Winter. 
- - *New in Pygments 0.10.* - """ - name = 'Smalltalk' - filenames = ['*.st'] - aliases = ['smalltalk', 'squeak', 'st'] - mimetypes = ['text/x-smalltalk'] - - tokens = { - 'root' : [ - (r'(<)(\w+:)(.*?)(>)', bygroups(Text, Keyword, Text, Text)), - include('squeak fileout'), - include('whitespaces'), - include('method definition'), - (r'(\|)([\w\s]*)(\|)', bygroups(Operator, Name.Variable, Operator)), - include('objects'), - (r'\^|\:=|\_', Operator), - # temporaries - (r'[\]({}.;!]', Text), - ], - 'method definition' : [ - # Not perfect can't allow whitespaces at the beginning and the - # without breaking everything - (r'([a-zA-Z]+\w*:)(\s*)(\w+)', - bygroups(Name.Function, Text, Name.Variable)), - (r'^(\b[a-zA-Z]+\w*\b)(\s*)$', bygroups(Name.Function, Text)), - (r'^([-+*/\\~<>=|&!?,@%]+)(\s*)(\w+)(\s*)$', - bygroups(Name.Function, Text, Name.Variable, Text)), - ], - 'blockvariables' : [ - include('whitespaces'), - (r'(:)(\s*)(\w+)', - bygroups(Operator, Text, Name.Variable)), - (r'\|', Operator, '#pop'), - (r'', Text, '#pop'), # else pop - ], - 'literals' : [ - (r"'(''|[^'])*'", String, 'afterobject'), - (r'\$.', String.Char, 'afterobject'), - (r'#\(', String.Symbol, 'parenth'), - (r'\)', Text, 'afterobject'), - (r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number, 'afterobject'), - ], - '_parenth_helper' : [ - include('whitespaces'), - (r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number), - (r'[-+*/\\~<>=|&#!?,@%\w:]+', String.Symbol), - # literals - (r"'(''|[^'])*'", String), - (r'\$.', String.Char), - (r'#*\(', String.Symbol, 'inner_parenth'), - ], - 'parenth' : [ - # This state is a bit tricky since - # we can't just pop this state - (r'\)', String.Symbol, ('root', 'afterobject')), - include('_parenth_helper'), - ], - 'inner_parenth': [ - (r'\)', String.Symbol, '#pop'), - include('_parenth_helper'), - ], - 'whitespaces' : [ - # skip whitespace and comments - (r'\s+', Text), - (r'"(""|[^"])*"', Comment), - ], - 'objects' : [ - (r'\[', Text, 'blockvariables'), - (r'\]', Text, 
'afterobject'), - (r'\b(self|super|true|false|nil|thisContext)\b', - Name.Builtin.Pseudo, 'afterobject'), - (r'\b[A-Z]\w*(?!:)\b', Name.Class, 'afterobject'), - (r'\b[a-z]\w*(?!:)\b', Name.Variable, 'afterobject'), - (r'#("(""|[^"])*"|[-+*/\\~<>=|&!?,@%]+|[\w:]+)', - String.Symbol, 'afterobject'), - include('literals'), - ], - 'afterobject' : [ - (r'! !$', Keyword , '#pop'), # squeak chunk delimiter - include('whitespaces'), - (r'\b(ifTrue:|ifFalse:|whileTrue:|whileFalse:|timesRepeat:)', - Name.Builtin, '#pop'), - (r'\b(new\b(?!:))', Name.Builtin), - (r'\:=|\_', Operator, '#pop'), - (r'\b[a-zA-Z]+\w*:', Name.Function, '#pop'), - (r'\b[a-zA-Z]+\w*', Name.Function), - (r'\w+:?|[-+*/\\~<>=|&!?,@%]+', Name.Function, '#pop'), - (r'\.', Punctuation, '#pop'), - (r';', Punctuation), - (r'[\])}]', Text), - (r'[\[({]', Text, '#pop'), - ], - 'squeak fileout' : [ - # Squeak fileout format (optional) - (r'^"(""|[^"])*"!', Keyword), - (r"^'(''|[^'])*'!", Keyword), - (r'^(!)(\w+)( commentStamp: )(.*?)( prior: .*?!\n)(.*?)(!)', - bygroups(Keyword, Name.Class, Keyword, String, Keyword, Text, Keyword)), - (r"^(!)(\w+(?: class)?)( methodsFor: )('(?:''|[^'])*')(.*?!)", - bygroups(Keyword, Name.Class, Keyword, String, Keyword)), - (r'^(\w+)( subclass: )(#\w+)' - r'(\s+instanceVariableNames: )(.*?)' - r'(\s+classVariableNames: )(.*?)' - r'(\s+poolDictionaries: )(.*?)' - r'(\s+category: )(.*?)(!)', - bygroups(Name.Class, Keyword, String.Symbol, Keyword, String, Keyword, - String, Keyword, String, Keyword, String, Keyword)), - (r'^(\w+(?: class)?)(\s+instanceVariableNames: )(.*?)(!)', - bygroups(Name.Class, Keyword, String, Keyword)), - (r'(!\n)(\].*)(! !)$', bygroups(Keyword, Text, Keyword)), - (r'! !$', Keyword), - ], - } - - -class LogtalkLexer(RegexLexer): - """ - For `Logtalk `_ source code. 
- - *New in Pygments 0.10.* - """ - - name = 'Logtalk' - aliases = ['logtalk'] - filenames = ['*.lgt'] - mimetypes = ['text/x-logtalk'] - - tokens = { - 'root': [ - # Directives - (r'^\s*:-\s',Punctuation,'directive'), - # Comments - (r'%.*?\n', Comment), - (r'/\*(.|\n)*?\*/',Comment), - # Whitespace - (r'\n', Text), - (r'\s+', Text), - # Numbers - (r"0'.", Number), - (r'0b[01]+', Number), - (r'0o[0-7]+', Number), - (r'0x[0-9a-fA-F]+', Number), - (r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number), - # Variables - (r'([A-Z_][a-zA-Z0-9_]*)', Name.Variable), - # Event handlers - (r'(after|before)(?=[(])', Keyword), - # Execution-context methods - (r'(parameter|this|se(lf|nder))(?=[(])', Keyword), - # Reflection - (r'(current_predicate|predicate_property)(?=[(])', Keyword), - # DCGs and term expansion - (r'(expand_(goal|term)|(goal|term)_expansion|phrase)(?=[(])', - Keyword), - # Entity - (r'(abolish|c(reate|urrent))_(object|protocol|category)(?=[(])', - Keyword), - (r'(object|protocol|category)_property(?=[(])', Keyword), - # Entity relations - (r'co(mplements_object|nforms_to_protocol)(?=[(])', Keyword), - (r'extends_(object|protocol|category)(?=[(])', Keyword), - (r'imp(lements_protocol|orts_category)(?=[(])', Keyword), - (r'(instantiat|specializ)es_class(?=[(])', Keyword), - # Events - (r'(current_event|(abolish|define)_events)(?=[(])', Keyword), - # Flags - (r'(current|set)_logtalk_flag(?=[(])', Keyword), - # Compiling, loading, and library paths - (r'logtalk_(compile|l(ibrary_path|oad_context|oad))(?=[(])', - Keyword), - # Database - (r'(clause|retract(all)?)(?=[(])', Keyword), - (r'a(bolish|ssert(a|z))(?=[(])', Keyword), - # Control constructs - (r'(ca(ll|tch)|throw)(?=[(])', Keyword), - (r'(fail|true)\b', Keyword), - # All solutions - (r'((bag|set)of|f(ind|or)all)(?=[(])', Keyword), - # Multi-threading meta-predicates - (r'threaded(_(call|once|ignore|exit|peek|wait|notify))?(?=[(])', - Keyword), - # Term unification - (r'unify_with_occurs_check(?=[(])', Keyword), - # 
Term creation and decomposition - (r'(functor|arg|copy_term|numbervars)(?=[(])', Keyword), - # Evaluable functors - (r'(rem|mod|abs|sign)(?=[(])', Keyword), - (r'float(_(integer|fractional)_part)?(?=[(])', Keyword), - (r'(floor|truncate|round|ceiling)(?=[(])', Keyword), - # Other arithmetic functors - (r'(cos|atan|exp|log|s(in|qrt))(?=[(])', Keyword), - # Term testing - (r'(var|atom(ic)?|integer|float|c(allable|ompound)|n(onvar|umber)|' - r'ground)(?=[(])', Keyword), - # Term comparison - (r'compare(?=[(])', Keyword), - # Stream selection and control - (r'(curren|se)t_(in|out)put(?=[(])', Keyword), - (r'(open|close)(?=[(])', Keyword), - (r'flush_output(?=[(])', Keyword), - (r'(at_end_of_stream|flush_output)\b', Keyword), - (r'(stream_property|at_end_of_stream|set_stream_position)(?=[(])', - Keyword), - # Character and byte input/output - (r'(nl|(get|peek|put)_(byte|c(har|ode)))(?=[(])', Keyword), - (r'\bnl\b', Keyword), - # Term input/output - (r'read(_term)?(?=[(])', Keyword), - (r'write(q|_(canonical|term))?(?=[(])', Keyword), - (r'(current_)?op(?=[(])', Keyword), - (r'(current_)?char_conversion(?=[(])', Keyword), - # Atomic term processing - (r'atom_(length|c(hars|o(ncat|des)))(?=[(])', Keyword), - (r'(char_code|sub_atom)(?=[(])', Keyword), - (r'number_c(har|ode)s(?=[(])', Keyword), - # Implementation defined hooks functions - (r'(se|curren)t_prolog_flag(?=[(])', Keyword), - (r'\bhalt\b', Keyword), - (r'halt(?=[(])', Keyword), - # Message sending operators - (r'(::|:|\^\^)', Operator), - # External call - (r'[{}]', Keyword), - # Logic and control - (r'\b(ignore|once)(?=[(])', Keyword), - (r'\brepeat\b', Keyword), - # Sorting - (r'(key)?sort(?=[(])', Keyword), - # Bitwise functors - (r'(>>|<<|/\\|\\\\|\\)', Operator), - # Arithemtic evaluation - (r'\bis\b', Keyword), - # Arithemtic comparison - (r'(=:=|=\\=|<|=<|>=|>)', Operator), - # Term creation and decomposition - (r'=\.\.', Operator), - # Term unification - (r'(=|\\=)', Operator), - # Term comparison - 
(r'(==|\\==|@=<|@<|@>=|@>)', Operator), - # Evaluable functors - (r'(//|[-+*/])', Operator), - (r'\b(e|pi|mod|rem)\b', Operator), - # Other arithemtic functors - (r'\b\*\*\b', Operator), - # DCG rules - (r'-->', Operator), - # Control constructs - (r'([!;]|->)', Operator), - # Logic and control - (r'\\+', Operator), - # Mode operators - (r'[?@]', Operator), - # Existential quantifier - (r'\^', Operator), - # Strings - (r'"(\\\\|\\"|[^"])*"', String), - # Ponctuation - (r'[()\[\],.|]', Text), - # Atoms - (r"[a-z][a-zA-Z0-9_]*", Text), - (r"'", String, 'quoted_atom'), - ], - - 'quoted_atom': [ - (r"''", String), - (r"'", String, '#pop'), - (r'\\([\\abfnrtv"\']|(x[a-fA-F0-9]+|[0-7]+)\\)', String.Escape), - (r"[^\\'\n]+", String), - (r'\\', String), - ], - - 'directive': [ - # Conditional compilation directives - (r'(el)?if(?=[(])', Keyword, 'root'), - (r'(e(lse|ndif))[.]', Keyword, 'root'), - # Entity directives - (r'(category|object|protocol)(?=[(])', Keyword, 'entityrelations'), - (r'(end_(category|object|protocol))[.]',Keyword, 'root'), - # Predicate scope directives - (r'(public|protected|private)(?=[(])', Keyword, 'root'), - # Other directives - (r'e(n(coding|sure_loaded)|xport)(?=[(])', Keyword, 'root'), - (r'in(fo|itialization)(?=[(])', Keyword, 'root'), - (r'(dynamic|synchronized|threaded)[.]', Keyword, 'root'), - (r'(alias|d(ynamic|iscontiguous)|m(eta_predicate|ode|ultifile)|' - r's(et_(logtalk|prolog)_flag|ynchronized))(?=[(])', - Keyword, 'root'), - (r'op(?=[(])', Keyword, 'root'), - (r'(c(alls|oinductive)|reexport|use(s|_module))(?=[(])', - Keyword, 'root'), - (r'[a-z][a-zA-Z0-9_]*(?=[(])', Text, 'root'), - (r'[a-z][a-zA-Z0-9_]*[.]', Text, 'root'), - ], - - 'entityrelations': [ - (r'(complements|extends|i(nstantiates|mp(lements|orts))|specializes)' - r'(?=[(])', Keyword), - # Numbers - (r"0'.", Number), - (r'0b[01]+', Number), - (r'0o[0-7]+', Number), - (r'0x[0-9a-fA-F]+', Number), - (r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number), - # Variables - 
(r'([A-Z_][a-zA-Z0-9_]*)', Name.Variable), - # Atoms - (r"[a-z][a-zA-Z0-9_]*", Text), - (r"'", String, 'quoted_atom'), - # Strings - (r'"(\\\\|\\"|[^"])*"', String), - # End of entity-opening directive - (r'([)]\.)', Text, 'root'), - # Scope operator - (r'(::)', Operator), - # Ponctuation - (r'[()\[\],.|]', Text), - # Comments - (r'%.*?\n', Comment), - (r'/\*(.|\n)*?\*/',Comment), - # Whitespace - (r'\n', Text), - (r'\s+', Text), - ] - } - - def analyse_text(text): - if ':- object(' in text: - return True - if ':- protocol(' in text: - return True - if ':- category(' in text: - return True - return False - - -def _shortened(word): - dpos = word.find('$') - return '|'.join([word[:dpos] + word[dpos+1:i] + r'\b' - for i in range(len(word), dpos, -1)]) -def _shortened_many(*words): - return '|'.join(map(_shortened, words)) - -class GnuplotLexer(RegexLexer): - """ - For `Gnuplot `_ plotting scripts. - - *New in Pygments 0.11.* - """ - - name = 'Gnuplot' - aliases = ['gnuplot'] - filenames = ['*.plot', '*.plt'] - mimetypes = ['text/x-gnuplot'] - - tokens = { - 'root': [ - include('whitespace'), - (_shortened('bi$nd'), Keyword, 'bind'), - (_shortened_many('ex$it', 'q$uit'), Keyword, 'quit'), - (_shortened('f$it'), Keyword, 'fit'), - (r'(if)(\s*)(\()', bygroups(Keyword, Text, Punctuation), 'if'), - (r'else\b', Keyword), - (_shortened('pa$use'), Keyword, 'pause'), - (_shortened_many('p$lot', 'rep$lot', 'sp$lot'), Keyword, 'plot'), - (_shortened('sa$ve'), Keyword, 'save'), - (_shortened('se$t'), Keyword, ('genericargs', 'optionarg')), - (_shortened_many('sh$ow', 'uns$et'), - Keyword, ('noargs', 'optionarg')), - (_shortened_many('low$er', 'ra$ise', 'ca$ll', 'cd$', 'cl$ear', - 'h$elp', '\\?$', 'hi$story', 'l$oad', 'pr$int', - 'pwd$', 're$read', 'res$et', 'scr$eendump', - 'she$ll', 'sy$stem', 'up$date'), - Keyword, 'genericargs'), - (_shortened_many('pwd$', 're$read', 'res$et', 'scr$eendump', - 'she$ll', 'test$'), - Keyword, 'noargs'), - ('([a-zA-Z_][a-zA-Z0-9_]*)(\s*)(=)', - 
bygroups(Name.Variable, Text, Operator), 'genericargs'), - ('([a-zA-Z_][a-zA-Z0-9_]*)(\s*\(.*?\)\s*)(=)', - bygroups(Name.Function, Text, Operator), 'genericargs'), - (r'@[a-zA-Z_][a-zA-Z0-9_]*', Name.Constant), # macros - (r';', Keyword), - ], - 'comment': [ - (r'[^\\\n]', Comment), - (r'\\\n', Comment), - (r'\\', Comment), - # don't add the newline to the Comment token - ('', Comment, '#pop'), - ], - 'whitespace': [ - ('#', Comment, 'comment'), - (r'[ \t\v\f]+', Text), - ], - 'noargs': [ - include('whitespace'), - # semicolon and newline end the argument list - (r';', Punctuation, '#pop'), - (r'\n', Text, '#pop'), - ], - 'dqstring': [ - (r'"', String, '#pop'), - (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), - (r'[^\\"\n]+', String), # all other characters - (r'\\\n', String), # line continuation - (r'\\', String), # stray backslash - (r'\n', String, '#pop'), # newline ends the string too - ], - 'sqstring': [ - (r"''", String), # escaped single quote - (r"'", String, '#pop'), - (r"[^\\'\n]+", String), # all other characters - (r'\\\n', String), # line continuation - (r'\\', String), # normal backslash - (r'\n', String, '#pop'), # newline ends the string too - ], - 'genericargs': [ - include('noargs'), - (r'"', String, 'dqstring'), - (r"'", String, 'sqstring'), - (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float), - (r'(\d+\.\d*|\.\d+)', Number.Float), - (r'-?\d+', Number.Integer), - ('[,.~!%^&*+=|?:<>/-]', Operator), - ('[{}()\[\]]', Punctuation), - (r'(eq|ne)\b', Operator.Word), - (r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*)(\()', - bygroups(Name.Function, Text, Punctuation)), - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name), - (r'@[a-zA-Z_][a-zA-Z0-9_]*', Name.Constant), # macros - (r'\\\n', Text), - ], - 'optionarg': [ - include('whitespace'), - (_shortened_many( - "a$ll","an$gles","ar$row","au$toscale","b$ars","bor$der", - "box$width","cl$abel","c$lip","cn$trparam","co$ntour","da$ta", - "data$file","dg$rid3d","du$mmy","enc$oding","dec$imalsign", - 
"fit$","font$path","fo$rmat","fu$nction","fu$nctions","g$rid", - "hid$den3d","his$torysize","is$osamples","k$ey","keyt$itle", - "la$bel","li$nestyle","ls$","loa$dpath","loc$ale","log$scale", - "mac$ros","map$ping","map$ping3d","mar$gin","lmar$gin", - "rmar$gin","tmar$gin","bmar$gin","mo$use","multi$plot", - "mxt$ics","nomxt$ics","mx2t$ics","nomx2t$ics","myt$ics", - "nomyt$ics","my2t$ics","nomy2t$ics","mzt$ics","nomzt$ics", - "mcbt$ics","nomcbt$ics","of$fsets","or$igin","o$utput", - "pa$rametric","pm$3d","pal$ette","colorb$ox","p$lot", - "poi$ntsize","pol$ar","pr$int","obj$ect","sa$mples","si$ze", - "st$yle","su$rface","table$","t$erminal","termo$ptions","ti$cs", - "ticsc$ale","ticsl$evel","timef$mt","tim$estamp","tit$le", - "v$ariables","ve$rsion","vi$ew","xyp$lane","xda$ta","x2da$ta", - "yda$ta","y2da$ta","zda$ta","cbda$ta","xl$abel","x2l$abel", - "yl$abel","y2l$abel","zl$abel","cbl$abel","xti$cs","noxti$cs", - "x2ti$cs","nox2ti$cs","yti$cs","noyti$cs","y2ti$cs","noy2ti$cs", - "zti$cs","nozti$cs","cbti$cs","nocbti$cs","xdti$cs","noxdti$cs", - "x2dti$cs","nox2dti$cs","ydti$cs","noydti$cs","y2dti$cs", - "noy2dti$cs","zdti$cs","nozdti$cs","cbdti$cs","nocbdti$cs", - "xmti$cs","noxmti$cs","x2mti$cs","nox2mti$cs","ymti$cs", - "noymti$cs","y2mti$cs","noy2mti$cs","zmti$cs","nozmti$cs", - "cbmti$cs","nocbmti$cs","xr$ange","x2r$ange","yr$ange", - "y2r$ange","zr$ange","cbr$ange","rr$ange","tr$ange","ur$ange", - "vr$ange","xzeroa$xis","x2zeroa$xis","yzeroa$xis","y2zeroa$xis", - "zzeroa$xis","zeroa$xis","z$ero"), Name.Builtin, '#pop'), - ], - 'bind': [ - ('!', Keyword, '#pop'), - (_shortened('all$windows'), Name.Builtin), - include('genericargs'), - ], - 'quit': [ - (r'gnuplot\b', Keyword), - include('noargs'), - ], - 'fit': [ - (r'via\b', Name.Builtin), - include('plot'), - ], - 'if': [ - (r'\)', Punctuation, '#pop'), - include('genericargs'), - ], - 'pause': [ - (r'(mouse|any|button1|button2|button3)\b', Name.Builtin), - (_shortened('key$press'), Name.Builtin), - 
include('genericargs'), - ], - 'plot': [ - (_shortened_many('ax$es', 'axi$s', 'bin$ary', 'ev$ery', 'i$ndex', - 'mat$rix', 's$mooth', 'thru$', 't$itle', - 'not$itle', 'u$sing', 'w$ith'), - Name.Builtin), - include('genericargs'), - ], - 'save': [ - (_shortened_many('f$unctions', 's$et', 't$erminal', 'v$ariables'), - Name.Builtin), - include('genericargs'), - ], - } - - -class PovrayLexer(RegexLexer): - """ - For `Persistence of Vision Raytracer `_ files. - - *New in Pygments 0.11.* - """ - name = 'POVRay' - aliases = ['pov'] - filenames = ['*.pov', '*.inc'] - mimetypes = ['text/x-povray'] - - tokens = { - 'root': [ - (r'/\*[\w\W]*?\*/', Comment.Multiline), - (r'//.*\n', Comment.Single), - (r'(?s)"(?:\\.|[^"\\])+"', String.Double), - (r'#(debug|default|else|end|error|fclose|fopen|ifdef|ifndef|' - r'include|range|read|render|statistics|switch|undef|version|' - r'warning|while|write|define|macro|local|declare)\b', - Comment.Preproc), - (r'\b(aa_level|aa_threshold|abs|acos|acosh|adaptive|adc_bailout|' - r'agate|agate_turb|all|alpha|ambient|ambient_light|angle|' - r'aperture|arc_angle|area_light|asc|asin|asinh|assumed_gamma|' - r'atan|atan2|atanh|atmosphere|atmospheric_attenuation|' - r'attenuating|average|background|black_hole|blue|blur_samples|' - r'bounded_by|box_mapping|bozo|break|brick|brick_size|' - r'brightness|brilliance|bumps|bumpy1|bumpy2|bumpy3|bump_map|' - r'bump_size|case|caustics|ceil|checker|chr|clipped_by|clock|' - r'color|color_map|colour|colour_map|component|composite|concat|' - r'confidence|conic_sweep|constant|control0|control1|cos|cosh|' - r'count|crackle|crand|cube|cubic_spline|cylindrical_mapping|' - r'debug|declare|default|degrees|dents|diffuse|direction|' - r'distance|distance_maximum|div|dust|dust_type|eccentricity|' - r'else|emitting|end|error|error_bound|exp|exponent|' - r'fade_distance|fade_power|falloff|falloff_angle|false|' - r'file_exists|filter|finish|fisheye|flatness|flip|floor|' - 
r'focal_point|fog|fog_alt|fog_offset|fog_type|frequency|gif|' - r'global_settings|glowing|gradient|granite|gray_threshold|' - r'green|halo|hexagon|hf_gray_16|hierarchy|hollow|hypercomplex|' - r'if|ifdef|iff|image_map|incidence|include|int|interpolate|' - r'inverse|ior|irid|irid_wavelength|jitter|lambda|leopard|' - r'linear|linear_spline|linear_sweep|location|log|looks_like|' - r'look_at|low_error_factor|mandel|map_type|marble|material_map|' - r'matrix|max|max_intersections|max_iteration|max_trace_level|' - r'max_value|metallic|min|minimum_reuse|mod|mortar|' - r'nearest_count|no|normal|normal_map|no_shadow|number_of_waves|' - r'octaves|off|offset|omega|omnimax|on|once|onion|open|' - r'orthographic|panoramic|pattern1|pattern2|pattern3|' - r'perspective|pgm|phase|phong|phong_size|pi|pigment|' - r'pigment_map|planar_mapping|png|point_at|pot|pow|ppm|' - r'precision|pwr|quadratic_spline|quaternion|quick_color|' - r'quick_colour|quilted|radial|radians|radiosity|radius|rainbow|' - r'ramp_wave|rand|range|reciprocal|recursion_limit|red|' - r'reflection|refraction|render|repeat|rgb|rgbf|rgbft|rgbt|' - r'right|ripples|rotate|roughness|samples|scale|scallop_wave|' - r'scattering|seed|shadowless|sin|sine_wave|sinh|sky|sky_sphere|' - r'slice|slope_map|smooth|specular|spherical_mapping|spiral|' - r'spiral1|spiral2|spotlight|spotted|sqr|sqrt|statistics|str|' - r'strcmp|strength|strlen|strlwr|strupr|sturm|substr|switch|sys|' - r't|tan|tanh|test_camera_1|test_camera_2|test_camera_3|' - r'test_camera_4|texture|texture_map|tga|thickness|threshold|' - r'tightness|tile2|tiles|track|transform|translate|transmit|' - r'triangle_wave|true|ttf|turbulence|turb_depth|type|' - r'ultra_wide_angle|up|use_color|use_colour|use_index|u_steps|' - r'val|variance|vaxis_rotate|vcross|vdot|version|vlength|' - r'vnormalize|volume_object|volume_rendered|vol_with_light|' - r'vrotate|v_steps|warning|warp|water_level|waves|while|width|' - r'wood|wrinkles|yes)\b', Keyword), - 
(r'(bicubic_patch|blob|box|camera|cone|cubic|cylinder|difference|' - r'disc|height_field|intersection|julia_fractal|lathe|' - r'light_source|merge|mesh|object|plane|poly|polygon|prism|' - r'quadric|quartic|smooth_triangle|sor|sphere|superellipsoid|' - r'text|torus|triangle|union)\b', Name.Builtin), - # TODO: <=, etc - (r'[\[\](){}<>;,]', Punctuation), - (r'[-+*/=]', Operator), - (r'\b(x|y|z|u|v)\b', Name.Builtin.Pseudo), - (r'[a-zA-Z_][a-zA-Z_0-9]*', Name), - (r'[0-9]+\.[0-9]*', Number.Float), - (r'\.[0-9]+', Number.Float), - (r'[0-9]+', Number.Integer), - (r'\s+', Text), - ] - } - - -class AppleScriptLexer(RegexLexer): - """ - For `AppleScript source code - `_, - including `AppleScript Studio - `_. - Contributed by Andreas Amann . - """ - - name = 'AppleScript' - aliases = ['applescript'] - filenames = ['*.applescript'] - - flags = re.MULTILINE | re.DOTALL - - Identifiers = r'[a-zA-Z]\w*' - Literals = ['AppleScript', 'current application', 'false', 'linefeed', - 'missing value', 'pi','quote', 'result', 'return', 'space', - 'tab', 'text item delimiters', 'true', 'version'] - Classes = ['alias ', 'application ', 'boolean ', 'class ', 'constant ', - 'date ', 'file ', 'integer ', 'list ', 'number ', 'POSIX file ', - 'real ', 'record ', 'reference ', 'RGB color ', 'script ', - 'text ', 'unit types', '(?:Unicode )?text', 'string'] - BuiltIn = ['attachment', 'attribute run', 'character', 'day', 'month', - 'paragraph', 'word', 'year'] - HandlerParams = ['about', 'above', 'against', 'apart from', 'around', - 'aside from', 'at', 'below', 'beneath', 'beside', - 'between', 'for', 'given', 'instead of', 'on', 'onto', - 'out of', 'over', 'since'] - Commands = ['ASCII (character|number)', 'activate', 'beep', 'choose URL', - 'choose application', 'choose color', 'choose file( name)?', - 'choose folder', 'choose from list', - 'choose remote application', 'clipboard info', - 'close( access)?', 'copy', 'count', 'current date', 'delay', - 'delete', 'display (alert|dialog)', 'do shell 
script', - 'duplicate', 'exists', 'get eof', 'get volume settings', - 'info for', 'launch', 'list (disks|folder)', 'load script', - 'log', 'make', 'mount volume', 'new', 'offset', - 'open( (for access|location))?', 'path to', 'print', 'quit', - 'random number', 'read', 'round', 'run( script)?', - 'say', 'scripting components', - 'set (eof|the clipboard to|volume)', 'store script', - 'summarize', 'system attribute', 'system info', - 'the clipboard', 'time to GMT', 'write', 'quoted form'] - References = ['(in )?back of', '(in )?front of', '[0-9]+(st|nd|rd|th)', - 'first', 'second', 'third', 'fourth', 'fifth', 'sixth', - 'seventh', 'eighth', 'ninth', 'tenth', 'after', 'back', - 'before', 'behind', 'every', 'front', 'index', 'last', - 'middle', 'some', 'that', 'through', 'thru', 'where', 'whose'] - Operators = ["and", "or", "is equal", "equals", "(is )?equal to", "is not", - "isn't", "isn't equal( to)?", "is not equal( to)?", - "doesn't equal", "does not equal", "(is )?greater than", - "comes after", "is not less than or equal( to)?", - "isn't less than or equal( to)?", "(is )?less than", - "comes before", "is not greater than or equal( to)?", - "isn't greater than or equal( to)?", - "(is )?greater than or equal( to)?", "is not less than", - "isn't less than", "does not come before", - "doesn't come before", "(is )?less than or equal( to)?", - "is not greater than", "isn't greater than", - "does not come after", "doesn't come after", "starts? with", - "begins? with", "ends? 
with", "contains?", "does not contain", - "doesn't contain", "is in", "is contained by", "is not in", - "is not contained by", "isn't contained by", "div", "mod", - "not", "(a )?(ref( to)?|reference to)", "is", "does"] - Control = ['considering', 'else', 'error', 'exit', 'from', 'if', - 'ignoring', 'in', 'repeat', 'tell', 'then', 'times', 'to', - 'try', 'until', 'using terms from', 'while', 'whith', - 'with timeout( of)?', 'with transaction', 'by', 'continue', - 'end', 'its?', 'me', 'my', 'return', 'of' , 'as'] - Declarations = ['global', 'local', 'prop(erty)?', 'set', 'get'] - Reserved = ['but', 'put', 'returning', 'the'] - StudioClasses = ['action cell', 'alert reply', 'application', 'box', - 'browser( cell)?', 'bundle', 'button( cell)?', 'cell', - 'clip view', 'color well', 'color-panel', - 'combo box( item)?', 'control', - 'data( (cell|column|item|row|source))?', 'default entry', - 'dialog reply', 'document', 'drag info', 'drawer', - 'event', 'font(-panel)?', 'formatter', - 'image( (cell|view))?', 'matrix', 'menu( item)?', 'item', - 'movie( view)?', 'open-panel', 'outline view', 'panel', - 'pasteboard', 'plugin', 'popup button', - 'progress indicator', 'responder', 'save-panel', - 'scroll view', 'secure text field( cell)?', 'slider', - 'sound', 'split view', 'stepper', 'tab view( item)?', - 'table( (column|header cell|header view|view))', - 'text( (field( cell)?|view))?', 'toolbar( item)?', - 'user-defaults', 'view', 'window'] - StudioEvents = ['accept outline drop', 'accept table drop', 'action', - 'activated', 'alert ended', 'awake from nib', 'became key', - 'became main', 'begin editing', 'bounds changed', - 'cell value', 'cell value changed', 'change cell value', - 'change item value', 'changed', 'child of item', - 'choose menu item', 'clicked', 'clicked toolbar item', - 'closed', 'column clicked', 'column moved', - 'column resized', 'conclude drop', 'data representation', - 'deminiaturized', 'dialog ended', 'document nib name', - 'double clicked', 'drag( 
(entered|exited|updated))?', - 'drop', 'end editing', 'exposed', 'idle', 'item expandable', - 'item value', 'item value changed', 'items changed', - 'keyboard down', 'keyboard up', 'launched', - 'load data representation', 'miniaturized', 'mouse down', - 'mouse dragged', 'mouse entered', 'mouse exited', - 'mouse moved', 'mouse up', 'moved', - 'number of browser rows', 'number of items', - 'number of rows', 'open untitled', 'opened', 'panel ended', - 'parameters updated', 'plugin loaded', 'prepare drop', - 'prepare outline drag', 'prepare outline drop', - 'prepare table drag', 'prepare table drop', - 'read from file', 'resigned active', 'resigned key', - 'resigned main', 'resized( sub views)?', - 'right mouse down', 'right mouse dragged', - 'right mouse up', 'rows changed', 'scroll wheel', - 'selected tab view item', 'selection changed', - 'selection changing', 'should begin editing', - 'should close', 'should collapse item', - 'should end editing', 'should expand item', - 'should open( untitled)?', - 'should quit( after last window closed)?', - 'should select column', 'should select item', - 'should select row', 'should select tab view item', - 'should selection change', 'should zoom', 'shown', - 'update menu item', 'update parameters', - 'update toolbar item', 'was hidden', 'was miniaturized', - 'will become active', 'will close', 'will dismiss', - 'will display browser cell', 'will display cell', - 'will display item cell', 'will display outline cell', - 'will finish launching', 'will hide', 'will miniaturize', - 'will move', 'will open', 'will pop up', 'will quit', - 'will resign active', 'will resize( sub views)?', - 'will select tab view item', 'will show', 'will zoom', - 'write to file', 'zoomed'] - StudioCommands = ['animate', 'append', 'call method', 'center', - 'close drawer', 'close panel', 'display', - 'display alert', 'display dialog', 'display panel', 'go', - 'hide', 'highlight', 'increment', 'item for', - 'load image', 'load movie', 'load nib', 'load 
panel', - 'load sound', 'localized string', 'lock focus', 'log', - 'open drawer', 'path for', 'pause', 'perform action', - 'play', 'register', 'resume', 'scroll', 'select( all)?', - 'show', 'size to fit', 'start', 'step back', - 'step forward', 'stop', 'synchronize', 'unlock focus', - 'update'] - StudioProperties = ['accepts arrow key', 'action method', 'active', - 'alignment', 'allowed identifiers', - 'allows branch selection', 'allows column reordering', - 'allows column resizing', 'allows column selection', - 'allows customization', - 'allows editing text attributes', - 'allows empty selection', 'allows mixed state', - 'allows multiple selection', 'allows reordering', - 'allows undo', 'alpha( value)?', 'alternate image', - 'alternate increment value', 'alternate title', - 'animation delay', 'associated file name', - 'associated object', 'auto completes', 'auto display', - 'auto enables items', 'auto repeat', - 'auto resizes( outline column)?', - 'auto save expanded items', 'auto save name', - 'auto save table columns', 'auto saves configuration', - 'auto scroll', 'auto sizes all columns to fit', - 'auto sizes cells', 'background color', 'bezel state', - 'bezel style', 'bezeled', 'border rect', 'border type', - 'bordered', 'bounds( rotation)?', 'box type', - 'button returned', 'button type', - 'can choose directories', 'can choose files', - 'can draw', 'can hide', - 'cell( (background color|size|type))?', 'characters', - 'class', 'click count', 'clicked( data)? column', - 'clicked data item', 'clicked( data)? row', - 'closeable', 'collating', 'color( (mode|panel))', - 'command key down', 'configuration', - 'content(s| (size|view( margins)?))?', 'context', - 'continuous', 'control key down', 'control size', - 'control tint', 'control view', - 'controller visible', 'coordinate system', - 'copies( on scroll)?', 'corner view', 'current cell', - 'current column', 'current( field)? editor', - 'current( menu)? 
item', 'current row', - 'current tab view item', 'data source', - 'default identifiers', 'delta (x|y|z)', - 'destination window', 'directory', 'display mode', - 'displayed cell', 'document( (edited|rect|view))?', - 'double value', 'dragged column', 'dragged distance', - 'dragged items', 'draws( cell)? background', - 'draws grid', 'dynamically scrolls', 'echos bullets', - 'edge', 'editable', 'edited( data)? column', - 'edited data item', 'edited( data)? row', 'enabled', - 'enclosing scroll view', 'ending page', - 'error handling', 'event number', 'event type', - 'excluded from windows menu', 'executable path', - 'expanded', 'fax number', 'field editor', 'file kind', - 'file name', 'file type', 'first responder', - 'first visible column', 'flipped', 'floating', - 'font( panel)?', 'formatter', 'frameworks path', - 'frontmost', 'gave up', 'grid color', 'has data items', - 'has horizontal ruler', 'has horizontal scroller', - 'has parent data item', 'has resize indicator', - 'has shadow', 'has sub menu', 'has vertical ruler', - 'has vertical scroller', 'header cell', 'header view', - 'hidden', 'hides when deactivated', 'highlights by', - 'horizontal line scroll', 'horizontal page scroll', - 'horizontal ruler view', 'horizontally resizable', - 'icon image', 'id', 'identifier', - 'ignores multiple clicks', - 'image( (alignment|dims when disabled|frame style|' - 'scaling))?', - 'imports graphics', 'increment value', - 'indentation per level', 'indeterminate', 'index', - 'integer value', 'intercell spacing', 'item height', - 'key( (code|equivalent( modifier)?|window))?', - 'knob thickness', 'label', 'last( visible)? column', - 'leading offset', 'leaf', 'level', 'line scroll', - 'loaded', 'localized sort', 'location', 'loop mode', - 'main( (bunde|menu|window))?', 'marker follows cell', - 'matrix mode', 'maximum( content)? 
size', - 'maximum visible columns', - 'menu( form representation)?', 'miniaturizable', - 'miniaturized', 'minimized image', 'minimized title', - 'minimum column width', 'minimum( content)? size', - 'modal', 'modified', 'mouse down state', - 'movie( (controller|file|rect))?', 'muted', 'name', - 'needs display', 'next state', 'next text', - 'number of tick marks', 'only tick mark values', - 'opaque', 'open panel', 'option key down', - 'outline table column', 'page scroll', 'pages across', - 'pages down', 'palette label', 'pane splitter', - 'parent data item', 'parent window', 'pasteboard', - 'path( (names|separator))?', 'playing', - 'plays every frame', 'plays selection only', 'position', - 'preferred edge', 'preferred type', 'pressure', - 'previous text', 'prompt', 'properties', - 'prototype cell', 'pulls down', 'rate', - 'released when closed', 'repeated', - 'requested print time', 'required file type', - 'resizable', 'resized column', 'resource path', - 'returns records', 'reuses columns', 'rich text', - 'roll over', 'row height', 'rulers visible', - 'save panel', 'scripts path', 'scrollable', - 'selectable( identifiers)?', 'selected cell', - 'selected( data)? columns?', 'selected data items?', - 'selected( data)? 
rows?', 'selected item identifier', - 'selection by rect', 'send action on arrow key', - 'sends action when done editing', 'separates columns', - 'separator item', 'sequence number', 'services menu', - 'shared frameworks path', 'shared support path', - 'sheet', 'shift key down', 'shows alpha', - 'shows state by', 'size( mode)?', - 'smart insert delete enabled', 'sort case sensitivity', - 'sort column', 'sort order', 'sort type', - 'sorted( data rows)?', 'sound', 'source( mask)?', - 'spell checking enabled', 'starting page', 'state', - 'string value', 'sub menu', 'super menu', 'super view', - 'tab key traverses cells', 'tab state', 'tab type', - 'tab view', 'table view', 'tag', 'target( printer)?', - 'text color', 'text container insert', - 'text container origin', 'text returned', - 'tick mark position', 'time stamp', - 'title(d| (cell|font|height|position|rect))?', - 'tool tip', 'toolbar', 'trailing offset', 'transparent', - 'treat packages as directories', 'truncated labels', - 'types', 'unmodified characters', 'update views', - 'use sort indicator', 'user defaults', - 'uses data source', 'uses ruler', - 'uses threaded animation', - 'uses title from previous column', 'value wraps', - 'version', - 'vertical( (line scroll|page scroll|ruler view))?', - 'vertically resizable', 'view', - 'visible( document rect)?', 'volume', 'width', 'window', - 'windows menu', 'wraps', 'zoomable', 'zoomed'] - - tokens = { - 'root': [ - (r'\s+', Text), - (ur'¬\n', String.Escape), - (r"'s\s+", Text), # This is a possessive, consider moving - (r'(--|#).*?$', Comment), - (r'\(\*', Comment.Multiline, 'comment'), - (r'[\(\){}!,.:]', Punctuation), - (ur'(«)([^»]+)(»)', - bygroups(Text, Name.Builtin, Text)), - (r'\b((?:considering|ignoring)\s*)' - r'(application responses|case|diacriticals|hyphens|' - r'numeric strings|punctuation|white space)', - bygroups(Keyword, Name.Builtin)), - (ur'(-|\*|\+|&|≠|>=?|<=?|=|≥|≤|/|÷|\^)', Operator), - (r"\b(%s)\b" % '|'.join(Operators), Operator.Word), - 
(r'^(\s*(?:on|end)\s+)' - r'(%s)' % '|'.join(StudioEvents[::-1]), - bygroups(Keyword, Name.Function)), - (r'^(\s*)(in|on|script|to)(\s+)', bygroups(Text, Keyword, Text)), - (r'\b(as )(%s)\b' % '|'.join(Classes), - bygroups(Keyword, Name.Class)), - (r'\b(%s)\b' % '|'.join(Literals), Name.Constant), - (r'\b(%s)\b' % '|'.join(Commands), Name.Builtin), - (r'\b(%s)\b' % '|'.join(Control), Keyword), - (r'\b(%s)\b' % '|'.join(Declarations), Keyword), - (r'\b(%s)\b' % '|'.join(Reserved), Name.Builtin), - (r'\b(%s)s?\b' % '|'.join(BuiltIn), Name.Builtin), - (r'\b(%s)\b' % '|'.join(HandlerParams), Name.Builtin), - (r'\b(%s)\b' % '|'.join(StudioProperties), Name.Attribute), - (r'\b(%s)s?\b' % '|'.join(StudioClasses), Name.Builtin), - (r'\b(%s)\b' % '|'.join(StudioCommands), Name.Builtin), - (r'\b(%s)\b' % '|'.join(References), Name.Builtin), - (r'"(\\\\|\\"|[^"])*"', String.Double), - (r'\b(%s)\b' % Identifiers, Name.Variable), - (r'[-+]?(\d+\.\d*|\d*\.\d+)(E[-+][0-9]+)?', Number.Float), - (r'[-+]?\d+', Number.Integer), - ], - 'comment': [ - ('\(\*', Comment.Multiline, '#push'), - ('\*\)', Comment.Multiline, '#pop'), - ('[^*(]+', Comment.Multiline), - ('[*(]', Comment.Multiline), - ], - } - - -class ModelicaLexer(RegexLexer): - """ - For `Modelica `_ source code. 
- - *New in Pygments 1.1.* - """ - name = 'Modelica' - aliases = ['modelica'] - filenames = ['*.mo'] - mimetypes = ['text/x-modelica'] - - flags = re.IGNORECASE | re.DOTALL - - tokens = { - 'whitespace': [ - (r'\n', Text), - (r'\s+', Text), - (r'\\\n', Text), # line continuation - (r'//(\n|(.|\n)*?[^\\]\n)', Comment), - (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment), - ], - 'statements': [ - (r'"', String, 'string'), - (r'(\d+\.\d*|\.\d+|\d+|\d.)[eE][+-]?\d+[lL]?', Number.Float), - (r'(\d+\.\d*|\.\d+)', Number.Float), - (r'\d+[Ll]?', Number.Integer), - (r'[~!%^&*+=|?:<>/-]', Operator), - (r'[()\[\]{},.;]', Punctuation), - (r'(true|false|NULL|Real|Integer|Boolean)\b', Name.Builtin), - (r"([a-zA-Z_][\w]*|'[a-zA-Z_\+\-\*\/\^][\w]*')" - r"(\.([a-zA-Z_][\w]*|'[a-zA-Z_\+\-\*\/\^][\w]*'))+", Name.Class), - (r"('[\w\+\-\*\/\^]+'|\w+)", Name), - ], - 'root': [ - include('whitespace'), - include('keywords'), - include('functions'), - include('operators'), - include('classes'), - (r'("|)', Name.Tag, 'html-content'), - include('statements'), - ], - 'keywords': [ - (r'(algorithm|annotation|break|connect|constant|constrainedby|' - r'discrete|each|else|elseif|elsewhen|encapsulated|enumeration|' - r'end|equation|exit|expandable|extends|' - r'external|false|final|flow|for|if|import|impure|in|initial\sequation|' - r'inner|input|loop|nondiscrete|outer|output|parameter|partial|' - r'protected|public|pure|redeclare|replaceable|stream|time|then|true|' - r'when|while|within)\b', Keyword), - ], - 'functions': [ - (r'(abs|acos|acosh|asin|asinh|atan|atan2|atan3|ceil|cos|cosh|' - r'cross|div|exp|floor|getInstanceName|log|log10|mod|rem|' - r'semiLinear|sign|sin|sinh|size|spatialDistribution|sqrt|tan|' - r'tanh|zeros)\b', Name.Function), - ], - 'operators': [ - (r'(actualStream|and|assert|cardinality|change|Clock|delay|der|edge|' - r'hold|homotopy|initial|inStream|noEvent|not|or|pre|previous|reinit|' - r'return|sample|smooth|spatialDistribution|subSample|terminal|' - r'terminate)\b', 
Name.Builtin), - ], - 'classes': [ - (r'(block|class|connector|function|model|package|' - r'record|type)(\s+)([A-Za-z_]+)', - bygroups(Keyword, Text, Name.Class)) - ], - 'string': [ - (r'"', String, '#pop'), - (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', - String.Escape), - (r'[^\\"\n]+', String), # all other characters - (r'\\\n', String), # line continuation - (r'\\', String), # stray backslash - ], - 'html-content': [ - (r'<\s*/\s*html\s*>', Name.Tag, '#pop'), - (r'.+?(?=<\s*/\s*html\s*>)', using(HtmlLexer)), - ] - } - - -class RebolLexer(RegexLexer): - """ - A `REBOL `_ lexer. - - *New in Pygments 1.1.* - """ - name = 'REBOL' - aliases = ['rebol'] - filenames = ['*.r', '*.r3'] - mimetypes = ['text/x-rebol'] - - flags = re.IGNORECASE | re.MULTILINE - - re.IGNORECASE - - escape_re = r'(?:\^\([0-9a-fA-F]{1,4}\)*)' - - def word_callback(lexer, match): - word = match.group() - - if re.match(".*:$", word): - yield match.start(), Generic.Subheading, word - elif re.match( - r'(native|alias|all|any|as-string|as-binary|bind|bound\?|case|' - r'catch|checksum|comment|debase|dehex|exclude|difference|disarm|' - r'either|else|enbase|foreach|remove-each|form|free|get|get-env|if|' - r'in|intersect|loop|minimum-of|maximum-of|mold|new-line|' - r'new-line\?|not|now|prin|print|reduce|compose|construct|repeat|' - r'reverse|save|script\?|set|shift|switch|throw|to-hex|trace|try|' - r'type\?|union|unique|unless|unprotect|unset|until|use|value\?|' - r'while|compress|decompress|secure|open|close|read|read-io|' - r'write-io|write|update|query|wait|input\?|exp|log-10|log-2|' - r'log-e|square-root|cosine|sine|tangent|arccosine|arcsine|' - r'arctangent|protect|lowercase|uppercase|entab|detab|connected\?|' - r'browse|launch|stats|get-modes|set-modes|to-local-file|' - r'to-rebol-file|encloak|decloak|create-link|do-browser|bind\?|' - r'hide|draw|show|size-text|textinfo|offset-to-caret|' - r'caret-to-offset|local-request-file|rgb-to-hsv|hsv-to-rgb|' - 
r'crypt-strength\?|dh-make-key|dh-generate-key|dh-compute-key|' - r'dsa-make-key|dsa-generate-key|dsa-make-signature|' - r'dsa-verify-signature|rsa-make-key|rsa-generate-key|' - r'rsa-encrypt)$', word): - yield match.start(), Name.Builtin, word - elif re.match( - r'(add|subtract|multiply|divide|remainder|power|and~|or~|xor~|' - r'minimum|maximum|negate|complement|absolute|random|head|tail|' - r'next|back|skip|at|pick|first|second|third|fourth|fifth|sixth|' - r'seventh|eighth|ninth|tenth|last|path|find|select|make|to|copy\*|' - r'insert|remove|change|poke|clear|trim|sort|min|max|abs|cp|' - r'copy)$', word): - yield match.start(), Name.Function, word - elif re.match( - r'(error|source|input|license|help|install|echo|Usage|with|func|' - r'throw-on-error|function|does|has|context|probe|\?\?|as-pair|' - r'mod|modulo|round|repend|about|set-net|append|join|rejoin|reform|' - r'remold|charset|array|replace|move|extract|forskip|forall|alter|' - r'first+|also|take|for|forever|dispatch|attempt|what-dir|' - r'change-dir|clean-path|list-dir|dirize|rename|split-path|delete|' - r'make-dir|delete-dir|in-dir|confirm|dump-obj|upgrade|what|' - r'build-tag|process-source|build-markup|decode-cgi|read-cgi|' - r'write-user|save-user|set-user-name|protect-system|parse-xml|' - r'cvs-date|cvs-version|do-boot|get-net-info|desktop|layout|' - r'scroll-para|get-face|alert|set-face|uninstall|unfocus|' - r'request-dir|center-face|do-events|net-error|decode-url|' - r'parse-header|parse-header-date|parse-email-addrs|import-email|' - r'send|build-attach-body|resend|show-popup|hide-popup|open-events|' - r'find-key-face|do-face|viewtop|confine|find-window|' - r'insert-event-func|remove-event-func|inform|dump-pane|dump-face|' - r'flag-face|deflag-face|clear-fields|read-net|vbug|path-thru|' - r'read-thru|load-thru|do-thru|launch-thru|load-image|' - r'request-download|do-face-alt|set-font|set-para|get-style|' - r'set-style|make-face|stylize|choose|hilight-text|hilight-all|' - 
r'unlight-text|focus|scroll-drag|clear-face|reset-face|scroll-face|' - r'resize-face|load-stock|load-stock-block|notify|request|flash|' - r'request-color|request-pass|request-text|request-list|' - r'request-date|request-file|dbug|editor|link-relative-path|' - r'emailer|parse-error)$', word): - yield match.start(), Keyword.Namespace, word - elif re.match( - r'(halt|quit|do|load|q|recycle|call|run|ask|parse|view|unview|' - r'return|exit|break)$', word): - yield match.start(), Name.Exception, word - elif re.match('REBOL$', word): - yield match.start(), Generic.Heading, word - elif re.match("to-.*", word): - yield match.start(), Keyword, word - elif re.match('(\+|-|\*|/|//|\*\*|and|or|xor|=\?|=|==|<>|<|>|<=|>=)$', - word): - yield match.start(), Operator, word - elif re.match(".*\?$", word): - yield match.start(), Keyword, word - elif re.match(".*\!$", word): - yield match.start(), Keyword.Type, word - elif re.match("'.*", word): - yield match.start(), Name.Variable.Instance, word # lit-word - elif re.match("#.*", word): - yield match.start(), Name.Label, word # issue - elif re.match("%.*", word): - yield match.start(), Name.Decorator, word # file - else: - yield match.start(), Name.Variable, word - - tokens = { - 'root': [ - (r'REBOL', Generic.Strong, 'script'), - (r'R', Comment), - (r'[^R]+', Comment), - ], - 'script': [ - (r'\s+', Text), - (r'#"', String.Char, 'char'), - (r'#{[0-9a-fA-F]*}', Number.Hex), - (r'2#{', Number.Hex, 'bin2'), - (r'64#{[0-9a-zA-Z+/=\s]*}', Number.Hex), - (r'"', String, 'string'), - (r'{', String, 'string2'), - (r';#+.*\n', Comment.Special), - (r';\*+.*\n', Comment.Preproc), - (r';.*\n', Comment), - (r'%"', Name.Decorator, 'stringFile'), - (r'%[^(\^{^")\s\[\]]+', Name.Decorator), - (r'[+-]?([a-zA-Z]{1,3})?\$\d+(\.\d+)?', Number.Float), # money - (r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other), # time - (r'\d+\-[0-9a-zA-Z]+\-\d+(\/\d+\:\d+(\:\d+)?' 
- r'([\.\d+]?([+-]?\d+:\d+)?)?)?', String.Other), # date - (r'\d+(\.\d+)+\.\d+', Keyword.Constant), # tuple - (r'\d+[xX]\d+', Keyword.Constant), # pair - (r'[+-]?\d+(\'\d+)?([\.,]\d*)?[eE][+-]?\d+', Number.Float), - (r'[+-]?\d+(\'\d+)?[\.,]\d*', Number.Float), - (r'[+-]?\d+(\'\d+)?', Number), - (r'[\[\]\(\)]', Generic.Strong), - (r'[a-zA-Z]+[^(\^{"\s:)]*://[^(\^{"\s)]*', Name.Decorator), # url - (r'mailto:[^(\^{"@\s)]+@[^(\^{"@\s)]+', Name.Decorator), # url - (r'[^(\^{"@\s)]+@[^(\^{"@\s)]+', Name.Decorator), # email - (r'comment\s', Comment, 'comment'), - (r'/[^(\^{^")\s/[\]]*', Name.Attribute), - (r'([^(\^{^")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback), - (r'<[a-zA-Z0-9:._-]*>', Name.Tag), - (r'<[^(<>\s")]+', Name.Tag, 'tag'), - (r'([^(\^{^")\s]+)', Text), - ], - 'string': [ - (r'[^(\^")]+', String), - (escape_re, String.Escape), - (r'[\(|\)]+', String), - (r'\^.', String.Escape), - (r'"', String, '#pop'), - ], - 'string2': [ - (r'[^(\^{^})]+', String), - (escape_re, String.Escape), - (r'[\(|\)]+', String), - (r'\^.', String.Escape), - (r'{', String, '#push'), - (r'}', String, '#pop'), - ], - 'stringFile': [ - (r'[^(\^")]+', Name.Decorator), - (escape_re, Name.Decorator), - (r'\^.', Name.Decorator), - (r'"', Name.Decorator, '#pop'), - ], - 'char': [ - (escape_re + '"', String.Char, '#pop'), - (r'\^."', String.Char, '#pop'), - (r'."', String.Char, '#pop'), - ], - 'tag': [ - (escape_re, Name.Tag), - (r'"', Name.Tag, 'tagString'), - (r'[^(<>\r\n")]+', Name.Tag), - (r'>', Name.Tag, '#pop'), - ], - 'tagString': [ - (r'[^(\^")]+', Name.Tag), - (escape_re, Name.Tag), - (r'[\(|\)]+', Name.Tag), - (r'\^.', Name.Tag), - (r'"', Name.Tag, '#pop'), - ], - 'tuple': [ - (r'(\d+\.)+', Keyword.Constant), - (r'\d+', Keyword.Constant, '#pop'), - ], - 'bin2': [ - (r'\s+', Number.Hex), - (r'([0-1]\s*){8}', Number.Hex), - (r'}', Number.Hex, '#pop'), - ], - 'comment': [ - (r'"', Comment, 'commentString1'), - (r'{', Comment, 'commentString2'), - (r'\[', Comment, 'commentBlock'), - 
(r'[^(\s{\"\[]+', Comment, '#pop'), - ], - 'commentString1': [ - (r'[^(\^")]+', Comment), - (escape_re, Comment), - (r'[\(|\)]+', Comment), - (r'\^.', Comment), - (r'"', Comment, '#pop'), - ], - 'commentString2': [ - (r'[^(\^{^})]+', Comment), - (escape_re, Comment), - (r'[\(|\)]+', Comment), - (r'\^.', Comment), - (r'{', Comment, '#push'), - (r'}', Comment, '#pop'), - ], - 'commentBlock': [ - (r'\[', Comment, '#push'), - (r'\]', Comment, '#pop'), - (r'[^(\[\])]+', Comment), - ], - } - - -class ABAPLexer(RegexLexer): - """ - Lexer for ABAP, SAP's integrated language. - - *New in Pygments 1.1.* - """ - name = 'ABAP' - aliases = ['abap'] - filenames = ['*.abap'] - mimetypes = ['text/x-abap'] - - flags = re.IGNORECASE | re.MULTILINE - - tokens = { - 'common': [ - (r'\s+', Text), - (r'^\*.*$', Comment.Single), - (r'\".*?\n', Comment.Single), - ], - 'variable-names': [ - (r'<[\S_]+>', Name.Variable), - (r'\w[\w~]*(?:(\[\])|->\*)?', Name.Variable), - ], - 'root': [ - include('common'), - #function calls - (r'(CALL\s+(?:BADI|CUSTOMER-FUNCTION|FUNCTION))(\s+)(\'?\S+\'?)', - bygroups(Keyword, Text, Name.Function)), - (r'(CALL\s+(?:DIALOG|SCREEN|SUBSCREEN|SELECTION-SCREEN|' - r'TRANSACTION|TRANSFORMATION))\b', - Keyword), - (r'(FORM|PERFORM)(\s+)(\w+)', - bygroups(Keyword, Text, Name.Function)), - (r'(PERFORM)(\s+)(\()(\w+)(\))', - bygroups(Keyword, Text, Punctuation, Name.Variable, Punctuation )), - (r'(MODULE)(\s+)(\S+)(\s+)(INPUT|OUTPUT)', - bygroups(Keyword, Text, Name.Function, Text, Keyword)), - - # method implementation - (r'(METHOD)(\s+)([\w~]+)', - bygroups(Keyword, Text, Name.Function)), - # method calls - (r'(\s+)([\w\-]+)([=\-]>)([\w\-~]+)', - bygroups(Text, Name.Variable, Operator, Name.Function)), - # call methodnames returning style - (r'(?<=(=|-)>)([\w\-~]+)(?=\()', Name.Function), - - # keywords with dashes in them. 
- # these need to be first, because for instance the -ID part - # of MESSAGE-ID wouldn't get highlighted if MESSAGE was - # first in the list of keywords. - (r'(ADD-CORRESPONDING|AUTHORITY-CHECK|' - r'CLASS-DATA|CLASS-EVENTS|CLASS-METHODS|CLASS-POOL|' - r'DELETE-ADJACENT|DIVIDE-CORRESPONDING|' - r'EDITOR-CALL|ENHANCEMENT-POINT|ENHANCEMENT-SECTION|EXIT-COMMAND|' - r'FIELD-GROUPS|FIELD-SYMBOLS|FUNCTION-POOL|' - r'INTERFACE-POOL|INVERTED-DATE|' - r'LOAD-OF-PROGRAM|LOG-POINT|' - r'MESSAGE-ID|MOVE-CORRESPONDING|MULTIPLY-CORRESPONDING|' - r'NEW-LINE|NEW-PAGE|NEW-SECTION|NO-EXTENSION|' - r'OUTPUT-LENGTH|PRINT-CONTROL|' - r'SELECT-OPTIONS|START-OF-SELECTION|SUBTRACT-CORRESPONDING|' - r'SYNTAX-CHECK|SYSTEM-EXCEPTIONS|' - r'TYPE-POOL|TYPE-POOLS' - r')\b', Keyword), - - # keyword kombinations - (r'CREATE\s+(PUBLIC|PRIVATE|DATA|OBJECT)|' - r'((PUBLIC|PRIVATE|PROTECTED)\s+SECTION|' - r'(TYPE|LIKE)(\s+(LINE\s+OF|REF\s+TO|' - r'(SORTED|STANDARD|HASHED)\s+TABLE\s+OF))?|' - r'FROM\s+(DATABASE|MEMORY)|CALL\s+METHOD|' - r'(GROUP|ORDER) BY|HAVING|SEPARATED BY|' - r'GET\s+(BADI|BIT|CURSOR|DATASET|LOCALE|PARAMETER|' - r'PF-STATUS|(PROPERTY|REFERENCE)\s+OF|' - r'RUN\s+TIME|TIME\s+(STAMP)?)?|' - r'SET\s+(BIT|BLANK\s+LINES|COUNTRY|CURSOR|DATASET|EXTENDED\s+CHECK|' - r'HANDLER|HOLD\s+DATA|LANGUAGE|LEFT\s+SCROLL-BOUNDARY|' - r'LOCALE|MARGIN|PARAMETER|PF-STATUS|PROPERTY\s+OF|' - r'RUN\s+TIME\s+(ANALYZER|CLOCK\s+RESOLUTION)|SCREEN|' - r'TITLEBAR|UPADTE\s+TASK\s+LOCAL|USER-COMMAND)|' - r'CONVERT\s+((INVERTED-)?DATE|TIME|TIME\s+STAMP|TEXT)|' - r'(CLOSE|OPEN)\s+(DATASET|CURSOR)|' - r'(TO|FROM)\s+(DATA BUFFER|INTERNAL TABLE|MEMORY ID|' - r'DATABASE|SHARED\s+(MEMORY|BUFFER))|' - r'DESCRIBE\s+(DISTANCE\s+BETWEEN|FIELD|LIST|TABLE)|' - r'FREE\s(MEMORY|OBJECT)?|' - r'PROCESS\s+(BEFORE\s+OUTPUT|AFTER\s+INPUT|' - r'ON\s+(VALUE-REQUEST|HELP-REQUEST))|' - r'AT\s+(LINE-SELECTION|USER-COMMAND|END\s+OF|NEW)|' - r'AT\s+SELECTION-SCREEN(\s+(ON(\s+(BLOCK|(HELP|VALUE)-REQUEST\s+FOR|' - 
r'END\s+OF|RADIOBUTTON\s+GROUP))?|OUTPUT))?|' - r'SELECTION-SCREEN:?\s+((BEGIN|END)\s+OF\s+((TABBED\s+)?BLOCK|LINE|' - r'SCREEN)|COMMENT|FUNCTION\s+KEY|' - r'INCLUDE\s+BLOCKS|POSITION|PUSHBUTTON|' - r'SKIP|ULINE)|' - r'LEAVE\s+(LIST-PROCESSING|PROGRAM|SCREEN|' - r'TO LIST-PROCESSING|TO TRANSACTION)' - r'(ENDING|STARTING)\s+AT|' - r'FORMAT\s+(COLOR|INTENSIFIED|INVERSE|HOTSPOT|INPUT|FRAMES|RESET)|' - r'AS\s+(CHECKBOX|SUBSCREEN|WINDOW)|' - r'WITH\s+(((NON-)?UNIQUE)?\s+KEY|FRAME)|' - r'(BEGIN|END)\s+OF|' - r'DELETE(\s+ADJACENT\s+DUPLICATES\sFROM)?|' - r'COMPARING(\s+ALL\s+FIELDS)?|' - r'INSERT(\s+INITIAL\s+LINE\s+INTO|\s+LINES\s+OF)?|' - r'IN\s+((BYTE|CHARACTER)\s+MODE|PROGRAM)|' - r'END-OF-(DEFINITION|PAGE|SELECTION)|' - r'WITH\s+FRAME(\s+TITLE)|' - - # simple kombinations - r'AND\s+(MARK|RETURN)|CLIENT\s+SPECIFIED|CORRESPONDING\s+FIELDS\s+OF|' - r'IF\s+FOUND|FOR\s+EVENT|INHERITING\s+FROM|LEAVE\s+TO\s+SCREEN|' - r'LOOP\s+AT\s+(SCREEN)?|LOWER\s+CASE|MATCHCODE\s+OBJECT|MODIF\s+ID|' - r'MODIFY\s+SCREEN|NESTING\s+LEVEL|NO\s+INTERVALS|OF\s+STRUCTURE|' - r'RADIOBUTTON\s+GROUP|RANGE\s+OF|REF\s+TO|SUPPRESS DIALOG|' - r'TABLE\s+OF|UPPER\s+CASE|TRANSPORTING\s+NO\s+FIELDS|' - r'VALUE\s+CHECK|VISIBLE\s+LENGTH|HEADER\s+LINE)\b', Keyword), - - # single word keywords. 
- (r'(^|(?<=(\s|\.)))(ABBREVIATED|ADD|ALIASES|APPEND|ASSERT|' - r'ASSIGN(ING)?|AT(\s+FIRST)?|' - r'BACK|BLOCK|BREAK-POINT|' - r'CASE|CATCH|CHANGING|CHECK|CLASS|CLEAR|COLLECT|COLOR|COMMIT|' - r'CREATE|COMMUNICATION|COMPONENTS?|COMPUTE|CONCATENATE|CONDENSE|' - r'CONSTANTS|CONTEXTS|CONTINUE|CONTROLS|' - r'DATA|DECIMALS|DEFAULT|DEFINE|DEFINITION|DEFERRED|DEMAND|' - r'DETAIL|DIRECTORY|DIVIDE|DO|' - r'ELSE(IF)?|ENDAT|ENDCASE|ENDCLASS|ENDDO|ENDFORM|ENDFUNCTION|' - r'ENDIF|ENDLOOP|ENDMETHOD|ENDMODULE|ENDSELECT|ENDTRY|' - r'ENHANCEMENT|EVENTS|EXCEPTIONS|EXIT|EXPORT|EXPORTING|EXTRACT|' - r'FETCH|FIELDS?|FIND|FOR|FORM|FORMAT|FREE|FROM|' - r'HIDE|' - r'ID|IF|IMPORT|IMPLEMENTATION|IMPORTING|IN|INCLUDE|INCLUDING|' - r'INDEX|INFOTYPES|INITIALIZATION|INTERFACE|INTERFACES|INTO|' - r'LENGTH|LINES|LOAD|LOCAL|' - r'JOIN|' - r'KEY|' - r'MAXIMUM|MESSAGE|METHOD[S]?|MINIMUM|MODULE|MODIFY|MOVE|MULTIPLY|' - r'NODES|' - r'OBLIGATORY|OF|OFF|ON|OVERLAY|' - r'PACK|PARAMETERS|PERCENTAGE|POSITION|PROGRAM|PROVIDE|PUBLIC|PUT|' - r'RAISE|RAISING|RANGES|READ|RECEIVE|REFRESH|REJECT|REPORT|RESERVE|' - r'RESUME|RETRY|RETURN|RETURNING|RIGHT|ROLLBACK|' - r'SCROLL|SEARCH|SELECT|SHIFT|SINGLE|SKIP|SORT|SPLIT|STATICS|STOP|' - r'SUBMIT|SUBTRACT|SUM|SUMMARY|SUMMING|SUPPLY|' - r'TABLE|TABLES|TIMES|TITLE|TO|TOP-OF-PAGE|TRANSFER|TRANSLATE|TRY|TYPES|' - r'ULINE|UNDER|UNPACK|UPDATE|USING|' - r'VALUE|VALUES|VIA|' - r'WAIT|WHEN|WHERE|WHILE|WITH|WINDOW|WRITE)\b', Keyword), - - # builtins - (r'(abs|acos|asin|atan|' - r'boolc|boolx|bit_set|' - r'char_off|charlen|ceil|cmax|cmin|condense|contains|' - r'contains_any_of|contains_any_not_of|concat_lines_of|cos|cosh|' - r'count|count_any_of|count_any_not_of|' - r'dbmaxlen|distance|' - r'escape|exp|' - r'find|find_end|find_any_of|find_any_not_of|floor|frac|from_mixed|' - r'insert|' - r'lines|log|log10|' - r'match|matches|' - r'nmax|nmin|numofchar|' - r'repeat|replace|rescale|reverse|round|' - r'segment|shift_left|shift_right|sign|sin|sinh|sqrt|strlen|' - 
r'substring|substring_after|substring_from|substring_before|substring_to|' - r'tan|tanh|to_upper|to_lower|to_mixed|translate|trunc|' - r'xstrlen)(\()\b', bygroups(Name.Builtin, Punctuation)), - - (r'&[0-9]', Name), - (r'[0-9]+', Number.Integer), - - # operators which look like variable names before - # parsing variable names. - (r'(?<=(\s|.))(AND|EQ|NE|GT|LT|GE|LE|CO|CN|CA|NA|CS|NOT|NS|CP|NP|' - r'BYTE-CO|BYTE-CN|BYTE-CA|BYTE-NA|BYTE-CS|BYTE-NS|' - r'IS\s+(NOT\s+)?(INITIAL|ASSIGNED|REQUESTED|BOUND))\b', Operator), - - include('variable-names'), - - # standard oparators after variable names, - # because < and > are part of field symbols. - (r'[?*<>=\-+]', Operator), - (r"'(''|[^'])*'", String.Single), - (r'[/;:()\[\],\.]', Punctuation) - ], - } - - -class NewspeakLexer(RegexLexer): - """ - For `Newspeak ` syntax. - """ - name = 'Newspeak' - filenames = ['*.ns2'] - aliases = ['newspeak', ] - mimetypes = ['text/x-newspeak'] - - tokens = { - 'root' : [ - (r'\b(Newsqueak2)\b',Keyword.Declaration), - (r"'[^']*'",String), - (r'\b(class)(\s+)([a-zA-Z0-9_]+)(\s*)', - bygroups(Keyword.Declaration,Text,Name.Class,Text)), - (r'\b(mixin|self|super|private|public|protected|nil|true|false)\b', - Keyword), - (r'([a-zA-Z0-9_]+\:)(\s*)([a-zA-Z_]\w+)', - bygroups(Name.Function,Text,Name.Variable)), - (r'([a-zA-Z0-9_]+)(\s*)(=)', - bygroups(Name.Attribute,Text,Operator)), - (r'<[a-zA-Z0-9_]+>', Comment.Special), - include('expressionstat'), - include('whitespace') - ], - - 'expressionstat': [ - (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), - (r'\d+', Number.Integer), - (r':\w+',Name.Variable), - (r'(\w+)(::)', bygroups(Name.Variable, Operator)), - (r'\w+:', Name.Function), - (r'\w+', Name.Variable), - (r'\(|\)', Punctuation), - (r'\[|\]', Punctuation), - (r'\{|\}', Punctuation), - - (r'(\^|\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-|:)', Operator), - (r'\.|;', Punctuation), - include('whitespace'), - include('literals'), - ], - 'literals': [ - (r'\$.', String), - (r"'[^']*'", String), - 
(r"#'[^']*'", String.Symbol), - (r"#\w+:?", String.Symbol), - (r"#(\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-)+", String.Symbol) - - ], - 'whitespace' : [ - (r'\s+', Text), - (r'"[^"]*"', Comment) - ] - } - - -class GherkinLexer(RegexLexer): - """ - For `Gherkin ` syntax. - - *New in Pygments 1.2.* - """ - name = 'Gherkin' - aliases = ['Cucumber', 'cucumber', 'Gherkin', 'gherkin'] - filenames = ['*.feature'] - mimetypes = ['text/x-gherkin'] - - feature_keywords = ur'^(기능|機能|功能|フィーチャ|خاصية|תכונה|Функціонал|Функционалност|Функционал|Фича|Особина|Могућност|Özellik|Właściwość|Tính năng|Trajto|Savybė|Požiadavka|Požadavek|Osobina|Ominaisuus|Omadus|OH HAI|Mogućnost|Mogucnost|Jellemző|Fīča|Funzionalità|Funktionalität|Funkcionalnost|Funkcionalitāte|Funcționalitate|Functionaliteit|Functionalitate|Funcionalitat|Funcionalidade|Fonctionnalité|Fitur|Feature|Egenskap|Egenskab|Crikey|Característica|Arwedd)(:)(.*)$' - feature_element_keywords = ur'^(\s*)(시나리오 개요|시나리오|배경|背景|場景大綱|場景|场景大纲|场景|劇本大綱|劇本|テンプレ|シナリオテンプレート|シナリオテンプレ|シナリオアウトライン|シナリオ|سيناريو مخطط|سيناريو|الخلفية|תרחיש|תבנית תרחיש|רקע|Тарих|Сценарій|Сценарио|Сценарий структураси|Сценарий|Структура сценарію|Структура сценарија|Структура сценария|Скица|Рамка на сценарий|Пример|Предыстория|Предистория|Позадина|Передумова|Основа|Концепт|Контекст|Założenia|Wharrimean is|Tình huống|The thing of it is|Tausta|Taust|Tapausaihio|Tapaus|Szenariogrundriss|Szenario|Szablon scenariusza|Stsenaarium|Struktura scenarija|Skica|Skenario konsep|Skenario|Situācija|Senaryo taslağı|Senaryo|Scénář|Scénario|Schema dello scenario|Scenārijs pēc parauga|Scenārijs|Scenár|Scenaro|Scenariusz|Scenariul de şablon|Scenariul de sablon|Scenariu|Scenario Outline|Scenario Amlinellol|Scenario|Scenarijus|Scenarijaus šablonas|Scenarij|Scenarie|Rerefons|Raamstsenaarium|Primer|Pozadí|Pozadina|Pozadie|Plan du scénario|Plan du Scénario|Osnova scénáře|Osnova|Náčrt Scénáře|Náčrt Scenáru|Mate|MISHUN SRSLY|MISHUN|Kịch bản|Konturo de la 
scenaro|Kontext|Konteksts|Kontekstas|Kontekst|Koncept|Khung tình huống|Khung kịch bản|Háttér|Grundlage|Geçmiş|Forgatókönyv vázlat|Forgatókönyv|Fono|Esquema do Cenário|Esquema do Cenario|Esquema del escenario|Esquema de l\'escenari|Escenario|Escenari|Dis is what went down|Dasar|Contexto|Contexte|Contesto|Condiţii|Conditii|Cenário|Cenario|Cefndir|Bối cảnh|Blokes|Bakgrunn|Bakgrund|Baggrund|Background|B4|Antecedents|Antecedentes|All y\'all|Achtergrond|Abstrakt Scenario|Abstract Scenario)(:)(.*)$' - examples_keywords = ur'^(\s*)(예|例子|例|サンプル|امثلة|דוגמאות|Сценарији|Примери|Приклади|Мисоллар|Значения|Örnekler|Voorbeelden|Variantai|Tapaukset|Scenarios|Scenariji|Scenarijai|Příklady|Példák|Príklady|Przykłady|Primjeri|Primeri|Piemēri|Pavyzdžiai|Paraugs|Juhtumid|Exemplos|Exemples|Exemplele|Exempel|Examples|Esempi|Enghreifftiau|Ekzemploj|Eksempler|Ejemplos|EXAMPLZ|Dữ liệu|Contoh|Cobber|Beispiele)(:)(.*)$' - step_keywords = ur'^(\s*)(하지만|조건|먼저|만일|만약|단|그리고|그러면|那麼|那么|而且|當|当|前提|假設|假如|但是|但し|並且|もし|ならば|ただし|しかし|かつ|و |متى |لكن |عندما |ثم |بفرض |اذاً |כאשר |וגם |בהינתן |אזי |אז |אבל |Якщо |Унда |То |Припустимо, що |Припустимо |Онда |Но |Нехай |Лекин |Когато |Када |Кад |К тому же |И |Задато |Задати |Задате |Если |Допустим |Дадено |Ва |Бирок |Аммо |Али |Але |Агар |А |І |Și |És |Zatati |Zakładając |Zadato |Zadate |Zadano |Zadani |Zadan |Youse know when youse got |Youse know like when |Yna |Ya know how |Ya gotta |Y |Wun |Wtedy |When y\'all |When |Wenn |WEN |Và |Ve |Und |Un |Thì |Then y\'all |Then |Tapi |Tak |Tada |Tad |Så |Stel |Soit |Siis |Si |Sed |Se |Quando |Quand |Quan |Pryd |Pokud |Pokiaľ |Però |Pero |Pak |Oraz |Onda |Ond |Oletetaan |Og |Och |O zaman |Når |När |Niin |Nhưng |N |Mutta |Men |Mas |Maka |Majd |Mais |Maar |Ma |Lorsque |Lorsqu\'|Kun |Kuid |Kui |Khi |Keď |Ketika |Když |Kaj |Kai |Kada |Kad |Jeżeli |Ja |Ir |I CAN HAZ |I |Ha |Givun |Givet |Given y\'all |Given |Gitt |Gegeven |Gegeben sei |Fakat |Eğer ki |Etant donné |Et |Então |Entonces |Entao |En |Eeldades |E |Duota |Dun |Donitaĵo 
|Donat |Donada |Do |Diyelim ki |Dengan |Den youse gotta |De |Dato |Dar |Dann |Dan |Dado |Dacă |Daca |DEN |Când |Cuando |Cho |Cept |Cand |Cal |But y\'all |But |Buh |Biết |Bet |BUT |Atès |Atunci |Atesa |Anrhegedig a |Angenommen |And y\'all |And |An |Ama |Als |Alors |Allora |Ali |Aleshores |Ale |Akkor |Aber |AN |A také |A |\* )' - - tokens = { - 'comments': [ - (r'#.*$', Comment), - ], - 'feature_elements' : [ - (step_keywords, Keyword, "step_content_stack"), - include('comments'), - (r"(\s|.)", Name.Function), - ], - 'feature_elements_on_stack' : [ - (step_keywords, Keyword, "#pop:2"), - include('comments'), - (r"(\s|.)", Name.Function), - ], - 'examples_table': [ - (r"\s+\|", Keyword, 'examples_table_header'), - include('comments'), - (r"(\s|.)", Name.Function), - ], - 'examples_table_header': [ - (r"\s+\|\s*$", Keyword, "#pop:2"), - include('comments'), - (r"\s*\|", Keyword), - (r"[^\|]", Name.Variable), - ], - 'scenario_sections_on_stack': [ - (feature_element_keywords, bygroups(Name.Function, Keyword, Keyword, Name.Function), "feature_elements_on_stack"), - ], - 'narrative': [ - include('scenario_sections_on_stack'), - (r"(\s|.)", Name.Function), - ], - 'table_vars': [ - (r'(<[^>]+>)', Name.Variable), - ], - 'numbers': [ - (r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', String), - ], - 'string': [ - include('table_vars'), - (r'(\s|.)', String), - ], - 'py_string': [ - (r'"""', Keyword, "#pop"), - include('string'), - ], - 'step_content_root':[ - (r"$", Keyword, "#pop"), - include('step_content'), - ], - 'step_content_stack':[ - (r"$", Keyword, "#pop:2"), - include('step_content'), - ], - 'step_content':[ - (r'"', Name.Function, "double_string"), - include('table_vars'), - include('numbers'), - include('comments'), - (r'(\s|.)', Name.Function), - ], - 'table_content': [ - (r"\s+\|\s*$", Keyword, "#pop"), - include('comments'), - (r"\s*\|", Keyword), - include('string'), - ], - 'double_string': [ - (r'"', Name.Function, "#pop"), - include('string'), - ], - 'root': [ - 
(r'\n', Name.Function), - include('comments'), - (r'"""', Keyword, "py_string"), - (r'\s+\|', Keyword, 'table_content'), - (r'"', Name.Function, "double_string"), - include('table_vars'), - include('numbers'), - (r'(\s*)(@[^@\r\n\t ]+)', bygroups(Name.Function, Name.Tag)), - (step_keywords, bygroups(Name.Function, Keyword), - 'step_content_root'), - (feature_keywords, bygroups(Keyword, Keyword, Name.Function), - 'narrative'), - (feature_element_keywords, - bygroups(Name.Function, Keyword, Keyword, Name.Function), - 'feature_elements'), - (examples_keywords, - bygroups(Name.Function, Keyword, Keyword, Name.Function), - 'examples_table'), - (r'(\s|.)', Name.Function), - ] - } - -class AsymptoteLexer(RegexLexer): - """ - For `Asymptote `_ source code. - - *New in Pygments 1.2.* - """ - name = 'Asymptote' - aliases = ['asy', 'asymptote'] - filenames = ['*.asy'] - mimetypes = ['text/x-asymptote'] - - #: optional Comment or Whitespace - _ws = r'(?:\s|//.*?\n|/\*.*?\*/)+' - - tokens = { - 'whitespace': [ - (r'\n', Text), - (r'\s+', Text), - (r'\\\n', Text), # line continuation - (r'//(\n|(.|\n)*?[^\\]\n)', Comment), - (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment), - ], - 'statements': [ - # simple string (TeX friendly) - (r'"(\\\\|\\"|[^"])*"', String), - # C style string (with character escapes) - (r"'", String, 'string'), - (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float), - (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), - (r'0x[0-9a-fA-F]+[Ll]?', Number.Hex), - (r'0[0-7]+[Ll]?', Number.Oct), - (r'\d+[Ll]?', Number.Integer), - (r'[~!%^&*+=|?:<>/-]', Operator), - (r'[()\[\],.]', Punctuation), - (r'\b(case)(.+?)(:)', bygroups(Keyword, using(this), Text)), - (r'(and|controls|tension|atleast|curl|if|else|while|for|do|' - r'return|break|continue|struct|typedef|new|access|import|' - r'unravel|from|include|quote|static|public|private|restricted|' - r'this|explicit|true|false|null|cycle|newframe|operator)\b', Keyword), - # Since an asy-type-name can be also an 
asy-function-name, - # in the following we test if the string " [a-zA-Z]" follows - # the Keyword.Type. - # Of course it is not perfect ! - (r'(Braid|FitResult|Label|Legend|TreeNode|abscissa|arc|arrowhead|' - r'binarytree|binarytreeNode|block|bool|bool3|bounds|bqe|circle|' - r'conic|coord|coordsys|cputime|ellipse|file|filltype|frame|grid3|' - r'guide|horner|hsv|hyperbola|indexedTransform|int|inversion|key|' - r'light|line|linefit|marginT|marker|mass|object|pair|parabola|path|' - r'path3|pen|picture|point|position|projection|real|revolution|' - r'scaleT|scientific|segment|side|slice|splitface|string|surface|' - r'tensionSpecifier|ticklocate|ticksgridT|tickvalues|transform|' - r'transformation|tree|triangle|trilinear|triple|vector|' - r'vertex|void)(?=([ ]{1,}[a-zA-Z]))', Keyword.Type), - # Now the asy-type-name which are not asy-function-name - # except yours ! - # Perhaps useless - (r'(Braid|FitResult|TreeNode|abscissa|arrowhead|block|bool|bool3|' - r'bounds|coord|frame|guide|horner|int|linefit|marginT|pair|pen|' - r'picture|position|real|revolution|slice|splitface|ticksgridT|' - r'tickvalues|tree|triple|vertex|void)\b', Keyword.Type), - ('[a-zA-Z_][a-zA-Z0-9_]*:(?!:)', Name.Label), - ('[a-zA-Z_][a-zA-Z0-9_]*', Name), - ], - 'root': [ - include('whitespace'), - # functions - (r'((?:[a-zA-Z0-9_*\s])+?(?:\s|\*))' # return arguments - r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name - r'(\s*\([^;]*?\))' # signature - r'(' + _ws + r')({)', - bygroups(using(this), Name.Function, using(this), using(this), - Punctuation), - 'function'), - # function declarations - (r'((?:[a-zA-Z0-9_*\s])+?(?:\s|\*))' # return arguments - r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name - r'(\s*\([^;]*?\))' # signature - r'(' + _ws + r')(;)', - bygroups(using(this), Name.Function, using(this), using(this), - Punctuation)), - ('', Text, 'statement'), - ], - 'statement' : [ - include('whitespace'), - include('statements'), - ('[{}]', Punctuation), - (';', Punctuation, '#pop'), - ], - 'function': [ - 
include('whitespace'), - include('statements'), - (';', Punctuation), - ('{', Punctuation, '#push'), - ('}', Punctuation, '#pop'), - ], - 'string': [ - (r"'", String, '#pop'), - (r'\\([\\abfnrtv"\'?]|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), - (r'\n', String), - (r"[^\\'\n]+", String), # all other characters - (r'\\\n', String), - (r'\\n', String), # line continuation - (r'\\', String), # stray backslash - ] - } - - def get_tokens_unprocessed(self, text): - from pygments.lexers._asybuiltins import ASYFUNCNAME, ASYVARNAME - for index, token, value in \ - RegexLexer.get_tokens_unprocessed(self, text): - if token is Name and value in ASYFUNCNAME: - token = Name.Function - elif token is Name and value in ASYVARNAME: - token = Name.Variable - yield index, token, value - - -class PostScriptLexer(RegexLexer): - """ - Lexer for PostScript files. - - The PostScript Language Reference published by Adobe at - - is the authority for this. - - *New in Pygments 1.4.* - """ - name = 'PostScript' - aliases = ['postscript', 'postscr'] - filenames = ['*.ps', '*.eps'] - mimetypes = ['application/postscript'] - - delimiter = r'\(\)\<\>\[\]\{\}\/\%\s' - delimiter_end = r'(?=[%s])' % delimiter - - valid_name_chars = r'[^%s]' % delimiter - valid_name = r"%s+%s" % (valid_name_chars, delimiter_end) - - tokens = { - 'root': [ - # All comment types - (r'^%!.+\n', Comment.Preproc), - (r'%%.*\n', Comment.Special), - (r'(^%.*\n){2,}', Comment.Multiline), - (r'%.*\n', Comment.Single), - - # String literals are awkward; enter separate state. - (r'\(', String, 'stringliteral'), - - (r'[\{\}(\<\<)(\>\>)\[\]]', Punctuation), - - # Numbers - (r'<[0-9A-Fa-f]+>' + delimiter_end, Number.Hex), - # Slight abuse: use Oct to signify any explicit base system - (r'[0-9]+\#(\-|\+)?([0-9]+\.?|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)' - r'((e|E)[0-9]+)?' + delimiter_end, Number.Oct), - (r'(\-|\+)?([0-9]+\.?|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)((e|E)[0-9]+)?' 
- + delimiter_end, Number.Float), - (r'(\-|\+)?[0-9]+' + delimiter_end, Number.Integer), - - # References - (r'\/%s' % valid_name, Name.Variable), - - # Names - (valid_name, Name.Function), # Anything else is executed - - # These keywords taken from - # - # Is there an authoritative list anywhere that doesn't involve - # trawling documentation? - - (r'(false|true)' + delimiter_end, Keyword.Constant), - - # Conditionals / flow control - (r'(eq|ne|ge|gt|le|lt|and|or|not|if|ifelse|for|forall)' - + delimiter_end, Keyword.Reserved), - - ('(abs|add|aload|arc|arcn|array|atan|begin|bind|ceiling|charpath|' - 'clip|closepath|concat|concatmatrix|copy|cos|currentlinewidth|' - 'currentmatrix|currentpoint|curveto|cvi|cvs|def|defaultmatrix|' - 'dict|dictstackoverflow|div|dtransform|dup|end|exch|exec|exit|exp|' - 'fill|findfont|floor|get|getinterval|grestore|gsave|gt|' - 'identmatrix|idiv|idtransform|index|invertmatrix|itransform|' - 'length|lineto|ln|load|log|loop|matrix|mod|moveto|mul|neg|newpath|' - 'pathforall|pathbbox|pop|print|pstack|put|quit|rand|rangecheck|' - 'rcurveto|repeat|restore|rlineto|rmoveto|roll|rotate|round|run|' - 'save|scale|scalefont|setdash|setfont|setgray|setlinecap|' - 'setlinejoin|setlinewidth|setmatrix|setrgbcolor|shfill|show|' - 'showpage|sin|sqrt|stack|stringwidth|stroke|strokepath|sub|' - 'syntaxerror|transform|translate|truncate|typecheck|undefined|' - 'undefinedfilename|undefinedresult)' + delimiter_end, - Name.Builtin), - - (r'\s+', Text), - ], - - 'stringliteral': [ - (r'[^\(\)\\]+', String), - (r'\\', String.Escape, 'escape'), - (r'\(', String, '#push'), - (r'\)', String, '#pop'), - ], - - 'escape': [ - (r'([0-8]{3}|n|r|t|b|f|\\|\(|\))?', String.Escape, '#pop'), - ], - } - - -class AutohotkeyLexer(RegexLexer): - """ - For `autohotkey `_ source code. 
- - *New in Pygments 1.4.* - """ - name = 'autohotkey' - aliases = ['ahk', 'autohotkey'] - filenames = ['*.ahk', '*.ahkl'] - mimetypes = ['text/x-autohotkey'] - - tokens = { - 'root': [ - (r'^(\s*)(/\*)', bygroups(Text, Comment.Multiline), - 'incomment'), - (r'^(\s*)(\()', bygroups(Text, Generic), 'incontinuation'), - (r'\s+;.*?$', Comment.Singleline), - (r'^;.*?$', Comment.Singleline), - (r'[]{}(),;[]', Punctuation), - (r'(in|is|and|or|not)\b', Operator.Word), - (r'\%[a-zA-Z_#@$][a-zA-Z0-9_#@$]*\%', Name.Variable), - (r'!=|==|:=|\.=|<<|>>|[-~+/*%=<>&^|?:!.]', Operator), - include('commands'), - include('labels'), - include('builtInFunctions'), - include('builtInVariables'), - (r'"', String, combined('stringescape', 'dqs')), - include('numbers'), - (r'[a-zA-Z_#@$][a-zA-Z0-9_#@$]*', Name), - (r'\\|\'', Text), - (r'\`([\,\%\`abfnrtv\-\+;])', String.Escape), - include('garbage'), - ], - 'incomment': [ - (r'^\s*\*/', Comment.Multiline, '#pop'), - (r'[^*/]', Comment.Multiline), - (r'[*/]', Comment.Multiline) - ], - 'incontinuation': [ - (r'^\s*\)', Generic, '#pop'), - (r'[^)]', Generic), - (r'[)]', Generic), - ], - 'commands': [ - (r'(?i)^(\s*)(global|local|static|' - r'#AllowSameLineComments|#ClipboardTimeout|#CommentFlag|' - r'#ErrorStdOut|#EscapeChar|#HotkeyInterval|#HotkeyModifierTimeout|' - r'#Hotstring|#IfWinActive|#IfWinExist|#IfWinNotActive|' - r'#IfWinNotExist|#IncludeAgain|#Include|#InstallKeybdHook|' - r'#InstallMouseHook|#KeyHistory|#LTrim|#MaxHotkeysPerInterval|' - r'#MaxMem|#MaxThreads|#MaxThreadsBuffer|#MaxThreadsPerHotkey|' - r'#NoEnv|#NoTrayIcon|#Persistent|#SingleInstance|#UseHook|' - r'#WinActivateForce|AutoTrim|BlockInput|Break|Click|ClipWait|' - r'Continue|Control|ControlClick|ControlFocus|ControlGetFocus|' - r'ControlGetPos|ControlGetText|ControlGet|ControlMove|ControlSend|' - r'ControlSendRaw|ControlSetText|CoordMode|Critical|' - r'DetectHiddenText|DetectHiddenWindows|Drive|DriveGet|' - 
r'DriveSpaceFree|Edit|Else|EnvAdd|EnvDiv|EnvGet|EnvMult|EnvSet|' - r'EnvSub|EnvUpdate|Exit|ExitApp|FileAppend|' - r'FileCopy|FileCopyDir|FileCreateDir|FileCreateShortcut|' - r'FileDelete|FileGetAttrib|FileGetShortcut|FileGetSize|' - r'FileGetTime|FileGetVersion|FileInstall|FileMove|FileMoveDir|' - r'FileRead|FileReadLine|FileRecycle|FileRecycleEmpty|' - r'FileRemoveDir|FileSelectFile|FileSelectFolder|FileSetAttrib|' - r'FileSetTime|FormatTime|GetKeyState|Gosub|Goto|GroupActivate|' - r'GroupAdd|GroupClose|GroupDeactivate|Gui|GuiControl|' - r'GuiControlGet|Hotkey|IfEqual|IfExist|IfGreaterOrEqual|IfGreater|' - r'IfInString|IfLess|IfLessOrEqual|IfMsgBox|IfNotEqual|IfNotExist|' - r'IfNotInString|IfWinActive|IfWinExist|IfWinNotActive|' - r'IfWinNotExist|If |ImageSearch|IniDelete|IniRead|IniWrite|' - r'InputBox|Input|KeyHistory|KeyWait|ListHotkeys|ListLines|' - r'ListVars|Loop|Menu|MouseClickDrag|MouseClick|MouseGetPos|' - r'MouseMove|MsgBox|OnExit|OutputDebug|Pause|PixelGetColor|' - r'PixelSearch|PostMessage|Process|Progress|Random|RegDelete|' - r'RegRead|RegWrite|Reload|Repeat|Return|RunAs|RunWait|Run|' - r'SendEvent|SendInput|SendMessage|SendMode|SendPlay|SendRaw|Send|' - r'SetBatchLines|SetCapslockState|SetControlDelay|' - r'SetDefaultMouseSpeed|SetEnv|SetFormat|SetKeyDelay|' - r'SetMouseDelay|SetNumlockState|SetScrollLockState|' - r'SetStoreCapslockMode|SetTimer|SetTitleMatchMode|' - r'SetWinDelay|SetWorkingDir|Shutdown|Sleep|Sort|SoundBeep|' - r'SoundGet|SoundGetWaveVolume|SoundPlay|SoundSet|' - r'SoundSetWaveVolume|SplashImage|SplashTextOff|SplashTextOn|' - r'SplitPath|StatusBarGetText|StatusBarWait|StringCaseSense|' - r'StringGetPos|StringLeft|StringLen|StringLower|StringMid|' - r'StringReplace|StringRight|StringSplit|StringTrimLeft|' - r'StringTrimRight|StringUpper|Suspend|SysGet|Thread|ToolTip|' - r'Transform|TrayTip|URLDownloadToFile|While|WinActivate|' - r'WinActivateBottom|WinClose|WinGetActiveStats|WinGetActiveTitle|' - 
r'WinGetClass|WinGetPos|WinGetText|WinGetTitle|WinGet|WinHide|' - r'WinKill|WinMaximize|WinMenuSelectItem|WinMinimizeAllUndo|' - r'WinMinimizeAll|WinMinimize|WinMove|WinRestore|WinSetTitle|' - r'WinSet|WinShow|WinWaitActive|WinWaitClose|WinWaitNotActive|' - r'WinWait)\b', bygroups(Text, Name.Builtin)), - ], - 'builtInFunctions': [ - (r'(?i)(Abs|ACos|Asc|ASin|ATan|Ceil|Chr|Cos|DllCall|Exp|FileExist|' - r'Floor|GetKeyState|IL_Add|IL_Create|IL_Destroy|InStr|IsFunc|' - r'IsLabel|Ln|Log|LV_Add|LV_Delete|LV_DeleteCol|LV_GetCount|' - r'LV_GetNext|LV_GetText|LV_Insert|LV_InsertCol|LV_Modify|' - r'LV_ModifyCol|LV_SetImageList|Mod|NumGet|NumPut|OnMessage|' - r'RegExMatch|RegExReplace|RegisterCallback|Round|SB_SetIcon|' - r'SB_SetParts|SB_SetText|Sin|Sqrt|StrLen|SubStr|Tan|TV_Add|' - r'TV_Delete|TV_GetChild|TV_GetCount|TV_GetNext|TV_Get|' - r'TV_GetParent|TV_GetPrev|TV_GetSelection|TV_GetText|TV_Modify|' - r'VarSetCapacity|WinActive|WinExist|Object|ComObjActive|' - r'ComObjArray|ComObjEnwrap|ComObjUnwrap|ComObjParameter|' - r'ComObjType|ComObjConnect|ComObjCreate|ComObjGet|ComObjError|' - r'ComObjValue|Insert|MinIndex|MaxIndex|Remove|SetCapacity|' - r'GetCapacity|GetAddress|_NewEnum|FileOpen|Read|Write|ReadLine|' - r'WriteLine|ReadNumType|WriteNumType|RawRead|RawWrite|Seek|Tell|' - r'Close|Next|IsObject|StrPut|StrGet|Trim|LTrim|RTrim)\b', - Name.Function), - ], - 'builtInVariables': [ - (r'(?i)(A_AhkPath|A_AhkVersion|A_AppData|A_AppDataCommon|' - r'A_AutoTrim|A_BatchLines|A_CaretX|A_CaretY|A_ComputerName|' - r'A_ControlDelay|A_Cursor|A_DDDD|A_DDD|A_DD|A_DefaultMouseSpeed|' - r'A_Desktop|A_DesktopCommon|A_DetectHiddenText|' - r'A_DetectHiddenWindows|A_EndChar|A_EventInfo|A_ExitReason|' - r'A_FormatFloat|A_FormatInteger|A_Gui|A_GuiEvent|A_GuiControl|' - r'A_GuiControlEvent|A_GuiHeight|A_GuiWidth|A_GuiX|A_GuiY|A_Hour|' - r'A_IconFile|A_IconHidden|A_IconNumber|A_IconTip|A_Index|' - r'A_IPAddress1|A_IPAddress2|A_IPAddress3|A_IPAddress4|A_ISAdmin|' - 
r'A_IsCompiled|A_IsCritical|A_IsPaused|A_IsSuspended|A_KeyDelay|' - r'A_Language|A_LastError|A_LineFile|A_LineNumber|A_LoopField|' - r'A_LoopFileAttrib|A_LoopFileDir|A_LoopFileExt|A_LoopFileFullPath|' - r'A_LoopFileLongPath|A_LoopFileName|A_LoopFileShortName|' - r'A_LoopFileShortPath|A_LoopFileSize|A_LoopFileSizeKB|' - r'A_LoopFileSizeMB|A_LoopFileTimeAccessed|A_LoopFileTimeCreated|' - r'A_LoopFileTimeModified|A_LoopReadLine|A_LoopRegKey|' - r'A_LoopRegName|A_LoopRegSubkey|A_LoopRegTimeModified|' - r'A_LoopRegType|A_MDAY|A_Min|A_MM|A_MMM|A_MMMM|A_Mon|A_MouseDelay|' - r'A_MSec|A_MyDocuments|A_Now|A_NowUTC|A_NumBatchLines|A_OSType|' - r'A_OSVersion|A_PriorHotkey|A_ProgramFiles|A_Programs|' - r'A_ProgramsCommon|A_ScreenHeight|A_ScreenWidth|A_ScriptDir|' - r'A_ScriptFullPath|A_ScriptName|A_Sec|A_Space|A_StartMenu|' - r'A_StartMenuCommon|A_Startup|A_StartupCommon|A_StringCaseSense|' - r'A_Tab|A_Temp|A_ThisFunc|A_ThisHotkey|A_ThisLabel|A_ThisMenu|' - r'A_ThisMenuItem|A_ThisMenuItemPos|A_TickCount|A_TimeIdle|' - r'A_TimeIdlePhysical|A_TimeSincePriorHotkey|A_TimeSinceThisHotkey|' - r'A_TitleMatchMode|A_TitleMatchModeSpeed|A_UserName|A_WDay|' - r'A_WinDelay|A_WinDir|A_WorkingDir|A_YDay|A_YEAR|A_YWeek|A_YYYY|' - r'Clipboard|ClipboardAll|ComSpec|ErrorLevel|ProgramFiles|True|' - r'False|A_IsUnicode|A_FileEncoding|A_OSVersion|A_PtrSize)\b', - Name.Variable), - ], - 'labels': [ - # hotkeys and labels - # technically, hotkey names are limited to named keys and buttons - (r'(^\s*)([^:\s\(\"]+?:{1,2})', bygroups(Text, Name.Label)), - (r'(^\s*)(::[^:\s]+?::)', bygroups(Text, Name.Label)), - ], - 'numbers': [ - (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float), - (r'\d+[eE][+-]?[0-9]+', Number.Float), - (r'0\d+', Number.Oct), - (r'0[xX][a-fA-F0-9]+', Number.Hex), - (r'\d+L', Number.Integer.Long), - (r'\d+', Number.Integer) - ], - 'stringescape': [ - (r'\"\"|\`([\,\%\`abfnrtv])', String.Escape), - ], - 'strings': [ - (r'[^"\n]+', String), - ], - 'dqs': [ - (r'"', String, 
'#pop'), - include('strings') - ], - 'garbage': [ - (r'[^\S\n]', Text), - # (r'.', Text), # no cheating - ], - } - - -class MaqlLexer(RegexLexer): - """ - Lexer for `GoodData MAQL - `_ - scripts. - - *New in Pygments 1.4.* - """ - - name = 'MAQL' - aliases = ['maql'] - filenames = ['*.maql'] - mimetypes = ['text/x-gooddata-maql','application/x-gooddata-maql'] - - flags = re.IGNORECASE - tokens = { - 'root': [ - # IDENTITY - (r'IDENTIFIER\b', Name.Builtin), - # IDENTIFIER - (r'\{[^}]+\}', Name.Variable), - # NUMBER - (r'[0-9]+(?:\.[0-9]+)?(?:[eE][+-]?[0-9]{1,3})?', Literal.Number), - # STRING - (r'"', Literal.String, 'string-literal'), - # RELATION - (r'\<\>|\!\=', Operator), - (r'\=|\>\=|\>|\<\=|\<', Operator), - # := - (r'\:\=', Operator), - # OBJECT - (r'\[[^]]+\]', Name.Variable.Class), - # keywords - (r'(DIMENSIONS?|BOTTOM|METRIC|COUNT|OTHER|FACT|WITH|TOP|OR|' - r'ATTRIBUTE|CREATE|PARENT|FALSE|ROWS?|FROM|ALL|AS|PF|' - r'COLUMNS?|DEFINE|REPORT|LIMIT|TABLE|LIKE|AND|BY|' - r'BETWEEN|EXCEPT|SELECT|MATCH|WHERE|TRUE|FOR|IN|' - r'WITHOUT|FILTER|ALIAS|ORDER|FACT|WHEN|NOT|ON|' - r'KEYS|KEY|FULLSET|PRIMARY|LABELS|LABEL|VISUAL|' - r'TITLE|DESCRIPTION|FOLDER|ALTER|DROP|ADD|DATASET|' - r'DATATYPE|INT|BIGINT|DOUBLE|DATE|VARCHAR|DECIMAL|' - r'SYNCHRONIZE|TYPE|DEFAULT|ORDER|ASC|DESC|HYPERLINK|' - r'INCLUDE|TEMPLATE|MODIFY)\b', Keyword), - # FUNCNAME - (r'[a-zA-Z]\w*\b', Name.Function), - # Comments - (r'#.*', Comment.Single), - # Punctuation - (r'[,;\(\)]', Token.Punctuation), - # Space is not significant - (r'\s+', Text) - ], - 'string-literal': [ - (r'\\[tnrfbae"\\]', String.Escape), - (r'"', Literal.String, '#pop'), - (r'[^\\"]+', Literal.String) - ], - } - - -class GoodDataCLLexer(RegexLexer): - """ - Lexer for `GoodData-CL `_ - script files. 
- - *New in Pygments 1.4.* - """ - - name = 'GoodData-CL' - aliases = ['gooddata-cl'] - filenames = ['*.gdc'] - mimetypes = ['text/x-gooddata-cl'] - - flags = re.IGNORECASE - tokens = { - 'root': [ - # Comments - (r'#.*', Comment.Single), - # Function call - (r'[a-zA-Z]\w*', Name.Function), - # Argument list - (r'\(', Token.Punctuation, 'args-list'), - # Punctuation - (r';', Token.Punctuation), - # Space is not significant - (r'\s+', Text) - ], - 'args-list': [ - (r'\)', Token.Punctuation, '#pop'), - (r',', Token.Punctuation), - (r'[a-zA-Z]\w*', Name.Variable), - (r'=', Operator), - (r'"', Literal.String, 'string-literal'), - (r'[0-9]+(?:\.[0-9]+)?(?:[eE][+-]?[0-9]{1,3})?', Literal.Number), - # Space is not significant - (r'\s', Text) - ], - 'string-literal': [ - (r'\\[tnrfbae"\\]', String.Escape), - (r'"', Literal.String, '#pop'), - (r'[^\\"]+', Literal.String) - ] - } - - -class ProtoBufLexer(RegexLexer): - """ - Lexer for `Protocol Buffer `_ - definition files. - - *New in Pygments 1.4.* - """ - - name = 'Protocol Buffer' - aliases = ['protobuf', 'proto'] - filenames = ['*.proto'] - - tokens = { - 'root': [ - (r'[ \t]+', Text), - (r'[,;{}\[\]\(\)]', Punctuation), - (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single), - (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline), - (r'\b(import|option|optional|required|repeated|default|packed|' - r'ctype|extensions|to|max|rpc|returns)\b', Keyword), - (r'(int32|int64|uint32|uint64|sint32|sint64|' - r'fixed32|fixed64|sfixed32|sfixed64|' - r'float|double|bool|string|bytes)\b', Keyword.Type), - (r'(true|false)\b', Keyword.Constant), - (r'(package)(\s+)', bygroups(Keyword.Namespace, Text), 'package'), - (r'(message|extend)(\s+)', - bygroups(Keyword.Declaration, Text), 'message'), - (r'(enum|group|service)(\s+)', - bygroups(Keyword.Declaration, Text), 'type'), - (r'\".*\"', String), - (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float), - (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), - (r'(\-?(inf|nan))', 
Number.Float), - (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex), - (r'0[0-7]+[LlUu]*', Number.Oct), - (r'\d+[LlUu]*', Number.Integer), - (r'[+-=]', Operator), - (r'([a-zA-Z_][a-zA-Z0-9_\.]*)([ \t]*)(=)', - bygroups(Name.Attribute, Text, Operator)), - ('[a-zA-Z_][a-zA-Z0-9_\.]*', Name), - ], - 'package': [ - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Namespace, '#pop') - ], - 'message': [ - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop') - ], - 'type': [ - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name, '#pop') - ], - } - - -class HybrisLexer(RegexLexer): - """ - For `Hybris `_ source code. - - *New in Pygments 1.4.* - """ - - name = 'Hybris' - aliases = ['hybris', 'hy'] - filenames = ['*.hy', '*.hyb'] - mimetypes = ['text/x-hybris', 'application/x-hybris'] - - flags = re.MULTILINE | re.DOTALL - - tokens = { - 'root': [ - # method names - (r'^(\s*(?:function|method|operator\s+)+?)' - r'([a-zA-Z_][a-zA-Z0-9_]*)' - r'(\s*)(\()', bygroups(Keyword, Name.Function, Text, Operator)), - (r'[^\S\n]+', Text), - (r'//.*?\n', Comment.Single), - (r'/\*.*?\*/', Comment.Multiline), - (r'@[a-zA-Z_][a-zA-Z0-9_\.]*', Name.Decorator), - (r'(break|case|catch|next|default|do|else|finally|for|foreach|of|' - r'unless|if|new|return|switch|me|throw|try|while)\b', Keyword), - (r'(extends|private|protected|public|static|throws|function|method|' - r'operator)\b', Keyword.Declaration), - (r'(true|false|null|__FILE__|__LINE__|__VERSION__|__LIB_PATH__|' - r'__INC_PATH__)\b', Keyword.Constant), - (r'(class|struct)(\s+)', - bygroups(Keyword.Declaration, Text), 'class'), - (r'(import|include)(\s+)', - bygroups(Keyword.Namespace, Text), 'import'), - (r'(gc_collect|gc_mm_items|gc_mm_usage|gc_collect_threshold|' - r'urlencode|urldecode|base64encode|base64decode|sha1|crc32|sha2|' - r'md5|md5_file|acos|asin|atan|atan2|ceil|cos|cosh|exp|fabs|floor|' - r'fmod|log|log10|pow|sin|sinh|sqrt|tan|tanh|isint|isfloat|ischar|' - r'isstring|isarray|ismap|isalias|typeof|sizeof|toint|tostring|' - 
r'fromxml|toxml|binary|pack|load|eval|var_names|var_values|' - r'user_functions|dyn_functions|methods|call|call_method|mknod|' - r'mkfifo|mount|umount2|umount|ticks|usleep|sleep|time|strtime|' - r'strdate|dllopen|dlllink|dllcall|dllcall_argv|dllclose|env|exec|' - r'fork|getpid|wait|popen|pclose|exit|kill|pthread_create|' - r'pthread_create_argv|pthread_exit|pthread_join|pthread_kill|' - r'smtp_send|http_get|http_post|http_download|socket|bind|listen|' - r'accept|getsockname|getpeername|settimeout|connect|server|recv|' - r'send|close|print|println|printf|input|readline|serial_open|' - r'serial_fcntl|serial_get_attr|serial_get_ispeed|serial_get_ospeed|' - r'serial_set_attr|serial_set_ispeed|serial_set_ospeed|serial_write|' - r'serial_read|serial_close|xml_load|xml_parse|fopen|fseek|ftell|' - r'fsize|fread|fwrite|fgets|fclose|file|readdir|pcre_replace|size|' - r'pop|unmap|has|keys|values|length|find|substr|replace|split|trim|' - r'remove|contains|join)\b', Name.Builtin), - (r'(MethodReference|Runner|Dll|Thread|Pipe|Process|Runnable|' - r'CGI|ClientSocket|Socket|ServerSocket|File|Console|Directory|' - r'Exception)\b', Keyword.Type), - (r'"(\\\\|\\"|[^"])*"', String), - (r"'\\.'|'[^\\]'|'\\u[0-9a-f]{4}'", String.Char), - (r'(\.)([a-zA-Z_][a-zA-Z0-9_]*)', - bygroups(Operator, Name.Attribute)), - (r'[a-zA-Z_][a-zA-Z0-9_]*:', Name.Label), - (r'[a-zA-Z_\$][a-zA-Z0-9_]*', Name), - (r'[~\^\*!%&\[\]\(\)\{\}<>\|+=:;,./?\-@]+', Operator), - (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), - (r'0x[0-9a-f]+', Number.Hex), - (r'[0-9]+L?', Number.Integer), - (r'\n', Text), - ], - 'class': [ - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop') - ], - 'import': [ - (r'[a-zA-Z0-9_.]+\*?', Name.Namespace, '#pop') - ], - } - - -class AwkLexer(RegexLexer): - """ - For Awk scripts. 
- - *New in Pygments 1.5.* - """ - - name = 'Awk' - aliases = ['awk', 'gawk', 'mawk', 'nawk'] - filenames = ['*.awk'] - mimetypes = ['application/x-awk'] - - tokens = { - 'commentsandwhitespace': [ - (r'\s+', Text), - (r'#.*$', Comment.Single) - ], - 'slashstartsregex': [ - include('commentsandwhitespace'), - (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' - r'\B', String.Regex, '#pop'), - (r'(?=/)', Text, ('#pop', 'badregex')), - (r'', Text, '#pop') - ], - 'badregex': [ - (r'\n', Text, '#pop') - ], - 'root': [ - (r'^(?=\s|/)', Text, 'slashstartsregex'), - include('commentsandwhitespace'), - (r'\+\+|--|\|\||&&|in|\$|!?~|' - r'(\*\*|[-<>+*%\^/!=])=?', Operator, 'slashstartsregex'), - (r'[{(\[;,]', Punctuation, 'slashstartsregex'), - (r'[})\].]', Punctuation), - (r'(break|continue|do|while|exit|for|if|' - r'return)\b', Keyword, 'slashstartsregex'), - (r'function\b', Keyword.Declaration, 'slashstartsregex'), - (r'(atan2|cos|exp|int|log|rand|sin|sqrt|srand|gensub|gsub|index|' - r'length|match|split|sprintf|sub|substr|tolower|toupper|close|' - r'fflush|getline|next|nextfile|print|printf|strftime|systime|' - r'delete|system)\b', Keyword.Reserved), - (r'(ARGC|ARGIND|ARGV|CONVFMT|ENVIRON|ERRNO|FIELDWIDTHS|FILENAME|FNR|FS|' - r'IGNORECASE|NF|NR|OFMT|OFS|ORFS|RLENGTH|RS|RSTART|RT|' - r'SUBSEP)\b', Name.Builtin), - (r'[$a-zA-Z_][a-zA-Z0-9_]*', Name.Other), - (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), - (r'0x[0-9a-fA-F]+', Number.Hex), - (r'[0-9]+', Number.Integer), - (r'"(\\\\|\\"|[^"])*"', String.Double), - (r"'(\\\\|\\'|[^'])*'", String.Single), - ] - } - - -class Cfengine3Lexer(RegexLexer): - """ - Lexer for `CFEngine3 `_ policy files. 
- - *New in Pygments 1.5.* - """ - - name = 'CFEngine3' - aliases = ['cfengine3', 'cf3'] - filenames = ['*.cf'] - mimetypes = [] - - tokens = { - 'root': [ - (r'#.*?\n', Comment), - (r'(body)(\s+)(\S+)(\s+)(control)', - bygroups(Keyword, Text, Keyword, Text, Keyword)), - (r'(body|bundle)(\s+)(\S+)(\s+)(\w+)(\()', - bygroups(Keyword, Text, Keyword, Text, Name.Function, Punctuation), - 'arglist'), - (r'(body|bundle)(\s+)(\S+)(\s+)(\w+)', - bygroups(Keyword, Text, Keyword, Text, Name.Function)), - (r'(")([^"]+)(")(\s+)(string|slist|int|real)(\s*)(=>)(\s*)', - bygroups(Punctuation,Name.Variable,Punctuation, - Text,Keyword.Type,Text,Operator,Text)), - (r'(\S+)(\s*)(=>)(\s*)', - bygroups(Keyword.Reserved,Text,Operator,Text)), - (r'"', String, 'string'), - (r'(\w+)(\()', bygroups(Name.Function, Punctuation)), - (r'([\w.!&|\(\)]+)(::)', bygroups(Name.Class, Punctuation)), - (r'(\w+)(:)', bygroups(Keyword.Declaration,Punctuation)), - (r'@[\{\(][^\)\}]+[\}\)]', Name.Variable), - (r'[(){},;]', Punctuation), - (r'=>', Operator), - (r'->', Operator), - (r'\d+\.\d+', Number.Float), - (r'\d+', Number.Integer), - (r'\w+', Name.Function), - (r'\s+', Text), - ], - 'string': [ - (r'\$[\{\(]', String.Interpol, 'interpol'), - (r'\\.', String.Escape), - (r'"', String, '#pop'), - (r'\n', String), - (r'.', String), - ], - 'interpol': [ - (r'\$[\{\(]', String.Interpol, '#push'), - (r'[\}\)]', String.Interpol, '#pop'), - (r'[^\$\{\(\)\}]+', String.Interpol), - ], - 'arglist': [ - (r'\)', Punctuation, '#pop'), - (r',', Punctuation), - (r'\w+', Name.Variable), - (r'\s+', Text), - ], - } - - -class SnobolLexer(RegexLexer): - """ - Lexer for the SNOBOL4 programming language. - - Recognizes the common ASCII equivalents of the original SNOBOL4 operators. - Does not require spaces around binary operators. 
- - *New in Pygments 1.5.* - """ - - name = "Snobol" - aliases = ["snobol"] - filenames = ['*.snobol'] - mimetypes = ['text/x-snobol'] - - tokens = { - # root state, start of line - # comments, continuation lines, and directives start in column 1 - # as do labels - 'root': [ - (r'\*.*\n', Comment), - (r'[\+\.] ', Punctuation, 'statement'), - (r'-.*\n', Comment), - (r'END\s*\n', Name.Label, 'heredoc'), - (r'[A-Za-z\$][\w$]*', Name.Label, 'statement'), - (r'\s+', Text, 'statement'), - ], - # statement state, line after continuation or label - 'statement': [ - (r'\s*\n', Text, '#pop'), - (r'\s+', Text), - (r'(?<=[^\w.])(LT|LE|EQ|NE|GE|GT|INTEGER|IDENT|DIFFER|LGT|SIZE|' - r'REPLACE|TRIM|DUPL|REMDR|DATE|TIME|EVAL|APPLY|OPSYN|LOAD|UNLOAD|' - r'LEN|SPAN|BREAK|ANY|NOTANY|TAB|RTAB|REM|POS|RPOS|FAIL|FENCE|' - r'ABORT|ARB|ARBNO|BAL|SUCCEED|INPUT|OUTPUT|TERMINAL)(?=[^\w.])', - Name.Builtin), - (r'[A-Za-z][\w\.]*', Name), - # ASCII equivalents of original operators - # | for the EBCDIC equivalent, ! likewise - # \ for EBCDIC negation - (r'\*\*|[\?\$\.!%\*/#+\-@\|&\\=]', Operator), - (r'"[^"]*"', String), - (r"'[^']*'", String), - # Accept SPITBOL syntax for real numbers - # as well as Macro SNOBOL4 - (r'[0-9]+(?=[^\.EeDd])', Number.Integer), - (r'[0-9]+(\.[0-9]*)?([EDed][-+]?[0-9]+)?', Number.Float), - # Goto - (r':', Punctuation, 'goto'), - (r'[\(\)<>,;]', Punctuation), - ], - # Goto block - 'goto': [ - (r'\s*\n', Text, "#pop:2"), - (r'\s+', Text), - (r'F|S', Keyword), - (r'(\()([A-Za-z][\w.]*)(\))', - bygroups(Punctuation, Name.Label, Punctuation)) - ], - # everything after the END statement is basically one - # big heredoc. - 'heredoc': [ - (r'.*\n', String.Heredoc) - ] - } - - -class UrbiscriptLexer(ExtendedRegexLexer): - """ - For UrbiScript source code. 
- - *New in Pygments 1.5.* - """ - - name = 'UrbiScript' - aliases = ['urbiscript'] - filenames = ['*.u'] - mimetypes = ['application/x-urbiscript'] - - flags = re.DOTALL - - ## TODO - # - handle Experimental and deprecated tags with specific tokens - # - handle Angles and Durations with specific tokens - - def blob_callback(lexer, match, ctx): - text_before_blob = match.group(1) - blob_start = match.group(2) - blob_size_str = match.group(3) - blob_size = int(blob_size_str) - yield match.start(), String, text_before_blob - ctx.pos += len(text_before_blob) - - # if blob size doesn't match blob format (example : "\B(2)(aaa)") - # yield blob as a string - if ctx.text[match.end() + blob_size] != ")": - result = "\\B(" + blob_size_str + ")(" - yield match.start(), String, result - ctx.pos += len(result) - return - - # if blob is well formated, yield as Escape - blob_text = blob_start + ctx.text[match.end():match.end()+blob_size] + ")" - yield match.start(), String.Escape, blob_text - ctx.pos = match.end() + blob_size + 1 # +1 is the ending ")" - - tokens = { - 'root': [ - (r'\s+', Text), - # comments - (r'//.*?\n', Comment), - (r'/\*', Comment.Multiline, 'comment'), - (r'(?:every|for|loop|while)(?:;|&|\||,)',Keyword), - (r'(?:assert|at|break|case|catch|closure|compl|continue|' - r'default|else|enum|every|external|finally|for|freezeif|if|new|' - r'onleave|return|stopif|switch|this|throw|timeout|try|' - r'waituntil|whenever|while)\b', Keyword), - (r'(?:asm|auto|bool|char|const_cast|delete|double|dynamic_cast|' - r'explicit|export|extern|float|friend|goto|inline|int|' - r'long|mutable|namespace|register|reinterpret_cast|short|' - r'signed|sizeof|static_cast|struct|template|typedef|typeid|' - r'typename|union|unsigned|using|virtual|volatile|' - r'wchar_t)\b', Keyword.Reserved), - # deprecated keywords, use a meaningfull token when available - (r'(?:emit|foreach|internal|loopn|static)\b', Keyword), - # ignored keywords, use a meaningfull token when available - 
(r'(?:private|protected|public)\b', Keyword), - (r'(?:var|do|const|function|class)\b', Keyword.Declaration), - (r'(?:true|false|nil|void)\b', Keyword.Constant), - (r'(?:Barrier|Binary|Boolean|CallMessage|Channel|Code|' - r'Comparable|Container|Control|Date|Dictionary|Directory|' - r'Duration|Enumeration|Event|Exception|Executable|File|Finalizable|' - r'Float|FormatInfo|Formatter|Global|Group|Hash|InputStream|' - r'IoService|Job|Kernel|Lazy|List|Loadable|Lobby|Location|Logger|Math|' - r'Mutex|nil|Object|Orderable|OutputStream|Pair|Path|Pattern|Position|' - r'Primitive|Process|Profile|PseudoLazy|PubSub|RangeIterable|Regexp|' - r'Semaphore|Server|Singleton|Socket|StackFrame|Stream|String|System|' - r'Tag|Timeout|Traceable|TrajectoryGenerator|Triplet|Tuple' - r'|UObject|UValue|UVar)\b', Name.Builtin), - (r'(?:this)\b', Name.Builtin.Pseudo), - # don't match single | and & - (r'(?:[-=+*%/<>~^:]+|\.&?|\|\||&&)', Operator), - (r'(?:and_eq|and|bitand|bitor|in|not|not_eq|or_eq|or|xor_eq|xor)\b', - Operator.Word), - (r'[{}\[\]()]+', Punctuation), - (r'(?:;|\||,|&|\?|!)+', Punctuation), - (r'[$a-zA-Z_][a-zA-Z0-9_]*', Name.Other), - (r'0x[0-9a-fA-F]+', Number.Hex), - # Float, Integer, Angle and Duration - (r'(?:[0-9]+(?:(?:\.[0-9]+)?(?:[eE][+-]?[0-9]+)?)?' 
- r'((?:rad|deg|grad)|(?:ms|s|min|h|d))?)\b', Number.Float), - # handle binary blob in strings - (r'"', String.Double, "string.double"), - (r"'", String.Single, "string.single"), - ], - 'string.double': [ - (r'((?:\\\\|\\"|[^"])*?)(\\B\((\d+)\)\()', blob_callback), - (r'(\\\\|\\"|[^"])*?"', String.Double, '#pop'), - ], - 'string.single': [ - (r"((?:\\\\|\\'|[^'])*?)(\\B\((\d+)\)\()", blob_callback), - (r"(\\\\|\\'|[^'])*?'", String.Single, '#pop'), - ], - # from http://pygments.org/docs/lexerdevelopment/#changing-states - 'comment': [ - (r'[^*/]', Comment.Multiline), - (r'/\*', Comment.Multiline, '#push'), - (r'\*/', Comment.Multiline, '#pop'), - (r'[*/]', Comment.Multiline), - ] - } - - -class OpenEdgeLexer(RegexLexer): - """ - Lexer for `OpenEdge ABL (formerly Progress) - `_ source code. - - *New in Pygments 1.5.* - """ - name = 'OpenEdge ABL' - aliases = ['openedge', 'abl', 'progress'] - filenames = ['*.p', '*.cls'] - mimetypes = ['text/x-openedge', 'application/x-openedge'] - - types = (r'(?i)(^|(?<=[^0-9a-z_\-]))(CHARACTER|CHAR|CHARA|CHARAC|CHARACT|CHARACTE|' - r'COM-HANDLE|DATE|DATETIME|DATETIME-TZ|' - r'DECIMAL|DEC|DECI|DECIM|DECIMA|HANDLE|' - r'INT64|INTEGER|INT|INTE|INTEG|INTEGE|' - r'LOGICAL|LONGCHAR|MEMPTR|RAW|RECID|ROWID)\s*($|(?=[^0-9a-z_\-]))') - - keywords = (r'(?i)(^|(?<=[^0-9a-z_\-]))(' + - r'|'.join(OPENEDGEKEYWORDS) + - r')\s*($|(?=[^0-9a-z_\-]))') - tokens = { - 'root': [ - (r'/\*', Comment.Multiline, 'comment'), - (r'\{', Comment.Preproc, 'preprocessor'), - (r'\s*&.*', Comment.Preproc), - (r'0[xX][0-9a-fA-F]+[LlUu]*', Number.Hex), - (r'(?i)(DEFINE|DEF|DEFI|DEFIN)\b', Keyword.Declaration), - (types, Keyword.Type), - (keywords, Name.Builtin), - (r'"(\\\\|\\"|[^"])*"', String.Double), - (r"'(\\\\|\\'|[^'])*'", String.Single), - (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), - (r'[0-9]+', Number.Integer), - (r'\s+', Text), - (r'[+*/=-]', Operator), - (r'[.:()]', Punctuation), - (r'.', Name.Variable), # Lazy catch-all - ], - 'comment': [ 
- (r'[^*/]', Comment.Multiline), - (r'/\*', Comment.Multiline, '#push'), - (r'\*/', Comment.Multiline, '#pop'), - (r'[*/]', Comment.Multiline) - ], - 'preprocessor': [ - (r'[^{}]', Comment.Preproc), - (r'{', Comment.Preproc, '#push'), - (r'}', Comment.Preproc, '#pop'), - ], - } - - -class BroLexer(RegexLexer): - """ - For `Bro `_ scripts. - - *New in Pygments 1.5.* - """ - name = 'Bro' - aliases = ['bro'] - filenames = ['*.bro'] - - _hex = r'[0-9a-fA-F_]+' - _float = r'((\d*\.?\d+)|(\d+\.?\d*))([eE][-+]?\d+)?' - _h = r'[A-Za-z0-9][-A-Za-z0-9]*' - - tokens = { - 'root': [ - # Whitespace - (r'^@.*?\n', Comment.Preproc), - (r'#.*?\n', Comment.Single), - (r'\n', Text), - (r'\s+', Text), - (r'\\\n', Text), - # Keywords - (r'(add|alarm|break|case|const|continue|delete|do|else|enum|event' - r'|export|for|function|if|global|hook|local|module|next' - r'|of|print|redef|return|schedule|switch|type|when|while)\b', Keyword), - (r'(addr|any|bool|count|counter|double|file|int|interval|net' - r'|pattern|port|record|set|string|subnet|table|time|timer' - r'|vector)\b', Keyword.Type), - (r'(T|F)\b', Keyword.Constant), - (r'(&)((?:add|delete|expire)_func|attr|(?:create|read|write)_expire' - r'|default|disable_print_hook|raw_output|encrypt|group|log' - r'|mergeable|optional|persistent|priority|redef' - r'|rotate_(?:interval|size)|synchronized)\b', bygroups(Punctuation, - Keyword)), - (r'\s+module\b', Keyword.Namespace), - # Addresses, ports and networks - (r'\d+/(tcp|udp|icmp|unknown)\b', Number), - (r'(\d+\.){3}\d+', Number), - (r'(' + _hex + r'){7}' + _hex, Number), - (r'0x' + _hex + r'(' + _hex + r'|:)*::(' + _hex + r'|:)*', Number), - (r'((\d+|:)(' + _hex + r'|:)*)?::(' + _hex + r'|:)*', Number), - (r'(\d+\.\d+\.|(\d+\.){2}\d+)', Number), - # Hostnames - (_h + r'(\.' 
+ _h + r')+', String), - # Numeric - (_float + r'\s+(day|hr|min|sec|msec|usec)s?\b', Literal.Date), - (r'0[xX]' + _hex, Number.Hex), - (_float, Number.Float), - (r'\d+', Number.Integer), - (r'/', String.Regex, 'regex'), - (r'"', String, 'string'), - # Operators - (r'[!%*/+:<=>?~|-]', Operator), - (r'([-+=&|]{2}|[+=!><-]=)', Operator), - (r'(in|match)\b', Operator.Word), - (r'[{}()\[\]$.,;]', Punctuation), - # Identfier - (r'([_a-zA-Z]\w*)(::)', bygroups(Name, Name.Namespace)), - (r'[a-zA-Z_][a-zA-Z_0-9]*', Name) - ], - 'string': [ - (r'"', String, '#pop'), - (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), - (r'[^\\"\n]+', String), - (r'\\\n', String), - (r'\\', String) - ], - 'regex': [ - (r'/', String.Regex, '#pop'), - (r'\\[\\nt/]', String.Regex), # String.Escape is too intense here. - (r'[^\\/\n]+', String.Regex), - (r'\\\n', String.Regex), - (r'\\', String.Regex) - ] - } - - -class CbmBasicV2Lexer(RegexLexer): - """ - For CBM BASIC V2 sources. - - *New in Pygments 1.6.* - """ - name = 'CBM BASIC V2' - aliases = ['cbmbas'] - filenames = ['*.bas'] - - flags = re.IGNORECASE - - tokens = { - 'root': [ - (r'rem.*\n', Comment.Single), - (r'\s+', Text), - (r'new|run|end|for|to|next|step|go(to|sub)?|on|return|stop|cont' - r'|if|then|input#?|read|wait|load|save|verify|poke|sys|print#?' 
- r'|list|clr|cmd|open|close|get#?', Keyword.Reserved), - (r'data|restore|dim|let|def|fn', Keyword.Declaration), - (r'tab|spc|sgn|int|abs|usr|fre|pos|sqr|rnd|log|exp|cos|sin|tan|atn' - r'|peek|len|val|asc|(str|chr|left|right|mid)\$', Name.Builtin), - (r'[-+*/^<>=]', Operator), - (r'not|and|or', Operator.Word), - (r'"[^"\n]*.', String), - (r'\d+|[-+]?\d*\.\d*(e[-+]?\d+)?', Number.Float), - (r'[\(\),:;]', Punctuation), - (r'\w+[$%]?', Name), - ] - } - - def analyse_text(self, text): - # if it starts with a line number, it shouldn't be a "modern" Basic - # like VB.net - if re.match(r'\d+', text): - return True - - -class MscgenLexer(RegexLexer): - """ - For `Mscgen `_ files. - - *New in Pygments 1.6.* - """ - name = 'Mscgen' - aliases = ['mscgen', 'msc'] - filenames = ['*.msc'] - - _var = r'([a-zA-Z0-9_]+|"(?:\\"|[^"])*")' - - tokens = { - 'root': [ - (r'msc\b', Keyword.Type), - # Options - (r'(hscale|HSCALE|width|WIDTH|wordwraparcs|WORDWRAPARCS' - r'|arcgradient|ARCGRADIENT)\b', Name.Property), - # Operators - (r'(abox|ABOX|rbox|RBOX|box|BOX|note|NOTE)\b', Operator.Word), - (r'(\.|-|\|){3}', Keyword), - (r'(?:-|=|\.|:){2}' - r'|<<=>>|<->|<=>|<<>>|<:>' - r'|->|=>>|>>|=>|:>|-x|-X' - r'|<-|<<=|<<|<=|<:|x-|X-|=', Operator), - # Names - (r'\*', Name.Builtin), - (_var, Name.Variable), - # Other - (r'\[', Punctuation, 'attrs'), - (r'\{|\}|,|;', Punctuation), - include('comments') - ], - 'attrs': [ - (r'\]', Punctuation, '#pop'), - (_var + r'(\s*)(=)(\s*)' + _var, - bygroups(Name.Attribute, Text.Whitespace, Operator, Text.Whitespace, - String)), - (r',', Punctuation), - include('comments') - ], - 'comments': [ - (r'(?://|#).*?\n', Comment.Single), - (r'/\*(?:.|\n)*?\*/', Comment.Multiline), - (r'[ \t\r\n]+', Text.Whitespace) - ] - } - - -def _rx_indent(level): - # Kconfig *always* interprets a tab as 8 spaces, so this is the default. 
- # Edit this if you are in an environment where KconfigLexer gets expanded - # input (tabs expanded to spaces) and the expansion tab width is != 8, - # e.g. in connection with Trac (trac.ini, [mimeviewer], tab_width). - # Value range here is 2 <= {tab_width} <= 8. - tab_width = 8 - # Regex matching a given indentation {level}, assuming that indentation is - # a multiple of {tab_width}. In other cases there might be problems. - return r'(?:\t| {1,%s}\t| {%s}){%s}.*\n' % (tab_width-1, tab_width, level) - - -class KconfigLexer(RegexLexer): - """ - For Linux-style Kconfig files. - - *New in Pygments 1.6.* - """ - - name = 'Kconfig' - aliases = ['kconfig', 'menuconfig', 'linux-config', 'kernel-config'] - # Adjust this if new kconfig file names appear in your environment - filenames = ['Kconfig', '*Config.in*', 'external.in*', - 'standard-modules.in'] - mimetypes = ['text/x-kconfig'] - # No re.MULTILINE, indentation-aware help text needs line-by-line handling - flags = 0 - - def call_indent(level): - # If indentation >= {level} is detected, enter state 'indent{level}' - return (_rx_indent(level), String.Doc, 'indent%s' % level) - - def do_indent(level): - # Print paragraphs of indentation level >= {level} as String.Doc, - # ignoring blank lines. Then return to 'root' state. 
- return [ - (_rx_indent(level), String.Doc), - (r'\s*\n', Text), - (r'', Generic, '#pop:2') - ] - - tokens = { - 'root': [ - (r'\s+', Text), - (r'#.*?\n', Comment.Single), - (r'(mainmenu|config|menuconfig|choice|endchoice|comment|menu|' - r'endmenu|visible if|if|endif|source|prompt|select|depends on|' - r'default|range|option)\b', Keyword), - (r'(---help---|help)[\t ]*\n', Keyword, 'help'), - (r'(bool|tristate|string|hex|int|defconfig_list|modules|env)\b', - Name.Builtin), - (r'[!=&|]', Operator), - (r'[()]', Punctuation), - (r'[0-9]+', Number.Integer), - (r"'(''|[^'])*'", String.Single), - (r'"(""|[^"])*"', String.Double), - (r'\S+', Text), - ], - # Help text is indented, multi-line and ends when a lower indentation - # level is detected. - 'help': [ - # Skip blank lines after help token, if any - (r'\s*\n', Text), - # Determine the first help line's indentation level heuristically(!). - # Attention: this is not perfect, but works for 99% of "normal" - # indentation schemes up to a max. indentation level of 7. - call_indent(7), - call_indent(6), - call_indent(5), - call_indent(4), - call_indent(3), - call_indent(2), - call_indent(1), - ('', Text, '#pop'), # for incomplete help sections without text - ], - # Handle text for indentation levels 7 to 1 - 'indent7': do_indent(7), - 'indent6': do_indent(6), - 'indent5': do_indent(5), - 'indent4': do_indent(4), - 'indent3': do_indent(3), - 'indent2': do_indent(2), - 'indent1': do_indent(1), - } - - -class VGLLexer(RegexLexer): - """ - For `SampleManager VGL `_ - source code. 
- - *New in Pygments 1.6.* - """ - name = 'VGL' - aliases = ['vgl'] - filenames = ['*.rpf'] - - flags = re.MULTILINE | re.DOTALL | re.IGNORECASE - - tokens = { - 'root': [ - (r'\{[^\}]*\}', Comment.Multiline), - (r'declare', Keyword.Constant), - (r'(if|then|else|endif|while|do|endwhile|and|or|prompt|object' - r'|create|on|line|with|global|routine|value|endroutine|constant' - r'|global|set|join|library|compile_option|file|exists|create|copy' - r'|delete|enable|windows|name|notprotected)(?! *[=<>.,()])', - Keyword), - (r'(true|false|null|empty|error|locked)', Keyword.Constant), - (r'[~\^\*\#!%&\[\]\(\)<>\|+=:;,./?-]', Operator), - (r'"[^"]*"', String), - (r'(\.)([a-z_\$][a-z0-9_\$]*)', bygroups(Operator, Name.Attribute)), - (r'[0-9][0-9]*(\.[0-9]+(e[+\-]?[0-9]+)?)?', Number), - (r'[a-z_\$][a-z0-9_\$]*', Name), - (r'[\r\n]+', Text), - (r'\s+', Text) - ] - } - - -class SourcePawnLexer(RegexLexer): - """ - For SourcePawn source code with preprocessor directives. - - *New in Pygments 1.6.* - """ - name = 'SourcePawn' - aliases = ['sp'] - filenames = ['*.sp'] - mimetypes = ['text/x-sourcepawn'] - - #: optional Comment or Whitespace - _ws = r'(?:\s|//.*?\n|/\*.*?\*/)+' - - tokens = { - 'root': [ - # preprocessor directives: without whitespace - ('^#if\s+0', Comment.Preproc, 'if0'), - ('^#', Comment.Preproc, 'macro'), - # or with whitespace - ('^' + _ws + r'#if\s+0', Comment.Preproc, 'if0'), - ('^' + _ws + '#', Comment.Preproc, 'macro'), - (r'\n', Text), - (r'\s+', Text), - (r'\\\n', Text), # line continuation - (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single), - (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline), - (r'[{}]', Punctuation), - (r'L?"', String, 'string'), - (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char), - (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float), - (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), - (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex), - (r'0[0-7]+[LlUu]*', Number.Oct), - (r'\d+[LlUu]*', Number.Integer), - 
(r'\*/', Error), - (r'[~!%^&*+=|?:<>/-]', Operator), - (r'[()\[\],.;]', Punctuation), - (r'(case|const|continue|native|' - r'default|else|enum|for|if|new|operator|' - r'public|return|sizeof|static|decl|struct|switch)\b', Keyword), - (r'(bool|Float)\b', Keyword.Type), - (r'(true|false)\b', Keyword.Constant), - ('[a-zA-Z_][a-zA-Z0-9_]*', Name), - ], - 'string': [ - (r'"', String, '#pop'), - (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), - (r'[^\\"\n]+', String), # all other characters - (r'\\\n', String), # line continuation - (r'\\', String), # stray backslash - ], - 'macro': [ - (r'[^/\n]+', Comment.Preproc), - (r'/\*(.|\n)*?\*/', Comment.Multiline), - (r'//.*?\n', Comment.Single, '#pop'), - (r'/', Comment.Preproc), - (r'(?<=\\)\n', Comment.Preproc), - (r'\n', Comment.Preproc, '#pop'), - ], - 'if0': [ - (r'^\s*#if.*?(?`__ configuration DSL. - - *New in Pygments 1.6.* - """ - name = 'Puppet' - aliases = ['puppet'] - filenames = ['*.pp'] - - tokens = { - 'root': [ - include('comments'), - include('keywords'), - include('names'), - include('numbers'), - include('operators'), - include('strings'), - - (r'[]{}:(),;[]', Punctuation), - (r'[^\S\n]+', Text), - ], - - 'comments': [ - (r'\s*#.*$', Comment), - (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), - ], - - 'operators': [ - (r'(=>|\?|<|>|=|\+|-|/|\*|~|!|\|)', Operator), - (r'(in|and|or|not)\b', Operator.Word), - ], - - 'names': [ - ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Attribute), - (r'(\$\S+)(\[)(\S+)(\])', bygroups(Name.Variable, Punctuation, - String, Punctuation)), - (r'\$\S+', Name.Variable), - ], - - 'numbers': [ - # Copypasta from the Python lexer - (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float), - (r'\d+[eE][+-]?[0-9]+j?', Number.Float), - (r'0[0-7]+j?', Number.Oct), - (r'0[xX][a-fA-F0-9]+', Number.Hex), - (r'\d+L', Number.Integer.Long), - (r'\d+j?', Number.Integer) - ], - - 'keywords': [ - # Left out 'group' and 'require' - # Since they're often used as attributes - 
(r'(?i)(absent|alert|alias|audit|augeas|before|case|check|class|' - r'computer|configured|contained|create_resources|crit|cron|debug|' - r'default|define|defined|directory|else|elsif|emerg|err|exec|' - r'extlookup|fail|false|file|filebucket|fqdn_rand|generate|host|if|' - r'import|include|info|inherits|inline_template|installed|' - r'interface|k5login|latest|link|loglevel|macauthorization|' - r'mailalias|maillist|mcx|md5|mount|mounted|nagios_command|' - r'nagios_contact|nagios_contactgroup|nagios_host|' - r'nagios_hostdependency|nagios_hostescalation|nagios_hostextinfo|' - r'nagios_hostgroup|nagios_service|nagios_servicedependency|' - r'nagios_serviceescalation|nagios_serviceextinfo|' - r'nagios_servicegroup|nagios_timeperiod|node|noop|notice|notify|' - r'package|present|purged|realize|regsubst|resources|role|router|' - r'running|schedule|scheduled_task|search|selboolean|selmodule|' - r'service|sha1|shellquote|split|sprintf|ssh_authorized_key|sshkey|' - r'stage|stopped|subscribe|tag|tagged|template|tidy|true|undef|' - r'unmounted|user|versioncmp|vlan|warning|yumrepo|zfs|zone|' - r'zpool)\b', Keyword), - ], - - 'strings': [ - (r'"([^"])*"', String), - (r'\'([^\'])*\'', String), - ], - - } - - -class NSISLexer(RegexLexer): - """ - For `NSIS `_ scripts. 
- - *New in Pygments 1.6.* - """ - name = 'NSIS' - aliases = ['nsis', 'nsi', 'nsh'] - filenames = ['*.nsi', '*.nsh'] - mimetypes = ['text/x-nsis'] - - flags = re.IGNORECASE - - tokens = { - 'root': [ - (r'[;\#].*\n', Comment), - (r"'.*?'", String.Single), - (r'"', String.Double, 'str_double'), - (r'`', String.Backtick, 'str_backtick'), - include('macro'), - include('interpol'), - include('basic'), - (r'\$\{[a-z_|][\w|]*\}', Keyword.Pseudo), - (r'/[a-z_]\w*', Name.Attribute), - ('.', Text), - ], - 'basic': [ - (r'(\n)(Function)(\s+)([._a-z][.\w]*)\b', - bygroups(Text, Keyword, Text, Name.Function)), - (r'\b([_a-z]\w*)(::)([a-z][a-z0-9]*)\b', - bygroups(Keyword.Namespace, Punctuation, Name.Function)), - (r'\b([_a-z]\w*)(:)', bygroups(Name.Label, Punctuation)), - (r'(\b[ULS]|\B)([\!\<\>=]?=|\<\>?|\>)\B', Operator), - (r'[|+-]', Operator), - (r'\\', Punctuation), - (r'\b(Abort|Add(?:BrandingImage|Size)|' - r'Allow(?:RootDirInstall|SkipFiles)|AutoCloseWindow|' - r'BG(?:Font|Gradient)|BrandingText|BringToFront|Call(?:InstDLL)?|' - r'(?:Sub)?Caption|ChangeUI|CheckBitmap|ClearErrors|CompletedText|' - r'ComponentText|CopyFiles|CRCCheck|' - r'Create(?:Directory|Font|Shortcut)|Delete(?:INI(?:Sec|Str)|' - r'Reg(?:Key|Value))?|DetailPrint|DetailsButtonText|' - r'Dir(?:Show|Text|Var|Verify)|(?:Disabled|Enabled)Bitmap|' - r'EnableWindow|EnumReg(?:Key|Value)|Exch|Exec(?:Shell|Wait)?|' - r'ExpandEnvStrings|File(?:BufSize|Close|ErrorText|Open|' - r'Read(?:Byte)?|Seek|Write(?:Byte)?)?|' - r'Find(?:Close|First|Next|Window)|FlushINI|Function(?:End)?|' - r'Get(?:CurInstType|CurrentAddress|DlgItem|DLLVersion(?:Local)?|' - r'ErrorLevel|FileTime(?:Local)?|FullPathName|FunctionAddress|' - r'InstDirError|LabelAddress|TempFileName)|' - r'Goto|HideWindow|Icon|' - r'If(?:Abort|Errors|FileExists|RebootFlag|Silent)|' - r'InitPluginsDir|Install(?:ButtonText|Colors|Dir(?:RegKey)?)|' - r'Inst(?:ProgressFlags|Type(?:[GS]etText)?)|Int(?:CmpU?|Fmt|Op)|' - r'IsWindow|LangString(?:UP)?|' - 
r'License(?:BkColor|Data|ForceSelection|LangString|Text)|' - r'LoadLanguageFile|LockWindow|Log(?:Set|Text)|MessageBox|' - r'MiscButtonText|Name|Nop|OutFile|(?:Uninst)?Page(?:Ex(?:End)?)?|' - r'PluginDir|Pop|Push|Quit|Read(?:(?:Env|INI|Reg)Str|RegDWORD)|' - r'Reboot|(?:Un)?RegDLL|Rename|RequestExecutionLevel|ReserveFile|' - r'Return|RMDir|SearchPath|Section(?:Divider|End|' - r'(?:(?:Get|Set)(?:Flags|InstTypes|Size|Text))|Group(?:End)?|In)?|' - r'SendMessage|Set(?:AutoClose|BrandingImage|Compress(?:ionLevel|' - r'or(?:DictSize)?)?|CtlColors|CurInstType|DatablockOptimize|' - r'DateSave|Details(?:Print|View)|Error(?:s|Level)|FileAttributes|' - r'Font|OutPath|Overwrite|PluginUnload|RebootFlag|ShellVarContext|' - r'Silent|StaticBkColor)|' - r'Show(?:(?:I|Uni)nstDetails|Window)|Silent(?:Un)?Install|Sleep|' - r'SpaceTexts|Str(?:CmpS?|Cpy|Len)|SubSection(?:End)?|' - r'Uninstall(?:ButtonText|(?:Sub)?Caption|EXEName|Icon|Text)|' - r'UninstPage|Var|VI(?:AddVersionKey|ProductVersion)|WindowIcon|' - r'Write(?:INIStr|Reg(:?Bin|DWORD|(?:Expand)?Str)|Uninstaller)|' - r'XPStyle)\b', Keyword), - (r'\b(CUR|END|(?:FILE_ATTRIBUTE_)?' 
- r'(?:ARCHIVE|HIDDEN|NORMAL|OFFLINE|READONLY|SYSTEM|TEMPORARY)|' - r'HK(CC|CR|CU|DD|LM|PD|U)|' - r'HKEY_(?:CLASSES_ROOT|CURRENT_(?:CONFIG|USER)|DYN_DATA|' - r'LOCAL_MACHINE|PERFORMANCE_DATA|USERS)|' - r'ID(?:ABORT|CANCEL|IGNORE|NO|OK|RETRY|YES)|' - r'MB_(?:ABORTRETRYIGNORE|DEFBUTTON[1-4]|' - r'ICON(?:EXCLAMATION|INFORMATION|QUESTION|STOP)|' - r'OK(?:CANCEL)?|RETRYCANCEL|RIGHT|SETFOREGROUND|TOPMOST|USERICON|' - r'YESNO(?:CANCEL)?)|SET|SHCTX|' - r'SW_(?:HIDE|SHOW(?:MAXIMIZED|MINIMIZED|NORMAL))|' - r'admin|all|auto|both|bottom|bzip2|checkbox|colored|current|false|' - r'force|hide|highest|if(?:diff|newer)|lastused|leave|left|' - r'listonly|lzma|nevershow|none|normal|off|on|pop|push|' - r'radiobuttons|right|show|silent|silentlog|smooth|textonly|top|' - r'true|try|user|zlib)\b', Name.Constant), - ], - 'macro': [ - (r'\!(addincludedir(?:dir)?|addplugindir|appendfile|cd|define|' - r'delfilefile|echo(?:message)?|else|endif|error|execute|' - r'if(?:macro)?n?(?:def)?|include|insertmacro|macro(?:end)?|packhdr|' - r'search(?:parse|replace)|system|tempfilesymbol|undef|verbose|' - r'warning)\b', Comment.Preproc), - ], - 'interpol': [ - (r'\$(R?[0-9])', Name.Builtin.Pseudo), # registers - (r'\$(ADMINTOOLS|APPDATA|CDBURN_AREA|COOKIES|COMMONFILES(?:32|64)|' - r'DESKTOP|DOCUMENTS|EXE(?:DIR|FILE|PATH)|FAVORITES|FONTS|HISTORY|' - r'HWNDPARENT|INTERNET_CACHE|LOCALAPPDATA|MUSIC|NETHOOD|PICTURES|' - r'PLUGINSDIR|PRINTHOOD|PROFILE|PROGRAMFILES(?:32|64)|QUICKLAUNCH|' - r'RECENT|RESOURCES(?:_LOCALIZED)?|SENDTO|SM(?:PROGRAMS|STARTUP)|' - r'STARTMENU|SYSDIR|TEMP(?:LATES)?|VIDEOS|WINDIR|\{NSISDIR\})', - Name.Builtin), - (r'\$(CMDLINE|INSTDIR|OUTDIR|LANGUAGE)', Name.Variable.Global), - (r'\$[a-z_]\w*', Name.Variable), - ], - 'str_double': [ - (r'"', String, '#pop'), - (r'\$(\\[nrt"]|\$)', String.Escape), - include('interpol'), - (r'.', String.Double), - ], - 'str_backtick': [ - (r'`', String, '#pop'), - (r'\$(\\[nrt"]|\$)', String.Escape), - include('interpol'), - (r'.', String.Double), - ], - 
} - - -class RPMSpecLexer(RegexLexer): - """ - For RPM *.spec files - - *New in Pygments 1.6.* - """ - - name = 'RPMSpec' - aliases = ['spec'] - filenames = ['*.spec'] - mimetypes = ['text/x-rpm-spec'] - - _directives = ('(?:package|prep|build|install|clean|check|pre[a-z]*|' - 'post[a-z]*|trigger[a-z]*|files)') - - tokens = { - 'root': [ - (r'#.*\n', Comment), - include('basic'), - ], - 'description': [ - (r'^(%' + _directives + ')(.*)$', - bygroups(Name.Decorator, Text), '#pop'), - (r'\n', Text), - (r'.', Text), - ], - 'changelog': [ - (r'\*.*\n', Generic.Subheading), - (r'^(%' + _directives + ')(.*)$', - bygroups(Name.Decorator, Text), '#pop'), - (r'\n', Text), - (r'.', Text), - ], - 'string': [ - (r'"', String.Double, '#pop'), - (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), - include('interpol'), - (r'.', String.Double), - ], - 'basic': [ - include('macro'), - (r'(?i)^(Name|Version|Release|Epoch|Summary|Group|License|Packager|' - r'Vendor|Icon|URL|Distribution|Prefix|Patch[0-9]*|Source[0-9]*|' - r'Requires\(?[a-z]*\)?|[a-z]+Req|Obsoletes|Suggests|Provides|Conflicts|' - r'Build[a-z]+|[a-z]+Arch|Auto[a-z]+)(:)(.*)$', - bygroups(Generic.Heading, Punctuation, using(this))), - (r'^%description', Name.Decorator, 'description'), - (r'^%changelog', Name.Decorator, 'changelog'), - (r'^(%' + _directives + ')(.*)$', bygroups(Name.Decorator, Text)), - (r'%(attr|defattr|dir|doc(?:dir)?|setup|config(?:ure)?|' - r'make(?:install)|ghost|patch[0-9]+|find_lang|exclude|verify)', - Keyword), - include('interpol'), - (r"'.*?'", String.Single), - (r'"', String.Double, 'string'), - (r'.', Text), - ], - 'macro': [ - (r'%define.*\n', Comment.Preproc), - (r'%\{\!\?.*%define.*\}', Comment.Preproc), - (r'(%(?:if(?:n?arch)?|else(?:if)?|endif))(.*)$', - bygroups(Comment.Preproc, Text)), - ], - 'interpol': [ - (r'%\{?__[a-z_]+\}?', Name.Function), - (r'%\{?_([a-z_]+dir|[a-z_]+path|prefix)\}?', Keyword.Pseudo), - (r'%\{\?[A-Za-z0-9_]+\}', Name.Variable), - 
(r'\$\{?RPM_[A-Z0-9_]+\}?', Name.Variable.Global), - (r'%\{[a-zA-Z][a-zA-Z0-9_]+\}', Keyword.Constant), - ] - } - - -class AutoItLexer(RegexLexer): - """ - For `AutoIt `_ files. - - AutoIt is a freeware BASIC-like scripting language - designed for automating the Windows GUI and general scripting - - *New in Pygments 1.6.* - """ - name = 'AutoIt' - aliases = ['autoit', 'Autoit'] - filenames = ['*.au3'] - mimetypes = ['text/x-autoit'] - - # Keywords, functions, macros from au3.keywords.properties - # which can be found in AutoIt installed directory, e.g. - # c:\Program Files (x86)\AutoIt3\SciTE\au3.keywords.properties - - keywords = """\ - #include-once #include #endregion #forcedef #forceref #region - and byref case continueloop dim do else elseif endfunc endif - endselect exit exitloop for func global - if local next not or return select step - then to until wend while exit""".split() - - functions = """\ - abs acos adlibregister adlibunregister asc ascw asin assign atan - autoitsetoption autoitwingettitle autoitwinsettitle beep binary binarylen - binarymid binarytostring bitand bitnot bitor bitrotate bitshift bitxor - blockinput break call cdtray ceiling chr chrw clipget clipput consoleread - consolewrite consolewriteerror controlclick controlcommand controldisable - controlenable controlfocus controlgetfocus controlgethandle controlgetpos - controlgettext controlhide controllistview controlmove controlsend - controlsettext controlshow controltreeview cos dec dircopy dircreate - dirgetsize dirmove dirremove dllcall dllcalladdress dllcallbackfree - dllcallbackgetptr dllcallbackregister dllclose dllopen dllstructcreate - dllstructgetdata dllstructgetptr dllstructgetsize dllstructsetdata - drivegetdrive drivegetfilesystem drivegetlabel drivegetserial drivegettype - drivemapadd drivemapdel drivemapget drivesetlabel drivespacefree - drivespacetotal drivestatus envget envset envupdate eval execute exp - filechangedir fileclose filecopy filecreatentfslink 
filecreateshortcut - filedelete fileexists filefindfirstfile filefindnextfile fileflush - filegetattrib filegetencoding filegetlongname filegetpos filegetshortcut - filegetshortname filegetsize filegettime filegetversion fileinstall filemove - fileopen fileopendialog fileread filereadline filerecycle filerecycleempty - filesavedialog fileselectfolder filesetattrib filesetpos filesettime - filewrite filewriteline floor ftpsetproxy guicreate guictrlcreateavi - guictrlcreatebutton guictrlcreatecheckbox guictrlcreatecombo - guictrlcreatecontextmenu guictrlcreatedate guictrlcreatedummy - guictrlcreateedit guictrlcreategraphic guictrlcreategroup guictrlcreateicon - guictrlcreateinput guictrlcreatelabel guictrlcreatelist - guictrlcreatelistview guictrlcreatelistviewitem guictrlcreatemenu - guictrlcreatemenuitem guictrlcreatemonthcal guictrlcreateobj - guictrlcreatepic guictrlcreateprogress guictrlcreateradio - guictrlcreateslider guictrlcreatetab guictrlcreatetabitem - guictrlcreatetreeview guictrlcreatetreeviewitem guictrlcreateupdown - guictrldelete guictrlgethandle guictrlgetstate guictrlread guictrlrecvmsg - guictrlregisterlistviewsort guictrlsendmsg guictrlsendtodummy - guictrlsetbkcolor guictrlsetcolor guictrlsetcursor guictrlsetdata - guictrlsetdefbkcolor guictrlsetdefcolor guictrlsetfont guictrlsetgraphic - guictrlsetimage guictrlsetlimit guictrlsetonevent guictrlsetpos - guictrlsetresizing guictrlsetstate guictrlsetstyle guictrlsettip guidelete - guigetcursorinfo guigetmsg guigetstyle guiregistermsg guisetaccelerators - guisetbkcolor guisetcoord guisetcursor guisetfont guisethelp guiseticon - guisetonevent guisetstate guisetstyle guistartgroup guiswitch hex hotkeyset - httpsetproxy httpsetuseragent hwnd inetclose inetget inetgetinfo inetgetsize - inetread inidelete iniread inireadsection inireadsectionnames - inirenamesection iniwrite iniwritesection inputbox int isadmin isarray - isbinary isbool isdeclared isdllstruct isfloat ishwnd isint iskeyword - isnumber 
isobj isptr isstring log memgetstats mod mouseclick mouseclickdrag - mousedown mousegetcursor mousegetpos mousemove mouseup mousewheel msgbox - number objcreate objcreateinterface objevent objevent objget objname - onautoitexitregister onautoitexitunregister opt ping pixelchecksum - pixelgetcolor pixelsearch pluginclose pluginopen processclose processexists - processgetstats processlist processsetpriority processwait processwaitclose - progressoff progresson progressset ptr random regdelete regenumkey - regenumval regread regwrite round run runas runaswait runwait send - sendkeepactive seterror setextended shellexecute shellexecutewait shutdown - sin sleep soundplay soundsetwavevolume splashimageon splashoff splashtexton - sqrt srandom statusbargettext stderrread stdinwrite stdioclose stdoutread - string stringaddcr stringcompare stringformat stringfromasciiarray - stringinstr stringisalnum stringisalpha stringisascii stringisdigit - stringisfloat stringisint stringislower stringisspace stringisupper - stringisxdigit stringleft stringlen stringlower stringmid stringregexp - stringregexpreplace stringreplace stringright stringsplit stringstripcr - stringstripws stringtoasciiarray stringtobinary stringtrimleft - stringtrimright stringupper tan tcpaccept tcpclosesocket tcpconnect - tcplisten tcpnametoip tcprecv tcpsend tcpshutdown tcpstartup timerdiff - timerinit tooltip traycreateitem traycreatemenu traygetmsg trayitemdelete - trayitemgethandle trayitemgetstate trayitemgettext trayitemsetonevent - trayitemsetstate trayitemsettext traysetclick trayseticon traysetonevent - traysetpauseicon traysetstate traysettooltip traytip ubound udpbind - udpclosesocket udpopen udprecv udpsend udpshutdown udpstartup vargettype - winactivate winactive winclose winexists winflash wingetcaretpos - wingetclasslist wingetclientsize wingethandle wingetpos wingetprocess - wingetstate wingettext wingettitle winkill winlist winmenuselectitem - winminimizeall winminimizeallundo winmove 
winsetontop winsetstate - winsettitle winsettrans winwait winwaitactive winwaitclose - winwaitnotactive""".split() - - macros = """\ - @appdatacommondir @appdatadir @autoitexe @autoitpid @autoitversion - @autoitx64 @com_eventobj @commonfilesdir @compiled @computername @comspec - @cpuarch @cr @crlf @desktopcommondir @desktopdepth @desktopdir - @desktopheight @desktoprefresh @desktopwidth @documentscommondir @error - @exitcode @exitmethod @extended @favoritescommondir @favoritesdir - @gui_ctrlhandle @gui_ctrlid @gui_dragfile @gui_dragid @gui_dropid - @gui_winhandle @homedrive @homepath @homeshare @hotkeypressed @hour - @ipaddress1 @ipaddress2 @ipaddress3 @ipaddress4 @kblayout @lf - @logondnsdomain @logondomain @logonserver @mday @min @mon @msec @muilang - @mydocumentsdir @numparams @osarch @osbuild @oslang @osservicepack @ostype - @osversion @programfilesdir @programscommondir @programsdir @scriptdir - @scriptfullpath @scriptlinenumber @scriptname @sec @startmenucommondir - @startmenudir @startupcommondir @startupdir @sw_disable @sw_enable @sw_hide - @sw_lock @sw_maximize @sw_minimize @sw_restore @sw_show @sw_showdefault - @sw_showmaximized @sw_showminimized @sw_showminnoactive @sw_showna - @sw_shownoactivate @sw_shownormal @sw_unlock @systemdir @tab @tempdir - @tray_id @trayiconflashing @trayiconvisible @username @userprofiledir @wday - @windowsdir @workingdir @yday @year""".split() - - tokens = { - 'root': [ - (r';.*\n', Comment.Single), - (r'(#comments-start|#cs).*?(#comments-end|#ce)', Comment.Multiline), - (r'[\[\]{}(),;]', Punctuation), - (r'(and|or|not)\b', Operator.Word), - (r'[\$|@][a-zA-Z_][a-zA-Z0-9_]*', Name.Variable), - (r'!=|==|:=|\.=|<<|>>|[-~+/*%=<>&^|?:!.]', Operator), - include('commands'), - include('labels'), - include('builtInFunctions'), - include('builtInMarcros'), - (r'"', String, combined('stringescape', 'dqs')), - include('numbers'), - (r'[a-zA-Z_#@$][a-zA-Z0-9_#@$]*', Name), - (r'\\|\'', Text), - (r'\`([\,\%\`abfnrtv\-\+;])', 
String.Escape), - (r'_\n', Text), # Line continuation - include('garbage'), - ], - 'commands': [ - (r'(?i)(\s*)(%s)\b' % '|'.join(keywords), - bygroups(Text, Name.Builtin)), - ], - 'builtInFunctions': [ - (r'(?i)(%s)\b' % '|'.join(functions), - Name.Function), - ], - 'builtInMarcros': [ - (r'(?i)(%s)\b' % '|'.join(macros), - Name.Variable.Global), - ], - 'labels': [ - # sendkeys - (r'(^\s*)({\S+?})', bygroups(Text, Name.Label)), - ], - 'numbers': [ - (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float), - (r'\d+[eE][+-]?[0-9]+', Number.Float), - (r'0\d+', Number.Oct), - (r'0[xX][a-fA-F0-9]+', Number.Hex), - (r'\d+L', Number.Integer.Long), - (r'\d+', Number.Integer) - ], - 'stringescape': [ - (r'\"\"|\`([\,\%\`abfnrtv])', String.Escape), - ], - 'strings': [ - (r'[^"\n]+', String), - ], - 'dqs': [ - (r'"', String, '#pop'), - include('strings') - ], - 'garbage': [ - (r'[^\S\n]', Text), - ], - } - - -class RexxLexer(RegexLexer): - """ - `Rexx `_ is a scripting language available for - a wide range of different platforms with its roots found on mainframe - systems. It is popular for I/O- and data based tasks and can act as glue - language to bind different applications together. 
- - *New in Pygments 1.7.* - """ - name = 'Rexx' - aliases = ['rexx', 'ARexx', 'arexx'] - filenames = ['*.rexx', '*.rex', '*.rx', '*.arexx'] - mimetypes = ['text/x-rexx'] - flags = re.IGNORECASE - - tokens = { - 'root': [ - (r'\s', Whitespace), - (r'/\*', Comment.Multiline, 'comment'), - (r'"', String, 'string_double'), - (r"'", String, 'string_single'), - (r'[0-9]+(\.[0-9]+)?(e[+-]?[0-9])?', Number), - (r'([a-z_][a-z0-9_]*)(\s*)(:)(\s*)(procedure)\b', - bygroups(Name.Function, Whitespace, Operator, Whitespace, - Keyword.Declaration)), - (r'([a-z_][a-z0-9_]*)(\s*)(:)', - bygroups(Name.Label, Whitespace, Operator)), - include('function'), - include('keyword'), - include('operator'), - (r'[a-z_][a-z0-9_]*', Text), - ], - 'function': [ - (r'(abbrev|abs|address|arg|b2x|bitand|bitor|bitxor|c2d|c2x|' - r'center|charin|charout|chars|compare|condition|copies|d2c|' - r'd2x|datatype|date|delstr|delword|digits|errortext|form|' - r'format|fuzz|insert|lastpos|left|length|linein|lineout|lines|' - r'max|min|overlay|pos|queued|random|reverse|right|sign|' - r'sourceline|space|stream|strip|substr|subword|symbol|time|' - r'trace|translate|trunc|value|verify|word|wordindex|' - r'wordlength|wordpos|words|x2b|x2c|x2d|xrange)(\s*)(\()', - bygroups(Name.Builtin, Whitespace, Operator)), - ], - 'keyword': [ - (r'(address|arg|by|call|do|drop|else|end|exit|for|forever|if|' - r'interpret|iterate|leave|nop|numeric|off|on|options|parse|' - r'pull|push|queue|return|say|select|signal|to|then|trace|until|' - r'while)\b', Keyword.Reserved), - ], - 'operator': [ - (ur'(-|//|/|\(|\)|\*\*|\*|\\<<|\\<|\\==|\\=|\\>>|\\>|\\|\|\||\||' - ur'&&|&|%|\+|<<=|<<|<=|<>|<|==|=|><|>=|>>=|>>|>|¬<<|¬<|¬==|¬=|' - ur'¬>>|¬>|¬|\.|,)', Operator), - ], - 'string_double': [ - (r'[^"\n]+', String), - (r'""', String), - (r'"', String, '#pop'), - (r'\n', Text, '#pop'), # Stray linefeed also terminates strings. 
- ], - 'string_single': [ - (r'[^\'\n]', String), - (r'\'\'', String), - (r'\'', String, '#pop'), - (r'\n', Text, '#pop'), # Stray linefeed also terminates strings. - ], - 'comment': [ - (r'[^*]+', Comment.Multiline), - (r'\*/', Comment.Multiline, '#pop'), - (r'\*', Comment.Multiline), - ] - } - - _c = lambda s: re.compile(s, re.MULTILINE) - _ADDRESS_COMMAND_PATTERN = _c(r'^\s*address\s+command\b') - _ADDRESS_PATTERN = _c(r'^\s*address\s+') - _DO_WHILE_PATTERN = _c(r'^\s*do\s+while\b') - _IF_THEN_DO_PATTERN = _c(r'^\s*if\b.+\bthen\s+do\s*$') - _PROCEDURE_PATTERN = _c(r'^\s*([a-z_][a-z0-9_]*)(\s*)(:)(\s*)(procedure)\b') - _ELSE_DO_PATTERN = _c(r'\belse\s+do\s*$') - _PARSE_ARG_PATTERN = _c(r'^\s*parse\s+(upper\s+)?(arg|value)\b') - PATTERNS_AND_WEIGHTS = ( - (_ADDRESS_COMMAND_PATTERN, 0.2), - (_ADDRESS_PATTERN, 0.05), - (_DO_WHILE_PATTERN, 0.1), - (_ELSE_DO_PATTERN, 0.1), - (_IF_THEN_DO_PATTERN, 0.1), - (_PROCEDURE_PATTERN, 0.5), - (_PARSE_ARG_PATTERN, 0.2), - ) - - def analyse_text(text): - """ - Check for inital comment and patterns that distinguish Rexx from other - C-like languages. - """ - if re.search(r'/\*\**\s*rexx', text, re.IGNORECASE): - # Header matches MVS Rexx requirements, this is certainly a Rexx - # script. - return 1.0 - elif text.startswith('/*'): - # Header matches general Rexx requirements; the source code might - # still be any language using C comments such as C++, C# or Java. 
- lowerText = text.lower() - result = sum(weight - for (pattern, weight) in RexxLexer.PATTERNS_AND_WEIGHTS - if pattern.search(lowerText)) + 0.01 - return min(result, 1.0) diff --git a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/text.py b/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/text.py deleted file mode 100644 index de9979c..0000000 --- a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/text.py +++ /dev/null @@ -1,1893 +0,0 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.text - ~~~~~~~~~~~~~~~~~~~~ - - Lexers for non-source code file types. - - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import re -from bisect import bisect - -from pygments.lexer import Lexer, LexerContext, RegexLexer, ExtendedRegexLexer, \ - bygroups, include, using, this, do_insertions -from pygments.token import Punctuation, Text, Comment, Keyword, Name, String, \ - Generic, Operator, Number, Whitespace, Literal -from pygments.util import get_bool_opt, ClassNotFound -from pygments.lexers.other import BashLexer - -__all__ = ['IniLexer', 'PropertiesLexer', 'SourcesListLexer', 'BaseMakefileLexer', - 'MakefileLexer', 'DiffLexer', 'IrcLogsLexer', 'TexLexer', - 'GroffLexer', 'ApacheConfLexer', 'BBCodeLexer', 'MoinWikiLexer', - 'RstLexer', 'VimLexer', 'GettextLexer', 'SquidConfLexer', - 'DebianControlLexer', 'DarcsPatchLexer', 'YamlLexer', - 'LighttpdConfLexer', 'NginxConfLexer', 'CMakeLexer', 'HttpLexer', - 'PyPyLogLexer', 'RegeditLexer', 'HxmlLexer', 'EbnfLexer'] - - -class IniLexer(RegexLexer): - """ - Lexer for configuration files in INI style. 
- """ - - name = 'INI' - aliases = ['ini', 'cfg', 'dosini'] - filenames = ['*.ini', '*.cfg'] - mimetypes = ['text/x-ini'] - - tokens = { - 'root': [ - (r'\s+', Text), - (r'[;#].*', Comment.Single), - (r'\[.*?\]$', Keyword), - (r'(.*?)([ \t]*)(=)([ \t]*)(.*(?:\n[ \t].+)*)', - bygroups(Name.Attribute, Text, Operator, Text, String)) - ] - } - - def analyse_text(text): - npos = text.find('\n') - if npos < 3: - return False - return text[0] == '[' and text[npos-1] == ']' - - -class RegeditLexer(RegexLexer): - """ - Lexer for `Windows Registry - `_ files produced - by regedit. - - *New in Pygments 1.6.* - """ - - name = 'reg' - aliases = ['registry'] - filenames = ['*.reg'] - mimetypes = ['text/x-windows-registry'] - - tokens = { - 'root': [ - (r'Windows Registry Editor.*', Text), - (r'\s+', Text), - (r'[;#].*', Comment.Single), - (r'(\[)(-?)(HKEY_[A-Z_]+)(.*?\])$', - bygroups(Keyword, Operator, Name.Builtin, Keyword)), - # String keys, which obey somewhat normal escaping - (r'("(?:\\"|\\\\|[^"])+")([ \t]*)(=)([ \t]*)', - bygroups(Name.Attribute, Text, Operator, Text), - 'value'), - # Bare keys (includes @) - (r'(.*?)([ \t]*)(=)([ \t]*)', - bygroups(Name.Attribute, Text, Operator, Text), - 'value'), - ], - 'value': [ - (r'-', Operator, '#pop'), # delete value - (r'(dword|hex(?:\([0-9a-fA-F]\))?)(:)([0-9a-fA-F,]+)', - bygroups(Name.Variable, Punctuation, Number), '#pop'), - # As far as I know, .reg files do not support line continuation. - (r'.*', String, '#pop'), - ] - } - - def analyse_text(text): - return text.startswith('Windows Registry Editor') - - -class PropertiesLexer(RegexLexer): - """ - Lexer for configuration files in Java's properties format. 
- - *New in Pygments 1.4.* - """ - - name = 'Properties' - aliases = ['properties', 'jproperties'] - filenames = ['*.properties'] - mimetypes = ['text/x-java-properties'] - - tokens = { - 'root': [ - (r'\s+', Text), - (r'(?:[;#]|//).*$', Comment), - (r'(.*?)([ \t]*)([=:])([ \t]*)(.*(?:(?<=\\)\n.*)*)', - bygroups(Name.Attribute, Text, Operator, Text, String)), - ], - } - - -class SourcesListLexer(RegexLexer): - """ - Lexer that highlights debian sources.list files. - - *New in Pygments 0.7.* - """ - - name = 'Debian Sourcelist' - aliases = ['sourceslist', 'sources.list', 'debsources'] - filenames = ['sources.list'] - mimetype = ['application/x-debian-sourceslist'] - - tokens = { - 'root': [ - (r'\s+', Text), - (r'#.*?$', Comment), - (r'^(deb(?:-src)?)(\s+)', - bygroups(Keyword, Text), 'distribution') - ], - 'distribution': [ - (r'#.*?$', Comment, '#pop'), - (r'\$\(ARCH\)', Name.Variable), - (r'[^\s$[]+', String), - (r'\[', String.Other, 'escaped-distribution'), - (r'\$', String), - (r'\s+', Text, 'components') - ], - 'escaped-distribution': [ - (r'\]', String.Other, '#pop'), - (r'\$\(ARCH\)', Name.Variable), - (r'[^\]$]+', String.Other), - (r'\$', String.Other) - ], - 'components': [ - (r'#.*?$', Comment, '#pop:2'), - (r'$', Text, '#pop:2'), - (r'\s+', Text), - (r'\S+', Keyword.Pseudo), - ] - } - - def analyse_text(text): - for line in text.split('\n'): - line = line.strip() - if not (line.startswith('#') or line.startswith('deb ') or - line.startswith('deb-src ') or not line): - return False - return True - - -class MakefileLexer(Lexer): - """ - Lexer for BSD and GNU make extensions (lenient enough to handle both in - the same file even). 
- - *Rewritten in Pygments 0.10.* - """ - - name = 'Makefile' - aliases = ['make', 'makefile', 'mf', 'bsdmake'] - filenames = ['*.mak', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'] - mimetypes = ['text/x-makefile'] - - r_special = re.compile(r'^(?:' - # BSD Make - r'\.\s*(include|undef|error|warning|if|else|elif|endif|for|endfor)|' - # GNU Make - r'\s*(ifeq|ifneq|ifdef|ifndef|else|endif|-?include|define|endef|:))(?=\s)') - r_comment = re.compile(r'^\s*@?#') - - def get_tokens_unprocessed(self, text): - ins = [] - lines = text.splitlines(True) - done = '' - lex = BaseMakefileLexer(**self.options) - backslashflag = False - for line in lines: - if self.r_special.match(line) or backslashflag: - ins.append((len(done), [(0, Comment.Preproc, line)])) - backslashflag = line.strip().endswith('\\') - elif self.r_comment.match(line): - ins.append((len(done), [(0, Comment, line)])) - else: - done += line - for item in do_insertions(ins, lex.get_tokens_unprocessed(done)): - yield item - - -class BaseMakefileLexer(RegexLexer): - """ - Lexer for simple Makefiles (no preprocessing). 
- - *New in Pygments 0.10.* - """ - - name = 'Base Makefile' - aliases = ['basemake'] - filenames = [] - mimetypes = [] - - tokens = { - 'root': [ - (r'^(?:[\t ]+.*\n|\n)+', using(BashLexer)), - (r'\$\((?:.*\\\n|.*\n)+', using(BashLexer)), - (r'\s+', Text), - (r'#.*?\n', Comment), - (r'(export)(\s+)(?=[a-zA-Z0-9_${}\t -]+\n)', - bygroups(Keyword, Text), 'export'), - (r'export\s+', Keyword), - # assignment - (r'([a-zA-Z0-9_${}.-]+)(\s*)([!?:+]?=)([ \t]*)((?:.*\\\n)+|.*\n)', - bygroups(Name.Variable, Text, Operator, Text, using(BashLexer))), - # strings - (r'(?s)"(\\\\|\\.|[^"\\])*"', String.Double), - (r"(?s)'(\\\\|\\.|[^'\\])*'", String.Single), - # targets - (r'([^\n:]+)(:+)([ \t]*)', bygroups(Name.Function, Operator, Text), - 'block-header'), - # TODO: add paren handling (grr) - ], - 'export': [ - (r'[a-zA-Z0-9_${}-]+', Name.Variable), - (r'\n', Text, '#pop'), - (r'\s+', Text), - ], - 'block-header': [ - (r'[^,\\\n#]+', Number), - (r',', Punctuation), - (r'#.*?\n', Comment), - (r'\\\n', Text), # line continuation - (r'\\.', Text), - (r'(?:[\t ]+.*\n|\n)+', using(BashLexer), '#pop'), - ], - } - - -class DiffLexer(RegexLexer): - """ - Lexer for unified or context-style diffs or patches. - """ - - name = 'Diff' - aliases = ['diff', 'udiff'] - filenames = ['*.diff', '*.patch'] - mimetypes = ['text/x-diff', 'text/x-patch'] - - tokens = { - 'root': [ - (r' .*\n', Text), - (r'\+.*\n', Generic.Inserted), - (r'-.*\n', Generic.Deleted), - (r'!.*\n', Generic.Strong), - (r'@.*\n', Generic.Subheading), - (r'([Ii]ndex|diff).*\n', Generic.Heading), - (r'=.*\n', Generic.Heading), - (r'.*\n', Text), - ] - } - - def analyse_text(text): - if text[:7] == 'Index: ': - return True - if text[:5] == 'diff ': - return True - if text[:4] == '--- ': - return 0.9 - - -DPATCH_KEYWORDS = ['hunk', 'addfile', 'adddir', 'rmfile', 'rmdir', 'move', - 'replace'] - -class DarcsPatchLexer(RegexLexer): - """ - DarcsPatchLexer is a lexer for the various versions of the darcs patch - format. 
Examples of this format are derived by commands such as - ``darcs annotate --patch`` and ``darcs send``. - - *New in Pygments 0.10.* - """ - name = 'Darcs Patch' - aliases = ['dpatch'] - filenames = ['*.dpatch', '*.darcspatch'] - - tokens = { - 'root': [ - (r'<', Operator), - (r'>', Operator), - (r'{', Operator), - (r'}', Operator), - (r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)(\])', - bygroups(Operator, Keyword, Name, Text, Name, Operator, - Literal.Date, Text, Operator)), - (r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)', - bygroups(Operator, Keyword, Name, Text, Name, Operator, - Literal.Date, Text), 'comment'), - (r'New patches:', Generic.Heading), - (r'Context:', Generic.Heading), - (r'Patch bundle hash:', Generic.Heading), - (r'(\s*)(%s)(.*\n)' % '|'.join(DPATCH_KEYWORDS), - bygroups(Text, Keyword, Text)), - (r'\+', Generic.Inserted, "insert"), - (r'-', Generic.Deleted, "delete"), - (r'.*\n', Text), - ], - 'comment': [ - (r'[^\]].*\n', Comment), - (r'\]', Operator, "#pop"), - ], - 'specialText': [ # darcs add [_CODE_] special operators for clarity - (r'\n', Text, "#pop"), # line-based - (r'\[_[^_]*_]', Operator), - ], - 'insert': [ - include('specialText'), - (r'\[', Generic.Inserted), - (r'[^\n\[]+', Generic.Inserted), - ], - 'delete': [ - include('specialText'), - (r'\[', Generic.Deleted), - (r'[^\n\[]+', Generic.Deleted), - ], - } - - -class IrcLogsLexer(RegexLexer): - """ - Lexer for IRC logs in *irssi*, *xchat* or *weechat* style. - """ - - name = 'IRC logs' - aliases = ['irc'] - filenames = ['*.weechatlog'] - mimetypes = ['text/x-irclog'] - - flags = re.VERBOSE | re.MULTILINE - timestamp = r""" - ( - # irssi / xchat and others - (?: \[|\()? # Opening bracket or paren for the timestamp - (?: # Timestamp - (?: (?:\d{1,4} [-/]?)+ # Date as - or /-separated groups of digits - [T ])? 
# Date/time separator: T or space - (?: \d?\d [:.]?)+ # Time as :/.-separated groups of 1 or 2 digits - ) - (?: \]|\))?\s+ # Closing bracket or paren for the timestamp - | - # weechat - \d{4}\s\w{3}\s\d{2}\s # Date - \d{2}:\d{2}:\d{2}\s+ # Time + Whitespace - | - # xchat - \w{3}\s\d{2}\s # Date - \d{2}:\d{2}:\d{2}\s+ # Time + Whitespace - )? - """ - tokens = { - 'root': [ - # log start/end - (r'^\*\*\*\*(.*)\*\*\*\*$', Comment), - # hack - ("^" + timestamp + r'(\s*<[^>]*>\s*)$', bygroups(Comment.Preproc, Name.Tag)), - # normal msgs - ("^" + timestamp + r""" - (\s*<.*?>\s*) # Nick """, - bygroups(Comment.Preproc, Name.Tag), 'msg'), - # /me msgs - ("^" + timestamp + r""" - (\s*[*]\s+) # Star - (\S+\s+.*?\n) # Nick + rest of message """, - bygroups(Comment.Preproc, Keyword, Generic.Inserted)), - # join/part msgs - ("^" + timestamp + r""" - (\s*(?:\*{3}|?)\s*) # Star(s) or symbols - (\S+\s+) # Nick + Space - (.*?\n) # Rest of message """, - bygroups(Comment.Preproc, Keyword, String, Comment)), - (r"^.*?\n", Text), - ], - 'msg': [ - (r"\S+:(?!//)", Name.Attribute), # Prefix - (r".*\n", Text, '#pop'), - ], - } - - -class BBCodeLexer(RegexLexer): - """ - A lexer that highlights BBCode(-like) syntax. - - *New in Pygments 0.6.* - """ - - name = 'BBCode' - aliases = ['bbcode'] - mimetypes = ['text/x-bbcode'] - - tokens = { - 'root': [ - (r'[^[]+', Text), - # tag/end tag begin - (r'\[/?\w+', Keyword, 'tag'), - # stray bracket - (r'\[', Text), - ], - 'tag': [ - (r'\s+', Text), - # attribute with value - (r'(\w+)(=)("?[^\s"\]]+"?)', - bygroups(Name.Attribute, Operator, String)), - # tag argument (a la [color=green]) - (r'(=)("?[^\s"\]]+"?)', - bygroups(Operator, String)), - # tag end - (r'\]', Keyword, '#pop'), - ], - } - - -class TexLexer(RegexLexer): - """ - Lexer for the TeX and LaTeX typesetting languages. 
- """ - - name = 'TeX' - aliases = ['tex', 'latex'] - filenames = ['*.tex', '*.aux', '*.toc'] - mimetypes = ['text/x-tex', 'text/x-latex'] - - tokens = { - 'general': [ - (r'%.*?\n', Comment), - (r'[{}]', Name.Builtin), - (r'[&_^]', Name.Builtin), - ], - 'root': [ - (r'\\\[', String.Backtick, 'displaymath'), - (r'\\\(', String, 'inlinemath'), - (r'\$\$', String.Backtick, 'displaymath'), - (r'\$', String, 'inlinemath'), - (r'\\([a-zA-Z]+|.)', Keyword, 'command'), - include('general'), - (r'[^\\$%&_^{}]+', Text), - ], - 'math': [ - (r'\\([a-zA-Z]+|.)', Name.Variable), - include('general'), - (r'[0-9]+', Number), - (r'[-=!+*/()\[\]]', Operator), - (r'[^=!+*/()\[\]\\$%&_^{}0-9-]+', Name.Builtin), - ], - 'inlinemath': [ - (r'\\\)', String, '#pop'), - (r'\$', String, '#pop'), - include('math'), - ], - 'displaymath': [ - (r'\\\]', String, '#pop'), - (r'\$\$', String, '#pop'), - (r'\$', Name.Builtin), - include('math'), - ], - 'command': [ - (r'\[.*?\]', Name.Attribute), - (r'\*', Keyword), - (r'', Text, '#pop'), - ], - } - - def analyse_text(text): - for start in ("\\documentclass", "\\input", "\\documentstyle", - "\\relax"): - if text[:len(start)] == start: - return True - - -class GroffLexer(RegexLexer): - """ - Lexer for the (g)roff typesetting language, supporting groff - extensions. Mainly useful for highlighting manpage sources. - - *New in Pygments 0.6.* - """ - - name = 'Groff' - aliases = ['groff', 'nroff', 'man'] - filenames = ['*.[1234567]', '*.man'] - mimetypes = ['application/x-troff', 'text/troff'] - - tokens = { - 'root': [ - (r'(\.)(\w+)', bygroups(Text, Keyword), 'request'), - (r'\.', Punctuation, 'request'), - # Regular characters, slurp till we find a backslash or newline - (r'[^\\\n]*', Text, 'textline'), - ], - 'textline': [ - include('escapes'), - (r'[^\\\n]+', Text), - (r'\n', Text, '#pop'), - ], - 'escapes': [ - # groff has many ways to write escapes. 
- (r'\\"[^\n]*', Comment), - (r'\\[fn]\w', String.Escape), - (r'\\\(.{2}', String.Escape), - (r'\\.\[.*\]', String.Escape), - (r'\\.', String.Escape), - (r'\\\n', Text, 'request'), - ], - 'request': [ - (r'\n', Text, '#pop'), - include('escapes'), - (r'"[^\n"]+"', String.Double), - (r'\d+', Number), - (r'\S+', String), - (r'\s+', Text), - ], - } - - def analyse_text(text): - if text[:1] != '.': - return False - if text[:3] == '.\\"': - return True - if text[:4] == '.TH ': - return True - if text[1:3].isalnum() and text[3].isspace(): - return 0.9 - - -class ApacheConfLexer(RegexLexer): - """ - Lexer for configuration files following the Apache config file - format. - - *New in Pygments 0.6.* - """ - - name = 'ApacheConf' - aliases = ['apacheconf', 'aconf', 'apache'] - filenames = ['.htaccess', 'apache.conf', 'apache2.conf'] - mimetypes = ['text/x-apacheconf'] - flags = re.MULTILINE | re.IGNORECASE - - tokens = { - 'root': [ - (r'\s+', Text), - (r'(#.*?)$', Comment), - (r'(<[^\s>]+)(?:(\s+)(.*?))?(>)', - bygroups(Name.Tag, Text, String, Name.Tag)), - (r'([a-zA-Z][a-zA-Z0-9_]*)(\s+)', - bygroups(Name.Builtin, Text), 'value'), - (r'\.+', Text), - ], - 'value': [ - (r'$', Text, '#pop'), - (r'[^\S\n]+', Text), - (r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number), - (r'\d+', Number), - (r'/([a-zA-Z0-9][a-zA-Z0-9_./-]+)', String.Other), - (r'(on|off|none|any|all|double|email|dns|min|minimal|' - r'os|productonly|full|emerg|alert|crit|error|warn|' - r'notice|info|debug|registry|script|inetd|standalone|' - r'user|group)\b', Keyword), - (r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double), - (r'[^\s"]+', Text) - ] - } - - -class MoinWikiLexer(RegexLexer): - """ - For MoinMoin (and Trac) Wiki markup. 
- - *New in Pygments 0.7.* - """ - - name = 'MoinMoin/Trac Wiki markup' - aliases = ['trac-wiki', 'moin'] - filenames = [] - mimetypes = ['text/x-trac-wiki'] - flags = re.MULTILINE | re.IGNORECASE - - tokens = { - 'root': [ - (r'^#.*$', Comment), - (r'(!)(\S+)', bygroups(Keyword, Text)), # Ignore-next - # Titles - (r'^(=+)([^=]+)(=+)(\s*#.+)?$', - bygroups(Generic.Heading, using(this), Generic.Heading, String)), - # Literal code blocks, with optional shebang - (r'({{{)(\n#!.+)?', bygroups(Name.Builtin, Name.Namespace), 'codeblock'), - (r'(\'\'\'?|\|\||`|__|~~|\^|,,|::)', Comment), # Formatting - # Lists - (r'^( +)([.*-])( )', bygroups(Text, Name.Builtin, Text)), - (r'^( +)([a-z]{1,5}\.)( )', bygroups(Text, Name.Builtin, Text)), - # Other Formatting - (r'\[\[\w+.*?\]\]', Keyword), # Macro - (r'(\[[^\s\]]+)(\s+[^\]]+?)?(\])', - bygroups(Keyword, String, Keyword)), # Link - (r'^----+$', Keyword), # Horizontal rules - (r'[^\n\'\[{!_~^,|]+', Text), - (r'\n', Text), - (r'.', Text), - ], - 'codeblock': [ - (r'}}}', Name.Builtin, '#pop'), - # these blocks are allowed to be nested in Trac, but not MoinMoin - (r'{{{', Text, '#push'), - (r'[^{}]+', Comment.Preproc), # slurp boring text - (r'.', Comment.Preproc), # allow loose { or } - ], - } - - -class RstLexer(RegexLexer): - """ - For `reStructuredText `_ markup. - - *New in Pygments 0.7.* - - Additional options accepted: - - `handlecodeblocks` - Highlight the contents of ``.. sourcecode:: langauge`` and - ``.. code:: language`` directives with a lexer for the given - language (default: ``True``). 
*New in Pygments 0.8.* - """ - name = 'reStructuredText' - aliases = ['rst', 'rest', 'restructuredtext'] - filenames = ['*.rst', '*.rest'] - mimetypes = ["text/x-rst", "text/prs.fallenstein.rst"] - flags = re.MULTILINE - - def _handle_sourcecode(self, match): - from pygments.lexers import get_lexer_by_name - - # section header - yield match.start(1), Punctuation, match.group(1) - yield match.start(2), Text, match.group(2) - yield match.start(3), Operator.Word, match.group(3) - yield match.start(4), Punctuation, match.group(4) - yield match.start(5), Text, match.group(5) - yield match.start(6), Keyword, match.group(6) - yield match.start(7), Text, match.group(7) - - # lookup lexer if wanted and existing - lexer = None - if self.handlecodeblocks: - try: - lexer = get_lexer_by_name(match.group(6).strip()) - except ClassNotFound: - pass - indention = match.group(8) - indention_size = len(indention) - code = (indention + match.group(9) + match.group(10) + match.group(11)) - - # no lexer for this language. handle it like it was a code block - if lexer is None: - yield match.start(8), String, code - return - - # highlight the lines with the lexer. - ins = [] - codelines = code.splitlines(True) - code = '' - for line in codelines: - if len(line) > indention_size: - ins.append((len(code), [(0, Text, line[:indention_size])])) - code += line[indention_size:] - else: - code += line - for item in do_insertions(ins, lexer.get_tokens_unprocessed(code)): - yield item - - # from docutils.parsers.rst.states - closers = u'\'")]}>\u2019\u201d\xbb!?' 
- unicode_delimiters = u'\u2010\u2011\u2012\u2013\u2014\u00a0' - end_string_suffix = (r'((?=$)|(?=[-/:.,; \n\x00%s%s]))' - % (re.escape(unicode_delimiters), - re.escape(closers))) - - tokens = { - 'root': [ - # Heading with overline - (r'^(=+|-+|`+|:+|\.+|\'+|"+|~+|\^+|_+|\*+|\++|#+)([ \t]*\n)' - r'(.+)(\n)(\1)(\n)', - bygroups(Generic.Heading, Text, Generic.Heading, - Text, Generic.Heading, Text)), - # Plain heading - (r'^(\S.*)(\n)(={3,}|-{3,}|`{3,}|:{3,}|\.{3,}|\'{3,}|"{3,}|' - r'~{3,}|\^{3,}|_{3,}|\*{3,}|\+{3,}|#{3,})(\n)', - bygroups(Generic.Heading, Text, Generic.Heading, Text)), - # Bulleted lists - (r'^(\s*)([-*+])( .+\n(?:\1 .+\n)*)', - bygroups(Text, Number, using(this, state='inline'))), - # Numbered lists - (r'^(\s*)([0-9#ivxlcmIVXLCM]+\.)( .+\n(?:\1 .+\n)*)', - bygroups(Text, Number, using(this, state='inline'))), - (r'^(\s*)(\(?[0-9#ivxlcmIVXLCM]+\))( .+\n(?:\1 .+\n)*)', - bygroups(Text, Number, using(this, state='inline'))), - # Numbered, but keep words at BOL from becoming lists - (r'^(\s*)([A-Z]+\.)( .+\n(?:\1 .+\n)+)', - bygroups(Text, Number, using(this, state='inline'))), - (r'^(\s*)(\(?[A-Za-z]+\))( .+\n(?:\1 .+\n)+)', - bygroups(Text, Number, using(this, state='inline'))), - # Line blocks - (r'^(\s*)(\|)( .+\n(?:\| .+\n)*)', - bygroups(Text, Operator, using(this, state='inline'))), - # Sourcecode directives - (r'^( *\.\.)(\s*)((?:source)?code)(::)([ \t]*)([^\n]+)' - r'(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\8.*|)\n)+)', - _handle_sourcecode), - # A directive - (r'^( *\.\.)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))', - bygroups(Punctuation, Text, Operator.Word, Punctuation, Text, - using(this, state='inline'))), - # A reference target - (r'^( *\.\.)(\s*)(_(?:[^:\\]|\\.)+:)(.*?)$', - bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))), - # A footnote/citation target - (r'^( *\.\.)(\s*)(\[.+\])(.*?)$', - bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))), - # A substitution def - (r'^( 
*\.\.)(\s*)(\|.+\|)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))', - bygroups(Punctuation, Text, Name.Tag, Text, Operator.Word, - Punctuation, Text, using(this, state='inline'))), - # Comments - (r'^ *\.\..*(\n( +.*\n|\n)+)?', Comment.Preproc), - # Field list - (r'^( *)(:[a-zA-Z-]+:)(\s*)$', bygroups(Text, Name.Class, Text)), - (r'^( *)(:.*?:)([ \t]+)(.*?)$', - bygroups(Text, Name.Class, Text, Name.Function)), - # Definition list - (r'^([^ ].*(?)(`__?)', # reference with inline target - bygroups(String, String.Interpol, String)), - (r'`.+?`__?', String), # reference - (r'(`.+?`)(:[a-zA-Z0-9:-]+?:)?', - bygroups(Name.Variable, Name.Attribute)), # role - (r'(:[a-zA-Z0-9:-]+?:)(`.+?`)', - bygroups(Name.Attribute, Name.Variable)), # role (content first) - (r'\*\*.+?\*\*', Generic.Strong), # Strong emphasis - (r'\*.+?\*', Generic.Emph), # Emphasis - (r'\[.*?\]_', String), # Footnote or citation - (r'<.+?>', Name.Tag), # Hyperlink - (r'[^\\\n\[*`:]+', Text), - (r'.', Text), - ], - 'literal': [ - (r'[^`]+', String), - (r'``' + end_string_suffix, String, '#pop'), - (r'`', String), - ] - } - - def __init__(self, **options): - self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True) - RegexLexer.__init__(self, **options) - - def analyse_text(text): - if text[:2] == '..' and text[2:3] != '.': - return 0.3 - p1 = text.find("\n") - p2 = text.find("\n", p1 + 1) - if (p2 > -1 and # has two lines - p1 * 2 + 1 == p2 and # they are the same length - text[p1+1] in '-=' and # the next line both starts and ends with - text[p1+1] == text[p2-1]): # ...a sufficiently high header - return 0.5 - - -class VimLexer(RegexLexer): - """ - Lexer for VimL script files. 
- - *New in Pygments 0.8.* - """ - name = 'VimL' - aliases = ['vim'] - filenames = ['*.vim', '.vimrc', '.exrc', '.gvimrc', - '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'] - mimetypes = ['text/x-vim'] - flags = re.MULTILINE - - tokens = { - 'root': [ - (r'^\s*".*', Comment), - - (r'[ \t]+', Text), - # TODO: regexes can have other delims - (r'/(\\\\|\\/|[^\n/])*/', String.Regex), - (r'"(\\\\|\\"|[^\n"])*"', String.Double), - (r"'(\\\\|\\'|[^\n'])*'", String.Single), - - # Who decided that doublequote was a good comment character?? - (r'(?<=\s)"[^\-:.%#=*].*', Comment), - (r'-?\d+', Number), - (r'#[0-9a-f]{6}', Number.Hex), - (r'^:', Punctuation), - (r'[()<>+=!|,~-]', Punctuation), # Inexact list. Looks decent. - (r'\b(let|if|else|endif|elseif|fun|function|endfunction)\b', - Keyword), - (r'\b(NONE|bold|italic|underline|dark|light)\b', Name.Builtin), - (r'\b\w+\b', Name.Other), # These are postprocessed below - (r'.', Text), - ], - } - def __init__(self, **options): - from pygments.lexers._vimbuiltins import command, option, auto - self._cmd = command - self._opt = option - self._aut = auto - - RegexLexer.__init__(self, **options) - - def is_in(self, w, mapping): - r""" - It's kind of difficult to decide if something might be a keyword - in VimL because it allows you to abbreviate them. In fact, - 'ab[breviate]' is a good example. :ab, :abbre, or :abbreviate are - valid ways to call it so rather than making really awful regexps - like:: - - \bab(?:b(?:r(?:e(?:v(?:i(?:a(?:t(?:e)?)?)?)?)?)?)?)?\b - - we match `\b\w+\b` and then call is_in() on those tokens. See - `scripts/get_vimkw.py` for how the lists are extracted. 
- """ - p = bisect(mapping, (w,)) - if p > 0: - if mapping[p-1][0] == w[:len(mapping[p-1][0])] and \ - mapping[p-1][1][:len(w)] == w: return True - if p < len(mapping): - return mapping[p][0] == w[:len(mapping[p][0])] and \ - mapping[p][1][:len(w)] == w - return False - - def get_tokens_unprocessed(self, text): - # TODO: builtins are only subsequent tokens on lines - # and 'keywords' only happen at the beginning except - # for :au ones - for index, token, value in \ - RegexLexer.get_tokens_unprocessed(self, text): - if token is Name.Other: - if self.is_in(value, self._cmd): - yield index, Keyword, value - elif self.is_in(value, self._opt) or \ - self.is_in(value, self._aut): - yield index, Name.Builtin, value - else: - yield index, Text, value - else: - yield index, token, value - - -class GettextLexer(RegexLexer): - """ - Lexer for Gettext catalog files. - - *New in Pygments 0.9.* - """ - name = 'Gettext Catalog' - aliases = ['pot', 'po'] - filenames = ['*.pot', '*.po'] - mimetypes = ['application/x-gettext', 'text/x-gettext', 'text/gettext'] - - tokens = { - 'root': [ - (r'^#,\s.*?$', Keyword.Type), - (r'^#:\s.*?$', Keyword.Declaration), - #(r'^#$', Comment), - (r'^(#|#\.\s|#\|\s|#~\s|#\s).*$', Comment.Single), - (r'^(")([A-Za-z-]+:)(.*")$', - bygroups(String, Name.Property, String)), - (r'^".*"$', String), - (r'^(msgid|msgid_plural|msgstr)(\s+)(".*")$', - bygroups(Name.Variable, Text, String)), - (r'^(msgstr\[)(\d)(\])(\s+)(".*")$', - bygroups(Name.Variable, Number.Integer, Name.Variable, Text, String)), - ] - } - - -class SquidConfLexer(RegexLexer): - """ - Lexer for `squid `_ configuration files. 
- - *New in Pygments 0.9.* - """ - - name = 'SquidConf' - aliases = ['squidconf', 'squid.conf', 'squid'] - filenames = ['squid.conf'] - mimetypes = ['text/x-squidconf'] - flags = re.IGNORECASE - - keywords = [ - "access_log", "acl", "always_direct", "announce_host", - "announce_period", "announce_port", "announce_to", "anonymize_headers", - "append_domain", "as_whois_server", "auth_param_basic", - "authenticate_children", "authenticate_program", "authenticate_ttl", - "broken_posts", "buffered_logs", "cache_access_log", "cache_announce", - "cache_dir", "cache_dns_program", "cache_effective_group", - "cache_effective_user", "cache_host", "cache_host_acl", - "cache_host_domain", "cache_log", "cache_mem", "cache_mem_high", - "cache_mem_low", "cache_mgr", "cachemgr_passwd", "cache_peer", - "cache_peer_access", "cahce_replacement_policy", "cache_stoplist", - "cache_stoplist_pattern", "cache_store_log", "cache_swap", - "cache_swap_high", "cache_swap_log", "cache_swap_low", "client_db", - "client_lifetime", "client_netmask", "connect_timeout", "coredump_dir", - "dead_peer_timeout", "debug_options", "delay_access", "delay_class", - "delay_initial_bucket_level", "delay_parameters", "delay_pools", - "deny_info", "dns_children", "dns_defnames", "dns_nameservers", - "dns_testnames", "emulate_httpd_log", "err_html_text", - "fake_user_agent", "firewall_ip", "forwarded_for", "forward_snmpd_port", - "fqdncache_size", "ftpget_options", "ftpget_program", "ftp_list_width", - "ftp_passive", "ftp_user", "half_closed_clients", "header_access", - "header_replace", "hierarchy_stoplist", "high_response_time_warning", - "high_page_fault_warning", "hosts_file", "htcp_port", "http_access", - "http_anonymizer", "httpd_accel", "httpd_accel_host", - "httpd_accel_port", "httpd_accel_uses_host_header", - "httpd_accel_with_proxy", "http_port", "http_reply_access", - "icp_access", "icp_hit_stale", "icp_port", "icp_query_timeout", - "ident_lookup", "ident_lookup_access", "ident_timeout", - 
"incoming_http_average", "incoming_icp_average", "inside_firewall", - "ipcache_high", "ipcache_low", "ipcache_size", "local_domain", - "local_ip", "logfile_rotate", "log_fqdn", "log_icp_queries", - "log_mime_hdrs", "maximum_object_size", "maximum_single_addr_tries", - "mcast_groups", "mcast_icp_query_timeout", "mcast_miss_addr", - "mcast_miss_encode_key", "mcast_miss_port", "memory_pools", - "memory_pools_limit", "memory_replacement_policy", "mime_table", - "min_http_poll_cnt", "min_icp_poll_cnt", "minimum_direct_hops", - "minimum_object_size", "minimum_retry_timeout", "miss_access", - "negative_dns_ttl", "negative_ttl", "neighbor_timeout", - "neighbor_type_domain", "netdb_high", "netdb_low", "netdb_ping_period", - "netdb_ping_rate", "never_direct", "no_cache", "passthrough_proxy", - "pconn_timeout", "pid_filename", "pinger_program", "positive_dns_ttl", - "prefer_direct", "proxy_auth", "proxy_auth_realm", "query_icmp", - "quick_abort", "quick_abort", "quick_abort_max", "quick_abort_min", - "quick_abort_pct", "range_offset_limit", "read_timeout", - "redirect_children", "redirect_program", - "redirect_rewrites_host_header", "reference_age", "reference_age", - "refresh_pattern", "reload_into_ims", "request_body_max_size", - "request_size", "request_timeout", "shutdown_lifetime", - "single_parent_bypass", "siteselect_timeout", "snmp_access", - "snmp_incoming_address", "snmp_port", "source_ping", "ssl_proxy", - "store_avg_object_size", "store_objects_per_bucket", - "strip_query_terms", "swap_level1_dirs", "swap_level2_dirs", - "tcp_incoming_address", "tcp_outgoing_address", "tcp_recv_bufsize", - "test_reachability", "udp_hit_obj", "udp_hit_obj_size", - "udp_incoming_address", "udp_outgoing_address", "unique_hostname", - "unlinkd_program", "uri_whitespace", "useragent_log", - "visible_hostname", "wais_relay", "wais_relay_host", "wais_relay_port", - ] - - opts = [ - "proxy-only", "weight", "ttl", "no-query", "default", "round-robin", - "multicast-responder", "on", "off", 
"all", "deny", "allow", "via", - "parent", "no-digest", "heap", "lru", "realm", "children", "q1", "q2", - "credentialsttl", "none", "disable", "offline_toggle", "diskd", - ] - - actions = [ - "shutdown", "info", "parameter", "server_list", "client_list", - r'squid\.conf', - ] - - actions_stats = [ - "objects", "vm_objects", "utilization", "ipcache", "fqdncache", "dns", - "redirector", "io", "reply_headers", "filedescriptors", "netdb", - ] - - actions_log = ["status", "enable", "disable", "clear"] - - acls = [ - "url_regex", "urlpath_regex", "referer_regex", "port", "proto", - "req_mime_type", "rep_mime_type", "method", "browser", "user", "src", - "dst", "time", "dstdomain", "ident", "snmp_community", - ] - - ip_re = ( - r'(?:(?:(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|0x0*[0-9a-f]{1,2}|' - r'0+[1-3]?[0-7]{0,2})(?:\.(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|' - r'0x0*[0-9a-f]{1,2}|0+[1-3]?[0-7]{0,2})){3})|(?!.*::.*::)(?:(?!:)|' - r':(?=:))(?:[0-9a-f]{0,4}(?:(?<=::)|(?`` outputs. - - *New in Pygments 0.9.* - """ - name = 'Debian Control file' - aliases = ['control', 'debcontrol'] - filenames = ['control'] - - tokens = { - 'root': [ - (r'^(Description)', Keyword, 'description'), - (r'^(Maintainer)(:\s*)', bygroups(Keyword, Text), 'maintainer'), - (r'^((Build-)?Depends)', Keyword, 'depends'), - (r'^((?:Python-)?Version)(:\s*)(\S+)$', - bygroups(Keyword, Text, Number)), - (r'^((?:Installed-)?Size)(:\s*)(\S+)$', - bygroups(Keyword, Text, Number)), - (r'^(MD5Sum|SHA1|SHA256)(:\s*)(\S+)$', - bygroups(Keyword, Text, Number)), - (r'^([a-zA-Z\-0-9\.]*?)(:\s*)(.*?)$', - bygroups(Keyword, Whitespace, String)), - ], - 'maintainer': [ - (r'<[^>]+>', Generic.Strong), - (r'<[^>]+>$', Generic.Strong, '#pop'), - (r',\n?', Text), - (r'.', Text), - ], - 'description': [ - (r'(.*)(Homepage)(: )(\S+)', - bygroups(Text, String, Name, Name.Class)), - (r':.*\n', Generic.Strong), - (r' .*\n', Text), - ('', Text, '#pop'), - ], - 'depends': [ - (r':\s*', Text), - 
(r'(\$)(\{)(\w+\s*:\s*\w+)', bygroups(Operator, Text, Name.Entity)), - (r'\(', Text, 'depend_vers'), - (r',', Text), - (r'\|', Operator), - (r'[\s]+', Text), - (r'[}\)]\s*$', Text, '#pop'), - (r'}', Text), - (r'[^,]$', Name.Function, '#pop'), - (r'([\+\.a-zA-Z0-9-])(\s*)', bygroups(Name.Function, Text)), - (r'\[.*?\]', Name.Entity), - ], - 'depend_vers': [ - (r'\),', Text, '#pop'), - (r'\)[^,]', Text, '#pop:2'), - (r'([><=]+)(\s*)([^\)]+)', bygroups(Operator, Text, Number)) - ] - } - - -class YamlLexerContext(LexerContext): - """Indentation context for the YAML lexer.""" - - def __init__(self, *args, **kwds): - super(YamlLexerContext, self).__init__(*args, **kwds) - self.indent_stack = [] - self.indent = -1 - self.next_indent = 0 - self.block_scalar_indent = None - - -class YamlLexer(ExtendedRegexLexer): - """ - Lexer for `YAML `_, a human-friendly data serialization - language. - - *New in Pygments 0.11.* - """ - - name = 'YAML' - aliases = ['yaml'] - filenames = ['*.yaml', '*.yml'] - mimetypes = ['text/x-yaml'] - - - def something(token_class): - """Do not produce empty tokens.""" - def callback(lexer, match, context): - text = match.group() - if not text: - return - yield match.start(), token_class, text - context.pos = match.end() - return callback - - def reset_indent(token_class): - """Reset the indentation levels.""" - def callback(lexer, match, context): - text = match.group() - context.indent_stack = [] - context.indent = -1 - context.next_indent = 0 - context.block_scalar_indent = None - yield match.start(), token_class, text - context.pos = match.end() - return callback - - def save_indent(token_class, start=False): - """Save a possible indentation level.""" - def callback(lexer, match, context): - text = match.group() - extra = '' - if start: - context.next_indent = len(text) - if context.next_indent < context.indent: - while context.next_indent < context.indent: - context.indent = context.indent_stack.pop() - if context.next_indent > context.indent: - 
extra = text[context.indent:] - text = text[:context.indent] - else: - context.next_indent += len(text) - if text: - yield match.start(), token_class, text - if extra: - yield match.start()+len(text), token_class.Error, extra - context.pos = match.end() - return callback - - def set_indent(token_class, implicit=False): - """Set the previously saved indentation level.""" - def callback(lexer, match, context): - text = match.group() - if context.indent < context.next_indent: - context.indent_stack.append(context.indent) - context.indent = context.next_indent - if not implicit: - context.next_indent += len(text) - yield match.start(), token_class, text - context.pos = match.end() - return callback - - def set_block_scalar_indent(token_class): - """Set an explicit indentation level for a block scalar.""" - def callback(lexer, match, context): - text = match.group() - context.block_scalar_indent = None - if not text: - return - increment = match.group(1) - if increment: - current_indent = max(context.indent, 0) - increment = int(increment) - context.block_scalar_indent = current_indent + increment - if text: - yield match.start(), token_class, text - context.pos = match.end() - return callback - - def parse_block_scalar_empty_line(indent_token_class, content_token_class): - """Process an empty line in a block scalar.""" - def callback(lexer, match, context): - text = match.group() - if (context.block_scalar_indent is None or - len(text) <= context.block_scalar_indent): - if text: - yield match.start(), indent_token_class, text - else: - indentation = text[:context.block_scalar_indent] - content = text[context.block_scalar_indent:] - yield match.start(), indent_token_class, indentation - yield (match.start()+context.block_scalar_indent, - content_token_class, content) - context.pos = match.end() - return callback - - def parse_block_scalar_indent(token_class): - """Process indentation spaces in a block scalar.""" - def callback(lexer, match, context): - text = 
match.group() - if context.block_scalar_indent is None: - if len(text) <= max(context.indent, 0): - context.stack.pop() - context.stack.pop() - return - context.block_scalar_indent = len(text) - else: - if len(text) < context.block_scalar_indent: - context.stack.pop() - context.stack.pop() - return - if text: - yield match.start(), token_class, text - context.pos = match.end() - return callback - - def parse_plain_scalar_indent(token_class): - """Process indentation spaces in a plain scalar.""" - def callback(lexer, match, context): - text = match.group() - if len(text) <= context.indent: - context.stack.pop() - context.stack.pop() - return - if text: - yield match.start(), token_class, text - context.pos = match.end() - return callback - - - - tokens = { - # the root rules - 'root': [ - # ignored whitespaces - (r'[ ]+(?=#|$)', Text), - # line breaks - (r'\n+', Text), - # a comment - (r'#[^\n]*', Comment.Single), - # the '%YAML' directive - (r'^%YAML(?=[ ]|$)', reset_indent(Name.Tag), 'yaml-directive'), - # the %TAG directive - (r'^%TAG(?=[ ]|$)', reset_indent(Name.Tag), 'tag-directive'), - # document start and document end indicators - (r'^(?:---|\.\.\.)(?=[ ]|$)', reset_indent(Name.Namespace), - 'block-line'), - # indentation spaces - (r'[ ]*(?![ \t\n\r\f\v]|$)', save_indent(Text, start=True), - ('block-line', 'indentation')), - ], - - # trailing whitespaces after directives or a block scalar indicator - 'ignored-line': [ - # ignored whitespaces - (r'[ ]+(?=#|$)', Text), - # a comment - (r'#[^\n]*', Comment.Single), - # line break - (r'\n', Text, '#pop:2'), - ], - - # the %YAML directive - 'yaml-directive': [ - # the version number - (r'([ ]+)([0-9]+\.[0-9]+)', - bygroups(Text, Number), 'ignored-line'), - ], - - # the %YAG directive - 'tag-directive': [ - # a tag handle and the corresponding prefix - (r'([ ]+)(!|![0-9A-Za-z_-]*!)' - r'([ ]+)(!|!?[0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+)', - bygroups(Text, Keyword.Type, Text, Keyword.Type), - 'ignored-line'), - ], - 
- # block scalar indicators and indentation spaces - 'indentation': [ - # trailing whitespaces are ignored - (r'[ ]*$', something(Text), '#pop:2'), - # whitespaces preceeding block collection indicators - (r'[ ]+(?=[?:-](?:[ ]|$))', save_indent(Text)), - # block collection indicators - (r'[?:-](?=[ ]|$)', set_indent(Punctuation.Indicator)), - # the beginning a block line - (r'[ ]*', save_indent(Text), '#pop'), - ], - - # an indented line in the block context - 'block-line': [ - # the line end - (r'[ ]*(?=#|$)', something(Text), '#pop'), - # whitespaces separating tokens - (r'[ ]+', Text), - # tags, anchors and aliases, - include('descriptors'), - # block collections and scalars - include('block-nodes'), - # flow collections and quoted scalars - include('flow-nodes'), - # a plain scalar - (r'(?=[^ \t\n\r\f\v?:,\[\]{}#&*!|>\'"%@`-]|[?:-][^ \t\n\r\f\v])', - something(Name.Variable), - 'plain-scalar-in-block-context'), - ], - - # tags, anchors, aliases - 'descriptors' : [ - # a full-form tag - (r'!<[0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+>', Keyword.Type), - # a tag in the form '!', '!suffix' or '!handle!suffix' - (r'!(?:[0-9A-Za-z_-]+)?' 
- r'(?:![0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+)?', Keyword.Type), - # an anchor - (r'&[0-9A-Za-z_-]+', Name.Label), - # an alias - (r'\*[0-9A-Za-z_-]+', Name.Variable), - ], - - # block collections and scalars - 'block-nodes': [ - # implicit key - (r':(?=[ ]|$)', set_indent(Punctuation.Indicator, implicit=True)), - # literal and folded scalars - (r'[|>]', Punctuation.Indicator, - ('block-scalar-content', 'block-scalar-header')), - ], - - # flow collections and quoted scalars - 'flow-nodes': [ - # a flow sequence - (r'\[', Punctuation.Indicator, 'flow-sequence'), - # a flow mapping - (r'\{', Punctuation.Indicator, 'flow-mapping'), - # a single-quoted scalar - (r'\'', String, 'single-quoted-scalar'), - # a double-quoted scalar - (r'\"', String, 'double-quoted-scalar'), - ], - - # the content of a flow collection - 'flow-collection': [ - # whitespaces - (r'[ ]+', Text), - # line breaks - (r'\n+', Text), - # a comment - (r'#[^\n]*', Comment.Single), - # simple indicators - (r'[?:,]', Punctuation.Indicator), - # tags, anchors and aliases - include('descriptors'), - # nested collections and quoted scalars - include('flow-nodes'), - # a plain scalar - (r'(?=[^ \t\n\r\f\v?:,\[\]{}#&*!|>\'"%@`])', - something(Name.Variable), - 'plain-scalar-in-flow-context'), - ], - - # a flow sequence indicated by '[' and ']' - 'flow-sequence': [ - # include flow collection rules - include('flow-collection'), - # the closing indicator - (r'\]', Punctuation.Indicator, '#pop'), - ], - - # a flow mapping indicated by '{' and '}' - 'flow-mapping': [ - # include flow collection rules - include('flow-collection'), - # the closing indicator - (r'\}', Punctuation.Indicator, '#pop'), - ], - - # block scalar lines - 'block-scalar-content': [ - # line break - (r'\n', Text), - # empty line - (r'^[ ]+$', - parse_block_scalar_empty_line(Text, Name.Constant)), - # indentation spaces (we may leave the state here) - (r'^[ ]*', parse_block_scalar_indent(Text)), - # line content - (r'[^\n\r\f\v]+', 
Name.Constant), - ], - - # the content of a literal or folded scalar - 'block-scalar-header': [ - # indentation indicator followed by chomping flag - (r'([1-9])?[+-]?(?=[ ]|$)', - set_block_scalar_indent(Punctuation.Indicator), - 'ignored-line'), - # chomping flag followed by indentation indicator - (r'[+-]?([1-9])?(?=[ ]|$)', - set_block_scalar_indent(Punctuation.Indicator), - 'ignored-line'), - ], - - # ignored and regular whitespaces in quoted scalars - 'quoted-scalar-whitespaces': [ - # leading and trailing whitespaces are ignored - (r'^[ ]+', Text), - (r'[ ]+$', Text), - # line breaks are ignored - (r'\n+', Text), - # other whitespaces are a part of the value - (r'[ ]+', Name.Variable), - ], - - # single-quoted scalars - 'single-quoted-scalar': [ - # include whitespace and line break rules - include('quoted-scalar-whitespaces'), - # escaping of the quote character - (r'\'\'', String.Escape), - # regular non-whitespace characters - (r'[^ \t\n\r\f\v\']+', String), - # the closing quote - (r'\'', String, '#pop'), - ], - - # double-quoted scalars - 'double-quoted-scalar': [ - # include whitespace and line break rules - include('quoted-scalar-whitespaces'), - # escaping of special characters - (r'\\[0abt\tn\nvfre "\\N_LP]', String), - # escape codes - (r'\\(?:x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|U[0-9A-Fa-f]{8})', - String.Escape), - # regular non-whitespace characters - (r'[^ \t\n\r\f\v\"\\]+', String), - # the closing quote - (r'"', String, '#pop'), - ], - - # the beginning of a new line while scanning a plain scalar - 'plain-scalar-in-block-context-new-line': [ - # empty lines - (r'^[ ]+$', Text), - # line breaks - (r'\n+', Text), - # document start and document end indicators - (r'^(?=---|\.\.\.)', something(Name.Namespace), '#pop:3'), - # indentation spaces (we may leave the block line state here) - (r'^[ ]*', parse_plain_scalar_indent(Text), '#pop'), - ], - - # a plain scalar in the block context - 'plain-scalar-in-block-context': [ - # the scalar ends with the 
':' indicator - (r'[ ]*(?=:[ ]|:$)', something(Text), '#pop'), - # the scalar ends with whitespaces followed by a comment - (r'[ ]+(?=#)', Text, '#pop'), - # trailing whitespaces are ignored - (r'[ ]+$', Text), - # line breaks are ignored - (r'\n+', Text, 'plain-scalar-in-block-context-new-line'), - # other whitespaces are a part of the value - (r'[ ]+', Literal.Scalar.Plain), - # regular non-whitespace characters - (r'(?::(?![ \t\n\r\f\v])|[^ \t\n\r\f\v:])+', Literal.Scalar.Plain), - ], - - # a plain scalar is the flow context - 'plain-scalar-in-flow-context': [ - # the scalar ends with an indicator character - (r'[ ]*(?=[,:?\[\]{}])', something(Text), '#pop'), - # the scalar ends with a comment - (r'[ ]+(?=#)', Text, '#pop'), - # leading and trailing whitespaces are ignored - (r'^[ ]+', Text), - (r'[ ]+$', Text), - # line breaks are ignored - (r'\n+', Text), - # other whitespaces are a part of the value - (r'[ ]+', Name.Variable), - # regular non-whitespace characters - (r'[^ \t\n\r\f\v,:?\[\]{}]+', Name.Variable), - ], - - } - - def get_tokens_unprocessed(self, text=None, context=None): - if context is None: - context = YamlLexerContext(text, 0) - return super(YamlLexer, self).get_tokens_unprocessed(text, context) - - -class LighttpdConfLexer(RegexLexer): - """ - Lexer for `Lighttpd `_ configuration files. - - *New in Pygments 0.11.* - """ - name = 'Lighttpd configuration file' - aliases = ['lighty', 'lighttpd'] - filenames = [] - mimetypes = ['text/x-lighttpd-conf'] - - tokens = { - 'root': [ - (r'#.*\n', Comment.Single), - (r'/\S*', Name), # pathname - (r'[a-zA-Z._-]+', Keyword), - (r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number), - (r'[0-9]+', Number), - (r'=>|=~|\+=|==|=|\+', Operator), - (r'\$[A-Z]+', Name.Builtin), - (r'[(){}\[\],]', Punctuation), - (r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double), - (r'\s+', Text), - ], - - } - - -class NginxConfLexer(RegexLexer): - """ - Lexer for `Nginx `_ configuration files. 
- - *New in Pygments 0.11.* - """ - name = 'Nginx configuration file' - aliases = ['nginx'] - filenames = [] - mimetypes = ['text/x-nginx-conf'] - - tokens = { - 'root': [ - (r'(include)(\s+)([^\s;]+)', bygroups(Keyword, Text, Name)), - (r'[^\s;#]+', Keyword, 'stmt'), - include('base'), - ], - 'block': [ - (r'}', Punctuation, '#pop:2'), - (r'[^\s;#]+', Keyword.Namespace, 'stmt'), - include('base'), - ], - 'stmt': [ - (r'{', Punctuation, 'block'), - (r';', Punctuation, '#pop'), - include('base'), - ], - 'base': [ - (r'#.*\n', Comment.Single), - (r'on|off', Name.Constant), - (r'\$[^\s;#()]+', Name.Variable), - (r'([a-z0-9.-]+)(:)([0-9]+)', - bygroups(Name, Punctuation, Number.Integer)), - (r'[a-z-]+/[a-z-+]+', String), # mimetype - #(r'[a-zA-Z._-]+', Keyword), - (r'[0-9]+[km]?\b', Number.Integer), - (r'(~)(\s*)([^\s{]+)', bygroups(Punctuation, Text, String.Regex)), - (r'[:=~]', Punctuation), - (r'[^\s;#{}$]+', String), # catch all - (r'/[^\s;#]*', Name), # pathname - (r'\s+', Text), - (r'[$;]', Text), # leftover characters - ], - } - - -class CMakeLexer(RegexLexer): - """ - Lexer for `CMake `_ files. 
- - *New in Pygments 1.2.* - """ - name = 'CMake' - aliases = ['cmake'] - filenames = ['*.cmake', 'CMakeLists.txt'] - mimetypes = ['text/x-cmake'] - - tokens = { - 'root': [ - #(r'(ADD_CUSTOM_COMMAND|ADD_CUSTOM_TARGET|ADD_DEFINITIONS|' - # r'ADD_DEPENDENCIES|ADD_EXECUTABLE|ADD_LIBRARY|ADD_SUBDIRECTORY|' - # r'ADD_TEST|AUX_SOURCE_DIRECTORY|BUILD_COMMAND|BUILD_NAME|' - # r'CMAKE_MINIMUM_REQUIRED|CONFIGURE_FILE|CREATE_TEST_SOURCELIST|' - # r'ELSE|ELSEIF|ENABLE_LANGUAGE|ENABLE_TESTING|ENDFOREACH|' - # r'ENDFUNCTION|ENDIF|ENDMACRO|ENDWHILE|EXEC_PROGRAM|' - # r'EXECUTE_PROCESS|EXPORT_LIBRARY_DEPENDENCIES|FILE|FIND_FILE|' - # r'FIND_LIBRARY|FIND_PACKAGE|FIND_PATH|FIND_PROGRAM|FLTK_WRAP_UI|' - # r'FOREACH|FUNCTION|GET_CMAKE_PROPERTY|GET_DIRECTORY_PROPERTY|' - # r'GET_FILENAME_COMPONENT|GET_SOURCE_FILE_PROPERTY|' - # r'GET_TARGET_PROPERTY|GET_TEST_PROPERTY|IF|INCLUDE|' - # r'INCLUDE_DIRECTORIES|INCLUDE_EXTERNAL_MSPROJECT|' - # r'INCLUDE_REGULAR_EXPRESSION|INSTALL|INSTALL_FILES|' - # r'INSTALL_PROGRAMS|INSTALL_TARGETS|LINK_DIRECTORIES|' - # r'LINK_LIBRARIES|LIST|LOAD_CACHE|LOAD_COMMAND|MACRO|' - # r'MAKE_DIRECTORY|MARK_AS_ADVANCED|MATH|MESSAGE|OPTION|' - # r'OUTPUT_REQUIRED_FILES|PROJECT|QT_WRAP_CPP|QT_WRAP_UI|REMOVE|' - # r'REMOVE_DEFINITIONS|SEPARATE_ARGUMENTS|SET|' - # r'SET_DIRECTORY_PROPERTIES|SET_SOURCE_FILES_PROPERTIES|' - # r'SET_TARGET_PROPERTIES|SET_TESTS_PROPERTIES|SITE_NAME|' - # r'SOURCE_GROUP|STRING|SUBDIR_DEPENDS|SUBDIRS|' - # r'TARGET_LINK_LIBRARIES|TRY_COMPILE|TRY_RUN|UNSET|' - # r'USE_MANGLED_MESA|UTILITY_SOURCE|VARIABLE_REQUIRES|' - # r'VTK_MAKE_INSTANTIATOR|VTK_WRAP_JAVA|VTK_WRAP_PYTHON|' - # r'VTK_WRAP_TCL|WHILE|WRITE_FILE|' - # r'COUNTARGS)\b', Name.Builtin, 'args'), - (r'\b(\w+)([ \t]*)(\()', bygroups(Name.Builtin, Text, - Punctuation), 'args'), - include('keywords'), - include('ws') - ], - 'args': [ - (r'\(', Punctuation, '#push'), - (r'\)', Punctuation, '#pop'), - (r'(\${)(.+?)(})', bygroups(Operator, Name.Variable, Operator)), - (r'(?s)".*?"', 
String.Double), - (r'\\\S+', String), - (r'[^\)$"# \t\n]+', String), - (r'\n', Text), # explicitly legal - include('keywords'), - include('ws') - ], - 'string': [ - - ], - 'keywords': [ - (r'\b(WIN32|UNIX|APPLE|CYGWIN|BORLAND|MINGW|MSVC|MSVC_IDE|MSVC60|' - r'MSVC70|MSVC71|MSVC80|MSVC90)\b', Keyword), - ], - 'ws': [ - (r'[ \t]+', Text), - (r'#.+\n', Comment), - ] - } - - -class HttpLexer(RegexLexer): - """ - Lexer for HTTP sessions. - - *New in Pygments 1.5.* - """ - - name = 'HTTP' - aliases = ['http'] - - flags = re.DOTALL - - def header_callback(self, match): - if match.group(1).lower() == 'content-type': - content_type = match.group(5).strip() - if ';' in content_type: - content_type = content_type[:content_type.find(';')].strip() - self.content_type = content_type - yield match.start(1), Name.Attribute, match.group(1) - yield match.start(2), Text, match.group(2) - yield match.start(3), Operator, match.group(3) - yield match.start(4), Text, match.group(4) - yield match.start(5), Literal, match.group(5) - yield match.start(6), Text, match.group(6) - - def continuous_header_callback(self, match): - yield match.start(1), Text, match.group(1) - yield match.start(2), Literal, match.group(2) - yield match.start(3), Text, match.group(3) - - def content_callback(self, match): - content_type = getattr(self, 'content_type', None) - content = match.group() - offset = match.start() - if content_type: - from pygments.lexers import get_lexer_for_mimetype - try: - lexer = get_lexer_for_mimetype(content_type) - except ClassNotFound: - pass - else: - for idx, token, value in lexer.get_tokens_unprocessed(content): - yield offset + idx, token, value - return - yield offset, Text, content - - tokens = { - 'root': [ - (r'(GET|POST|PUT|DELETE|HEAD|OPTIONS|TRACE|PATCH)( +)([^ ]+)( +)' - r'(HTTP)(/)(1\.[01])(\r?\n|$)', - bygroups(Name.Function, Text, Name.Namespace, Text, - Keyword.Reserved, Operator, Number, Text), - 'headers'), - (r'(HTTP)(/)(1\.[01])( +)(\d{3})( 
+)([^\r\n]+)(\r?\n|$)', - bygroups(Keyword.Reserved, Operator, Number, Text, Number, - Text, Name.Exception, Text), - 'headers'), - ], - 'headers': [ - (r'([^\s:]+)( *)(:)( *)([^\r\n]+)(\r?\n|$)', header_callback), - (r'([\t ]+)([^\r\n]+)(\r?\n|$)', continuous_header_callback), - (r'\r?\n', Text, 'content') - ], - 'content': [ - (r'.+', content_callback) - ] - } - - -class PyPyLogLexer(RegexLexer): - """ - Lexer for PyPy log files. - - *New in Pygments 1.5.* - """ - name = "PyPy Log" - aliases = ["pypylog", "pypy"] - filenames = ["*.pypylog"] - mimetypes = ['application/x-pypylog'] - - tokens = { - "root": [ - (r"\[\w+\] {jit-log-.*?$", Keyword, "jit-log"), - (r"\[\w+\] {jit-backend-counts$", Keyword, "jit-backend-counts"), - include("extra-stuff"), - ], - "jit-log": [ - (r"\[\w+\] jit-log-.*?}$", Keyword, "#pop"), - (r"^\+\d+: ", Comment), - (r"--end of the loop--", Comment), - (r"[ifp]\d+", Name), - (r"ptr\d+", Name), - (r"(\()(\w+(?:\.\w+)?)(\))", - bygroups(Punctuation, Name.Builtin, Punctuation)), - (r"[\[\]=,()]", Punctuation), - (r"(\d+\.\d+|inf|-inf)", Number.Float), - (r"-?\d+", Number.Integer), - (r"'.*'", String), - (r"(None|descr|ConstClass|ConstPtr|TargetToken)", Name), - (r"<.*?>+", Name.Builtin), - (r"(label|debug_merge_point|jump|finish)", Name.Class), - (r"(int_add_ovf|int_add|int_sub_ovf|int_sub|int_mul_ovf|int_mul|" - r"int_floordiv|int_mod|int_lshift|int_rshift|int_and|int_or|" - r"int_xor|int_eq|int_ne|int_ge|int_gt|int_le|int_lt|int_is_zero|" - r"int_is_true|" - r"uint_floordiv|uint_ge|uint_lt|" - r"float_add|float_sub|float_mul|float_truediv|float_neg|" - r"float_eq|float_ne|float_ge|float_gt|float_le|float_lt|float_abs|" - r"ptr_eq|ptr_ne|instance_ptr_eq|instance_ptr_ne|" - r"cast_int_to_float|cast_float_to_int|" - r"force_token|quasiimmut_field|same_as|virtual_ref_finish|" - r"virtual_ref|mark_opaque_ptr|" - r"call_may_force|call_assembler|call_loopinvariant|" - r"call_release_gil|call_pure|call|" - 
r"new_with_vtable|new_array|newstr|newunicode|new|" - r"arraylen_gc|" - r"getarrayitem_gc_pure|getarrayitem_gc|setarrayitem_gc|" - r"getarrayitem_raw|setarrayitem_raw|getfield_gc_pure|" - r"getfield_gc|getinteriorfield_gc|setinteriorfield_gc|" - r"getfield_raw|setfield_gc|setfield_raw|" - r"strgetitem|strsetitem|strlen|copystrcontent|" - r"unicodegetitem|unicodesetitem|unicodelen|" - r"guard_true|guard_false|guard_value|guard_isnull|" - r"guard_nonnull_class|guard_nonnull|guard_class|guard_no_overflow|" - r"guard_not_forced|guard_no_exception|guard_not_invalidated)", - Name.Builtin), - include("extra-stuff"), - ], - "jit-backend-counts": [ - (r"\[\w+\] jit-backend-counts}$", Keyword, "#pop"), - (r":", Punctuation), - (r"\d+", Number), - include("extra-stuff"), - ], - "extra-stuff": [ - (r"\s+", Text), - (r"#.*?$", Comment), - ], - } - - -class HxmlLexer(RegexLexer): - """ - Lexer for `haXe build `_ files. - - *New in Pygments 1.6.* - """ - name = 'Hxml' - aliases = ['haxeml', 'hxml'] - filenames = ['*.hxml'] - - tokens = { - 'root': [ - # Seperator - (r'(--)(next)', bygroups(Punctuation, Generic.Heading)), - # Compiler switches with one dash - (r'(-)(prompt|debug|v)', bygroups(Punctuation, Keyword.Keyword)), - # Compilerswitches with two dashes - (r'(--)(neko-source|flash-strict|flash-use-stage|no-opt|no-traces|' - r'no-inline|times|no-output)', bygroups(Punctuation, Keyword)), - # Targets and other options that take an argument - (r'(-)(cpp|js|neko|x|as3|swf9?|swf-lib|php|xml|main|lib|D|resource|' - r'cp|cmd)( +)(.+)', - bygroups(Punctuation, Keyword, Whitespace, String)), - # Options that take only numerical arguments - (r'(-)(swf-version)( +)(\d+)', - bygroups(Punctuation, Keyword, Number.Integer)), - # An Option that defines the size, the fps and the background - # color of an flash movie - (r'(-)(swf-header)( +)(\d+)(:)(\d+)(:)(\d+)(:)([A-Fa-f0-9]{6})', - bygroups(Punctuation, Keyword, Whitespace, Number.Integer, - Punctuation, Number.Integer, Punctuation, 
Number.Integer, - Punctuation, Number.Hex)), - # options with two dashes that takes arguments - (r'(--)(js-namespace|php-front|php-lib|remap|gen-hx-classes)( +)' - r'(.+)', bygroups(Punctuation, Keyword, Whitespace, String)), - # Single line comment, multiline ones are not allowed. - (r'#.*', Comment.Single) - ] - } - - -class EbnfLexer(RegexLexer): - """ - Lexer for `ISO/IEC 14977 EBNF - `_ - grammars. - - *New in Pygments 1.7.* - """ - - name = 'EBNF' - aliases = ['ebnf'] - filenames = ['*.ebnf'] - mimetypes = ['text/x-ebnf'] - - tokens = { - 'root': [ - include('whitespace'), - include('comment_start'), - include('identifier'), - (r'=', Operator, 'production'), - ], - 'production': [ - include('whitespace'), - include('comment_start'), - include('identifier'), - (r'"[^"]*"', String.Double), - (r"'[^']*'", String.Single), - (r'(\?[^?]*\?)', Name.Entity), - (r'[\[\]{}(),|]', Punctuation), - (r'-', Operator), - (r';', Punctuation, '#pop'), - ], - 'whitespace': [ - (r'\s+', Text), - ], - 'comment_start': [ - (r'\(\*', Comment.Multiline, 'comment'), - ], - 'comment': [ - (r'[^*)]', Comment.Multiline), - include('comment_start'), - (r'\*\)', Comment.Multiline, '#pop'), - (r'[*)]', Comment.Multiline), - ], - 'identifier': [ - (r'([a-zA-Z][a-zA-Z0-9 \-]*)', Keyword), - ], - } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/web.py b/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/web.py deleted file mode 100644 index 142fef5..0000000 --- a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/web.py +++ /dev/null @@ -1,4045 +0,0 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.web - ~~~~~~~~~~~~~~~~~~~ - - Lexers for web-related languages and markup. - - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. 
-""" - -import re -import copy - -from pygments.lexer import RegexLexer, ExtendedRegexLexer, bygroups, using, \ - include, this -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Other, Punctuation, Literal -from pygments.util import get_bool_opt, get_list_opt, looks_like_xml, \ - html_doctype_matches, unirange -from pygments.lexers.agile import RubyLexer -from pygments.lexers.compiled import ScalaLexer - - -__all__ = ['HtmlLexer', 'XmlLexer', 'JavascriptLexer', 'JsonLexer', 'CssLexer', - 'PhpLexer', 'ActionScriptLexer', 'XsltLexer', 'ActionScript3Lexer', - 'MxmlLexer', 'HaxeLexer', 'HamlLexer', 'SassLexer', 'ScssLexer', - 'ObjectiveJLexer', 'CoffeeScriptLexer', 'LiveScriptLexer', - 'DuelLexer', 'ScamlLexer', 'JadeLexer', 'XQueryLexer', - 'DtdLexer', 'DartLexer', 'LassoLexer', 'QmlLexer', 'TypeScriptLexer'] - - -class JavascriptLexer(RegexLexer): - """ - For JavaScript source code. - """ - - name = 'JavaScript' - aliases = ['js', 'javascript'] - filenames = ['*.js', ] - mimetypes = ['application/javascript', 'application/x-javascript', - 'text/x-javascript', 'text/javascript', ] - - flags = re.DOTALL - tokens = { - 'commentsandwhitespace': [ - (r'\s+', Text), - (r'', Comment, '#pop'), - ('-', Comment), - ], - 'tag': [ - (r'\s+', Text), - (r'[a-zA-Z0-9_:-]+\s*=', Name.Attribute, 'attr'), - (r'[a-zA-Z0-9_:-]+', Name.Attribute), - (r'/?\s*>', Name.Tag, '#pop'), - ], - 'script-content': [ - (r'<\s*/\s*script\s*>', Name.Tag, '#pop'), - (r'.+?(?=<\s*/\s*script\s*>)', using(JavascriptLexer)), - ], - 'style-content': [ - (r'<\s*/\s*style\s*>', Name.Tag, '#pop'), - (r'.+?(?=<\s*/\s*style\s*>)', using(CssLexer)), - ], - 'attr': [ - ('".*?"', String, '#pop'), - ("'.*?'", String, '#pop'), - (r'[^\s>]+', String, '#pop'), - ], - } - - def analyse_text(text): - if html_doctype_matches(text): - return 0.5 - - -class PhpLexer(RegexLexer): - """ - For `PHP `_ source code. - For PHP embedded in HTML, use the `HtmlPhpLexer`. 
- - Additional options accepted: - - `startinline` - If given and ``True`` the lexer starts highlighting with - php code (i.e.: no starting ``>> from pygments.lexers._phpbuiltins import MODULES - >>> MODULES.keys() - ['PHP Options/Info', 'Zip', 'dba', ...] - - In fact the names of those modules match the module names from - the php documentation. - """ - - name = 'PHP' - aliases = ['php', 'php3', 'php4', 'php5'] - filenames = ['*.php', '*.php[345]', '*.inc'] - mimetypes = ['text/x-php'] - - flags = re.IGNORECASE | re.DOTALL | re.MULTILINE - tokens = { - 'root': [ - (r'<\?(php)?', Comment.Preproc, 'php'), - (r'[^<]+', Other), - (r'<', Other) - ], - 'php': [ - (r'\?>', Comment.Preproc, '#pop'), - (r'<<<(\'?)([a-zA-Z_][a-zA-Z0-9_]*)\1\n.*?\n\2\;?\n', String), - (r'\s+', Text), - (r'#.*?\n', Comment.Single), - (r'//.*?\n', Comment.Single), - # put the empty comment here, it is otherwise seen as - # the start of a docstring - (r'/\*\*/', Comment.Multiline), - (r'/\*\*.*?\*/', String.Doc), - (r'/\*.*?\*/', Comment.Multiline), - (r'(->|::)(\s*)([a-zA-Z_][a-zA-Z0-9_]*)', - bygroups(Operator, Text, Name.Attribute)), - (r'[~!%^&*+=|:.<>/?@-]+', Operator), - (r'[\[\]{}();,]+', Punctuation), - (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'), - (r'(function)(\s*)(?=\()', bygroups(Keyword, Text)), - (r'(function)(\s+)(&?)(\s*)', - bygroups(Keyword, Text, Operator, Text), 'functionname'), - (r'(const)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)', - bygroups(Keyword, Text, Name.Constant)), - (r'(and|E_PARSE|old_function|E_ERROR|or|as|E_WARNING|parent|' - r'eval|PHP_OS|break|exit|case|extends|PHP_VERSION|cfunction|' - r'FALSE|print|for|require|continue|foreach|require_once|' - r'declare|return|default|static|do|switch|die|stdClass|' - r'echo|else|TRUE|elseif|var|empty|if|xor|enddeclare|include|' - r'virtual|endfor|include_once|while|endforeach|global|__FILE__|' - r'endif|list|__LINE__|endswitch|new|__sleep|endwhile|not|' - r'array|__wakeup|E_ALL|NULL|final|php_user_filter|interface|' - 
r'implements|public|private|protected|abstract|clone|try|' - r'catch|throw|this|use|namespace|trait)\b', Keyword), - (r'(true|false|null)\b', Keyword.Constant), - (r'\$\{\$+[a-zA-Z_][a-zA-Z0-9_]*\}', Name.Variable), - (r'\$+[a-zA-Z_][a-zA-Z0-9_]*', Name.Variable), - (r'[\\a-zA-Z_][\\a-zA-Z0-9_]*', Name.Other), - (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float), - (r'\d+[eE][+-]?[0-9]+', Number.Float), - (r'0[0-7]+', Number.Oct), - (r'0[xX][a-fA-F0-9]+', Number.Hex), - (r'\d+', Number.Integer), - (r"'([^'\\]*(?:\\.[^'\\]*)*)'", String.Single), - (r'`([^`\\]*(?:\\.[^`\\]*)*)`', String.Backtick), - (r'"', String.Double, 'string'), - ], - 'classname': [ - (r'[a-zA-Z_][\\a-zA-Z0-9_]*', Name.Class, '#pop') - ], - 'functionname': [ - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Function, '#pop') - ], - 'string': [ - (r'"', String.Double, '#pop'), - (r'[^{$"\\]+', String.Double), - (r'\\([nrt\"$\\]|[0-7]{1,3}|x[0-9A-Fa-f]{1,2})', String.Escape), - (r'\$[a-zA-Z_][a-zA-Z0-9_]*(\[\S+\]|->[a-zA-Z_][a-zA-Z0-9_]*)?', - String.Interpol), - (r'(\{\$\{)(.*?)(\}\})', - bygroups(String.Interpol, using(this, _startinline=True), - String.Interpol)), - (r'(\{)(\$.*?)(\})', - bygroups(String.Interpol, using(this, _startinline=True), - String.Interpol)), - (r'(\$\{)(\S+)(\})', - bygroups(String.Interpol, Name.Variable, String.Interpol)), - (r'[${\\]+', String.Double) - ], - } - - def __init__(self, **options): - self.funcnamehighlighting = get_bool_opt( - options, 'funcnamehighlighting', True) - self.disabledmodules = get_list_opt( - options, 'disabledmodules', ['unknown']) - self.startinline = get_bool_opt(options, 'startinline', False) - - # private option argument for the lexer itself - if '_startinline' in options: - self.startinline = options.pop('_startinline') - - # collect activated functions in a set - self._functions = set() - if self.funcnamehighlighting: - from pygments.lexers._phpbuiltins import MODULES - for key, value in MODULES.iteritems(): - if key not in 
self.disabledmodules: - self._functions.update(value) - RegexLexer.__init__(self, **options) - - def get_tokens_unprocessed(self, text): - stack = ['root'] - if self.startinline: - stack.append('php') - for index, token, value in \ - RegexLexer.get_tokens_unprocessed(self, text, stack): - if token is Name.Other: - if value in self._functions: - yield index, Name.Builtin, value - continue - yield index, token, value - - def analyse_text(text): - rv = 0.0 - if re.search(r'<\?(?!xml)', text): - rv += 0.3 - if '?>' in text: - rv += 0.1 - return rv - - -class DtdLexer(RegexLexer): - """ - A lexer for DTDs (Document Type Definitions). - - *New in Pygments 1.5.* - """ - - flags = re.MULTILINE | re.DOTALL - - name = 'DTD' - aliases = ['dtd'] - filenames = ['*.dtd'] - mimetypes = ['application/xml-dtd'] - - tokens = { - 'root': [ - include('common'), - - (r'(\s]+)', - bygroups(Keyword, Text, Name.Tag)), - (r'PUBLIC|SYSTEM', Keyword.Constant), - (r'[\[\]>]', Keyword), - ], - - 'common': [ - (r'\s+', Text), - (r'(%|&)[^;]*;', Name.Entity), - ('', Comment, '#pop'), - ('-', Comment), - ], - - 'element': [ - include('common'), - (r'EMPTY|ANY|#PCDATA', Keyword.Constant), - (r'[^>\s\|()?+*,]+', Name.Tag), - (r'>', Keyword, '#pop'), - ], - - 'attlist': [ - include('common'), - (r'CDATA|IDREFS|IDREF|ID|NMTOKENS|NMTOKEN|ENTITIES|ENTITY|NOTATION', - Keyword.Constant), - (r'#REQUIRED|#IMPLIED|#FIXED', Keyword.Constant), - (r'xml:space|xml:lang', Keyword.Reserved), - (r'[^>\s\|()?+*,]+', Name.Attribute), - (r'>', Keyword, '#pop'), - ], - - 'entity': [ - include('common'), - (r'SYSTEM|PUBLIC|NDATA', Keyword.Constant), - (r'[^>\s\|()?+*,]+', Name.Entity), - (r'>', Keyword, '#pop'), - ], - - 'notation': [ - include('common'), - (r'SYSTEM|PUBLIC', Keyword.Constant), - (r'[^>\s\|()?+*,]+', Name.Attribute), - (r'>', Keyword, '#pop'), - ], - } - - def analyse_text(text): - if not looks_like_xml(text) and \ - ('', Comment.Preproc), - ('', Comment, '#pop'), - ('-', Comment), - ], - 'tag': [ - 
(r'\s+', Text), - (r'[\w.:-]+\s*=', Name.Attribute, 'attr'), - (r'/?\s*>', Name.Tag, '#pop'), - ], - 'attr': [ - ('\s+', Text), - ('".*?"', String, '#pop'), - ("'.*?'", String, '#pop'), - (r'[^\s>]+', String, '#pop'), - ], - } - - def analyse_text(text): - if looks_like_xml(text): - return 0.5 - - -class XsltLexer(XmlLexer): - ''' - A lexer for XSLT. - - *New in Pygments 0.10.* - ''' - - name = 'XSLT' - aliases = ['xslt'] - filenames = ['*.xsl', '*.xslt', '*.xpl'] # xpl is XProc - mimetypes = ['application/xsl+xml', 'application/xslt+xml'] - - EXTRA_KEYWORDS = set([ - 'apply-imports', 'apply-templates', 'attribute', - 'attribute-set', 'call-template', 'choose', 'comment', - 'copy', 'copy-of', 'decimal-format', 'element', 'fallback', - 'for-each', 'if', 'import', 'include', 'key', 'message', - 'namespace-alias', 'number', 'otherwise', 'output', 'param', - 'preserve-space', 'processing-instruction', 'sort', - 'strip-space', 'stylesheet', 'template', 'text', 'transform', - 'value-of', 'variable', 'when', 'with-param' - ]) - - def get_tokens_unprocessed(self, text): - for index, token, value in XmlLexer.get_tokens_unprocessed(self, text): - m = re.match(']*)/?>?', value) - - if token is Name.Tag and m and m.group(1) in self.EXTRA_KEYWORDS: - yield index, Keyword, value - else: - yield index, token, value - - def analyse_text(text): - if looks_like_xml(text) and ' tags is highlighted by the appropriate lexer. 
- - *New in Pygments 1.1.* - """ - flags = re.MULTILINE | re.DOTALL - name = 'MXML' - aliases = ['mxml'] - filenames = ['*.mxml'] - mimetimes = ['text/xml', 'application/xml'] - - tokens = { - 'root': [ - ('[^<&]+', Text), - (r'&\S*?;', Name.Entity), - (r'(\<\!\[CDATA\[)(.*?)(\]\]\>)', - bygroups(String, using(ActionScript3Lexer), String)), - ('', Comment, '#pop'), - ('-', Comment), - ], - 'tag': [ - (r'\s+', Text), - (r'[a-zA-Z0-9_.:-]+\s*=', Name.Attribute, 'attr'), - (r'/?\s*>', Name.Tag, '#pop'), - ], - 'attr': [ - ('\s+', Text), - ('".*?"', String, '#pop'), - ("'.*?'", String, '#pop'), - (r'[^\s>]+', String, '#pop'), - ], - } - - -class HaxeLexer(ExtendedRegexLexer): - """ - For Haxe source code (http://haxe.org/). - - *New in Pygments 1.3.* - """ - - name = 'Haxe' - aliases = ['hx', 'Haxe', 'haxe', 'haXe', 'hxsl'] - filenames = ['*.hx', '*.hxsl'] - mimetypes = ['text/haxe', 'text/x-haxe', 'text/x-hx'] - - # keywords extracted from lexer.mll in the haxe compiler source - keyword = (r'(?:function|class|static|var|if|else|while|do|for|' - r'break|return|continue|extends|implements|import|' - r'switch|case|default|public|private|try|untyped|' - r'catch|new|this|throw|extern|enum|in|interface|' - r'cast|override|dynamic|typedef|package|' - r'inline|using|null|true|false|abstract)\b') - - # idtype in lexer.mll - typeid = r'_*[A-Z][_a-zA-Z0-9]*' - - # combined ident and dollar and idtype - ident = r'(?:_*[a-z][_a-zA-Z0-9]*|_+[0-9][_a-zA-Z0-9]*|' + typeid + \ - '|_+|\$[_a-zA-Z0-9]+)' - - binop = (r'(?:%=|&=|\|=|\^=|\+=|\-=|\*=|/=|<<=|>\s*>\s*=|>\s*>\s*>\s*=|==|' - r'!=|<=|>\s*=|&&|\|\||<<|>>>|>\s*>|\.\.\.|<|>|%|&|\||\^|\+|\*|' - r'/|\-|=>|=)') - - # ident except keywords - ident_no_keyword = r'(?!' 
+ keyword + ')' + ident - - flags = re.DOTALL | re.MULTILINE - - preproc_stack = [] - - def preproc_callback(self, match, ctx): - proc = match.group(2) - - if proc == 'if': - # store the current stack - self.preproc_stack.append(ctx.stack[:]) - elif proc in ['else', 'elseif']: - # restore the stack back to right before #if - if self.preproc_stack: ctx.stack = self.preproc_stack[-1][:] - elif proc == 'end': - # remove the saved stack of previous #if - if self.preproc_stack: self.preproc_stack.pop() - - # #if and #elseif should follow by an expr - if proc in ['if', 'elseif']: - ctx.stack.append('preproc-expr') - - # #error can be optionally follow by the error msg - if proc in ['error']: - ctx.stack.append('preproc-error') - - yield match.start(), Comment.Preproc, '#' + proc - ctx.pos = match.end() - - - tokens = { - 'root': [ - include('spaces'), - include('meta'), - (r'(?:package)\b', Keyword.Namespace, ('semicolon', 'package')), - (r'(?:import)\b', Keyword.Namespace, ('semicolon', 'import')), - (r'(?:using)\b', Keyword.Namespace, ('semicolon', 'using')), - (r'(?:extern|private)\b', Keyword.Declaration), - (r'(?:abstract)\b', Keyword.Declaration, 'abstract'), - (r'(?:class|interface)\b', Keyword.Declaration, 'class'), - (r'(?:enum)\b', Keyword.Declaration, 'enum'), - (r'(?:typedef)\b', Keyword.Declaration, 'typedef'), - - # top-level expression - # although it is not supported in haxe, but it is common to write - # expression in web pages the positive lookahead here is to prevent - # an infinite loop at the EOF - (r'(?=.)', Text, 'expr-statement'), - ], - - # space/tab/comment/preproc - 'spaces': [ - (r'\s+', Text), - (r'//[^\n\r]*', Comment.Single), - (r'/\*.*?\*/', Comment.Multiline), - (r'(#)(if|elseif|else|end|error)\b', preproc_callback), - ], - - 'string-single-interpol': [ - (r'\$\{', String.Interpol, ('string-interpol-close', 'expr')), - (r'\$\$', String.Escape), - (r'\$(?=' + ident + ')', String.Interpol, 'ident'), - include('string-single'), - ], - - 
'string-single': [ - (r"'", String.Single, '#pop'), - (r'\\.', String.Escape), - (r'.', String.Single), - ], - - 'string-double': [ - (r'"', String.Double, '#pop'), - (r'\\.', String.Escape), - (r'.', String.Double), - ], - - 'string-interpol-close': [ - (r'\$'+ident, String.Interpol), - (r'\}', String.Interpol, '#pop'), - ], - - 'package': [ - include('spaces'), - (ident, Name.Namespace), - (r'\.', Punctuation, 'import-ident'), - (r'', Text, '#pop'), - ], - - 'import': [ - include('spaces'), - (ident, Name.Namespace), - (r'\*', Keyword), # wildcard import - (r'\.', Punctuation, 'import-ident'), - (r'in', Keyword.Namespace, 'ident'), - (r'', Text, '#pop'), - ], - - 'import-ident': [ - include('spaces'), - (r'\*', Keyword, '#pop'), # wildcard import - (ident, Name.Namespace, '#pop'), - ], - - 'using': [ - include('spaces'), - (ident, Name.Namespace), - (r'\.', Punctuation, 'import-ident'), - (r'', Text, '#pop'), - ], - - 'preproc-error': [ - (r'\s+', Comment.Preproc), - (r"'", String.Single, ('#pop', 'string-single')), - (r'"', String.Double, ('#pop', 'string-double')), - (r'', Text, '#pop'), - ], - - 'preproc-expr': [ - (r'\s+', Comment.Preproc), - (r'\!', Comment.Preproc), - (r'\(', Comment.Preproc, ('#pop', 'preproc-parenthesis')), - - (ident, Comment.Preproc, '#pop'), - (r"'", String.Single, ('#pop', 'string-single')), - (r'"', String.Double, ('#pop', 'string-double')), - ], - - 'preproc-parenthesis': [ - (r'\s+', Comment.Preproc), - (r'\)', Comment.Preproc, '#pop'), - ('', Text, 'preproc-expr-in-parenthesis'), - ], - - 'preproc-expr-chain': [ - (r'\s+', Comment.Preproc), - (binop, Comment.Preproc, ('#pop', 'preproc-expr-in-parenthesis')), - (r'', Text, '#pop'), - ], - - # same as 'preproc-expr' but able to chain 'preproc-expr-chain' - 'preproc-expr-in-parenthesis': [ - (r'\s+', Comment.Preproc), - (r'\!', Comment.Preproc), - (r'\(', Comment.Preproc, - ('#pop', 'preproc-expr-chain', 'preproc-parenthesis')), - - (ident, Comment.Preproc, ('#pop', 
'preproc-expr-chain')), - (r"'", String.Single, - ('#pop', 'preproc-expr-chain', 'string-single')), - (r'"', String.Double, - ('#pop', 'preproc-expr-chain', 'string-double')), - ], - - 'abstract' : [ - include('spaces'), - (r'', Text, ('#pop', 'abstract-body', 'abstract-relation', - 'abstract-opaque', 'type-param-constraint', 'type-name')), - ], - - 'abstract-body' : [ - include('spaces'), - (r'\{', Punctuation, ('#pop', 'class-body')), - ], - - 'abstract-opaque' : [ - include('spaces'), - (r'\(', Punctuation, ('#pop', 'parenthesis-close', 'type')), - (r'', Text, '#pop'), - ], - - 'abstract-relation': [ - include('spaces'), - (r'(?:to|from)', Keyword.Declaration, 'type'), - (r',', Punctuation), - (r'', Text, '#pop'), - ], - - 'meta': [ - include('spaces'), - (r'@', Name.Decorator, ('meta-body', 'meta-ident', 'meta-colon')), - ], - - # optional colon - 'meta-colon': [ - include('spaces'), - (r':', Name.Decorator, '#pop'), - (r'', Text, '#pop'), - ], - - # same as 'ident' but set token as Name.Decorator instead of Name - 'meta-ident': [ - include('spaces'), - (ident, Name.Decorator, '#pop'), - ], - - 'meta-body': [ - include('spaces'), - (r'\(', Name.Decorator, ('#pop', 'meta-call')), - (r'', Text, '#pop'), - ], - - 'meta-call': [ - include('spaces'), - (r'\)', Name.Decorator, '#pop'), - (r'', Text, ('#pop', 'meta-call-sep', 'expr')), - ], - - 'meta-call-sep': [ - include('spaces'), - (r'\)', Name.Decorator, '#pop'), - (r',', Punctuation, ('#pop', 'meta-call')), - ], - - 'typedef': [ - include('spaces'), - (r'', Text, ('#pop', 'typedef-body', 'type-param-constraint', - 'type-name')), - ], - - 'typedef-body': [ - include('spaces'), - (r'=', Operator, ('#pop', 'optional-semicolon', 'type')), - ], - - 'enum': [ - include('spaces'), - (r'', Text, ('#pop', 'enum-body', 'bracket-open', - 'type-param-constraint', 'type-name')), - ], - - 'enum-body': [ - include('spaces'), - include('meta'), - (r'\}', Punctuation, '#pop'), - (ident_no_keyword, Name, ('enum-member', 
'type-param-constraint')), - ], - - 'enum-member': [ - include('spaces'), - (r'\(', Punctuation, - ('#pop', 'semicolon', 'flag', 'function-param')), - (r'', Punctuation, ('#pop', 'semicolon', 'flag')), - ], - - 'class': [ - include('spaces'), - (r'', Text, ('#pop', 'class-body', 'bracket-open', 'extends', - 'type-param-constraint', 'type-name')), - ], - - 'extends': [ - include('spaces'), - (r'(?:extends|implements)\b', Keyword.Declaration, 'type'), - (r',', Punctuation), # the comma is made optional here, since haxe2 - # requires the comma but haxe3 does not allow it - (r'', Text, '#pop'), - ], - - 'bracket-open': [ - include('spaces'), - (r'\{', Punctuation, '#pop'), - ], - - 'bracket-close': [ - include('spaces'), - (r'\}', Punctuation, '#pop'), - ], - - 'class-body': [ - include('spaces'), - include('meta'), - (r'\}', Punctuation, '#pop'), - (r'(?:static|public|private|override|dynamic|inline|macro)\b', - Keyword.Declaration), - (r'', Text, 'class-member'), - ], - - 'class-member': [ - include('spaces'), - (r'(var)\b', Keyword.Declaration, - ('#pop', 'optional-semicolon', 'prop')), - (r'(function)\b', Keyword.Declaration, - ('#pop', 'optional-semicolon', 'class-method')), - ], - - # local function, anonymous or not - 'function-local': [ - include('spaces'), - (r'(' + ident_no_keyword + ')?', Name.Function, - ('#pop', 'expr', 'flag', 'function-param', - 'parenthesis-open', 'type-param-constraint')), - ], - - 'optional-expr': [ - include('spaces'), - include('expr'), - (r'', Text, '#pop'), - ], - - 'class-method': [ - include('spaces'), - (ident, Name.Function, ('#pop', 'optional-expr', 'flag', - 'function-param', 'parenthesis-open', - 'type-param-constraint')), - ], - - # function arguments - 'function-param': [ - include('spaces'), - (r'\)', Punctuation, '#pop'), - (r'\?', Punctuation), - (ident_no_keyword, Name, - ('#pop', 'function-param-sep', 'assign', 'flag')), - ], - - 'function-param-sep': [ - include('spaces'), - (r'\)', Punctuation, '#pop'), - (r',', 
Punctuation, ('#pop', 'function-param')), - ], - - # class property - # eg. var prop(default, null):String; - 'prop': [ - include('spaces'), - (ident_no_keyword, Name, ('#pop', 'assign', 'flag', 'prop-get-set')), - ], - - 'prop-get-set': [ - include('spaces'), - (r'\(', Punctuation, ('#pop', 'parenthesis-close', - 'prop-get-set-opt', 'comma', 'prop-get-set-opt')), - (r'', Text, '#pop'), - ], - - 'prop-get-set-opt': [ - include('spaces'), - (r'(?:default|null|never|dynamic|get|set)\b', Keyword, '#pop'), - (ident_no_keyword, Text, '#pop'), #custom getter/setter - ], - - 'expr-statement': [ - include('spaces'), - # makes semicolon optional here, just to avoid checking the last - # one is bracket or not. - (r'', Text, ('#pop', 'optional-semicolon', 'expr')), - ], - - 'expr': [ - include('spaces'), - (r'@', Name.Decorator, ('#pop', 'optional-expr', 'meta-body', - 'meta-ident', 'meta-colon')), - (r'(?:\+\+|\-\-|~(?!/)|!|\-)', Operator), - (r'\(', Punctuation, ('#pop', 'expr-chain', 'parenthesis')), - (r'(?:inline)\b', Keyword.Declaration), - (r'(?:function)\b', Keyword.Declaration, ('#pop', 'expr-chain', - 'function-local')), - (r'\{', Punctuation, ('#pop', 'expr-chain', 'bracket')), - (r'(?:true|false|null)\b', Keyword.Constant, ('#pop', 'expr-chain')), - (r'(?:this)\b', Keyword, ('#pop', 'expr-chain')), - (r'(?:cast)\b', Keyword, ('#pop', 'expr-chain', 'cast')), - (r'(?:try)\b', Keyword, ('#pop', 'catch', 'expr')), - (r'(?:var)\b', Keyword.Declaration, ('#pop', 'var')), - (r'(?:new)\b', Keyword, ('#pop', 'expr-chain', 'new')), - (r'(?:switch)\b', Keyword, ('#pop', 'switch')), - (r'(?:if)\b', Keyword, ('#pop', 'if')), - (r'(?:do)\b', Keyword, ('#pop', 'do')), - (r'(?:while)\b', Keyword, ('#pop', 'while')), - (r'(?:for)\b', Keyword, ('#pop', 'for')), - (r'(?:untyped|throw)\b', Keyword), - (r'(?:return)\b', Keyword, ('#pop', 'optional-expr')), - (r'(?:macro)\b', Keyword, ('#pop', 'macro')), - (r'(?:continue|break)\b', Keyword, '#pop'), - 
(r'(?:\$\s*[a-z]\b|\$(?!'+ident+'))', Name, ('#pop', 'dollar')), - (ident_no_keyword, Name, ('#pop', 'expr-chain')), - - # Float - (r'\.[0-9]+', Number.Float, ('#pop', 'expr-chain')), - (r'[0-9]+[eE][\+\-]?[0-9]+', Number.Float, ('#pop', 'expr-chain')), - (r'[0-9]+\.[0-9]*[eE][\+\-]?[0-9]+', Number.Float, ('#pop', 'expr-chain')), - (r'[0-9]+\.[0-9]+', Number.Float, ('#pop', 'expr-chain')), - (r'[0-9]+\.(?!' + ident + '|\.\.)', Number.Float, ('#pop', 'expr-chain')), - - # Int - (r'0x[0-9a-fA-F]+', Number.Hex, ('#pop', 'expr-chain')), - (r'[0-9]+', Number.Integer, ('#pop', 'expr-chain')), - - # String - (r"'", String.Single, ('#pop', 'expr-chain', 'string-single-interpol')), - (r'"', String.Double, ('#pop', 'expr-chain', 'string-double')), - - # EReg - (r'~/(\\\\|\\/|[^/\n])*/[gimsu]*', String.Regex, ('#pop', 'expr-chain')), - - # Array - (r'\[', Punctuation, ('#pop', 'expr-chain', 'array-decl')), - ], - - 'expr-chain': [ - include('spaces'), - (r'(?:\+\+|\-\-)', Operator), - (binop, Operator, ('#pop', 'expr')), - (r'(?:in)\b', Keyword, ('#pop', 'expr')), - (r'\?', Operator, ('#pop', 'expr', 'ternary', 'expr')), - (r'(\.)(' + ident_no_keyword + ')', bygroups(Punctuation, Name)), - (r'\[', Punctuation, 'array-access'), - (r'\(', Punctuation, 'call'), - (r'', Text, '#pop'), - ], - - # macro reification - 'macro': [ - include('spaces'), - (r':', Punctuation, ('#pop', 'type')), - (r'', Text, ('#pop', 'expr')), - ], - - # cast can be written as "cast expr" or "cast(expr, type)" - 'cast': [ - include('spaces'), - (r'\(', Punctuation, ('#pop', 'parenthesis-close', - 'cast-type', 'expr')), - (r'', Text, ('#pop', 'expr')), - ], - - # optionally give a type as the 2nd argument of cast() - 'cast-type': [ - include('spaces'), - (r',', Punctuation, ('#pop', 'type')), - (r'', Text, '#pop'), - ], - - 'catch': [ - include('spaces'), - (r'(?:catch)\b', Keyword, ('expr', 'function-param', - 'parenthesis-open')), - (r'', Text, '#pop'), - ], - - # do-while loop - 'do': [ - 
include('spaces'), - (r'', Punctuation, ('#pop', 'do-while', 'expr')), - ], - - # the while after do - 'do-while': [ - include('spaces'), - (r'(?:while)\b', Keyword, ('#pop', 'parenthesis', - 'parenthesis-open')), - ], - - 'while': [ - include('spaces'), - (r'\(', Punctuation, ('#pop', 'expr', 'parenthesis')), - ], - - 'for': [ - include('spaces'), - (r'\(', Punctuation, ('#pop', 'expr', 'parenthesis')), - ], - - 'if': [ - include('spaces'), - (r'\(', Punctuation, ('#pop', 'else', 'optional-semicolon', 'expr', - 'parenthesis')), - ], - - 'else': [ - include('spaces'), - (r'(?:else)\b', Keyword, ('#pop', 'expr')), - (r'', Text, '#pop'), - ], - - 'switch': [ - include('spaces'), - (r'', Text, ('#pop', 'switch-body', 'bracket-open', 'expr')), - ], - - 'switch-body': [ - include('spaces'), - (r'(?:case|default)\b', Keyword, ('case-block', 'case')), - (r'\}', Punctuation, '#pop'), - ], - - 'case': [ - include('spaces'), - (r':', Punctuation, '#pop'), - (r'', Text, ('#pop', 'case-sep', 'case-guard', 'expr')), - ], - - 'case-sep': [ - include('spaces'), - (r':', Punctuation, '#pop'), - (r',', Punctuation, ('#pop', 'case')), - ], - - 'case-guard': [ - include('spaces'), - (r'(?:if)\b', Keyword, ('#pop', 'parenthesis', 'parenthesis-open')), - (r'', Text, '#pop'), - ], - - # optional multiple expr under a case - 'case-block': [ - include('spaces'), - (r'(?!(?:case|default)\b|\})', Keyword, 'expr-statement'), - (r'', Text, '#pop'), - ], - - 'new': [ - include('spaces'), - (r'', Text, ('#pop', 'call', 'parenthesis-open', 'type')), - ], - - 'array-decl': [ - include('spaces'), - (r'\]', Punctuation, '#pop'), - (r'', Text, ('#pop', 'array-decl-sep', 'expr')), - ], - - 'array-decl-sep': [ - include('spaces'), - (r'\]', Punctuation, '#pop'), - (r',', Punctuation, ('#pop', 'array-decl')), - ], - - 'array-access': [ - include('spaces'), - (r'', Text, ('#pop', 'array-access-close', 'expr')), - ], - - 'array-access-close': [ - include('spaces'), - (r'\]', Punctuation, '#pop'), - ], - 
- 'comma': [ - include('spaces'), - (r',', Punctuation, '#pop'), - ], - - 'colon': [ - include('spaces'), - (r':', Punctuation, '#pop'), - ], - - 'semicolon': [ - include('spaces'), - (r';', Punctuation, '#pop'), - ], - - 'optional-semicolon': [ - include('spaces'), - (r';', Punctuation, '#pop'), - (r'', Text, '#pop'), - ], - - # identity that CAN be a Haxe keyword - 'ident': [ - include('spaces'), - (ident, Name, '#pop'), - ], - - 'dollar': [ - include('spaces'), - (r'\{', Keyword, ('#pop', 'bracket-close', 'expr')), - (r'', Text, ('#pop', 'expr-chain')), - ], - - 'type-name': [ - include('spaces'), - (typeid, Name, '#pop'), - ], - - 'type-full-name': [ - include('spaces'), - (r'\.', Punctuation, 'ident'), - (r'', Text, '#pop'), - ], - - 'type': [ - include('spaces'), - (r'\?', Punctuation), - (ident, Name, ('#pop', 'type-check', 'type-full-name')), - (r'\{', Punctuation, ('#pop', 'type-check', 'type-struct')), - (r'\(', Punctuation, ('#pop', 'type-check', 'type-parenthesis')), - ], - - 'type-parenthesis': [ - include('spaces'), - (r'', Text, ('#pop', 'parenthesis-close', 'type')), - ], - - 'type-check': [ - include('spaces'), - (r'->', Punctuation, ('#pop', 'type')), - (r'<(?!=)', Punctuation, 'type-param'), - (r'', Text, '#pop'), - ], - - 'type-struct': [ - include('spaces'), - (r'\}', Punctuation, '#pop'), - (r'\?', Punctuation), - (r'>', Punctuation, ('comma', 'type')), - (ident_no_keyword, Name, ('#pop', 'type-struct-sep', 'type', 'colon')), - include('class-body'), - ], - - 'type-struct-sep': [ - include('spaces'), - (r'\}', Punctuation, '#pop'), - (r',', Punctuation, ('#pop', 'type-struct')), - ], - - # type-param can be a normal type or a constant literal... - 'type-param-type': [ - # Float - (r'\.[0-9]+', Number.Float, '#pop'), - (r'[0-9]+[eE][\+\-]?[0-9]+', Number.Float, '#pop'), - (r'[0-9]+\.[0-9]*[eE][\+\-]?[0-9]+', Number.Float, '#pop'), - (r'[0-9]+\.[0-9]+', Number.Float, '#pop'), - (r'[0-9]+\.(?!' 
+ ident + '|\.\.)', Number.Float, '#pop'), - - # Int - (r'0x[0-9a-fA-F]+', Number.Hex, '#pop'), - (r'[0-9]+', Number.Integer, '#pop'), - - # String - (r"'", String.Single, ('#pop', 'string-single')), - (r'"', String.Double, ('#pop', 'string-double')), - - # EReg - (r'~/(\\\\|\\/|[^/\n])*/[gim]*', String.Regex, '#pop'), - - # Array - (r'\[', Operator, ('#pop', 'array-decl')), - - include('type'), - ], - - # type-param part of a type - # ie. the path in Map - 'type-param': [ - include('spaces'), - (r'', Text, ('#pop', 'type-param-sep', 'type-param-type')), - ], - - 'type-param-sep': [ - include('spaces'), - (r'>', Punctuation, '#pop'), - (r',', Punctuation, ('#pop', 'type-param')), - ], - - # optional type-param that may include constraint - # ie. - 'type-param-constraint': [ - include('spaces'), - (r'<(?!=)', Punctuation, ('#pop', 'type-param-constraint-sep', - 'type-param-constraint-flag', 'type-name')), - (r'', Text, '#pop'), - ], - - 'type-param-constraint-sep': [ - include('spaces'), - (r'>', Punctuation, '#pop'), - (r',', Punctuation, ('#pop', 'type-param-constraint-sep', - 'type-param-constraint-flag', 'type-name')), - ], - - # the optional constraint inside type-param - 'type-param-constraint-flag': [ - include('spaces'), - (r':', Punctuation, ('#pop', 'type-param-constraint-flag-type')), - (r'', Text, '#pop'), - ], - - 'type-param-constraint-flag-type': [ - include('spaces'), - (r'\(', Punctuation, ('#pop', 'type-param-constraint-flag-type-sep', - 'type')), - (r'', Text, ('#pop', 'type')), - ], - - 'type-param-constraint-flag-type-sep': [ - include('spaces'), - (r'\)', Punctuation, '#pop'), - (r',', Punctuation, 'type'), - ], - - # a parenthesis expr that contain exactly one expr - 'parenthesis': [ - include('spaces'), - (r'', Text, ('#pop', 'parenthesis-close', 'expr')), - ], - - 'parenthesis-open': [ - include('spaces'), - (r'\(', Punctuation, '#pop'), - ], - - 'parenthesis-close': [ - include('spaces'), - (r'\)', Punctuation, '#pop'), - ], - - 'var': [ - 
include('spaces'), - (ident_no_keyword, Text, ('#pop', 'var-sep', 'assign', 'flag')), - ], - - # optional more var decl. - 'var-sep': [ - include('spaces'), - (r',', Punctuation, ('#pop', 'var')), - (r'', Text, '#pop'), - ], - - # optional assignment - 'assign': [ - include('spaces'), - (r'=', Operator, ('#pop', 'expr')), - (r'', Text, '#pop'), - ], - - # optional type flag - 'flag': [ - include('spaces'), - (r':', Punctuation, ('#pop', 'type')), - (r'', Text, '#pop'), - ], - - # colon as part of a ternary operator (?:) - 'ternary': [ - include('spaces'), - (r':', Operator, '#pop'), - ], - - # function call - 'call': [ - include('spaces'), - (r'\)', Punctuation, '#pop'), - (r'', Text, ('#pop', 'call-sep', 'expr')), - ], - - # after a call param - 'call-sep': [ - include('spaces'), - (r'\)', Punctuation, '#pop'), - (r',', Punctuation, ('#pop', 'call')), - ], - - # bracket can be block or object - 'bracket': [ - include('spaces'), - (r'(?!(?:\$\s*[a-z]\b|\$(?!'+ident+')))' + ident_no_keyword, Name, - ('#pop', 'bracket-check')), - (r"'", String.Single, ('#pop', 'bracket-check', 'string-single')), - (r'"', String.Double, ('#pop', 'bracket-check', 'string-double')), - (r'', Text, ('#pop', 'block')), - ], - - 'bracket-check': [ - include('spaces'), - (r':', Punctuation, ('#pop', 'object-sep', 'expr')), #is object - (r'', Text, ('#pop', 'block', 'optional-semicolon', 'expr-chain')), #is block - ], - - # code block - 'block': [ - include('spaces'), - (r'\}', Punctuation, '#pop'), - (r'', Text, 'expr-statement'), - ], - - # object in key-value pairs - 'object': [ - include('spaces'), - (r'\}', Punctuation, '#pop'), - (r'', Text, ('#pop', 'object-sep', 'expr', 'colon', 'ident-or-string')) - ], - - # a key of an object - 'ident-or-string': [ - include('spaces'), - (ident_no_keyword, Name, '#pop'), - (r"'", String.Single, ('#pop', 'string-single')), - (r'"', String.Double, ('#pop', 'string-double')), - ], - - # after a key-value pair in object - 'object-sep': [ - 
include('spaces'), - (r'\}', Punctuation, '#pop'), - (r',', Punctuation, ('#pop', 'object')), - ], - - - - } - - def analyse_text(text): - if re.match(r'\w+\s*:\s*\w', text): return 0.3 - - -def _indentation(lexer, match, ctx): - indentation = match.group(0) - yield match.start(), Text, indentation - ctx.last_indentation = indentation - ctx.pos = match.end() - - if hasattr(ctx, 'block_state') and ctx.block_state and \ - indentation.startswith(ctx.block_indentation) and \ - indentation != ctx.block_indentation: - ctx.stack.append(ctx.block_state) - else: - ctx.block_state = None - ctx.block_indentation = None - ctx.stack.append('content') - -def _starts_block(token, state): - def callback(lexer, match, ctx): - yield match.start(), token, match.group(0) - - if hasattr(ctx, 'last_indentation'): - ctx.block_indentation = ctx.last_indentation - else: - ctx.block_indentation = '' - - ctx.block_state = state - ctx.pos = match.end() - - return callback - - -class HamlLexer(ExtendedRegexLexer): - """ - For Haml markup. - - *New in Pygments 1.3.* - """ - - name = 'Haml' - aliases = ['haml', 'HAML'] - filenames = ['*.haml'] - mimetypes = ['text/x-haml'] - - flags = re.IGNORECASE - # Haml can include " |\n" anywhere, - # which is ignored and used to wrap long lines. - # To accomodate this, use this custom faux dot instead. - _dot = r'(?: \|\n(?=.* \|)|.)' - - # In certain places, a comma at the end of the line - # allows line wrapping as well. - _comma_dot = r'(?:,\s*\n|' + _dot + ')' - tokens = { - 'root': [ - (r'[ \t]*\n', Text), - (r'[ \t]*', _indentation), - ], - - 'css': [ - (r'\.[a-z0-9_:-]+', Name.Class, 'tag'), - (r'\#[a-z0-9_:-]+', Name.Function, 'tag'), - ], - - 'eval-or-plain': [ - (r'[&!]?==', Punctuation, 'plain'), - (r'([&!]?[=~])(' + _comma_dot + r'*\n)', - bygroups(Punctuation, using(RubyLexer)), - 'root'), - (r'', Text, 'plain'), - ], - - 'content': [ - include('css'), - (r'%[a-z0-9_:-]+', Name.Tag, 'tag'), - (r'!!!' 
+ _dot + r'*\n', Name.Namespace, '#pop'), - (r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)', - bygroups(Comment, Comment.Special, Comment), - '#pop'), - (r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'), - '#pop'), - (r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc, - 'haml-comment-block'), '#pop'), - (r'(-)(' + _comma_dot + r'*\n)', - bygroups(Punctuation, using(RubyLexer)), - '#pop'), - (r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'), - '#pop'), - include('eval-or-plain'), - ], - - 'tag': [ - include('css'), - (r'\{(,\n|' + _dot + ')*?\}', using(RubyLexer)), - (r'\[' + _dot + '*?\]', using(RubyLexer)), - (r'\(', Text, 'html-attributes'), - (r'/[ \t]*\n', Punctuation, '#pop:2'), - (r'[<>]{1,2}(?=[ \t=])', Punctuation), - include('eval-or-plain'), - ], - - 'plain': [ - (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text), - (r'(#\{)(' + _dot + '*?)(\})', - bygroups(String.Interpol, using(RubyLexer), String.Interpol)), - (r'\n', Text, 'root'), - ], - - 'html-attributes': [ - (r'\s+', Text), - (r'[a-z0-9_:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'), - (r'[a-z0-9_:-]+', Name.Attribute), - (r'\)', Text, '#pop'), - ], - - 'html-attribute-value': [ - (r'[ \t]+', Text), - (r'[a-z0-9_]+', Name.Variable, '#pop'), - (r'@[a-z0-9_]+', Name.Variable.Instance, '#pop'), - (r'\$[a-z0-9_]+', Name.Variable.Global, '#pop'), - (r"'(\\\\|\\'|[^'\n])*'", String, '#pop'), - (r'"(\\\\|\\"|[^"\n])*"', String, '#pop'), - ], - - 'html-comment-block': [ - (_dot + '+', Comment), - (r'\n', Text, 'root'), - ], - - 'haml-comment-block': [ - (_dot + '+', Comment.Preproc), - (r'\n', Text, 'root'), - ], - - 'filter-block': [ - (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator), - (r'(#\{)(' + _dot + '*?)(\})', - bygroups(String.Interpol, using(RubyLexer), String.Interpol)), - (r'\n', Text, 'root'), - ], - } - - -common_sass_tokens = { - 'value': [ - (r'[ \t]+', Text), - (r'[!$][\w-]+', Name.Variable), - (r'url\(', String.Other, 'string-url'), - 
(r'[a-z_-][\w-]*(?=\()', Name.Function), - (r'(azimuth|background-attachment|background-color|' - r'background-image|background-position|background-repeat|' - r'background|border-bottom-color|border-bottom-style|' - r'border-bottom-width|border-left-color|border-left-style|' - r'border-left-width|border-right|border-right-color|' - r'border-right-style|border-right-width|border-top-color|' - r'border-top-style|border-top-width|border-bottom|' - r'border-collapse|border-left|border-width|border-color|' - r'border-spacing|border-style|border-top|border|caption-side|' - r'clear|clip|color|content|counter-increment|counter-reset|' - r'cue-after|cue-before|cue|cursor|direction|display|' - r'elevation|empty-cells|float|font-family|font-size|' - r'font-size-adjust|font-stretch|font-style|font-variant|' - r'font-weight|font|height|letter-spacing|line-height|' - r'list-style-type|list-style-image|list-style-position|' - r'list-style|margin-bottom|margin-left|margin-right|' - r'margin-top|margin|marker-offset|marks|max-height|max-width|' - r'min-height|min-width|opacity|orphans|outline|outline-color|' - r'outline-style|outline-width|overflow|padding-bottom|' - r'padding-left|padding-right|padding-top|padding|page|' - r'page-break-after|page-break-before|page-break-inside|' - r'pause-after|pause-before|pause|pitch|pitch-range|' - r'play-during|position|quotes|richness|right|size|' - r'speak-header|speak-numeral|speak-punctuation|speak|' - r'speech-rate|stress|table-layout|text-align|text-decoration|' - r'text-indent|text-shadow|text-transform|top|unicode-bidi|' - r'vertical-align|visibility|voice-family|volume|white-space|' - r'widows|width|word-spacing|z-index|bottom|left|' - r'above|absolute|always|armenian|aural|auto|avoid|baseline|' - r'behind|below|bidi-override|blink|block|bold|bolder|both|' - r'capitalize|center-left|center-right|center|circle|' - r'cjk-ideographic|close-quote|collapse|condensed|continuous|' - 
r'crop|crosshair|cross|cursive|dashed|decimal-leading-zero|' - r'decimal|default|digits|disc|dotted|double|e-resize|embed|' - r'extra-condensed|extra-expanded|expanded|fantasy|far-left|' - r'far-right|faster|fast|fixed|georgian|groove|hebrew|help|' - r'hidden|hide|higher|high|hiragana-iroha|hiragana|icon|' - r'inherit|inline-table|inline|inset|inside|invert|italic|' - r'justify|katakana-iroha|katakana|landscape|larger|large|' - r'left-side|leftwards|level|lighter|line-through|list-item|' - r'loud|lower-alpha|lower-greek|lower-roman|lowercase|ltr|' - r'lower|low|medium|message-box|middle|mix|monospace|' - r'n-resize|narrower|ne-resize|no-close-quote|no-open-quote|' - r'no-repeat|none|normal|nowrap|nw-resize|oblique|once|' - r'open-quote|outset|outside|overline|pointer|portrait|px|' - r'relative|repeat-x|repeat-y|repeat|rgb|ridge|right-side|' - r'rightwards|s-resize|sans-serif|scroll|se-resize|' - r'semi-condensed|semi-expanded|separate|serif|show|silent|' - r'slow|slower|small-caps|small-caption|smaller|soft|solid|' - r'spell-out|square|static|status-bar|super|sw-resize|' - r'table-caption|table-cell|table-column|table-column-group|' - r'table-footer-group|table-header-group|table-row|' - r'table-row-group|text|text-bottom|text-top|thick|thin|' - r'transparent|ultra-condensed|ultra-expanded|underline|' - r'upper-alpha|upper-latin|upper-roman|uppercase|url|' - r'visible|w-resize|wait|wider|x-fast|x-high|x-large|x-loud|' - r'x-low|x-small|x-soft|xx-large|xx-small|yes)\b', Name.Constant), - (r'(indigo|gold|firebrick|indianred|darkolivegreen|' - r'darkseagreen|mediumvioletred|mediumorchid|chartreuse|' - r'mediumslateblue|springgreen|crimson|lightsalmon|brown|' - r'turquoise|olivedrab|cyan|skyblue|darkturquoise|' - r'goldenrod|darkgreen|darkviolet|darkgray|lightpink|' - r'darkmagenta|lightgoldenrodyellow|lavender|yellowgreen|thistle|' - r'violet|orchid|ghostwhite|honeydew|cornflowerblue|' - r'darkblue|darkkhaki|mediumpurple|cornsilk|bisque|slategray|' - 
r'darkcyan|khaki|wheat|deepskyblue|darkred|steelblue|aliceblue|' - r'gainsboro|mediumturquoise|floralwhite|coral|lightgrey|' - r'lightcyan|darksalmon|beige|azure|lightsteelblue|oldlace|' - r'greenyellow|royalblue|lightseagreen|mistyrose|sienna|' - r'lightcoral|orangered|navajowhite|palegreen|burlywood|' - r'seashell|mediumspringgreen|papayawhip|blanchedalmond|' - r'peru|aquamarine|darkslategray|ivory|dodgerblue|' - r'lemonchiffon|chocolate|orange|forestgreen|slateblue|' - r'mintcream|antiquewhite|darkorange|cadetblue|moccasin|' - r'limegreen|saddlebrown|darkslateblue|lightskyblue|deeppink|' - r'plum|darkgoldenrod|sandybrown|magenta|tan|' - r'rosybrown|pink|lightblue|palevioletred|mediumseagreen|' - r'dimgray|powderblue|seagreen|snow|mediumblue|midnightblue|' - r'paleturquoise|palegoldenrod|whitesmoke|darkorchid|salmon|' - r'lightslategray|lawngreen|lightgreen|tomato|hotpink|' - r'lightyellow|lavenderblush|linen|mediumaquamarine|' - r'blueviolet|peachpuff)\b', Name.Entity), - (r'(black|silver|gray|white|maroon|red|purple|fuchsia|green|' - r'lime|olive|yellow|navy|blue|teal|aqua)\b', Name.Builtin), - (r'\!(important|default)', Name.Exception), - (r'(true|false)', Name.Pseudo), - (r'(and|or|not)', Operator.Word), - (r'/\*', Comment.Multiline, 'inline-comment'), - (r'//[^\n]*', Comment.Single), - (r'\#[a-z0-9]{1,6}', Number.Hex), - (r'(-?\d+)(\%|[a-z]+)?', bygroups(Number.Integer, Keyword.Type)), - (r'(-?\d*\.\d+)(\%|[a-z]+)?', bygroups(Number.Float, Keyword.Type)), - (r'#{', String.Interpol, 'interpolation'), - (r'[~\^\*!&%<>\|+=@:,./?-]+', Operator), - (r'[\[\]()]+', Punctuation), - (r'"', String.Double, 'string-double'), - (r"'", String.Single, 'string-single'), - (r'[a-z_-][\w-]*', Name), - ], - - 'interpolation': [ - (r'\}', String.Interpol, '#pop'), - include('value'), - ], - - 'selector': [ - (r'[ \t]+', Text), - (r'\:', Name.Decorator, 'pseudo-class'), - (r'\.', Name.Class, 'class'), - (r'\#', Name.Namespace, 'id'), - (r'[a-zA-Z0-9_-]+', Name.Tag), - (r'#\{', 
String.Interpol, 'interpolation'), - (r'&', Keyword), - (r'[~\^\*!&\[\]\(\)<>\|+=@:;,./?-]', Operator), - (r'"', String.Double, 'string-double'), - (r"'", String.Single, 'string-single'), - ], - - 'string-double': [ - (r'(\\.|#(?=[^\n{])|[^\n"#])+', String.Double), - (r'#\{', String.Interpol, 'interpolation'), - (r'"', String.Double, '#pop'), - ], - - 'string-single': [ - (r"(\\.|#(?=[^\n{])|[^\n'#])+", String.Double), - (r'#\{', String.Interpol, 'interpolation'), - (r"'", String.Double, '#pop'), - ], - - 'string-url': [ - (r'(\\#|#(?=[^\n{])|[^\n#)])+', String.Other), - (r'#\{', String.Interpol, 'interpolation'), - (r'\)', String.Other, '#pop'), - ], - - 'pseudo-class': [ - (r'[\w-]+', Name.Decorator), - (r'#\{', String.Interpol, 'interpolation'), - (r'', Text, '#pop'), - ], - - 'class': [ - (r'[\w-]+', Name.Class), - (r'#\{', String.Interpol, 'interpolation'), - (r'', Text, '#pop'), - ], - - 'id': [ - (r'[\w-]+', Name.Namespace), - (r'#\{', String.Interpol, 'interpolation'), - (r'', Text, '#pop'), - ], - - 'for': [ - (r'(from|to|through)', Operator.Word), - include('value'), - ], -} - -class SassLexer(ExtendedRegexLexer): - """ - For Sass stylesheets. 
- - *New in Pygments 1.3.* - """ - - name = 'Sass' - aliases = ['sass', 'SASS'] - filenames = ['*.sass'] - mimetypes = ['text/x-sass'] - - flags = re.IGNORECASE - tokens = { - 'root': [ - (r'[ \t]*\n', Text), - (r'[ \t]*', _indentation), - ], - - 'content': [ - (r'//[^\n]*', _starts_block(Comment.Single, 'single-comment'), - 'root'), - (r'/\*[^\n]*', _starts_block(Comment.Multiline, 'multi-comment'), - 'root'), - (r'@import', Keyword, 'import'), - (r'@for', Keyword, 'for'), - (r'@(debug|warn|if|while)', Keyword, 'value'), - (r'(@mixin)( [\w-]+)', bygroups(Keyword, Name.Function), 'value'), - (r'(@include)( [\w-]+)', bygroups(Keyword, Name.Decorator), 'value'), - (r'@extend', Keyword, 'selector'), - (r'@[a-z0-9_-]+', Keyword, 'selector'), - (r'=[\w-]+', Name.Function, 'value'), - (r'\+[\w-]+', Name.Decorator, 'value'), - (r'([!$][\w-]\w*)([ \t]*(?:(?:\|\|)?=|:))', - bygroups(Name.Variable, Operator), 'value'), - (r':', Name.Attribute, 'old-style-attr'), - (r'(?=.+?[=:]([^a-z]|$))', Name.Attribute, 'new-style-attr'), - (r'', Text, 'selector'), - ], - - 'single-comment': [ - (r'.+', Comment.Single), - (r'\n', Text, 'root'), - ], - - 'multi-comment': [ - (r'.+', Comment.Multiline), - (r'\n', Text, 'root'), - ], - - 'import': [ - (r'[ \t]+', Text), - (r'\S+', String), - (r'\n', Text, 'root'), - ], - - 'old-style-attr': [ - (r'[^\s:="\[]+', Name.Attribute), - (r'#{', String.Interpol, 'interpolation'), - (r'[ \t]*=', Operator, 'value'), - (r'', Text, 'value'), - ], - - 'new-style-attr': [ - (r'[^\s:="\[]+', Name.Attribute), - (r'#{', String.Interpol, 'interpolation'), - (r'[ \t]*[=:]', Operator, 'value'), - ], - - 'inline-comment': [ - (r"(\\#|#(?=[^\n{])|\*(?=[^\n/])|[^\n#*])+", Comment.Multiline), - (r'#\{', String.Interpol, 'interpolation'), - (r"\*/", Comment, '#pop'), - ], - } - for group, common in common_sass_tokens.iteritems(): - tokens[group] = copy.copy(common) - tokens['value'].append((r'\n', Text, 'root')) - tokens['selector'].append((r'\n', Text, 'root')) - - 
-class ScssLexer(RegexLexer): - """ - For SCSS stylesheets. - """ - - name = 'SCSS' - aliases = ['scss'] - filenames = ['*.scss'] - mimetypes = ['text/x-scss'] - - flags = re.IGNORECASE | re.DOTALL - tokens = { - 'root': [ - (r'\s+', Text), - (r'//.*?\n', Comment.Single), - (r'/\*.*?\*/', Comment.Multiline), - (r'@import', Keyword, 'value'), - (r'@for', Keyword, 'for'), - (r'@(debug|warn|if|while)', Keyword, 'value'), - (r'(@mixin)( [\w-]+)', bygroups(Keyword, Name.Function), 'value'), - (r'(@include)( [\w-]+)', bygroups(Keyword, Name.Decorator), 'value'), - (r'@extend', Keyword, 'selector'), - (r'@[a-z0-9_-]+', Keyword, 'selector'), - (r'(\$[\w-]*\w)([ \t]*:)', bygroups(Name.Variable, Operator), 'value'), - (r'(?=[^;{}][;}])', Name.Attribute, 'attr'), - (r'(?=[^;{}:]+:[^a-z])', Name.Attribute, 'attr'), - (r'', Text, 'selector'), - ], - - 'attr': [ - (r'[^\s:="\[]+', Name.Attribute), - (r'#{', String.Interpol, 'interpolation'), - (r'[ \t]*:', Operator, 'value'), - ], - - 'inline-comment': [ - (r"(\\#|#(?=[^{])|\*(?=[^/])|[^#*])+", Comment.Multiline), - (r'#\{', String.Interpol, 'interpolation'), - (r"\*/", Comment, '#pop'), - ], - } - for group, common in common_sass_tokens.iteritems(): - tokens[group] = copy.copy(common) - tokens['value'].extend([(r'\n', Text), (r'[;{}]', Punctuation, 'root')]) - tokens['selector'].extend([(r'\n', Text), (r'[;{}]', Punctuation, 'root')]) - - -class CoffeeScriptLexer(RegexLexer): - """ - For `CoffeeScript`_ source code. - - .. 
_CoffeeScript: http://coffeescript.org - - *New in Pygments 1.3.* - """ - - name = 'CoffeeScript' - aliases = ['coffee-script', 'coffeescript', 'coffee'] - filenames = ['*.coffee'] - mimetypes = ['text/coffeescript'] - - flags = re.DOTALL - tokens = { - 'commentsandwhitespace': [ - (r'\s+', Text), - (r'###[^#].*?###', Comment.Multiline), - (r'#(?!##[^#]).*?\n', Comment.Single), - ], - 'multilineregex': [ - (r'[^/#]+', String.Regex), - (r'///([gim]+\b|\B)', String.Regex, '#pop'), - (r'#{', String.Interpol, 'interpoling_string'), - (r'[/#]', String.Regex), - ], - 'slashstartsregex': [ - include('commentsandwhitespace'), - (r'///', String.Regex, ('#pop', 'multilineregex')), - (r'/(?! )(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' - r'([gim]+\b|\B)', String.Regex, '#pop'), - (r'', Text, '#pop'), - ], - 'root': [ - # this next expr leads to infinite loops root -> slashstartsregex - #(r'^(?=\s|/|)', popstate_xmlcomment_callback), - (r'[^-]{1,2}', Literal), - (ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' + - unirange(0x10000, 0x10ffff), Literal), - ], - 'processing_instruction': [ - (r'\s+', Text, 'processing_instruction_content'), - (r'\?>', String.Doc, '#pop'), - (pitarget, Name), - ], - 'processing_instruction_content': [ - (r'\?>', String.Doc, '#pop'), - (ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' + - unirange(0x10000, 0x10ffff), Literal), - ], - 'cdata_section': [ - (r']]>', String.Doc, '#pop'), - (ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' + - unirange(0x10000, 0x10ffff), Literal), - ], - 'start_tag': [ - include('whitespace'), - (r'(/>)', popstate_tag_callback), - (r'>', Name.Tag, 'element_content'), - (r'"', Punctuation, 'quot_attribute_content'), - (r"'", Punctuation, 'apos_attribute_content'), - (r'=', Operator), - (qname, Name.Tag), - ], - 'quot_attribute_content': [ - (r'"', Punctuation, 'start_tag'), - (r'(\{)', pushstate_root_callback), - (r'""', Name.Attribute), - (quotattrcontentchar, Name.Attribute), - (entityref, Name.Attribute), - (charref, 
Name.Attribute), - (r'\{\{|\}\}', Name.Attribute), - ], - 'apos_attribute_content': [ - (r"'", Punctuation, 'start_tag'), - (r'\{', Punctuation, 'root'), - (r"''", Name.Attribute), - (aposattrcontentchar, Name.Attribute), - (entityref, Name.Attribute), - (charref, Name.Attribute), - (r'\{\{|\}\}', Name.Attribute), - ], - 'element_content': [ - (r')', popstate_tag_callback), - (qname, Name.Tag), - ], - 'xmlspace_decl': [ - (r'\(:', Comment, 'comment'), - (r'preserve|strip', Keyword, '#pop'), - ], - 'declareordering': [ - (r'\(:', Comment, 'comment'), - include('whitespace'), - (r'ordered|unordered', Keyword, '#pop'), - ], - 'xqueryversion': [ - include('whitespace'), - (r'\(:', Comment, 'comment'), - (stringdouble, String.Double), - (stringsingle, String.Single), - (r'encoding', Keyword), - (r';', Punctuation, '#pop'), - ], - 'pragma': [ - (qname, Name.Variable, 'pragmacontents'), - ], - 'pragmacontents': [ - (r'#\)', Punctuation, 'operator'), - (ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' + - unirange(0x10000, 0x10ffff), Literal), - (r'(\s+)', Text), - ], - 'occurrenceindicator': [ - include('whitespace'), - (r'\(:', Comment, 'comment'), - (r'\*|\?|\+', Operator, 'operator'), - (r':=', Operator, 'root'), - (r'', Text, 'operator'), - ], - 'option': [ - include('whitespace'), - (qname, Name.Variable, '#pop'), - ], - 'qname_braren': [ - include('whitespace'), - (r'(\{)', pushstate_operator_root_callback), - (r'(\()', Punctuation, 'root'), - ], - 'element_qname': [ - (qname, Name.Variable, 'root'), - ], - 'attribute_qname': [ - (qname, Name.Variable, 'root'), - ], - 'root': [ - include('whitespace'), - (r'\(:', Comment, 'comment'), - - # handle operator state - # order on numbers matters - handle most complex first - (r'\d+(\.\d*)?[eE][\+\-]?\d+', Number.Double, 'operator'), - (r'(\.\d+)[eE][\+\-]?\d+', Number.Double, 'operator'), - (r'(\.\d+|\d+\.\d*)', Number, 'operator'), - (r'(\d+)', Number.Integer, 'operator'), - (r'(\.\.|\.|\))', Punctuation, 'operator'), - 
(r'(declare)(\s+)(construction)', - bygroups(Keyword, Text, Keyword), 'operator'), - (r'(declare)(\s+)(default)(\s+)(order)', - bygroups(Keyword, Text, Keyword, Text, Keyword), 'operator'), - (ncname + ':\*', Name, 'operator'), - ('\*:'+ncname, Name.Tag, 'operator'), - ('\*', Name.Tag, 'operator'), - (stringdouble, String.Double, 'operator'), - (stringsingle, String.Single, 'operator'), - - (r'(\})', popstate_callback), - - #NAMESPACE DECL - (r'(declare)(\s+)(default)(\s+)(collation)', - bygroups(Keyword, Text, Keyword, Text, Keyword)), - (r'(module|declare)(\s+)(namespace)', - bygroups(Keyword, Text, Keyword), 'namespacedecl'), - (r'(declare)(\s+)(base-uri)', - bygroups(Keyword, Text, Keyword), 'namespacedecl'), - - #NAMESPACE KEYWORD - (r'(declare)(\s+)(default)(\s+)(element|function)', - bygroups(Keyword, Text, Keyword, Text, Keyword), 'namespacekeyword'), - (r'(import)(\s+)(schema|module)', - bygroups(Keyword.Pseudo, Text, Keyword.Pseudo), 'namespacekeyword'), - (r'(declare)(\s+)(copy-namespaces)', - bygroups(Keyword, Text, Keyword), 'namespacekeyword'), - - #VARNAMEs - (r'(for|let|some|every)(\s+)(\$)', - bygroups(Keyword, Text, Name.Variable), 'varname'), - (r'\$', Name.Variable, 'varname'), - (r'(declare)(\s+)(variable)(\s+)(\$)', - bygroups(Keyword, Text, Keyword, Text, Name.Variable), 'varname'), - - #ITEMTYPE - (r'(\))(\s+)(as)', bygroups(Operator, Text, Keyword), 'itemtype'), - - (r'(element|attribute|schema-element|schema-attribute|comment|' - r'text|node|document-node|empty-sequence)(\s+)(\()', - pushstate_operator_kindtest_callback), - - (r'(processing-instruction)(\s+)(\()', - pushstate_operator_kindtestforpi_callback), - - (r'(', Punctuation), - (r'"(?:\\x[0-9a-fA-F]+\\|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|' - r'\\[0-7]+\\|\\[\w\W]|[^"])*"', String.Double), - (r"'(?:''|[^'])*'", String.Atom), # quoted atom - # Needs to not be followed by an atom. 
- #(r'=(?=\s|[a-zA-Z\[])', Operator), - (r'is\b', Operator), - (r'(<|>|=<|>=|==|=:=|=|/|//|\*|\+|-)(?=\s|[a-zA-Z0-9\[])', - Operator), - (r'(mod|div|not)\b', Operator), - (r'_', Keyword), # The don't-care variable - (r'([a-z]+)(:)', bygroups(Name.Namespace, Punctuation)), - ('([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]' - '[a-zA-Z0-9_$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)' - '(\\s*)(:-|-->)', - bygroups(Name.Function, Text, Operator)), # function defn - ('([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]' - '[a-zA-Z0-9_$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)' - '(\\s*)(\\()', - bygroups(Name.Function, Text, Punctuation)), - ('[a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]' - '[a-zA-Z0-9_$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*', - String.Atom), # atom, characters - # This one includes ! - ('[#&*+\\-./:<=>?@\\\\^~\u00a1-\u00bf\u2010-\u303f]+', - String.Atom), # atom, graphics - (r'[A-Z_][A-Za-z0-9_]*', Name.Variable), - ('\\s+|[\u2000-\u200f\ufff0-\ufffe\uffef]', Text), - ], - 'nested-comment': [ - (r'\*/', Comment.Multiline, '#pop'), - (r'/\*', Comment.Multiline, '#push'), - (r'[^*/]+', Comment.Multiline), - (r'[*/]', Comment.Multiline), - ], - } - - def analyse_text(text): - return ':-' in text - - -class CythonLexer(RegexLexer): - """ - For Pyrex and `Cython `_ source code. 
- - *New in Pygments 1.1.* - """ - - name = 'Cython' - aliases = ['cython', 'pyx', 'pyrex'] - filenames = ['*.pyx', '*.pxd', '*.pxi'] - mimetypes = ['text/x-cython', 'application/x-cython'] - - tokens = { - 'root': [ - (r'\n', Text), - (r'^(\s*)("""(?:.|\n)*?""")', bygroups(Text, String.Doc)), - (r"^(\s*)('''(?:.|\n)*?''')", bygroups(Text, String.Doc)), - (r'[^\S\n]+', Text), - (r'#.*$', Comment), - (r'[]{}:(),;[]', Punctuation), - (r'\\\n', Text), - (r'\\', Text), - (r'(in|is|and|or|not)\b', Operator.Word), - (r'(<)([a-zA-Z0-9.?]+)(>)', - bygroups(Punctuation, Keyword.Type, Punctuation)), - (r'!=|==|<<|>>|[-~+/*%=<>&^|.?]', Operator), - (r'(from)(\d+)(<=)(\s+)(<)(\d+)(:)', - bygroups(Keyword, Number.Integer, Operator, Name, Operator, - Name, Punctuation)), - include('keywords'), - (r'(def|property)(\s+)', bygroups(Keyword, Text), 'funcname'), - (r'(cp?def)(\s+)', bygroups(Keyword, Text), 'cdef'), - (r'(class|struct)(\s+)', bygroups(Keyword, Text), 'classname'), - (r'(from)(\s+)', bygroups(Keyword, Text), 'fromimport'), - (r'(c?import)(\s+)', bygroups(Keyword, Text), 'import'), - include('builtins'), - include('backtick'), - ('(?:[rR]|[uU][rR]|[rR][uU])"""', String, 'tdqs'), - ("(?:[rR]|[uU][rR]|[rR][uU])'''", String, 'tsqs'), - ('(?:[rR]|[uU][rR]|[rR][uU])"', String, 'dqs'), - ("(?:[rR]|[uU][rR]|[rR][uU])'", String, 'sqs'), - ('[uU]?"""', String, combined('stringescape', 'tdqs')), - ("[uU]?'''", String, combined('stringescape', 'tsqs')), - ('[uU]?"', String, combined('stringescape', 'dqs')), - ("[uU]?'", String, combined('stringescape', 'sqs')), - include('name'), - include('numbers'), - ], - 'keywords': [ - (r'(assert|break|by|continue|ctypedef|del|elif|else|except\??|exec|' - r'finally|for|gil|global|if|include|lambda|nogil|pass|print|raise|' - r'return|try|while|yield|as|with)\b', Keyword), - (r'(DEF|IF|ELIF|ELSE)\b', Comment.Preproc), - ], - 'builtins': [ - (r'(?/-]', Operator), - (r'(\[)(Compact|Immutable|(?:Boolean|Simple)Type)(\])', - bygroups(Punctuation, 
Name.Decorator, Punctuation)), - # TODO: "correctly" parse complex code attributes - (r'(\[)(CCode|(?:Integer|Floating)Type)', - bygroups(Punctuation, Name.Decorator)), - (r'[()\[\],.]', Punctuation), - (r'(as|base|break|case|catch|construct|continue|default|delete|do|' - r'else|enum|finally|for|foreach|get|if|in|is|lock|new|out|params|' - r'return|set|sizeof|switch|this|throw|try|typeof|while|yield)\b', - Keyword), - (r'(abstract|const|delegate|dynamic|ensures|extern|inline|internal|' - r'override|owned|private|protected|public|ref|requires|signal|' - r'static|throws|unowned|var|virtual|volatile|weak|yields)\b', - Keyword.Declaration), - (r'(namespace|using)(\s+)', bygroups(Keyword.Namespace, Text), - 'namespace'), - (r'(class|errordomain|interface|struct)(\s+)', - bygroups(Keyword.Declaration, Text), 'class'), - (r'(\.)([a-zA-Z_][a-zA-Z0-9_]*)', - bygroups(Operator, Name.Attribute)), - # void is an actual keyword, others are in glib-2.0.vapi - (r'(void|bool|char|double|float|int|int8|int16|int32|int64|long|' - r'short|size_t|ssize_t|string|time_t|uchar|uint|uint8|uint16|' - r'uint32|uint64|ulong|unichar|ushort)\b', Keyword.Type), - (r'(true|false|null)\b', Name.Builtin), - ('[a-zA-Z_][a-zA-Z0-9_]*', Name), - ], - 'root': [ - include('whitespace'), - ('', Text, 'statement'), - ], - 'statement' : [ - include('whitespace'), - include('statements'), - ('[{}]', Punctuation), - (';', Punctuation, '#pop'), - ], - 'string': [ - (r'"', String, '#pop'), - (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), - (r'[^\\"\n]+', String), # all other characters - (r'\\\n', String), # line continuation - (r'\\', String), # stray backslash - ], - 'if0': [ - (r'^\s*#if.*?(?`_ source code - - *New in Pygments 1.2.* - """ - name = 'Ooc' - aliases = ['ooc'] - filenames = ['*.ooc'] - mimetypes = ['text/x-ooc'] - - tokens = { - 'root': [ - (r'\b(class|interface|implement|abstract|extends|from|' - r'this|super|new|const|final|static|import|use|extern|' - 
r'inline|proto|break|continue|fallthrough|operator|if|else|for|' - r'while|do|switch|case|as|in|version|return|true|false|null)\b', - Keyword), - (r'include\b', Keyword, 'include'), - (r'(cover)([ \t]+)(from)([ \t]+)([a-zA-Z0-9_]+[*@]?)', - bygroups(Keyword, Text, Keyword, Text, Name.Class)), - (r'(func)((?:[ \t]|\\\n)+)(~[a-z_][a-zA-Z0-9_]*)', - bygroups(Keyword, Text, Name.Function)), - (r'\bfunc\b', Keyword), - # Note: %= and ^= not listed on http://ooc-lang.org/syntax - (r'//.*', Comment), - (r'(?s)/\*.*?\*/', Comment.Multiline), - (r'(==?|\+=?|-[=>]?|\*=?|/=?|:=|!=?|%=?|\?|>{1,3}=?|<{1,3}=?|\.\.|' - r'&&?|\|\|?|\^=?)', Operator), - (r'(\.)([ \t]*)([a-z]\w*)', bygroups(Operator, Text, - Name.Function)), - (r'[A-Z][A-Z0-9_]+', Name.Constant), - (r'[A-Z][a-zA-Z0-9_]*([@*]|\[[ \t]*\])?', Name.Class), - - (r'([a-z][a-zA-Z0-9_]*(?:~[a-z][a-zA-Z0-9_]*)?)((?:[ \t]|\\\n)*)(?=\()', - bygroups(Name.Function, Text)), - (r'[a-z][a-zA-Z0-9_]*', Name.Variable), - - # : introduces types - (r'[:(){}\[\];,]', Punctuation), - - (r'0x[0-9a-fA-F]+', Number.Hex), - (r'0c[0-9]+', Number.Oct), - (r'0b[01]+', Number.Binary), - (r'[0-9_]\.[0-9_]*(?!\.)', Number.Float), - (r'[0-9_]+', Number.Decimal), - - (r'"(?:\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\"])*"', - String.Double), - (r"'(?:\\.|\\[0-9]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", - String.Char), - (r'@', Punctuation), # pointer dereference - (r'\.', Punctuation), # imports or chain operator - - (r'\\[ \t\n]', Text), - (r'[ \t]+', Text), - ], - 'include': [ - (r'[\w/]+', Name), - (r',', Punctuation), - (r'[ \t]', Text), - (r'[;\n]', Text, '#pop'), - ], - } - - -class GoLexer(RegexLexer): - """ - For `Go `_ source. 
- """ - name = 'Go' - filenames = ['*.go'] - aliases = ['go'] - mimetypes = ['text/x-gosrc'] - - tokens = { - 'root': [ - (r'\n', Text), - (r'\s+', Text), - (r'\\\n', Text), # line continuations - (r'//(.*?)\n', Comment.Single), - (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), - (r'(import|package)\b', Keyword.Namespace), - (r'(var|func|struct|map|chan|type|interface|const)\b', Keyword.Declaration), - (r'(break|default|select|case|defer|go' - r'|else|goto|switch|fallthrough|if|range' - r'|continue|for|return)\b', Keyword), - (r'(true|false|iota|nil)\b', Keyword.Constant), - # It seems the builtin types aren't actually keywords, but - # can be used as functions. So we need two declarations. - (r'(uint|uint8|uint16|uint32|uint64' - r'|int|int8|int16|int32|int64' - r'|float|float32|float64' - r'|complex64|complex128|byte|rune' - r'|string|bool|error|uintptr' - r'|print|println|panic|recover|close|complex|real|imag' - r'|len|cap|append|copy|delete|new|make)\b(\()', - bygroups(Name.Builtin, Punctuation)), - (r'(uint|uint8|uint16|uint32|uint64' - r'|int|int8|int16|int32|int64' - r'|float|float32|float64' - r'|complex64|complex128|byte|rune' - r'|string|bool|error|uintptr)\b', Keyword.Type), - # imaginary_lit - (r'\d+i', Number), - (r'\d+\.\d*([Ee][-+]\d+)?i', Number), - (r'\.\d+([Ee][-+]\d+)?i', Number), - (r'\d+[Ee][-+]\d+i', Number), - # float_lit - (r'\d+(\.\d+[eE][+\-]?\d+|' - r'\.\d*|[eE][+\-]?\d+)', Number.Float), - (r'\.\d+([eE][+\-]?\d+)?', Number.Float), - # int_lit - # -- octal_lit - (r'0[0-7]+', Number.Oct), - # -- hex_lit - (r'0[xX][0-9a-fA-F]+', Number.Hex), - # -- decimal_lit - (r'(0|[1-9][0-9]*)', Number.Integer), - # char_lit - (r"""'(\\['"\\abfnrtv]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}""" - r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|[^\\])'""", - String.Char - ), - # StringLiteral - # -- raw_string_lit - (r'`[^`]*`', String), - # -- interpreted_string_lit - (r'"(\\\\|\\"|[^"])*"', String), - # Tokens - 
(r'(<<=|>>=|<<|>>|<=|>=|&\^=|&\^|\+=|-=|\*=|/=|%=|&=|\|=|&&|\|\|' - r'|<-|\+\+|--|==|!=|:=|\.\.\.|[+\-*/%&])', Operator), - (r'[|^<>=!()\[\]{}.,;:]', Punctuation), - # identifier - (r'[a-zA-Z_]\w*', Name.Other), - ] - } - - -class FelixLexer(RegexLexer): - """ - For `Felix `_ source code. - - *New in Pygments 1.2.* - """ - - name = 'Felix' - aliases = ['felix', 'flx'] - filenames = ['*.flx', '*.flxh'] - mimetypes = ['text/x-felix'] - - preproc = [ - 'elif', 'else', 'endif', 'if', 'ifdef', 'ifndef', - ] - - keywords = [ - '_', '_deref', 'all', 'as', - 'assert', 'attempt', 'call', 'callback', 'case', 'caseno', 'cclass', - 'code', 'compound', 'ctypes', 'do', 'done', 'downto', 'elif', 'else', - 'endattempt', 'endcase', 'endif', 'endmatch', 'enum', 'except', - 'exceptions', 'expect', 'finally', 'for', 'forall', 'forget', 'fork', - 'functor', 'goto', 'ident', 'if', 'incomplete', 'inherit', 'instance', - 'interface', 'jump', 'lambda', 'loop', 'match', 'module', 'namespace', - 'new', 'noexpand', 'nonterm', 'obj', 'of', 'open', 'parse', 'raise', - 'regexp', 'reglex', 'regmatch', 'rename', 'return', 'the', 'then', - 'to', 'type', 'typecase', 'typedef', 'typematch', 'typeof', 'upto', - 'when', 'whilst', 'with', 'yield', - ] - - keyword_directives = [ - '_gc_pointer', '_gc_type', 'body', 'comment', 'const', 'export', - 'header', 'inline', 'lval', 'macro', 'noinline', 'noreturn', - 'package', 'private', 'pod', 'property', 'public', 'publish', - 'requires', 'todo', 'virtual', 'use', - ] - - keyword_declarations = [ - 'def', 'let', 'ref', 'val', 'var', - ] - - keyword_types = [ - 'unit', 'void', 'any', 'bool', - 'byte', 'offset', - 'address', 'caddress', 'cvaddress', 'vaddress', - 'tiny', 'short', 'int', 'long', 'vlong', - 'utiny', 'ushort', 'vshort', 'uint', 'ulong', 'uvlong', - 'int8', 'int16', 'int32', 'int64', - 'uint8', 'uint16', 'uint32', 'uint64', - 'float', 'double', 'ldouble', - 'complex', 'dcomplex', 'lcomplex', - 'imaginary', 'dimaginary', 'limaginary', - 'char', 
'wchar', 'uchar', - 'charp', 'charcp', 'ucharp', 'ucharcp', - 'string', 'wstring', 'ustring', - 'cont', - 'array', 'varray', 'list', - 'lvalue', 'opt', 'slice', - ] - - keyword_constants = [ - 'false', 'true', - ] - - operator_words = [ - 'and', 'not', 'in', 'is', 'isin', 'or', 'xor', - ] - - name_builtins = [ - '_svc', 'while', - ] - - name_pseudo = [ - 'root', 'self', 'this', - ] - - decimal_suffixes = '([tTsSiIlLvV]|ll|LL|([iIuU])(8|16|32|64))?' - - tokens = { - 'root': [ - include('whitespace'), - - # Keywords - (r'(axiom|ctor|fun|gen|proc|reduce|union)\b', Keyword, - 'funcname'), - (r'(class|cclass|cstruct|obj|struct)\b', Keyword, 'classname'), - (r'(instance|module|typeclass)\b', Keyword, 'modulename'), - - (r'(%s)\b' % '|'.join(keywords), Keyword), - (r'(%s)\b' % '|'.join(keyword_directives), Name.Decorator), - (r'(%s)\b' % '|'.join(keyword_declarations), Keyword.Declaration), - (r'(%s)\b' % '|'.join(keyword_types), Keyword.Type), - (r'(%s)\b' % '|'.join(keyword_constants), Keyword.Constant), - - # Operators - include('operators'), - - # Float Literal - # -- Hex Float - (r'0[xX]([0-9a-fA-F_]*\.[0-9a-fA-F_]+|[0-9a-fA-F_]+)' - r'[pP][+\-]?[0-9_]+[lLfFdD]?', Number.Float), - # -- DecimalFloat - (r'[0-9_]+(\.[0-9_]+[eE][+\-]?[0-9_]+|' - r'\.[0-9_]*|[eE][+\-]?[0-9_]+)[lLfFdD]?', Number.Float), - (r'\.(0|[1-9][0-9_]*)([eE][+\-]?[0-9_]+)?[lLfFdD]?', - Number.Float), - - # IntegerLiteral - # -- Binary - (r'0[Bb][01_]+%s' % decimal_suffixes, Number), - # -- Octal - (r'0[0-7_]+%s' % decimal_suffixes, Number.Oct), - # -- Hexadecimal - (r'0[xX][0-9a-fA-F_]+%s' % decimal_suffixes, Number.Hex), - # -- Decimal - (r'(0|[1-9][0-9_]*)%s' % decimal_suffixes, Number.Integer), - - # Strings - ('([rR][cC]?|[cC][rR])"""', String, 'tdqs'), - ("([rR][cC]?|[cC][rR])'''", String, 'tsqs'), - ('([rR][cC]?|[cC][rR])"', String, 'dqs'), - ("([rR][cC]?|[cC][rR])'", String, 'sqs'), - ('[cCfFqQwWuU]?"""', String, combined('stringescape', 'tdqs')), - ("[cCfFqQwWuU]?'''", String, 
combined('stringescape', 'tsqs')), - ('[cCfFqQwWuU]?"', String, combined('stringescape', 'dqs')), - ("[cCfFqQwWuU]?'", String, combined('stringescape', 'sqs')), - - # Punctuation - (r'[\[\]{}:(),;?]', Punctuation), - - # Labels - (r'[a-zA-Z_]\w*:>', Name.Label), - - # Identifiers - (r'(%s)\b' % '|'.join(name_builtins), Name.Builtin), - (r'(%s)\b' % '|'.join(name_pseudo), Name.Builtin.Pseudo), - (r'[a-zA-Z_]\w*', Name), - ], - 'whitespace': [ - (r'\n', Text), - (r'\s+', Text), - - include('comment'), - - # Preprocessor - (r'#\s*if\s+0', Comment.Preproc, 'if0'), - (r'#', Comment.Preproc, 'macro'), - ], - 'operators': [ - (r'(%s)\b' % '|'.join(operator_words), Operator.Word), - (r'!=|==|<<|>>|\|\||&&|[-~+/*%=<>&^|.$]', Operator), - ], - 'comment': [ - (r'//(.*?)\n', Comment.Single), - (r'/[*]', Comment.Multiline, 'comment2'), - ], - 'comment2': [ - (r'[^\/*]', Comment.Multiline), - (r'/[*]', Comment.Multiline, '#push'), - (r'[*]/', Comment.Multiline, '#pop'), - (r'[\/*]', Comment.Multiline), - ], - 'if0': [ - (r'^\s*#if.*?(?]*?>)', - bygroups(Comment.Preproc, Text, String), '#pop'), - (r'(import|include)(\s+)("[^"]*?")', - bygroups(Comment.Preproc, Text, String), '#pop'), - (r"(import|include)(\s+)('[^']*?')", - bygroups(Comment.Preproc, Text, String), '#pop'), - (r'[^/\n]+', Comment.Preproc), - ##(r'/[*](.|\n)*?[*]/', Comment), - ##(r'//.*?\n', Comment, '#pop'), - (r'/', Comment.Preproc), - (r'(?<=\\)\n', Comment.Preproc), - (r'\n', Comment.Preproc, '#pop'), - ], - 'funcname': [ - include('whitespace'), - (r'[a-zA-Z_]\w*', Name.Function, '#pop'), - # anonymous functions - (r'(?=\()', Text, '#pop'), - ], - 'classname': [ - include('whitespace'), - (r'[a-zA-Z_]\w*', Name.Class, '#pop'), - # anonymous classes - (r'(?=\{)', Text, '#pop'), - ], - 'modulename': [ - include('whitespace'), - (r'\[', Punctuation, ('modulename2', 'tvarlist')), - (r'', Error, 'modulename2'), - ], - 'modulename2': [ - include('whitespace'), - (r'([a-zA-Z_]\w*)', Name.Namespace, '#pop:2'), - ], - 
'tvarlist': [ - include('whitespace'), - include('operators'), - (r'\[', Punctuation, '#push'), - (r'\]', Punctuation, '#pop'), - (r',', Punctuation), - (r'(with|where)\b', Keyword), - (r'[a-zA-Z_]\w*', Name), - ], - 'stringescape': [ - (r'\\([\\abfnrtv"\']|\n|N{.*?}|u[a-fA-F0-9]{4}|' - r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape) - ], - 'strings': [ - (r'%(\([a-zA-Z0-9]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?' - '[hlL]?[diouxXeEfFgGcrs%]', String.Interpol), - (r'[^\\\'"%\n]+', String), - # quotes, percents and backslashes must be parsed one at a time - (r'[\'"\\]', String), - # unhandled string formatting sign - (r'%', String) - # newlines are an error (use "nl" state) - ], - 'nl': [ - (r'\n', String) - ], - 'dqs': [ - (r'"', String, '#pop'), - # included here again for raw strings - (r'\\\\|\\"|\\\n', String.Escape), - include('strings') - ], - 'sqs': [ - (r"'", String, '#pop'), - # included here again for raw strings - (r"\\\\|\\'|\\\n", String.Escape), - include('strings') - ], - 'tdqs': [ - (r'"""', String, '#pop'), - include('strings'), - include('nl') - ], - 'tsqs': [ - (r"'''", String, '#pop'), - include('strings'), - include('nl') - ], - } - - -class AdaLexer(RegexLexer): - """ - For Ada source code. 
- - *New in Pygments 1.3.* - """ - - name = 'Ada' - aliases = ['ada', 'ada95' 'ada2005'] - filenames = ['*.adb', '*.ads', '*.ada'] - mimetypes = ['text/x-ada'] - - flags = re.MULTILINE | re.I # Ignore case - - tokens = { - 'root': [ - (r'[^\S\n]+', Text), - (r'--.*?\n', Comment.Single), - (r'[^\S\n]+', Text), - (r'function|procedure|entry', Keyword.Declaration, 'subprogram'), - (r'(subtype|type)(\s+)([a-z0-9_]+)', - bygroups(Keyword.Declaration, Text, Keyword.Type), 'type_def'), - (r'task|protected', Keyword.Declaration), - (r'(subtype)(\s+)', bygroups(Keyword.Declaration, Text)), - (r'(end)(\s+)', bygroups(Keyword.Reserved, Text), 'end'), - (r'(pragma)(\s+)([a-zA-Z0-9_]+)', bygroups(Keyword.Reserved, Text, - Comment.Preproc)), - (r'(true|false|null)\b', Keyword.Constant), - (r'(Address|Byte|Boolean|Character|Controlled|Count|Cursor|' - r'Duration|File_Mode|File_Type|Float|Generator|Integer|Long_Float|' - r'Long_Integer|Long_Long_Float|Long_Long_Integer|Natural|Positive|' - r'Reference_Type|Short_Float|Short_Integer|Short_Short_Float|' - r'Short_Short_Integer|String|Wide_Character|Wide_String)\b', - Keyword.Type), - (r'(and(\s+then)?|in|mod|not|or(\s+else)|rem)\b', Operator.Word), - (r'generic|private', Keyword.Declaration), - (r'package', Keyword.Declaration, 'package'), - (r'array\b', Keyword.Reserved, 'array_def'), - (r'(with|use)(\s+)', bygroups(Keyword.Namespace, Text), 'import'), - (r'([a-z0-9_]+)(\s*)(:)(\s*)(constant)', - bygroups(Name.Constant, Text, Punctuation, Text, - Keyword.Reserved)), - (r'<<[a-z0-9_]+>>', Name.Label), - (r'([a-z0-9_]+)(\s*)(:)(\s*)(declare|begin|loop|for|while)', - bygroups(Name.Label, Text, Punctuation, Text, Keyword.Reserved)), - (r'\b(abort|abs|abstract|accept|access|aliased|all|array|at|begin|' - r'body|case|constant|declare|delay|delta|digits|do|else|elsif|end|' - r'entry|exception|exit|interface|for|goto|if|is|limited|loop|new|' - r'null|of|or|others|out|overriding|pragma|protected|raise|range|' - 
r'record|renames|requeue|return|reverse|select|separate|subtype|' - r'synchronized|task|tagged|terminate|then|type|until|when|while|' - r'xor)\b', - Keyword.Reserved), - (r'"[^"]*"', String), - include('attribute'), - include('numbers'), - (r"'[^']'", String.Character), - (r'([a-z0-9_]+)(\s*|[(,])', bygroups(Name, using(this))), - (r"(<>|=>|:=|[()|:;,.'])", Punctuation), - (r'[*<>+=/&-]', Operator), - (r'\n+', Text), - ], - 'numbers' : [ - (r'[0-9_]+#[0-9a-f]+#', Number.Hex), - (r'[0-9_]+\.[0-9_]*', Number.Float), - (r'[0-9_]+', Number.Integer), - ], - 'attribute' : [ - (r"(')([a-zA-Z0-9_]+)", bygroups(Punctuation, Name.Attribute)), - ], - 'subprogram' : [ - (r'\(', Punctuation, ('#pop', 'formal_part')), - (r';', Punctuation, '#pop'), - (r'is\b', Keyword.Reserved, '#pop'), - (r'"[^"]+"|[a-z0-9_]+', Name.Function), - include('root'), - ], - 'end' : [ - ('(if|case|record|loop|select)', Keyword.Reserved), - ('"[^"]+"|[a-zA-Z0-9_.]+', Name.Function), - ('\s+', Text), - (';', Punctuation, '#pop'), - ], - 'type_def': [ - (r';', Punctuation, '#pop'), - (r'\(', Punctuation, 'formal_part'), - (r'with|and|use', Keyword.Reserved), - (r'array\b', Keyword.Reserved, ('#pop', 'array_def')), - (r'record\b', Keyword.Reserved, ('record_def')), - (r'(null record)(;)', bygroups(Keyword.Reserved, Punctuation), '#pop'), - include('root'), - ], - 'array_def' : [ - (r';', Punctuation, '#pop'), - (r'([a-z0-9_]+)(\s+)(range)', bygroups(Keyword.Type, Text, - Keyword.Reserved)), - include('root'), - ], - 'record_def' : [ - (r'end record', Keyword.Reserved, '#pop'), - include('root'), - ], - 'import': [ - (r'[a-z0-9_.]+', Name.Namespace, '#pop'), - (r'', Text, '#pop'), - ], - 'formal_part' : [ - (r'\)', Punctuation, '#pop'), - (r'[a-z0-9_]+', Name.Variable), - (r',|:[^=]', Punctuation), - (r'(in|not|null|out|access)\b', Keyword.Reserved), - include('root'), - ], - 'package': [ - ('body', Keyword.Declaration), - ('is\s+new|renames', Keyword.Reserved), - ('is', Keyword.Reserved, '#pop'), - (';', 
Punctuation, '#pop'), - ('\(', Punctuation, 'package_instantiation'), - ('([a-zA-Z0-9_.]+)', Name.Class), - include('root'), - ], - 'package_instantiation': [ - (r'("[^"]+"|[a-z0-9_]+)(\s+)(=>)', bygroups(Name.Variable, - Text, Punctuation)), - (r'[a-z0-9._\'"]', Text), - (r'\)', Punctuation, '#pop'), - include('root'), - ], - } - - -class Modula2Lexer(RegexLexer): - """ - For `Modula-2 `_ source code. - - Additional options that determine which keywords are highlighted: - - `pim` - Select PIM Modula-2 dialect (default: True). - `iso` - Select ISO Modula-2 dialect (default: False). - `objm2` - Select Objective Modula-2 dialect (default: False). - `gm2ext` - Also highlight GNU extensions (default: False). - - *New in Pygments 1.3.* - """ - name = 'Modula-2' - aliases = ['modula2', 'm2'] - filenames = ['*.def', '*.mod'] - mimetypes = ['text/x-modula2'] - - flags = re.MULTILINE | re.DOTALL - - tokens = { - 'whitespace': [ - (r'\n+', Text), # blank lines - (r'\s+', Text), # whitespace - ], - 'identifiers': [ - (r'([a-zA-Z_\$][a-zA-Z0-9_\$]*)', Name), - ], - 'numliterals': [ - (r'[01]+B', Number.Binary), # binary number (ObjM2) - (r'[0-7]+B', Number.Oct), # octal number (PIM + ISO) - (r'[0-7]+C', Number.Oct), # char code (PIM + ISO) - (r'[0-9A-F]+C', Number.Hex), # char code (ObjM2) - (r'[0-9A-F]+H', Number.Hex), # hexadecimal number - (r'[0-9]+\.[0-9]+E[+-][0-9]+', Number.Float), # real number - (r'[0-9]+\.[0-9]+', Number.Float), # real number - (r'[0-9]+', Number.Integer), # decimal whole number - ], - 'strings': [ - (r"'(\\\\|\\'|[^'])*'", String), # single quoted string - (r'"(\\\\|\\"|[^"])*"', String), # double quoted string - ], - 'operators': [ - (r'[*/+=#~&<>\^-]', Operator), - (r':=', Operator), # assignment - (r'@', Operator), # pointer deref (ISO) - (r'\.\.', Operator), # ellipsis or range - (r'`', Operator), # Smalltalk message (ObjM2) - (r'::', Operator), # type conversion (ObjM2) - ], - 'punctuation': [ - (r'[\(\)\[\]{},.:;|]', Punctuation), - ], - 
'comments': [ - (r'//.*?\n', Comment.Single), # ObjM2 - (r'/\*(.*?)\*/', Comment.Multiline), # ObjM2 - (r'\(\*([^\$].*?)\*\)', Comment.Multiline), - # TO DO: nesting of (* ... *) comments - ], - 'pragmas': [ - (r'\(\*\$(.*?)\*\)', Comment.Preproc), # PIM - (r'<\*(.*?)\*>', Comment.Preproc), # ISO + ObjM2 - ], - 'root': [ - include('whitespace'), - include('comments'), - include('pragmas'), - include('identifiers'), - include('numliterals'), - include('strings'), - include('operators'), - include('punctuation'), - ] - } - - pim_reserved_words = [ - # 40 reserved words - 'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', - 'DIV', 'DO', 'ELSE', 'ELSIF', 'END', 'EXIT', 'EXPORT', 'FOR', - 'FROM', 'IF', 'IMPLEMENTATION', 'IMPORT', 'IN', 'LOOP', 'MOD', - 'MODULE', 'NOT', 'OF', 'OR', 'POINTER', 'PROCEDURE', 'QUALIFIED', - 'RECORD', 'REPEAT', 'RETURN', 'SET', 'THEN', 'TO', 'TYPE', - 'UNTIL', 'VAR', 'WHILE', 'WITH', - ] - - pim_pervasives = [ - # 31 pervasives - 'ABS', 'BITSET', 'BOOLEAN', 'CAP', 'CARDINAL', 'CHAR', 'CHR', 'DEC', - 'DISPOSE', 'EXCL', 'FALSE', 'FLOAT', 'HALT', 'HIGH', 'INC', 'INCL', - 'INTEGER', 'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NEW', 'NIL', 'ODD', - 'ORD', 'PROC', 'REAL', 'SIZE', 'TRUE', 'TRUNC', 'VAL', - ] - - iso_reserved_words = [ - # 46 reserved words - 'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', 'DIV', - 'DO', 'ELSE', 'ELSIF', 'END', 'EXCEPT', 'EXIT', 'EXPORT', 'FINALLY', - 'FOR', 'FORWARD', 'FROM', 'IF', 'IMPLEMENTATION', 'IMPORT', 'IN', - 'LOOP', 'MOD', 'MODULE', 'NOT', 'OF', 'OR', 'PACKEDSET', 'POINTER', - 'PROCEDURE', 'QUALIFIED', 'RECORD', 'REPEAT', 'REM', 'RETRY', - 'RETURN', 'SET', 'THEN', 'TO', 'TYPE', 'UNTIL', 'VAR', 'WHILE', - 'WITH', - ] - - iso_pervasives = [ - # 42 pervasives - 'ABS', 'BITSET', 'BOOLEAN', 'CAP', 'CARDINAL', 'CHAR', 'CHR', 'CMPLX', - 'COMPLEX', 'DEC', 'DISPOSE', 'EXCL', 'FALSE', 'FLOAT', 'HALT', 'HIGH', - 'IM', 'INC', 'INCL', 'INT', 'INTEGER', 'INTERRUPTIBLE', 'LENGTH', - 'LFLOAT', 
'LONGCOMPLEX', 'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NEW', - 'NIL', 'ODD', 'ORD', 'PROC', 'PROTECTION', 'RE', 'REAL', 'SIZE', - 'TRUE', 'TRUNC', 'UNINTERRUBTIBLE', 'VAL', - ] - - objm2_reserved_words = [ - # base language, 42 reserved words - 'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', 'DIV', - 'DO', 'ELSE', 'ELSIF', 'END', 'ENUM', 'EXIT', 'FOR', 'FROM', 'IF', - 'IMMUTABLE', 'IMPLEMENTATION', 'IMPORT', 'IN', 'IS', 'LOOP', 'MOD', - 'MODULE', 'NOT', 'OF', 'OPAQUE', 'OR', 'POINTER', 'PROCEDURE', - 'RECORD', 'REPEAT', 'RETURN', 'SET', 'THEN', 'TO', 'TYPE', - 'UNTIL', 'VAR', 'VARIADIC', 'WHILE', - # OO extensions, 16 reserved words - 'BYCOPY', 'BYREF', 'CLASS', 'CONTINUE', 'CRITICAL', 'INOUT', 'METHOD', - 'ON', 'OPTIONAL', 'OUT', 'PRIVATE', 'PROTECTED', 'PROTOCOL', 'PUBLIC', - 'SUPER', 'TRY', - ] - - objm2_pervasives = [ - # base language, 38 pervasives - 'ABS', 'BITSET', 'BOOLEAN', 'CARDINAL', 'CHAR', 'CHR', 'DISPOSE', - 'FALSE', 'HALT', 'HIGH', 'INTEGER', 'INRANGE', 'LENGTH', 'LONGCARD', - 'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NEG', 'NEW', 'NEXTV', 'NIL', - 'OCTET', 'ODD', 'ORD', 'PRED', 'PROC', 'READ', 'REAL', 'SUCC', 'TMAX', - 'TMIN', 'TRUE', 'TSIZE', 'UNICHAR', 'VAL', 'WRITE', 'WRITEF', - # OO extensions, 3 pervasives - 'OBJECT', 'NO', 'YES', - ] - - gnu_reserved_words = [ - # 10 additional reserved words - 'ASM', '__ATTRIBUTE__', '__BUILTIN__', '__COLUMN__', '__DATE__', - '__FILE__', '__FUNCTION__', '__LINE__', '__MODULE__', 'VOLATILE', - ] - - gnu_pervasives = [ - # 21 identifiers, actually from pseudo-module SYSTEM - # but we will highlight them as if they were pervasives - 'BITSET8', 'BITSET16', 'BITSET32', 'CARDINAL8', 'CARDINAL16', - 'CARDINAL32', 'CARDINAL64', 'COMPLEX32', 'COMPLEX64', 'COMPLEX96', - 'COMPLEX128', 'INTEGER8', 'INTEGER16', 'INTEGER32', 'INTEGER64', - 'REAL8', 'REAL16', 'REAL32', 'REAL96', 'REAL128', 'THROW', - ] - - def __init__(self, **options): - self.reserved_words = set() - self.pervasives = set() - # ISO Modula-2 - if 
get_bool_opt(options, 'iso', False): - self.reserved_words.update(self.iso_reserved_words) - self.pervasives.update(self.iso_pervasives) - # Objective Modula-2 - elif get_bool_opt(options, 'objm2', False): - self.reserved_words.update(self.objm2_reserved_words) - self.pervasives.update(self.objm2_pervasives) - # PIM Modula-2 (DEFAULT) - else: - self.reserved_words.update(self.pim_reserved_words) - self.pervasives.update(self.pim_pervasives) - # GNU extensions - if get_bool_opt(options, 'gm2ext', False): - self.reserved_words.update(self.gnu_reserved_words) - self.pervasives.update(self.gnu_pervasives) - # initialise - RegexLexer.__init__(self, **options) - - def get_tokens_unprocessed(self, text): - for index, token, value in \ - RegexLexer.get_tokens_unprocessed(self, text): - # check for reserved words and pervasives - if token is Name: - if value in self.reserved_words: - token = Keyword.Reserved - elif value in self.pervasives: - token = Keyword.Pervasive - # return result - yield index, token, value - - -class BlitzMaxLexer(RegexLexer): - """ - For `BlitzMax `_ source code. 
- - *New in Pygments 1.4.* - """ - - name = 'BlitzMax' - aliases = ['blitzmax', 'bmax'] - filenames = ['*.bmx'] - mimetypes = ['text/x-bmx'] - - bmax_vopwords = r'\b(Shl|Shr|Sar|Mod)\b' - bmax_sktypes = r'@{1,2}|[!#$%]' - bmax_lktypes = r'\b(Int|Byte|Short|Float|Double|Long)\b' - bmax_name = r'[a-z_][a-z0-9_]*' - bmax_var = (r'(%s)(?:(?:([ \t]*)(%s)|([ \t]*:[ \t]*\b(?:Shl|Shr|Sar|Mod)\b)' - r'|([ \t]*)([:])([ \t]*)(?:%s|(%s)))(?:([ \t]*)(Ptr))?)') % \ - (bmax_name, bmax_sktypes, bmax_lktypes, bmax_name) - bmax_func = bmax_var + r'?((?:[ \t]|\.\.\n)*)([(])' - - flags = re.MULTILINE | re.IGNORECASE - tokens = { - 'root': [ - # Text - (r'[ \t]+', Text), - (r'\.\.\n', Text), # Line continuation - # Comments - (r"'.*?\n", Comment.Single), - (r'([ \t]*)\bRem\n(\n|.)*?\s*\bEnd([ \t]*)Rem', Comment.Multiline), - # Data types - ('"', String.Double, 'string'), - # Numbers - (r'[0-9]+\.[0-9]*(?!\.)', Number.Float), - (r'\.[0-9]*(?!\.)', Number.Float), - (r'[0-9]+', Number.Integer), - (r'\$[0-9a-f]+', Number.Hex), - (r'\%[10]+', Number), # Binary - # Other - (r'(?:(?:(:)?([ \t]*)(:?%s|([+\-*/&|~]))|Or|And|Not|[=<>^]))' % - (bmax_vopwords), Operator), - (r'[(),.:\[\]]', Punctuation), - (r'(?:#[\w \t]*)', Name.Label), - (r'(?:\?[\w \t]*)', Comment.Preproc), - # Identifiers - (r'\b(New)\b([ \t]?)([(]?)(%s)' % (bmax_name), - bygroups(Keyword.Reserved, Text, Punctuation, Name.Class)), - (r'\b(Import|Framework|Module)([ \t]+)(%s\.%s)' % - (bmax_name, bmax_name), - bygroups(Keyword.Reserved, Text, Keyword.Namespace)), - (bmax_func, bygroups(Name.Function, Text, Keyword.Type, - Operator, Text, Punctuation, Text, - Keyword.Type, Name.Class, Text, - Keyword.Type, Text, Punctuation)), - (bmax_var, bygroups(Name.Variable, Text, Keyword.Type, Operator, - Text, Punctuation, Text, Keyword.Type, - Name.Class, Text, Keyword.Type)), - (r'\b(Type|Extends)([ \t]+)(%s)' % (bmax_name), - bygroups(Keyword.Reserved, Text, Name.Class)), - # Keywords - (r'\b(Ptr)\b', Keyword.Type), - 
(r'\b(Pi|True|False|Null|Self|Super)\b', Keyword.Constant), - (r'\b(Local|Global|Const|Field)\b', Keyword.Declaration), - (r'\b(TNullMethodException|TNullFunctionException|' - r'TNullObjectException|TArrayBoundsException|' - r'TRuntimeException)\b', Name.Exception), - (r'\b(Strict|SuperStrict|Module|ModuleInfo|' - r'End|Return|Continue|Exit|Public|Private|' - r'Var|VarPtr|Chr|Len|Asc|SizeOf|Sgn|Abs|Min|Max|' - r'New|Release|Delete|' - r'Incbin|IncbinPtr|IncbinLen|' - r'Framework|Include|Import|Extern|EndExtern|' - r'Function|EndFunction|' - r'Type|EndType|Extends|' - r'Method|EndMethod|' - r'Abstract|Final|' - r'If|Then|Else|ElseIf|EndIf|' - r'For|To|Next|Step|EachIn|' - r'While|Wend|EndWhile|' - r'Repeat|Until|Forever|' - r'Select|Case|Default|EndSelect|' - r'Try|Catch|EndTry|Throw|Assert|' - r'Goto|DefData|ReadData|RestoreData)\b', Keyword.Reserved), - # Final resolve (for variable names and such) - (r'(%s)' % (bmax_name), Name.Variable), - ], - 'string': [ - (r'""', String.Double), - (r'"C?', String.Double, '#pop'), - (r'[^"]+', String.Double), - ], - } - - -class BlitzBasicLexer(RegexLexer): - """ - For `BlitzBasic `_ source code. 
- - *New in Pygments 1.7.* - """ - - name = 'BlitzBasic' - aliases = ['blitzbasic', 'b3d', 'bplus'] - filenames = ['*.bb', '*.decls'] - mimetypes = ['text/x-bb'] - - bb_vopwords = (r'\b(Shl|Shr|Sar|Mod|Or|And|Not|' - r'Abs|Sgn|Handle|Int|Float|Str|' - r'First|Last|Before|After)\b') - bb_sktypes = r'@{1,2}|[#$%]' - bb_name = r'[a-z][a-z0-9_]*' - bb_var = (r'(%s)(?:([ \t]*)(%s)|([ \t]*)([.])([ \t]*)(?:(%s)))?') % \ - (bb_name, bb_sktypes, bb_name) - - flags = re.MULTILINE | re.IGNORECASE - tokens = { - 'root': [ - # Text - (r'[ \t]+', Text), - # Comments - (r";.*?\n", Comment.Single), - # Data types - ('"', String.Double, 'string'), - # Numbers - (r'[0-9]+\.[0-9]*(?!\.)', Number.Float), - (r'\.[0-9]+(?!\.)', Number.Float), - (r'[0-9]+', Number.Integer), - (r'\$[0-9a-f]+', Number.Hex), - (r'\%[10]+', Number), # Binary - # Other - (r'(?:%s|([+\-*/~=<>^]))' % (bb_vopwords), Operator), - (r'[(),:\[\]\\]', Punctuation), - (r'\.([ \t]*)(%s)' % bb_name, Name.Label), - # Identifiers - (r'\b(New)\b([ \t]+)(%s)' % (bb_name), - bygroups(Keyword.Reserved, Text, Name.Class)), - (r'\b(Gosub|Goto)\b([ \t]+)(%s)' % (bb_name), - bygroups(Keyword.Reserved, Text, Name.Label)), - (r'\b(Object)\b([ \t]*)([.])([ \t]*)(%s)\b' % (bb_name), - bygroups(Operator, Text, Punctuation, Text, Name.Class)), - (r'\b%s\b([ \t]*)(\()' % bb_var, - bygroups(Name.Function, Text, Keyword.Type,Text, Punctuation, - Text, Name.Class, Text, Punctuation)), - (r'\b(Function)\b([ \t]+)%s' % bb_var, - bygroups(Keyword.Reserved, Text, Name.Function, Text, Keyword.Type, - Text, Punctuation, Text, Name.Class)), - (r'\b(Type)([ \t]+)(%s)' % (bb_name), - bygroups(Keyword.Reserved, Text, Name.Class)), - # Keywords - (r'\b(Pi|True|False|Null)\b', Keyword.Constant), - (r'\b(Local|Global|Const|Field|Dim)\b', Keyword.Declaration), - (r'\b(End|Return|Exit|' - r'Chr|Len|Asc|' - r'New|Delete|Insert|' - r'Include|' - r'Function|' - r'Type|' - r'If|Then|Else|ElseIf|EndIf|' - r'For|To|Next|Step|Each|' - r'While|Wend|' - 
r'Repeat|Until|Forever|' - r'Select|Case|Default|' - r'Goto|Gosub|Data|Read|Restore)\b', Keyword.Reserved), - # Final resolve (for variable names and such) -# (r'(%s)' % (bb_name), Name.Variable), - (bb_var, bygroups(Name.Variable, Text, Keyword.Type, - Text, Punctuation, Text, Name.Class)), - ], - 'string': [ - (r'""', String.Double), - (r'"C?', String.Double, '#pop'), - (r'[^"]+', String.Double), - ], - } - - -class NimrodLexer(RegexLexer): - """ - For `Nimrod `_ source code. - - *New in Pygments 1.5.* - """ - - name = 'Nimrod' - aliases = ['nimrod', 'nim'] - filenames = ['*.nim', '*.nimrod'] - mimetypes = ['text/x-nimrod'] - - flags = re.MULTILINE | re.IGNORECASE | re.UNICODE - - def underscorize(words): - newWords = [] - new = "" - for word in words: - for ch in word: - new += (ch + "_?") - newWords.append(new) - new = "" - return "|".join(newWords) - - keywords = [ - 'addr', 'and', 'as', 'asm', 'atomic', 'bind', 'block', 'break', - 'case', 'cast', 'const', 'continue', 'converter', 'discard', - 'distinct', 'div', 'elif', 'else', 'end', 'enum', 'except', 'finally', - 'for', 'generic', 'if', 'implies', 'in', 'yield', - 'is', 'isnot', 'iterator', 'lambda', 'let', 'macro', 'method', - 'mod', 'not', 'notin', 'object', 'of', 'or', 'out', 'proc', - 'ptr', 'raise', 'ref', 'return', 'shl', 'shr', 'template', 'try', - 'tuple', 'type' , 'when', 'while', 'with', 'without', 'xor' - ] - - keywordsPseudo = [ - 'nil', 'true', 'false' - ] - - opWords = [ - 'and', 'or', 'not', 'xor', 'shl', 'shr', 'div', 'mod', 'in', - 'notin', 'is', 'isnot' - ] - - types = [ - 'int', 'int8', 'int16', 'int32', 'int64', 'float', 'float32', 'float64', - 'bool', 'char', 'range', 'array', 'seq', 'set', 'string' - ] - - tokens = { - 'root': [ - (r'##.*$', String.Doc), - (r'#.*$', Comment), - (r'\*|=|>|<|\+|-|/|@|\$|~|&|%|\!|\?|\||\\|\[|\]', Operator), - (r'\.\.|\.|,|\[\.|\.\]|{\.|\.}|\(\.|\.\)|{|}|\(|\)|:|\^|`|;', - Punctuation), - - # Strings - (r'(?:[\w]+)"', String, 'rdqs'), - (r'"""', String, 
'tdqs'), - ('"', String, 'dqs'), - - # Char - ("'", String.Char, 'chars'), - - # Keywords - (r'(%s)\b' % underscorize(opWords), Operator.Word), - (r'(p_?r_?o_?c_?\s)(?![\(\[\]])', Keyword, 'funcname'), - (r'(%s)\b' % underscorize(keywords), Keyword), - (r'(%s)\b' % underscorize(['from', 'import', 'include']), - Keyword.Namespace), - (r'(v_?a_?r)\b', Keyword.Declaration), - (r'(%s)\b' % underscorize(types), Keyword.Type), - (r'(%s)\b' % underscorize(keywordsPseudo), Keyword.Pseudo), - # Identifiers - (r'\b((?![_\d])\w)(((?!_)\w)|(_(?!_)\w))*', Name), - # Numbers - (r'[0-9][0-9_]*(?=([eE.]|\'[fF](32|64)))', - Number.Float, ('float-suffix', 'float-number')), - (r'0[xX][a-fA-F0-9][a-fA-F0-9_]*', Number.Hex, 'int-suffix'), - (r'0[bB][01][01_]*', Number, 'int-suffix'), - (r'0o[0-7][0-7_]*', Number.Oct, 'int-suffix'), - (r'[0-9][0-9_]*', Number.Integer, 'int-suffix'), - # Whitespace - (r'\s+', Text), - (r'.+$', Error), - ], - 'chars': [ - (r'\\([\\abcefnrtvl"\']|x[a-fA-F0-9]{2}|[0-9]{1,3})', String.Escape), - (r"'", String.Char, '#pop'), - (r".", String.Char) - ], - 'strings': [ - (r'(?\?]*?', - ) - ) - - - tokens = { - 'comments': [ - (r'(?s)/\*.*?\*/', Comment.Multiline), #Multiline - (r'//.*?\n', Comment.Single), #Single line - #todo: highlight references in fandocs - (r'\*\*.*?\n', Comment.Special), #Fandoc - (r'#.*\n', Comment.Single) #Shell-style - ], - 'literals': [ - (r'\b-?[\d_]+(ns|ms|sec|min|hr|day)', Number), #Duration - (r'\b-?[\d_]*\.[\d_]+(ns|ms|sec|min|hr|day)', Number), - #Duration with dot - (r'\b-?(\d+)?\.\d+(f|F|d|D)?', Number.Float), #Float/Decimal - (r'\b-?0x[0-9a-fA-F_]+', Number.Hex), #Hex - (r'\b-?[\d_]+', Number.Integer), #Int - (r"'\\.'|'[^\\]'|'\\u[0-9a-f]{4}'", String.Char), #Char - (r'"', Punctuation, 'insideStr'), #Opening quote - (r'`', Punctuation, 'insideUri'), #Opening accent - (r'\b(true|false|null)\b', Keyword.Constant), #Bool & null - (r'(?:(\w+)(::))?(\w+)(<\|)(.*?)(\|>)', #DSL - bygroups(Name.Namespace, Punctuation, Name.Class, - 
Punctuation, String, Punctuation)), - (r'(?:(\w+)(::))?(\w+)?(#)(\w+)?', #Type/slot literal - bygroups(Name.Namespace, Punctuation, Name.Class, - Punctuation, Name.Function)), - (r'\[,\]', Literal), # Empty list - (s(r'($type)(\[,\])'), # Typed empty list - bygroups(using(this, state = 'inType'), Literal)), - (r'\[:\]', Literal), # Empty Map - (s(r'($type)(\[:\])'), - bygroups(using(this, state = 'inType'), Literal)), - ], - 'insideStr': [ - (r'\\\\', String.Escape), #Escaped backslash - (r'\\"', String.Escape), #Escaped " - (r'\\`', String.Escape), #Escaped ` - (r'\$\w+', String.Interpol), #Subst var - (r'\${.*?}', String.Interpol), #Subst expr - (r'"', Punctuation, '#pop'), #Closing quot - (r'.', String) #String content - ], - 'insideUri': [ #TODO: remove copy/paste str/uri - (r'\\\\', String.Escape), #Escaped backslash - (r'\\"', String.Escape), #Escaped " - (r'\\`', String.Escape), #Escaped ` - (r'\$\w+', String.Interpol), #Subst var - (r'\${.*?}', String.Interpol), #Subst expr - (r'`', Punctuation, '#pop'), #Closing tick - (r'.', String.Backtick) #URI content - ], - 'protectionKeywords': [ - (r'\b(public|protected|private|internal)\b', Keyword), - ], - 'typeKeywords': [ - (r'\b(abstract|final|const|native|facet|enum)\b', Keyword), - ], - 'methodKeywords': [ - (r'\b(abstract|native|once|override|static|virtual|final)\b', - Keyword), - ], - 'fieldKeywords': [ - (r'\b(abstract|const|final|native|override|static|virtual|' - r'readonly)\b', Keyword) - ], - 'otherKeywords': [ - (r'\b(try|catch|throw|finally|for|if|else|while|as|is|isnot|' - r'switch|case|default|continue|break|do|return|get|set)\b', - Keyword), - (r'\b(it|this|super)\b', Name.Builtin.Pseudo), - ], - 'operators': [ - (r'\+\+|\-\-|\+|\-|\*|/|\|\||&&|<=>|<=|<|>=|>|=|!|\[|\]', Operator) - ], - 'inType': [ - (r'[\[\]\|\->:\?]', Punctuation), - (s(r'$id'), Name.Class), - (r'', Text, '#pop'), - - ], - 'root': [ - include('comments'), - include('protectionKeywords'), - include('typeKeywords'), - 
include('methodKeywords'), - include('fieldKeywords'), - include('literals'), - include('otherKeywords'), - include('operators'), - (r'using\b', Keyword.Namespace, 'using'), # Using stmt - (r'@\w+', Name.Decorator, 'facet'), # Symbol - (r'(class|mixin)(\s+)(\w+)', bygroups(Keyword, Text, Name.Class), - 'inheritance'), # Inheritance list - - - ### Type var := val - (s(r'($type)([ \t]+)($id)(\s*)(:=)'), - bygroups(using(this, state = 'inType'), Text, - Name.Variable, Text, Operator)), - - ### var := val - (s(r'($id)(\s*)(:=)'), - bygroups(Name.Variable, Text, Operator)), - - ### .someId( or ->someId( ### - (s(r'(\.|(?:\->))($id)(\s*)(\()'), - bygroups(Operator, Name.Function, Text, Punctuation), - 'insideParen'), - - ### .someId or ->someId - (s(r'(\.|(?:\->))($id)'), - bygroups(Operator, Name.Function)), - - ### new makeXXX ( #### - (r'(new)(\s+)(make\w*)(\s*)(\()', - bygroups(Keyword, Text, Name.Function, Text, Punctuation), - 'insideMethodDeclArgs'), - - ### Type name ( #### - (s(r'($type)([ \t]+)' #Return type and whitespace - r'($id)(\s*)(\()'), #method name + open brace - bygroups(using(this, state = 'inType'), Text, - Name.Function, Text, Punctuation), - 'insideMethodDeclArgs'), - - ### ArgType argName, ##### - (s(r'($type)(\s+)($id)(\s*)(,)'), - bygroups(using(this, state='inType'), Text, Name.Variable, - Text, Punctuation)), - - #### ArgType argName) #### - ## Covered in 'insideParen' state - - ### ArgType argName -> ArgType| ### - (s(r'($type)(\s+)($id)(\s*)(\->)(\s*)($type)(\|)'), - bygroups(using(this, state='inType'), Text, Name.Variable, - Text, Punctuation, Text, using(this, state = 'inType'), - Punctuation)), - - ### ArgType argName| ### - (s(r'($type)(\s+)($id)(\s*)(\|)'), - bygroups(using(this, state='inType'), Text, Name.Variable, - Text, Punctuation)), - - ### Type var - (s(r'($type)([ \t]+)($id)'), - bygroups(using(this, state='inType'), Text, - Name.Variable)), - - (r'\(', Punctuation, 'insideParen'), - (r'\{', Punctuation, 'insideBrace'), - 
(r'.', Text) - ], - 'insideParen': [ - (r'\)', Punctuation, '#pop'), - include('root'), - ], - 'insideMethodDeclArgs': [ - (r'\)', Punctuation, '#pop'), - (s(r'($type)(\s+)($id)(\s*)(\))'), - bygroups(using(this, state='inType'), Text, Name.Variable, - Text, Punctuation), '#pop'), - include('root'), - ], - 'insideBrace': [ - (r'\}', Punctuation, '#pop'), - include('root'), - ], - 'inheritance': [ - (r'\s+', Text), #Whitespace - (r':|,', Punctuation), - (r'(?:(\w+)(::))?(\w+)', - bygroups(Name.Namespace, Punctuation, Name.Class)), - (r'{', Punctuation, '#pop') - ], - 'using': [ - (r'[ \t]+', Text), # consume whitespaces - (r'(\[)(\w+)(\])', - bygroups(Punctuation, Comment.Special, Punctuation)), #ffi - (r'(\")?([\w\.]+)(\")?', - bygroups(Punctuation, Name.Namespace, Punctuation)), #podname - (r'::', Punctuation, 'usingClass'), - (r'', Text, '#pop') - ], - 'usingClass': [ - (r'[ \t]+', Text), # consume whitespaces - (r'(as)(\s+)(\w+)', - bygroups(Keyword.Declaration, Text, Name.Class), '#pop:2'), - (r'[\w\$]+', Name.Class), - (r'', Text, '#pop:2') # jump out to root state - ], - 'facet': [ - (r'\s+', Text), - (r'{', Punctuation, 'facetFields'), - (r'', Text, '#pop') - ], - 'facetFields': [ - include('comments'), - include('literals'), - include('operators'), - (r'\s+', Text), - (r'(\s*)(\w+)(\s*)(=)', bygroups(Text, Name, Text, Operator)), - (r'}', Punctuation, '#pop'), - (r'.', Text) - ], - } - - -class RustLexer(RegexLexer): - """ - Lexer for Mozilla's Rust programming language. 
- - *New in Pygments 1.6.* - """ - name = 'Rust' - filenames = ['*.rs', '*.rc'] - aliases = ['rust'] - mimetypes = ['text/x-rustsrc'] - - tokens = { - 'root': [ - # Whitespace and Comments - (r'\n', Text), - (r'\s+', Text), - (r'//(.*?)\n', Comment.Single), - (r'/[*](.|\n)*?[*]/', Comment.Multiline), - - # Keywords - (r'(as|assert|break|const' - r'|copy|do|else|enum|extern|fail' - r'|false|fn|for|if|impl|let|log' - r'|loop|match|mod|move|mut|once|priv|pub|pure' - r'|ref|return|static|struct|trait|true|type|unsafe|use|while' - r'|u8|u16|u32|u64|i8|i16|i32|i64|uint' - r'|int|float|f32|f64|str)\b', Keyword), - - # Character Literal - (r"""'(\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}""" - r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|.)'""", - String.Char), - # Binary Literal - (r'0[Bb][01_]+', Number, 'number_lit'), - # Octal Literal - (r'0[0-7_]+', Number.Oct, 'number_lit'), - # Hexadecimal Literal - (r'0[xX][0-9a-fA-F_]+', Number.Hex, 'number_lit'), - # Decimal Literal - (r'[0-9][0-9_]*(\.[0-9_]+[eE][+\-]?' 
- r'[0-9_]+|\.[0-9_]*|[eE][+\-]?[0-9_]+)?', Number, 'number_lit'), - # String Literal - (r'"', String, 'string'), - - # Operators and Punctuation - (r'[{}()\[\],.;]', Punctuation), - (r'[+\-*/%&|<>^!~@=:?]', Operator), - - # Identifier - (r'[a-zA-Z_$][a-zA-Z0-9_]*', Name), - - # Attributes - (r'#\[', Comment.Preproc, 'attribute['), - (r'#\(', Comment.Preproc, 'attribute('), - # Macros - (r'[A-Za-z_][A-Za-z0-9_]*!\[', Comment.Preproc, 'attribute['), - (r'[A-Za-z_][A-Za-z0-9_]*!\(', Comment.Preproc, 'attribute('), - ], - 'number_lit': [ - (r'(([ui](8|16|32|64)?)|(f(32|64)?))?', Keyword, '#pop'), - ], - 'string': [ - (r'"', String, '#pop'), - (r"""\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}""" - r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}""", String.Escape), - (r'[^\\"]+', String), - (r'\\', String), - ], - 'attribute_common': [ - (r'"', String, 'string'), - (r'\[', Comment.Preproc, 'attribute['), - (r'\(', Comment.Preproc, 'attribute('), - ], - 'attribute[': [ - include('attribute_common'), - (r'\];?', Comment.Preproc, '#pop'), - (r'[^"\]]+', Comment.Preproc), - ], - 'attribute(': [ - include('attribute_common'), - (r'\);?', Comment.Preproc, '#pop'), - (r'[^"\)]+', Comment.Preproc), - ], - } - - -class CudaLexer(CLexer): - """ - For NVIDIA `CUDA™ `_ - source. 
- - *New in Pygments 1.6.* - """ - name = 'CUDA' - filenames = ['*.cu', '*.cuh'] - aliases = ['cuda', 'cu'] - mimetypes = ['text/x-cuda'] - - function_qualifiers = ['__device__', '__global__', '__host__', - '__noinline__', '__forceinline__'] - variable_qualifiers = ['__device__', '__constant__', '__shared__', - '__restrict__'] - vector_types = ['char1', 'uchar1', 'char2', 'uchar2', 'char3', 'uchar3', - 'char4', 'uchar4', 'short1', 'ushort1', 'short2', 'ushort2', - 'short3', 'ushort3', 'short4', 'ushort4', 'int1', 'uint1', - 'int2', 'uint2', 'int3', 'uint3', 'int4', 'uint4', 'long1', - 'ulong1', 'long2', 'ulong2', 'long3', 'ulong3', 'long4', - 'ulong4', 'longlong1', 'ulonglong1', 'longlong2', - 'ulonglong2', 'float1', 'float2', 'float3', 'float4', - 'double1', 'double2', 'dim3'] - variables = ['gridDim', 'blockIdx', 'blockDim', 'threadIdx', 'warpSize'] - functions = ['__threadfence_block', '__threadfence', '__threadfence_system', - '__syncthreads', '__syncthreads_count', '__syncthreads_and', - '__syncthreads_or'] - execution_confs = ['<<<', '>>>'] - - def get_tokens_unprocessed(self, text): - for index, token, value in \ - CLexer.get_tokens_unprocessed(self, text): - if token is Name: - if value in self.variable_qualifiers: - token = Keyword.Type - elif value in self.vector_types: - token = Keyword.Type - elif value in self.variables: - token = Name.Builtin - elif value in self.execution_confs: - token = Keyword.Pseudo - elif value in self.function_qualifiers: - token = Keyword.Reserved - elif value in self.functions: - token = Name.Function - yield index, token, value - - -class MonkeyLexer(RegexLexer): - """ - For - `Monkey `_ - source code. 
- - *New in Pygments 1.6.* - """ - - name = 'Monkey' - aliases = ['monkey'] - filenames = ['*.monkey'] - mimetypes = ['text/x-monkey'] - - name_variable = r'[a-z_][a-zA-Z0-9_]*' - name_function = r'[A-Z][a-zA-Z0-9_]*' - name_constant = r'[A-Z_][A-Z0-9_]*' - name_class = r'[A-Z][a-zA-Z0-9_]*' - name_module = r'[a-z0-9_]*' - - keyword_type = r'(?:Int|Float|String|Bool|Object|Array|Void)' - # ? == Bool // % == Int // # == Float // $ == String - keyword_type_special = r'[?%#$]' - - flags = re.MULTILINE - - tokens = { - 'root': [ - #Text - (r'\s+', Text), - # Comments - (r"'.*", Comment), - (r'(?i)^#rem\b', Comment.Multiline, 'comment'), - # preprocessor directives - (r'(?i)^(?:#If|#ElseIf|#Else|#EndIf|#End|#Print|#Error)\b', Comment.Preproc), - # preprocessor variable (any line starting with '#' that is not a directive) - (r'^#', Comment.Preproc, 'variables'), - # String - ('"', String.Double, 'string'), - # Numbers - (r'[0-9]+\.[0-9]*(?!\.)', Number.Float), - (r'\.[0-9]+(?!\.)', Number.Float), - (r'[0-9]+', Number.Integer), - (r'\$[0-9a-fA-Z]+', Number.Hex), - (r'\%[10]+', Number), # Binary - # Native data types - (r'\b%s\b' % keyword_type, Keyword.Type), - # Exception handling - (r'(?i)\b(?:Try|Catch|Throw)\b', Keyword.Reserved), - (r'Throwable', Name.Exception), - # Builtins - (r'(?i)\b(?:Null|True|False)\b', Name.Builtin), - (r'(?i)\b(?:Self|Super)\b', Name.Builtin.Pseudo), - (r'\b(?:HOST|LANG|TARGET|CONFIG)\b', Name.Constant), - # Keywords - (r'(?i)^(Import)(\s+)(.*)(\n)', - bygroups(Keyword.Namespace, Text, Name.Namespace, Text)), - (r'(?i)^Strict\b.*\n', Keyword.Reserved), - (r'(?i)(Const|Local|Global|Field)(\s+)', - bygroups(Keyword.Declaration, Text), 'variables'), - (r'(?i)(New|Class|Interface|Extends|Implements)(\s+)', - bygroups(Keyword.Reserved, Text), 'classname'), - (r'(?i)(Function|Method)(\s+)', - bygroups(Keyword.Reserved, Text), 'funcname'), - (r'(?i)(?:End|Return|Public|Private|Extern|Property|' - r'Final|Abstract)\b', Keyword.Reserved), - # Flow 
Control stuff - (r'(?i)(?:If|Then|Else|ElseIf|EndIf|' - r'Select|Case|Default|' - r'While|Wend|' - r'Repeat|Until|Forever|' - r'For|To|Until|Step|EachIn|Next|' - r'Exit|Continue)\s+', Keyword.Reserved), - # not used yet - (r'(?i)\b(?:Module|Inline)\b', Keyword.Reserved), - # Array - (r'[\[\]]', Punctuation), - # Other - (r'<=|>=|<>|\*=|/=|\+=|-=|&=|~=|\|=|[-&*/^+=<>|~]', Operator), - (r'(?i)(?:Not|Mod|Shl|Shr|And|Or)', Operator.Word), - (r'[\(\){}!#,.:]', Punctuation), - # catch the rest - (r'%s\b' % name_constant, Name.Constant), - (r'%s\b' % name_function, Name.Function), - (r'%s\b' % name_variable, Name.Variable), - ], - 'funcname': [ - (r'(?i)%s\b' % name_function, Name.Function), - (r':', Punctuation, 'classname'), - (r'\s+', Text), - (r'\(', Punctuation, 'variables'), - (r'\)', Punctuation, '#pop') - ], - 'classname': [ - (r'%s\.' % name_module, Name.Namespace), - (r'%s\b' % keyword_type, Keyword.Type), - (r'%s\b' % name_class, Name.Class), - # array (of given size) - (r'(\[)(\s*)(\d*)(\s*)(\])', - bygroups(Punctuation, Text, Number.Integer, Text, Punctuation)), - # generics - (r'\s+(?!<)', Text, '#pop'), - (r'<', Punctuation, '#push'), - (r'>', Punctuation, '#pop'), - (r'\n', Text, '#pop'), - (r'', Text, '#pop') - ], - 'variables': [ - (r'%s\b' % name_constant, Name.Constant), - (r'%s\b' % name_variable, Name.Variable), - (r'%s' % keyword_type_special, Keyword.Type), - (r'\s+', Text), - (r':', Punctuation, 'classname'), - (r',', Punctuation, '#push'), - (r'', Text, '#pop') - ], - 'string': [ - (r'[^"~]+', String.Double), - (r'~q|~n|~r|~t|~z|~~', String.Escape), - (r'"', String.Double, '#pop'), - ], - 'comment' : [ - (r'(?i)^#rem.*?', Comment.Multiline, "#push"), - (r'(?i)^#end.*?', Comment.Multiline, "#pop"), - (r'\n', Comment.Multiline), - (r'.+', Comment.Multiline), - ], - } - - -class CobolLexer(RegexLexer): - """ - Lexer for OpenCOBOL code. 
- - *New in Pygments 1.6.* - """ - name = 'COBOL' - aliases = ['cobol'] - filenames = ['*.cob', '*.COB', '*.cpy', '*.CPY'] - mimetypes = ['text/x-cobol'] - flags = re.IGNORECASE | re.MULTILINE - - # Data Types: by PICTURE and USAGE - # Operators: **, *, +, -, /, <, >, <=, >=, =, <> - # Logical (?): NOT, AND, OR - - # Reserved words: - # http://opencobol.add1tocobol.com/#reserved-words - # Intrinsics: - # http://opencobol.add1tocobol.com/#does-opencobol-implement-any-intrinsic-functions - - tokens = { - 'root': [ - include('comment'), - include('strings'), - include('core'), - include('nums'), - (r'[a-z0-9]([_a-z0-9\-]*[a-z0-9]+)?', Name.Variable), - # (r'[\s]+', Text), - (r'[ \t]+', Text), - ], - 'comment': [ - (r'(^.{6}[*/].*\n|^.{6}|\*>.*\n)', Comment), - ], - 'core': [ - # Figurative constants - (r'(^|(?<=[^0-9a-z_\-]))(ALL\s+)?' - r'((ZEROES)|(HIGH-VALUE|LOW-VALUE|QUOTE|SPACE|ZERO)(S)?)' - r'\s*($|(?=[^0-9a-z_\-]))', - Name.Constant), - - # Reserved words STATEMENTS and other bolds - (r'(^|(?<=[^0-9a-z_\-]))' - r'(ACCEPT|ADD|ALLOCATE|CALL|CANCEL|CLOSE|COMPUTE|' - r'CONFIGURATION|CONTINUE|' - r'DATA|DELETE|DISPLAY|DIVIDE|DIVISION|ELSE|END|END-ACCEPT|' - r'END-ADD|END-CALL|END-COMPUTE|END-DELETE|END-DISPLAY|' - r'END-DIVIDE|END-EVALUATE|END-IF|END-MULTIPLY|END-OF-PAGE|' - r'END-PERFORM|END-READ|END-RETURN|END-REWRITE|END-SEARCH|' - r'END-START|END-STRING|END-SUBTRACT|END-UNSTRING|END-WRITE|' - r'ENVIRONMENT|EVALUATE|EXIT|FD|FILE|FILE-CONTROL|FOREVER|' - r'FREE|GENERATE|GO|GOBACK|' - r'IDENTIFICATION|IF|INITIALIZE|' - r'INITIATE|INPUT-OUTPUT|INSPECT|INVOKE|I-O-CONTROL|LINKAGE|' - r'LOCAL-STORAGE|MERGE|MOVE|MULTIPLY|OPEN|' - r'PERFORM|PROCEDURE|PROGRAM-ID|RAISE|READ|RELEASE|RESUME|' - r'RETURN|REWRITE|SCREEN|' - r'SD|SEARCH|SECTION|SET|SORT|START|STOP|STRING|SUBTRACT|' - r'SUPPRESS|TERMINATE|THEN|UNLOCK|UNSTRING|USE|VALIDATE|' - r'WORKING-STORAGE|WRITE)' - r'\s*($|(?=[^0-9a-z_\-]))', Keyword.Reserved), - - # Reserved words - (r'(^|(?<=[^0-9a-z_\-]))' - 
r'(ACCESS|ADDRESS|ADVANCING|AFTER|ALL|' - r'ALPHABET|ALPHABETIC|ALPHABETIC-LOWER|ALPHABETIC-UPPER|' - r'ALPHANUMERIC|ALPHANUMERIC-EDITED|ALSO|ALTER|ALTERNATE' - r'ANY|ARE|AREA|AREAS|ARGUMENT-NUMBER|ARGUMENT-VALUE|AS|' - r'ASCENDING|ASSIGN|AT|AUTO|AUTO-SKIP|AUTOMATIC|AUTOTERMINATE|' - r'BACKGROUND-COLOR|BASED|BEEP|BEFORE|BELL|' - r'BLANK|' - r'BLINK|BLOCK|BOTTOM|BY|BYTE-LENGTH|CHAINING|' - r'CHARACTER|CHARACTERS|CLASS|CODE|CODE-SET|COL|COLLATING|' - r'COLS|COLUMN|COLUMNS|COMMA|COMMAND-LINE|COMMIT|COMMON|' - r'CONSTANT|CONTAINS|CONTENT|CONTROL|' - r'CONTROLS|CONVERTING|COPY|CORR|CORRESPONDING|COUNT|CRT|' - r'CURRENCY|CURSOR|CYCLE|DATE|DAY|DAY-OF-WEEK|DE|DEBUGGING|' - r'DECIMAL-POINT|DECLARATIVES|DEFAULT|DELIMITED|' - r'DELIMITER|DEPENDING|DESCENDING|DETAIL|DISK|' - r'DOWN|DUPLICATES|DYNAMIC|EBCDIC|' - r'ENTRY|ENVIRONMENT-NAME|ENVIRONMENT-VALUE|EOL|EOP|' - r'EOS|ERASE|ERROR|ESCAPE|EXCEPTION|' - r'EXCLUSIVE|EXTEND|EXTERNAL|' - r'FILE-ID|FILLER|FINAL|FIRST|FIXED|FLOAT-LONG|FLOAT-SHORT|' - r'FOOTING|FOR|FOREGROUND-COLOR|FORMAT|FROM|FULL|FUNCTION|' - r'FUNCTION-ID|GIVING|GLOBAL|GROUP|' - r'HEADING|HIGHLIGHT|I-O|ID|' - r'IGNORE|IGNORING|IN|INDEX|INDEXED|INDICATE|' - r'INITIAL|INITIALIZED|INPUT|' - r'INTO|INTRINSIC|INVALID|IS|JUST|JUSTIFIED|KEY|LABEL|' - r'LAST|LEADING|LEFT|LENGTH|LIMIT|LIMITS|LINAGE|' - r'LINAGE-COUNTER|LINE|LINES|LOCALE|LOCK|' - r'LOWLIGHT|MANUAL|MEMORY|MINUS|MODE|' - r'MULTIPLE|NATIONAL|NATIONAL-EDITED|NATIVE|' - r'NEGATIVE|NEXT|NO|NULL|NULLS|NUMBER|NUMBERS|NUMERIC|' - r'NUMERIC-EDITED|OBJECT-COMPUTER|OCCURS|OF|OFF|OMITTED|ON|ONLY|' - r'OPTIONAL|ORDER|ORGANIZATION|OTHER|OUTPUT|OVERFLOW|' - r'OVERLINE|PACKED-DECIMAL|PADDING|PAGE|PARAGRAPH|' - r'PLUS|POINTER|POSITION|POSITIVE|PRESENT|PREVIOUS|' - r'PRINTER|PRINTING|PROCEDURE-POINTER|PROCEDURES|' - r'PROCEED|PROGRAM|PROGRAM-POINTER|PROMPT|QUOTE|' - r'QUOTES|RANDOM|RD|RECORD|RECORDING|RECORDS|RECURSIVE|' - r'REDEFINES|REEL|REFERENCE|RELATIVE|REMAINDER|REMOVAL|' - 
r'RENAMES|REPLACING|REPORT|REPORTING|REPORTS|REPOSITORY|' - r'REQUIRED|RESERVE|RETURNING|REVERSE-VIDEO|REWIND|' - r'RIGHT|ROLLBACK|ROUNDED|RUN|SAME|SCROLL|' - r'SECURE|SEGMENT-LIMIT|SELECT|SENTENCE|SEPARATE|' - r'SEQUENCE|SEQUENTIAL|SHARING|SIGN|SIGNED|SIGNED-INT|' - r'SIGNED-LONG|SIGNED-SHORT|SIZE|SORT-MERGE|SOURCE|' - r'SOURCE-COMPUTER|SPECIAL-NAMES|STANDARD|' - r'STANDARD-1|STANDARD-2|STATUS|SUM|' - r'SYMBOLIC|SYNC|SYNCHRONIZED|TALLYING|TAPE|' - r'TEST|THROUGH|THRU|TIME|TIMES|TO|TOP|TRAILING|' - r'TRANSFORM|TYPE|UNDERLINE|UNIT|UNSIGNED|' - r'UNSIGNED-INT|UNSIGNED-LONG|UNSIGNED-SHORT|UNTIL|UP|' - r'UPDATE|UPON|USAGE|USING|VALUE|VALUES|VARYING|WAIT|WHEN|' - r'WITH|WORDS|YYYYDDD|YYYYMMDD)' - r'\s*($|(?=[^0-9a-z_\-]))', Keyword.Pseudo), - - # inactive reserved words - (r'(^|(?<=[^0-9a-z_\-]))' - r'(ACTIVE-CLASS|ALIGNED|ANYCASE|ARITHMETIC|ATTRIBUTE|B-AND|' - r'B-NOT|B-OR|B-XOR|BIT|BOOLEAN|CD|CENTER|CF|CH|CHAIN|CLASS-ID|' - r'CLASSIFICATION|COMMUNICATION|CONDITION|DATA-POINTER|' - r'DESTINATION|DISABLE|EC|EGI|EMI|ENABLE|END-RECEIVE|' - r'ENTRY-CONVENTION|EO|ESI|EXCEPTION-OBJECT|EXPANDS|FACTORY|' - r'FLOAT-BINARY-16|FLOAT-BINARY-34|FLOAT-BINARY-7|' - r'FLOAT-DECIMAL-16|FLOAT-DECIMAL-34|FLOAT-EXTENDED|FORMAT|' - r'FUNCTION-POINTER|GET|GROUP-USAGE|IMPLEMENTS|INFINITY|' - r'INHERITS|INTERFACE|INTERFACE-ID|INVOKE|LC_ALL|LC_COLLATE|' - r'LC_CTYPE|LC_MESSAGES|LC_MONETARY|LC_NUMERIC|LC_TIME|' - r'LINE-COUNTER|MESSAGE|METHOD|METHOD-ID|NESTED|NONE|NORMAL|' - r'OBJECT|OBJECT-REFERENCE|OPTIONS|OVERRIDE|PAGE-COUNTER|PF|PH|' - r'PROPERTY|PROTOTYPE|PURGE|QUEUE|RAISE|RAISING|RECEIVE|' - r'RELATION|REPLACE|REPRESENTS-NOT-A-NUMBER|RESET|RESUME|RETRY|' - r'RF|RH|SECONDS|SEGMENT|SELF|SEND|SOURCES|STATEMENT|STEP|' - r'STRONG|SUB-QUEUE-1|SUB-QUEUE-2|SUB-QUEUE-3|SUPER|SYMBOL|' - r'SYSTEM-DEFAULT|TABLE|TERMINAL|TEXT|TYPEDEF|UCS-4|UNIVERSAL|' - r'USER-DEFAULT|UTF-16|UTF-8|VAL-STATUS|VALID|VALIDATE|' - r'VALIDATE-STATUS)\s*($|(?=[^0-9a-z_\-]))', Error), - - # Data Types - 
(r'(^|(?<=[^0-9a-z_\-]))' - r'(PIC\s+.+?(?=(\s|\.\s))|PICTURE\s+.+?(?=(\s|\.\s))|' - r'(COMPUTATIONAL)(-[1-5X])?|(COMP)(-[1-5X])?|' - r'BINARY-C-LONG|' - r'BINARY-CHAR|BINARY-DOUBLE|BINARY-LONG|BINARY-SHORT|' - r'BINARY)\s*($|(?=[^0-9a-z_\-]))', Keyword.Type), - - # Operators - (r'(\*\*|\*|\+|-|/|<=|>=|<|>|==|/=|=)', Operator), - - # (r'(::)', Keyword.Declaration), - - (r'([(),;:&%.])', Punctuation), - - # Intrinsics - (r'(^|(?<=[^0-9a-z_\-]))(ABS|ACOS|ANNUITY|ASIN|ATAN|BYTE-LENGTH|' - r'CHAR|COMBINED-DATETIME|CONCATENATE|COS|CURRENT-DATE|' - r'DATE-OF-INTEGER|DATE-TO-YYYYMMDD|DAY-OF-INTEGER|DAY-TO-YYYYDDD|' - r'EXCEPTION-(?:FILE|LOCATION|STATEMENT|STATUS)|EXP10|EXP|E|' - r'FACTORIAL|FRACTION-PART|INTEGER-OF-(?:DATE|DAY|PART)|INTEGER|' - r'LENGTH|LOCALE-(?:DATE|TIME(?:-FROM-SECONDS)?)|LOG10|LOG|' - r'LOWER-CASE|MAX|MEAN|MEDIAN|MIDRANGE|MIN|MOD|NUMVAL(?:-C)?|' - r'ORD(?:-MAX|-MIN)?|PI|PRESENT-VALUE|RANDOM|RANGE|REM|REVERSE|' - r'SECONDS-FROM-FORMATTED-TIME|SECONDS-PAST-MIDNIGHT|SIGN|SIN|SQRT|' - r'STANDARD-DEVIATION|STORED-CHAR-LENGTH|SUBSTITUTE(?:-CASE)?|' - r'SUM|TAN|TEST-DATE-YYYYMMDD|TEST-DAY-YYYYDDD|TRIM|' - r'UPPER-CASE|VARIANCE|WHEN-COMPILED|YEAR-TO-YYYY)\s*' - r'($|(?=[^0-9a-z_\-]))', Name.Function), - - # Booleans - (r'(^|(?<=[^0-9a-z_\-]))(true|false)\s*($|(?=[^0-9a-z_\-]))', Name.Builtin), - # Comparing Operators - (r'(^|(?<=[^0-9a-z_\-]))(equal|equals|ne|lt|le|gt|ge|' - r'greater|less|than|not|and|or)\s*($|(?=[^0-9a-z_\-]))', Operator.Word), - ], - - # \"[^\"\n]*\"|\'[^\'\n]*\' - 'strings': [ - # apparently strings can be delimited by EOL if they are continued - # in the next line - (r'"[^"\n]*("|\n)', String.Double), - (r"'[^'\n]*('|\n)", String.Single), - ], - - 'nums': [ - (r'\d+(\s*|\.$|$)', Number.Integer), - (r'[+-]?\d*\.\d+([eE][-+]?\d+)?', Number.Float), - (r'[+-]?\d+\.\d*([eE][-+]?\d+)?', Number.Float), - ], - } - - -class CobolFreeformatLexer(CobolLexer): - """ - Lexer for Free format OpenCOBOL code. 
- - *New in Pygments 1.6.* - """ - name = 'COBOLFree' - aliases = ['cobolfree'] - filenames = ['*.cbl', '*.CBL'] - mimetypes = [] - flags = re.IGNORECASE | re.MULTILINE - - tokens = { - 'comment': [ - (r'(\*>.*\n|^\w*\*.*$)', Comment), - ], - } - - -class LogosLexer(ObjectiveCppLexer): - """ - For Logos + Objective-C source code with preprocessor directives. - - *New in Pygments 1.6.* - """ - - name = 'Logos' - aliases = ['logos'] - filenames = ['*.x', '*.xi', '*.xm', '*.xmi'] - mimetypes = ['text/x-logos'] - priority = 0.25 - - tokens = { - 'statements': [ - (r'(%orig|%log)\b', Keyword), - (r'(%c)\b(\()(\s*)([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*)(\))', - bygroups(Keyword, Punctuation, Text, Name.Class, Text, Punctuation)), - (r'(%init)\b(\()', - bygroups(Keyword, Punctuation), 'logos_init_directive'), - (r'(%init)(?=\s*;)', bygroups(Keyword)), - (r'(%hook|%group)(\s+)([a-zA-Z$_][a-zA-Z0-9$_]+)', - bygroups(Keyword, Text, Name.Class), '#pop'), - (r'(%subclass)(\s+)', bygroups(Keyword, Text), - ('#pop', 'logos_classname')), - inherit, - ], - 'logos_init_directive' : [ - ('\s+', Text), - (',', Punctuation, ('logos_init_directive', '#pop')), - ('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*)(=)(\s*)([^);]*)', - bygroups(Name.Class, Text, Punctuation, Text, Text)), - ('([a-zA-Z$_][a-zA-Z0-9$_]*)', Name.Class), - ('\)', Punctuation, '#pop'), - ], - 'logos_classname' : [ - ('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*:\s*)([a-zA-Z$_][a-zA-Z0-9$_]*)?', - bygroups(Name.Class, Text, Name.Class), '#pop'), - ('([a-zA-Z$_][a-zA-Z0-9$_]*)', Name.Class, '#pop') - ], - 'root': [ - (r'(%subclass)(\s+)', bygroups(Keyword, Text), - 'logos_classname'), - (r'(%hook|%group)(\s+)([a-zA-Z$_][a-zA-Z0-9$_]+)', - bygroups(Keyword, Text, Name.Class)), - (r'(%config)(\s*\(\s*)(\w+)(\s*=\s*)(.*?)(\s*\)\s*)', - bygroups(Keyword, Text, Name.Variable, Text, String, Text)), - (r'(%ctor)(\s*)({)', bygroups(Keyword, Text, Punctuation), - 'function'), - (r'(%new)(\s*)(\()(\s*.*?\s*)(\))', - bygroups(Keyword, Text, Keyword, String, 
Keyword)), - (r'(\s*)(%end)(\s*)', bygroups(Text, Keyword, Text)), - inherit, - ], - } - - _logos_keywords = re.compile(r'%(?:hook|ctor|init|c\()') - - def analyse_text(text): - if LogosLexer._logos_keywords.search(text): - return 1.0 - return 0 diff --git a/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/functional.py b/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/functional.py deleted file mode 100644 index 97da894..0000000 --- a/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/functional.py +++ /dev/null @@ -1,2731 +0,0 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.functional - ~~~~~~~~~~~~~~~~~~~~~~~~~~ - - Lexers for functional languages. - - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import re - -from pygments.lexer import Lexer, RegexLexer, bygroups, include, do_insertions -from pygments.token import Text, Comment, Operator, Keyword, Name, \ - String, Number, Punctuation, Literal, Generic, Error - -__all__ = ['RacketLexer', 'SchemeLexer', 'CommonLispLexer', 'HaskellLexer', - 'AgdaLexer', 'LiterateHaskellLexer', 'LiterateAgdaLexer', - 'SMLLexer', 'OcamlLexer', 'ErlangLexer', 'ErlangShellLexer', - 'OpaLexer', 'CoqLexer', 'NewLispLexer', 'ElixirLexer', - 'ElixirConsoleLexer', 'KokaLexer'] - - -line_re = re.compile('.*?\n') - - -class RacketLexer(RegexLexer): - """ - Lexer for `Racket `_ source code (formerly known as - PLT Scheme). 
- - *New in Pygments 1.6.* - """ - - name = 'Racket' - aliases = ['racket', 'rkt'] - filenames = ['*.rkt', '*.rktl'] - mimetypes = ['text/x-racket', 'application/x-racket'] - - # From namespace-mapped-symbols - keywords = [ - '#%app', '#%datum', '#%expression', '#%module-begin', - '#%plain-app', '#%plain-lambda', '#%plain-module-begin', - '#%provide', '#%require', '#%stratified-body', '#%top', - '#%top-interaction', '#%variable-reference', '...', 'and', 'begin', - 'begin-for-syntax', 'begin0', 'case', 'case-lambda', 'cond', - 'datum->syntax-object', 'define', 'define-for-syntax', - 'define-struct', 'define-syntax', 'define-syntax-rule', - 'define-syntaxes', 'define-values', 'define-values-for-syntax', - 'delay', 'do', 'expand-path', 'fluid-let', 'hash-table-copy', - 'hash-table-count', 'hash-table-for-each', 'hash-table-get', - 'hash-table-iterate-first', 'hash-table-iterate-key', - 'hash-table-iterate-next', 'hash-table-iterate-value', - 'hash-table-map', 'hash-table-put!', 'hash-table-remove!', - 'hash-table?', 'if', 'lambda', 'let', 'let*', 'let*-values', - 'let-struct', 'let-syntax', 'let-syntaxes', 'let-values', 'let/cc', - 'let/ec', 'letrec', 'letrec-syntax', 'letrec-syntaxes', - 'letrec-syntaxes+values', 'letrec-values', 'list-immutable', - 'make-hash-table', 'make-immutable-hash-table', 'make-namespace', - 'module', 'module-identifier=?', 'module-label-identifier=?', - 'module-template-identifier=?', 'module-transformer-identifier=?', - 'namespace-transformer-require', 'or', 'parameterize', - 'parameterize*', 'parameterize-break', 'provide', - 'provide-for-label', 'provide-for-syntax', 'quasiquote', - 'quasisyntax', 'quasisyntax/loc', 'quote', 'quote-syntax', - 'quote-syntax/prune', 'require', 'require-for-label', - 'require-for-syntax', 'require-for-template', 'set!', - 'set!-values', 'syntax', 'syntax-case', 'syntax-case*', - 'syntax-id-rules', 'syntax-object->datum', 'syntax-rules', - 'syntax/loc', 'time', 'transcript-off', 'transcript-on', 'unless', - 
'unquote', 'unquote-splicing', 'unsyntax', 'unsyntax-splicing', - 'when', 'with-continuation-mark', 'with-handlers', - 'with-handlers*', 'with-syntax', 'λ' - ] - - # From namespace-mapped-symbols - builtins = [ - '*', '+', '-', '/', '<', '<=', '=', '>', '>=', - 'abort-current-continuation', 'abs', 'absolute-path?', 'acos', - 'add1', 'alarm-evt', 'always-evt', 'andmap', 'angle', 'append', - 'apply', 'arithmetic-shift', 'arity-at-least', - 'arity-at-least-value', 'arity-at-least?', 'asin', 'assoc', 'assq', - 'assv', 'atan', 'banner', 'bitwise-and', 'bitwise-bit-field', - 'bitwise-bit-set?', 'bitwise-ior', 'bitwise-not', 'bitwise-xor', - 'boolean?', 'bound-identifier=?', 'box', 'box-immutable', 'box?', - 'break-enabled', 'break-thread', 'build-path', - 'build-path/convention-type', 'byte-pregexp', 'byte-pregexp?', - 'byte-ready?', 'byte-regexp', 'byte-regexp?', 'byte?', 'bytes', - 'bytes->immutable-bytes', 'bytes->list', 'bytes->path', - 'bytes->path-element', 'bytes->string/latin-1', - 'bytes->string/locale', 'bytes->string/utf-8', 'bytes-append', - 'bytes-close-converter', 'bytes-convert', 'bytes-convert-end', - 'bytes-converter?', 'bytes-copy', 'bytes-copy!', 'bytes-fill!', - 'bytes-length', 'bytes-open-converter', 'bytes-ref', 'bytes-set!', - 'bytes-utf-8-index', 'bytes-utf-8-length', 'bytes-utf-8-ref', - 'bytes?', 'bytes?', 'caaaar', 'caaadr', - 'caaar', 'caadar', 'caaddr', 'caadr', 'caar', 'cadaar', 'cadadr', - 'cadar', 'caddar', 'cadddr', 'caddr', 'cadr', - 'call-in-nested-thread', 'call-with-break-parameterization', - 'call-with-composable-continuation', - 'call-with-continuation-barrier', 'call-with-continuation-prompt', - 'call-with-current-continuation', 'call-with-escape-continuation', - 'call-with-exception-handler', - 'call-with-immediate-continuation-mark', 'call-with-input-file', - 'call-with-output-file', 'call-with-parameterization', - 'call-with-semaphore', 'call-with-semaphore/enable-break', - 'call-with-values', 'call/cc', 'call/ec', 'car', 
'cdaaar', - 'cdaadr', 'cdaar', 'cdadar', 'cdaddr', 'cdadr', 'cdar', 'cddaar', - 'cddadr', 'cddar', 'cdddar', 'cddddr', 'cdddr', 'cddr', 'cdr', - 'ceiling', 'channel-get', 'channel-put', 'channel-put-evt', - 'channel-try-get', 'channel?', 'chaperone-box', 'chaperone-evt', - 'chaperone-hash', 'chaperone-of?', 'chaperone-procedure', - 'chaperone-struct', 'chaperone-struct-type', 'chaperone-vector', - 'chaperone?', 'char->integer', 'char-alphabetic?', 'char-blank?', - 'char-ci<=?', 'char-ci=?', 'char-ci>?', - 'char-downcase', 'char-foldcase', 'char-general-category', - 'char-graphic?', 'char-iso-control?', 'char-lower-case?', - 'char-numeric?', 'char-punctuation?', 'char-ready?', - 'char-symbolic?', 'char-title-case?', 'char-titlecase', - 'char-upcase', 'char-upper-case?', 'char-utf-8-length', - 'char-whitespace?', 'char<=?', 'char=?', - 'char>?', 'char?', 'check-duplicate-identifier', - 'checked-procedure-check-and-extract', 'choice-evt', - 'cleanse-path', 'close-input-port', 'close-output-port', - 'collect-garbage', 'collection-file-path', 'collection-path', - 'compile', 'compile-allow-set!-undefined', - 'compile-context-preservation-enabled', - 'compile-enforce-module-constants', 'compile-syntax', - 'compiled-expression?', 'compiled-module-expression?', - 'complete-path?', 'complex?', 'cons', - 'continuation-mark-set->context', 'continuation-mark-set->list', - 'continuation-mark-set->list*', 'continuation-mark-set-first', - 'continuation-mark-set?', 'continuation-marks', - 'continuation-prompt-available?', 'continuation-prompt-tag?', - 'continuation?', 'copy-file', 'cos', - 'current-break-parameterization', 'current-code-inspector', - 'current-command-line-arguments', 'current-compile', - 'current-continuation-marks', 'current-custodian', - 'current-directory', 'current-drive', 'current-error-port', - 'current-eval', 'current-evt-pseudo-random-generator', - 'current-gc-milliseconds', 'current-get-interaction-input-port', - 'current-inexact-milliseconds', 
'current-input-port', - 'current-inspector', 'current-library-collection-paths', - 'current-load', 'current-load-extension', - 'current-load-relative-directory', 'current-load/use-compiled', - 'current-locale', 'current-memory-use', 'current-milliseconds', - 'current-module-declare-name', 'current-module-declare-source', - 'current-module-name-resolver', 'current-namespace', - 'current-output-port', 'current-parameterization', - 'current-preserved-thread-cell-values', 'current-print', - 'current-process-milliseconds', 'current-prompt-read', - 'current-pseudo-random-generator', 'current-read-interaction', - 'current-reader-guard', 'current-readtable', 'current-seconds', - 'current-security-guard', 'current-subprocess-custodian-mode', - 'current-thread', 'current-thread-group', - 'current-thread-initial-stack-size', - 'current-write-relative-directory', 'custodian-box-value', - 'custodian-box?', 'custodian-limit-memory', - 'custodian-managed-list', 'custodian-memory-accounting-available?', - 'custodian-require-memory', 'custodian-shutdown-all', 'custodian?', - 'custom-print-quotable-accessor', 'custom-print-quotable?', - 'custom-write-accessor', 'custom-write?', 'date', 'date*', - 'date*-nanosecond', 'date*-time-zone-name', 'date*?', 'date-day', - 'date-dst?', 'date-hour', 'date-minute', 'date-month', - 'date-second', 'date-time-zone-offset', 'date-week-day', - 'date-year', 'date-year-day', 'date?', 'datum-intern-literal', - 'default-continuation-prompt-tag', 'delete-directory', - 'delete-file', 'denominator', 'directory-exists?', - 'directory-list', 'display', 'displayln', 'dump-memory-stats', - 'dynamic-require', 'dynamic-require-for-syntax', 'dynamic-wind', - 'eof', 'eof-object?', 'ephemeron-value', 'ephemeron?', 'eprintf', - 'eq-hash-code', 'eq?', 'equal-hash-code', - 'equal-secondary-hash-code', 'equal?', 'equal?/recur', - 'eqv-hash-code', 'eqv?', 'error', 'error-display-handler', - 'error-escape-handler', 'error-print-context-length', - 
'error-print-source-location', 'error-print-width', - 'error-value->string-handler', 'eval', 'eval-jit-enabled', - 'eval-syntax', 'even?', 'evt?', 'exact->inexact', 'exact-integer?', - 'exact-nonnegative-integer?', 'exact-positive-integer?', 'exact?', - 'executable-yield-handler', 'exit', 'exit-handler', 'exn', - 'exn-continuation-marks', 'exn-message', 'exn:break', - 'exn:break-continuation', 'exn:break?', 'exn:fail', - 'exn:fail:contract', 'exn:fail:contract:arity', - 'exn:fail:contract:arity?', 'exn:fail:contract:continuation', - 'exn:fail:contract:continuation?', - 'exn:fail:contract:divide-by-zero', - 'exn:fail:contract:divide-by-zero?', - 'exn:fail:contract:non-fixnum-result', - 'exn:fail:contract:non-fixnum-result?', - 'exn:fail:contract:variable', 'exn:fail:contract:variable-id', - 'exn:fail:contract:variable?', 'exn:fail:contract?', - 'exn:fail:filesystem', 'exn:fail:filesystem:exists', - 'exn:fail:filesystem:exists?', 'exn:fail:filesystem:version', - 'exn:fail:filesystem:version?', 'exn:fail:filesystem?', - 'exn:fail:network', 'exn:fail:network?', 'exn:fail:out-of-memory', - 'exn:fail:out-of-memory?', 'exn:fail:read', - 'exn:fail:read-srclocs', 'exn:fail:read:eof', 'exn:fail:read:eof?', - 'exn:fail:read:non-char', 'exn:fail:read:non-char?', - 'exn:fail:read?', 'exn:fail:syntax', 'exn:fail:syntax-exprs', - 'exn:fail:syntax:unbound', 'exn:fail:syntax:unbound?', - 'exn:fail:syntax?', 'exn:fail:unsupported', - 'exn:fail:unsupported?', 'exn:fail:user', 'exn:fail:user?', - 'exn:fail?', 'exn:srclocs-accessor', 'exn:srclocs?', 'exn?', 'exp', - 'expand', 'expand-once', 'expand-syntax', 'expand-syntax-once', - 'expand-syntax-to-top-form', 'expand-to-top-form', - 'expand-user-path', 'expt', 'file-exists?', - 'file-or-directory-identity', 'file-or-directory-modify-seconds', - 'file-or-directory-permissions', 'file-position', 'file-size', - 'file-stream-buffer-mode', 'file-stream-port?', - 'filesystem-root-list', 'find-executable-path', - 
'find-library-collection-paths', 'find-system-path', 'fixnum?', - 'floating-point-bytes->real', 'flonum?', 'floor', 'flush-output', - 'for-each', 'force', 'format', 'fprintf', 'free-identifier=?', - 'gcd', 'generate-temporaries', 'gensym', 'get-output-bytes', - 'get-output-string', 'getenv', 'global-port-print-handler', - 'guard-evt', 'handle-evt', 'handle-evt?', 'hash', 'hash-equal?', - 'hash-eqv?', 'hash-has-key?', 'hash-placeholder?', 'hash-ref!', - 'hasheq', 'hasheqv', 'identifier-binding', - 'identifier-label-binding', 'identifier-prune-lexical-context', - 'identifier-prune-to-source-module', - 'identifier-remove-from-definition-context', - 'identifier-template-binding', 'identifier-transformer-binding', - 'identifier?', 'imag-part', 'immutable?', 'impersonate-box', - 'impersonate-hash', 'impersonate-procedure', 'impersonate-struct', - 'impersonate-vector', 'impersonator-of?', - 'impersonator-prop:application-mark', - 'impersonator-property-accessor-procedure?', - 'impersonator-property?', 'impersonator?', 'inexact->exact', - 'inexact-real?', 'inexact?', 'input-port?', 'inspector?', - 'integer->char', 'integer->integer-bytes', - 'integer-bytes->integer', 'integer-length', 'integer-sqrt', - 'integer-sqrt/remainder', 'integer?', - 'internal-definition-context-seal', 'internal-definition-context?', - 'keyword->string', 'keywordbytes', 'list->string', 'list->vector', 'list-ref', - 'list-tail', 'list?', 'load', 'load-extension', - 'load-on-demand-enabled', 'load-relative', - 'load-relative-extension', 'load/cd', 'load/use-compiled', - 'local-expand', 'local-expand/capture-lifts', - 'local-transformer-expand', - 'local-transformer-expand/capture-lifts', 'locale-string-encoding', - 'log', 'magnitude', 'make-arity-at-least', 'make-bytes', - 'make-channel', 'make-continuation-prompt-tag', 'make-custodian', - 'make-custodian-box', 'make-date', 'make-date*', - 'make-derived-parameter', 'make-directory', 'make-ephemeron', - 'make-exn', 'make-exn:break', 'make-exn:fail', - 
'make-exn:fail:contract', 'make-exn:fail:contract:arity', - 'make-exn:fail:contract:continuation', - 'make-exn:fail:contract:divide-by-zero', - 'make-exn:fail:contract:non-fixnum-result', - 'make-exn:fail:contract:variable', 'make-exn:fail:filesystem', - 'make-exn:fail:filesystem:exists', - 'make-exn:fail:filesystem:version', 'make-exn:fail:network', - 'make-exn:fail:out-of-memory', 'make-exn:fail:read', - 'make-exn:fail:read:eof', 'make-exn:fail:read:non-char', - 'make-exn:fail:syntax', 'make-exn:fail:syntax:unbound', - 'make-exn:fail:unsupported', 'make-exn:fail:user', - 'make-file-or-directory-link', 'make-hash-placeholder', - 'make-hasheq-placeholder', 'make-hasheqv', - 'make-hasheqv-placeholder', 'make-immutable-hasheqv', - 'make-impersonator-property', 'make-input-port', 'make-inspector', - 'make-known-char-range-list', 'make-output-port', 'make-parameter', - 'make-pipe', 'make-placeholder', 'make-polar', - 'make-prefab-struct', 'make-pseudo-random-generator', - 'make-reader-graph', 'make-readtable', 'make-rectangular', - 'make-rename-transformer', 'make-resolved-module-path', - 'make-security-guard', 'make-semaphore', 'make-set!-transformer', - 'make-shared-bytes', 'make-sibling-inspector', - 'make-special-comment', 'make-srcloc', 'make-string', - 'make-struct-field-accessor', 'make-struct-field-mutator', - 'make-struct-type', 'make-struct-type-property', - 'make-syntax-delta-introducer', 'make-syntax-introducer', - 'make-thread-cell', 'make-thread-group', 'make-vector', - 'make-weak-box', 'make-weak-hasheqv', 'make-will-executor', 'map', - 'max', 'mcar', 'mcdr', 'mcons', 'member', 'memq', 'memv', 'min', - 'module->exports', 'module->imports', 'module->language-info', - 'module->namespace', 'module-compiled-exports', - 'module-compiled-imports', 'module-compiled-language-info', - 'module-compiled-name', 'module-path-index-join', - 'module-path-index-resolve', 'module-path-index-split', - 'module-path-index?', 'module-path?', 'module-predefined?', - 
'module-provide-protected?', 'modulo', 'mpair?', 'nack-guard-evt', - 'namespace-attach-module', 'namespace-attach-module-declaration', - 'namespace-base-phase', 'namespace-mapped-symbols', - 'namespace-module-identifier', 'namespace-module-registry', - 'namespace-require', 'namespace-require/constant', - 'namespace-require/copy', 'namespace-require/expansion-time', - 'namespace-set-variable-value!', 'namespace-symbol->identifier', - 'namespace-syntax-introduce', 'namespace-undefine-variable!', - 'namespace-unprotect-module', 'namespace-variable-value', - 'namespace?', 'negative?', 'never-evt', 'newline', - 'normal-case-path', 'not', 'null', 'null?', 'number->string', - 'number?', 'numerator', 'object-name', 'odd?', 'open-input-bytes', - 'open-input-file', 'open-input-output-file', 'open-input-string', - 'open-output-bytes', 'open-output-file', 'open-output-string', - 'ormap', 'output-port?', 'pair?', 'parameter-procedure=?', - 'parameter?', 'parameterization?', 'path->bytes', - 'path->complete-path', 'path->directory-path', 'path->string', - 'path-add-suffix', 'path-convention-type', 'path-element->bytes', - 'path-element->string', 'path-for-some-system?', - 'path-list-string->path-list', 'path-replace-suffix', - 'path-string?', 'path?', 'peek-byte', 'peek-byte-or-special', - 'peek-bytes', 'peek-bytes!', 'peek-bytes-avail!', - 'peek-bytes-avail!*', 'peek-bytes-avail!/enable-break', - 'peek-char', 'peek-char-or-special', 'peek-string', 'peek-string!', - 'pipe-content-length', 'placeholder-get', 'placeholder-set!', - 'placeholder?', 'poll-guard-evt', 'port-closed-evt', - 'port-closed?', 'port-commit-peeked', 'port-count-lines!', - 'port-count-lines-enabled', 'port-display-handler', - 'port-file-identity', 'port-file-unlock', 'port-next-location', - 'port-print-handler', 'port-progress-evt', - 'port-provides-progress-evts?', 'port-read-handler', - 'port-try-file-lock?', 'port-write-handler', 'port-writes-atomic?', - 'port-writes-special?', 'port?', 'positive?', - 
'prefab-key->struct-type', 'prefab-struct-key', 'pregexp', - 'pregexp?', 'primitive-closure?', 'primitive-result-arity', - 'primitive?', 'print', 'print-as-expression', - 'print-boolean-long-form', 'print-box', 'print-graph', - 'print-hash-table', 'print-mpair-curly-braces', - 'print-pair-curly-braces', 'print-reader-abbreviations', - 'print-struct', 'print-syntax-width', 'print-unreadable', - 'print-vector-length', 'printf', 'procedure->method', - 'procedure-arity', 'procedure-arity-includes?', 'procedure-arity?', - 'procedure-closure-contents-eq?', 'procedure-extract-target', - 'procedure-reduce-arity', 'procedure-rename', - 'procedure-struct-type?', 'procedure?', 'promise?', - 'prop:arity-string', 'prop:checked-procedure', - 'prop:custom-print-quotable', 'prop:custom-write', - 'prop:equal+hash', 'prop:evt', 'prop:exn:srclocs', - 'prop:impersonator-of', 'prop:input-port', - 'prop:liberal-define-context', 'prop:output-port', - 'prop:procedure', 'prop:rename-transformer', - 'prop:set!-transformer', 'pseudo-random-generator->vector', - 'pseudo-random-generator-vector?', 'pseudo-random-generator?', - 'putenv', 'quotient', 'quotient/remainder', 'raise', - 'raise-arity-error', 'raise-mismatch-error', 'raise-syntax-error', - 'raise-type-error', 'raise-user-error', 'random', 'random-seed', - 'rational?', 'rationalize', 'read', 'read-accept-bar-quote', - 'read-accept-box', 'read-accept-compiled', 'read-accept-dot', - 'read-accept-graph', 'read-accept-infix-dot', 'read-accept-lang', - 'read-accept-quasiquote', 'read-accept-reader', 'read-byte', - 'read-byte-or-special', 'read-bytes', 'read-bytes!', - 'read-bytes-avail!', 'read-bytes-avail!*', - 'read-bytes-avail!/enable-break', 'read-bytes-line', - 'read-case-sensitive', 'read-char', 'read-char-or-special', - 'read-curly-brace-as-paren', 'read-decimal-as-inexact', - 'read-eval-print-loop', 'read-language', 'read-line', - 'read-on-demand-source', 'read-square-bracket-as-paren', - 'read-string', 'read-string!', 
'read-syntax', - 'read-syntax/recursive', 'read/recursive', 'readtable-mapping', - 'readtable?', 'real->double-flonum', 'real->floating-point-bytes', - 'real->single-flonum', 'real-part', 'real?', 'regexp', - 'regexp-match', 'regexp-match-peek', 'regexp-match-peek-immediate', - 'regexp-match-peek-positions', - 'regexp-match-peek-positions-immediate', - 'regexp-match-peek-positions-immediate/end', - 'regexp-match-peek-positions/end', 'regexp-match-positions', - 'regexp-match-positions/end', 'regexp-match/end', 'regexp-match?', - 'regexp-max-lookbehind', 'regexp-replace', 'regexp-replace*', - 'regexp?', 'relative-path?', 'remainder', - 'rename-file-or-directory', 'rename-transformer-target', - 'rename-transformer?', 'resolve-path', 'resolved-module-path-name', - 'resolved-module-path?', 'reverse', 'round', 'seconds->date', - 'security-guard?', 'semaphore-peek-evt', 'semaphore-post', - 'semaphore-try-wait?', 'semaphore-wait', - 'semaphore-wait/enable-break', 'semaphore?', - 'set!-transformer-procedure', 'set!-transformer?', 'set-box!', - 'set-mcar!', 'set-mcdr!', 'set-port-next-location!', - 'shared-bytes', 'shell-execute', 'simplify-path', 'sin', - 'single-flonum?', 'sleep', 'special-comment-value', - 'special-comment?', 'split-path', 'sqrt', 'srcloc', - 'srcloc-column', 'srcloc-line', 'srcloc-position', 'srcloc-source', - 'srcloc-span', 'srcloc?', 'string', 'string->bytes/latin-1', - 'string->bytes/locale', 'string->bytes/utf-8', - 'string->immutable-string', 'string->keyword', 'string->list', - 'string->number', 'string->path', 'string->path-element', - 'string->symbol', 'string->uninterned-symbol', - 'string->unreadable-symbol', 'string-append', 'string-ci<=?', - 'string-ci=?', 'string-ci>?', - 'string-copy', 'string-copy!', 'string-downcase', 'string-fill!', - 'string-foldcase', 'string-length', 'string-locale-ci?', - 'string-locale-downcase', 'string-locale-upcase', - 'string-locale?', - 'string-normalize-nfc', 'string-normalize-nfd', - 'string-normalize-nfkc', 
'string-normalize-nfkd', 'string-ref', - 'string-set!', 'string-titlecase', 'string-upcase', - 'string-utf-8-length', 'string<=?', 'string=?', 'string>?', 'string?', 'struct->vector', - 'struct-accessor-procedure?', 'struct-constructor-procedure?', - 'struct-info', 'struct-mutator-procedure?', - 'struct-predicate-procedure?', 'struct-type-info', - 'struct-type-make-constructor', 'struct-type-make-predicate', - 'struct-type-property-accessor-procedure?', - 'struct-type-property?', 'struct-type?', 'struct:arity-at-least', - 'struct:date', 'struct:date*', 'struct:exn', 'struct:exn:break', - 'struct:exn:fail', 'struct:exn:fail:contract', - 'struct:exn:fail:contract:arity', - 'struct:exn:fail:contract:continuation', - 'struct:exn:fail:contract:divide-by-zero', - 'struct:exn:fail:contract:non-fixnum-result', - 'struct:exn:fail:contract:variable', 'struct:exn:fail:filesystem', - 'struct:exn:fail:filesystem:exists', - 'struct:exn:fail:filesystem:version', 'struct:exn:fail:network', - 'struct:exn:fail:out-of-memory', 'struct:exn:fail:read', - 'struct:exn:fail:read:eof', 'struct:exn:fail:read:non-char', - 'struct:exn:fail:syntax', 'struct:exn:fail:syntax:unbound', - 'struct:exn:fail:unsupported', 'struct:exn:fail:user', - 'struct:srcloc', 'struct?', 'sub1', 'subbytes', 'subprocess', - 'subprocess-group-enabled', 'subprocess-kill', 'subprocess-pid', - 'subprocess-status', 'subprocess-wait', 'subprocess?', 'substring', - 'symbol->string', 'symbol-interned?', 'symbol-unreadable?', - 'symbol?', 'sync', 'sync/enable-break', 'sync/timeout', - 'sync/timeout/enable-break', 'syntax->list', 'syntax-arm', - 'syntax-column', 'syntax-disarm', 'syntax-e', 'syntax-line', - 'syntax-local-bind-syntaxes', 'syntax-local-certifier', - 'syntax-local-context', 'syntax-local-expand-expression', - 'syntax-local-get-shadower', 'syntax-local-introduce', - 'syntax-local-lift-context', 'syntax-local-lift-expression', - 'syntax-local-lift-module-end-declaration', - 'syntax-local-lift-provide', 
'syntax-local-lift-require', - 'syntax-local-lift-values-expression', - 'syntax-local-make-definition-context', - 'syntax-local-make-delta-introducer', - 'syntax-local-module-defined-identifiers', - 'syntax-local-module-exports', - 'syntax-local-module-required-identifiers', 'syntax-local-name', - 'syntax-local-phase-level', - 'syntax-local-transforming-module-provides?', 'syntax-local-value', - 'syntax-local-value/immediate', 'syntax-original?', - 'syntax-position', 'syntax-property', - 'syntax-property-symbol-keys', 'syntax-protect', 'syntax-rearm', - 'syntax-recertify', 'syntax-shift-phase-level', 'syntax-source', - 'syntax-source-module', 'syntax-span', 'syntax-taint', - 'syntax-tainted?', 'syntax-track-origin', - 'syntax-transforming-module-expression?', 'syntax-transforming?', - 'syntax?', 'system-big-endian?', 'system-idle-evt', - 'system-language+country', 'system-library-subpath', - 'system-path-convention-type', 'system-type', 'tan', - 'tcp-abandon-port', 'tcp-accept', 'tcp-accept-evt', - 'tcp-accept-ready?', 'tcp-accept/enable-break', 'tcp-addresses', - 'tcp-close', 'tcp-connect', 'tcp-connect/enable-break', - 'tcp-listen', 'tcp-listener?', 'tcp-port?', 'terminal-port?', - 'thread', 'thread-cell-ref', 'thread-cell-set!', 'thread-cell?', - 'thread-dead-evt', 'thread-dead?', 'thread-group?', - 'thread-resume', 'thread-resume-evt', 'thread-rewind-receive', - 'thread-running?', 'thread-suspend', 'thread-suspend-evt', - 'thread-wait', 'thread/suspend-to-kill', 'thread?', 'time-apply', - 'truncate', 'udp-addresses', 'udp-bind!', 'udp-bound?', - 'udp-close', 'udp-connect!', 'udp-connected?', 'udp-open-socket', - 'udp-receive!', 'udp-receive!*', 'udp-receive!-evt', - 'udp-receive!/enable-break', 'udp-receive-ready-evt', 'udp-send', - 'udp-send*', 'udp-send-evt', 'udp-send-ready-evt', 'udp-send-to', - 'udp-send-to*', 'udp-send-to-evt', 'udp-send-to/enable-break', - 'udp-send/enable-break', 'udp?', 'unbox', - 'uncaught-exception-handler', 
'use-collection-link-paths', - 'use-compiled-file-paths', 'use-user-specific-search-paths', - 'values', 'variable-reference->empty-namespace', - 'variable-reference->module-base-phase', - 'variable-reference->module-declaration-inspector', - 'variable-reference->module-source', - 'variable-reference->namespace', 'variable-reference->phase', - 'variable-reference->resolved-module-path', - 'variable-reference-constant?', 'variable-reference?', 'vector', - 'vector->immutable-vector', 'vector->list', - 'vector->pseudo-random-generator', - 'vector->pseudo-random-generator!', 'vector->values', - 'vector-fill!', 'vector-immutable', 'vector-length', 'vector-ref', - 'vector-set!', 'vector-set-performance-stats!', 'vector?', - 'version', 'void', 'void?', 'weak-box-value', 'weak-box?', - 'will-execute', 'will-executor?', 'will-register', - 'will-try-execute', 'with-input-from-file', 'with-output-to-file', - 'wrap-evt', 'write', 'write-byte', 'write-bytes', - 'write-bytes-avail', 'write-bytes-avail*', 'write-bytes-avail-evt', - 'write-bytes-avail/enable-break', 'write-char', 'write-special', - 'write-special-avail*', 'write-special-evt', 'write-string', 'zero?' - ] - - # From SchemeLexer - valid_name = r'[a-zA-Z0-9!$%&*+,/:<=>?@^_~|-]+' - - tokens = { - 'root' : [ - (r';.*$', Comment.Single), - (r'#\|[^|]+\|#', Comment.Multiline), - - # whitespaces - usually not relevant - (r'\s+', Text), - - ## numbers: Keep in mind Racket reader hash prefixes, - ## which can denote the base or the type. These don't map - ## neatly onto pygments token types; some judgment calls - ## here. Note that none of these regexps attempt to - ## exclude identifiers that start with a number, such as a - ## variable named "100-Continue". 
- - # #b - (r'#b[-+]?[01]+\.[01]+', Number.Float), - (r'#b[01]+e[-+]?[01]+', Number.Float), - (r'#b[-+]?[01]/[01]+', Number), - (r'#b[-+]?[01]+', Number.Integer), - (r'#b\S*', Error), - - # #d OR no hash prefix - (r'(#d)?[-+]?\d+\.\d+', Number.Float), - (r'(#d)?\d+e[-+]?\d+', Number.Float), - (r'(#d)?[-+]?\d+/\d+', Number), - (r'(#d)?[-+]?\d+', Number.Integer), - (r'#d\S*', Error), - - # #e - (r'#e[-+]?\d+\.\d+', Number.Float), - (r'#e\d+e[-+]?\d+', Number.Float), - (r'#e[-+]?\d+/\d+', Number), - (r'#e[-+]?\d+', Number), - (r'#e\S*', Error), - - # #i is always inexact-real, i.e. float - (r'#i[-+]?\d+\.\d+', Number.Float), - (r'#i\d+e[-+]?\d+', Number.Float), - (r'#i[-+]?\d+/\d+', Number.Float), - (r'#i[-+]?\d+', Number.Float), - (r'#i\S*', Error), - - # #o - (r'#o[-+]?[0-7]+\.[0-7]+', Number.Oct), - (r'#o[0-7]+e[-+]?[0-7]+', Number.Oct), - (r'#o[-+]?[0-7]+/[0-7]+', Number.Oct), - (r'#o[-+]?[0-7]+', Number.Oct), - (r'#o\S*', Error), - - # #x - (r'#x[-+]?[0-9a-fA-F]+\.[0-9a-fA-F]+', Number.Hex), - # the exponent variation (e.g. #x1e1) is N/A - (r'#x[-+]?[0-9a-fA-F]+/[0-9a-fA-F]+', Number.Hex), - (r'#x[-+]?[0-9a-fA-F]+', Number.Hex), - (r'#x\S*', Error), - - - # strings, symbols and characters - (r'"(\\\\|\\"|[^"])*"', String), - (r"'" + valid_name, String.Symbol), - (r"#\\([()/'\"._!§$%& ?=+-]{1}|[a-zA-Z0-9]+)", String.Char), - (r'#rx".+"', String.Regex), - (r'#px".+"', String.Regex), - - # constants - (r'(#t|#f)', Name.Constant), - - # keyword argument names (e.g. 
#:keyword) - (r'#:\S+', Keyword.Declaration), - - # #lang - (r'#lang \S+', Keyword.Namespace), - - # special operators - (r"('|#|`|,@|,|\.)", Operator), - - # highlight the keywords - ('(%s)' % '|'.join([ - re.escape(entry) + ' ' for entry in keywords]), - Keyword - ), - - # first variable in a quoted string like - # '(this is syntactic sugar) - (r"(?<='\()" + valid_name, Name.Variable), - (r"(?<=#\()" + valid_name, Name.Variable), - - # highlight the builtins - ("(?<=\()(%s)" % '|'.join([ - re.escape(entry) + ' ' for entry in builtins]), - Name.Builtin - ), - - # the remaining functions; handle both ( and [ - (r'(?<=(\(|\[|\{))' + valid_name, Name.Function), - - # find the remaining variables - (valid_name, Name.Variable), - - # the famous parentheses! - (r'(\(|\)|\[|\]|\{|\})', Punctuation), - ], - } - - -class SchemeLexer(RegexLexer): - """ - A Scheme lexer, parsing a stream and outputting the tokens - needed to highlight scheme code. - This lexer could be most probably easily subclassed to parse - other LISP-Dialects like Common Lisp, Emacs Lisp or AutoLisp. - - This parser is checked with pastes from the LISP pastebin - at http://paste.lisp.org/ to cover as much syntax as possible. - - It supports the full Scheme syntax as defined in R5RS. - - *New in Pygments 0.6.* - """ - name = 'Scheme' - aliases = ['scheme', 'scm'] - filenames = ['*.scm', '*.ss'] - mimetypes = ['text/x-scheme', 'application/x-scheme'] - - # list of known keywords and builtins taken form vim 6.4 scheme.vim - # syntax file. 
- keywords = [ - 'lambda', 'define', 'if', 'else', 'cond', 'and', 'or', 'case', 'let', - 'let*', 'letrec', 'begin', 'do', 'delay', 'set!', '=>', 'quote', - 'quasiquote', 'unquote', 'unquote-splicing', 'define-syntax', - 'let-syntax', 'letrec-syntax', 'syntax-rules' - ] - builtins = [ - '*', '+', '-', '/', '<', '<=', '=', '>', '>=', 'abs', 'acos', 'angle', - 'append', 'apply', 'asin', 'assoc', 'assq', 'assv', 'atan', - 'boolean?', 'caaaar', 'caaadr', 'caaar', 'caadar', 'caaddr', 'caadr', - 'caar', 'cadaar', 'cadadr', 'cadar', 'caddar', 'cadddr', 'caddr', - 'cadr', 'call-with-current-continuation', 'call-with-input-file', - 'call-with-output-file', 'call-with-values', 'call/cc', 'car', - 'cdaaar', 'cdaadr', 'cdaar', 'cdadar', 'cdaddr', 'cdadr', 'cdar', - 'cddaar', 'cddadr', 'cddar', 'cdddar', 'cddddr', 'cdddr', 'cddr', - 'cdr', 'ceiling', 'char->integer', 'char-alphabetic?', 'char-ci<=?', - 'char-ci=?', 'char-ci>?', 'char-downcase', - 'char-lower-case?', 'char-numeric?', 'char-ready?', 'char-upcase', - 'char-upper-case?', 'char-whitespace?', 'char<=?', 'char=?', 'char>?', 'char?', 'close-input-port', 'close-output-port', - 'complex?', 'cons', 'cos', 'current-input-port', 'current-output-port', - 'denominator', 'display', 'dynamic-wind', 'eof-object?', 'eq?', - 'equal?', 'eqv?', 'eval', 'even?', 'exact->inexact', 'exact?', 'exp', - 'expt', 'floor', 'for-each', 'force', 'gcd', 'imag-part', - 'inexact->exact', 'inexact?', 'input-port?', 'integer->char', - 'integer?', 'interaction-environment', 'lcm', 'length', 'list', - 'list->string', 'list->vector', 'list-ref', 'list-tail', 'list?', - 'load', 'log', 'magnitude', 'make-polar', 'make-rectangular', - 'make-string', 'make-vector', 'map', 'max', 'member', 'memq', 'memv', - 'min', 'modulo', 'negative?', 'newline', 'not', 'null-environment', - 'null?', 'number->string', 'number?', 'numerator', 'odd?', - 'open-input-file', 'open-output-file', 'output-port?', 'pair?', - 'peek-char', 'port?', 'positive?', 'procedure?', 
'quotient', - 'rational?', 'rationalize', 'read', 'read-char', 'real-part', 'real?', - 'remainder', 'reverse', 'round', 'scheme-report-environment', - 'set-car!', 'set-cdr!', 'sin', 'sqrt', 'string', 'string->list', - 'string->number', 'string->symbol', 'string-append', 'string-ci<=?', - 'string-ci=?', 'string-ci>?', - 'string-copy', 'string-fill!', 'string-length', 'string-ref', - 'string-set!', 'string<=?', 'string=?', - 'string>?', 'string?', 'substring', 'symbol->string', 'symbol?', - 'tan', 'transcript-off', 'transcript-on', 'truncate', 'values', - 'vector', 'vector->list', 'vector-fill!', 'vector-length', - 'vector-ref', 'vector-set!', 'vector?', 'with-input-from-file', - 'with-output-to-file', 'write', 'write-char', 'zero?' - ] - - # valid names for identifiers - # well, names can only not consist fully of numbers - # but this should be good enough for now - valid_name = r'[a-zA-Z0-9!$%&*+,/:<=>?@^_~|-]+' - - tokens = { - 'root' : [ - # the comments - always starting with semicolon - # and going to the end of the line - (r';.*$', Comment.Single), - - # whitespaces - usually not relevant - (r'\s+', Text), - - # numbers - (r'-?\d+\.\d+', Number.Float), - (r'-?\d+', Number.Integer), - # support for uncommon kinds of numbers - - # have to figure out what the characters mean - #(r'(#e|#i|#b|#o|#d|#x)[\d.]+', Number), - - # strings, symbols and characters - (r'"(\\\\|\\"|[^"])*"', String), - (r"'" + valid_name, String.Symbol), - (r"#\\([()/'\"._!§$%& ?=+-]{1}|[a-zA-Z0-9]+)", String.Char), - - # constants - (r'(#t|#f)', Name.Constant), - - # special operators - (r"('|#|`|,@|,|\.)", Operator), - - # highlight the keywords - ('(%s)' % '|'.join([ - re.escape(entry) + ' ' for entry in keywords]), - Keyword - ), - - # first variable in a quoted string like - # '(this is syntactic sugar) - (r"(?<='\()" + valid_name, Name.Variable), - (r"(?<=#\()" + valid_name, Name.Variable), - - # highlight the builtins - ("(?<=\()(%s)" % '|'.join([ - re.escape(entry) + ' ' for entry in 
builtins]), - Name.Builtin - ), - - # the remaining functions - (r'(?<=\()' + valid_name, Name.Function), - # find the remaining variables - (valid_name, Name.Variable), - - # the famous parentheses! - (r'(\(|\))', Punctuation), - (r'(\[|\])', Punctuation), - ], - } - - -class CommonLispLexer(RegexLexer): - """ - A Common Lisp lexer. - - *New in Pygments 0.9.* - """ - name = 'Common Lisp' - aliases = ['common-lisp', 'cl', 'lisp'] - filenames = ['*.cl', '*.lisp', '*.el'] # use for Elisp too - mimetypes = ['text/x-common-lisp'] - - flags = re.IGNORECASE | re.MULTILINE - - ### couple of useful regexes - - # characters that are not macro-characters and can be used to begin a symbol - nonmacro = r'\\.|[a-zA-Z0-9!$%&*+-/<=>?@\[\]^_{}~]' - constituent = nonmacro + '|[#.:]' - terminated = r'(?=[ "()\'\n,;`])' # whitespace or terminating macro characters - - ### symbol token, reverse-engineered from hyperspec - # Take a deep breath... - symbol = r'(\|[^|]+\||(?:%s)(?:%s)*)' % (nonmacro, constituent) - - def __init__(self, **options): - from pygments.lexers._clbuiltins import BUILTIN_FUNCTIONS, \ - SPECIAL_FORMS, MACROS, LAMBDA_LIST_KEYWORDS, DECLARATIONS, \ - BUILTIN_TYPES, BUILTIN_CLASSES - self.builtin_function = BUILTIN_FUNCTIONS - self.special_forms = SPECIAL_FORMS - self.macros = MACROS - self.lambda_list_keywords = LAMBDA_LIST_KEYWORDS - self.declarations = DECLARATIONS - self.builtin_types = BUILTIN_TYPES - self.builtin_classes = BUILTIN_CLASSES - RegexLexer.__init__(self, **options) - - def get_tokens_unprocessed(self, text): - stack = ['root'] - for index, token, value in RegexLexer.get_tokens_unprocessed(self, text, stack): - if token is Name.Variable: - if value in self.builtin_function: - yield index, Name.Builtin, value - continue - if value in self.special_forms: - yield index, Keyword, value - continue - if value in self.macros: - yield index, Name.Builtin, value - continue - if value in self.lambda_list_keywords: - yield index, Keyword, value - continue - if 
value in self.declarations: - yield index, Keyword, value - continue - if value in self.builtin_types: - yield index, Keyword.Type, value - continue - if value in self.builtin_classes: - yield index, Name.Class, value - continue - yield index, token, value - - tokens = { - 'root' : [ - ('', Text, 'body'), - ], - 'multiline-comment' : [ - (r'#\|', Comment.Multiline, '#push'), # (cf. Hyperspec 2.4.8.19) - (r'\|#', Comment.Multiline, '#pop'), - (r'[^|#]+', Comment.Multiline), - (r'[|#]', Comment.Multiline), - ], - 'commented-form' : [ - (r'\(', Comment.Preproc, '#push'), - (r'\)', Comment.Preproc, '#pop'), - (r'[^()]+', Comment.Preproc), - ], - 'body' : [ - # whitespace - (r'\s+', Text), - - # single-line comment - (r';.*$', Comment.Single), - - # multi-line comment - (r'#\|', Comment.Multiline, 'multiline-comment'), - - # encoding comment (?) - (r'#\d*Y.*$', Comment.Special), - - # strings and characters - (r'"(\\.|\\\n|[^"\\])*"', String), - # quoting - (r":" + symbol, String.Symbol), - (r"::" + symbol, String.Symbol), - (r":#" + symbol, String.Symbol), - (r"'" + symbol, String.Symbol), - (r"'", Operator), - (r"`", Operator), - - # decimal numbers - (r'[-+]?\d+\.?' + terminated, Number.Integer), - (r'[-+]?\d+/\d+' + terminated, Number), - (r'[-+]?(\d*\.\d+([defls][-+]?\d+)?|\d+(\.\d*)?[defls][-+]?\d+)' \ - + terminated, Number.Float), - - # sharpsign strings and characters - (r"#\\." 
+ terminated, String.Char), - (r"#\\" + symbol, String.Char), - - # vector - (r'#\(', Operator, 'body'), - - # bitstring - (r'#\d*\*[01]*', Literal.Other), - - # uninterned symbol - (r'#:' + symbol, String.Symbol), - - # read-time and load-time evaluation - (r'#[.,]', Operator), - - # function shorthand - (r'#\'', Name.Function), - - # binary rational - (r'#[bB][+-]?[01]+(/[01]+)?', Number), - - # octal rational - (r'#[oO][+-]?[0-7]+(/[0-7]+)?', Number.Oct), - - # hex rational - (r'#[xX][+-]?[0-9a-fA-F]+(/[0-9a-fA-F]+)?', Number.Hex), - - # radix rational - (r'#\d+[rR][+-]?[0-9a-zA-Z]+(/[0-9a-zA-Z]+)?', Number), - - # complex - (r'(#[cC])(\()', bygroups(Number, Punctuation), 'body'), - - # array - (r'(#\d+[aA])(\()', bygroups(Literal.Other, Punctuation), 'body'), - - # structure - (r'(#[sS])(\()', bygroups(Literal.Other, Punctuation), 'body'), - - # path - (r'#[pP]?"(\\.|[^"])*"', Literal.Other), - - # reference - (r'#\d+=', Operator), - (r'#\d+#', Operator), - - # read-time comment - (r'#+nil' + terminated + '\s*\(', Comment.Preproc, 'commented-form'), - - # read-time conditional - (r'#[+-]', Operator), - - # special operators that should have been parsed already - (r'(,@|,|\.)', Operator), - - # special constants - (r'(t|nil)' + terminated, Name.Constant), - - # functions and variables - (r'\*' + symbol + '\*', Name.Variable.Global), - (symbol, Name.Variable), - - # parentheses - (r'\(', Punctuation, 'body'), - (r'\)', Punctuation, '#pop'), - ], - } - - -class HaskellLexer(RegexLexer): - """ - A Haskell lexer based on the lexemes defined in the Haskell 98 Report. 
- - *New in Pygments 0.8.* - """ - name = 'Haskell' - aliases = ['haskell', 'hs'] - filenames = ['*.hs'] - mimetypes = ['text/x-haskell'] - - reserved = ['case','class','data','default','deriving','do','else', - 'if','in','infix[lr]?','instance', - 'let','newtype','of','then','type','where','_'] - ascii = ['NUL','SOH','[SE]TX','EOT','ENQ','ACK', - 'BEL','BS','HT','LF','VT','FF','CR','S[OI]','DLE', - 'DC[1-4]','NAK','SYN','ETB','CAN', - 'EM','SUB','ESC','[FGRU]S','SP','DEL'] - - tokens = { - 'root': [ - # Whitespace: - (r'\s+', Text), - #(r'--\s*|.*$', Comment.Doc), - (r'--(?![!#$%&*+./<=>?@\^|_~:\\]).*?$', Comment.Single), - (r'{-', Comment.Multiline, 'comment'), - # Lexemes: - # Identifiers - (r'\bimport\b', Keyword.Reserved, 'import'), - (r'\bmodule\b', Keyword.Reserved, 'module'), - (r'\berror\b', Name.Exception), - (r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved), - (r'^[_a-z][\w\']*', Name.Function), - (r"'?[_a-z][\w']*", Name), - (r"('')?[A-Z][\w\']*", Keyword.Type), - # Operators - (r'\\(?![:!#$%&*+.\\/<=>?@^|~-]+)', Name.Function), # lambda operator - (r'(<-|::|->|=>|=)(?![:!#$%&*+.\\/<=>?@^|~-]+)', Operator.Word), # specials - (r':[:!#$%&*+.\\/<=>?@^|~-]*', Keyword.Type), # Constructor operators - (r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator), # Other operators - # Numbers - (r'\d+[eE][+-]?\d+', Number.Float), - (r'\d+\.\d+([eE][+-]?\d+)?', Number.Float), - (r'0[oO][0-7]+', Number.Oct), - (r'0[xX][\da-fA-F]+', Number.Hex), - (r'\d+', Number.Integer), - # Character/String Literals - (r"'", String.Char, 'character'), - (r'"', String, 'string'), - # Special - (r'\[\]', Keyword.Type), - (r'\(\)', Name.Builtin), - (r'[][(),;`{}]', Punctuation), - ], - 'import': [ - # Import statements - (r'\s+', Text), - (r'"', String, 'string'), - # after "funclist" state - (r'\)', Punctuation, '#pop'), - (r'qualified\b', Keyword), - # import X as Y - (r'([A-Z][a-zA-Z0-9_.]*)(\s+)(as)(\s+)([A-Z][a-zA-Z0-9_.]*)', - bygroups(Name.Namespace, Text, Keyword, Text, Name), 
'#pop'), - # import X hiding (functions) - (r'([A-Z][a-zA-Z0-9_.]*)(\s+)(hiding)(\s+)(\()', - bygroups(Name.Namespace, Text, Keyword, Text, Punctuation), 'funclist'), - # import X (functions) - (r'([A-Z][a-zA-Z0-9_.]*)(\s+)(\()', - bygroups(Name.Namespace, Text, Punctuation), 'funclist'), - # import X - (r'[a-zA-Z0-9_.]+', Name.Namespace, '#pop'), - ], - 'module': [ - (r'\s+', Text), - (r'([A-Z][a-zA-Z0-9_.]*)(\s+)(\()', - bygroups(Name.Namespace, Text, Punctuation), 'funclist'), - (r'[A-Z][a-zA-Z0-9_.]*', Name.Namespace, '#pop'), - ], - 'funclist': [ - (r'\s+', Text), - (r'[A-Z][a-zA-Z0-9_]*', Keyword.Type), - (r'(_[\w\']+|[a-z][\w\']*)', Name.Function), - (r'--.*$', Comment.Single), - (r'{-', Comment.Multiline, 'comment'), - (r',', Punctuation), - (r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator), - # (HACK, but it makes sense to push two instances, believe me) - (r'\(', Punctuation, ('funclist', 'funclist')), - (r'\)', Punctuation, '#pop:2'), - ], - # NOTE: the next four states are shared in the AgdaLexer; make sure - # any change is compatible with Agda as well or copy over and change - 'comment': [ - # Multiline Comments - (r'[^-{}]+', Comment.Multiline), - (r'{-', Comment.Multiline, '#push'), - (r'-}', Comment.Multiline, '#pop'), - (r'[-{}]', Comment.Multiline), - ], - 'character': [ - # Allows multi-chars, incorrectly. - (r"[^\\']", String.Char), - (r"\\", String.Escape, 'escape'), - ("'", String.Char, '#pop'), - ], - 'string': [ - (r'[^\\"]+', String), - (r"\\", String.Escape, 'escape'), - ('"', String, '#pop'), - ], - 'escape': [ - (r'[abfnrtv"\'&\\]', String.Escape, '#pop'), - (r'\^[][A-Z@\^_]', String.Escape, '#pop'), - ('|'.join(ascii), String.Escape, '#pop'), - (r'o[0-7]+', String.Escape, '#pop'), - (r'x[\da-fA-F]+', String.Escape, '#pop'), - (r'\d+', String.Escape, '#pop'), - (r'\s+\\', String.Escape, '#pop'), - ], - } - - -class AgdaLexer(RegexLexer): - """ - For the `Agda `_ - dependently typed functional programming language and proof assistant. 
- - *New in Pygments 1.7.* - """ - - name = 'Agda' - aliases = ['agda'] - filenames = ['*.agda'] - mimetypes = ['text/x-agda'] - - reserved = ['abstract', 'codata', 'coinductive', 'constructor', 'data', - 'field', 'forall', 'hiding', 'in', 'inductive', 'infix', - 'infixl', 'infixr', 'let', 'open', 'pattern', 'primitive', - 'private', 'mutual', 'quote', 'quoteGoal', 'quoteTerm', - 'record', 'syntax', 'rewrite', 'unquote', 'using', 'where', - 'with'] - - tokens = { - 'root': [ - # Declaration - (r'^(\s*)([^\s\(\)\{\}]+)(\s*)(:)(\s*)', - bygroups(Text, Name.Function, Text, Operator.Word, Text)), - # Comments - (r'--(?![!#$%&*+./<=>?@\^|_~:\\]).*?$', Comment.Single), - (r'{-', Comment.Multiline, 'comment'), - # Holes - (r'{!', Comment.Directive, 'hole'), - # Lexemes: - # Identifiers - (r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved), - (r'(import|module)(\s+)', bygroups(Keyword.Reserved, Text), 'module'), - (r'\b(Set|Prop)\b', Keyword.Type), - # Special Symbols - (r'(\(|\)|\{|\})', Operator), - (r'(\.{1,3}|\||[\u039B]|[\u2200]|[\u2192]|:|=|->)', Operator.Word), - # Numbers - (r'\d+[eE][+-]?\d+', Number.Float), - (r'\d+\.\d+([eE][+-]?\d+)?', Number.Float), - (r'0[xX][\da-fA-F]+', Number.Hex), - (r'\d+', Number.Integer), - # Strings - (r"'", String.Char, 'character'), - (r'"', String, 'string'), - (r'[^\s\(\)\{\}]+', Text), - (r'\s+?', Text), # Whitespace - ], - 'hole': [ - # Holes - (r'[^!{}]+', Comment.Directive), - (r'{!', Comment.Directive, '#push'), - (r'!}', Comment.Directive, '#pop'), - (r'[!{}]', Comment.Directive), - ], - 'module': [ - (r'{-', Comment.Multiline, 'comment'), - (r'[a-zA-Z][a-zA-Z0-9_.]*', Name, '#pop'), - (r'[^a-zA-Z]*', Text) - ], - 'comment': HaskellLexer.tokens['comment'], - 'character': HaskellLexer.tokens['character'], - 'string': HaskellLexer.tokens['string'], - 'escape': HaskellLexer.tokens['escape'] - } - - -class LiterateLexer(Lexer): - """ - Base class for lexers of literate file formats based on LaTeX or Bird-style - 
(prefixing each code line with ">"). - - Additional options accepted: - - `litstyle` - If given, must be ``"bird"`` or ``"latex"``. If not given, the style - is autodetected: if the first non-whitespace character in the source - is a backslash or percent character, LaTeX is assumed, else Bird. - """ - - bird_re = re.compile(r'(>[ \t]*)(.*\n)') - - def __init__(self, baselexer, **options): - self.baselexer = baselexer - Lexer.__init__(self, **options) - - def get_tokens_unprocessed(self, text): - style = self.options.get('litstyle') - if style is None: - style = (text.lstrip()[0:1] in '%\\') and 'latex' or 'bird' - - code = '' - insertions = [] - if style == 'bird': - # bird-style - for match in line_re.finditer(text): - line = match.group() - m = self.bird_re.match(line) - if m: - insertions.append((len(code), - [(0, Comment.Special, m.group(1))])) - code += m.group(2) - else: - insertions.append((len(code), [(0, Text, line)])) - else: - # latex-style - from pygments.lexers.text import TexLexer - lxlexer = TexLexer(**self.options) - codelines = 0 - latex = '' - for match in line_re.finditer(text): - line = match.group() - if codelines: - if line.lstrip().startswith('\\end{code}'): - codelines = 0 - latex += line - else: - code += line - elif line.lstrip().startswith('\\begin{code}'): - codelines = 1 - latex += line - insertions.append((len(code), - list(lxlexer.get_tokens_unprocessed(latex)))) - latex = '' - else: - latex += line - insertions.append((len(code), - list(lxlexer.get_tokens_unprocessed(latex)))) - for item in do_insertions(insertions, self.baselexer.get_tokens_unprocessed(code)): - yield item - - -class LiterateHaskellLexer(LiterateLexer): - """ - For Literate Haskell (Bird-style or LaTeX) source. - - Additional options accepted: - - `litstyle` - If given, must be ``"bird"`` or ``"latex"``. If not given, the style - is autodetected: if the first non-whitespace character in the source - is a backslash or percent character, LaTeX is assumed, else Bird. 
- - *New in Pygments 0.9.* - """ - name = 'Literate Haskell' - aliases = ['lhs', 'literate-haskell', 'lhaskell'] - filenames = ['*.lhs'] - mimetypes = ['text/x-literate-haskell'] - - def __init__(self, **options): - hslexer = HaskellLexer(**options) - LiterateLexer.__init__(self, hslexer, **options) - - -class LiterateAgdaLexer(LiterateLexer): - """ - For Literate Agda source. - - Additional options accepted: - - `litstyle` - If given, must be ``"bird"`` or ``"latex"``. If not given, the style - is autodetected: if the first non-whitespace character in the source - is a backslash or percent character, LaTeX is assumed, else Bird. - - *New in Pygments 1.7.* - """ - name = 'Literate Agda' - aliases = ['lagda', 'literate-agda'] - filenames = ['*.lagda'] - mimetypes = ['text/x-literate-agda'] - - def __init__(self, **options): - agdalexer = AgdaLexer(**options) - LiterateLexer.__init__(self, agdalexer, litstyle='latex', **options) - - -class SMLLexer(RegexLexer): - """ - For the Standard ML language. 
- - *New in Pygments 1.5.* - """ - - name = 'Standard ML' - aliases = ['sml'] - filenames = ['*.sml', '*.sig', '*.fun',] - mimetypes = ['text/x-standardml', 'application/x-standardml'] - - alphanumid_reserved = [ - # Core - 'abstype', 'and', 'andalso', 'as', 'case', 'datatype', 'do', 'else', - 'end', 'exception', 'fn', 'fun', 'handle', 'if', 'in', 'infix', - 'infixr', 'let', 'local', 'nonfix', 'of', 'op', 'open', 'orelse', - 'raise', 'rec', 'then', 'type', 'val', 'with', 'withtype', 'while', - # Modules - 'eqtype', 'functor', 'include', 'sharing', 'sig', 'signature', - 'struct', 'structure', 'where', - ] - - symbolicid_reserved = [ - # Core - ':', '\|', '=', '=>', '->', '#', - # Modules - ':>', - ] - - nonid_reserved = [ '(', ')', '[', ']', '{', '}', ',', ';', '...', '_' ] - - alphanumid_re = r"[a-zA-Z][a-zA-Z0-9_']*" - symbolicid_re = r"[!%&$#+\-/:<=>?@\\~`^|*]+" - - # A character constant is a sequence of the form #s, where s is a string - # constant denoting a string of size one character. This setup just parses - # the entire string as either a String.Double or a String.Char (depending - # on the argument), even if the String.Char is an erronous - # multiple-character string. 
- def stringy (whatkind): - return [ - (r'[^"\\]', whatkind), - (r'\\[\\\"abtnvfr]', String.Escape), - # Control-character notation is used for codes < 32, - # where \^@ == \000 - (r'\\\^[\x40-\x5e]', String.Escape), - # Docs say 'decimal digits' - (r'\\[0-9]{3}', String.Escape), - (r'\\u[0-9a-fA-F]{4}', String.Escape), - (r'\\\s+\\', String.Interpol), - (r'"', whatkind, '#pop'), - ] - - # Callbacks for distinguishing tokens and reserved words - def long_id_callback(self, match): - if match.group(1) in self.alphanumid_reserved: token = Error - else: token = Name.Namespace - yield match.start(1), token, match.group(1) - yield match.start(2), Punctuation, match.group(2) - - def end_id_callback(self, match): - if match.group(1) in self.alphanumid_reserved: token = Error - elif match.group(1) in self.symbolicid_reserved: token = Error - else: token = Name - yield match.start(1), token, match.group(1) - - def id_callback(self, match): - str = match.group(1) - if str in self.alphanumid_reserved: token = Keyword.Reserved - elif str in self.symbolicid_reserved: token = Punctuation - else: token = Name - yield match.start(1), token, str - - tokens = { - # Whitespace and comments are (almost) everywhere - 'whitespace': [ - (r'\s+', Text), - (r'\(\*', Comment.Multiline, 'comment'), - ], - - 'delimiters': [ - # This lexer treats these delimiters specially: - # Delimiters define scopes, and the scope is how the meaning of - # the `|' is resolved - is it a case/handle expression, or function - # definition by cases? 
(This is not how the Definition works, but - # it's how MLton behaves, see http://mlton.org/SMLNJDeviations) - (r'\(|\[|{', Punctuation, 'main'), - (r'\)|\]|}', Punctuation, '#pop'), - (r'\b(let|if|local)\b(?!\')', Keyword.Reserved, ('main', 'main')), - (r'\b(struct|sig|while)\b(?!\')', Keyword.Reserved, 'main'), - (r'\b(do|else|end|in|then)\b(?!\')', Keyword.Reserved, '#pop'), - ], - - 'core': [ - # Punctuation that doesn't overlap symbolic identifiers - (r'(%s)' % '|'.join([re.escape(z) for z in nonid_reserved]), - Punctuation), - - # Special constants: strings, floats, numbers in decimal and hex - (r'#"', String.Char, 'char'), - (r'"', String.Double, 'string'), - (r'~?0x[0-9a-fA-F]+', Number.Hex), - (r'0wx[0-9a-fA-F]+', Number.Hex), - (r'0w\d+', Number.Integer), - (r'~?\d+\.\d+[eE]~?\d+', Number.Float), - (r'~?\d+\.\d+', Number.Float), - (r'~?\d+[eE]~?\d+', Number.Float), - (r'~?\d+', Number.Integer), - - # Labels - (r'#\s*[1-9][0-9]*', Name.Label), - (r'#\s*(%s)' % alphanumid_re, Name.Label), - (r'#\s+(%s)' % symbolicid_re, Name.Label), - # Some reserved words trigger a special, local lexer state change - (r'\b(datatype|abstype)\b(?!\')', Keyword.Reserved, 'dname'), - (r'(?=\b(exception)\b(?!\'))', Text, ('ename')), - (r'\b(functor|include|open|signature|structure)\b(?!\')', - Keyword.Reserved, 'sname'), - (r'\b(type|eqtype)\b(?!\')', Keyword.Reserved, 'tname'), - - # Regular identifiers, long and otherwise - (r'\'[0-9a-zA-Z_\']*', Name.Decorator), - (r'(%s)(\.)' % alphanumid_re, long_id_callback, "dotted"), - (r'(%s)' % alphanumid_re, id_callback), - (r'(%s)' % symbolicid_re, id_callback), - ], - 'dotted': [ - (r'(%s)(\.)' % alphanumid_re, long_id_callback), - (r'(%s)' % alphanumid_re, end_id_callback, "#pop"), - (r'(%s)' % symbolicid_re, end_id_callback, "#pop"), - (r'\s+', Error), - (r'\S+', Error), - ], - - - # Main parser (prevents errors in files that have scoping errors) - 'root': [ (r'', Text, 'main') ], - - # In this scope, I expect '|' to not be 
followed by a function name, - # and I expect 'and' to be followed by a binding site - 'main': [ - include('whitespace'), - - # Special behavior of val/and/fun - (r'\b(val|and)\b(?!\')', Keyword.Reserved, 'vname'), - (r'\b(fun)\b(?!\')', Keyword.Reserved, - ('#pop', 'main-fun', 'fname')), - - include('delimiters'), - include('core'), - (r'\S+', Error), - ], - - # In this scope, I expect '|' and 'and' to be followed by a function - 'main-fun': [ - include('whitespace'), - - (r'\s', Text), - (r'\(\*', Comment.Multiline, 'comment'), - - # Special behavior of val/and/fun - (r'\b(fun|and)\b(?!\')', Keyword.Reserved, 'fname'), - (r'\b(val)\b(?!\')', Keyword.Reserved, - ('#pop', 'main', 'vname')), - - # Special behavior of '|' and '|'-manipulating keywords - (r'\|', Punctuation, 'fname'), - (r'\b(case|handle)\b(?!\')', Keyword.Reserved, - ('#pop', 'main')), - - include('delimiters'), - include('core'), - (r'\S+', Error), - ], - - # Character and string parsers - 'char': stringy(String.Char), - 'string': stringy(String.Double), - - 'breakout': [ - (r'(?=\b(%s)\b(?!\'))' % '|'.join(alphanumid_reserved), Text, '#pop'), - ], - - # Dealing with what comes after module system keywords - 'sname': [ - include('whitespace'), - include('breakout'), - - (r'(%s)' % alphanumid_re, Name.Namespace), - (r'', Text, '#pop'), - ], - - # Dealing with what comes after the 'fun' (or 'and' or '|') keyword - 'fname': [ - include('whitespace'), - (r'\'[0-9a-zA-Z_\']*', Name.Decorator), - (r'\(', Punctuation, 'tyvarseq'), - - (r'(%s)' % alphanumid_re, Name.Function, '#pop'), - (r'(%s)' % symbolicid_re, Name.Function, '#pop'), - - # Ignore interesting function declarations like "fun (x + y) = ..." 
- (r'', Text, '#pop'), - ], - - # Dealing with what comes after the 'val' (or 'and') keyword - 'vname': [ - include('whitespace'), - (r'\'[0-9a-zA-Z_\']*', Name.Decorator), - (r'\(', Punctuation, 'tyvarseq'), - - (r'(%s)(\s*)(=(?!%s))' % (alphanumid_re, symbolicid_re), - bygroups(Name.Variable, Text, Punctuation), '#pop'), - (r'(%s)(\s*)(=(?!%s))' % (symbolicid_re, symbolicid_re), - bygroups(Name.Variable, Text, Punctuation), '#pop'), - (r'(%s)' % alphanumid_re, Name.Variable, '#pop'), - (r'(%s)' % symbolicid_re, Name.Variable, '#pop'), - - # Ignore interesting patterns like 'val (x, y)' - (r'', Text, '#pop'), - ], - - # Dealing with what comes after the 'type' (or 'and') keyword - 'tname': [ - include('whitespace'), - include('breakout'), - - (r'\'[0-9a-zA-Z_\']*', Name.Decorator), - (r'\(', Punctuation, 'tyvarseq'), - (r'=(?!%s)' % symbolicid_re, Punctuation, ('#pop', 'typbind')), - - (r'(%s)' % alphanumid_re, Keyword.Type), - (r'(%s)' % symbolicid_re, Keyword.Type), - (r'\S+', Error, '#pop'), - ], - - # A type binding includes most identifiers - 'typbind': [ - include('whitespace'), - - (r'\b(and)\b(?!\')', Keyword.Reserved, ('#pop', 'tname')), - - include('breakout'), - include('core'), - (r'\S+', Error, '#pop'), - ], - - # Dealing with what comes after the 'datatype' (or 'and') keyword - 'dname': [ - include('whitespace'), - include('breakout'), - - (r'\'[0-9a-zA-Z_\']*', Name.Decorator), - (r'\(', Punctuation, 'tyvarseq'), - (r'(=)(\s*)(datatype)', - bygroups(Punctuation, Text, Keyword.Reserved), '#pop'), - (r'=(?!%s)' % symbolicid_re, Punctuation, - ('#pop', 'datbind', 'datcon')), - - (r'(%s)' % alphanumid_re, Keyword.Type), - (r'(%s)' % symbolicid_re, Keyword.Type), - (r'\S+', Error, '#pop'), - ], - - # common case - A | B | C of int - 'datbind': [ - include('whitespace'), - - (r'\b(and)\b(?!\')', Keyword.Reserved, ('#pop', 'dname')), - (r'\b(withtype)\b(?!\')', Keyword.Reserved, ('#pop', 'tname')), - (r'\b(of)\b(?!\')', Keyword.Reserved), - - 
(r'(\|)(\s*)(%s)' % alphanumid_re, - bygroups(Punctuation, Text, Name.Class)), - (r'(\|)(\s+)(%s)' % symbolicid_re, - bygroups(Punctuation, Text, Name.Class)), - - include('breakout'), - include('core'), - (r'\S+', Error), - ], - - # Dealing with what comes after an exception - 'ename': [ - include('whitespace'), - - (r'(exception|and)\b(\s+)(%s)' % alphanumid_re, - bygroups(Keyword.Reserved, Text, Name.Class)), - (r'(exception|and)\b(\s*)(%s)' % symbolicid_re, - bygroups(Keyword.Reserved, Text, Name.Class)), - (r'\b(of)\b(?!\')', Keyword.Reserved), - - include('breakout'), - include('core'), - (r'\S+', Error), - ], - - 'datcon': [ - include('whitespace'), - (r'(%s)' % alphanumid_re, Name.Class, '#pop'), - (r'(%s)' % symbolicid_re, Name.Class, '#pop'), - (r'\S+', Error, '#pop'), - ], - - # Series of type variables - 'tyvarseq': [ - (r'\s', Text), - (r'\(\*', Comment.Multiline, 'comment'), - - (r'\'[0-9a-zA-Z_\']*', Name.Decorator), - (alphanumid_re, Name), - (r',', Punctuation), - (r'\)', Punctuation, '#pop'), - (symbolicid_re, Name), - ], - - 'comment': [ - (r'[^(*)]', Comment.Multiline), - (r'\(\*', Comment.Multiline, '#push'), - (r'\*\)', Comment.Multiline, '#pop'), - (r'[(*)]', Comment.Multiline), - ], - } - - -class OcamlLexer(RegexLexer): - """ - For the OCaml language. 
- - *New in Pygments 0.7.* - """ - - name = 'OCaml' - aliases = ['ocaml'] - filenames = ['*.ml', '*.mli', '*.mll', '*.mly'] - mimetypes = ['text/x-ocaml'] - - keywords = [ - 'as', 'assert', 'begin', 'class', 'constraint', 'do', 'done', - 'downto', 'else', 'end', 'exception', 'external', 'false', - 'for', 'fun', 'function', 'functor', 'if', 'in', 'include', - 'inherit', 'initializer', 'lazy', 'let', 'match', 'method', - 'module', 'mutable', 'new', 'object', 'of', 'open', 'private', - 'raise', 'rec', 'sig', 'struct', 'then', 'to', 'true', 'try', - 'type', 'value', 'val', 'virtual', 'when', 'while', 'with', - ] - keyopts = [ - '!=','#','&','&&','\(','\)','\*','\+',',','-', - '-\.','->','\.','\.\.',':','::',':=',':>',';',';;','<', - '<-','=','>','>]','>}','\?','\?\?','\[','\[<','\[>','\[\|', - ']','_','`','{','{<','\|','\|]','}','~' - ] - - operators = r'[!$%&*+\./:<=>?@^|~-]' - word_operators = ['and', 'asr', 'land', 'lor', 'lsl', 'lxor', 'mod', 'or'] - prefix_syms = r'[!?~]' - infix_syms = r'[=<>@^|&+\*/$%-]' - primitives = ['unit', 'int', 'float', 'bool', 'string', 'char', 'list', 'array'] - - tokens = { - 'escape-sequence': [ - (r'\\[\\\"\'ntbr]', String.Escape), - (r'\\[0-9]{3}', String.Escape), - (r'\\x[0-9a-fA-F]{2}', String.Escape), - ], - 'root': [ - (r'\s+', Text), - (r'false|true|\(\)|\[\]', Name.Builtin.Pseudo), - (r'\b([A-Z][A-Za-z0-9_\']*)(?=\s*\.)', - Name.Namespace, 'dotted'), - (r'\b([A-Z][A-Za-z0-9_\']*)', Name.Class), - (r'\(\*(?![)])', Comment, 'comment'), - (r'\b(%s)\b' % '|'.join(keywords), Keyword), - (r'(%s)' % '|'.join(keyopts[::-1]), Operator), - (r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator), - (r'\b(%s)\b' % '|'.join(word_operators), Operator.Word), - (r'\b(%s)\b' % '|'.join(primitives), Keyword.Type), - - (r"[^\W\d][\w']*", Name), - - (r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float), - (r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex), - (r'0[oO][0-7][0-7_]*', Number.Oct), - (r'0[bB][01][01_]*', Number.Binary), - 
(r'\d[\d_]*', Number.Integer), - - (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'", - String.Char), - (r"'.'", String.Char), - (r"'", Keyword), # a stray quote is another syntax element - - (r'"', String.Double, 'string'), - - (r'[~?][a-z][\w\']*:', Name.Variable), - ], - 'comment': [ - (r'[^(*)]+', Comment), - (r'\(\*', Comment, '#push'), - (r'\*\)', Comment, '#pop'), - (r'[(*)]', Comment), - ], - 'string': [ - (r'[^\\"]+', String.Double), - include('escape-sequence'), - (r'\\\n', String.Double), - (r'"', String.Double, '#pop'), - ], - 'dotted': [ - (r'\s+', Text), - (r'\.', Punctuation), - (r'[A-Z][A-Za-z0-9_\']*(?=\s*\.)', Name.Namespace), - (r'[A-Z][A-Za-z0-9_\']*', Name.Class, '#pop'), - (r'[a-z_][A-Za-z0-9_\']*', Name, '#pop'), - ], - } - - -class ErlangLexer(RegexLexer): - """ - For the Erlang functional programming language. - - Blame Jeremy Thurgood (http://jerith.za.net/). - - *New in Pygments 0.9.* - """ - - name = 'Erlang' - aliases = ['erlang'] - filenames = ['*.erl', '*.hrl', '*.es', '*.escript'] - mimetypes = ['text/x-erlang'] - - keywords = [ - 'after', 'begin', 'case', 'catch', 'cond', 'end', 'fun', 'if', - 'let', 'of', 'query', 'receive', 'try', 'when', - ] - - builtins = [ # See erlang(3) man page - 'abs', 'append_element', 'apply', 'atom_to_list', 'binary_to_list', - 'bitstring_to_list', 'binary_to_term', 'bit_size', 'bump_reductions', - 'byte_size', 'cancel_timer', 'check_process_code', 'delete_module', - 'demonitor', 'disconnect_node', 'display', 'element', 'erase', 'exit', - 'float', 'float_to_list', 'fun_info', 'fun_to_list', - 'function_exported', 'garbage_collect', 'get', 'get_keys', - 'group_leader', 'hash', 'hd', 'integer_to_list', 'iolist_to_binary', - 'iolist_size', 'is_atom', 'is_binary', 'is_bitstring', 'is_boolean', - 'is_builtin', 'is_float', 'is_function', 'is_integer', 'is_list', - 'is_number', 'is_pid', 'is_port', 'is_process_alive', 'is_record', - 'is_reference', 'is_tuple', 'length', 'link', 'list_to_atom', - 
'list_to_binary', 'list_to_bitstring', 'list_to_existing_atom', - 'list_to_float', 'list_to_integer', 'list_to_pid', 'list_to_tuple', - 'load_module', 'localtime_to_universaltime', 'make_tuple', 'md5', - 'md5_final', 'md5_update', 'memory', 'module_loaded', 'monitor', - 'monitor_node', 'node', 'nodes', 'open_port', 'phash', 'phash2', - 'pid_to_list', 'port_close', 'port_command', 'port_connect', - 'port_control', 'port_call', 'port_info', 'port_to_list', - 'process_display', 'process_flag', 'process_info', 'purge_module', - 'put', 'read_timer', 'ref_to_list', 'register', 'resume_process', - 'round', 'send', 'send_after', 'send_nosuspend', 'set_cookie', - 'setelement', 'size', 'spawn', 'spawn_link', 'spawn_monitor', - 'spawn_opt', 'split_binary', 'start_timer', 'statistics', - 'suspend_process', 'system_flag', 'system_info', 'system_monitor', - 'system_profile', 'term_to_binary', 'tl', 'trace', 'trace_delivered', - 'trace_info', 'trace_pattern', 'trunc', 'tuple_size', 'tuple_to_list', - 'universaltime_to_localtime', 'unlink', 'unregister', 'whereis' - ] - - operators = r'(\+\+?|--?|\*|/|<|>|/=|=:=|=/=|=<|>=|==?|<-|!|\?)' - word_operators = [ - 'and', 'andalso', 'band', 'bnot', 'bor', 'bsl', 'bsr', 'bxor', - 'div', 'not', 'or', 'orelse', 'rem', 'xor' - ] - - atom_re = r"(?:[a-z][a-zA-Z0-9_]*|'[^\n']*[^\\]')" - - variable_re = r'(?:[A-Z_][a-zA-Z0-9_]*)' - - escape_re = r'(?:\\(?:[bdefnrstv\'"\\/]|[0-7][0-7]?[0-7]?|\^[a-zA-Z]))' - - macro_re = r'(?:'+variable_re+r'|'+atom_re+r')' - - base_re = r'(?:[2-9]|[12][0-9]|3[0-6])' - - tokens = { - 'root': [ - (r'\s+', Text), - (r'%.*\n', Comment), - ('(' + '|'.join(keywords) + r')\b', Keyword), - ('(' + '|'.join(builtins) + r')\b', Name.Builtin), - ('(' + '|'.join(word_operators) + r')\b', Operator.Word), - (r'^-', Punctuation, 'directive'), - (operators, Operator), - (r'"', String, 'string'), - (r'<<', Name.Label), - (r'>>', Name.Label), - ('(' + atom_re + ')(:)', bygroups(Name.Namespace, Punctuation)), - ('(?:^|(?<=:))(' + 
atom_re + r')(\s*)(\()', - bygroups(Name.Function, Text, Punctuation)), - (r'[+-]?'+base_re+r'#[0-9a-zA-Z]+', Number.Integer), - (r'[+-]?\d+', Number.Integer), - (r'[+-]?\d+.\d+', Number.Float), - (r'[]\[:_@\".{}()|;,]', Punctuation), - (variable_re, Name.Variable), - (atom_re, Name), - (r'\?'+macro_re, Name.Constant), - (r'\$(?:'+escape_re+r'|\\[ %]|[^\\])', String.Char), - (r'#'+atom_re+r'(:?\.'+atom_re+r')?', Name.Label), - ], - 'string': [ - (escape_re, String.Escape), - (r'"', String, '#pop'), - (r'~[0-9.*]*[~#+bBcdefginpPswWxX]', String.Interpol), - (r'[^"\\~]+', String), - (r'~', String), - ], - 'directive': [ - (r'(define)(\s*)(\()('+macro_re+r')', - bygroups(Name.Entity, Text, Punctuation, Name.Constant), '#pop'), - (r'(record)(\s*)(\()('+macro_re+r')', - bygroups(Name.Entity, Text, Punctuation, Name.Label), '#pop'), - (atom_re, Name.Entity, '#pop'), - ], - } - - -class ErlangShellLexer(Lexer): - """ - Shell sessions in erl (for Erlang code). - - *New in Pygments 1.1.* - """ - name = 'Erlang erl session' - aliases = ['erl'] - filenames = ['*.erl-sh'] - mimetypes = ['text/x-erl-shellsession'] - - _prompt_re = re.compile(r'\d+>(?=\s|\Z)') - - def get_tokens_unprocessed(self, text): - erlexer = ErlangLexer(**self.options) - - curcode = '' - insertions = [] - for match in line_re.finditer(text): - line = match.group() - m = self._prompt_re.match(line) - if m is not None: - end = m.end() - insertions.append((len(curcode), - [(0, Generic.Prompt, line[:end])])) - curcode += line[end:] - else: - if curcode: - for item in do_insertions(insertions, - erlexer.get_tokens_unprocessed(curcode)): - yield item - curcode = '' - insertions = [] - if line.startswith('*'): - yield match.start(), Generic.Traceback, line - else: - yield match.start(), Generic.Output, line - if curcode: - for item in do_insertions(insertions, - erlexer.get_tokens_unprocessed(curcode)): - yield item - - -class OpaLexer(RegexLexer): - """ - Lexer for the Opa language (http://opalang.org). 
- - *New in Pygments 1.5.* - """ - - name = 'Opa' - aliases = ['opa'] - filenames = ['*.opa'] - mimetypes = ['text/x-opa'] - - # most of these aren't strictly keywords - # but if you color only real keywords, you might just - # as well not color anything - keywords = [ - 'and', 'as', 'begin', 'case', 'client', 'css', 'database', 'db', 'do', - 'else', 'end', 'external', 'forall', 'function', 'if', 'import', - 'match', 'module', 'or', 'package', 'parser', 'rec', 'server', 'then', - 'type', 'val', 'with', 'xml_parser', - ] - - # matches both stuff and `stuff` - ident_re = r'(([a-zA-Z_]\w*)|(`[^`]*`))' - - op_re = r'[.=\-<>,@~%/+?*&^!]' - punc_re = r'[()\[\],;|]' # '{' and '}' are treated elsewhere - # because they are also used for inserts - - tokens = { - # copied from the caml lexer, should be adapted - 'escape-sequence': [ - (r'\\[\\\"\'ntr}]', String.Escape), - (r'\\[0-9]{3}', String.Escape), - (r'\\x[0-9a-fA-F]{2}', String.Escape), - ], - - # factorizing these rules, because they are inserted many times - 'comments': [ - (r'/\*', Comment, 'nested-comment'), - (r'//.*?$', Comment), - ], - 'comments-and-spaces': [ - include('comments'), - (r'\s+', Text), - ], - - 'root': [ - include('comments-and-spaces'), - # keywords - (r'\b(%s)\b' % '|'.join(keywords), Keyword), - # directives - # we could parse the actual set of directives instead of anything - # starting with @, but this is troublesome - # because it needs to be adjusted all the time - # and assuming we parse only sources that compile, it is useless - (r'@'+ident_re+r'\b', Name.Builtin.Pseudo), - - # number literals - (r'-?.[\d]+([eE][+\-]?\d+)', Number.Float), - (r'-?\d+.\d*([eE][+\-]?\d+)', Number.Float), - (r'-?\d+[eE][+\-]?\d+', Number.Float), - (r'0[xX][\da-fA-F]+', Number.Hex), - (r'0[oO][0-7]+', Number.Oct), - (r'0[bB][01]+', Number.Binary), - (r'\d+', Number.Integer), - # color literals - (r'#[\da-fA-F]{3,6}', Number.Integer), - - # string literals - (r'"', String.Double, 'string'), - # char literal, 
should be checked because this is the regexp from - # the caml lexer - (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2})|.)'", - String.Char), - - # this is meant to deal with embedded exprs in strings - # every time we find a '}' we pop a state so that if we were - # inside a string, we are back in the string state - # as a consequence, we must also push a state every time we find a - # '{' or else we will have errors when parsing {} for instance - (r'{', Operator, '#push'), - (r'}', Operator, '#pop'), - - # html literals - # this is a much more strict that the actual parser, - # since a])', String.Single, 'html-open-tag'), - - # db path - # matching the '[_]' in '/a[_]' because it is a part - # of the syntax of the db path definition - # unfortunately, i don't know how to match the ']' in - # /a[1], so this is somewhat inconsistent - (r'[@?!]?(/\w+)+(\[_\])?', Name.Variable), - # putting the same color on <- as on db path, since - # it can be used only to mean Db.write - (r'<-(?!'+op_re+r')', Name.Variable), - - # 'modules' - # although modules are not distinguished by their names as in caml - # the standard library seems to follow the convention that modules - # only area capitalized - (r'\b([A-Z]\w*)(?=\.)', Name.Namespace), - - # operators - # = has a special role because this is the only - # way to syntactic distinguish binding constructions - # unfortunately, this colors the equal in {x=2} too - (r'=(?!'+op_re+r')', Keyword), - (r'(%s)+' % op_re, Operator), - (r'(%s)+' % punc_re, Operator), - - # coercions - (r':', Operator, 'type'), - # type variables - # we need this rule because we don't parse specially type - # definitions so in "type t('a) = ...", "'a" is parsed by 'root' - ("'"+ident_re, Keyword.Type), - - # id literal, #something, or #{expr} - (r'#'+ident_re, String.Single), - (r'#(?={)', String.Single), - - # identifiers - # this avoids to color '2' in 'a2' as an integer - (ident_re, Text), - - # default, not sure if that is needed or not - # 
(r'.', Text), - ], - - # it is quite painful to have to parse types to know where they end - # this is the general rule for a type - # a type is either: - # * -> ty - # * type-with-slash - # * type-with-slash -> ty - # * type-with-slash (, type-with-slash)+ -> ty - # - # the code is pretty funky in here, but this code would roughly - # translate in caml to: - # let rec type stream = - # match stream with - # | [< "->"; stream >] -> type stream - # | [< ""; stream >] -> - # type_with_slash stream - # type_lhs_1 stream; - # and type_1 stream = ... - 'type': [ - include('comments-and-spaces'), - (r'->', Keyword.Type), - (r'', Keyword.Type, ('#pop', 'type-lhs-1', 'type-with-slash')), - ], - - # parses all the atomic or closed constructions in the syntax of type - # expressions: record types, tuple types, type constructors, basic type - # and type variables - 'type-1': [ - include('comments-and-spaces'), - (r'\(', Keyword.Type, ('#pop', 'type-tuple')), - (r'~?{', Keyword.Type, ('#pop', 'type-record')), - (ident_re+r'\(', Keyword.Type, ('#pop', 'type-tuple')), - (ident_re, Keyword.Type, '#pop'), - ("'"+ident_re, Keyword.Type), - # this case is not in the syntax but sometimes - # we think we are parsing types when in fact we are parsing - # some css, so we just pop the states until we get back into - # the root state - (r'', Keyword.Type, '#pop'), - ], - - # type-with-slash is either: - # * type-1 - # * type-1 (/ type-1)+ - 'type-with-slash': [ - include('comments-and-spaces'), - (r'', Keyword.Type, ('#pop', 'slash-type-1', 'type-1')), - ], - 'slash-type-1': [ - include('comments-and-spaces'), - ('/', Keyword.Type, ('#pop', 'type-1')), - # same remark as above - (r'', Keyword.Type, '#pop'), - ], - - # we go in this state after having parsed a type-with-slash - # while trying to parse a type - # and at this point we must determine if we are parsing an arrow - # type (in which case we must continue parsing) or not (in which - # case we stop) - 'type-lhs-1': [ - 
include('comments-and-spaces'), - (r'->', Keyword.Type, ('#pop', 'type')), - (r'(?=,)', Keyword.Type, ('#pop', 'type-arrow')), - (r'', Keyword.Type, '#pop'), - ], - 'type-arrow': [ - include('comments-and-spaces'), - # the look ahead here allows to parse f(x : int, y : float -> truc) - # correctly - (r',(?=[^:]*?->)', Keyword.Type, 'type-with-slash'), - (r'->', Keyword.Type, ('#pop', 'type')), - # same remark as above - (r'', Keyword.Type, '#pop'), - ], - - # no need to do precise parsing for tuples and records - # because they are closed constructions, so we can simply - # find the closing delimiter - # note that this function would be not work if the source - # contained identifiers like `{)` (although it could be patched - # to support it) - 'type-tuple': [ - include('comments-and-spaces'), - (r'[^\(\)/*]+', Keyword.Type), - (r'[/*]', Keyword.Type), - (r'\(', Keyword.Type, '#push'), - (r'\)', Keyword.Type, '#pop'), - ], - 'type-record': [ - include('comments-and-spaces'), - (r'[^{}/*]+', Keyword.Type), - (r'[/*]', Keyword.Type), - (r'{', Keyword.Type, '#push'), - (r'}', Keyword.Type, '#pop'), - ], - -# 'type-tuple': [ -# include('comments-and-spaces'), -# (r'\)', Keyword.Type, '#pop'), -# (r'', Keyword.Type, ('#pop', 'type-tuple-1', 'type-1')), -# ], -# 'type-tuple-1': [ -# include('comments-and-spaces'), -# (r',?\s*\)', Keyword.Type, '#pop'), # ,) is a valid end of tuple, in (1,) -# (r',', Keyword.Type, 'type-1'), -# ], -# 'type-record':[ -# include('comments-and-spaces'), -# (r'}', Keyword.Type, '#pop'), -# (r'~?(?:\w+|`[^`]*`)', Keyword.Type, 'type-record-field-expr'), -# ], -# 'type-record-field-expr': [ -# -# ], - - 'nested-comment': [ - (r'[^/*]+', Comment), - (r'/\*', Comment, '#push'), - (r'\*/', Comment, '#pop'), - (r'[/*]', Comment), - ], - - # the copy pasting between string and single-string - # is kinda sad. Is there a way to avoid that?? 
- 'string': [ - (r'[^\\"{]+', String.Double), - (r'"', String.Double, '#pop'), - (r'{', Operator, 'root'), - include('escape-sequence'), - ], - 'single-string': [ - (r'[^\\\'{]+', String.Double), - (r'\'', String.Double, '#pop'), - (r'{', Operator, 'root'), - include('escape-sequence'), - ], - - # all the html stuff - # can't really reuse some existing html parser - # because we must be able to parse embedded expressions - - # we are in this state after someone parsed the '<' that - # started the html literal - 'html-open-tag': [ - (r'[\w\-:]+', String.Single, ('#pop', 'html-attr')), - (r'>', String.Single, ('#pop', 'html-content')), - ], - - # we are in this state after someone parsed the ' is allowed - (r'[\w\-:]*>', String.Single, '#pop'), - ], - - # we are in this state after having parsed '', String.Single, '#pop'), - (r'>', String.Single, ('#pop', 'html-content')), - ], - - 'html-attr-value': [ - (r"'", String.Single, ('#pop', 'single-string')), - (r'"', String.Single, ('#pop', 'string')), - (r'#'+ident_re, String.Single, '#pop'), - (r'#(?={)', String.Single, ('#pop', 'root')), - (r'[^"\'{`=<>]+', String.Single, '#pop'), - (r'{', Operator, ('#pop', 'root')), # this is a tail call! - ], - - # we should probably deal with '\' escapes here - 'html-content': [ - (r'', Comment, '#pop'), - (r'[^\-]+|-', Comment), - ], - } - - -class CoqLexer(RegexLexer): - """ - For the `Coq `_ theorem prover. 
- - *New in Pygments 1.5.* - """ - - name = 'Coq' - aliases = ['coq'] - filenames = ['*.v'] - mimetypes = ['text/x-coq'] - - keywords1 = [ - # Vernacular commands - 'Section', 'Module', 'End', 'Require', 'Import', 'Export', 'Variable', - 'Variables', 'Parameter', 'Parameters', 'Axiom', 'Hypothesis', - 'Hypotheses', 'Notation', 'Local', 'Tactic', 'Reserved', 'Scope', - 'Open', 'Close', 'Bind', 'Delimit', 'Definition', 'Let', 'Ltac', - 'Fixpoint', 'CoFixpoint', 'Morphism', 'Relation', 'Implicit', - 'Arguments', 'Set', 'Unset', 'Contextual', 'Strict', 'Prenex', - 'Implicits', 'Inductive', 'CoInductive', 'Record', 'Structure', - 'Canonical', 'Coercion', 'Theorem', 'Lemma', 'Corollary', - 'Proposition', 'Fact', 'Remark', 'Example', 'Proof', 'Goal', 'Save', - 'Qed', 'Defined', 'Hint', 'Resolve', 'Rewrite', 'View', 'Search', - 'Show', 'Print', 'Printing', 'All', 'Graph', 'Projections', 'inside', - 'outside', - ] - keywords2 = [ - # Gallina - 'forall', 'exists', 'exists2', 'fun', 'fix', 'cofix', 'struct', - 'match', 'end', 'in', 'return', 'let', 'if', 'is', 'then', 'else', - 'for', 'of', 'nosimpl', 'with', 'as', - ] - keywords3 = [ - # Sorts - 'Type', 'Prop', - ] - keywords4 = [ - # Tactics - 'pose', 'set', 'move', 'case', 'elim', 'apply', 'clear', 'hnf', 'intro', - 'intros', 'generalize', 'rename', 'pattern', 'after', 'destruct', - 'induction', 'using', 'refine', 'inversion', 'injection', 'rewrite', - 'congr', 'unlock', 'compute', 'ring', 'field', 'replace', 'fold', - 'unfold', 'change', 'cutrewrite', 'simpl', 'have', 'suff', 'wlog', - 'suffices', 'without', 'loss', 'nat_norm', 'assert', 'cut', 'trivial', - 'revert', 'bool_congr', 'nat_congr', 'symmetry', 'transitivity', 'auto', - 'split', 'left', 'right', 'autorewrite', - ] - keywords5 = [ - # Terminators - 'by', 'done', 'exact', 'reflexivity', 'tauto', 'romega', 'omega', - 'assumption', 'solve', 'contradiction', 'discriminate', - ] - keywords6 = [ - # Control - 'do', 'last', 'first', 'try', 'idtac', 'repeat', - ] - # 
'as', 'assert', 'begin', 'class', 'constraint', 'do', 'done', - # 'downto', 'else', 'end', 'exception', 'external', 'false', - # 'for', 'fun', 'function', 'functor', 'if', 'in', 'include', - # 'inherit', 'initializer', 'lazy', 'let', 'match', 'method', - # 'module', 'mutable', 'new', 'object', 'of', 'open', 'private', - # 'raise', 'rec', 'sig', 'struct', 'then', 'to', 'true', 'try', - # 'type', 'val', 'virtual', 'when', 'while', 'with' - keyopts = [ - '!=', '#', '&', '&&', r'\(', r'\)', r'\*', r'\+', ',', '-', - r'-\.', '->', r'\.', r'\.\.', ':', '::', ':=', ':>', ';', ';;', '<', - '<-', '=', '>', '>]', '>}', r'\?', r'\?\?', r'\[', r'\[<', r'\[>', - r'\[\|', ']', '_', '`', '{', '{<', r'\|', r'\|]', '}', '~', '=>', - r'/\\', r'\\/', - 'Π', 'λ', - ] - operators = r'[!$%&*+\./:<=>?@^|~-]' - word_operators = ['and', 'asr', 'land', 'lor', 'lsl', 'lxor', 'mod', 'or'] - prefix_syms = r'[!?~]' - infix_syms = r'[=<>@^|&+\*/$%-]' - primitives = ['unit', 'int', 'float', 'bool', 'string', 'char', 'list', - 'array'] - - tokens = { - 'root': [ - (r'\s+', Text), - (r'false|true|\(\)|\[\]', Name.Builtin.Pseudo), - (r'\(\*', Comment, 'comment'), - (r'\b(%s)\b' % '|'.join(keywords1), Keyword.Namespace), - (r'\b(%s)\b' % '|'.join(keywords2), Keyword), - (r'\b(%s)\b' % '|'.join(keywords3), Keyword.Type), - (r'\b(%s)\b' % '|'.join(keywords4), Keyword), - (r'\b(%s)\b' % '|'.join(keywords5), Keyword.Pseudo), - (r'\b(%s)\b' % '|'.join(keywords6), Keyword.Reserved), - (r'\b([A-Z][A-Za-z0-9_\']*)(?=\s*\.)', - Name.Namespace, 'dotted'), - (r'\b([A-Z][A-Za-z0-9_\']*)', Name.Class), - (r'(%s)' % '|'.join(keyopts[::-1]), Operator), - (r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator), - (r'\b(%s)\b' % '|'.join(word_operators), Operator.Word), - (r'\b(%s)\b' % '|'.join(primitives), Keyword.Type), - - (r"[^\W\d][\w']*", Name), - - (r'\d[\d_]*', Number.Integer), - (r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex), - (r'0[oO][0-7][0-7_]*', Number.Oct), - (r'0[bB][01][01_]*', 
Number.Binary), - (r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float), - - (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'", - String.Char), - (r"'.'", String.Char), - (r"'", Keyword), # a stray quote is another syntax element - - (r'"', String.Double, 'string'), - - (r'[~?][a-z][\w\']*:', Name.Variable), - ], - 'comment': [ - (r'[^(*)]+', Comment), - (r'\(\*', Comment, '#push'), - (r'\*\)', Comment, '#pop'), - (r'[(*)]', Comment), - ], - 'string': [ - (r'[^"]+', String.Double), - (r'""', String.Double), - (r'"', String.Double, '#pop'), - ], - 'dotted': [ - (r'\s+', Text), - (r'\.', Punctuation), - (r'[A-Z][A-Za-z0-9_\']*(?=\s*\.)', Name.Namespace), - (r'[A-Z][A-Za-z0-9_\']*', Name.Class, '#pop'), - (r'[a-z][a-z0-9_\']*', Name, '#pop'), - (r'', Text, '#pop') - ], - } - - def analyse_text(text): - if text.startswith('(*'): - return True - - -class NewLispLexer(RegexLexer): - """ - For `newLISP. `_ source code (version 10.3.0). - - *New in Pygments 1.5.* - """ - - name = 'NewLisp' - aliases = ['newlisp'] - filenames = ['*.lsp', '*.nl'] - mimetypes = ['text/x-newlisp', 'application/x-newlisp'] - - flags = re.IGNORECASE | re.MULTILINE | re.UNICODE - - # list of built-in functions for newLISP version 10.3 - builtins = [ - '^', '--', '-', ':', '!', '!=', '?', '@', '*', '/', '&', '%', '+', '++', - '<', '<<', '<=', '=', '>', '>=', '>>', '|', '~', '$', '$0', '$1', '$10', - '$11', '$12', '$13', '$14', '$15', '$2', '$3', '$4', '$5', '$6', '$7', - '$8', '$9', '$args', '$idx', '$it', '$main-args', 'abort', 'abs', - 'acos', 'acosh', 'add', 'address', 'amb', 'and', 'and', 'append-file', - 'append', 'apply', 'args', 'array-list', 'array?', 'array', 'asin', - 'asinh', 'assoc', 'atan', 'atan2', 'atanh', 'atom?', 'base64-dec', - 'base64-enc', 'bayes-query', 'bayes-train', 'begin', 'begin', 'begin', - 'beta', 'betai', 'bind', 'binomial', 'bits', 'callback', 'case', 'case', - 'case', 'catch', 'ceil', 'change-dir', 'char', 'chop', 'Class', 'clean', - 'close', 
'command-event', 'cond', 'cond', 'cond', 'cons', 'constant', - 'context?', 'context', 'copy-file', 'copy', 'cos', 'cosh', 'count', - 'cpymem', 'crc32', 'crit-chi2', 'crit-z', 'current-line', 'curry', - 'date-list', 'date-parse', 'date-value', 'date', 'debug', 'dec', - 'def-new', 'default', 'define-macro', 'define-macro', 'define', - 'delete-file', 'delete-url', 'delete', 'destroy', 'det', 'device', - 'difference', 'directory?', 'directory', 'div', 'do-until', 'do-while', - 'doargs', 'dolist', 'dostring', 'dotimes', 'dotree', 'dump', 'dup', - 'empty?', 'encrypt', 'ends-with', 'env', 'erf', 'error-event', - 'eval-string', 'eval', 'exec', 'exists', 'exit', 'exp', 'expand', - 'explode', 'extend', 'factor', 'fft', 'file-info', 'file?', 'filter', - 'find-all', 'find', 'first', 'flat', 'float?', 'float', 'floor', 'flt', - 'fn', 'for-all', 'for', 'fork', 'format', 'fv', 'gammai', 'gammaln', - 'gcd', 'get-char', 'get-float', 'get-int', 'get-long', 'get-string', - 'get-url', 'global?', 'global', 'if-not', 'if', 'ifft', 'import', 'inc', - 'index', 'inf?', 'int', 'integer?', 'integer', 'intersect', 'invert', - 'irr', 'join', 'lambda-macro', 'lambda?', 'lambda', 'last-error', - 'last', 'legal?', 'length', 'let', 'let', 'let', 'letex', 'letn', - 'letn', 'letn', 'list?', 'list', 'load', 'local', 'log', 'lookup', - 'lower-case', 'macro?', 'main-args', 'MAIN', 'make-dir', 'map', 'mat', - 'match', 'max', 'member', 'min', 'mod', 'module', 'mul', 'multiply', - 'NaN?', 'net-accept', 'net-close', 'net-connect', 'net-error', - 'net-eval', 'net-interface', 'net-ipv', 'net-listen', 'net-local', - 'net-lookup', 'net-packet', 'net-peek', 'net-peer', 'net-ping', - 'net-receive-from', 'net-receive-udp', 'net-receive', 'net-select', - 'net-send-to', 'net-send-udp', 'net-send', 'net-service', - 'net-sessions', 'new', 'nil?', 'nil', 'normal', 'not', 'now', 'nper', - 'npv', 'nth', 'null?', 'number?', 'open', 'or', 'ostype', 'pack', - 'parse-date', 'parse', 'peek', 'pipe', 'pmt', 'pop-assoc', 
'pop', - 'post-url', 'pow', 'prefix', 'pretty-print', 'primitive?', 'print', - 'println', 'prob-chi2', 'prob-z', 'process', 'prompt-event', - 'protected?', 'push', 'put-url', 'pv', 'quote?', 'quote', 'rand', - 'random', 'randomize', 'read', 'read-char', 'read-expr', 'read-file', - 'read-key', 'read-line', 'read-utf8', 'read', 'reader-event', - 'real-path', 'receive', 'ref-all', 'ref', 'regex-comp', 'regex', - 'remove-dir', 'rename-file', 'replace', 'reset', 'rest', 'reverse', - 'rotate', 'round', 'save', 'search', 'seed', 'seek', 'select', 'self', - 'semaphore', 'send', 'sequence', 'series', 'set-locale', 'set-ref-all', - 'set-ref', 'set', 'setf', 'setq', 'sgn', 'share', 'signal', 'silent', - 'sin', 'sinh', 'sleep', 'slice', 'sort', 'source', 'spawn', 'sqrt', - 'starts-with', 'string?', 'string', 'sub', 'swap', 'sym', 'symbol?', - 'symbols', 'sync', 'sys-error', 'sys-info', 'tan', 'tanh', 'term', - 'throw-error', 'throw', 'time-of-day', 'time', 'timer', 'title-case', - 'trace-highlight', 'trace', 'transpose', 'Tree', 'trim', 'true?', - 'true', 'unicode', 'unify', 'unique', 'unless', 'unpack', 'until', - 'upper-case', 'utf8', 'utf8len', 'uuid', 'wait-pid', 'when', 'while', - 'write', 'write-char', 'write-file', 'write-line', 'write', - 'xfer-event', 'xml-error', 'xml-parse', 'xml-type-tags', 'zero?', - ] - - # valid names - valid_name = r'([a-zA-Z0-9!$%&*+.,/<=>?@^_~|-])+|(\[.*?\])+' - - tokens = { - 'root': [ - # shebang - (r'#!(.*?)$', Comment.Preproc), - # comments starting with semicolon - (r';.*$', Comment.Single), - # comments starting with # - (r'#.*$', Comment.Single), - - # whitespace - (r'\s+', Text), - - # strings, symbols and characters - (r'"(\\\\|\\"|[^"])*"', String), - - # braces - (r"{", String, "bracestring"), - - # [text] ... [/text] delimited strings - (r'\[text\]*', String, "tagstring"), - - # 'special' operators... 
- (r"('|:)", Operator), - - # highlight the builtins - ('(%s)' % '|'.join(re.escape(entry) + '\\b' for entry in builtins), - Keyword), - - # the remaining functions - (r'(?<=\()' + valid_name, Name.Variable), - - # the remaining variables - (valid_name, String.Symbol), - - # parentheses - (r'(\(|\))', Punctuation), - ], - - # braced strings... - 'bracestring': [ - ("{", String, "#push"), - ("}", String, "#pop"), - ("[^{}]+", String), - ], - - # tagged [text]...[/text] delimited strings... - 'tagstring': [ - (r'(?s)(.*?)(\[/text\])', String, '#pop'), - ], - } - - -class ElixirLexer(RegexLexer): - """ - For the `Elixir language `_. - - *New in Pygments 1.5.* - """ - - name = 'Elixir' - aliases = ['elixir', 'ex', 'exs'] - filenames = ['*.ex', '*.exs'] - mimetypes = ['text/x-elixir'] - - def gen_elixir_sigil_rules(): - states = {} - - states['strings'] = [ - (r'(%[A-Ba-z])?"""(?:.|\n)*?"""', String.Doc), - (r"'''(?:.|\n)*?'''", String.Doc), - (r'"', String.Double, 'dqs'), - (r"'.*'", String.Single), - (r'(?', 'lt'): - - states['strings'] += [ - (r'%[a-z]' + lbrace, String.Double, name + 'intp'), - (r'%[A-Z]' + lbrace, String.Double, name + 'no-intp') - ] - - states[name +'intp'] = [ - (r'' + rbrace + '[a-z]*', String.Double, "#pop"), - include('enddoublestr') - ] - - states[name +'no-intp'] = [ - (r'.*' + rbrace + '[a-z]*', String.Double , "#pop") - ] - - return states - - tokens = { - 'root': [ - (r'\s+', Text), - (r'#.*$', Comment.Single), - (r'\b(case|cond|end|bc|lc|if|unless|try|loop|receive|fn|defmodule|' - r'defp?|defprotocol|defimpl|defrecord|defmacrop?|defdelegate|' - r'defexception|exit|raise|throw|unless|after|rescue|catch|else)\b(?![?!])|' - r'(?)\b\s*', Keyword), - (r'\b(import|require|use|recur|quote|unquote|super|refer)\b(?![?!])', - Keyword.Namespace), - (r'(?|<(?!<|=)|>(?!<|=|>)|<=|>=|===|==|=~|!=|!~|(?=[ \t])\?|' - r'(?<=[ \t])!+|&&|\|\||\^|\*|\+|\-|/|' - r'\||\+\+|\-\-|\*\*|\/\/|\<\-|\<\>|<<|>>|=|\.', Operator), - (r'(?=]))?|\<\>|===?|>=?|<=?|' - 
r'<=>|&&?|%\(\)|%\[\]|%\{\}|\+\+?|\-\-?|\|\|?|\!|//|[%&`/\|]|' - r'\*\*?|=?~|<\-)|([a-zA-Z_]\w*([?!])?)(:)(?!:)', String.Symbol), - (r':"', String.Symbol, 'interpoling_symbol'), - (r'\b(nil|true|false)\b(?![?!])|\b[A-Z]\w*\b', Name.Constant), - (r'\b(__(FILE|LINE|MODULE|MAIN|FUNCTION)__)\b(?![?!])', Name.Builtin.Pseudo), - (r'[a-zA-Z_!][\w_]*[!\?]?', Name), - (r'[(){};,/\|:\\\[\]]', Punctuation), - (r'@[a-zA-Z_]\w*|&\d', Name.Variable), - (r'\b(0[xX][0-9A-Fa-f]+|\d(_?\d)*(\.(?![^\d\s])' - r'(_?\d)*)?([eE][-+]?\d(_?\d)*)?|0[bB][01]+)\b', Number), - (r'%r\/.*\/', String.Regex), - include('strings'), - ], - 'dqs': [ - (r'"', String.Double, "#pop"), - include('enddoublestr') - ], - 'interpoling': [ - (r'#{', String.Interpol, 'interpoling_string'), - ], - 'interpoling_string' : [ - (r'}', String.Interpol, "#pop"), - include('root') - ], - 'interpoling_symbol': [ - (r'"', String.Symbol, "#pop"), - include('interpoling'), - (r'[^#"]+', String.Symbol), - ], - 'enddoublestr' : [ - include('interpoling'), - (r'[^#"]+', String.Double), - ] - } - tokens.update(gen_elixir_sigil_rules()) - - -class ElixirConsoleLexer(Lexer): - """ - For Elixir interactive console (iex) output like: - - .. 
sourcecode:: iex - - iex> [head | tail] = [1,2,3] - [1,2,3] - iex> head - 1 - iex> tail - [2,3] - iex> [head | tail] - [1,2,3] - iex> length [head | tail] - 3 - - *New in Pygments 1.5.* - """ - - name = 'Elixir iex session' - aliases = ['iex'] - mimetypes = ['text/x-elixir-shellsession'] - - _prompt_re = re.compile('(iex|\.{3})> ') - - def get_tokens_unprocessed(self, text): - exlexer = ElixirLexer(**self.options) - - curcode = '' - insertions = [] - for match in line_re.finditer(text): - line = match.group() - if line.startswith('** '): - insertions.append((len(curcode), - [(0, Generic.Error, line[:-1])])) - curcode += line[-1:] - else: - m = self._prompt_re.match(line) - if m is not None: - end = m.end() - insertions.append((len(curcode), - [(0, Generic.Prompt, line[:end])])) - curcode += line[end:] - else: - if curcode: - for item in do_insertions(insertions, - exlexer.get_tokens_unprocessed(curcode)): - yield item - curcode = '' - insertions = [] - yield match.start(), Generic.Output, line - if curcode: - for item in do_insertions(insertions, - exlexer.get_tokens_unprocessed(curcode)): - yield item - - -class KokaLexer(RegexLexer): - """ - Lexer for the `Koka `_ - language. 
- - *New in Pygments 1.6.* - """ - - name = 'Koka' - aliases = ['koka'] - filenames = ['*.kk', '*.kki'] - mimetypes = ['text/x-koka'] - - keywords = [ - 'infix', 'infixr', 'infixl', - 'type', 'cotype', 'rectype', 'alias', - 'struct', 'con', - 'fun', 'function', 'val', 'var', - 'external', - 'if', 'then', 'else', 'elif', 'return', 'match', - 'private', 'public', 'private', - 'module', 'import', 'as', - 'include', 'inline', - 'rec', - 'try', 'yield', 'enum', - 'interface', 'instance', - ] - - # keywords that are followed by a type - typeStartKeywords = [ - 'type', 'cotype', 'rectype', 'alias', 'struct', 'enum', - ] - - # keywords valid in a type - typekeywords = [ - 'forall', 'exists', 'some', 'with', - ] - - # builtin names and special names - builtin = [ - 'for', 'while', 'repeat', - 'foreach', 'foreach-indexed', - 'error', 'catch', 'finally', - 'cs', 'js', 'file', 'ref', 'assigned', - ] - - # symbols that can be in an operator - symbols = '[\$%&\*\+@!/\\\^~=\.:\-\?\|<>]+' - - # symbol boundary: an operator keyword should not be followed by any of these - sboundary = '(?!'+symbols+')' - - # name boundary: a keyword should not be followed by any of these - boundary = '(?![\w/])' - - # koka token abstractions - tokenType = Name.Attribute - tokenTypeDef = Name.Class - tokenConstructor = Generic.Emph - - # main lexer - tokens = { - 'root': [ - include('whitespace'), - - # go into type mode - (r'::?' 
+ sboundary, tokenType, 'type'), - (r'(alias)(\s+)([a-z]\w*)?', bygroups(Keyword, Text, tokenTypeDef), - 'alias-type'), - (r'(struct)(\s+)([a-z]\w*)?', bygroups(Keyword, Text, tokenTypeDef), - 'struct-type'), - ((r'(%s)' % '|'.join(typeStartKeywords)) + - r'(\s+)([a-z]\w*)?', bygroups(Keyword, Text, tokenTypeDef), - 'type'), - - # special sequences of tokens (we use ?: for non-capturing group as - # required by 'bygroups') - (r'(module)(\s+)(interface\s+)?((?:[a-z]\w*/)*[a-z]\w*)', - bygroups(Keyword, Text, Keyword, Name.Namespace)), - (r'(import)(\s+)((?:[a-z]\w*/)*[a-z]\w*)' - r'(?:(\s*)(=)(\s*)((?:qualified\s*)?)' - r'((?:[a-z]\w*/)*[a-z]\w*))?', - bygroups(Keyword, Text, Name.Namespace, Text, Keyword, Text, - Keyword, Name.Namespace)), - - (r'(^(?:(?:public|private)\s*)?(?:function|fun|val))' - r'(\s+)([a-z]\w*|\((?:' + symbols + r'|/)\))', - bygroups(Keyword, Text, Name.Function)), - (r'(^(?:(?:public|private)\s*)?external)(\s+)(inline\s+)?' - r'([a-z]\w*|\((?:' + symbols + r'|/)\))', - bygroups(Keyword, Text, Keyword, Name.Function)), - - # keywords - (r'(%s)' % '|'.join(typekeywords) + boundary, Keyword.Type), - (r'(%s)' % '|'.join(keywords) + boundary, Keyword), - (r'(%s)' % '|'.join(builtin) + boundary, Keyword.Pseudo), - (r'::?|:=|\->|[=\.]' + sboundary, Keyword), - - # names - (r'((?:[a-z]\w*/)*)([A-Z]\w*)', - bygroups(Name.Namespace, tokenConstructor)), - (r'((?:[a-z]\w*/)*)([a-z]\w*)', bygroups(Name.Namespace, Name)), - (r'((?:[a-z]\w*/)*)(\((?:' + symbols + r'|/)\))', - bygroups(Name.Namespace, Name)), - (r'_\w*', Name.Variable), - - # literal string - (r'@"', String.Double, 'litstring'), - - # operators - (symbols + "|/(?![\*/])", Operator), - (r'`', Operator), - (r'[\{\}\(\)\[\];,]', Punctuation), - - # literals. 
No check for literal characters with len > 1 - (r'[0-9]+\.[0-9]+([eE][\-\+]?[0-9]+)?', Number.Float), - (r'0[xX][0-9a-fA-F]+', Number.Hex), - (r'[0-9]+', Number.Integer), - - (r"'", String.Char, 'char'), - (r'"', String.Double, 'string'), - ], - - # type started by alias - 'alias-type': [ - (r'=',Keyword), - include('type') - ], - - # type started by struct - 'struct-type': [ - (r'(?=\((?!,*\)))',Punctuation, '#pop'), - include('type') - ], - - # type started by colon - 'type': [ - (r'[\(\[<]', tokenType, 'type-nested'), - include('type-content') - ], - - # type nested in brackets: can contain parameters, comma etc. - 'type-nested': [ - (r'[\)\]>]', tokenType, '#pop'), - (r'[\(\[<]', tokenType, 'type-nested'), - (r',', tokenType), - (r'([a-z]\w*)(\s*)(:)(?!:)', - bygroups(Name, Text, tokenType)), # parameter name - include('type-content') - ], - - # shared contents of a type - 'type-content': [ - include('whitespace'), - - # keywords - (r'(%s)' % '|'.join(typekeywords) + boundary, Keyword), - (r'(?=((%s)' % '|'.join(keywords) + boundary + '))', - Keyword, '#pop'), # need to match because names overlap... 
- - # kinds - (r'[EPHVX]' + boundary, tokenType), - - # type names - (r'[a-z][0-9]*(?![\w/])', tokenType ), - (r'_\w*', tokenType.Variable), # Generic.Emph - (r'((?:[a-z]\w*/)*)([A-Z]\w*)', - bygroups(Name.Namespace, tokenType)), - (r'((?:[a-z]\w*/)*)([a-z]\w+)', - bygroups(Name.Namespace, tokenType)), - - # type keyword operators - (r'::|\->|[\.:|]', tokenType), - - #catchall - (r'', Text, '#pop') - ], - - # comments and literals - 'whitespace': [ - (r'\n\s*#.*$', Comment.Preproc), - (r'\s+', Text), - (r'/\*', Comment.Multiline, 'comment'), - (r'//.*$', Comment.Single) - ], - 'comment': [ - (r'[^/\*]+', Comment.Multiline), - (r'/\*', Comment.Multiline, '#push'), - (r'\*/', Comment.Multiline, '#pop'), - (r'[\*/]', Comment.Multiline), - ], - 'litstring': [ - (r'[^"]+', String.Double), - (r'""', String.Escape), - (r'"', String.Double, '#pop'), - ], - 'string': [ - (r'[^\\"\n]+', String.Double), - include('escape-sequence'), - (r'["\n]', String.Double, '#pop'), - ], - 'char': [ - (r'[^\\\'\n]+', String.Char), - include('escape-sequence'), - (r'[\'\n]', String.Char, '#pop'), - ], - 'escape-sequence': [ - (r'\\[nrt\\\"\']', String.Escape), - (r'\\x[0-9a-fA-F]{2}', String.Escape), - (r'\\u[0-9a-fA-F]{4}', String.Escape), - # Yes, \U literals are 6 hex digits. - (r'\\U[0-9a-fA-F]{6}', String.Escape) - ] - } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/hdl.py b/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/hdl.py deleted file mode 100644 index 57ffc34..0000000 --- a/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/hdl.py +++ /dev/null @@ -1,356 +0,0 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.hdl - ~~~~~~~~~~~~~~~~~~~ - - Lexers for hardware descriptor languages. - - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. 
-""" - -import re -from pygments.lexer import RegexLexer, bygroups, include, using, this -from pygments.token import \ - Text, Comment, Operator, Keyword, Name, String, Number, Punctuation, \ - Error - -__all__ = ['VerilogLexer', 'SystemVerilogLexer', 'VhdlLexer'] - - -class VerilogLexer(RegexLexer): - """ - For verilog source code with preprocessor directives. - - *New in Pygments 1.4.* - """ - name = 'verilog' - aliases = ['verilog', 'v'] - filenames = ['*.v'] - mimetypes = ['text/x-verilog'] - - #: optional Comment or Whitespace - _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+' - - tokens = { - 'root': [ - (r'^\s*`define', Comment.Preproc, 'macro'), - (r'\n', Text), - (r'\s+', Text), - (r'\\\n', Text), # line continuation - (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single), - (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), - (r'[{}#@]', Punctuation), - (r'L?"', String, 'string'), - (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char), - (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float), - (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), - (r'([0-9]+)|(\'h)[0-9a-fA-F]+', Number.Hex), - (r'([0-9]+)|(\'b)[0-1]+', Number.Hex), # should be binary - (r'([0-9]+)|(\'d)[0-9]+', Number.Integer), - (r'([0-9]+)|(\'o)[0-7]+', Number.Oct), - (r'\'[01xz]', Number), - (r'\d+[Ll]?', Number.Integer), - (r'\*/', Error), - (r'[~!%^&*+=|?:<>/-]', Operator), - (r'[()\[\],.;\']', Punctuation), - (r'`[a-zA-Z_][a-zA-Z0-9_]*', Name.Constant), - - (r'^(\s*)(package)(\s+)', bygroups(Text, Keyword.Namespace, Text)), - (r'^(\s*)(import)(\s+)', bygroups(Text, Keyword.Namespace, Text), - 'import'), - - (r'(always|always_comb|always_ff|always_latch|and|assign|automatic|' - r'begin|break|buf|bufif0|bufif1|case|casex|casez|cmos|const|' - r'continue|deassign|default|defparam|disable|do|edge|else|end|endcase|' - r'endfunction|endgenerate|endmodule|endpackage|endprimitive|endspecify|' - r'endtable|endtask|enum|event|final|for|force|forever|fork|function|' - 
r'generate|genvar|highz0|highz1|if|initial|inout|input|' - r'integer|join|large|localparam|macromodule|medium|module|' - r'nand|negedge|nmos|nor|not|notif0|notif1|or|output|packed|' - r'parameter|pmos|posedge|primitive|pull0|pull1|pulldown|pullup|rcmos|' - r'ref|release|repeat|return|rnmos|rpmos|rtran|rtranif0|' - r'rtranif1|scalared|signed|small|specify|specparam|strength|' - r'string|strong0|strong1|struct|table|task|' - r'tran|tranif0|tranif1|type|typedef|' - r'unsigned|var|vectored|void|wait|weak0|weak1|while|' - r'xnor|xor)\b', Keyword), - - (r'`(accelerate|autoexpand_vectornets|celldefine|default_nettype|' - r'else|elsif|endcelldefine|endif|endprotect|endprotected|' - r'expand_vectornets|ifdef|ifndef|include|noaccelerate|noexpand_vectornets|' - r'noremove_gatenames|noremove_netnames|nounconnected_drive|' - r'protect|protected|remove_gatenames|remove_netnames|resetall|' - r'timescale|unconnected_drive|undef)\b', Comment.Preproc), - - (r'\$(bits|bitstoreal|bitstoshortreal|countdrivers|display|fclose|' - r'fdisplay|finish|floor|fmonitor|fopen|fstrobe|fwrite|' - r'getpattern|history|incsave|input|itor|key|list|log|' - r'monitor|monitoroff|monitoron|nokey|nolog|printtimescale|' - r'random|readmemb|readmemh|realtime|realtobits|reset|reset_count|' - r'reset_value|restart|rtoi|save|scale|scope|shortrealtobits|' - r'showscopes|showvariables|showvars|sreadmemb|sreadmemh|' - r'stime|stop|strobe|time|timeformat|write)\b', Name.Builtin), - - (r'(byte|shortint|int|longint|integer|time|' - r'bit|logic|reg|' - r'supply0|supply1|tri|triand|trior|tri0|tri1|trireg|uwire|wire|wand|wor' - r'shortreal|real|realtime)\b', Keyword.Type), - ('[a-zA-Z_][a-zA-Z0-9_]*:(?!:)', Name.Label), - ('[a-zA-Z_][a-zA-Z0-9_]*', Name), - ], - 'string': [ - (r'"', String, '#pop'), - (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), - (r'[^\\"\n]+', String), # all other characters - (r'\\\n', String), # line continuation - (r'\\', String), # stray backslash - ], - 'macro': [ - 
(r'[^/\n]+', Comment.Preproc), - (r'/[*](.|\n)*?[*]/', Comment.Multiline), - (r'//.*?\n', Comment.Single, '#pop'), - (r'/', Comment.Preproc), - (r'(?<=\\)\n', Comment.Preproc), - (r'\n', Comment.Preproc, '#pop'), - ], - 'import': [ - (r'[a-zA-Z0-9_:]+\*?', Name.Namespace, '#pop') - ] - } - - def get_tokens_unprocessed(self, text): - for index, token, value in \ - RegexLexer.get_tokens_unprocessed(self, text): - # Convention: mark all upper case names as constants - if token is Name: - if value.isupper(): - token = Name.Constant - yield index, token, value - - -class SystemVerilogLexer(RegexLexer): - """ - Extends verilog lexer to recognise all SystemVerilog keywords from IEEE - 1800-2009 standard. - - *New in Pygments 1.5.* - """ - name = 'systemverilog' - aliases = ['systemverilog', 'sv'] - filenames = ['*.sv', '*.svh'] - mimetypes = ['text/x-systemverilog'] - - #: optional Comment or Whitespace - _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+' - - tokens = { - 'root': [ - (r'^\s*`define', Comment.Preproc, 'macro'), - (r'^(\s*)(package)(\s+)', bygroups(Text, Keyword.Namespace, Text)), - (r'^(\s*)(import)(\s+)', bygroups(Text, Keyword.Namespace, Text), 'import'), - - (r'\n', Text), - (r'\s+', Text), - (r'\\\n', Text), # line continuation - (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single), - (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), - (r'[{}#@]', Punctuation), - (r'L?"', String, 'string'), - (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char), - (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float), - (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), - (r'([0-9]+)|(\'h)[0-9a-fA-F]+', Number.Hex), - (r'([0-9]+)|(\'b)[0-1]+', Number.Hex), # should be binary - (r'([0-9]+)|(\'d)[0-9]+', Number.Integer), - (r'([0-9]+)|(\'o)[0-7]+', Number.Oct), - (r'\'[01xz]', Number), - (r'\d+[Ll]?', Number.Integer), - (r'\*/', Error), - (r'[~!%^&*+=|?:<>/-]', Operator), - (r'[()\[\],.;\']', Punctuation), - (r'`[a-zA-Z_][a-zA-Z0-9_]*', Name.Constant), - - 
(r'(accept_on|alias|always|always_comb|always_ff|always_latch|' - r'and|assert|assign|assume|automatic|before|begin|bind|bins|' - r'binsof|bit|break|buf|bufif0|bufif1|byte|case|casex|casez|' - r'cell|chandle|checker|class|clocking|cmos|config|const|constraint|' - r'context|continue|cover|covergroup|coverpoint|cross|deassign|' - r'default|defparam|design|disable|dist|do|edge|else|end|endcase|' - r'endchecker|endclass|endclocking|endconfig|endfunction|endgenerate|' - r'endgroup|endinterface|endmodule|endpackage|endprimitive|' - r'endprogram|endproperty|endsequence|endspecify|endtable|' - r'endtask|enum|event|eventually|expect|export|extends|extern|' - r'final|first_match|for|force|foreach|forever|fork|forkjoin|' - r'function|generate|genvar|global|highz0|highz1|if|iff|ifnone|' - r'ignore_bins|illegal_bins|implies|import|incdir|include|' - r'initial|inout|input|inside|instance|int|integer|interface|' - r'intersect|join|join_any|join_none|large|let|liblist|library|' - r'local|localparam|logic|longint|macromodule|matches|medium|' - r'modport|module|nand|negedge|new|nexttime|nmos|nor|noshowcancelled|' - r'not|notif0|notif1|null|or|output|package|packed|parameter|' - r'pmos|posedge|primitive|priority|program|property|protected|' - r'pull0|pull1|pulldown|pullup|pulsestyle_ondetect|pulsestyle_onevent|' - r'pure|rand|randc|randcase|randsequence|rcmos|real|realtime|' - r'ref|reg|reject_on|release|repeat|restrict|return|rnmos|' - r'rpmos|rtran|rtranif0|rtranif1|s_always|s_eventually|s_nexttime|' - r's_until|s_until_with|scalared|sequence|shortint|shortreal|' - r'showcancelled|signed|small|solve|specify|specparam|static|' - r'string|strong|strong0|strong1|struct|super|supply0|supply1|' - r'sync_accept_on|sync_reject_on|table|tagged|task|this|throughout|' - r'time|timeprecision|timeunit|tran|tranif0|tranif1|tri|tri0|' - r'tri1|triand|trior|trireg|type|typedef|union|unique|unique0|' - r'unsigned|until|until_with|untyped|use|uwire|var|vectored|' - 
r'virtual|void|wait|wait_order|wand|weak|weak0|weak1|while|' - r'wildcard|wire|with|within|wor|xnor|xor)\b', Keyword ), - - (r'(`__FILE__|`__LINE__|`begin_keywords|`celldefine|`default_nettype|' - r'`define|`else|`elsif|`end_keywords|`endcelldefine|`endif|' - r'`ifdef|`ifndef|`include|`line|`nounconnected_drive|`pragma|' - r'`resetall|`timescale|`unconnected_drive|`undef|`undefineall)\b', - Comment.Preproc ), - - (r'(\$display|\$displayb|\$displayh|\$displayo|\$dumpall|\$dumpfile|' - r'\$dumpflush|\$dumplimit|\$dumpoff|\$dumpon|\$dumpports|' - r'\$dumpportsall|\$dumpportsflush|\$dumpportslimit|\$dumpportsoff|' - r'\$dumpportson|\$dumpvars|\$fclose|\$fdisplay|\$fdisplayb|' - r'\$fdisplayh|\$fdisplayo|\$feof|\$ferror|\$fflush|\$fgetc|' - r'\$fgets|\$fmonitor|\$fmonitorb|\$fmonitorh|\$fmonitoro|' - r'\$fopen|\$fread|\$fscanf|\$fseek|\$fstrobe|\$fstrobeb|\$fstrobeh|' - r'\$fstrobeo|\$ftell|\$fwrite|\$fwriteb|\$fwriteh|\$fwriteo|' - r'\$monitor|\$monitorb|\$monitorh|\$monitoro|\$monitoroff|' - r'\$monitoron|\$plusargs|\$readmemb|\$readmemh|\$rewind|\$sformat|' - r'\$sformatf|\$sscanf|\$strobe|\$strobeb|\$strobeh|\$strobeo|' - r'\$swrite|\$swriteb|\$swriteh|\$swriteo|\$test|\$ungetc|' - r'\$value\$plusargs|\$write|\$writeb|\$writeh|\$writememb|' - r'\$writememh|\$writeo)\b' , Name.Builtin ), - - (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'), - (r'(byte|shortint|int|longint|integer|time|' - r'bit|logic|reg|' - r'supply0|supply1|tri|triand|trior|tri0|tri1|trireg|uwire|wire|wand|wor' - r'shortreal|real|realtime)\b', Keyword.Type), - ('[a-zA-Z_][a-zA-Z0-9_]*:(?!:)', Name.Label), - ('[a-zA-Z_][a-zA-Z0-9_]*', Name), - ], - 'classname': [ - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop'), - ], - 'string': [ - (r'"', String, '#pop'), - (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), - (r'[^\\"\n]+', String), # all other characters - (r'\\\n', String), # line continuation - (r'\\', String), # stray backslash - ], - 'macro': [ - (r'[^/\n]+', 
Comment.Preproc), - (r'/[*](.|\n)*?[*]/', Comment.Multiline), - (r'//.*?\n', Comment.Single, '#pop'), - (r'/', Comment.Preproc), - (r'(?<=\\)\n', Comment.Preproc), - (r'\n', Comment.Preproc, '#pop'), - ], - 'import': [ - (r'[a-zA-Z0-9_:]+\*?', Name.Namespace, '#pop') - ] - } - - def get_tokens_unprocessed(self, text): - for index, token, value in \ - RegexLexer.get_tokens_unprocessed(self, text): - # Convention: mark all upper case names as constants - if token is Name: - if value.isupper(): - token = Name.Constant - yield index, token, value - - def analyse_text(text): - if text.startswith('//') or text.startswith('/*'): - return 0.5 - - -class VhdlLexer(RegexLexer): - """ - For VHDL source code. - - *New in Pygments 1.5.* - """ - name = 'vhdl' - aliases = ['vhdl'] - filenames = ['*.vhdl', '*.vhd'] - mimetypes = ['text/x-vhdl'] - flags = re.MULTILINE | re.IGNORECASE - - tokens = { - 'root': [ - (r'\n', Text), - (r'\s+', Text), - (r'\\\n', Text), # line continuation - (r'--(?![!#$%&*+./<=>?@\^|_~]).*?$', Comment.Single), - (r"'(U|X|0|1|Z|W|L|H|-)'", String.Char), - (r'[~!%^&*+=|?:<>/-]', Operator), - (r"'[a-zA-Z_][a-zA-Z0-9_]*", Name.Attribute), - (r'[()\[\],.;\']', Punctuation), - (r'"[^\n\\]*"', String), - - (r'(library)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)', - bygroups(Keyword, Text, Name.Namespace)), - (r'(use)(\s+)(entity)', bygroups(Keyword, Text, Keyword)), - (r'(use)(\s+)([a-zA-Z_][\.a-zA-Z0-9_]*)', - bygroups(Keyword, Text, Name.Namespace)), - (r'(entity|component)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)', - bygroups(Keyword, Text, Name.Class)), - (r'(architecture|configuration)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)(\s+)' - r'(of)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)(\s+)(is)', - bygroups(Keyword, Text, Name.Class, Text, Keyword, Text, - Name.Class, Text, Keyword)), - - (r'(end)(\s+)', bygroups(using(this), Text), 'endblock'), - - include('types'), - include('keywords'), - include('numbers'), - - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name), - ], - 'endblock': [ - include('keywords'), - 
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class), - (r'(\s+)', Text), - (r';', Punctuation, '#pop'), - ], - 'types': [ - (r'(boolean|bit|character|severity_level|integer|time|delay_length|' - r'natural|positive|string|bit_vector|file_open_kind|' - r'file_open_status|std_ulogic|std_ulogic_vector|std_logic|' - r'std_logic_vector)\b', Keyword.Type), - ], - 'keywords': [ - (r'(abs|access|after|alias|all|and|' - r'architecture|array|assert|attribute|begin|block|' - r'body|buffer|bus|case|component|configuration|' - r'constant|disconnect|downto|else|elsif|end|' - r'entity|exit|file|for|function|generate|' - r'generic|group|guarded|if|impure|in|' - r'inertial|inout|is|label|library|linkage|' - r'literal|loop|map|mod|nand|new|' - r'next|nor|not|null|of|on|' - r'open|or|others|out|package|port|' - r'postponed|procedure|process|pure|range|record|' - r'register|reject|return|rol|ror|select|' - r'severity|signal|shared|sla|sli|sra|' - r'srl|subtype|then|to|transport|type|' - r'units|until|use|variable|wait|when|' - r'while|with|xnor|xor)\b', Keyword), - ], - 'numbers': [ - (r'\d{1,2}#[0-9a-fA-F_]+#?', Number.Integer), - (r'[0-1_]+(\.[0-1_])', Number.Integer), - (r'\d+', Number.Integer), - (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float), - (r'H"[0-9a-fA-F_]+"', Number.Oct), - (r'O"[0-7_]+"', Number.Oct), - (r'B"[0-1_]+"', Number.Oct), - ], - } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/jvm.py b/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/jvm.py deleted file mode 100644 index 481221f..0000000 --- a/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/jvm.py +++ /dev/null @@ -1,1112 +0,0 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.jvm - ~~~~~~~~~~~~~~~~~~~ - - Pygments lexers for JVM languages. - - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. 
-""" - -import re - -from pygments.lexer import Lexer, RegexLexer, include, bygroups, using, \ - this -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Punctuation -from pygments.util import get_choice_opt -from pygments import unistring as uni - - -__all__ = ['JavaLexer', 'ScalaLexer', 'GosuLexer', 'GosuTemplateLexer', - 'GroovyLexer', 'IokeLexer', 'ClojureLexer', 'KotlinLexer', - 'XtendLexer', 'AspectJLexer', 'CeylonLexer'] - - -class JavaLexer(RegexLexer): - """ - For `Java `_ source code. - """ - - name = 'Java' - aliases = ['java'] - filenames = ['*.java'] - mimetypes = ['text/x-java'] - - flags = re.MULTILINE | re.DOTALL - - tokens = { - 'root': [ - # method names - (r'^(\s*(?:[a-zA-Z_][a-zA-Z0-9_\.\[\]<>]*\s+)+?)' # return arguments - r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name - r'(\s*)(\()', # signature start - bygroups(using(this), Name.Function, Text, Operator)), - (r'[^\S\n]+', Text), - (r'//.*?\n', Comment.Single), - (r'/\*.*?\*/', Comment.Multiline), - (r'@[a-zA-Z_][a-zA-Z0-9_\.]*', Name.Decorator), - (r'(assert|break|case|catch|continue|default|do|else|finally|for|' - r'if|goto|instanceof|new|return|switch|this|throw|try|while)\b', - Keyword), - (r'(abstract|const|enum|extends|final|implements|native|private|' - r'protected|public|static|strictfp|super|synchronized|throws|' - r'transient|volatile)\b', Keyword.Declaration), - (r'(boolean|byte|char|double|float|int|long|short|void)\b', - Keyword.Type), - (r'(package)(\s+)', bygroups(Keyword.Namespace, Text)), - (r'(true|false|null)\b', Keyword.Constant), - (r'(class|interface)(\s+)', bygroups(Keyword.Declaration, Text), 'class'), - (r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'), - (r'"(\\\\|\\"|[^"])*"', String), - (r"'\\.'|'[^\\]'|'\\u[0-9a-fA-F]{4}'", String.Char), - (r'(\.)([a-zA-Z_][a-zA-Z0-9_]*)', bygroups(Operator, Name.Attribute)), - (r'[a-zA-Z_][a-zA-Z0-9_]*:', Name.Label), - (r'[a-zA-Z_\$][a-zA-Z0-9_]*', Name), - 
(r'[~\^\*!%&\[\]\(\)\{\}<>\|+=:;,./?-]', Operator), - (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), - (r'0x[0-9a-fA-F]+', Number.Hex), - (r'[0-9]+L?', Number.Integer), - (r'\n', Text) - ], - 'class': [ - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop') - ], - 'import': [ - (r'[a-zA-Z0-9_.]+\*?', Name.Namespace, '#pop') - ], - } - - -class AspectJLexer(JavaLexer): - """ - For `AspectJ `_ source code. - - *New in Pygments 1.6.* - """ - - name = 'AspectJ' - aliases = ['aspectj'] - filenames = ['*.aj'] - mimetypes = ['text/x-aspectj'] - - aj_keywords = [ - 'aspect', 'pointcut', 'privileged', 'call', 'execution', - 'initialization', 'preinitialization', 'handler', 'get', 'set', - 'staticinitialization', 'target', 'args', 'within', 'withincode', - 'cflow', 'cflowbelow', 'annotation', 'before', 'after', 'around', - 'proceed', 'throwing', 'returning', 'adviceexecution', 'declare', - 'parents', 'warning', 'error', 'soft', 'precedence', 'thisJoinPoint', - 'thisJoinPointStaticPart', 'thisEnclosingJoinPointStaticPart', - 'issingleton', 'perthis', 'pertarget', 'percflow', 'percflowbelow', - 'pertypewithin', 'lock', 'unlock', 'thisAspectInstance' - ] - aj_inter_type = ['parents:', 'warning:', 'error:', 'soft:', 'precedence:'] - aj_inter_type_annotation = ['@type', '@method', '@constructor', '@field'] - - def get_tokens_unprocessed(self, text): - for index, token, value in JavaLexer.get_tokens_unprocessed(self, text): - if token is Name and value in self.aj_keywords: - yield index, Keyword, value - elif token is Name.Label and value in self.aj_inter_type: - yield index, Keyword, value[:-1] - yield index, Operator, value[-1] - elif token is Name.Decorator and value in self.aj_inter_type_annotation: - yield index, Keyword, value - else: - yield index, token, value - - -class ScalaLexer(RegexLexer): - """ - For `Scala `_ source code. 
- """ - - name = 'Scala' - aliases = ['scala'] - filenames = ['*.scala'] - mimetypes = ['text/x-scala'] - - flags = re.MULTILINE | re.DOTALL - - # don't use raw unicode strings! - op = ('[-~\\^\\*!%&\\\\<>\\|+=:/?@\u00a6-\u00a7\u00a9\u00ac\u00ae\u00b0-\u00b1' - '\u00b6\u00d7\u00f7\u03f6\u0482\u0606-\u0608\u060e-\u060f\u06e9' - '\u06fd-\u06fe\u07f6\u09fa\u0b70\u0bf3-\u0bf8\u0bfa\u0c7f\u0cf1-\u0cf2' - '\u0d79\u0f01-\u0f03\u0f13-\u0f17\u0f1a-\u0f1f\u0f34\u0f36\u0f38' - '\u0fbe-\u0fc5\u0fc7-\u0fcf\u109e-\u109f\u1360\u1390-\u1399\u1940' - '\u19e0-\u19ff\u1b61-\u1b6a\u1b74-\u1b7c\u2044\u2052\u207a-\u207c' - '\u208a-\u208c\u2100-\u2101\u2103-\u2106\u2108-\u2109\u2114\u2116-\u2118' - '\u211e-\u2123\u2125\u2127\u2129\u212e\u213a-\u213b\u2140-\u2144' - '\u214a-\u214d\u214f\u2190-\u2328\u232b-\u244a\u249c-\u24e9\u2500-\u2767' - '\u2794-\u27c4\u27c7-\u27e5\u27f0-\u2982\u2999-\u29d7\u29dc-\u29fb' - '\u29fe-\u2b54\u2ce5-\u2cea\u2e80-\u2ffb\u3004\u3012-\u3013\u3020' - '\u3036-\u3037\u303e-\u303f\u3190-\u3191\u3196-\u319f\u31c0-\u31e3' - '\u3200-\u321e\u322a-\u3250\u3260-\u327f\u328a-\u32b0\u32c0-\u33ff' - '\u4dc0-\u4dff\ua490-\ua4c6\ua828-\ua82b\ufb29\ufdfd\ufe62\ufe64-\ufe66' - '\uff0b\uff1c-\uff1e\uff5c\uff5e\uffe2\uffe4\uffe8-\uffee\ufffc-\ufffd]+') - - letter = ('[a-zA-Z\\$_\u00aa\u00b5\u00ba\u00c0-\u00d6\u00d8-\u00f6' - '\u00f8-\u02af\u0370-\u0373\u0376-\u0377\u037b-\u037d\u0386' - '\u0388-\u03f5\u03f7-\u0481\u048a-\u0556\u0561-\u0587\u05d0-\u05f2' - '\u0621-\u063f\u0641-\u064a\u066e-\u066f\u0671-\u06d3\u06d5' - '\u06ee-\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5' - '\u07b1\u07ca-\u07ea\u0904-\u0939\u093d\u0950\u0958-\u0961' - '\u0972-\u097f\u0985-\u09b9\u09bd\u09ce\u09dc-\u09e1\u09f0-\u09f1' - '\u0a05-\u0a39\u0a59-\u0a5e\u0a72-\u0a74\u0a85-\u0ab9\u0abd' - '\u0ad0-\u0ae1\u0b05-\u0b39\u0b3d\u0b5c-\u0b61\u0b71\u0b83-\u0bb9' - '\u0bd0\u0c05-\u0c3d\u0c58-\u0c61\u0c85-\u0cb9\u0cbd\u0cde-\u0ce1' - '\u0d05-\u0d3d\u0d60-\u0d61\u0d7a-\u0d7f\u0d85-\u0dc6\u0e01-\u0e30' - 
'\u0e32-\u0e33\u0e40-\u0e45\u0e81-\u0eb0\u0eb2-\u0eb3\u0ebd-\u0ec4' - '\u0edc-\u0f00\u0f40-\u0f6c\u0f88-\u0f8b\u1000-\u102a\u103f' - '\u1050-\u1055\u105a-\u105d\u1061\u1065-\u1066\u106e-\u1070' - '\u1075-\u1081\u108e\u10a0-\u10fa\u1100-\u135a\u1380-\u138f' - '\u13a0-\u166c\u166f-\u1676\u1681-\u169a\u16a0-\u16ea\u16ee-\u1711' - '\u1720-\u1731\u1740-\u1751\u1760-\u1770\u1780-\u17b3\u17dc' - '\u1820-\u1842\u1844-\u18a8\u18aa-\u191c\u1950-\u19a9\u19c1-\u19c7' - '\u1a00-\u1a16\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae-\u1baf' - '\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c77\u1d00-\u1d2b\u1d62-\u1d77' - '\u1d79-\u1d9a\u1e00-\u1fbc\u1fbe\u1fc2-\u1fcc\u1fd0-\u1fdb' - '\u1fe0-\u1fec\u1ff2-\u1ffc\u2071\u207f\u2102\u2107\u210a-\u2113' - '\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u212f-\u2139' - '\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c7c' - '\u2c80-\u2ce4\u2d00-\u2d65\u2d80-\u2dde\u3006-\u3007\u3021-\u3029' - '\u3038-\u303a\u303c\u3041-\u3096\u309f\u30a1-\u30fa\u30ff-\u318e' - '\u31a0-\u31b7\u31f0-\u31ff\u3400-\u4db5\u4e00-\ua014\ua016-\ua48c' - '\ua500-\ua60b\ua610-\ua61f\ua62a-\ua66e\ua680-\ua697\ua722-\ua76f' - '\ua771-\ua787\ua78b-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822' - '\ua840-\ua873\ua882-\ua8b3\ua90a-\ua925\ua930-\ua946\uaa00-\uaa28' - '\uaa40-\uaa42\uaa44-\uaa4b\uac00-\ud7a3\uf900-\ufb1d\ufb1f-\ufb28' - '\ufb2a-\ufd3d\ufd50-\ufdfb\ufe70-\ufefc\uff21-\uff3a\uff41-\uff5a' - '\uff66-\uff6f\uff71-\uff9d\uffa0-\uffdc]') - - upper = ('[A-Z\\$_\u00c0-\u00d6\u00d8-\u00de\u0100\u0102\u0104\u0106\u0108' - '\u010a\u010c\u010e\u0110\u0112\u0114\u0116\u0118\u011a\u011c' - '\u011e\u0120\u0122\u0124\u0126\u0128\u012a\u012c\u012e\u0130' - '\u0132\u0134\u0136\u0139\u013b\u013d\u013f\u0141\u0143\u0145' - '\u0147\u014a\u014c\u014e\u0150\u0152\u0154\u0156\u0158\u015a' - '\u015c\u015e\u0160\u0162\u0164\u0166\u0168\u016a\u016c\u016e' - '\u0170\u0172\u0174\u0176\u0178-\u0179\u017b\u017d\u0181-\u0182' - 
'\u0184\u0186-\u0187\u0189-\u018b\u018e-\u0191\u0193-\u0194' - '\u0196-\u0198\u019c-\u019d\u019f-\u01a0\u01a2\u01a4\u01a6-\u01a7' - '\u01a9\u01ac\u01ae-\u01af\u01b1-\u01b3\u01b5\u01b7-\u01b8\u01bc' - '\u01c4\u01c7\u01ca\u01cd\u01cf\u01d1\u01d3\u01d5\u01d7\u01d9' - '\u01db\u01de\u01e0\u01e2\u01e4\u01e6\u01e8\u01ea\u01ec\u01ee' - '\u01f1\u01f4\u01f6-\u01f8\u01fa\u01fc\u01fe\u0200\u0202\u0204' - '\u0206\u0208\u020a\u020c\u020e\u0210\u0212\u0214\u0216\u0218' - '\u021a\u021c\u021e\u0220\u0222\u0224\u0226\u0228\u022a\u022c' - '\u022e\u0230\u0232\u023a-\u023b\u023d-\u023e\u0241\u0243-\u0246' - '\u0248\u024a\u024c\u024e\u0370\u0372\u0376\u0386\u0388-\u038f' - '\u0391-\u03ab\u03cf\u03d2-\u03d4\u03d8\u03da\u03dc\u03de\u03e0' - '\u03e2\u03e4\u03e6\u03e8\u03ea\u03ec\u03ee\u03f4\u03f7' - '\u03f9-\u03fa\u03fd-\u042f\u0460\u0462\u0464\u0466\u0468\u046a' - '\u046c\u046e\u0470\u0472\u0474\u0476\u0478\u047a\u047c\u047e' - '\u0480\u048a\u048c\u048e\u0490\u0492\u0494\u0496\u0498\u049a' - '\u049c\u049e\u04a0\u04a2\u04a4\u04a6\u04a8\u04aa\u04ac\u04ae' - '\u04b0\u04b2\u04b4\u04b6\u04b8\u04ba\u04bc\u04be\u04c0-\u04c1' - '\u04c3\u04c5\u04c7\u04c9\u04cb\u04cd\u04d0\u04d2\u04d4\u04d6' - '\u04d8\u04da\u04dc\u04de\u04e0\u04e2\u04e4\u04e6\u04e8\u04ea' - '\u04ec\u04ee\u04f0\u04f2\u04f4\u04f6\u04f8\u04fa\u04fc\u04fe' - '\u0500\u0502\u0504\u0506\u0508\u050a\u050c\u050e\u0510\u0512' - '\u0514\u0516\u0518\u051a\u051c\u051e\u0520\u0522\u0531-\u0556' - '\u10a0-\u10c5\u1e00\u1e02\u1e04\u1e06\u1e08\u1e0a\u1e0c\u1e0e' - '\u1e10\u1e12\u1e14\u1e16\u1e18\u1e1a\u1e1c\u1e1e\u1e20\u1e22' - '\u1e24\u1e26\u1e28\u1e2a\u1e2c\u1e2e\u1e30\u1e32\u1e34\u1e36' - '\u1e38\u1e3a\u1e3c\u1e3e\u1e40\u1e42\u1e44\u1e46\u1e48\u1e4a' - '\u1e4c\u1e4e\u1e50\u1e52\u1e54\u1e56\u1e58\u1e5a\u1e5c\u1e5e' - '\u1e60\u1e62\u1e64\u1e66\u1e68\u1e6a\u1e6c\u1e6e\u1e70\u1e72' - '\u1e74\u1e76\u1e78\u1e7a\u1e7c\u1e7e\u1e80\u1e82\u1e84\u1e86' - '\u1e88\u1e8a\u1e8c\u1e8e\u1e90\u1e92\u1e94\u1e9e\u1ea0\u1ea2' - 
'\u1ea4\u1ea6\u1ea8\u1eaa\u1eac\u1eae\u1eb0\u1eb2\u1eb4\u1eb6' - '\u1eb8\u1eba\u1ebc\u1ebe\u1ec0\u1ec2\u1ec4\u1ec6\u1ec8\u1eca' - '\u1ecc\u1ece\u1ed0\u1ed2\u1ed4\u1ed6\u1ed8\u1eda\u1edc\u1ede' - '\u1ee0\u1ee2\u1ee4\u1ee6\u1ee8\u1eea\u1eec\u1eee\u1ef0\u1ef2' - '\u1ef4\u1ef6\u1ef8\u1efa\u1efc\u1efe\u1f08-\u1f0f\u1f18-\u1f1d' - '\u1f28-\u1f2f\u1f38-\u1f3f\u1f48-\u1f4d\u1f59-\u1f5f' - '\u1f68-\u1f6f\u1fb8-\u1fbb\u1fc8-\u1fcb\u1fd8-\u1fdb' - '\u1fe8-\u1fec\u1ff8-\u1ffb\u2102\u2107\u210b-\u210d\u2110-\u2112' - '\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u2130-\u2133' - '\u213e-\u213f\u2145\u2183\u2c00-\u2c2e\u2c60\u2c62-\u2c64\u2c67' - '\u2c69\u2c6b\u2c6d-\u2c6f\u2c72\u2c75\u2c80\u2c82\u2c84\u2c86' - '\u2c88\u2c8a\u2c8c\u2c8e\u2c90\u2c92\u2c94\u2c96\u2c98\u2c9a' - '\u2c9c\u2c9e\u2ca0\u2ca2\u2ca4\u2ca6\u2ca8\u2caa\u2cac\u2cae' - '\u2cb0\u2cb2\u2cb4\u2cb6\u2cb8\u2cba\u2cbc\u2cbe\u2cc0\u2cc2' - '\u2cc4\u2cc6\u2cc8\u2cca\u2ccc\u2cce\u2cd0\u2cd2\u2cd4\u2cd6' - '\u2cd8\u2cda\u2cdc\u2cde\u2ce0\u2ce2\ua640\ua642\ua644\ua646' - '\ua648\ua64a\ua64c\ua64e\ua650\ua652\ua654\ua656\ua658\ua65a' - '\ua65c\ua65e\ua662\ua664\ua666\ua668\ua66a\ua66c\ua680\ua682' - '\ua684\ua686\ua688\ua68a\ua68c\ua68e\ua690\ua692\ua694\ua696' - '\ua722\ua724\ua726\ua728\ua72a\ua72c\ua72e\ua732\ua734\ua736' - '\ua738\ua73a\ua73c\ua73e\ua740\ua742\ua744\ua746\ua748\ua74a' - '\ua74c\ua74e\ua750\ua752\ua754\ua756\ua758\ua75a\ua75c\ua75e' - '\ua760\ua762\ua764\ua766\ua768\ua76a\ua76c\ua76e\ua779\ua77b' - '\ua77d-\ua77e\ua780\ua782\ua784\ua786\ua78b\uff21-\uff3a]') - - idrest = r'%s(?:%s|[0-9])*(?:(?<=_)%s)?' 
% (letter, letter, op) - - tokens = { - 'root': [ - # method names - (r'(class|trait|object)(\s+)', bygroups(Keyword, Text), 'class'), - (r"'%s" % idrest, Text.Symbol), - (r'[^\S\n]+', Text), - (r'//.*?\n', Comment.Single), - (r'/\*', Comment.Multiline, 'comment'), - (r'@%s' % idrest, Name.Decorator), - (r'(abstract|ca(?:se|tch)|d(?:ef|o)|e(?:lse|xtends)|' - r'f(?:inal(?:ly)?|or(?:Some)?)|i(?:f|mplicit)|' - r'lazy|match|new|override|pr(?:ivate|otected)' - r'|re(?:quires|turn)|s(?:ealed|uper)|' - r't(?:h(?:is|row)|ry)|va[lr]|w(?:hile|ith)|yield)\b|' - '(<[%:-]|=>|>:|[#=@_\u21D2\u2190])(\\b|(?=\\s)|$)', Keyword), - (r':(?!%s)' % op, Keyword, 'type'), - (r'%s%s\b' % (upper, idrest), Name.Class), - (r'(true|false|null)\b', Keyword.Constant), - (r'(import|package)(\s+)', bygroups(Keyword, Text), 'import'), - (r'(type)(\s+)', bygroups(Keyword, Text), 'type'), - (r'""".*?"""(?!")', String), - (r'"(\\\\|\\"|[^"])*"', String), - (r"'\\.'|'[^\\]'|'\\u[0-9a-fA-F]{4}'", String.Char), -# (ur'(\.)(%s|%s|`[^`]+`)' % (idrest, op), bygroups(Operator, -# Name.Attribute)), - (idrest, Name), - (r'`[^`]+`', Name), - (r'\[', Operator, 'typeparam'), - (r'[\(\)\{\};,.#]', Operator), - (op, Operator), - (r'([0-9][0-9]*\.[0-9]*|\.[0-9]+)([eE][+-]?[0-9]+)?[fFdD]?', - Number.Float), - (r'0x[0-9a-fA-F]+', Number.Hex), - (r'[0-9]+L?', Number.Integer), - (r'\n', Text) - ], - 'class': [ - (r'(%s|%s|`[^`]+`)(\s*)(\[)' % (idrest, op), - bygroups(Name.Class, Text, Operator), 'typeparam'), - (r'\s+', Text), - (r'{', Operator, '#pop'), - (r'\(', Operator, '#pop'), - (r'//.*?\n', Comment.Single, '#pop'), - (r'%s|%s|`[^`]+`' % (idrest, op), Name.Class, '#pop'), - ], - 'type': [ - (r'\s+', Text), - ('<[%:]|>:|[#_\u21D2]|forSome|type', Keyword), - (r'([,\);}]|=>|=)(\s*)', bygroups(Operator, Text), '#pop'), - (r'[\(\{]', Operator, '#push'), - (r'((?:%s|%s|`[^`]+`)(?:\.(?:%s|%s|`[^`]+`))*)(\s*)(\[)' % - (idrest, op, idrest, op), - bygroups(Keyword.Type, Text, Operator), ('#pop', 'typeparam')), - 
(r'((?:%s|%s|`[^`]+`)(?:\.(?:%s|%s|`[^`]+`))*)(\s*)$' % - (idrest, op, idrest, op), - bygroups(Keyword.Type, Text), '#pop'), - (r'//.*?\n', Comment.Single, '#pop'), - (r'\.|%s|%s|`[^`]+`' % (idrest, op), Keyword.Type) - ], - 'typeparam': [ - (r'[\s,]+', Text), - ('<[%:]|=>|>:|[#_\u21D2]|forSome|type', Keyword), - (r'([\]\)\}])', Operator, '#pop'), - (r'[\(\[\{]', Operator, '#push'), - (r'\.|%s|%s|`[^`]+`' % (idrest, op), Keyword.Type) - ], - 'comment': [ - (r'[^/\*]+', Comment.Multiline), - (r'/\*', Comment.Multiline, '#push'), - (r'\*/', Comment.Multiline, '#pop'), - (r'[*/]', Comment.Multiline) - ], - 'import': [ - (r'(%s|\.)+' % idrest, Name.Namespace, '#pop') - ], - } - - -class GosuLexer(RegexLexer): - """ - For Gosu source code. - - *New in Pygments 1.5.* - """ - - name = 'Gosu' - aliases = ['gosu'] - filenames = ['*.gs', '*.gsx', '*.gsp', '*.vark'] - mimetypes = ['text/x-gosu'] - - flags = re.MULTILINE | re.DOTALL - - tokens = { - 'root': [ - # method names - (r'^(\s*(?:[a-zA-Z_][a-zA-Z0-9_\.\[\]]*\s+)+?)' # modifiers etc. 
- r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name - r'(\s*)(\()', # signature start - bygroups(using(this), Name.Function, Text, Operator)), - (r'[^\S\n]+', Text), - (r'//.*?\n', Comment.Single), - (r'/\*.*?\*/', Comment.Multiline), - (r'@[a-zA-Z_][a-zA-Z0-9_\.]*', Name.Decorator), - (r'(in|as|typeof|statictypeof|typeis|typeas|if|else|foreach|for|' - r'index|while|do|continue|break|return|try|catch|finally|this|' - r'throw|new|switch|case|default|eval|super|outer|classpath|' - r'using)\b', Keyword), - (r'(var|delegate|construct|function|private|internal|protected|' - r'public|abstract|override|final|static|extends|transient|' - r'implements|represents|readonly)\b', Keyword.Declaration), - (r'(property\s+)(get|set)?', Keyword.Declaration), - (r'(boolean|byte|char|double|float|int|long|short|void|block)\b', - Keyword.Type), - (r'(package)(\s+)', bygroups(Keyword.Namespace, Text)), - (r'(true|false|null|NaN|Infinity)\b', Keyword.Constant), - (r'(class|interface|enhancement|enum)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)', - bygroups(Keyword.Declaration, Text, Name.Class)), - (r'(uses)(\s+)([a-zA-Z0-9_.]+\*?)', - bygroups(Keyword.Namespace, Text, Name.Namespace)), - (r'"', String, 'string'), - (r'(\??[\.#])([a-zA-Z_][a-zA-Z0-9_]*)', - bygroups(Operator, Name.Attribute)), - (r'(:)([a-zA-Z_][a-zA-Z0-9_]*)', - bygroups(Operator, Name.Attribute)), - (r'[a-zA-Z_\$][a-zA-Z0-9_]*', Name), - (r'and|or|not|[\\~\^\*!%&\[\]\(\)\{\}<>\|+=:;,./?-]', Operator), - (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), - (r'[0-9]+', Number.Integer), - (r'\n', Text) - ], - 'templateText': [ - (r'(\\<)|(\\\$)', String), - (r'(<%@\s+)(extends|params)', - bygroups(Operator, Name.Decorator), 'stringTemplate'), - (r'<%!--.*?--%>', Comment.Multiline), - (r'(<%)|(<%=)', Operator, 'stringTemplate'), - (r'\$\{', Operator, 'stringTemplateShorthand'), - (r'.', String) - ], - 'string': [ - (r'"', String, '#pop'), - include('templateText') - ], - 'stringTemplate': [ - (r'"', String, 'string'), - (r'%>', Operator, 
'#pop'), - include('root') - ], - 'stringTemplateShorthand': [ - (r'"', String, 'string'), - (r'\{', Operator, 'stringTemplateShorthand'), - (r'\}', Operator, '#pop'), - include('root') - ], - } - - -class GosuTemplateLexer(Lexer): - """ - For Gosu templates. - - *New in Pygments 1.5.* - """ - - name = 'Gosu Template' - aliases = ['gst'] - filenames = ['*.gst'] - mimetypes = ['text/x-gosu-template'] - lexer = GosuLexer() - - def get_tokens_unprocessed(self, text): - stack = ['templateText'] - for item in self.lexer.get_tokens_unprocessed(text, stack): - yield item - - -class GroovyLexer(RegexLexer): - """ - For `Groovy `_ source code. - - *New in Pygments 1.5.* - """ - - name = 'Groovy' - aliases = ['groovy'] - filenames = ['*.groovy'] - mimetypes = ['text/x-groovy'] - - flags = re.MULTILINE | re.DOTALL - - tokens = { - 'root': [ - # method names - (r'^(\s*(?:[a-zA-Z_][a-zA-Z0-9_\.\[\]]*\s+)+?)' # return arguments - r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name - r'(\s*)(\()', # signature start - bygroups(using(this), Name.Function, Text, Operator)), - (r'[^\S\n]+', Text), - (r'//.*?\n', Comment.Single), - (r'/\*.*?\*/', Comment.Multiline), - (r'@[a-zA-Z_][a-zA-Z0-9_\.]*', Name.Decorator), - (r'(assert|break|case|catch|continue|default|do|else|finally|for|' - r'if|goto|instanceof|new|return|switch|this|throw|try|while|in|as)\b', - Keyword), - (r'(abstract|const|enum|extends|final|implements|native|private|' - r'protected|public|static|strictfp|super|synchronized|throws|' - r'transient|volatile)\b', Keyword.Declaration), - (r'(def|boolean|byte|char|double|float|int|long|short|void)\b', - Keyword.Type), - (r'(package)(\s+)', bygroups(Keyword.Namespace, Text)), - (r'(true|false|null)\b', Keyword.Constant), - (r'(class|interface)(\s+)', bygroups(Keyword.Declaration, Text), - 'class'), - (r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'), - (r'"(\\\\|\\"|[^"])*"', String.Double), - (r"'(\\\\|\\'|[^'])*'", String.Single), - (r'\$/((?!/\$).)*/\$', String), - 
(r'/(\\\\|\\"|[^/])*/', String), - (r"'\\.'|'[^\\]'|'\\u[0-9a-fA-F]{4}'", String.Char), - (r'(\.)([a-zA-Z_][a-zA-Z0-9_]*)', bygroups(Operator, Name.Attribute)), - (r'[a-zA-Z_][a-zA-Z0-9_]*:', Name.Label), - (r'[a-zA-Z_\$][a-zA-Z0-9_]*', Name), - (r'[~\^\*!%&\[\]\(\)\{\}<>\|+=:;,./?-]', Operator), - (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), - (r'0x[0-9a-fA-F]+', Number.Hex), - (r'[0-9]+L?', Number.Integer), - (r'\n', Text) - ], - 'class': [ - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop') - ], - 'import': [ - (r'[a-zA-Z0-9_.]+\*?', Name.Namespace, '#pop') - ], - } - - -class IokeLexer(RegexLexer): - """ - For `Ioke `_ (a strongly typed, dynamic, - prototype based programming language) source. - - *New in Pygments 1.4.* - """ - name = 'Ioke' - filenames = ['*.ik'] - aliases = ['ioke', 'ik'] - mimetypes = ['text/x-iokesrc'] - tokens = { - 'interpolatableText': [ - (r'(\\b|\\e|\\t|\\n|\\f|\\r|\\"|\\\\|\\#|\\\Z|\\u[0-9a-fA-F]{1,4}' - r'|\\[0-3]?[0-7]?[0-7])', String.Escape), - (r'#{', Punctuation, 'textInterpolationRoot') - ], - - 'text': [ - (r'(?>|\|\|>>|\*\*>>|:::|::|\.\.\.|===|\*\*>|\*\*=|&&>|&&=|' - r'\|\|>|\|\|=|\->>|\+>>|!>>|<>>>|<>>|&>>|%>>|#>>|@>>|/>>|\*>>|' - r'\?>>|\|>>|\^>>|~>>|\$>>|=>>|<<=|>>=|<=>|<\->|=~|!~|=>|\+\+|' - r'\-\-|<=|>=|==|!=|&&|\.\.|\+=|\-=|\*=|\/=|%=|&=|\^=|\|=|<\-|' - r'\+>|!>|<>|&>|%>|#>|\@>|\/>|\*>|\?>|\|>|\^>|~>|\$>|<\->|\->|' - r'<<|>>|\*\*|\?\||\?&|\|\||>|<|\*|\/|%|\+|\-|&|\^|\||=|\$|!|~|' - r'\?|#|\u2260|\u2218|\u2208|\u2209)', Operator), - (r'(and|nand|or|xor|nor|return|import)(?![a-zA-Z0-9_!?])', - Operator), - - # Punctuation - (r'(\`\`|\`|\'\'|\'|\.|\,|@@|@|\[|\]|\(|\)|{|})', Punctuation), - - #kinds - (r'[A-Z][a-zA-Z0-9_!:?]*', Name.Class), - - #default cellnames - (r'[a-z_][a-zA-Z0-9_!:?]*', Name) - ] - } - - -class ClojureLexer(RegexLexer): - """ - Lexer for `Clojure `_ source code. 
- - *New in Pygments 0.11.* - """ - name = 'Clojure' - aliases = ['clojure', 'clj'] - filenames = ['*.clj'] - mimetypes = ['text/x-clojure', 'application/x-clojure'] - - special_forms = [ - '.', 'def', 'do', 'fn', 'if', 'let', 'new', 'quote', 'var', 'loop' - ] - - # It's safe to consider 'ns' a declaration thing because it defines a new - # namespace. - declarations = [ - 'def-', 'defn', 'defn-', 'defmacro', 'defmulti', 'defmethod', - 'defstruct', 'defonce', 'declare', 'definline', 'definterface', - 'defprotocol', 'defrecord', 'deftype', 'defproject', 'ns' - ] - - builtins = [ - '*', '+', '-', '->', '/', '<', '<=', '=', '==', '>', '>=', '..', - 'accessor', 'agent', 'agent-errors', 'aget', 'alength', 'all-ns', - 'alter', 'and', 'append-child', 'apply', 'array-map', 'aset', - 'aset-boolean', 'aset-byte', 'aset-char', 'aset-double', 'aset-float', - 'aset-int', 'aset-long', 'aset-short', 'assert', 'assoc', 'await', - 'await-for', 'bean', 'binding', 'bit-and', 'bit-not', 'bit-or', - 'bit-shift-left', 'bit-shift-right', 'bit-xor', 'boolean', 'branch?', - 'butlast', 'byte', 'cast', 'char', 'children', 'class', - 'clear-agent-errors', 'comment', 'commute', 'comp', 'comparator', - 'complement', 'concat', 'conj', 'cons', 'constantly', 'cond', 'if-not', - 'construct-proxy', 'contains?', 'count', 'create-ns', 'create-struct', - 'cycle', 'dec', 'deref', 'difference', 'disj', 'dissoc', 'distinct', - 'doall', 'doc', 'dorun', 'doseq', 'dosync', 'dotimes', 'doto', - 'double', 'down', 'drop', 'drop-while', 'edit', 'end?', 'ensure', - 'eval', 'every?', 'false?', 'ffirst', 'file-seq', 'filter', 'find', - 'find-doc', 'find-ns', 'find-var', 'first', 'float', 'flush', 'for', - 'fnseq', 'frest', 'gensym', 'get-proxy-class', 'get', - 'hash-map', 'hash-set', 'identical?', 'identity', 'if-let', 'import', - 'in-ns', 'inc', 'index', 'insert-child', 'insert-left', 'insert-right', - 'inspect-table', 'inspect-tree', 'instance?', 'int', 'interleave', - 'intersection', 'into', 'into-array', 
'iterate', 'join', 'key', 'keys', - 'keyword', 'keyword?', 'last', 'lazy-cat', 'lazy-cons', 'left', - 'lefts', 'line-seq', 'list*', 'list', 'load', 'load-file', - 'locking', 'long', 'loop', 'macroexpand', 'macroexpand-1', - 'make-array', 'make-node', 'map', 'map-invert', 'map?', 'mapcat', - 'max', 'max-key', 'memfn', 'merge', 'merge-with', 'meta', 'min', - 'min-key', 'name', 'namespace', 'neg?', 'new', 'newline', 'next', - 'nil?', 'node', 'not', 'not-any?', 'not-every?', 'not=', 'ns-imports', - 'ns-interns', 'ns-map', 'ns-name', 'ns-publics', 'ns-refers', - 'ns-resolve', 'ns-unmap', 'nth', 'nthrest', 'or', 'parse', 'partial', - 'path', 'peek', 'pop', 'pos?', 'pr', 'pr-str', 'print', 'print-str', - 'println', 'println-str', 'prn', 'prn-str', 'project', 'proxy', - 'proxy-mappings', 'quot', 'rand', 'rand-int', 'range', 're-find', - 're-groups', 're-matcher', 're-matches', 're-pattern', 're-seq', - 'read', 'read-line', 'reduce', 'ref', 'ref-set', 'refer', 'rem', - 'remove', 'remove-method', 'remove-ns', 'rename', 'rename-keys', - 'repeat', 'replace', 'replicate', 'resolve', 'rest', 'resultset-seq', - 'reverse', 'rfirst', 'right', 'rights', 'root', 'rrest', 'rseq', - 'second', 'select', 'select-keys', 'send', 'send-off', 'seq', - 'seq-zip', 'seq?', 'set', 'short', 'slurp', 'some', 'sort', - 'sort-by', 'sorted-map', 'sorted-map-by', 'sorted-set', - 'special-symbol?', 'split-at', 'split-with', 'str', 'string?', - 'struct', 'struct-map', 'subs', 'subvec', 'symbol', 'symbol?', - 'sync', 'take', 'take-nth', 'take-while', 'test', 'time', 'to-array', - 'to-array-2d', 'tree-seq', 'true?', 'union', 'up', 'update-proxy', - 'val', 'vals', 'var-get', 'var-set', 'var?', 'vector', 'vector-zip', - 'vector?', 'when', 'when-first', 'when-let', 'when-not', - 'with-local-vars', 'with-meta', 'with-open', 'with-out-str', - 'xml-seq', 'xml-zip', 'zero?', 'zipmap', 'zipper'] - - # valid names for identifiers - # well, names can only not consist fully of numbers - # but this should be good 
enough for now - - # TODO / should divide keywords/symbols into namespace/rest - # but that's hard, so just pretend / is part of the name - valid_name = r'(?!#)[\w!$%*+<=>?/.#-]+' - - def _multi_escape(entries): - return '(%s)' % ('|'.join(re.escape(entry) + ' ' for entry in entries)) - - tokens = { - 'root': [ - # the comments - always starting with semicolon - # and going to the end of the line - (r';.*$', Comment.Single), - - # whitespaces - usually not relevant - (r'[,\s]+', Text), - - # numbers - (r'-?\d+\.\d+', Number.Float), - (r'-?\d+', Number.Integer), - (r'0x-?[abcdef\d]+', Number.Hex), - - # strings, symbols and characters - (r'"(\\\\|\\"|[^"])*"', String), - (r"'" + valid_name, String.Symbol), - (r"\\(.|[a-z]+)", String.Char), - - # keywords - (r'::?' + valid_name, String.Symbol), - - # special operators - (r'~@|[`\'#^~&@]', Operator), - - # highlight the special forms - (_multi_escape(special_forms), Keyword), - - # Technically, only the special forms are 'keywords'. The problem - # is that only treating them as keywords means that things like - # 'defn' and 'ns' need to be highlighted as builtins. This is ugly - # and weird for most styles. So, as a compromise we're going to - # highlight them as Keyword.Declarations. - (_multi_escape(declarations), Keyword.Declaration), - - # highlight the builtins - (_multi_escape(builtins), Name.Builtin), - - # the remaining functions - (r'(?<=\()' + valid_name, Name.Function), - - # find the remaining variables - (valid_name, Name.Variable), - - # Clojure accepts vector notation - (r'(\[|\])', Punctuation), - - # Clojure accepts map notation - (r'(\{|\})', Punctuation), - - # the famous parentheses! - (r'(\(|\))', Punctuation), - ], - } - - -class TeaLangLexer(RegexLexer): - """ - For `Tea `_ source code. Only used within a - TeaTemplateLexer. 
- - *New in Pygments 1.5.* - """ - - flags = re.MULTILINE | re.DOTALL - - tokens = { - 'root': [ - # method names - (r'^(\s*(?:[a-zA-Z_][a-zA-Z0-9_\.\[\]]*\s+)+?)' # return arguments - r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name - r'(\s*)(\()', # signature start - bygroups(using(this), Name.Function, Text, Operator)), - (r'[^\S\n]+', Text), - (r'//.*?\n', Comment.Single), - (r'/\*.*?\*/', Comment.Multiline), - (r'@[a-zA-Z_][a-zA-Z0-9_\.]*', Name.Decorator), - (r'(and|break|else|foreach|if|in|not|or|reverse)\b', - Keyword), - (r'(as|call|define)\b', Keyword.Declaration), - (r'(true|false|null)\b', Keyword.Constant), - (r'(template)(\s+)', bygroups(Keyword.Declaration, Text), 'template'), - (r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'), - (r'"(\\\\|\\"|[^"])*"', String), - (r'\'(\\\\|\\\'|[^\'])*\'', String), - (r'(\.)([a-zA-Z_][a-zA-Z0-9_]*)', bygroups(Operator, Name.Attribute)), - (r'[a-zA-Z_][a-zA-Z0-9_]*:', Name.Label), - (r'[a-zA-Z_\$][a-zA-Z0-9_]*', Name), - (r'(isa|[.]{3}|[.]{2}|[=#!<>+-/%&;,.\*\\\(\)\[\]\{\}])', Operator), - (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), - (r'0x[0-9a-fA-F]+', Number.Hex), - (r'[0-9]+L?', Number.Integer), - (r'\n', Text) - ], - 'template': [ - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop') - ], - 'import': [ - (r'[a-zA-Z0-9_.]+\*?', Name.Namespace, '#pop') - ], - } - - -class CeylonLexer(RegexLexer): - """ - For `Ceylon `_ source code. 
- - *New in Pygments 1.6.* - """ - - name = 'Ceylon' - aliases = ['ceylon'] - filenames = ['*.ceylon'] - mimetypes = ['text/x-ceylon'] - - flags = re.MULTILINE | re.DOTALL - - #: optional Comment or Whitespace - _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+' - - tokens = { - 'root': [ - # method names - (r'^(\s*(?:[a-zA-Z_][a-zA-Z0-9_\.\[\]]*\s+)+?)' # return arguments - r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name - r'(\s*)(\()', # signature start - bygroups(using(this), Name.Function, Text, Operator)), - (r'[^\S\n]+', Text), - (r'//.*?\n', Comment.Single), - (r'/\*.*?\*/', Comment.Multiline), - (r'(variable|shared|abstract|doc|by|formal|actual|late|native)', - Name.Decorator), - (r'(break|case|catch|continue|default|else|finally|for|in|' - r'variable|if|return|switch|this|throw|try|while|is|exists|dynamic|' - r'nonempty|then|outer|assert)\b', Keyword), - (r'(abstracts|extends|satisfies|adapts|' - r'super|given|of|out|assign|' - r'transient|volatile)\b', Keyword.Declaration), - (r'(function|value|void)\b', - Keyword.Type), - (r'(package)(\s+)', bygroups(Keyword.Namespace, Text)), - (r'(true|false|null)\b', Keyword.Constant), - (r'(class|interface|object|alias)(\s+)', - bygroups(Keyword.Declaration, Text), 'class'), - (r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'), - (r'"(\\\\|\\"|[^"])*"', String), - (r"'\\.'|'[^\\]'|'\\\{#[0-9a-fA-F]{4}\}'", String.Char), - (r'".*``.*``.*"', String.Interpol), - (r'(\.)([a-z_][a-zA-Z0-9_]*)', - bygroups(Operator, Name.Attribute)), - (r'[a-zA-Z_][a-zA-Z0-9_]*:', Name.Label), - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name), - (r'[~\^\*!%&\[\]\(\)\{\}<>\|+=:;,./?-]', Operator), - (r'\d{1,3}(_\d{3})+\.\d{1,3}(_\d{3})+[kMGTPmunpf]?', Number.Float), - (r'\d{1,3}(_\d{3})+\.[0-9]+([eE][+-]?[0-9]+)?[kMGTPmunpf]?', - Number.Float), - (r'[0-9][0-9]*\.\d{1,3}(_\d{3})+[kMGTPmunpf]?', Number.Float), - (r'[0-9][0-9]*\.[0-9]+([eE][+-]?[0-9]+)?[kMGTPmunpf]?', - Number.Float), - (r'#([0-9a-fA-F]{4})(_[0-9a-fA-F]{4})+', Number.Hex), - (r'#[0-9a-fA-F]+', 
Number.Hex), - (r'\$([01]{4})(_[01]{4})+', Number.Integer), - (r'\$[01]+', Number.Integer), - (r'\d{1,3}(_\d{3})+[kMGTP]?', Number.Integer), - (r'[0-9]+[kMGTP]?', Number.Integer), - (r'\n', Text) - ], - 'class': [ - (r'[A-Za-z_][a-zA-Z0-9_]*', Name.Class, '#pop') - ], - 'import': [ - (r'[a-z][a-zA-Z0-9_.]*', - Name.Namespace, '#pop') - ], - } - - -class KotlinLexer(RegexLexer): - """ - For `Kotlin `_ - source code. - - Additional options accepted: - - `unicodelevel` - Determines which Unicode characters this lexer allows for identifiers. - The possible values are: - - * ``none`` -- only the ASCII letters and numbers are allowed. This - is the fastest selection. - * ``basic`` -- all Unicode characters from the specification except - category ``Lo`` are allowed. - * ``full`` -- all Unicode characters as specified in the C# specs - are allowed. Note that this means a considerable slowdown since the - ``Lo`` category has more than 40,000 characters in it! - - The default value is ``basic``. 
- - *New in Pygments 1.5.* - """ - - name = 'Kotlin' - aliases = ['kotlin'] - filenames = ['*.kt'] - mimetypes = ['text/x-kotlin'] # inferred - - flags = re.MULTILINE | re.DOTALL | re.UNICODE - - # for the range of allowed unicode characters in identifiers, - # see http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf - - levels = { - 'none': '@?[_a-zA-Z][a-zA-Z0-9_]*', - 'basic': ('@?[_' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl + ']' + - '[' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl + - uni.Nd + uni.Pc + uni.Cf + uni.Mn + uni.Mc + ']*'), - 'full': ('@?(?:_|[^' + - uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])' - + '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl', - 'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'), - } - - tokens = {} - token_variants = True - - for levelname, cs_ident in list(levels.items()): - tokens[levelname] = { - 'root': [ - # method names - (r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)' # return type - r'(' + cs_ident + ')' # method name - r'(\s*)(\()', # signature start - bygroups(using(this), Name.Function, Text, Punctuation)), - (r'^\s*\[.*?\]', Name.Attribute), - (r'[^\S\n]+', Text), - (r'\\\n', Text), # line continuation - (r'//.*?\n', Comment.Single), - (r'/[*](.|\n)*?[*]/', Comment.Multiline), - (r'\n', Text), - (r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation), - (r'[{}]', Punctuation), - (r'@"(""|[^"])*"', String), - (r'"(\\\\|\\"|[^"\n])*["\n]', String), - (r"'\\.'|'[^\\]'", String.Char), - (r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?" 
- r"[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?", Number), - (r'#[ \t]*(if|endif|else|elif|define|undef|' - r'line|error|warning|region|endregion|pragma)\b.*?\n', - Comment.Preproc), - (r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Text, - Keyword)), - (r'(abstract|as|break|catch|' - r'fun|continue|default|delegate|' - r'do|else|enum|extern|false|finally|' - r'fixed|for|goto|if|implicit|in|interface|' - r'internal|is|lock|null|' - r'out|override|private|protected|public|readonly|' - r'ref|return|sealed|sizeof|' - r'when|this|throw|true|try|typeof|' - r'unchecked|unsafe|virtual|void|while|' - r'get|set|new|partial|yield|val|var)\b', Keyword), - (r'(global)(::)', bygroups(Keyword, Punctuation)), - (r'(bool|byte|char|decimal|double|dynamic|float|int|long|' - r'short)\b\??', Keyword.Type), - (r'(class|struct)(\s+)', bygroups(Keyword, Text), 'class'), - (r'(package|using)(\s+)', bygroups(Keyword, Text), 'package'), - (cs_ident, Name), - ], - 'class': [ - (cs_ident, Name.Class, '#pop') - ], - 'package': [ - (r'(?=\()', Text, '#pop'), # using (resource) - ('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop') - ] - } - - def __init__(self, **options): - level = get_choice_opt(options, 'unicodelevel', list(self.tokens.keys()), - 'basic') - if level not in self._all_tokens: - # compile the regexes now - self._tokens = self.__class__.process_tokendef(level) - else: - self._tokens = self._all_tokens[level] - - RegexLexer.__init__(self, **options) - - -class XtendLexer(RegexLexer): - """ - For `Xtend `_ source code. 
- - *New in Pygments 1.6.* - """ - - name = 'Xtend' - aliases = ['xtend'] - filenames = ['*.xtend'] - mimetypes = ['text/x-xtend'] - - flags = re.MULTILINE | re.DOTALL - - tokens = { - 'root': [ - # method names - (r'^(\s*(?:[a-zA-Z_][a-zA-Z0-9_\.\[\]]*\s+)+?)' # return arguments - r'([a-zA-Z_$][a-zA-Z0-9_$]*)' # method name - r'(\s*)(\()', # signature start - bygroups(using(this), Name.Function, Text, Operator)), - (r'[^\S\n]+', Text), - (r'//.*?\n', Comment.Single), - (r'/\*.*?\*/', Comment.Multiline), - (r'@[a-zA-Z_][a-zA-Z0-9_\.]*', Name.Decorator), - (r'(assert|break|case|catch|continue|default|do|else|finally|for|' - r'if|goto|instanceof|new|return|switch|this|throw|try|while|IF|' - r'ELSE|ELSEIF|ENDIF|FOR|ENDFOR|SEPARATOR|BEFORE|AFTER)\b', - Keyword), - (r'(def|abstract|const|enum|extends|final|implements|native|private|' - r'protected|public|static|strictfp|super|synchronized|throws|' - r'transient|volatile)\b', Keyword.Declaration), - (r'(boolean|byte|char|double|float|int|long|short|void)\b', - Keyword.Type), - (r'(package)(\s+)', bygroups(Keyword.Namespace, Text)), - (r'(true|false|null)\b', Keyword.Constant), - (r'(class|interface)(\s+)', bygroups(Keyword.Declaration, Text), - 'class'), - (r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'), - (r"(''')", String, 'template'), - (r"(\u00BB)", String, 'template'), - (r'"(\\\\|\\"|[^"])*"', String), - (r"'(\\\\|\\'|[^'])*'", String), - (r'[a-zA-Z_][a-zA-Z0-9_]*:', Name.Label), - (r'[a-zA-Z_\$][a-zA-Z0-9_]*', Name), - (r'[~\^\*!%&\[\]\(\)\{\}<>\|+=:;,./?-]', Operator), - (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), - (r'0x[0-9a-fA-F]+', Number.Hex), - (r'[0-9]+L?', Number.Integer), - (r'\n', Text) - ], - 'class': [ - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop') - ], - 'import': [ - (r'[a-zA-Z0-9_.]+\*?', Name.Namespace, '#pop') - ], - 'template': [ - (r"'''", String, '#pop'), - (r"\u00AB", String, '#pop'), - (r'.', String) - ], - } diff --git 
a/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/math.py b/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/math.py deleted file mode 100644 index f0e49fe..0000000 --- a/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/math.py +++ /dev/null @@ -1,1918 +0,0 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.math - ~~~~~~~~~~~~~~~~~~~~ - - Lexers for math languages. - - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import re - -from pygments.util import shebang_matches -from pygments.lexer import Lexer, RegexLexer, bygroups, include, \ - combined, do_insertions -from pygments.token import Comment, String, Punctuation, Keyword, Name, \ - Operator, Number, Text, Generic - -from pygments.lexers.agile import PythonLexer -from pygments.lexers import _scilab_builtins -from pygments.lexers import _stan_builtins - -__all__ = ['JuliaLexer', 'JuliaConsoleLexer', 'MuPADLexer', 'MatlabLexer', - 'MatlabSessionLexer', 'OctaveLexer', 'ScilabLexer', 'NumPyLexer', - 'RConsoleLexer', 'SLexer', 'JagsLexer', 'BugsLexer', 'StanLexer', - 'IDLLexer', 'RdLexer', 'IgorLexer'] - - -class JuliaLexer(RegexLexer): - """ - For `Julia `_ source code. 
- - *New in Pygments 1.6.* - """ - name = 'Julia' - aliases = ['julia','jl'] - filenames = ['*.jl'] - mimetypes = ['text/x-julia','application/x-julia'] - - builtins = [ - 'exit','whos','edit','load','is','isa','isequal','typeof','tuple', - 'ntuple','uid','hash','finalizer','convert','promote','subtype', - 'typemin','typemax','realmin','realmax','sizeof','eps','promote_type', - 'method_exists','applicable','invoke','dlopen','dlsym','system', - 'error','throw','assert','new','Inf','Nan','pi','im', - ] - - tokens = { - 'root': [ - (r'\n', Text), - (r'[^\S\n]+', Text), - (r'#.*$', Comment), - (r'[]{}:(),;[@]', Punctuation), - (r'\\\n', Text), - (r'\\', Text), - - # keywords - (r'(begin|while|for|in|return|break|continue|' - r'macro|quote|let|if|elseif|else|try|catch|end|' - r'bitstype|ccall|do|using|module|import|export|' - r'importall|baremodule|immutable)\b', Keyword), - (r'(local|global|const)\b', Keyword.Declaration), - (r'(Bool|Int|Int8|Int16|Int32|Int64|Uint|Uint8|Uint16|Uint32|Uint64' - r'|Float32|Float64|Complex64|Complex128|Any|Nothing|None)\b', - Keyword.Type), - - # functions - (r'(function)((?:\s|\\\s)+)', - bygroups(Keyword,Name.Function), 'funcname'), - - # types - (r'(type|typealias|abstract)((?:\s|\\\s)+)', - bygroups(Keyword,Name.Class), 'typename'), - - # operators - (r'==|!=|<=|>=|->|&&|\|\||::|<:|[-~+/*%=<>&^|.?!$]', Operator), - (r'\.\*|\.\^|\.\\|\.\/|\\', Operator), - - # builtins - ('(' + '|'.join(builtins) + r')\b', Name.Builtin), - - # backticks - (r'`(?s).*?`', String.Backtick), - - # chars - (r"'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,3}|\\u[a-fA-F0-9]{1,4}|" - r"\\U[a-fA-F0-9]{1,6}|[^\\\'\n])'", String.Char), - - # try to match trailing transpose - (r'(?<=[.\w\)\]])\'+', Operator), - - # strings - (r'(?:[IL])"', String, 'string'), - (r'[E]?"', String, combined('stringescape', 'string')), - - # names - (r'@[a-zA-Z0-9_.]+', Name.Decorator), - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name), - - # numbers - 
(r'(\d+(_\d+)+\.\d*|\d*\.\d+(_\d+)+)([eEf][+-]?[0-9]+)?', Number.Float), - (r'(\d+\.\d*|\d*\.\d+)([eEf][+-]?[0-9]+)?', Number.Float), - (r'\d+(_\d+)+[eEf][+-]?[0-9]+', Number.Float), - (r'\d+[eEf][+-]?[0-9]+', Number.Float), - (r'0b[01]+(_[01]+)+', Number.Binary), - (r'0b[01]+', Number.Binary), - (r'0o[0-7]+(_[0-7]+)+', Number.Oct), - (r'0o[0-7]+', Number.Oct), - (r'0x[a-fA-F0-9]+(_[a-fA-F0-9]+)+', Number.Hex), - (r'0x[a-fA-F0-9]+', Number.Hex), - (r'\d+(_\d+)+', Number.Integer), - (r'\d+', Number.Integer) - ], - - 'funcname': [ - ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Function, '#pop'), - ('\([^\s\w{]{1,2}\)', Operator, '#pop'), - ('[^\s\w{]{1,2}', Operator, '#pop'), - ], - - 'typename': [ - ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop') - ], - - 'stringescape': [ - (r'\\([\\abfnrtv"\']|\n|N{.*?}|u[a-fA-F0-9]{4}|' - r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape) - ], - - 'string': [ - (r'"', String, '#pop'), - (r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings - (r'\$(\([a-zA-Z0-9_]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?', - String.Interpol), - (r'[^\\"$]+', String), - # quotes, dollar signs, and backslashes must be parsed one at a time - (r'["\\]', String), - # unhandled string formatting sign - (r'\$', String) - ], - } - - def analyse_text(text): - return shebang_matches(text, r'julia') - - -line_re = re.compile('.*?\n') - -class JuliaConsoleLexer(Lexer): - """ - For Julia console sessions. Modeled after MatlabSessionLexer. - - *New in Pygments 1.6.* - """ - name = 'Julia console' - aliases = ['jlcon'] - - def get_tokens_unprocessed(self, text): - jllexer = JuliaLexer(**self.options) - - curcode = '' - insertions = [] - - for match in line_re.finditer(text): - line = match.group() - - if line.startswith('julia>'): - insertions.append((len(curcode), - [(0, Generic.Prompt, line[:3])])) - curcode += line[3:] - - elif line.startswith(' '): - - idx = len(curcode) - - # without is showing error on same line as before...? 
- line = "\n" + line - token = (0, Generic.Traceback, line) - insertions.append((idx, [token])) - - else: - if curcode: - for item in do_insertions( - insertions, jllexer.get_tokens_unprocessed(curcode)): - yield item - curcode = '' - insertions = [] - - yield match.start(), Generic.Output, line - - if curcode: # or item: - for item in do_insertions( - insertions, jllexer.get_tokens_unprocessed(curcode)): - yield item - - -class MuPADLexer(RegexLexer): - """ - A `MuPAD `_ lexer. - Contributed by Christopher Creutzig . - - *New in Pygments 0.8.* - """ - name = 'MuPAD' - aliases = ['mupad'] - filenames = ['*.mu'] - - tokens = { - 'root' : [ - (r'//.*?$', Comment.Single), - (r'/\*', Comment.Multiline, 'comment'), - (r'"(?:[^"\\]|\\.)*"', String), - (r'\(|\)|\[|\]|\{|\}', Punctuation), - (r'''(?x)\b(?: - next|break|end| - axiom|end_axiom|category|end_category|domain|end_domain|inherits| - if|%if|then|elif|else|end_if| - case|of|do|otherwise|end_case| - while|end_while| - repeat|until|end_repeat| - for|from|to|downto|step|end_for| - proc|local|option|save|begin|end_proc| - delete|frame - )\b''', Keyword), - (r'''(?x)\b(?: - DOM_ARRAY|DOM_BOOL|DOM_COMPLEX|DOM_DOMAIN|DOM_EXEC|DOM_EXPR| - DOM_FAIL|DOM_FLOAT|DOM_FRAME|DOM_FUNC_ENV|DOM_HFARRAY|DOM_IDENT| - DOM_INT|DOM_INTERVAL|DOM_LIST|DOM_NIL|DOM_NULL|DOM_POLY|DOM_PROC| - DOM_PROC_ENV|DOM_RAT|DOM_SET|DOM_STRING|DOM_TABLE|DOM_VAR - )\b''', Name.Class), - (r'''(?x)\b(?: - PI|EULER|E|CATALAN| - NIL|FAIL|undefined|infinity| - TRUE|FALSE|UNKNOWN - )\b''', - Name.Constant), - (r'\b(?:dom|procname)\b', Name.Builtin.Pseudo), - (r'\.|,|:|;|=|\+|-|\*|/|\^|@|>|<|\$|\||!|\'|%|~=', Operator), - (r'''(?x)\b(?: - and|or|not|xor| - assuming| - div|mod| - union|minus|intersect|in|subset - )\b''', - Operator.Word), - (r'\b(?:I|RDN_INF|RD_NINF|RD_NAN)\b', Number), - #(r'\b(?:adt|linalg|newDomain|hold)\b', Name.Builtin), - (r'''(?x) - ((?:[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`) - (?:::[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)*)(\s*)([(])''', - 
bygroups(Name.Function, Text, Punctuation)), - (r'''(?x) - (?:[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`) - (?:::[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)*''', Name.Variable), - (r'[0-9]+(?:\.[0-9]*)?(?:e[0-9]+)?', Number), - (r'\.[0-9]+(?:e[0-9]+)?', Number), - (r'.', Text) - ], - 'comment' : [ - (r'[^*/]', Comment.Multiline), - (r'/\*', Comment.Multiline, '#push'), - (r'\*/', Comment.Multiline, '#pop'), - (r'[*/]', Comment.Multiline) - ] - } - - -class MatlabLexer(RegexLexer): - """ - For Matlab source code. - - *New in Pygments 0.10.* - """ - name = 'Matlab' - aliases = ['matlab'] - filenames = ['*.m'] - mimetypes = ['text/matlab'] - - # - # These lists are generated automatically. - # Run the following in bash shell: - # - # for f in elfun specfun elmat; do - # echo -n "$f = " - # matlab -nojvm -r "help $f;exit;" | perl -ne \ - # 'push(@c,$1) if /^ (\w+)\s+-/; END {print q{["}.join(q{","},@c).qq{"]\n};}' - # done - # - # elfun: Elementary math functions - # specfun: Special Math functions - # elmat: Elementary matrices and matrix manipulation - # - # taken from Matlab version 7.4.0.336 (R2007a) - # - elfun = ["sin","sind","sinh","asin","asind","asinh","cos","cosd","cosh", - "acos","acosd","acosh","tan","tand","tanh","atan","atand","atan2", - "atanh","sec","secd","sech","asec","asecd","asech","csc","cscd", - "csch","acsc","acscd","acsch","cot","cotd","coth","acot","acotd", - "acoth","hypot","exp","expm1","log","log1p","log10","log2","pow2", - "realpow","reallog","realsqrt","sqrt","nthroot","nextpow2","abs", - "angle","complex","conj","imag","real","unwrap","isreal","cplxpair", - "fix","floor","ceil","round","mod","rem","sign"] - specfun = ["airy","besselj","bessely","besselh","besseli","besselk","beta", - "betainc","betaln","ellipj","ellipke","erf","erfc","erfcx", - "erfinv","expint","gamma","gammainc","gammaln","psi","legendre", - "cross","dot","factor","isprime","primes","gcd","lcm","rat", - "rats","perms","nchoosek","factorial","cart2sph","cart2pol", - 
"pol2cart","sph2cart","hsv2rgb","rgb2hsv"] - elmat = ["zeros","ones","eye","repmat","rand","randn","linspace","logspace", - "freqspace","meshgrid","accumarray","size","length","ndims","numel", - "disp","isempty","isequal","isequalwithequalnans","cat","reshape", - "diag","blkdiag","tril","triu","fliplr","flipud","flipdim","rot90", - "find","end","sub2ind","ind2sub","bsxfun","ndgrid","permute", - "ipermute","shiftdim","circshift","squeeze","isscalar","isvector", - "ans","eps","realmax","realmin","pi","i","inf","nan","isnan", - "isinf","isfinite","j","why","compan","gallery","hadamard","hankel", - "hilb","invhilb","magic","pascal","rosser","toeplitz","vander", - "wilkinson"] - - tokens = { - 'root': [ - # line starting with '!' is sent as a system command. not sure what - # label to use... - (r'^!.*', String.Other), - (r'%\{\s*\n', Comment.Multiline, 'blockcomment'), - (r'%.*$', Comment), - (r'^\s*function', Keyword, 'deffunc'), - - # from 'iskeyword' on version 7.11 (R2010): - (r'(break|case|catch|classdef|continue|else|elseif|end|enumerated|' - r'events|for|function|global|if|methods|otherwise|parfor|' - r'persistent|properties|return|spmd|switch|try|while)\b', Keyword), - - ("(" + "|".join(elfun+specfun+elmat) + r')\b', Name.Builtin), - - # line continuation with following comment: - (r'\.\.\..*$', Comment), - - # operators: - (r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator), - # operators requiring escape for re: - (r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator), - - # punctuation: - (r'\[|\]|\(|\)|\{|\}|:|@|\.|,', Punctuation), - (r'=|:|;', Punctuation), - - # quote can be transpose, instead of string: - # (not great, but handles common cases...) - (r'(?<=[\w\)\]])\'', Operator), - - (r'(\d+\.\d*|\d*\.\d+)([eEf][+-]?[0-9]+)?', Number.Float), - (r'\d+[eEf][+-]?[0-9]+', Number.Float), - (r'\d+', Number.Integer), - - (r'(?. 
- - *New in Pygments 0.10.* - """ - name = 'Matlab session' - aliases = ['matlabsession'] - - def get_tokens_unprocessed(self, text): - mlexer = MatlabLexer(**self.options) - - curcode = '' - insertions = [] - - for match in line_re.finditer(text): - line = match.group() - - if line.startswith('>>'): - insertions.append((len(curcode), - [(0, Generic.Prompt, line[:3])])) - curcode += line[3:] - - elif line.startswith('???'): - - idx = len(curcode) - - # without is showing error on same line as before...? - line = "\n" + line - token = (0, Generic.Traceback, line) - insertions.append((idx, [token])) - - else: - if curcode: - for item in do_insertions( - insertions, mlexer.get_tokens_unprocessed(curcode)): - yield item - curcode = '' - insertions = [] - - yield match.start(), Generic.Output, line - - if curcode: # or item: - for item in do_insertions( - insertions, mlexer.get_tokens_unprocessed(curcode)): - yield item - - -class OctaveLexer(RegexLexer): - """ - For GNU Octave source code. - - *New in Pygments 1.5.* - """ - name = 'Octave' - aliases = ['octave'] - filenames = ['*.m'] - mimetypes = ['text/octave'] - - # These lists are generated automatically. 
- # Run the following in bash shell: - # - # First dump all of the Octave manual into a plain text file: - # - # $ info octave --subnodes -o octave-manual - # - # Now grep through it: - - # for i in \ - # "Built-in Function" "Command" "Function File" \ - # "Loadable Function" "Mapping Function"; - # do - # perl -e '@name = qw('"$i"'); - # print lc($name[0]),"_kw = [\n"'; - # - # perl -n -e 'print "\"$1\",\n" if /-- '"$i"': .* (\w*) \(/;' \ - # octave-manual | sort | uniq ; - # echo "]" ; - # echo; - # done - - # taken from Octave Mercurial changeset 8cc154f45e37 (30-jan-2011) - - builtin_kw = [ "addlistener", "addpath", "addproperty", "all", - "and", "any", "argnames", "argv", "assignin", - "atexit", "autoload", - "available_graphics_toolkits", "beep_on_error", - "bitand", "bitmax", "bitor", "bitshift", "bitxor", - "cat", "cell", "cellstr", "char", "class", "clc", - "columns", "command_line_path", - "completion_append_char", "completion_matches", - "complex", "confirm_recursive_rmdir", "cputime", - "crash_dumps_octave_core", "ctranspose", "cumprod", - "cumsum", "debug_on_error", "debug_on_interrupt", - "debug_on_warning", "default_save_options", - "dellistener", "diag", "diff", "disp", - "doc_cache_file", "do_string_escapes", "double", - "drawnow", "e", "echo_executing_commands", "eps", - "eq", "errno", "errno_list", "error", "eval", - "evalin", "exec", "exist", "exit", "eye", "false", - "fclear", "fclose", "fcntl", "fdisp", "feof", - "ferror", "feval", "fflush", "fgetl", "fgets", - "fieldnames", "file_in_loadpath", "file_in_path", - "filemarker", "filesep", "find_dir_in_path", - "fixed_point_format", "fnmatch", "fopen", "fork", - "formula", "fprintf", "fputs", "fread", "freport", - "frewind", "fscanf", "fseek", "fskipl", "ftell", - "functions", "fwrite", "ge", "genpath", "get", - "getegid", "getenv", "geteuid", "getgid", - "getpgrp", "getpid", "getppid", "getuid", "glob", - "gt", "gui_mode", "history_control", - "history_file", "history_size", - 
"history_timestamp_format_string", "home", - "horzcat", "hypot", "ifelse", - "ignore_function_time_stamp", "inferiorto", - "info_file", "info_program", "inline", "input", - "intmax", "intmin", "ipermute", - "is_absolute_filename", "isargout", "isbool", - "iscell", "iscellstr", "ischar", "iscomplex", - "isempty", "isfield", "isfloat", "isglobal", - "ishandle", "isieee", "isindex", "isinteger", - "islogical", "ismatrix", "ismethod", "isnull", - "isnumeric", "isobject", "isreal", - "is_rooted_relative_filename", "issorted", - "isstruct", "isvarname", "kbhit", "keyboard", - "kill", "lasterr", "lasterror", "lastwarn", - "ldivide", "le", "length", "link", "linspace", - "logical", "lstat", "lt", "make_absolute_filename", - "makeinfo_program", "max_recursion_depth", "merge", - "methods", "mfilename", "minus", "mislocked", - "mkdir", "mkfifo", "mkstemp", "mldivide", "mlock", - "mouse_wheel_zoom", "mpower", "mrdivide", "mtimes", - "munlock", "nargin", "nargout", - "native_float_format", "ndims", "ne", "nfields", - "nnz", "norm", "not", "numel", "nzmax", - "octave_config_info", "octave_core_file_limit", - "octave_core_file_name", - "octave_core_file_options", "ones", "or", - "output_max_field_width", "output_precision", - "page_output_immediately", "page_screen_output", - "path", "pathsep", "pause", "pclose", "permute", - "pi", "pipe", "plus", "popen", "power", - "print_empty_dimensions", "printf", - "print_struct_array_contents", "prod", - "program_invocation_name", "program_name", - "putenv", "puts", "pwd", "quit", "rats", "rdivide", - "readdir", "readlink", "read_readline_init_file", - "realmax", "realmin", "rehash", "rename", - "repelems", "re_read_readline_init_file", "reset", - "reshape", "resize", "restoredefaultpath", - "rethrow", "rmdir", "rmfield", "rmpath", "rows", - "save_header_format_string", "save_precision", - "saving_history", "scanf", "set", "setenv", - "shell_cmd", "sighup_dumps_octave_core", - "sigterm_dumps_octave_core", "silent_functions", - "single", 
"size", "size_equal", "sizemax", - "sizeof", "sleep", "source", "sparse_auto_mutate", - "split_long_rows", "sprintf", "squeeze", "sscanf", - "stat", "stderr", "stdin", "stdout", "strcmp", - "strcmpi", "string_fill_char", "strncmp", - "strncmpi", "struct", "struct_levels_to_print", - "strvcat", "subsasgn", "subsref", "sum", "sumsq", - "superiorto", "suppress_verbose_help_message", - "symlink", "system", "tic", "tilde_expand", - "times", "tmpfile", "tmpnam", "toc", "toupper", - "transpose", "true", "typeinfo", "umask", "uminus", - "uname", "undo_string_escapes", "unlink", "uplus", - "upper", "usage", "usleep", "vec", "vectorize", - "vertcat", "waitpid", "warning", "warranty", - "whos_line_format", "yes_or_no", "zeros", - "inf", "Inf", "nan", "NaN"] - - command_kw = [ "close", "load", "who", "whos", ] - - function_kw = [ "accumarray", "accumdim", "acosd", "acotd", - "acscd", "addtodate", "allchild", "ancestor", - "anova", "arch_fit", "arch_rnd", "arch_test", - "area", "arma_rnd", "arrayfun", "ascii", "asctime", - "asecd", "asind", "assert", "atand", - "autoreg_matrix", "autumn", "axes", "axis", "bar", - "barh", "bartlett", "bartlett_test", "beep", - "betacdf", "betainv", "betapdf", "betarnd", - "bicgstab", "bicubic", "binary", "binocdf", - "binoinv", "binopdf", "binornd", "bitcmp", - "bitget", "bitset", "blackman", "blanks", - "blkdiag", "bone", "box", "brighten", "calendar", - "cast", "cauchy_cdf", "cauchy_inv", "cauchy_pdf", - "cauchy_rnd", "caxis", "celldisp", "center", "cgs", - "chisquare_test_homogeneity", - "chisquare_test_independence", "circshift", "cla", - "clabel", "clf", "clock", "cloglog", "closereq", - "colon", "colorbar", "colormap", "colperm", - "comet", "common_size", "commutation_matrix", - "compan", "compare_versions", "compass", - "computer", "cond", "condest", "contour", - "contourc", "contourf", "contrast", "conv", - "convhull", "cool", "copper", "copyfile", "cor", - "corrcoef", "cor_test", "cosd", "cotd", "cov", - "cplxpair", "cross", "cscd", 
"cstrcat", "csvread", - "csvwrite", "ctime", "cumtrapz", "curl", "cut", - "cylinder", "date", "datenum", "datestr", - "datetick", "datevec", "dblquad", "deal", - "deblank", "deconv", "delaunay", "delaunayn", - "delete", "demo", "detrend", "diffpara", "diffuse", - "dir", "discrete_cdf", "discrete_inv", - "discrete_pdf", "discrete_rnd", "display", - "divergence", "dlmwrite", "dos", "dsearch", - "dsearchn", "duplication_matrix", "durbinlevinson", - "ellipsoid", "empirical_cdf", "empirical_inv", - "empirical_pdf", "empirical_rnd", "eomday", - "errorbar", "etime", "etreeplot", "example", - "expcdf", "expinv", "expm", "exppdf", "exprnd", - "ezcontour", "ezcontourf", "ezmesh", "ezmeshc", - "ezplot", "ezpolar", "ezsurf", "ezsurfc", "factor", - "factorial", "fail", "fcdf", "feather", "fftconv", - "fftfilt", "fftshift", "figure", "fileattrib", - "fileparts", "fill", "findall", "findobj", - "findstr", "finv", "flag", "flipdim", "fliplr", - "flipud", "fpdf", "fplot", "fractdiff", "freqz", - "freqz_plot", "frnd", "fsolve", - "f_test_regression", "ftp", "fullfile", "fzero", - "gamcdf", "gaminv", "gampdf", "gamrnd", "gca", - "gcbf", "gcbo", "gcf", "genvarname", "geocdf", - "geoinv", "geopdf", "geornd", "getfield", "ginput", - "glpk", "gls", "gplot", "gradient", - "graphics_toolkit", "gray", "grid", "griddata", - "griddatan", "gtext", "gunzip", "gzip", "hadamard", - "hamming", "hankel", "hanning", "hggroup", - "hidden", "hilb", "hist", "histc", "hold", "hot", - "hotelling_test", "housh", "hsv", "hurst", - "hygecdf", "hygeinv", "hygepdf", "hygernd", - "idivide", "ifftshift", "image", "imagesc", - "imfinfo", "imread", "imshow", "imwrite", "index", - "info", "inpolygon", "inputname", "interpft", - "interpn", "intersect", "invhilb", "iqr", "isa", - "isdefinite", "isdir", "is_duplicate_entry", - "isequal", "isequalwithequalnans", "isfigure", - "ishermitian", "ishghandle", "is_leap_year", - "isletter", "ismac", "ismember", "ispc", "isprime", - "isprop", "isscalar", "issquare", 
"isstrprop", - "issymmetric", "isunix", "is_valid_file_id", - "isvector", "jet", "kendall", - "kolmogorov_smirnov_cdf", - "kolmogorov_smirnov_test", "kruskal_wallis_test", - "krylov", "kurtosis", "laplace_cdf", "laplace_inv", - "laplace_pdf", "laplace_rnd", "legend", "legendre", - "license", "line", "linkprop", "list_primes", - "loadaudio", "loadobj", "logistic_cdf", - "logistic_inv", "logistic_pdf", "logistic_rnd", - "logit", "loglog", "loglogerr", "logm", "logncdf", - "logninv", "lognpdf", "lognrnd", "logspace", - "lookfor", "ls_command", "lsqnonneg", "magic", - "mahalanobis", "manova", "matlabroot", - "mcnemar_test", "mean", "meansq", "median", "menu", - "mesh", "meshc", "meshgrid", "meshz", "mexext", - "mget", "mkpp", "mode", "moment", "movefile", - "mpoles", "mput", "namelengthmax", "nargchk", - "nargoutchk", "nbincdf", "nbininv", "nbinpdf", - "nbinrnd", "nchoosek", "ndgrid", "newplot", "news", - "nonzeros", "normcdf", "normest", "norminv", - "normpdf", "normrnd", "now", "nthroot", "null", - "ocean", "ols", "onenormest", "optimget", - "optimset", "orderfields", "orient", "orth", - "pack", "pareto", "parseparams", "pascal", "patch", - "pathdef", "pcg", "pchip", "pcolor", "pcr", - "peaks", "periodogram", "perl", "perms", "pie", - "pink", "planerot", "playaudio", "plot", - "plotmatrix", "plotyy", "poisscdf", "poissinv", - "poisspdf", "poissrnd", "polar", "poly", - "polyaffine", "polyarea", "polyderiv", "polyfit", - "polygcd", "polyint", "polyout", "polyreduce", - "polyval", "polyvalm", "postpad", "powerset", - "ppder", "ppint", "ppjumps", "ppplot", "ppval", - "pqpnonneg", "prepad", "primes", "print", - "print_usage", "prism", "probit", "qp", "qqplot", - "quadcc", "quadgk", "quadl", "quadv", "quiver", - "qzhess", "rainbow", "randi", "range", "rank", - "ranks", "rat", "reallog", "realpow", "realsqrt", - "record", "rectangle_lw", "rectangle_sw", - "rectint", "refresh", "refreshdata", - "regexptranslate", "repmat", "residue", "ribbon", - "rindex", "roots", "rose", 
"rosser", "rotdim", - "rref", "run", "run_count", "rundemos", "run_test", - "runtests", "saveas", "saveaudio", "saveobj", - "savepath", "scatter", "secd", "semilogx", - "semilogxerr", "semilogy", "semilogyerr", - "setaudio", "setdiff", "setfield", "setxor", - "shading", "shift", "shiftdim", "sign_test", - "sinc", "sind", "sinetone", "sinewave", "skewness", - "slice", "sombrero", "sortrows", "spaugment", - "spconvert", "spdiags", "spearman", "spectral_adf", - "spectral_xdf", "specular", "speed", "spencer", - "speye", "spfun", "sphere", "spinmap", "spline", - "spones", "sprand", "sprandn", "sprandsym", - "spring", "spstats", "spy", "sqp", "stairs", - "statistics", "std", "stdnormal_cdf", - "stdnormal_inv", "stdnormal_pdf", "stdnormal_rnd", - "stem", "stft", "strcat", "strchr", "strjust", - "strmatch", "strread", "strsplit", "strtok", - "strtrim", "strtrunc", "structfun", "studentize", - "subplot", "subsindex", "subspace", "substr", - "substruct", "summer", "surf", "surface", "surfc", - "surfl", "surfnorm", "svds", "swapbytes", - "sylvester_matrix", "symvar", "synthesis", "table", - "tand", "tar", "tcdf", "tempdir", "tempname", - "test", "text", "textread", "textscan", "tinv", - "title", "toeplitz", "tpdf", "trace", "trapz", - "treelayout", "treeplot", "triangle_lw", - "triangle_sw", "tril", "trimesh", "triplequad", - "triplot", "trisurf", "triu", "trnd", "tsearchn", - "t_test", "t_test_regression", "type", "unidcdf", - "unidinv", "unidpdf", "unidrnd", "unifcdf", - "unifinv", "unifpdf", "unifrnd", "union", "unique", - "unix", "unmkpp", "unpack", "untabify", "untar", - "unwrap", "unzip", "u_test", "validatestring", - "vander", "var", "var_test", "vech", "ver", - "version", "view", "voronoi", "voronoin", - "waitforbuttonpress", "wavread", "wavwrite", - "wblcdf", "wblinv", "wblpdf", "wblrnd", "weekday", - "welch_test", "what", "white", "whitebg", - "wienrnd", "wilcoxon_test", "wilkinson", "winter", - "xlabel", "xlim", "ylabel", "yulewalker", "zip", - "zlabel", "z_test", 
] - - loadable_kw = [ "airy", "amd", "balance", "besselh", "besseli", - "besselj", "besselk", "bessely", "bitpack", - "bsxfun", "builtin", "ccolamd", "cellfun", - "cellslices", "chol", "choldelete", "cholinsert", - "cholinv", "cholshift", "cholupdate", "colamd", - "colloc", "convhulln", "convn", "csymamd", - "cummax", "cummin", "daspk", "daspk_options", - "dasrt", "dasrt_options", "dassl", "dassl_options", - "dbclear", "dbdown", "dbstack", "dbstatus", - "dbstop", "dbtype", "dbup", "dbwhere", "det", - "dlmread", "dmperm", "dot", "eig", "eigs", - "endgrent", "endpwent", "etree", "fft", "fftn", - "fftw", "filter", "find", "full", "gcd", - "getgrent", "getgrgid", "getgrnam", "getpwent", - "getpwnam", "getpwuid", "getrusage", "givens", - "gmtime", "gnuplot_binary", "hess", "ifft", - "ifftn", "inv", "isdebugmode", "issparse", "kron", - "localtime", "lookup", "lsode", "lsode_options", - "lu", "luinc", "luupdate", "matrix_type", "max", - "min", "mktime", "pinv", "qr", "qrdelete", - "qrinsert", "qrshift", "qrupdate", "quad", - "quad_options", "qz", "rand", "rande", "randg", - "randn", "randp", "randperm", "rcond", "regexp", - "regexpi", "regexprep", "schur", "setgrent", - "setpwent", "sort", "spalloc", "sparse", "spparms", - "sprank", "sqrtm", "strfind", "strftime", - "strptime", "strrep", "svd", "svd_driver", "syl", - "symamd", "symbfact", "symrcm", "time", "tsearch", - "typecast", "urlread", "urlwrite", ] - - mapping_kw = [ "abs", "acos", "acosh", "acot", "acoth", "acsc", - "acsch", "angle", "arg", "asec", "asech", "asin", - "asinh", "atan", "atanh", "beta", "betainc", - "betaln", "bincoeff", "cbrt", "ceil", "conj", "cos", - "cosh", "cot", "coth", "csc", "csch", "erf", "erfc", - "erfcx", "erfinv", "exp", "finite", "fix", "floor", - "fmod", "gamma", "gammainc", "gammaln", "imag", - "isalnum", "isalpha", "isascii", "iscntrl", - "isdigit", "isfinite", "isgraph", "isinf", - "islower", "isna", "isnan", "isprint", "ispunct", - "isspace", "isupper", "isxdigit", "lcm", "lgamma", 
- "log", "lower", "mod", "real", "rem", "round", - "roundb", "sec", "sech", "sign", "sin", "sinh", - "sqrt", "tan", "tanh", "toascii", "tolower", "xor", - ] - - builtin_consts = [ "EDITOR", "EXEC_PATH", "I", "IMAGE_PATH", "NA", - "OCTAVE_HOME", "OCTAVE_VERSION", "PAGER", - "PAGER_FLAGS", "SEEK_CUR", "SEEK_END", "SEEK_SET", - "SIG", "S_ISBLK", "S_ISCHR", "S_ISDIR", "S_ISFIFO", - "S_ISLNK", "S_ISREG", "S_ISSOCK", "WCONTINUE", - "WCOREDUMP", "WEXITSTATUS", "WIFCONTINUED", - "WIFEXITED", "WIFSIGNALED", "WIFSTOPPED", "WNOHANG", - "WSTOPSIG", "WTERMSIG", "WUNTRACED", ] - - tokens = { - 'root': [ - #We should look into multiline comments - (r'[%#].*$', Comment), - (r'^\s*function', Keyword, 'deffunc'), - - # from 'iskeyword' on hg changeset 8cc154f45e37 - (r'(__FILE__|__LINE__|break|case|catch|classdef|continue|do|else|' - r'elseif|end|end_try_catch|end_unwind_protect|endclassdef|' - r'endevents|endfor|endfunction|endif|endmethods|endproperties|' - r'endswitch|endwhile|events|for|function|get|global|if|methods|' - r'otherwise|persistent|properties|return|set|static|switch|try|' - r'until|unwind_protect|unwind_protect_cleanup|while)\b', Keyword), - - ("(" + "|".join( builtin_kw + command_kw - + function_kw + loadable_kw - + mapping_kw) + r')\b', Name.Builtin), - - ("(" + "|".join(builtin_consts) + r')\b', Name.Constant), - - # operators in Octave but not Matlab: - (r'-=|!=|!|/=|--', Operator), - # operators: - (r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator), - # operators in Octave but not Matlab requiring escape for re: - (r'\*=|\+=|\^=|\/=|\\=|\*\*|\+\+|\.\*\*',Operator), - # operators requiring escape for re: - (r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator), - - - # punctuation: - (r'\[|\]|\(|\)|\{|\}|:|@|\.|,', Punctuation), - (r'=|:|;', Punctuation), - - (r'"[^"]*"', String), - - (r'(\d+\.\d*|\d*\.\d+)([eEf][+-]?[0-9]+)?', Number.Float), - (r'\d+[eEf][+-]?[0-9]+', Number.Float), - (r'\d+', Number.Integer), - - # quote can be transpose, instead of string: - # (not great, 
but handles common cases...) - (r'(?<=[\w\)\]])\'', Operator), - (r'(?|<=|>=|&&|&|~|\|\|?', Operator), - # operators requiring escape for re: - (r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator), - - # punctuation: - (r'[\[\](){}@.,=:;]', Punctuation), - - (r'"[^"]*"', String), - - # quote can be transpose, instead of string: - # (not great, but handles common cases...) - (r'(?<=[\w\)\]])\'', Operator), - (r'(?') or line.startswith('+'): - # Colorize the prompt as such, - # then put rest of line into current_code_block - insertions.append((len(current_code_block), - [(0, Generic.Prompt, line[:2])])) - current_code_block += line[2:] - else: - # We have reached a non-prompt line! - # If we have stored prompt lines, need to process them first. - if current_code_block: - # Weave together the prompts and highlight code. - for item in do_insertions(insertions, - slexer.get_tokens_unprocessed(current_code_block)): - yield item - # Reset vars for next code block. - current_code_block = '' - insertions = [] - # Now process the actual line itself, this is output from R. - yield match.start(), Generic.Output, line - - # If we happen to end on a code block with nothing after it, need to - # process the last code block. This is neither elegant nor DRY so - # should be changed. - if current_code_block: - for item in do_insertions(insertions, - slexer.get_tokens_unprocessed(current_code_block)): - yield item - - -class SLexer(RegexLexer): - """ - For S, S-plus, and R source code. 
- - *New in Pygments 0.10.* - """ - - name = 'S' - aliases = ['splus', 's', 'r'] - filenames = ['*.S', '*.R', '.Rhistory', '.Rprofile'] - mimetypes = ['text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', - 'text/x-R', 'text/x-r-history', 'text/x-r-profile'] - - tokens = { - 'comments': [ - (r'#.*$', Comment.Single), - ], - 'valid_name': [ - (r'[a-zA-Z][0-9a-zA-Z\._]*', Text), - # can begin with ., but not if that is followed by a digit - (r'\.[a-zA-Z_][0-9a-zA-Z\._]*', Text), - ], - 'punctuation': [ - (r'\[{1,2}|\]{1,2}|\(|\)|;|,', Punctuation), - ], - 'keywords': [ - (r'(if|else|for|while|repeat|in|next|break|return|switch|function)' - r'(?![0-9a-zA-Z\._])', - Keyword.Reserved) - ], - 'operators': [ - (r'<>?|-|==|<=|>=|<|>|&&?|!=|\|\|?|\?', Operator), - (r'\*|\+|\^|/|!|%[^%]*%|=|~|\$|@|:{1,3}', Operator) - ], - 'builtin_symbols': [ - (r'(NULL|NA(_(integer|real|complex|character)_)?|' - r'Inf|TRUE|FALSE|NaN|\.\.(\.|[0-9]+))' - r'(?![0-9a-zA-Z\._])', - Keyword.Constant), - (r'(T|F)\b', Keyword.Variable), - ], - 'numbers': [ - # hex number - (r'0[xX][a-fA-F0-9]+([pP][0-9]+)?[Li]?', Number.Hex), - # decimal number - (r'[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+)([eE][+-]?[0-9]+)?[Li]?', - Number), - ], - 'statements': [ - include('comments'), - # whitespaces - (r'\s+', Text), - (r'`.*?`', String.Backtick), - (r'\'', String, 'string_squote'), - (r'\"', String, 'string_dquote'), - include('builtin_symbols'), - include('numbers'), - include('keywords'), - include('punctuation'), - include('operators'), - include('valid_name'), - ], - 'root': [ - include('statements'), - # blocks: - (r'\{|\}', Punctuation), - #(r'\{', Punctuation, 'block'), - (r'.', Text), - ], - #'block': [ - # include('statements'), - # ('\{', Punctuation, '#push'), - # ('\}', Punctuation, '#pop') - #], - 'string_squote': [ - (r'([^\'\\]|\\.)*\'', String, '#pop'), - ], - 'string_dquote': [ - (r'([^"\\]|\\.)*"', String, '#pop'), - ], - } - - def analyse_text(text): - if re.search(r'[a-z0-9_\])\s]<-(?!-)', 
text): - return 0.11 - - -class BugsLexer(RegexLexer): - """ - Pygments Lexer for `OpenBugs `_ and WinBugs - models. - - *New in Pygments 1.6.* - """ - - name = 'BUGS' - aliases = ['bugs', 'winbugs', 'openbugs'] - filenames = ['*.bug'] - - _FUNCTIONS = [ - # Scalar functions - 'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh', - 'cloglog', 'cos', 'cosh', 'cumulative', 'cut', 'density', 'deviance', - 'equals', 'expr', 'gammap', 'ilogit', 'icloglog', 'integral', 'log', - 'logfact', 'loggam', 'logit', 'max', 'min', 'phi', 'post.p.value', - 'pow', 'prior.p.value', 'probit', 'replicate.post', 'replicate.prior', - 'round', 'sin', 'sinh', 'solution', 'sqrt', 'step', 'tan', 'tanh', - 'trunc', - # Vector functions - 'inprod', 'interp.lin', 'inverse', 'logdet', 'mean', 'eigen.vals', - 'ode', 'prod', 'p.valueM', 'rank', 'ranked', 'replicate.postM', - 'sd', 'sort', 'sum', - ## Special - 'D', 'I', 'F', 'T', 'C'] - """ OpenBUGS built-in functions - - From http://www.openbugs.info/Manuals/ModelSpecification.html#ContentsAII - - This also includes - - - T, C, I : Truncation and censoring. - ``T`` and ``C`` are in OpenBUGS. ``I`` in WinBUGS. 
- - D : ODE - - F : Functional http://www.openbugs.info/Examples/Functionals.html - - """ - - _DISTRIBUTIONS = ['dbern', 'dbin', 'dcat', 'dnegbin', 'dpois', - 'dhyper', 'dbeta', 'dchisqr', 'ddexp', 'dexp', - 'dflat', 'dgamma', 'dgev', 'df', 'dggamma', 'dgpar', - 'dloglik', 'dlnorm', 'dlogis', 'dnorm', 'dpar', - 'dt', 'dunif', 'dweib', 'dmulti', 'ddirch', 'dmnorm', - 'dmt', 'dwish'] - """ OpenBUGS built-in distributions - - Functions from - http://www.openbugs.info/Manuals/ModelSpecification.html#ContentsAI - """ - - - tokens = { - 'whitespace' : [ - (r"\s+", Text), - ], - 'comments' : [ - # Comments - (r'#.*$', Comment.Single), - ], - 'root': [ - # Comments - include('comments'), - include('whitespace'), - # Block start - (r'(model)(\s+)({)', - bygroups(Keyword.Namespace, Text, Punctuation)), - # Reserved Words - (r'(for|in)(?![0-9a-zA-Z\._])', Keyword.Reserved), - # Built-in Functions - (r'(%s)(?=\s*\()' - % r'|'.join(_FUNCTIONS + _DISTRIBUTIONS), - Name.Builtin), - # Regular variable names - (r'[A-Za-z][A-Za-z0-9_.]*', Name), - # Number Literals - (r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', Number), - # Punctuation - (r'\[|\]|\(|\)|:|,|;', Punctuation), - # Assignment operators - # SLexer makes these tokens Operators. - (r'<-|~', Operator), - # Infix and prefix operators - (r'\+|-|\*|/', Operator), - # Block - (r'[{}]', Punctuation), - ] - } - - def analyse_text(text): - if re.search(r"^\s*model\s*{", text, re.M): - return 0.7 - else: - return 0.0 - -class JagsLexer(RegexLexer): - """ - Pygments Lexer for JAGS. 
- - *New in Pygments 1.6.* - """ - - name = 'JAGS' - aliases = ['jags'] - filenames = ['*.jag', '*.bug'] - - ## JAGS - _FUNCTIONS = [ - 'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh', - 'cos', 'cosh', 'cloglog', - 'equals', 'exp', 'icloglog', 'ifelse', 'ilogit', 'log', 'logfact', - 'loggam', 'logit', 'phi', 'pow', 'probit', 'round', 'sin', 'sinh', - 'sqrt', 'step', 'tan', 'tanh', 'trunc', 'inprod', 'interp.lin', - 'logdet', 'max', 'mean', 'min', 'prod', 'sum', 'sd', 'inverse', - 'rank', 'sort', 't', 'acos', 'acosh', 'asin', 'asinh', 'atan', - # Truncation/Censoring (should I include) - 'T', 'I'] - # Distributions with density, probability and quartile functions - _DISTRIBUTIONS = ['[dpq]%s' % x for x in - ['bern', 'beta', 'dchiqsqr', 'ddexp', 'dexp', - 'df', 'gamma', 'gen.gamma', 'logis', 'lnorm', - 'negbin', 'nchisqr', 'norm', 'par', 'pois', 'weib']] - # Other distributions without density and probability - _OTHER_DISTRIBUTIONS = [ - 'dt', 'dunif', 'dbetabin', 'dbern', 'dbin', 'dcat', 'dhyper', - 'ddirch', 'dmnorm', 'dwish', 'dmt', 'dmulti', 'dbinom', 'dchisq', - 'dnbinom', 'dweibull', 'ddirich'] - - tokens = { - 'whitespace' : [ - (r"\s+", Text), - ], - 'names' : [ - # Regular variable names - (r'[a-zA-Z][a-zA-Z0-9_.]*\b', Name), - ], - 'comments' : [ - # do not use stateful comments - (r'(?s)/\*.*?\*/', Comment.Multiline), - # Comments - (r'#.*$', Comment.Single), - ], - 'root': [ - # Comments - include('comments'), - include('whitespace'), - # Block start - (r'(model|data)(\s+)({)', - bygroups(Keyword.Namespace, Text, Punctuation)), - (r'var(?![0-9a-zA-Z\._])', Keyword.Declaration), - # Reserved Words - (r'(for|in)(?![0-9a-zA-Z\._])', Keyword.Reserved), - # Builtins - # Need to use lookahead because . 
is a valid char - (r'(%s)(?=\s*\()' % r'|'.join(_FUNCTIONS - + _DISTRIBUTIONS - + _OTHER_DISTRIBUTIONS), - Name.Builtin), - # Names - include('names'), - # Number Literals - (r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', Number), - (r'\[|\]|\(|\)|:|,|;', Punctuation), - # Assignment operators - (r'<-|~', Operator), - # # JAGS includes many more than OpenBUGS - (r'\+|-|\*|\/|\|\|[&]{2}|[<>=]=?|\^|%.*?%', Operator), - (r'[{}]', Punctuation), - ] - } - - def analyse_text(text): - if re.search(r'^\s*model\s*\{', text, re.M): - if re.search(r'^\s*data\s*\{', text, re.M): - return 0.9 - elif re.search(r'^\s*var', text, re.M): - return 0.9 - else: - return 0.3 - else: - return 0 - -class StanLexer(RegexLexer): - """Pygments Lexer for Stan models. - - The Stan modeling language is specified in the *Stan 1.3.0 - Modeling Language Manual* `pdf - `_. - - *New in Pygments 1.6.* - """ - - name = 'Stan' - aliases = ['stan'] - filenames = ['*.stan'] - - tokens = { - 'whitespace' : [ - (r"\s+", Text), - ], - 'comments' : [ - (r'(?s)/\*.*?\*/', Comment.Multiline), - # Comments - (r'(//|#).*$', Comment.Single), - ], - 'root': [ - # Stan is more restrictive on strings than this regex - (r'"[^"]*"', String), - # Comments - include('comments'), - # block start - include('whitespace'), - # Block start - (r'(%s)(\s*)({)' % - r'|'.join(('data', r'transformed\s+?data', - 'parameters', r'transformed\s+parameters', - 'model', r'generated\s+quantities')), - bygroups(Keyword.Namespace, Text, Punctuation)), - # Reserved Words - (r'(%s)\b' % r'|'.join(_stan_builtins.KEYWORDS), Keyword), - # Truncation - (r'T(?=\s*\[)', Keyword), - # Data types - (r'(%s)\b' % r'|'.join(_stan_builtins.TYPES), Keyword.Type), - # Punctuation - (r"[;:,\[\]()]", Punctuation), - # Builtin - (r'(%s)(?=\s*\()' - % r'|'.join(_stan_builtins.FUNCTIONS - + _stan_builtins.DISTRIBUTIONS), - Name.Builtin), - # Special names ending in __, like lp__ - (r'[A-Za-z][A-Za-z0-9_]*__\b', Name.Builtin.Pseudo), - (r'(%s)\b' % 
r'|'.join(_stan_builtins.RESERVED), Keyword.Reserved), - # Regular variable names - (r'[A-Za-z][A-Za-z0-9_]*\b', Name), - # Real Literals - (r'-?[0-9]+(\.[0-9]+)?[eE]-?[0-9]+', Number.Float), - (r'-?[0-9]*\.[0-9]*', Number.Float), - # Integer Literals - (r'-?[0-9]+', Number.Integer), - # Assignment operators - # SLexer makes these tokens Operators. - (r'<-|~', Operator), - # Infix and prefix operators (and = ) - (r"\+|-|\.?\*|\.?/|\\|'|==?|!=?|<=?|>=?|\|\||&&", Operator), - # Block delimiters - (r'[{}]', Punctuation), - ] - } - - def analyse_text(text): - if re.search(r'^\s*parameters\s*\{', text, re.M): - return 1.0 - else: - return 0.0 - - -class IDLLexer(RegexLexer): - """ - Pygments Lexer for IDL (Interactive Data Language). - - *New in Pygments 1.6.* - """ - name = 'IDL' - aliases = ['idl'] - filenames = ['*.pro'] - mimetypes = ['text/idl'] - - _RESERVED = ['and', 'begin', 'break', 'case', 'common', 'compile_opt', - 'continue', 'do', 'else', 'end', 'endcase', 'elseelse', - 'endfor', 'endforeach', 'endif', 'endrep', 'endswitch', - 'endwhile', 'eq', 'for', 'foreach', 'forward_function', - 'function', 'ge', 'goto', 'gt', 'if', 'inherits', 'le', - 'lt', 'mod', 'ne', 'not', 'of', 'on_ioerror', 'or', 'pro', - 'repeat', 'switch', 'then', 'until', 'while', 'xor'] - """Reserved words from: http://www.exelisvis.com/docs/reswords.html""" - - _BUILTIN_LIB = ['abs', 'acos', 'adapt_hist_equal', 'alog', 'alog10', - 'amoeba', 'annotate', 'app_user_dir', 'app_user_dir_query', - 'arg_present', 'array_equal', 'array_indices', 'arrow', - 'ascii_template', 'asin', 'assoc', 'atan', 'axis', - 'a_correlate', 'bandpass_filter', 'bandreject_filter', - 'barplot', 'bar_plot', 'beseli', 'beselj', 'beselk', - 'besely', 'beta', 'bilinear', 'binary_template', 'bindgen', - 'binomial', 'bin_date', 'bit_ffs', 'bit_population', - 'blas_axpy', 'blk_con', 'box_cursor', 'breakpoint', - 'broyden', 'butterworth', 'bytarr', 'byte', 'byteorder', - 'bytscl', 'caldat', 'calendar', 'call_external', - 
'call_function', 'call_method', 'call_procedure', 'canny', - 'catch', 'cd', 'cdf_[0-9a-za-z_]*', 'ceil', 'chebyshev', - 'check_math', - 'chisqr_cvf', 'chisqr_pdf', 'choldc', 'cholsol', 'cindgen', - 'cir_3pnt', 'close', 'cluster', 'cluster_tree', 'clust_wts', - 'cmyk_convert', 'colorbar', 'colorize_sample', - 'colormap_applicable', 'colormap_gradient', - 'colormap_rotation', 'colortable', 'color_convert', - 'color_exchange', 'color_quan', 'color_range_map', 'comfit', - 'command_line_args', 'complex', 'complexarr', 'complexround', - 'compute_mesh_normals', 'cond', 'congrid', 'conj', - 'constrained_min', 'contour', 'convert_coord', 'convol', - 'convol_fft', 'coord2to3', 'copy_lun', 'correlate', 'cos', - 'cosh', 'cpu', 'cramer', 'create_cursor', 'create_struct', - 'create_view', 'crossp', 'crvlength', 'cti_test', - 'ct_luminance', 'cursor', 'curvefit', 'cvttobm', 'cv_coord', - 'cw_animate', 'cw_animate_getp', 'cw_animate_load', - 'cw_animate_run', 'cw_arcball', 'cw_bgroup', 'cw_clr_index', - 'cw_colorsel', 'cw_defroi', 'cw_field', 'cw_filesel', - 'cw_form', 'cw_fslider', 'cw_light_editor', - 'cw_light_editor_get', 'cw_light_editor_set', 'cw_orient', - 'cw_palette_editor', 'cw_palette_editor_get', - 'cw_palette_editor_set', 'cw_pdmenu', 'cw_rgbslider', - 'cw_tmpl', 'cw_zoom', 'c_correlate', 'dblarr', 'db_exists', - 'dcindgen', 'dcomplex', 'dcomplexarr', 'define_key', - 'define_msgblk', 'define_msgblk_from_file', 'defroi', - 'defsysv', 'delvar', 'dendrogram', 'dendro_plot', 'deriv', - 'derivsig', 'determ', 'device', 'dfpmin', 'diag_matrix', - 'dialog_dbconnect', 'dialog_message', 'dialog_pickfile', - 'dialog_printersetup', 'dialog_printjob', - 'dialog_read_image', 'dialog_write_image', 'digital_filter', - 'dilate', 'dindgen', 'dissolve', 'dist', 'distance_measure', - 'dlm_load', 'dlm_register', 'doc_library', 'double', - 'draw_roi', 'edge_dog', 'efont', 'eigenql', 'eigenvec', - 'ellipse', 'elmhes', 'emboss', 'empty', 'enable_sysrtn', - 'eof', 'eos_[0-9a-za-z_]*', 
'erase', 'erf', 'erfc', 'erfcx', - 'erode', 'errorplot', 'errplot', 'estimator_filter', - 'execute', 'exit', 'exp', 'expand', 'expand_path', 'expint', - 'extrac', 'extract_slice', 'factorial', 'fft', 'filepath', - 'file_basename', 'file_chmod', 'file_copy', 'file_delete', - 'file_dirname', 'file_expand_path', 'file_info', - 'file_lines', 'file_link', 'file_mkdir', 'file_move', - 'file_poll_input', 'file_readlink', 'file_same', - 'file_search', 'file_test', 'file_which', 'findgen', - 'finite', 'fix', 'flick', 'float', 'floor', 'flow3', - 'fltarr', 'flush', 'format_axis_values', 'free_lun', - 'fstat', 'fulstr', 'funct', 'fv_test', 'fx_root', - 'fz_roots', 'f_cvf', 'f_pdf', 'gamma', 'gamma_ct', - 'gauss2dfit', 'gaussfit', 'gaussian_function', 'gaussint', - 'gauss_cvf', 'gauss_pdf', 'gauss_smooth', 'getenv', - 'getwindows', 'get_drive_list', 'get_dxf_objects', - 'get_kbrd', 'get_login_info', 'get_lun', 'get_screen_size', - 'greg2jul', 'grib_[0-9a-za-z_]*', 'grid3', 'griddata', - 'grid_input', 'grid_tps', 'gs_iter', - 'h5[adfgirst]_[0-9a-za-z_]*', 'h5_browser', 'h5_close', - 'h5_create', 'h5_get_libversion', 'h5_open', 'h5_parse', - 'hanning', 'hash', 'hdf_[0-9a-za-z_]*', 'heap_free', - 'heap_gc', 'heap_nosave', 'heap_refcount', 'heap_save', - 'help', 'hilbert', 'histogram', 'hist_2d', 'hist_equal', - 'hls', 'hough', 'hqr', 'hsv', 'h_eq_ct', 'h_eq_int', - 'i18n_multibytetoutf8', 'i18n_multibytetowidechar', - 'i18n_utf8tomultibyte', 'i18n_widechartomultibyte', - 'ibeta', 'icontour', 'iconvertcoord', 'idelete', 'identity', - 'idlexbr_assistant', 'idlitsys_createtool', 'idl_base64', - 'idl_validname', 'iellipse', 'igamma', 'igetcurrent', - 'igetdata', 'igetid', 'igetproperty', 'iimage', 'image', - 'image_cont', 'image_statistics', 'imaginary', 'imap', - 'indgen', 'intarr', 'interpol', 'interpolate', - 'interval_volume', 'int_2d', 'int_3d', 'int_tabulated', - 'invert', 'ioctl', 'iopen', 'iplot', 'ipolygon', - 'ipolyline', 'iputdata', 'iregister', 'ireset', 'iresolve', - 
'irotate', 'ir_filter', 'isa', 'isave', 'iscale', - 'isetcurrent', 'isetproperty', 'ishft', 'isocontour', - 'isosurface', 'isurface', 'itext', 'itranslate', 'ivector', - 'ivolume', 'izoom', 'i_beta', 'journal', 'json_parse', - 'json_serialize', 'jul2greg', 'julday', 'keyword_set', - 'krig2d', 'kurtosis', 'kw_test', 'l64indgen', 'label_date', - 'label_region', 'ladfit', 'laguerre', 'laplacian', - 'la_choldc', 'la_cholmprove', 'la_cholsol', 'la_determ', - 'la_eigenproblem', 'la_eigenql', 'la_eigenvec', 'la_elmhes', - 'la_gm_linear_model', 'la_hqr', 'la_invert', - 'la_least_squares', 'la_least_square_equality', - 'la_linear_equation', 'la_ludc', 'la_lumprove', 'la_lusol', - 'la_svd', 'la_tridc', 'la_trimprove', 'la_triql', - 'la_trired', 'la_trisol', 'least_squares_filter', 'leefilt', - 'legend', 'legendre', 'linbcg', 'lindgen', 'linfit', - 'linkimage', 'list', 'll_arc_distance', 'lmfit', 'lmgr', - 'lngamma', 'lnp_test', 'loadct', 'locale_get', - 'logical_and', 'logical_or', 'logical_true', 'lon64arr', - 'lonarr', 'long', 'long64', 'lsode', 'ludc', 'lumprove', - 'lusol', 'lu_complex', 'machar', 'make_array', 'make_dll', - 'make_rt', 'map', 'mapcontinents', 'mapgrid', 'map_2points', - 'map_continents', 'map_grid', 'map_image', 'map_patch', - 'map_proj_forward', 'map_proj_image', 'map_proj_info', - 'map_proj_init', 'map_proj_inverse', 'map_set', - 'matrix_multiply', 'matrix_power', 'max', 'md_test', - 'mean', 'meanabsdev', 'mean_filter', 'median', 'memory', - 'mesh_clip', 'mesh_decimate', 'mesh_issolid', 'mesh_merge', - 'mesh_numtriangles', 'mesh_obj', 'mesh_smooth', - 'mesh_surfacearea', 'mesh_validate', 'mesh_volume', - 'message', 'min', 'min_curve_surf', 'mk_html_help', - 'modifyct', 'moment', 'morph_close', 'morph_distance', - 'morph_gradient', 'morph_hitormiss', 'morph_open', - 'morph_thin', 'morph_tophat', 'multi', 'm_correlate', - 'ncdf_[0-9a-za-z_]*', 'newton', 'noise_hurl', 'noise_pick', - 'noise_scatter', 'noise_slur', 'norm', 'n_elements', - 'n_params', 
'n_tags', 'objarr', 'obj_class', 'obj_destroy', - 'obj_hasmethod', 'obj_isa', 'obj_new', 'obj_valid', - 'online_help', 'on_error', 'open', 'oplot', 'oploterr', - 'parse_url', 'particle_trace', 'path_cache', 'path_sep', - 'pcomp', 'plot', 'plot3d', 'ploterr', 'plots', 'plot_3dbox', - 'plot_field', 'pnt_line', 'point_lun', 'polarplot', - 'polar_contour', 'polar_surface', 'poly', 'polyfill', - 'polyfillv', 'polygon', 'polyline', 'polyshade', 'polywarp', - 'poly_2d', 'poly_area', 'poly_fit', 'popd', 'powell', - 'pref_commit', 'pref_get', 'pref_set', 'prewitt', 'primes', - 'print', 'printd', 'product', 'profile', 'profiler', - 'profiles', 'project_vol', 'psafm', 'pseudo', - 'ps_show_fonts', 'ptrarr', 'ptr_free', 'ptr_new', - 'ptr_valid', 'pushd', 'p_correlate', 'qgrid3', 'qhull', - 'qromb', 'qromo', 'qsimp', 'query_ascii', 'query_bmp', - 'query_csv', 'query_dicom', 'query_gif', 'query_image', - 'query_jpeg', 'query_jpeg2000', 'query_mrsid', 'query_pict', - 'query_png', 'query_ppm', 'query_srf', 'query_tiff', - 'query_wav', 'radon', 'randomn', 'randomu', 'ranks', - 'rdpix', 'read', 'reads', 'readu', 'read_ascii', - 'read_binary', 'read_bmp', 'read_csv', 'read_dicom', - 'read_gif', 'read_image', 'read_interfile', 'read_jpeg', - 'read_jpeg2000', 'read_mrsid', 'read_pict', 'read_png', - 'read_ppm', 'read_spr', 'read_srf', 'read_sylk', - 'read_tiff', 'read_wav', 'read_wave', 'read_x11_bitmap', - 'read_xwd', 'real_part', 'rebin', 'recall_commands', - 'recon3', 'reduce_colors', 'reform', 'region_grow', - 'register_cursor', 'regress', 'replicate', - 'replicate_inplace', 'resolve_all', 'resolve_routine', - 'restore', 'retall', 'return', 'reverse', 'rk4', 'roberts', - 'rot', 'rotate', 'round', 'routine_filepath', - 'routine_info', 'rs_test', 'r_correlate', 'r_test', - 'save', 'savgol', 'scale3', 'scale3d', 'scope_level', - 'scope_traceback', 'scope_varfetch', 'scope_varname', - 'search2d', 'search3d', 'sem_create', 'sem_delete', - 'sem_lock', 'sem_release', 'setenv', 'set_plot', 
- 'set_shading', 'sfit', 'shade_surf', 'shade_surf_irr', - 'shade_volume', 'shift', 'shift_diff', 'shmdebug', 'shmmap', - 'shmunmap', 'shmvar', 'show3', 'showfont', 'simplex', 'sin', - 'sindgen', 'sinh', 'size', 'skewness', 'skip_lun', - 'slicer3', 'slide_image', 'smooth', 'sobel', 'socket', - 'sort', 'spawn', 'spher_harm', 'sph_4pnt', 'sph_scat', - 'spline', 'spline_p', 'spl_init', 'spl_interp', 'sprsab', - 'sprsax', 'sprsin', 'sprstp', 'sqrt', 'standardize', - 'stddev', 'stop', 'strarr', 'strcmp', 'strcompress', - 'streamline', 'stregex', 'stretch', 'string', 'strjoin', - 'strlen', 'strlowcase', 'strmatch', 'strmessage', 'strmid', - 'strpos', 'strput', 'strsplit', 'strtrim', 'struct_assign', - 'struct_hide', 'strupcase', 'surface', 'surfr', 'svdc', - 'svdfit', 'svsol', 'swap_endian', 'swap_endian_inplace', - 'symbol', 'systime', 's_test', 't3d', 'tag_names', 'tan', - 'tanh', 'tek_color', 'temporary', 'tetra_clip', - 'tetra_surface', 'tetra_volume', 'text', 'thin', 'threed', - 'timegen', 'time_test2', 'tm_test', 'total', 'trace', - 'transpose', 'triangulate', 'trigrid', 'triql', 'trired', - 'trisol', 'tri_surf', 'truncate_lun', 'ts_coef', 'ts_diff', - 'ts_fcast', 'ts_smooth', 'tv', 'tvcrs', 'tvlct', 'tvrd', - 'tvscl', 'typename', 't_cvt', 't_pdf', 'uindgen', 'uint', - 'uintarr', 'ul64indgen', 'ulindgen', 'ulon64arr', 'ulonarr', - 'ulong', 'ulong64', 'uniq', 'unsharp_mask', 'usersym', - 'value_locate', 'variance', 'vector', 'vector_field', 'vel', - 'velovect', 'vert_t3d', 'voigt', 'voronoi', 'voxel_proj', - 'wait', 'warp_tri', 'watershed', 'wdelete', 'wf_draw', - 'where', 'widget_base', 'widget_button', 'widget_combobox', - 'widget_control', 'widget_displaycontextmen', 'widget_draw', - 'widget_droplist', 'widget_event', 'widget_info', - 'widget_label', 'widget_list', 'widget_propertysheet', - 'widget_slider', 'widget_tab', 'widget_table', - 'widget_text', 'widget_tree', 'widget_tree_move', - 'widget_window', 'wiener_filter', 'window', 'writeu', - 'write_bmp', 
'write_csv', 'write_gif', 'write_image', - 'write_jpeg', 'write_jpeg2000', 'write_nrif', 'write_pict', - 'write_png', 'write_ppm', 'write_spr', 'write_srf', - 'write_sylk', 'write_tiff', 'write_wav', 'write_wave', - 'wset', 'wshow', 'wtn', 'wv_applet', 'wv_cwt', - 'wv_cw_wavelet', 'wv_denoise', 'wv_dwt', 'wv_fn_coiflet', - 'wv_fn_daubechies', 'wv_fn_gaussian', 'wv_fn_haar', - 'wv_fn_morlet', 'wv_fn_paul', 'wv_fn_symlet', - 'wv_import_data', 'wv_import_wavelet', 'wv_plot3d_wps', - 'wv_plot_multires', 'wv_pwt', 'wv_tool_denoise', - 'xbm_edit', 'xdisplayfile', 'xdxf', 'xfont', - 'xinteranimate', 'xloadct', 'xmanager', 'xmng_tmpl', - 'xmtool', 'xobjview', 'xobjview_rotate', - 'xobjview_write_image', 'xpalette', 'xpcolor', 'xplot3d', - 'xregistered', 'xroi', 'xsq_test', 'xsurface', 'xvaredit', - 'xvolume', 'xvolume_rotate', 'xvolume_write_image', - 'xyouts', 'zoom', 'zoom_24'] - """Functions from: http://www.exelisvis.com/docs/routines-1.html""" - - tokens = { - 'root': [ - (r'^\s*;.*?\n', Comment.Singleline), - (r'\b(' + '|'.join(_RESERVED) + r')\b', Keyword), - (r'\b(' + '|'.join(_BUILTIN_LIB) + r')\b', Name.Builtin), - (r'\+=|-=|\^=|\*=|/=|#=|##=|<=|>=|=', Operator), - (r'\+\+|--|->|\+|-|##|#|\*|/|<|>|&&|\^|~|\|\|\?|:', Operator), - (r'\b(mod=|lt=|le=|eq=|ne=|ge=|gt=|not=|and=|or=|xor=)', Operator), - (r'\b(mod|lt|le|eq|ne|ge|gt|not|and|or|xor)\b', Operator), - (r'\b[0-9](L|B|S|UL|ULL|LL)?\b', Number), - (r'.', Text), - ] - } - - -class RdLexer(RegexLexer): - """ - Pygments Lexer for R documentation (Rd) files - - This is a very minimal implementation, highlighting little more - than the macros. A description of Rd syntax is found in `Writing R - Extensions `_ - and `Parsing Rd files `_. - - *New in Pygments 1.6.* - """ - name = 'Rd' - aliases = ['rd'] - filenames = ['*.Rd'] - mimetypes = ['text/x-r-doc'] - - # To account for verbatim / LaTeX-like / and R-like areas - # would require parsing. 
- tokens = { - 'root' : [ - # catch escaped brackets and percent sign - (r'\\[\\{}%]', String.Escape), - # comments - (r'%.*$', Comment), - # special macros with no arguments - (r'\\(?:cr|l?dots|R|tab)\b', Keyword.Constant), - # macros - (r'\\[a-zA-Z]+\b', Keyword), - # special preprocessor macros - (r'^\s*#(?:ifn?def|endif).*\b', Comment.Preproc), - # non-escaped brackets - (r'[{}]', Name.Builtin), - # everything else - (r'[^\\%\n{}]+', Text), - (r'.', Text), - ] - } - - -class IgorLexer(RegexLexer): - """ - Pygments Lexer for Igor Pro procedure files (.ipf). - See http://www.wavemetrics.com/ and http://www.igorexchange.com/. - - *New in Pygments 1.7.* - """ - - name = 'Igor' - aliases = ['igor', 'igorpro'] - filenames = ['*.ipf'] - mimetypes = ['text/ipf'] - - flags = re.IGNORECASE - - flowControl = [ - 'if', 'else', 'elseif', 'endif', 'for', 'endfor', 'strswitch', 'switch', - 'case', 'endswitch', 'do', 'while', 'try', 'catch', 'endtry', 'break', - 'continue', 'return', - ] - types = [ - 'variable', 'string', 'constant', 'strconstant', 'NVAR', 'SVAR', 'WAVE', - 'STRUCT', 'ThreadSafe', 'function', 'end', 'static', 'macro', 'window', - 'graph', 'Structure', 'EndStructure', 'EndMacro', 'FuncFit', 'Proc', - 'Picture', 'Menu', 'SubMenu', 'Prompt', 'DoPrompt', - ] - operations = [ - 'Abort', 'AddFIFOData', 'AddFIFOVectData', 'AddMovieAudio', - 'AddMovieFrame', 'APMath', 'Append', 'AppendImage', - 'AppendLayoutObject', 'AppendMatrixContour', 'AppendText', - 'AppendToGraph', 'AppendToLayout', 'AppendToTable', 'AppendXYZContour', - 'AutoPositionWindow', 'BackgroundInfo', 'Beep', 'BoundingBall', - 'BrowseURL', 'BuildMenu', 'Button', 'cd', 'Chart', 'CheckBox', - 'CheckDisplayed', 'ChooseColor', 'Close', 'CloseMovie', 'CloseProc', - 'ColorScale', 'ColorTab2Wave', 'Concatenate', 'ControlBar', - 'ControlInfo', 'ControlUpdate', 'ConvexHull', 'Convolve', 'CopyFile', - 'CopyFolder', 'CopyScales', 'Correlate', 'CreateAliasShortcut', 'Cross', - 'CtrlBackground', 'CtrlFIFO', 
'CtrlNamedBackground', 'Cursor', - 'CurveFit', 'CustomControl', 'CWT', 'Debugger', 'DebuggerOptions', - 'DefaultFont', 'DefaultGuiControls', 'DefaultGuiFont', 'DefineGuide', - 'DelayUpdate', 'DeleteFile', 'DeleteFolder', 'DeletePoints', - 'Differentiate', 'dir', 'Display', 'DisplayHelpTopic', - 'DisplayProcedure', 'DoAlert', 'DoIgorMenu', 'DoUpdate', 'DoWindow', - 'DoXOPIdle', 'DrawAction', 'DrawArc', 'DrawBezier', 'DrawLine', - 'DrawOval', 'DrawPICT', 'DrawPoly', 'DrawRect', 'DrawRRect', 'DrawText', - 'DSPDetrend', 'DSPPeriodogram', 'Duplicate', 'DuplicateDataFolder', - 'DWT', 'EdgeStats', 'Edit', 'ErrorBars', 'Execute', 'ExecuteScriptText', - 'ExperimentModified', 'Extract', 'FastGaussTransform', 'FastOp', - 'FBinRead', 'FBinWrite', 'FFT', 'FIFO2Wave', 'FIFOStatus', 'FilterFIR', - 'FilterIIR', 'FindLevel', 'FindLevels', 'FindPeak', 'FindPointsInPoly', - 'FindRoots', 'FindSequence', 'FindValue', 'FPClustering', 'fprintf', - 'FReadLine', 'FSetPos', 'FStatus', 'FTPDelete', 'FTPDownload', - 'FTPUpload', 'FuncFit', 'FuncFitMD', 'GetAxis', 'GetFileFolderInfo', - 'GetLastUserMenuInfo', 'GetMarquee', 'GetSelection', 'GetWindow', - 'GraphNormal', 'GraphWaveDraw', 'GraphWaveEdit', 'Grep', 'GroupBox', - 'Hanning', 'HideIgorMenus', 'HideInfo', 'HideProcedures', 'HideTools', - 'HilbertTransform', 'Histogram', 'IFFT', 'ImageAnalyzeParticles', - 'ImageBlend', 'ImageBoundaryToMask', 'ImageEdgeDetection', - 'ImageFileInfo', 'ImageFilter', 'ImageFocus', 'ImageGenerateROIMask', - 'ImageHistModification', 'ImageHistogram', 'ImageInterpolate', - 'ImageLineProfile', 'ImageLoad', 'ImageMorphology', 'ImageRegistration', - 'ImageRemoveBackground', 'ImageRestore', 'ImageRotate', 'ImageSave', - 'ImageSeedFill', 'ImageSnake', 'ImageStats', 'ImageThreshold', - 'ImageTransform', 'ImageUnwrapPhase', 'ImageWindow', 'IndexSort', - 'InsertPoints', 'Integrate', 'IntegrateODE', 'Interp3DPath', - 'Interpolate3D', 'KillBackground', 'KillControl', 'KillDataFolder', - 'KillFIFO', 'KillFreeAxis', 
'KillPath', 'KillPICTs', 'KillStrings', - 'KillVariables', 'KillWaves', 'KillWindow', 'KMeans', 'Label', 'Layout', - 'Legend', 'LinearFeedbackShiftRegister', 'ListBox', 'LoadData', - 'LoadPackagePreferences', 'LoadPICT', 'LoadWave', 'Loess', - 'LombPeriodogram', 'Make', 'MakeIndex', 'MarkPerfTestTime', - 'MatrixConvolve', 'MatrixCorr', 'MatrixEigenV', 'MatrixFilter', - 'MatrixGaussJ', 'MatrixInverse', 'MatrixLinearSolve', - 'MatrixLinearSolveTD', 'MatrixLLS', 'MatrixLUBkSub', 'MatrixLUD', - 'MatrixMultiply', 'MatrixOP', 'MatrixSchur', 'MatrixSolve', - 'MatrixSVBkSub', 'MatrixSVD', 'MatrixTranspose', 'MeasureStyledText', - 'Modify', 'ModifyContour', 'ModifyControl', 'ModifyControlList', - 'ModifyFreeAxis', 'ModifyGraph', 'ModifyImage', 'ModifyLayout', - 'ModifyPanel', 'ModifyTable', 'ModifyWaterfall', 'MoveDataFolder', - 'MoveFile', 'MoveFolder', 'MoveString', 'MoveSubwindow', 'MoveVariable', - 'MoveWave', 'MoveWindow', 'NeuralNetworkRun', 'NeuralNetworkTrain', - 'NewDataFolder', 'NewFIFO', 'NewFIFOChan', 'NewFreeAxis', 'NewImage', - 'NewLayout', 'NewMovie', 'NewNotebook', 'NewPanel', 'NewPath', - 'NewWaterfall', 'Note', 'Notebook', 'NotebookAction', 'Open', - 'OpenNotebook', 'Optimize', 'ParseOperationTemplate', 'PathInfo', - 'PauseForUser', 'PauseUpdate', 'PCA', 'PlayMovie', 'PlayMovieAction', - 'PlaySnd', 'PlaySound', 'PopupContextualMenu', 'PopupMenu', - 'Preferences', 'PrimeFactors', 'Print', 'printf', 'PrintGraphs', - 'PrintLayout', 'PrintNotebook', 'PrintSettings', 'PrintTable', - 'Project', 'PulseStats', 'PutScrapText', 'pwd', 'Quit', - 'RatioFromNumber', 'Redimension', 'Remove', 'RemoveContour', - 'RemoveFromGraph', 'RemoveFromLayout', 'RemoveFromTable', 'RemoveImage', - 'RemoveLayoutObjects', 'RemovePath', 'Rename', 'RenameDataFolder', - 'RenamePath', 'RenamePICT', 'RenameWindow', 'ReorderImages', - 'ReorderTraces', 'ReplaceText', 'ReplaceWave', 'Resample', - 'ResumeUpdate', 'Reverse', 'Rotate', 'Save', 'SaveData', - 'SaveExperiment', 'SaveGraphCopy', 
'SaveNotebook', - 'SavePackagePreferences', 'SavePICT', 'SaveTableCopy', - 'SetActiveSubwindow', 'SetAxis', 'SetBackground', 'SetDashPattern', - 'SetDataFolder', 'SetDimLabel', 'SetDrawEnv', 'SetDrawLayer', - 'SetFileFolderInfo', 'SetFormula', 'SetIgorHook', 'SetIgorMenuMode', - 'SetIgorOption', 'SetMarquee', 'SetProcessSleep', 'SetRandomSeed', - 'SetScale', 'SetVariable', 'SetWaveLock', 'SetWindow', 'ShowIgorMenus', - 'ShowInfo', 'ShowTools', 'Silent', 'Sleep', 'Slider', 'Smooth', - 'SmoothCustom', 'Sort', 'SoundInRecord', 'SoundInSet', - 'SoundInStartChart', 'SoundInStatus', 'SoundInStopChart', - 'SphericalInterpolate', 'SphericalTriangulate', 'SplitString', - 'sprintf', 'sscanf', 'Stack', 'StackWindows', - 'StatsAngularDistanceTest', 'StatsANOVA1Test', 'StatsANOVA2NRTest', - 'StatsANOVA2RMTest', 'StatsANOVA2Test', 'StatsChiTest', - 'StatsCircularCorrelationTest', 'StatsCircularMeans', - 'StatsCircularMoments', 'StatsCircularTwoSampleTest', - 'StatsCochranTest', 'StatsContingencyTable', 'StatsDIPTest', - 'StatsDunnettTest', 'StatsFriedmanTest', 'StatsFTest', - 'StatsHodgesAjneTest', 'StatsJBTest', 'StatsKendallTauTest', - 'StatsKSTest', 'StatsKWTest', 'StatsLinearCorrelationTest', - 'StatsLinearRegression', 'StatsMultiCorrelationTest', - 'StatsNPMCTest', 'StatsNPNominalSRTest', 'StatsQuantiles', - 'StatsRankCorrelationTest', 'StatsResample', 'StatsSample', - 'StatsScheffeTest', 'StatsSignTest', 'StatsSRTest', 'StatsTTest', - 'StatsTukeyTest', 'StatsVariancesTest', 'StatsWatsonUSquaredTest', - 'StatsWatsonWilliamsTest', 'StatsWheelerWatsonTest', - 'StatsWilcoxonRankTest', 'StatsWRCorrelationTest', 'String', - 'StructGet', 'StructPut', 'TabControl', 'Tag', 'TextBox', 'Tile', - 'TileWindows', 'TitleBox', 'ToCommandLine', 'ToolsGrid', - 'Triangulate3d', 'Unwrap', 'ValDisplay', 'Variable', 'WaveMeanStdv', - 'WaveStats', 'WaveTransform', 'wfprintf', 'WignerTransform', - 'WindowFunction', - ] - functions = [ - 'abs', 'acos', 'acosh', 'AiryA', 'AiryAD', 'AiryB', 
'AiryBD', 'alog', - 'area', 'areaXY', 'asin', 'asinh', 'atan', 'atan2', 'atanh', - 'AxisValFromPixel', 'Besseli', 'Besselj', 'Besselk', 'Bessely', 'bessi', - 'bessj', 'bessk', 'bessy', 'beta', 'betai', 'BinarySearch', - 'BinarySearchInterp', 'binomial', 'binomialln', 'binomialNoise', 'cabs', - 'CaptureHistoryStart', 'ceil', 'cequal', 'char2num', 'chebyshev', - 'chebyshevU', 'CheckName', 'cmplx', 'cmpstr', 'conj', 'ContourZ', 'cos', - 'cosh', 'cot', 'CountObjects', 'CountObjectsDFR', 'cpowi', - 'CreationDate', 'csc', 'DataFolderExists', 'DataFolderRefsEqual', - 'DataFolderRefStatus', 'date2secs', 'datetime', 'DateToJulian', - 'Dawson', 'DDEExecute', 'DDEInitiate', 'DDEPokeString', 'DDEPokeWave', - 'DDERequestWave', 'DDEStatus', 'DDETerminate', 'deltax', 'digamma', - 'DimDelta', 'DimOffset', 'DimSize', 'ei', 'enoise', 'equalWaves', 'erf', - 'erfc', 'exists', 'exp', 'expInt', 'expNoise', 'factorial', 'fakedata', - 'faverage', 'faverageXY', 'FindDimLabel', 'FindListItem', 'floor', - 'FontSizeHeight', 'FontSizeStringWidth', 'FresnelCos', 'FresnelSin', - 'gamma', 'gammaInc', 'gammaNoise', 'gammln', 'gammp', 'gammq', 'Gauss', - 'Gauss1D', 'Gauss2D', 'gcd', 'GetDefaultFontSize', - 'GetDefaultFontStyle', 'GetKeyState', 'GetRTError', 'gnoise', - 'GrepString', 'hcsr', 'hermite', 'hermiteGauss', 'HyperG0F1', - 'HyperG1F1', 'HyperG2F1', 'HyperGNoise', 'HyperGPFQ', 'IgorVersion', - 'ilim', 'imag', 'Inf', 'Integrate1D', 'interp', 'Interp2D', 'Interp3D', - 'inverseERF', 'inverseERFC', 'ItemsInList', 'jlim', 'Laguerre', - 'LaguerreA', 'LaguerreGauss', 'leftx', 'LegendreA', 'limit', 'ln', - 'log', 'logNormalNoise', 'lorentzianNoise', 'magsqr', 'MandelbrotPoint', - 'MarcumQ', 'MatrixDet', 'MatrixDot', 'MatrixRank', 'MatrixTrace', 'max', - 'mean', 'min', 'mod', 'ModDate', 'NaN', 'norm', 'NumberByKey', - 'numpnts', 'numtype', 'NumVarOrDefault', 'NVAR_Exists', 'p2rect', - 'ParamIsDefault', 'pcsr', 'Pi', 'PixelFromAxisVal', 'pnt2x', - 'poissonNoise', 'poly', 'poly2D', 'PolygonArea', 
'qcsr', 'r2polar', - 'real', 'rightx', 'round', 'sawtooth', 'ScreenResolution', 'sec', - 'SelectNumber', 'sign', 'sin', 'sinc', 'sinh', 'SphericalBessJ', - 'SphericalBessJD', 'SphericalBessY', 'SphericalBessYD', - 'SphericalHarmonics', 'sqrt', 'StartMSTimer', 'StatsBetaCDF', - 'StatsBetaPDF', 'StatsBinomialCDF', 'StatsBinomialPDF', - 'StatsCauchyCDF', 'StatsCauchyPDF', 'StatsChiCDF', 'StatsChiPDF', - 'StatsCMSSDCDF', 'StatsCorrelation', 'StatsDExpCDF', 'StatsDExpPDF', - 'StatsErlangCDF', 'StatsErlangPDF', 'StatsErrorPDF', 'StatsEValueCDF', - 'StatsEValuePDF', 'StatsExpCDF', 'StatsExpPDF', 'StatsFCDF', - 'StatsFPDF', 'StatsFriedmanCDF', 'StatsGammaCDF', 'StatsGammaPDF', - 'StatsGeometricCDF', 'StatsGeometricPDF', 'StatsHyperGCDF', - 'StatsHyperGPDF', 'StatsInvBetaCDF', 'StatsInvBinomialCDF', - 'StatsInvCauchyCDF', 'StatsInvChiCDF', 'StatsInvCMSSDCDF', - 'StatsInvDExpCDF', 'StatsInvEValueCDF', 'StatsInvExpCDF', - 'StatsInvFCDF', 'StatsInvFriedmanCDF', 'StatsInvGammaCDF', - 'StatsInvGeometricCDF', 'StatsInvKuiperCDF', 'StatsInvLogisticCDF', - 'StatsInvLogNormalCDF', 'StatsInvMaxwellCDF', 'StatsInvMooreCDF', - 'StatsInvNBinomialCDF', 'StatsInvNCChiCDF', 'StatsInvNCFCDF', - 'StatsInvNormalCDF', 'StatsInvParetoCDF', 'StatsInvPoissonCDF', - 'StatsInvPowerCDF', 'StatsInvQCDF', 'StatsInvQpCDF', - 'StatsInvRayleighCDF', 'StatsInvRectangularCDF', 'StatsInvSpearmanCDF', - 'StatsInvStudentCDF', 'StatsInvTopDownCDF', 'StatsInvTriangularCDF', - 'StatsInvUsquaredCDF', 'StatsInvVonMisesCDF', 'StatsInvWeibullCDF', - 'StatsKuiperCDF', 'StatsLogisticCDF', 'StatsLogisticPDF', - 'StatsLogNormalCDF', 'StatsLogNormalPDF', 'StatsMaxwellCDF', - 'StatsMaxwellPDF', 'StatsMedian', 'StatsMooreCDF', 'StatsNBinomialCDF', - 'StatsNBinomialPDF', 'StatsNCChiCDF', 'StatsNCChiPDF', 'StatsNCFCDF', - 'StatsNCFPDF', 'StatsNCTCDF', 'StatsNCTPDF', 'StatsNormalCDF', - 'StatsNormalPDF', 'StatsParetoCDF', 'StatsParetoPDF', 'StatsPermute', - 'StatsPoissonCDF', 'StatsPoissonPDF', 'StatsPowerCDF', - 
'StatsPowerNoise', 'StatsPowerPDF', 'StatsQCDF', 'StatsQpCDF', - 'StatsRayleighCDF', 'StatsRayleighPDF', 'StatsRectangularCDF', - 'StatsRectangularPDF', 'StatsRunsCDF', 'StatsSpearmanRhoCDF', - 'StatsStudentCDF', 'StatsStudentPDF', 'StatsTopDownCDF', - 'StatsTriangularCDF', 'StatsTriangularPDF', 'StatsTrimmedMean', - 'StatsUSquaredCDF', 'StatsVonMisesCDF', 'StatsVonMisesNoise', - 'StatsVonMisesPDF', 'StatsWaldCDF', 'StatsWaldPDF', 'StatsWeibullCDF', - 'StatsWeibullPDF', 'StopMSTimer', 'str2num', 'stringCRC', 'stringmatch', - 'strlen', 'strsearch', 'StudentA', 'StudentT', 'sum', 'SVAR_Exists', - 'TagVal', 'tan', 'tanh', 'ThreadGroupCreate', 'ThreadGroupRelease', - 'ThreadGroupWait', 'ThreadProcessorCount', 'ThreadReturnValue', 'ticks', - 'trunc', 'Variance', 'vcsr', 'WaveCRC', 'WaveDims', 'WaveExists', - 'WaveMax', 'WaveMin', 'WaveRefsEqual', 'WaveType', 'WhichListItem', - 'WinType', 'WNoise', 'x', 'x2pnt', 'xcsr', 'y', 'z', 'zcsr', 'ZernikeR', - ] - functions += [ - 'AddListItem', 'AnnotationInfo', 'AnnotationList', 'AxisInfo', - 'AxisList', 'CaptureHistory', 'ChildWindowList', 'CleanupName', - 'ContourInfo', 'ContourNameList', 'ControlNameList', 'CsrInfo', - 'CsrWave', 'CsrXWave', 'CTabList', 'DataFolderDir', 'date', - 'DDERequestString', 'FontList', 'FuncRefInfo', 'FunctionInfo', - 'FunctionList', 'FunctionPath', 'GetDataFolder', 'GetDefaultFont', - 'GetDimLabel', 'GetErrMessage', 'GetFormula', - 'GetIndependentModuleName', 'GetIndexedObjName', 'GetIndexedObjNameDFR', - 'GetRTErrMessage', 'GetRTStackInfo', 'GetScrapText', 'GetUserData', - 'GetWavesDataFolder', 'GrepList', 'GuideInfo', 'GuideNameList', 'Hash', - 'IgorInfo', 'ImageInfo', 'ImageNameList', 'IndexedDir', 'IndexedFile', - 'JulianToDate', 'LayoutInfo', 'ListMatch', 'LowerStr', 'MacroList', - 'NameOfWave', 'note', 'num2char', 'num2istr', 'num2str', - 'OperationList', 'PadString', 'ParseFilePath', 'PathList', 'PICTInfo', - 'PICTList', 'PossiblyQuoteName', 'ProcedureText', 'RemoveByKey', - 'RemoveEnding', 
'RemoveFromList', 'RemoveListItem', - 'ReplaceNumberByKey', 'ReplaceString', 'ReplaceStringByKey', - 'Secs2Date', 'Secs2Time', 'SelectString', 'SortList', - 'SpecialCharacterInfo', 'SpecialCharacterList', 'SpecialDirPath', - 'StringByKey', 'StringFromList', 'StringList', 'StrVarOrDefault', - 'TableInfo', 'TextFile', 'ThreadGroupGetDF', 'time', 'TraceFromPixel', - 'TraceInfo', 'TraceNameList', 'UniqueName', 'UnPadString', 'UpperStr', - 'VariableList', 'WaveInfo', 'WaveList', 'WaveName', 'WaveUnits', - 'WinList', 'WinName', 'WinRecreation', 'XWaveName', - 'ContourNameToWaveRef', 'CsrWaveRef', 'CsrXWaveRef', - 'ImageNameToWaveRef', 'NewFreeWave', 'TagWaveRef', 'TraceNameToWaveRef', - 'WaveRefIndexed', 'XWaveRefFromTrace', 'GetDataFolderDFR', - 'GetWavesDataFolderDFR', 'NewFreeDataFolder', 'ThreadGroupGetDFR', - ] - - tokens = { - 'root': [ - (r'//.*$', Comment.Single), - (r'"([^"\\]|\\.)*"', String), - # Flow Control. - (r'\b(%s)\b' % '|'.join(flowControl), Keyword), - # Types. - (r'\b(%s)\b' % '|'.join(types), Keyword.Type), - # Built-in operations. - (r'\b(%s)\b' % '|'.join(operations), Name.Class), - # Built-in functions. - (r'\b(%s)\b' % '|'.join(functions), Name.Function), - # Compiler directives. - (r'^#(include|pragma|define|ifdef|ifndef|endif)', - Name.Decorator), - (r'[^a-zA-Z"/]+', Text), - (r'.', Text), - ], - } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/other.py b/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/other.py deleted file mode 100644 index 04e426c..0000000 --- a/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/other.py +++ /dev/null @@ -1,3778 +0,0 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.other - ~~~~~~~~~~~~~~~~~~~~~ - - Lexers for other languages. - - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. 
-""" - -import re - -from pygments.lexer import RegexLexer, include, bygroups, using, \ - this, combined, ExtendedRegexLexer -from pygments.token import Error, Punctuation, Literal, Token, \ - Text, Comment, Operator, Keyword, Name, String, Number, Generic, \ - Whitespace -from pygments.util import get_bool_opt -from pygments.lexers.web import HtmlLexer - -from pygments.lexers._openedgebuiltins import OPENEDGEKEYWORDS -from pygments.lexers._robotframeworklexer import RobotFrameworkLexer - -# backwards compatibility -from pygments.lexers.sql import SqlLexer, MySqlLexer, SqliteConsoleLexer -from pygments.lexers.shell import BashLexer, BashSessionLexer, BatchLexer, \ - TcshLexer - -__all__ = ['BrainfuckLexer', 'BefungeLexer', 'RedcodeLexer', 'MOOCodeLexer', - 'SmalltalkLexer', 'LogtalkLexer', 'GnuplotLexer', 'PovrayLexer', - 'AppleScriptLexer', 'ModelicaLexer', 'RebolLexer', 'ABAPLexer', - 'NewspeakLexer', 'GherkinLexer', 'AsymptoteLexer', 'PostScriptLexer', - 'AutohotkeyLexer', 'GoodDataCLLexer', 'MaqlLexer', 'ProtoBufLexer', - 'HybrisLexer', 'AwkLexer', 'Cfengine3Lexer', 'SnobolLexer', - 'ECLLexer', 'UrbiscriptLexer', 'OpenEdgeLexer', 'BroLexer', - 'MscgenLexer', 'KconfigLexer', 'VGLLexer', 'SourcePawnLexer', - 'RobotFrameworkLexer', 'PuppetLexer', 'NSISLexer', 'RPMSpecLexer', - 'CbmBasicV2Lexer', 'AutoItLexer', 'RexxLexer'] - - -class ECLLexer(RegexLexer): - """ - Lexer for the declarative big-data `ECL - `_ - language. 
- - *New in Pygments 1.5.* - """ - - name = 'ECL' - aliases = ['ecl'] - filenames = ['*.ecl'] - mimetypes = ['application/x-ecl'] - - flags = re.IGNORECASE | re.MULTILINE - - tokens = { - 'root': [ - include('whitespace'), - include('statements'), - ], - 'whitespace': [ - (r'\s+', Text), - (r'\/\/.*', Comment.Single), - (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline), - ], - 'statements': [ - include('types'), - include('keywords'), - include('functions'), - include('hash'), - (r'"', String, 'string'), - (r'\'', String, 'string'), - (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float), - (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), - (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex), - (r'0[0-7]+[LlUu]*', Number.Oct), - (r'\d+[LlUu]*', Number.Integer), - (r'\*/', Error), - (r'[~!%^&*+=|?:<>/-]+', Operator), - (r'[{}()\[\],.;]', Punctuation), - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name), - ], - 'hash': [ - (r'^#.*$', Comment.Preproc), - ], - 'types': [ - (r'(RECORD|END)\D', Keyword.Declaration), - (r'((?:ASCII|BIG_ENDIAN|BOOLEAN|DATA|DECIMAL|EBCDIC|INTEGER|PATTERN|' - r'QSTRING|REAL|RECORD|RULE|SET OF|STRING|TOKEN|UDECIMAL|UNICODE|' - r'UNSIGNED|VARSTRING|VARUNICODE)\d*)(\s+)', - bygroups(Keyword.Type, Text)), - ], - 'keywords': [ - (r'(APPLY|ASSERT|BUILD|BUILDINDEX|EVALUATE|FAIL|KEYDIFF|KEYPATCH|' - r'LOADXML|NOTHOR|NOTIFY|OUTPUT|PARALLEL|SEQUENTIAL|SOAPCALL|WAIT' - r'CHECKPOINT|DEPRECATED|FAILCODE|FAILMESSAGE|FAILURE|GLOBAL|' - r'INDEPENDENT|ONWARNING|PERSIST|PRIORITY|RECOVERY|STORED|SUCCESS|' - r'WAIT|WHEN)\b', Keyword.Reserved), - # These are classed differently, check later - (r'(ALL|AND|ANY|AS|ATMOST|BEFORE|BEGINC\+\+|BEST|BETWEEN|CASE|CONST|' - r'COUNTER|CSV|DESCEND|ENCRYPT|ENDC\+\+|ENDMACRO|EXCEPT|EXCLUSIVE|' - r'EXPIRE|EXPORT|EXTEND|FALSE|FEW|FIRST|FLAT|FULL|FUNCTION|GROUP|' - r'HEADER|HEADING|HOLE|IFBLOCK|IMPORT|IN|JOINED|KEEP|KEYED|LAST|' - r'LEFT|LIMIT|LOAD|LOCAL|LOCALE|LOOKUP|MACRO|MANY|MAXCOUNT|' - r'MAXLENGTH|MIN 
SKEW|MODULE|INTERFACE|NAMED|NOCASE|NOROOT|NOSCAN|' - r'NOSORT|NOT|OF|ONLY|OPT|OR|OUTER|OVERWRITE|PACKED|PARTITION|' - r'PENALTY|PHYSICALLENGTH|PIPE|QUOTE|RELATIONSHIP|REPEAT|RETURN|' - r'RIGHT|SCAN|SELF|SEPARATOR|SERVICE|SHARED|SKEW|SKIP|SQL|STORE|' - r'TERMINATOR|THOR|THRESHOLD|TOKEN|TRANSFORM|TRIM|TRUE|TYPE|' - r'UNICODEORDER|UNSORTED|VALIDATE|VIRTUAL|WHOLE|WILD|WITHIN|XML|' - r'XPATH|__COMPRESSED__)\b', Keyword.Reserved), - ], - 'functions': [ - (r'(ABS|ACOS|ALLNODES|ASCII|ASIN|ASSTRING|ATAN|ATAN2|AVE|CASE|' - r'CHOOSE|CHOOSEN|CHOOSESETS|CLUSTERSIZE|COMBINE|CORRELATION|COS|' - r'COSH|COUNT|COVARIANCE|CRON|DATASET|DEDUP|DEFINE|DENORMALIZE|' - r'DISTRIBUTE|DISTRIBUTED|DISTRIBUTION|EBCDIC|ENTH|ERROR|EVALUATE|' - r'EVENT|EVENTEXTRA|EVENTNAME|EXISTS|EXP|FAILCODE|FAILMESSAGE|' - r'FETCH|FROMUNICODE|GETISVALID|GLOBAL|GRAPH|GROUP|HASH|HASH32|' - r'HASH64|HASHCRC|HASHMD5|HAVING|IF|INDEX|INTFORMAT|ISVALID|' - r'ITERATE|JOIN|KEYUNICODE|LENGTH|LIBRARY|LIMIT|LN|LOCAL|LOG|LOOP|' - r'MAP|MATCHED|MATCHLENGTH|MATCHPOSITION|MATCHTEXT|MATCHUNICODE|' - r'MAX|MERGE|MERGEJOIN|MIN|NOLOCAL|NONEMPTY|NORMALIZE|PARSE|PIPE|' - r'POWER|PRELOAD|PROCESS|PROJECT|PULL|RANDOM|RANGE|RANK|RANKED|' - r'REALFORMAT|RECORDOF|REGEXFIND|REGEXREPLACE|REGROUP|REJECTED|' - r'ROLLUP|ROUND|ROUNDUP|ROW|ROWDIFF|SAMPLE|SET|SIN|SINH|SIZEOF|' - r'SOAPCALL|SORT|SORTED|SQRT|STEPPED|STORED|SUM|TABLE|TAN|TANH|' - r'THISNODE|TOPN|TOUNICODE|TRANSFER|TRIM|TRUNCATE|TYPEOF|UNGROUP|' - r'UNICODEORDER|VARIANCE|WHICH|WORKUNIT|XMLDECODE|XMLENCODE|' - r'XMLTEXT|XMLUNICODE)\b', Name.Function), - ], - 'string': [ - (r'"', String, '#pop'), - (r'\'', String, '#pop'), - (r'[^"\']+', String), - ], - } - - -class BrainfuckLexer(RegexLexer): - """ - Lexer for the esoteric `BrainFuck `_ - language. 
- """ - - name = 'Brainfuck' - aliases = ['brainfuck', 'bf'] - filenames = ['*.bf', '*.b'] - mimetypes = ['application/x-brainfuck'] - - tokens = { - 'common': [ - # use different colors for different instruction types - (r'[.,]+', Name.Tag), - (r'[+-]+', Name.Builtin), - (r'[<>]+', Name.Variable), - (r'[^.,+\-<>\[\]]+', Comment), - ], - 'root': [ - (r'\[', Keyword, 'loop'), - (r'\]', Error), - include('common'), - ], - 'loop': [ - (r'\[', Keyword, '#push'), - (r'\]', Keyword, '#pop'), - include('common'), - ] - } - - -class BefungeLexer(RegexLexer): - """ - Lexer for the esoteric `Befunge `_ - language. - - *New in Pygments 0.7.* - """ - name = 'Befunge' - aliases = ['befunge'] - filenames = ['*.befunge'] - mimetypes = ['application/x-befunge'] - - tokens = { - 'root': [ - (r'[0-9a-f]', Number), - (r'[\+\*/%!`-]', Operator), # Traditional math - (r'[<>^v?\[\]rxjk]', Name.Variable), # Move, imperatives - (r'[:\\$.,n]', Name.Builtin), # Stack ops, imperatives - (r'[|_mw]', Keyword), - (r'[{}]', Name.Tag), # Befunge-98 stack ops - (r'".*?"', String.Double), # Strings don't appear to allow escapes - (r'\'.', String.Single), # Single character - (r'[#;]', Comment), # Trampoline... depends on direction hit - (r'[pg&~=@iotsy]', Keyword), # Misc - (r'[()A-Z]', Comment), # Fingerprints - (r'\s+', Text), # Whitespace doesn't matter - ], - } - - -class RedcodeLexer(RegexLexer): - """ - A simple Redcode lexer based on ICWS'94. - Contributed by Adam Blinkinsop . 
- - *New in Pygments 0.8.* - """ - name = 'Redcode' - aliases = ['redcode'] - filenames = ['*.cw'] - - opcodes = ['DAT','MOV','ADD','SUB','MUL','DIV','MOD', - 'JMP','JMZ','JMN','DJN','CMP','SLT','SPL', - 'ORG','EQU','END'] - modifiers = ['A','B','AB','BA','F','X','I'] - - tokens = { - 'root': [ - # Whitespace: - (r'\s+', Text), - (r';.*$', Comment.Single), - # Lexemes: - # Identifiers - (r'\b(%s)\b' % '|'.join(opcodes), Name.Function), - (r'\b(%s)\b' % '|'.join(modifiers), Name.Decorator), - (r'[A-Za-z_][A-Za-z_0-9]+', Name), - # Operators - (r'[-+*/%]', Operator), - (r'[#$@<>]', Operator), # mode - (r'[.,]', Punctuation), # mode - # Numbers - (r'[-+]?\d+', Number.Integer), - ], - } - - -class MOOCodeLexer(RegexLexer): - """ - For `MOOCode `_ (the MOO scripting - language). - - *New in Pygments 0.9.* - """ - name = 'MOOCode' - filenames = ['*.moo'] - aliases = ['moocode', 'moo'] - mimetypes = ['text/x-moocode'] - - tokens = { - 'root' : [ - # Numbers - (r'(0|[1-9][0-9_]*)', Number.Integer), - # Strings - (r'"(\\\\|\\"|[^"])*"', String), - # exceptions - (r'(E_PERM|E_DIV)', Name.Exception), - # db-refs - (r'((#[-0-9]+)|(\$[a-z_A-Z0-9]+))', Name.Entity), - # Keywords - (r'\b(if|else|elseif|endif|for|endfor|fork|endfork|while' - r'|endwhile|break|continue|return|try' - r'|except|endtry|finally|in)\b', Keyword), - # builtins - (r'(random|length)', Name.Builtin), - # special variables - (r'(player|caller|this|args)', Name.Variable.Instance), - # skip whitespace - (r'\s+', Text), - (r'\n', Text), - # other operators - (r'([!;=,{}&\|:\.\[\]@\(\)\<\>\?]+)', Operator), - # function call - (r'([a-z_A-Z0-9]+)(\()', bygroups(Name.Function, Operator)), - # variables - (r'([a-zA-Z_0-9]+)', Text), - ] - } - - -class SmalltalkLexer(RegexLexer): - """ - For `Smalltalk `_ syntax. - Contributed by Stefan Matthias Aust. - Rewritten by Nils Winter. 
- - *New in Pygments 0.10.* - """ - name = 'Smalltalk' - filenames = ['*.st'] - aliases = ['smalltalk', 'squeak', 'st'] - mimetypes = ['text/x-smalltalk'] - - tokens = { - 'root' : [ - (r'(<)(\w+:)(.*?)(>)', bygroups(Text, Keyword, Text, Text)), - include('squeak fileout'), - include('whitespaces'), - include('method definition'), - (r'(\|)([\w\s]*)(\|)', bygroups(Operator, Name.Variable, Operator)), - include('objects'), - (r'\^|\:=|\_', Operator), - # temporaries - (r'[\]({}.;!]', Text), - ], - 'method definition' : [ - # Not perfect can't allow whitespaces at the beginning and the - # without breaking everything - (r'([a-zA-Z]+\w*:)(\s*)(\w+)', - bygroups(Name.Function, Text, Name.Variable)), - (r'^(\b[a-zA-Z]+\w*\b)(\s*)$', bygroups(Name.Function, Text)), - (r'^([-+*/\\~<>=|&!?,@%]+)(\s*)(\w+)(\s*)$', - bygroups(Name.Function, Text, Name.Variable, Text)), - ], - 'blockvariables' : [ - include('whitespaces'), - (r'(:)(\s*)(\w+)', - bygroups(Operator, Text, Name.Variable)), - (r'\|', Operator, '#pop'), - (r'', Text, '#pop'), # else pop - ], - 'literals' : [ - (r"'(''|[^'])*'", String, 'afterobject'), - (r'\$.', String.Char, 'afterobject'), - (r'#\(', String.Symbol, 'parenth'), - (r'\)', Text, 'afterobject'), - (r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number, 'afterobject'), - ], - '_parenth_helper' : [ - include('whitespaces'), - (r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number), - (r'[-+*/\\~<>=|&#!?,@%\w:]+', String.Symbol), - # literals - (r"'(''|[^'])*'", String), - (r'\$.', String.Char), - (r'#*\(', String.Symbol, 'inner_parenth'), - ], - 'parenth' : [ - # This state is a bit tricky since - # we can't just pop this state - (r'\)', String.Symbol, ('root', 'afterobject')), - include('_parenth_helper'), - ], - 'inner_parenth': [ - (r'\)', String.Symbol, '#pop'), - include('_parenth_helper'), - ], - 'whitespaces' : [ - # skip whitespace and comments - (r'\s+', Text), - (r'"(""|[^"])*"', Comment), - ], - 'objects' : [ - (r'\[', Text, 'blockvariables'), - (r'\]', Text, 
'afterobject'), - (r'\b(self|super|true|false|nil|thisContext)\b', - Name.Builtin.Pseudo, 'afterobject'), - (r'\b[A-Z]\w*(?!:)\b', Name.Class, 'afterobject'), - (r'\b[a-z]\w*(?!:)\b', Name.Variable, 'afterobject'), - (r'#("(""|[^"])*"|[-+*/\\~<>=|&!?,@%]+|[\w:]+)', - String.Symbol, 'afterobject'), - include('literals'), - ], - 'afterobject' : [ - (r'! !$', Keyword , '#pop'), # squeak chunk delimiter - include('whitespaces'), - (r'\b(ifTrue:|ifFalse:|whileTrue:|whileFalse:|timesRepeat:)', - Name.Builtin, '#pop'), - (r'\b(new\b(?!:))', Name.Builtin), - (r'\:=|\_', Operator, '#pop'), - (r'\b[a-zA-Z]+\w*:', Name.Function, '#pop'), - (r'\b[a-zA-Z]+\w*', Name.Function), - (r'\w+:?|[-+*/\\~<>=|&!?,@%]+', Name.Function, '#pop'), - (r'\.', Punctuation, '#pop'), - (r';', Punctuation), - (r'[\])}]', Text), - (r'[\[({]', Text, '#pop'), - ], - 'squeak fileout' : [ - # Squeak fileout format (optional) - (r'^"(""|[^"])*"!', Keyword), - (r"^'(''|[^'])*'!", Keyword), - (r'^(!)(\w+)( commentStamp: )(.*?)( prior: .*?!\n)(.*?)(!)', - bygroups(Keyword, Name.Class, Keyword, String, Keyword, Text, Keyword)), - (r"^(!)(\w+(?: class)?)( methodsFor: )('(?:''|[^'])*')(.*?!)", - bygroups(Keyword, Name.Class, Keyword, String, Keyword)), - (r'^(\w+)( subclass: )(#\w+)' - r'(\s+instanceVariableNames: )(.*?)' - r'(\s+classVariableNames: )(.*?)' - r'(\s+poolDictionaries: )(.*?)' - r'(\s+category: )(.*?)(!)', - bygroups(Name.Class, Keyword, String.Symbol, Keyword, String, Keyword, - String, Keyword, String, Keyword, String, Keyword)), - (r'^(\w+(?: class)?)(\s+instanceVariableNames: )(.*?)(!)', - bygroups(Name.Class, Keyword, String, Keyword)), - (r'(!\n)(\].*)(! !)$', bygroups(Keyword, Text, Keyword)), - (r'! !$', Keyword), - ], - } - - -class LogtalkLexer(RegexLexer): - """ - For `Logtalk `_ source code. 
- - *New in Pygments 0.10.* - """ - - name = 'Logtalk' - aliases = ['logtalk'] - filenames = ['*.lgt'] - mimetypes = ['text/x-logtalk'] - - tokens = { - 'root': [ - # Directives - (r'^\s*:-\s',Punctuation,'directive'), - # Comments - (r'%.*?\n', Comment), - (r'/\*(.|\n)*?\*/',Comment), - # Whitespace - (r'\n', Text), - (r'\s+', Text), - # Numbers - (r"0'.", Number), - (r'0b[01]+', Number), - (r'0o[0-7]+', Number), - (r'0x[0-9a-fA-F]+', Number), - (r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number), - # Variables - (r'([A-Z_][a-zA-Z0-9_]*)', Name.Variable), - # Event handlers - (r'(after|before)(?=[(])', Keyword), - # Execution-context methods - (r'(parameter|this|se(lf|nder))(?=[(])', Keyword), - # Reflection - (r'(current_predicate|predicate_property)(?=[(])', Keyword), - # DCGs and term expansion - (r'(expand_(goal|term)|(goal|term)_expansion|phrase)(?=[(])', - Keyword), - # Entity - (r'(abolish|c(reate|urrent))_(object|protocol|category)(?=[(])', - Keyword), - (r'(object|protocol|category)_property(?=[(])', Keyword), - # Entity relations - (r'co(mplements_object|nforms_to_protocol)(?=[(])', Keyword), - (r'extends_(object|protocol|category)(?=[(])', Keyword), - (r'imp(lements_protocol|orts_category)(?=[(])', Keyword), - (r'(instantiat|specializ)es_class(?=[(])', Keyword), - # Events - (r'(current_event|(abolish|define)_events)(?=[(])', Keyword), - # Flags - (r'(current|set)_logtalk_flag(?=[(])', Keyword), - # Compiling, loading, and library paths - (r'logtalk_(compile|l(ibrary_path|oad_context|oad))(?=[(])', - Keyword), - # Database - (r'(clause|retract(all)?)(?=[(])', Keyword), - (r'a(bolish|ssert(a|z))(?=[(])', Keyword), - # Control constructs - (r'(ca(ll|tch)|throw)(?=[(])', Keyword), - (r'(fail|true)\b', Keyword), - # All solutions - (r'((bag|set)of|f(ind|or)all)(?=[(])', Keyword), - # Multi-threading meta-predicates - (r'threaded(_(call|once|ignore|exit|peek|wait|notify))?(?=[(])', - Keyword), - # Term unification - (r'unify_with_occurs_check(?=[(])', Keyword), - # 
Term creation and decomposition - (r'(functor|arg|copy_term|numbervars)(?=[(])', Keyword), - # Evaluable functors - (r'(rem|mod|abs|sign)(?=[(])', Keyword), - (r'float(_(integer|fractional)_part)?(?=[(])', Keyword), - (r'(floor|truncate|round|ceiling)(?=[(])', Keyword), - # Other arithmetic functors - (r'(cos|atan|exp|log|s(in|qrt))(?=[(])', Keyword), - # Term testing - (r'(var|atom(ic)?|integer|float|c(allable|ompound)|n(onvar|umber)|' - r'ground)(?=[(])', Keyword), - # Term comparison - (r'compare(?=[(])', Keyword), - # Stream selection and control - (r'(curren|se)t_(in|out)put(?=[(])', Keyword), - (r'(open|close)(?=[(])', Keyword), - (r'flush_output(?=[(])', Keyword), - (r'(at_end_of_stream|flush_output)\b', Keyword), - (r'(stream_property|at_end_of_stream|set_stream_position)(?=[(])', - Keyword), - # Character and byte input/output - (r'(nl|(get|peek|put)_(byte|c(har|ode)))(?=[(])', Keyword), - (r'\bnl\b', Keyword), - # Term input/output - (r'read(_term)?(?=[(])', Keyword), - (r'write(q|_(canonical|term))?(?=[(])', Keyword), - (r'(current_)?op(?=[(])', Keyword), - (r'(current_)?char_conversion(?=[(])', Keyword), - # Atomic term processing - (r'atom_(length|c(hars|o(ncat|des)))(?=[(])', Keyword), - (r'(char_code|sub_atom)(?=[(])', Keyword), - (r'number_c(har|ode)s(?=[(])', Keyword), - # Implementation defined hooks functions - (r'(se|curren)t_prolog_flag(?=[(])', Keyword), - (r'\bhalt\b', Keyword), - (r'halt(?=[(])', Keyword), - # Message sending operators - (r'(::|:|\^\^)', Operator), - # External call - (r'[{}]', Keyword), - # Logic and control - (r'\b(ignore|once)(?=[(])', Keyword), - (r'\brepeat\b', Keyword), - # Sorting - (r'(key)?sort(?=[(])', Keyword), - # Bitwise functors - (r'(>>|<<|/\\|\\\\|\\)', Operator), - # Arithemtic evaluation - (r'\bis\b', Keyword), - # Arithemtic comparison - (r'(=:=|=\\=|<|=<|>=|>)', Operator), - # Term creation and decomposition - (r'=\.\.', Operator), - # Term unification - (r'(=|\\=)', Operator), - # Term comparison - 
(r'(==|\\==|@=<|@<|@>=|@>)', Operator), - # Evaluable functors - (r'(//|[-+*/])', Operator), - (r'\b(e|pi|mod|rem)\b', Operator), - # Other arithemtic functors - (r'\b\*\*\b', Operator), - # DCG rules - (r'-->', Operator), - # Control constructs - (r'([!;]|->)', Operator), - # Logic and control - (r'\\+', Operator), - # Mode operators - (r'[?@]', Operator), - # Existential quantifier - (r'\^', Operator), - # Strings - (r'"(\\\\|\\"|[^"])*"', String), - # Ponctuation - (r'[()\[\],.|]', Text), - # Atoms - (r"[a-z][a-zA-Z0-9_]*", Text), - (r"'", String, 'quoted_atom'), - ], - - 'quoted_atom': [ - (r"''", String), - (r"'", String, '#pop'), - (r'\\([\\abfnrtv"\']|(x[a-fA-F0-9]+|[0-7]+)\\)', String.Escape), - (r"[^\\'\n]+", String), - (r'\\', String), - ], - - 'directive': [ - # Conditional compilation directives - (r'(el)?if(?=[(])', Keyword, 'root'), - (r'(e(lse|ndif))[.]', Keyword, 'root'), - # Entity directives - (r'(category|object|protocol)(?=[(])', Keyword, 'entityrelations'), - (r'(end_(category|object|protocol))[.]',Keyword, 'root'), - # Predicate scope directives - (r'(public|protected|private)(?=[(])', Keyword, 'root'), - # Other directives - (r'e(n(coding|sure_loaded)|xport)(?=[(])', Keyword, 'root'), - (r'in(fo|itialization)(?=[(])', Keyword, 'root'), - (r'(dynamic|synchronized|threaded)[.]', Keyword, 'root'), - (r'(alias|d(ynamic|iscontiguous)|m(eta_predicate|ode|ultifile)|' - r's(et_(logtalk|prolog)_flag|ynchronized))(?=[(])', - Keyword, 'root'), - (r'op(?=[(])', Keyword, 'root'), - (r'(c(alls|oinductive)|reexport|use(s|_module))(?=[(])', - Keyword, 'root'), - (r'[a-z][a-zA-Z0-9_]*(?=[(])', Text, 'root'), - (r'[a-z][a-zA-Z0-9_]*[.]', Text, 'root'), - ], - - 'entityrelations': [ - (r'(complements|extends|i(nstantiates|mp(lements|orts))|specializes)' - r'(?=[(])', Keyword), - # Numbers - (r"0'.", Number), - (r'0b[01]+', Number), - (r'0o[0-7]+', Number), - (r'0x[0-9a-fA-F]+', Number), - (r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number), - # Variables - 
(r'([A-Z_][a-zA-Z0-9_]*)', Name.Variable), - # Atoms - (r"[a-z][a-zA-Z0-9_]*", Text), - (r"'", String, 'quoted_atom'), - # Strings - (r'"(\\\\|\\"|[^"])*"', String), - # End of entity-opening directive - (r'([)]\.)', Text, 'root'), - # Scope operator - (r'(::)', Operator), - # Ponctuation - (r'[()\[\],.|]', Text), - # Comments - (r'%.*?\n', Comment), - (r'/\*(.|\n)*?\*/',Comment), - # Whitespace - (r'\n', Text), - (r'\s+', Text), - ] - } - - def analyse_text(text): - if ':- object(' in text: - return True - if ':- protocol(' in text: - return True - if ':- category(' in text: - return True - return False - - -def _shortened(word): - dpos = word.find('$') - return '|'.join([word[:dpos] + word[dpos+1:i] + r'\b' - for i in range(len(word), dpos, -1)]) -def _shortened_many(*words): - return '|'.join(map(_shortened, words)) - -class GnuplotLexer(RegexLexer): - """ - For `Gnuplot `_ plotting scripts. - - *New in Pygments 0.11.* - """ - - name = 'Gnuplot' - aliases = ['gnuplot'] - filenames = ['*.plot', '*.plt'] - mimetypes = ['text/x-gnuplot'] - - tokens = { - 'root': [ - include('whitespace'), - (_shortened('bi$nd'), Keyword, 'bind'), - (_shortened_many('ex$it', 'q$uit'), Keyword, 'quit'), - (_shortened('f$it'), Keyword, 'fit'), - (r'(if)(\s*)(\()', bygroups(Keyword, Text, Punctuation), 'if'), - (r'else\b', Keyword), - (_shortened('pa$use'), Keyword, 'pause'), - (_shortened_many('p$lot', 'rep$lot', 'sp$lot'), Keyword, 'plot'), - (_shortened('sa$ve'), Keyword, 'save'), - (_shortened('se$t'), Keyword, ('genericargs', 'optionarg')), - (_shortened_many('sh$ow', 'uns$et'), - Keyword, ('noargs', 'optionarg')), - (_shortened_many('low$er', 'ra$ise', 'ca$ll', 'cd$', 'cl$ear', - 'h$elp', '\\?$', 'hi$story', 'l$oad', 'pr$int', - 'pwd$', 're$read', 'res$et', 'scr$eendump', - 'she$ll', 'sy$stem', 'up$date'), - Keyword, 'genericargs'), - (_shortened_many('pwd$', 're$read', 'res$et', 'scr$eendump', - 'she$ll', 'test$'), - Keyword, 'noargs'), - ('([a-zA-Z_][a-zA-Z0-9_]*)(\s*)(=)', - 
bygroups(Name.Variable, Text, Operator), 'genericargs'), - ('([a-zA-Z_][a-zA-Z0-9_]*)(\s*\(.*?\)\s*)(=)', - bygroups(Name.Function, Text, Operator), 'genericargs'), - (r'@[a-zA-Z_][a-zA-Z0-9_]*', Name.Constant), # macros - (r';', Keyword), - ], - 'comment': [ - (r'[^\\\n]', Comment), - (r'\\\n', Comment), - (r'\\', Comment), - # don't add the newline to the Comment token - ('', Comment, '#pop'), - ], - 'whitespace': [ - ('#', Comment, 'comment'), - (r'[ \t\v\f]+', Text), - ], - 'noargs': [ - include('whitespace'), - # semicolon and newline end the argument list - (r';', Punctuation, '#pop'), - (r'\n', Text, '#pop'), - ], - 'dqstring': [ - (r'"', String, '#pop'), - (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), - (r'[^\\"\n]+', String), # all other characters - (r'\\\n', String), # line continuation - (r'\\', String), # stray backslash - (r'\n', String, '#pop'), # newline ends the string too - ], - 'sqstring': [ - (r"''", String), # escaped single quote - (r"'", String, '#pop'), - (r"[^\\'\n]+", String), # all other characters - (r'\\\n', String), # line continuation - (r'\\', String), # normal backslash - (r'\n', String, '#pop'), # newline ends the string too - ], - 'genericargs': [ - include('noargs'), - (r'"', String, 'dqstring'), - (r"'", String, 'sqstring'), - (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float), - (r'(\d+\.\d*|\.\d+)', Number.Float), - (r'-?\d+', Number.Integer), - ('[,.~!%^&*+=|?:<>/-]', Operator), - ('[{}()\[\]]', Punctuation), - (r'(eq|ne)\b', Operator.Word), - (r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*)(\()', - bygroups(Name.Function, Text, Punctuation)), - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name), - (r'@[a-zA-Z_][a-zA-Z0-9_]*', Name.Constant), # macros - (r'\\\n', Text), - ], - 'optionarg': [ - include('whitespace'), - (_shortened_many( - "a$ll","an$gles","ar$row","au$toscale","b$ars","bor$der", - "box$width","cl$abel","c$lip","cn$trparam","co$ntour","da$ta", - "data$file","dg$rid3d","du$mmy","enc$oding","dec$imalsign", - 
"fit$","font$path","fo$rmat","fu$nction","fu$nctions","g$rid", - "hid$den3d","his$torysize","is$osamples","k$ey","keyt$itle", - "la$bel","li$nestyle","ls$","loa$dpath","loc$ale","log$scale", - "mac$ros","map$ping","map$ping3d","mar$gin","lmar$gin", - "rmar$gin","tmar$gin","bmar$gin","mo$use","multi$plot", - "mxt$ics","nomxt$ics","mx2t$ics","nomx2t$ics","myt$ics", - "nomyt$ics","my2t$ics","nomy2t$ics","mzt$ics","nomzt$ics", - "mcbt$ics","nomcbt$ics","of$fsets","or$igin","o$utput", - "pa$rametric","pm$3d","pal$ette","colorb$ox","p$lot", - "poi$ntsize","pol$ar","pr$int","obj$ect","sa$mples","si$ze", - "st$yle","su$rface","table$","t$erminal","termo$ptions","ti$cs", - "ticsc$ale","ticsl$evel","timef$mt","tim$estamp","tit$le", - "v$ariables","ve$rsion","vi$ew","xyp$lane","xda$ta","x2da$ta", - "yda$ta","y2da$ta","zda$ta","cbda$ta","xl$abel","x2l$abel", - "yl$abel","y2l$abel","zl$abel","cbl$abel","xti$cs","noxti$cs", - "x2ti$cs","nox2ti$cs","yti$cs","noyti$cs","y2ti$cs","noy2ti$cs", - "zti$cs","nozti$cs","cbti$cs","nocbti$cs","xdti$cs","noxdti$cs", - "x2dti$cs","nox2dti$cs","ydti$cs","noydti$cs","y2dti$cs", - "noy2dti$cs","zdti$cs","nozdti$cs","cbdti$cs","nocbdti$cs", - "xmti$cs","noxmti$cs","x2mti$cs","nox2mti$cs","ymti$cs", - "noymti$cs","y2mti$cs","noy2mti$cs","zmti$cs","nozmti$cs", - "cbmti$cs","nocbmti$cs","xr$ange","x2r$ange","yr$ange", - "y2r$ange","zr$ange","cbr$ange","rr$ange","tr$ange","ur$ange", - "vr$ange","xzeroa$xis","x2zeroa$xis","yzeroa$xis","y2zeroa$xis", - "zzeroa$xis","zeroa$xis","z$ero"), Name.Builtin, '#pop'), - ], - 'bind': [ - ('!', Keyword, '#pop'), - (_shortened('all$windows'), Name.Builtin), - include('genericargs'), - ], - 'quit': [ - (r'gnuplot\b', Keyword), - include('noargs'), - ], - 'fit': [ - (r'via\b', Name.Builtin), - include('plot'), - ], - 'if': [ - (r'\)', Punctuation, '#pop'), - include('genericargs'), - ], - 'pause': [ - (r'(mouse|any|button1|button2|button3)\b', Name.Builtin), - (_shortened('key$press'), Name.Builtin), - 
include('genericargs'), - ], - 'plot': [ - (_shortened_many('ax$es', 'axi$s', 'bin$ary', 'ev$ery', 'i$ndex', - 'mat$rix', 's$mooth', 'thru$', 't$itle', - 'not$itle', 'u$sing', 'w$ith'), - Name.Builtin), - include('genericargs'), - ], - 'save': [ - (_shortened_many('f$unctions', 's$et', 't$erminal', 'v$ariables'), - Name.Builtin), - include('genericargs'), - ], - } - - -class PovrayLexer(RegexLexer): - """ - For `Persistence of Vision Raytracer `_ files. - - *New in Pygments 0.11.* - """ - name = 'POVRay' - aliases = ['pov'] - filenames = ['*.pov', '*.inc'] - mimetypes = ['text/x-povray'] - - tokens = { - 'root': [ - (r'/\*[\w\W]*?\*/', Comment.Multiline), - (r'//.*\n', Comment.Single), - (r'(?s)"(?:\\.|[^"\\])+"', String.Double), - (r'#(debug|default|else|end|error|fclose|fopen|ifdef|ifndef|' - r'include|range|read|render|statistics|switch|undef|version|' - r'warning|while|write|define|macro|local|declare)\b', - Comment.Preproc), - (r'\b(aa_level|aa_threshold|abs|acos|acosh|adaptive|adc_bailout|' - r'agate|agate_turb|all|alpha|ambient|ambient_light|angle|' - r'aperture|arc_angle|area_light|asc|asin|asinh|assumed_gamma|' - r'atan|atan2|atanh|atmosphere|atmospheric_attenuation|' - r'attenuating|average|background|black_hole|blue|blur_samples|' - r'bounded_by|box_mapping|bozo|break|brick|brick_size|' - r'brightness|brilliance|bumps|bumpy1|bumpy2|bumpy3|bump_map|' - r'bump_size|case|caustics|ceil|checker|chr|clipped_by|clock|' - r'color|color_map|colour|colour_map|component|composite|concat|' - r'confidence|conic_sweep|constant|control0|control1|cos|cosh|' - r'count|crackle|crand|cube|cubic_spline|cylindrical_mapping|' - r'debug|declare|default|degrees|dents|diffuse|direction|' - r'distance|distance_maximum|div|dust|dust_type|eccentricity|' - r'else|emitting|end|error|error_bound|exp|exponent|' - r'fade_distance|fade_power|falloff|falloff_angle|false|' - r'file_exists|filter|finish|fisheye|flatness|flip|floor|' - 
r'focal_point|fog|fog_alt|fog_offset|fog_type|frequency|gif|' - r'global_settings|glowing|gradient|granite|gray_threshold|' - r'green|halo|hexagon|hf_gray_16|hierarchy|hollow|hypercomplex|' - r'if|ifdef|iff|image_map|incidence|include|int|interpolate|' - r'inverse|ior|irid|irid_wavelength|jitter|lambda|leopard|' - r'linear|linear_spline|linear_sweep|location|log|looks_like|' - r'look_at|low_error_factor|mandel|map_type|marble|material_map|' - r'matrix|max|max_intersections|max_iteration|max_trace_level|' - r'max_value|metallic|min|minimum_reuse|mod|mortar|' - r'nearest_count|no|normal|normal_map|no_shadow|number_of_waves|' - r'octaves|off|offset|omega|omnimax|on|once|onion|open|' - r'orthographic|panoramic|pattern1|pattern2|pattern3|' - r'perspective|pgm|phase|phong|phong_size|pi|pigment|' - r'pigment_map|planar_mapping|png|point_at|pot|pow|ppm|' - r'precision|pwr|quadratic_spline|quaternion|quick_color|' - r'quick_colour|quilted|radial|radians|radiosity|radius|rainbow|' - r'ramp_wave|rand|range|reciprocal|recursion_limit|red|' - r'reflection|refraction|render|repeat|rgb|rgbf|rgbft|rgbt|' - r'right|ripples|rotate|roughness|samples|scale|scallop_wave|' - r'scattering|seed|shadowless|sin|sine_wave|sinh|sky|sky_sphere|' - r'slice|slope_map|smooth|specular|spherical_mapping|spiral|' - r'spiral1|spiral2|spotlight|spotted|sqr|sqrt|statistics|str|' - r'strcmp|strength|strlen|strlwr|strupr|sturm|substr|switch|sys|' - r't|tan|tanh|test_camera_1|test_camera_2|test_camera_3|' - r'test_camera_4|texture|texture_map|tga|thickness|threshold|' - r'tightness|tile2|tiles|track|transform|translate|transmit|' - r'triangle_wave|true|ttf|turbulence|turb_depth|type|' - r'ultra_wide_angle|up|use_color|use_colour|use_index|u_steps|' - r'val|variance|vaxis_rotate|vcross|vdot|version|vlength|' - r'vnormalize|volume_object|volume_rendered|vol_with_light|' - r'vrotate|v_steps|warning|warp|water_level|waves|while|width|' - r'wood|wrinkles|yes)\b', Keyword), - 
(r'(bicubic_patch|blob|box|camera|cone|cubic|cylinder|difference|' - r'disc|height_field|intersection|julia_fractal|lathe|' - r'light_source|merge|mesh|object|plane|poly|polygon|prism|' - r'quadric|quartic|smooth_triangle|sor|sphere|superellipsoid|' - r'text|torus|triangle|union)\b', Name.Builtin), - # TODO: <=, etc - (r'[\[\](){}<>;,]', Punctuation), - (r'[-+*/=]', Operator), - (r'\b(x|y|z|u|v)\b', Name.Builtin.Pseudo), - (r'[a-zA-Z_][a-zA-Z_0-9]*', Name), - (r'[0-9]+\.[0-9]*', Number.Float), - (r'\.[0-9]+', Number.Float), - (r'[0-9]+', Number.Integer), - (r'\s+', Text), - ] - } - - -class AppleScriptLexer(RegexLexer): - """ - For `AppleScript source code - `_, - including `AppleScript Studio - `_. - Contributed by Andreas Amann . - """ - - name = 'AppleScript' - aliases = ['applescript'] - filenames = ['*.applescript'] - - flags = re.MULTILINE | re.DOTALL - - Identifiers = r'[a-zA-Z]\w*' - Literals = ['AppleScript', 'current application', 'false', 'linefeed', - 'missing value', 'pi','quote', 'result', 'return', 'space', - 'tab', 'text item delimiters', 'true', 'version'] - Classes = ['alias ', 'application ', 'boolean ', 'class ', 'constant ', - 'date ', 'file ', 'integer ', 'list ', 'number ', 'POSIX file ', - 'real ', 'record ', 'reference ', 'RGB color ', 'script ', - 'text ', 'unit types', '(?:Unicode )?text', 'string'] - BuiltIn = ['attachment', 'attribute run', 'character', 'day', 'month', - 'paragraph', 'word', 'year'] - HandlerParams = ['about', 'above', 'against', 'apart from', 'around', - 'aside from', 'at', 'below', 'beneath', 'beside', - 'between', 'for', 'given', 'instead of', 'on', 'onto', - 'out of', 'over', 'since'] - Commands = ['ASCII (character|number)', 'activate', 'beep', 'choose URL', - 'choose application', 'choose color', 'choose file( name)?', - 'choose folder', 'choose from list', - 'choose remote application', 'clipboard info', - 'close( access)?', 'copy', 'count', 'current date', 'delay', - 'delete', 'display (alert|dialog)', 'do shell 
script', - 'duplicate', 'exists', 'get eof', 'get volume settings', - 'info for', 'launch', 'list (disks|folder)', 'load script', - 'log', 'make', 'mount volume', 'new', 'offset', - 'open( (for access|location))?', 'path to', 'print', 'quit', - 'random number', 'read', 'round', 'run( script)?', - 'say', 'scripting components', - 'set (eof|the clipboard to|volume)', 'store script', - 'summarize', 'system attribute', 'system info', - 'the clipboard', 'time to GMT', 'write', 'quoted form'] - References = ['(in )?back of', '(in )?front of', '[0-9]+(st|nd|rd|th)', - 'first', 'second', 'third', 'fourth', 'fifth', 'sixth', - 'seventh', 'eighth', 'ninth', 'tenth', 'after', 'back', - 'before', 'behind', 'every', 'front', 'index', 'last', - 'middle', 'some', 'that', 'through', 'thru', 'where', 'whose'] - Operators = ["and", "or", "is equal", "equals", "(is )?equal to", "is not", - "isn't", "isn't equal( to)?", "is not equal( to)?", - "doesn't equal", "does not equal", "(is )?greater than", - "comes after", "is not less than or equal( to)?", - "isn't less than or equal( to)?", "(is )?less than", - "comes before", "is not greater than or equal( to)?", - "isn't greater than or equal( to)?", - "(is )?greater than or equal( to)?", "is not less than", - "isn't less than", "does not come before", - "doesn't come before", "(is )?less than or equal( to)?", - "is not greater than", "isn't greater than", - "does not come after", "doesn't come after", "starts? with", - "begins? with", "ends? 
with", "contains?", "does not contain", - "doesn't contain", "is in", "is contained by", "is not in", - "is not contained by", "isn't contained by", "div", "mod", - "not", "(a )?(ref( to)?|reference to)", "is", "does"] - Control = ['considering', 'else', 'error', 'exit', 'from', 'if', - 'ignoring', 'in', 'repeat', 'tell', 'then', 'times', 'to', - 'try', 'until', 'using terms from', 'while', 'whith', - 'with timeout( of)?', 'with transaction', 'by', 'continue', - 'end', 'its?', 'me', 'my', 'return', 'of' , 'as'] - Declarations = ['global', 'local', 'prop(erty)?', 'set', 'get'] - Reserved = ['but', 'put', 'returning', 'the'] - StudioClasses = ['action cell', 'alert reply', 'application', 'box', - 'browser( cell)?', 'bundle', 'button( cell)?', 'cell', - 'clip view', 'color well', 'color-panel', - 'combo box( item)?', 'control', - 'data( (cell|column|item|row|source))?', 'default entry', - 'dialog reply', 'document', 'drag info', 'drawer', - 'event', 'font(-panel)?', 'formatter', - 'image( (cell|view))?', 'matrix', 'menu( item)?', 'item', - 'movie( view)?', 'open-panel', 'outline view', 'panel', - 'pasteboard', 'plugin', 'popup button', - 'progress indicator', 'responder', 'save-panel', - 'scroll view', 'secure text field( cell)?', 'slider', - 'sound', 'split view', 'stepper', 'tab view( item)?', - 'table( (column|header cell|header view|view))', - 'text( (field( cell)?|view))?', 'toolbar( item)?', - 'user-defaults', 'view', 'window'] - StudioEvents = ['accept outline drop', 'accept table drop', 'action', - 'activated', 'alert ended', 'awake from nib', 'became key', - 'became main', 'begin editing', 'bounds changed', - 'cell value', 'cell value changed', 'change cell value', - 'change item value', 'changed', 'child of item', - 'choose menu item', 'clicked', 'clicked toolbar item', - 'closed', 'column clicked', 'column moved', - 'column resized', 'conclude drop', 'data representation', - 'deminiaturized', 'dialog ended', 'document nib name', - 'double clicked', 'drag( 
(entered|exited|updated))?', - 'drop', 'end editing', 'exposed', 'idle', 'item expandable', - 'item value', 'item value changed', 'items changed', - 'keyboard down', 'keyboard up', 'launched', - 'load data representation', 'miniaturized', 'mouse down', - 'mouse dragged', 'mouse entered', 'mouse exited', - 'mouse moved', 'mouse up', 'moved', - 'number of browser rows', 'number of items', - 'number of rows', 'open untitled', 'opened', 'panel ended', - 'parameters updated', 'plugin loaded', 'prepare drop', - 'prepare outline drag', 'prepare outline drop', - 'prepare table drag', 'prepare table drop', - 'read from file', 'resigned active', 'resigned key', - 'resigned main', 'resized( sub views)?', - 'right mouse down', 'right mouse dragged', - 'right mouse up', 'rows changed', 'scroll wheel', - 'selected tab view item', 'selection changed', - 'selection changing', 'should begin editing', - 'should close', 'should collapse item', - 'should end editing', 'should expand item', - 'should open( untitled)?', - 'should quit( after last window closed)?', - 'should select column', 'should select item', - 'should select row', 'should select tab view item', - 'should selection change', 'should zoom', 'shown', - 'update menu item', 'update parameters', - 'update toolbar item', 'was hidden', 'was miniaturized', - 'will become active', 'will close', 'will dismiss', - 'will display browser cell', 'will display cell', - 'will display item cell', 'will display outline cell', - 'will finish launching', 'will hide', 'will miniaturize', - 'will move', 'will open', 'will pop up', 'will quit', - 'will resign active', 'will resize( sub views)?', - 'will select tab view item', 'will show', 'will zoom', - 'write to file', 'zoomed'] - StudioCommands = ['animate', 'append', 'call method', 'center', - 'close drawer', 'close panel', 'display', - 'display alert', 'display dialog', 'display panel', 'go', - 'hide', 'highlight', 'increment', 'item for', - 'load image', 'load movie', 'load nib', 'load 
panel', - 'load sound', 'localized string', 'lock focus', 'log', - 'open drawer', 'path for', 'pause', 'perform action', - 'play', 'register', 'resume', 'scroll', 'select( all)?', - 'show', 'size to fit', 'start', 'step back', - 'step forward', 'stop', 'synchronize', 'unlock focus', - 'update'] - StudioProperties = ['accepts arrow key', 'action method', 'active', - 'alignment', 'allowed identifiers', - 'allows branch selection', 'allows column reordering', - 'allows column resizing', 'allows column selection', - 'allows customization', - 'allows editing text attributes', - 'allows empty selection', 'allows mixed state', - 'allows multiple selection', 'allows reordering', - 'allows undo', 'alpha( value)?', 'alternate image', - 'alternate increment value', 'alternate title', - 'animation delay', 'associated file name', - 'associated object', 'auto completes', 'auto display', - 'auto enables items', 'auto repeat', - 'auto resizes( outline column)?', - 'auto save expanded items', 'auto save name', - 'auto save table columns', 'auto saves configuration', - 'auto scroll', 'auto sizes all columns to fit', - 'auto sizes cells', 'background color', 'bezel state', - 'bezel style', 'bezeled', 'border rect', 'border type', - 'bordered', 'bounds( rotation)?', 'box type', - 'button returned', 'button type', - 'can choose directories', 'can choose files', - 'can draw', 'can hide', - 'cell( (background color|size|type))?', 'characters', - 'class', 'click count', 'clicked( data)? column', - 'clicked data item', 'clicked( data)? row', - 'closeable', 'collating', 'color( (mode|panel))', - 'command key down', 'configuration', - 'content(s| (size|view( margins)?))?', 'context', - 'continuous', 'control key down', 'control size', - 'control tint', 'control view', - 'controller visible', 'coordinate system', - 'copies( on scroll)?', 'corner view', 'current cell', - 'current column', 'current( field)? editor', - 'current( menu)? 
item', 'current row', - 'current tab view item', 'data source', - 'default identifiers', 'delta (x|y|z)', - 'destination window', 'directory', 'display mode', - 'displayed cell', 'document( (edited|rect|view))?', - 'double value', 'dragged column', 'dragged distance', - 'dragged items', 'draws( cell)? background', - 'draws grid', 'dynamically scrolls', 'echos bullets', - 'edge', 'editable', 'edited( data)? column', - 'edited data item', 'edited( data)? row', 'enabled', - 'enclosing scroll view', 'ending page', - 'error handling', 'event number', 'event type', - 'excluded from windows menu', 'executable path', - 'expanded', 'fax number', 'field editor', 'file kind', - 'file name', 'file type', 'first responder', - 'first visible column', 'flipped', 'floating', - 'font( panel)?', 'formatter', 'frameworks path', - 'frontmost', 'gave up', 'grid color', 'has data items', - 'has horizontal ruler', 'has horizontal scroller', - 'has parent data item', 'has resize indicator', - 'has shadow', 'has sub menu', 'has vertical ruler', - 'has vertical scroller', 'header cell', 'header view', - 'hidden', 'hides when deactivated', 'highlights by', - 'horizontal line scroll', 'horizontal page scroll', - 'horizontal ruler view', 'horizontally resizable', - 'icon image', 'id', 'identifier', - 'ignores multiple clicks', - 'image( (alignment|dims when disabled|frame style|' - 'scaling))?', - 'imports graphics', 'increment value', - 'indentation per level', 'indeterminate', 'index', - 'integer value', 'intercell spacing', 'item height', - 'key( (code|equivalent( modifier)?|window))?', - 'knob thickness', 'label', 'last( visible)? column', - 'leading offset', 'leaf', 'level', 'line scroll', - 'loaded', 'localized sort', 'location', 'loop mode', - 'main( (bunde|menu|window))?', 'marker follows cell', - 'matrix mode', 'maximum( content)? 
size', - 'maximum visible columns', - 'menu( form representation)?', 'miniaturizable', - 'miniaturized', 'minimized image', 'minimized title', - 'minimum column width', 'minimum( content)? size', - 'modal', 'modified', 'mouse down state', - 'movie( (controller|file|rect))?', 'muted', 'name', - 'needs display', 'next state', 'next text', - 'number of tick marks', 'only tick mark values', - 'opaque', 'open panel', 'option key down', - 'outline table column', 'page scroll', 'pages across', - 'pages down', 'palette label', 'pane splitter', - 'parent data item', 'parent window', 'pasteboard', - 'path( (names|separator))?', 'playing', - 'plays every frame', 'plays selection only', 'position', - 'preferred edge', 'preferred type', 'pressure', - 'previous text', 'prompt', 'properties', - 'prototype cell', 'pulls down', 'rate', - 'released when closed', 'repeated', - 'requested print time', 'required file type', - 'resizable', 'resized column', 'resource path', - 'returns records', 'reuses columns', 'rich text', - 'roll over', 'row height', 'rulers visible', - 'save panel', 'scripts path', 'scrollable', - 'selectable( identifiers)?', 'selected cell', - 'selected( data)? columns?', 'selected data items?', - 'selected( data)? 
rows?', 'selected item identifier', - 'selection by rect', 'send action on arrow key', - 'sends action when done editing', 'separates columns', - 'separator item', 'sequence number', 'services menu', - 'shared frameworks path', 'shared support path', - 'sheet', 'shift key down', 'shows alpha', - 'shows state by', 'size( mode)?', - 'smart insert delete enabled', 'sort case sensitivity', - 'sort column', 'sort order', 'sort type', - 'sorted( data rows)?', 'sound', 'source( mask)?', - 'spell checking enabled', 'starting page', 'state', - 'string value', 'sub menu', 'super menu', 'super view', - 'tab key traverses cells', 'tab state', 'tab type', - 'tab view', 'table view', 'tag', 'target( printer)?', - 'text color', 'text container insert', - 'text container origin', 'text returned', - 'tick mark position', 'time stamp', - 'title(d| (cell|font|height|position|rect))?', - 'tool tip', 'toolbar', 'trailing offset', 'transparent', - 'treat packages as directories', 'truncated labels', - 'types', 'unmodified characters', 'update views', - 'use sort indicator', 'user defaults', - 'uses data source', 'uses ruler', - 'uses threaded animation', - 'uses title from previous column', 'value wraps', - 'version', - 'vertical( (line scroll|page scroll|ruler view))?', - 'vertically resizable', 'view', - 'visible( document rect)?', 'volume', 'width', 'window', - 'windows menu', 'wraps', 'zoomable', 'zoomed'] - - tokens = { - 'root': [ - (r'\s+', Text), - (r'¬\n', String.Escape), - (r"'s\s+", Text), # This is a possessive, consider moving - (r'(--|#).*?$', Comment), - (r'\(\*', Comment.Multiline, 'comment'), - (r'[\(\){}!,.:]', Punctuation), - (r'(«)([^»]+)(»)', - bygroups(Text, Name.Builtin, Text)), - (r'\b((?:considering|ignoring)\s*)' - r'(application responses|case|diacriticals|hyphens|' - r'numeric strings|punctuation|white space)', - bygroups(Keyword, Name.Builtin)), - (r'(-|\*|\+|&|≠|>=?|<=?|=|≥|≤|/|÷|\^)', Operator), - (r"\b(%s)\b" % '|'.join(Operators), Operator.Word), - 
(r'^(\s*(?:on|end)\s+)' - r'(%s)' % '|'.join(StudioEvents[::-1]), - bygroups(Keyword, Name.Function)), - (r'^(\s*)(in|on|script|to)(\s+)', bygroups(Text, Keyword, Text)), - (r'\b(as )(%s)\b' % '|'.join(Classes), - bygroups(Keyword, Name.Class)), - (r'\b(%s)\b' % '|'.join(Literals), Name.Constant), - (r'\b(%s)\b' % '|'.join(Commands), Name.Builtin), - (r'\b(%s)\b' % '|'.join(Control), Keyword), - (r'\b(%s)\b' % '|'.join(Declarations), Keyword), - (r'\b(%s)\b' % '|'.join(Reserved), Name.Builtin), - (r'\b(%s)s?\b' % '|'.join(BuiltIn), Name.Builtin), - (r'\b(%s)\b' % '|'.join(HandlerParams), Name.Builtin), - (r'\b(%s)\b' % '|'.join(StudioProperties), Name.Attribute), - (r'\b(%s)s?\b' % '|'.join(StudioClasses), Name.Builtin), - (r'\b(%s)\b' % '|'.join(StudioCommands), Name.Builtin), - (r'\b(%s)\b' % '|'.join(References), Name.Builtin), - (r'"(\\\\|\\"|[^"])*"', String.Double), - (r'\b(%s)\b' % Identifiers, Name.Variable), - (r'[-+]?(\d+\.\d*|\d*\.\d+)(E[-+][0-9]+)?', Number.Float), - (r'[-+]?\d+', Number.Integer), - ], - 'comment': [ - ('\(\*', Comment.Multiline, '#push'), - ('\*\)', Comment.Multiline, '#pop'), - ('[^*(]+', Comment.Multiline), - ('[*(]', Comment.Multiline), - ], - } - - -class ModelicaLexer(RegexLexer): - """ - For `Modelica `_ source code. 
- - *New in Pygments 1.1.* - """ - name = 'Modelica' - aliases = ['modelica'] - filenames = ['*.mo'] - mimetypes = ['text/x-modelica'] - - flags = re.IGNORECASE | re.DOTALL - - tokens = { - 'whitespace': [ - (r'\n', Text), - (r'\s+', Text), - (r'\\\n', Text), # line continuation - (r'//(\n|(.|\n)*?[^\\]\n)', Comment), - (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment), - ], - 'statements': [ - (r'"', String, 'string'), - (r'(\d+\.\d*|\.\d+|\d+|\d.)[eE][+-]?\d+[lL]?', Number.Float), - (r'(\d+\.\d*|\.\d+)', Number.Float), - (r'\d+[Ll]?', Number.Integer), - (r'[~!%^&*+=|?:<>/-]', Operator), - (r'[()\[\]{},.;]', Punctuation), - (r'(true|false|NULL|Real|Integer|Boolean)\b', Name.Builtin), - (r"([a-zA-Z_][\w]*|'[a-zA-Z_\+\-\*\/\^][\w]*')" - r"(\.([a-zA-Z_][\w]*|'[a-zA-Z_\+\-\*\/\^][\w]*'))+", Name.Class), - (r"('[\w\+\-\*\/\^]+'|\w+)", Name), - ], - 'root': [ - include('whitespace'), - include('keywords'), - include('functions'), - include('operators'), - include('classes'), - (r'("|)', Name.Tag, 'html-content'), - include('statements'), - ], - 'keywords': [ - (r'(algorithm|annotation|break|connect|constant|constrainedby|' - r'discrete|each|else|elseif|elsewhen|encapsulated|enumeration|' - r'end|equation|exit|expandable|extends|' - r'external|false|final|flow|for|if|import|impure|in|initial\sequation|' - r'inner|input|loop|nondiscrete|outer|output|parameter|partial|' - r'protected|public|pure|redeclare|replaceable|stream|time|then|true|' - r'when|while|within)\b', Keyword), - ], - 'functions': [ - (r'(abs|acos|acosh|asin|asinh|atan|atan2|atan3|ceil|cos|cosh|' - r'cross|div|exp|floor|getInstanceName|log|log10|mod|rem|' - r'semiLinear|sign|sin|sinh|size|spatialDistribution|sqrt|tan|' - r'tanh|zeros)\b', Name.Function), - ], - 'operators': [ - (r'(actualStream|and|assert|cardinality|change|Clock|delay|der|edge|' - r'hold|homotopy|initial|inStream|noEvent|not|or|pre|previous|reinit|' - r'return|sample|smooth|spatialDistribution|subSample|terminal|' - r'terminate)\b', 
Name.Builtin), - ], - 'classes': [ - (r'(block|class|connector|function|model|package|' - r'record|type)(\s+)([A-Za-z_]+)', - bygroups(Keyword, Text, Name.Class)) - ], - 'string': [ - (r'"', String, '#pop'), - (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', - String.Escape), - (r'[^\\"\n]+', String), # all other characters - (r'\\\n', String), # line continuation - (r'\\', String), # stray backslash - ], - 'html-content': [ - (r'<\s*/\s*html\s*>', Name.Tag, '#pop'), - (r'.+?(?=<\s*/\s*html\s*>)', using(HtmlLexer)), - ] - } - - -class RebolLexer(RegexLexer): - """ - A `REBOL `_ lexer. - - *New in Pygments 1.1.* - """ - name = 'REBOL' - aliases = ['rebol'] - filenames = ['*.r', '*.r3'] - mimetypes = ['text/x-rebol'] - - flags = re.IGNORECASE | re.MULTILINE - - re.IGNORECASE - - escape_re = r'(?:\^\([0-9a-fA-F]{1,4}\)*)' - - def word_callback(lexer, match): - word = match.group() - - if re.match(".*:$", word): - yield match.start(), Generic.Subheading, word - elif re.match( - r'(native|alias|all|any|as-string|as-binary|bind|bound\?|case|' - r'catch|checksum|comment|debase|dehex|exclude|difference|disarm|' - r'either|else|enbase|foreach|remove-each|form|free|get|get-env|if|' - r'in|intersect|loop|minimum-of|maximum-of|mold|new-line|' - r'new-line\?|not|now|prin|print|reduce|compose|construct|repeat|' - r'reverse|save|script\?|set|shift|switch|throw|to-hex|trace|try|' - r'type\?|union|unique|unless|unprotect|unset|until|use|value\?|' - r'while|compress|decompress|secure|open|close|read|read-io|' - r'write-io|write|update|query|wait|input\?|exp|log-10|log-2|' - r'log-e|square-root|cosine|sine|tangent|arccosine|arcsine|' - r'arctangent|protect|lowercase|uppercase|entab|detab|connected\?|' - r'browse|launch|stats|get-modes|set-modes|to-local-file|' - r'to-rebol-file|encloak|decloak|create-link|do-browser|bind\?|' - r'hide|draw|show|size-text|textinfo|offset-to-caret|' - r'caret-to-offset|local-request-file|rgb-to-hsv|hsv-to-rgb|' - 
r'crypt-strength\?|dh-make-key|dh-generate-key|dh-compute-key|' - r'dsa-make-key|dsa-generate-key|dsa-make-signature|' - r'dsa-verify-signature|rsa-make-key|rsa-generate-key|' - r'rsa-encrypt)$', word): - yield match.start(), Name.Builtin, word - elif re.match( - r'(add|subtract|multiply|divide|remainder|power|and~|or~|xor~|' - r'minimum|maximum|negate|complement|absolute|random|head|tail|' - r'next|back|skip|at|pick|first|second|third|fourth|fifth|sixth|' - r'seventh|eighth|ninth|tenth|last|path|find|select|make|to|copy\*|' - r'insert|remove|change|poke|clear|trim|sort|min|max|abs|cp|' - r'copy)$', word): - yield match.start(), Name.Function, word - elif re.match( - r'(error|source|input|license|help|install|echo|Usage|with|func|' - r'throw-on-error|function|does|has|context|probe|\?\?|as-pair|' - r'mod|modulo|round|repend|about|set-net|append|join|rejoin|reform|' - r'remold|charset|array|replace|move|extract|forskip|forall|alter|' - r'first+|also|take|for|forever|dispatch|attempt|what-dir|' - r'change-dir|clean-path|list-dir|dirize|rename|split-path|delete|' - r'make-dir|delete-dir|in-dir|confirm|dump-obj|upgrade|what|' - r'build-tag|process-source|build-markup|decode-cgi|read-cgi|' - r'write-user|save-user|set-user-name|protect-system|parse-xml|' - r'cvs-date|cvs-version|do-boot|get-net-info|desktop|layout|' - r'scroll-para|get-face|alert|set-face|uninstall|unfocus|' - r'request-dir|center-face|do-events|net-error|decode-url|' - r'parse-header|parse-header-date|parse-email-addrs|import-email|' - r'send|build-attach-body|resend|show-popup|hide-popup|open-events|' - r'find-key-face|do-face|viewtop|confine|find-window|' - r'insert-event-func|remove-event-func|inform|dump-pane|dump-face|' - r'flag-face|deflag-face|clear-fields|read-net|vbug|path-thru|' - r'read-thru|load-thru|do-thru|launch-thru|load-image|' - r'request-download|do-face-alt|set-font|set-para|get-style|' - r'set-style|make-face|stylize|choose|hilight-text|hilight-all|' - 
r'unlight-text|focus|scroll-drag|clear-face|reset-face|scroll-face|' - r'resize-face|load-stock|load-stock-block|notify|request|flash|' - r'request-color|request-pass|request-text|request-list|' - r'request-date|request-file|dbug|editor|link-relative-path|' - r'emailer|parse-error)$', word): - yield match.start(), Keyword.Namespace, word - elif re.match( - r'(halt|quit|do|load|q|recycle|call|run|ask|parse|view|unview|' - r'return|exit|break)$', word): - yield match.start(), Name.Exception, word - elif re.match('REBOL$', word): - yield match.start(), Generic.Heading, word - elif re.match("to-.*", word): - yield match.start(), Keyword, word - elif re.match('(\+|-|\*|/|//|\*\*|and|or|xor|=\?|=|==|<>|<|>|<=|>=)$', - word): - yield match.start(), Operator, word - elif re.match(".*\?$", word): - yield match.start(), Keyword, word - elif re.match(".*\!$", word): - yield match.start(), Keyword.Type, word - elif re.match("'.*", word): - yield match.start(), Name.Variable.Instance, word # lit-word - elif re.match("#.*", word): - yield match.start(), Name.Label, word # issue - elif re.match("%.*", word): - yield match.start(), Name.Decorator, word # file - else: - yield match.start(), Name.Variable, word - - tokens = { - 'root': [ - (r'REBOL', Generic.Strong, 'script'), - (r'R', Comment), - (r'[^R]+', Comment), - ], - 'script': [ - (r'\s+', Text), - (r'#"', String.Char, 'char'), - (r'#{[0-9a-fA-F]*}', Number.Hex), - (r'2#{', Number.Hex, 'bin2'), - (r'64#{[0-9a-zA-Z+/=\s]*}', Number.Hex), - (r'"', String, 'string'), - (r'{', String, 'string2'), - (r';#+.*\n', Comment.Special), - (r';\*+.*\n', Comment.Preproc), - (r';.*\n', Comment), - (r'%"', Name.Decorator, 'stringFile'), - (r'%[^(\^{^")\s\[\]]+', Name.Decorator), - (r'[+-]?([a-zA-Z]{1,3})?\$\d+(\.\d+)?', Number.Float), # money - (r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other), # time - (r'\d+\-[0-9a-zA-Z]+\-\d+(\/\d+\:\d+(\:\d+)?' 
- r'([\.\d+]?([+-]?\d+:\d+)?)?)?', String.Other), # date - (r'\d+(\.\d+)+\.\d+', Keyword.Constant), # tuple - (r'\d+[xX]\d+', Keyword.Constant), # pair - (r'[+-]?\d+(\'\d+)?([\.,]\d*)?[eE][+-]?\d+', Number.Float), - (r'[+-]?\d+(\'\d+)?[\.,]\d*', Number.Float), - (r'[+-]?\d+(\'\d+)?', Number), - (r'[\[\]\(\)]', Generic.Strong), - (r'[a-zA-Z]+[^(\^{"\s:)]*://[^(\^{"\s)]*', Name.Decorator), # url - (r'mailto:[^(\^{"@\s)]+@[^(\^{"@\s)]+', Name.Decorator), # url - (r'[^(\^{"@\s)]+@[^(\^{"@\s)]+', Name.Decorator), # email - (r'comment\s', Comment, 'comment'), - (r'/[^(\^{^")\s/[\]]*', Name.Attribute), - (r'([^(\^{^")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback), - (r'<[a-zA-Z0-9:._-]*>', Name.Tag), - (r'<[^(<>\s")]+', Name.Tag, 'tag'), - (r'([^(\^{^")\s]+)', Text), - ], - 'string': [ - (r'[^(\^")]+', String), - (escape_re, String.Escape), - (r'[\(|\)]+', String), - (r'\^.', String.Escape), - (r'"', String, '#pop'), - ], - 'string2': [ - (r'[^(\^{^})]+', String), - (escape_re, String.Escape), - (r'[\(|\)]+', String), - (r'\^.', String.Escape), - (r'{', String, '#push'), - (r'}', String, '#pop'), - ], - 'stringFile': [ - (r'[^(\^")]+', Name.Decorator), - (escape_re, Name.Decorator), - (r'\^.', Name.Decorator), - (r'"', Name.Decorator, '#pop'), - ], - 'char': [ - (escape_re + '"', String.Char, '#pop'), - (r'\^."', String.Char, '#pop'), - (r'."', String.Char, '#pop'), - ], - 'tag': [ - (escape_re, Name.Tag), - (r'"', Name.Tag, 'tagString'), - (r'[^(<>\r\n")]+', Name.Tag), - (r'>', Name.Tag, '#pop'), - ], - 'tagString': [ - (r'[^(\^")]+', Name.Tag), - (escape_re, Name.Tag), - (r'[\(|\)]+', Name.Tag), - (r'\^.', Name.Tag), - (r'"', Name.Tag, '#pop'), - ], - 'tuple': [ - (r'(\d+\.)+', Keyword.Constant), - (r'\d+', Keyword.Constant, '#pop'), - ], - 'bin2': [ - (r'\s+', Number.Hex), - (r'([0-1]\s*){8}', Number.Hex), - (r'}', Number.Hex, '#pop'), - ], - 'comment': [ - (r'"', Comment, 'commentString1'), - (r'{', Comment, 'commentString2'), - (r'\[', Comment, 'commentBlock'), - 
(r'[^(\s{\"\[]+', Comment, '#pop'), - ], - 'commentString1': [ - (r'[^(\^")]+', Comment), - (escape_re, Comment), - (r'[\(|\)]+', Comment), - (r'\^.', Comment), - (r'"', Comment, '#pop'), - ], - 'commentString2': [ - (r'[^(\^{^})]+', Comment), - (escape_re, Comment), - (r'[\(|\)]+', Comment), - (r'\^.', Comment), - (r'{', Comment, '#push'), - (r'}', Comment, '#pop'), - ], - 'commentBlock': [ - (r'\[', Comment, '#push'), - (r'\]', Comment, '#pop'), - (r'[^(\[\])]+', Comment), - ], - } - - -class ABAPLexer(RegexLexer): - """ - Lexer for ABAP, SAP's integrated language. - - *New in Pygments 1.1.* - """ - name = 'ABAP' - aliases = ['abap'] - filenames = ['*.abap'] - mimetypes = ['text/x-abap'] - - flags = re.IGNORECASE | re.MULTILINE - - tokens = { - 'common': [ - (r'\s+', Text), - (r'^\*.*$', Comment.Single), - (r'\".*?\n', Comment.Single), - ], - 'variable-names': [ - (r'<[\S_]+>', Name.Variable), - (r'\w[\w~]*(?:(\[\])|->\*)?', Name.Variable), - ], - 'root': [ - include('common'), - #function calls - (r'(CALL\s+(?:BADI|CUSTOMER-FUNCTION|FUNCTION))(\s+)(\'?\S+\'?)', - bygroups(Keyword, Text, Name.Function)), - (r'(CALL\s+(?:DIALOG|SCREEN|SUBSCREEN|SELECTION-SCREEN|' - r'TRANSACTION|TRANSFORMATION))\b', - Keyword), - (r'(FORM|PERFORM)(\s+)(\w+)', - bygroups(Keyword, Text, Name.Function)), - (r'(PERFORM)(\s+)(\()(\w+)(\))', - bygroups(Keyword, Text, Punctuation, Name.Variable, Punctuation )), - (r'(MODULE)(\s+)(\S+)(\s+)(INPUT|OUTPUT)', - bygroups(Keyword, Text, Name.Function, Text, Keyword)), - - # method implementation - (r'(METHOD)(\s+)([\w~]+)', - bygroups(Keyword, Text, Name.Function)), - # method calls - (r'(\s+)([\w\-]+)([=\-]>)([\w\-~]+)', - bygroups(Text, Name.Variable, Operator, Name.Function)), - # call methodnames returning style - (r'(?<=(=|-)>)([\w\-~]+)(?=\()', Name.Function), - - # keywords with dashes in them. 
- # these need to be first, because for instance the -ID part - # of MESSAGE-ID wouldn't get highlighted if MESSAGE was - # first in the list of keywords. - (r'(ADD-CORRESPONDING|AUTHORITY-CHECK|' - r'CLASS-DATA|CLASS-EVENTS|CLASS-METHODS|CLASS-POOL|' - r'DELETE-ADJACENT|DIVIDE-CORRESPONDING|' - r'EDITOR-CALL|ENHANCEMENT-POINT|ENHANCEMENT-SECTION|EXIT-COMMAND|' - r'FIELD-GROUPS|FIELD-SYMBOLS|FUNCTION-POOL|' - r'INTERFACE-POOL|INVERTED-DATE|' - r'LOAD-OF-PROGRAM|LOG-POINT|' - r'MESSAGE-ID|MOVE-CORRESPONDING|MULTIPLY-CORRESPONDING|' - r'NEW-LINE|NEW-PAGE|NEW-SECTION|NO-EXTENSION|' - r'OUTPUT-LENGTH|PRINT-CONTROL|' - r'SELECT-OPTIONS|START-OF-SELECTION|SUBTRACT-CORRESPONDING|' - r'SYNTAX-CHECK|SYSTEM-EXCEPTIONS|' - r'TYPE-POOL|TYPE-POOLS' - r')\b', Keyword), - - # keyword kombinations - (r'CREATE\s+(PUBLIC|PRIVATE|DATA|OBJECT)|' - r'((PUBLIC|PRIVATE|PROTECTED)\s+SECTION|' - r'(TYPE|LIKE)(\s+(LINE\s+OF|REF\s+TO|' - r'(SORTED|STANDARD|HASHED)\s+TABLE\s+OF))?|' - r'FROM\s+(DATABASE|MEMORY)|CALL\s+METHOD|' - r'(GROUP|ORDER) BY|HAVING|SEPARATED BY|' - r'GET\s+(BADI|BIT|CURSOR|DATASET|LOCALE|PARAMETER|' - r'PF-STATUS|(PROPERTY|REFERENCE)\s+OF|' - r'RUN\s+TIME|TIME\s+(STAMP)?)?|' - r'SET\s+(BIT|BLANK\s+LINES|COUNTRY|CURSOR|DATASET|EXTENDED\s+CHECK|' - r'HANDLER|HOLD\s+DATA|LANGUAGE|LEFT\s+SCROLL-BOUNDARY|' - r'LOCALE|MARGIN|PARAMETER|PF-STATUS|PROPERTY\s+OF|' - r'RUN\s+TIME\s+(ANALYZER|CLOCK\s+RESOLUTION)|SCREEN|' - r'TITLEBAR|UPADTE\s+TASK\s+LOCAL|USER-COMMAND)|' - r'CONVERT\s+((INVERTED-)?DATE|TIME|TIME\s+STAMP|TEXT)|' - r'(CLOSE|OPEN)\s+(DATASET|CURSOR)|' - r'(TO|FROM)\s+(DATA BUFFER|INTERNAL TABLE|MEMORY ID|' - r'DATABASE|SHARED\s+(MEMORY|BUFFER))|' - r'DESCRIBE\s+(DISTANCE\s+BETWEEN|FIELD|LIST|TABLE)|' - r'FREE\s(MEMORY|OBJECT)?|' - r'PROCESS\s+(BEFORE\s+OUTPUT|AFTER\s+INPUT|' - r'ON\s+(VALUE-REQUEST|HELP-REQUEST))|' - r'AT\s+(LINE-SELECTION|USER-COMMAND|END\s+OF|NEW)|' - r'AT\s+SELECTION-SCREEN(\s+(ON(\s+(BLOCK|(HELP|VALUE)-REQUEST\s+FOR|' - 
r'END\s+OF|RADIOBUTTON\s+GROUP))?|OUTPUT))?|' - r'SELECTION-SCREEN:?\s+((BEGIN|END)\s+OF\s+((TABBED\s+)?BLOCK|LINE|' - r'SCREEN)|COMMENT|FUNCTION\s+KEY|' - r'INCLUDE\s+BLOCKS|POSITION|PUSHBUTTON|' - r'SKIP|ULINE)|' - r'LEAVE\s+(LIST-PROCESSING|PROGRAM|SCREEN|' - r'TO LIST-PROCESSING|TO TRANSACTION)' - r'(ENDING|STARTING)\s+AT|' - r'FORMAT\s+(COLOR|INTENSIFIED|INVERSE|HOTSPOT|INPUT|FRAMES|RESET)|' - r'AS\s+(CHECKBOX|SUBSCREEN|WINDOW)|' - r'WITH\s+(((NON-)?UNIQUE)?\s+KEY|FRAME)|' - r'(BEGIN|END)\s+OF|' - r'DELETE(\s+ADJACENT\s+DUPLICATES\sFROM)?|' - r'COMPARING(\s+ALL\s+FIELDS)?|' - r'INSERT(\s+INITIAL\s+LINE\s+INTO|\s+LINES\s+OF)?|' - r'IN\s+((BYTE|CHARACTER)\s+MODE|PROGRAM)|' - r'END-OF-(DEFINITION|PAGE|SELECTION)|' - r'WITH\s+FRAME(\s+TITLE)|' - - # simple kombinations - r'AND\s+(MARK|RETURN)|CLIENT\s+SPECIFIED|CORRESPONDING\s+FIELDS\s+OF|' - r'IF\s+FOUND|FOR\s+EVENT|INHERITING\s+FROM|LEAVE\s+TO\s+SCREEN|' - r'LOOP\s+AT\s+(SCREEN)?|LOWER\s+CASE|MATCHCODE\s+OBJECT|MODIF\s+ID|' - r'MODIFY\s+SCREEN|NESTING\s+LEVEL|NO\s+INTERVALS|OF\s+STRUCTURE|' - r'RADIOBUTTON\s+GROUP|RANGE\s+OF|REF\s+TO|SUPPRESS DIALOG|' - r'TABLE\s+OF|UPPER\s+CASE|TRANSPORTING\s+NO\s+FIELDS|' - r'VALUE\s+CHECK|VISIBLE\s+LENGTH|HEADER\s+LINE)\b', Keyword), - - # single word keywords. 
- (r'(^|(?<=(\s|\.)))(ABBREVIATED|ADD|ALIASES|APPEND|ASSERT|' - r'ASSIGN(ING)?|AT(\s+FIRST)?|' - r'BACK|BLOCK|BREAK-POINT|' - r'CASE|CATCH|CHANGING|CHECK|CLASS|CLEAR|COLLECT|COLOR|COMMIT|' - r'CREATE|COMMUNICATION|COMPONENTS?|COMPUTE|CONCATENATE|CONDENSE|' - r'CONSTANTS|CONTEXTS|CONTINUE|CONTROLS|' - r'DATA|DECIMALS|DEFAULT|DEFINE|DEFINITION|DEFERRED|DEMAND|' - r'DETAIL|DIRECTORY|DIVIDE|DO|' - r'ELSE(IF)?|ENDAT|ENDCASE|ENDCLASS|ENDDO|ENDFORM|ENDFUNCTION|' - r'ENDIF|ENDLOOP|ENDMETHOD|ENDMODULE|ENDSELECT|ENDTRY|' - r'ENHANCEMENT|EVENTS|EXCEPTIONS|EXIT|EXPORT|EXPORTING|EXTRACT|' - r'FETCH|FIELDS?|FIND|FOR|FORM|FORMAT|FREE|FROM|' - r'HIDE|' - r'ID|IF|IMPORT|IMPLEMENTATION|IMPORTING|IN|INCLUDE|INCLUDING|' - r'INDEX|INFOTYPES|INITIALIZATION|INTERFACE|INTERFACES|INTO|' - r'LENGTH|LINES|LOAD|LOCAL|' - r'JOIN|' - r'KEY|' - r'MAXIMUM|MESSAGE|METHOD[S]?|MINIMUM|MODULE|MODIFY|MOVE|MULTIPLY|' - r'NODES|' - r'OBLIGATORY|OF|OFF|ON|OVERLAY|' - r'PACK|PARAMETERS|PERCENTAGE|POSITION|PROGRAM|PROVIDE|PUBLIC|PUT|' - r'RAISE|RAISING|RANGES|READ|RECEIVE|REFRESH|REJECT|REPORT|RESERVE|' - r'RESUME|RETRY|RETURN|RETURNING|RIGHT|ROLLBACK|' - r'SCROLL|SEARCH|SELECT|SHIFT|SINGLE|SKIP|SORT|SPLIT|STATICS|STOP|' - r'SUBMIT|SUBTRACT|SUM|SUMMARY|SUMMING|SUPPLY|' - r'TABLE|TABLES|TIMES|TITLE|TO|TOP-OF-PAGE|TRANSFER|TRANSLATE|TRY|TYPES|' - r'ULINE|UNDER|UNPACK|UPDATE|USING|' - r'VALUE|VALUES|VIA|' - r'WAIT|WHEN|WHERE|WHILE|WITH|WINDOW|WRITE)\b', Keyword), - - # builtins - (r'(abs|acos|asin|atan|' - r'boolc|boolx|bit_set|' - r'char_off|charlen|ceil|cmax|cmin|condense|contains|' - r'contains_any_of|contains_any_not_of|concat_lines_of|cos|cosh|' - r'count|count_any_of|count_any_not_of|' - r'dbmaxlen|distance|' - r'escape|exp|' - r'find|find_end|find_any_of|find_any_not_of|floor|frac|from_mixed|' - r'insert|' - r'lines|log|log10|' - r'match|matches|' - r'nmax|nmin|numofchar|' - r'repeat|replace|rescale|reverse|round|' - r'segment|shift_left|shift_right|sign|sin|sinh|sqrt|strlen|' - 
r'substring|substring_after|substring_from|substring_before|substring_to|' - r'tan|tanh|to_upper|to_lower|to_mixed|translate|trunc|' - r'xstrlen)(\()\b', bygroups(Name.Builtin, Punctuation)), - - (r'&[0-9]', Name), - (r'[0-9]+', Number.Integer), - - # operators which look like variable names before - # parsing variable names. - (r'(?<=(\s|.))(AND|EQ|NE|GT|LT|GE|LE|CO|CN|CA|NA|CS|NOT|NS|CP|NP|' - r'BYTE-CO|BYTE-CN|BYTE-CA|BYTE-NA|BYTE-CS|BYTE-NS|' - r'IS\s+(NOT\s+)?(INITIAL|ASSIGNED|REQUESTED|BOUND))\b', Operator), - - include('variable-names'), - - # standard oparators after variable names, - # because < and > are part of field symbols. - (r'[?*<>=\-+]', Operator), - (r"'(''|[^'])*'", String.Single), - (r'[/;:()\[\],\.]', Punctuation) - ], - } - - -class NewspeakLexer(RegexLexer): - """ - For `Newspeak ` syntax. - """ - name = 'Newspeak' - filenames = ['*.ns2'] - aliases = ['newspeak', ] - mimetypes = ['text/x-newspeak'] - - tokens = { - 'root' : [ - (r'\b(Newsqueak2)\b',Keyword.Declaration), - (r"'[^']*'",String), - (r'\b(class)(\s+)([a-zA-Z0-9_]+)(\s*)', - bygroups(Keyword.Declaration,Text,Name.Class,Text)), - (r'\b(mixin|self|super|private|public|protected|nil|true|false)\b', - Keyword), - (r'([a-zA-Z0-9_]+\:)(\s*)([a-zA-Z_]\w+)', - bygroups(Name.Function,Text,Name.Variable)), - (r'([a-zA-Z0-9_]+)(\s*)(=)', - bygroups(Name.Attribute,Text,Operator)), - (r'<[a-zA-Z0-9_]+>', Comment.Special), - include('expressionstat'), - include('whitespace') - ], - - 'expressionstat': [ - (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), - (r'\d+', Number.Integer), - (r':\w+',Name.Variable), - (r'(\w+)(::)', bygroups(Name.Variable, Operator)), - (r'\w+:', Name.Function), - (r'\w+', Name.Variable), - (r'\(|\)', Punctuation), - (r'\[|\]', Punctuation), - (r'\{|\}', Punctuation), - - (r'(\^|\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-|:)', Operator), - (r'\.|;', Punctuation), - include('whitespace'), - include('literals'), - ], - 'literals': [ - (r'\$.', String), - (r"'[^']*'", String), - 
(r"#'[^']*'", String.Symbol), - (r"#\w+:?", String.Symbol), - (r"#(\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-)+", String.Symbol) - - ], - 'whitespace' : [ - (r'\s+', Text), - (r'"[^"]*"', Comment) - ] - } - - -class GherkinLexer(RegexLexer): - """ - For `Gherkin ` syntax. - - *New in Pygments 1.2.* - """ - name = 'Gherkin' - aliases = ['Cucumber', 'cucumber', 'Gherkin', 'gherkin'] - filenames = ['*.feature'] - mimetypes = ['text/x-gherkin'] - - feature_keywords = r'^(기능|機能|功能|フィーチャ|خاصية|תכונה|Функціонал|Функционалност|Функционал|Фича|Особина|Могућност|Özellik|Właściwość|Tính năng|Trajto|Savybė|Požiadavka|Požadavek|Osobina|Ominaisuus|Omadus|OH HAI|Mogućnost|Mogucnost|Jellemző|Fīča|Funzionalità|Funktionalität|Funkcionalnost|Funkcionalitāte|Funcționalitate|Functionaliteit|Functionalitate|Funcionalitat|Funcionalidade|Fonctionnalité|Fitur|Feature|Egenskap|Egenskab|Crikey|Característica|Arwedd)(:)(.*)$' - feature_element_keywords = r'^(\s*)(시나리오 개요|시나리오|배경|背景|場景大綱|場景|场景大纲|场景|劇本大綱|劇本|テンプレ|シナリオテンプレート|シナリオテンプレ|シナリオアウトライン|シナリオ|سيناريو مخطط|سيناريو|الخلفية|תרחיש|תבנית תרחיש|רקע|Тарих|Сценарій|Сценарио|Сценарий структураси|Сценарий|Структура сценарію|Структура сценарија|Структура сценария|Скица|Рамка на сценарий|Пример|Предыстория|Предистория|Позадина|Передумова|Основа|Концепт|Контекст|Założenia|Wharrimean is|Tình huống|The thing of it is|Tausta|Taust|Tapausaihio|Tapaus|Szenariogrundriss|Szenario|Szablon scenariusza|Stsenaarium|Struktura scenarija|Skica|Skenario konsep|Skenario|Situācija|Senaryo taslağı|Senaryo|Scénář|Scénario|Schema dello scenario|Scenārijs pēc parauga|Scenārijs|Scenár|Scenaro|Scenariusz|Scenariul de şablon|Scenariul de sablon|Scenariu|Scenario Outline|Scenario Amlinellol|Scenario|Scenarijus|Scenarijaus šablonas|Scenarij|Scenarie|Rerefons|Raamstsenaarium|Primer|Pozadí|Pozadina|Pozadie|Plan du scénario|Plan du Scénario|Osnova scénáře|Osnova|Náčrt Scénáře|Náčrt Scenáru|Mate|MISHUN SRSLY|MISHUN|Kịch bản|Konturo de la 
scenaro|Kontext|Konteksts|Kontekstas|Kontekst|Koncept|Khung tình huống|Khung kịch bản|Háttér|Grundlage|Geçmiş|Forgatókönyv vázlat|Forgatókönyv|Fono|Esquema do Cenário|Esquema do Cenario|Esquema del escenario|Esquema de l\'escenari|Escenario|Escenari|Dis is what went down|Dasar|Contexto|Contexte|Contesto|Condiţii|Conditii|Cenário|Cenario|Cefndir|Bối cảnh|Blokes|Bakgrunn|Bakgrund|Baggrund|Background|B4|Antecedents|Antecedentes|All y\'all|Achtergrond|Abstrakt Scenario|Abstract Scenario)(:)(.*)$' - examples_keywords = r'^(\s*)(예|例子|例|サンプル|امثلة|דוגמאות|Сценарији|Примери|Приклади|Мисоллар|Значения|Örnekler|Voorbeelden|Variantai|Tapaukset|Scenarios|Scenariji|Scenarijai|Příklady|Példák|Príklady|Przykłady|Primjeri|Primeri|Piemēri|Pavyzdžiai|Paraugs|Juhtumid|Exemplos|Exemples|Exemplele|Exempel|Examples|Esempi|Enghreifftiau|Ekzemploj|Eksempler|Ejemplos|EXAMPLZ|Dữ liệu|Contoh|Cobber|Beispiele)(:)(.*)$' - step_keywords = r'^(\s*)(하지만|조건|먼저|만일|만약|단|그리고|그러면|那麼|那么|而且|當|当|前提|假設|假如|但是|但し|並且|もし|ならば|ただし|しかし|かつ|و |متى |لكن |عندما |ثم |بفرض |اذاً |כאשר |וגם |בהינתן |אזי |אז |אבל |Якщо |Унда |То |Припустимо, що |Припустимо |Онда |Но |Нехай |Лекин |Когато |Када |Кад |К тому же |И |Задато |Задати |Задате |Если |Допустим |Дадено |Ва |Бирок |Аммо |Али |Але |Агар |А |І |Și |És |Zatati |Zakładając |Zadato |Zadate |Zadano |Zadani |Zadan |Youse know when youse got |Youse know like when |Yna |Ya know how |Ya gotta |Y |Wun |Wtedy |When y\'all |When |Wenn |WEN |Và |Ve |Und |Un |Thì |Then y\'all |Then |Tapi |Tak |Tada |Tad |Så |Stel |Soit |Siis |Si |Sed |Se |Quando |Quand |Quan |Pryd |Pokud |Pokiaľ |Però |Pero |Pak |Oraz |Onda |Ond |Oletetaan |Og |Och |O zaman |Når |När |Niin |Nhưng |N |Mutta |Men |Mas |Maka |Majd |Mais |Maar |Ma |Lorsque |Lorsqu\'|Kun |Kuid |Kui |Khi |Keď |Ketika |Když |Kaj |Kai |Kada |Kad |Jeżeli |Ja |Ir |I CAN HAZ |I |Ha |Givun |Givet |Given y\'all |Given |Gitt |Gegeven |Gegeben sei |Fakat |Eğer ki |Etant donné |Et |Então |Entonces |Entao |En |Eeldades |E |Duota |Dun |Donitaĵo 
|Donat |Donada |Do |Diyelim ki |Dengan |Den youse gotta |De |Dato |Dar |Dann |Dan |Dado |Dacă |Daca |DEN |Când |Cuando |Cho |Cept |Cand |Cal |But y\'all |But |Buh |Biết |Bet |BUT |Atès |Atunci |Atesa |Anrhegedig a |Angenommen |And y\'all |And |An |Ama |Als |Alors |Allora |Ali |Aleshores |Ale |Akkor |Aber |AN |A také |A |\* )' - - tokens = { - 'comments': [ - (r'#.*$', Comment), - ], - 'feature_elements' : [ - (step_keywords, Keyword, "step_content_stack"), - include('comments'), - (r"(\s|.)", Name.Function), - ], - 'feature_elements_on_stack' : [ - (step_keywords, Keyword, "#pop:2"), - include('comments'), - (r"(\s|.)", Name.Function), - ], - 'examples_table': [ - (r"\s+\|", Keyword, 'examples_table_header'), - include('comments'), - (r"(\s|.)", Name.Function), - ], - 'examples_table_header': [ - (r"\s+\|\s*$", Keyword, "#pop:2"), - include('comments'), - (r"\s*\|", Keyword), - (r"[^\|]", Name.Variable), - ], - 'scenario_sections_on_stack': [ - (feature_element_keywords, bygroups(Name.Function, Keyword, Keyword, Name.Function), "feature_elements_on_stack"), - ], - 'narrative': [ - include('scenario_sections_on_stack'), - (r"(\s|.)", Name.Function), - ], - 'table_vars': [ - (r'(<[^>]+>)', Name.Variable), - ], - 'numbers': [ - (r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', String), - ], - 'string': [ - include('table_vars'), - (r'(\s|.)', String), - ], - 'py_string': [ - (r'"""', Keyword, "#pop"), - include('string'), - ], - 'step_content_root':[ - (r"$", Keyword, "#pop"), - include('step_content'), - ], - 'step_content_stack':[ - (r"$", Keyword, "#pop:2"), - include('step_content'), - ], - 'step_content':[ - (r'"', Name.Function, "double_string"), - include('table_vars'), - include('numbers'), - include('comments'), - (r'(\s|.)', Name.Function), - ], - 'table_content': [ - (r"\s+\|\s*$", Keyword, "#pop"), - include('comments'), - (r"\s*\|", Keyword), - include('string'), - ], - 'double_string': [ - (r'"', Name.Function, "#pop"), - include('string'), - ], - 'root': [ - 
(r'\n', Name.Function), - include('comments'), - (r'"""', Keyword, "py_string"), - (r'\s+\|', Keyword, 'table_content'), - (r'"', Name.Function, "double_string"), - include('table_vars'), - include('numbers'), - (r'(\s*)(@[^@\r\n\t ]+)', bygroups(Name.Function, Name.Tag)), - (step_keywords, bygroups(Name.Function, Keyword), - 'step_content_root'), - (feature_keywords, bygroups(Keyword, Keyword, Name.Function), - 'narrative'), - (feature_element_keywords, - bygroups(Name.Function, Keyword, Keyword, Name.Function), - 'feature_elements'), - (examples_keywords, - bygroups(Name.Function, Keyword, Keyword, Name.Function), - 'examples_table'), - (r'(\s|.)', Name.Function), - ] - } - -class AsymptoteLexer(RegexLexer): - """ - For `Asymptote `_ source code. - - *New in Pygments 1.2.* - """ - name = 'Asymptote' - aliases = ['asy', 'asymptote'] - filenames = ['*.asy'] - mimetypes = ['text/x-asymptote'] - - #: optional Comment or Whitespace - _ws = r'(?:\s|//.*?\n|/\*.*?\*/)+' - - tokens = { - 'whitespace': [ - (r'\n', Text), - (r'\s+', Text), - (r'\\\n', Text), # line continuation - (r'//(\n|(.|\n)*?[^\\]\n)', Comment), - (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment), - ], - 'statements': [ - # simple string (TeX friendly) - (r'"(\\\\|\\"|[^"])*"', String), - # C style string (with character escapes) - (r"'", String, 'string'), - (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float), - (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), - (r'0x[0-9a-fA-F]+[Ll]?', Number.Hex), - (r'0[0-7]+[Ll]?', Number.Oct), - (r'\d+[Ll]?', Number.Integer), - (r'[~!%^&*+=|?:<>/-]', Operator), - (r'[()\[\],.]', Punctuation), - (r'\b(case)(.+?)(:)', bygroups(Keyword, using(this), Text)), - (r'(and|controls|tension|atleast|curl|if|else|while|for|do|' - r'return|break|continue|struct|typedef|new|access|import|' - r'unravel|from|include|quote|static|public|private|restricted|' - r'this|explicit|true|false|null|cycle|newframe|operator)\b', Keyword), - # Since an asy-type-name can be also an 
asy-function-name, - # in the following we test if the string " [a-zA-Z]" follows - # the Keyword.Type. - # Of course it is not perfect ! - (r'(Braid|FitResult|Label|Legend|TreeNode|abscissa|arc|arrowhead|' - r'binarytree|binarytreeNode|block|bool|bool3|bounds|bqe|circle|' - r'conic|coord|coordsys|cputime|ellipse|file|filltype|frame|grid3|' - r'guide|horner|hsv|hyperbola|indexedTransform|int|inversion|key|' - r'light|line|linefit|marginT|marker|mass|object|pair|parabola|path|' - r'path3|pen|picture|point|position|projection|real|revolution|' - r'scaleT|scientific|segment|side|slice|splitface|string|surface|' - r'tensionSpecifier|ticklocate|ticksgridT|tickvalues|transform|' - r'transformation|tree|triangle|trilinear|triple|vector|' - r'vertex|void)(?=([ ]{1,}[a-zA-Z]))', Keyword.Type), - # Now the asy-type-name which are not asy-function-name - # except yours ! - # Perhaps useless - (r'(Braid|FitResult|TreeNode|abscissa|arrowhead|block|bool|bool3|' - r'bounds|coord|frame|guide|horner|int|linefit|marginT|pair|pen|' - r'picture|position|real|revolution|slice|splitface|ticksgridT|' - r'tickvalues|tree|triple|vertex|void)\b', Keyword.Type), - ('[a-zA-Z_][a-zA-Z0-9_]*:(?!:)', Name.Label), - ('[a-zA-Z_][a-zA-Z0-9_]*', Name), - ], - 'root': [ - include('whitespace'), - # functions - (r'((?:[a-zA-Z0-9_*\s])+?(?:\s|\*))' # return arguments - r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name - r'(\s*\([^;]*?\))' # signature - r'(' + _ws + r')({)', - bygroups(using(this), Name.Function, using(this), using(this), - Punctuation), - 'function'), - # function declarations - (r'((?:[a-zA-Z0-9_*\s])+?(?:\s|\*))' # return arguments - r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name - r'(\s*\([^;]*?\))' # signature - r'(' + _ws + r')(;)', - bygroups(using(this), Name.Function, using(this), using(this), - Punctuation)), - ('', Text, 'statement'), - ], - 'statement' : [ - include('whitespace'), - include('statements'), - ('[{}]', Punctuation), - (';', Punctuation, '#pop'), - ], - 'function': [ - 
include('whitespace'), - include('statements'), - (';', Punctuation), - ('{', Punctuation, '#push'), - ('}', Punctuation, '#pop'), - ], - 'string': [ - (r"'", String, '#pop'), - (r'\\([\\abfnrtv"\'?]|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), - (r'\n', String), - (r"[^\\'\n]+", String), # all other characters - (r'\\\n', String), - (r'\\n', String), # line continuation - (r'\\', String), # stray backslash - ] - } - - def get_tokens_unprocessed(self, text): - from pygments.lexers._asybuiltins import ASYFUNCNAME, ASYVARNAME - for index, token, value in \ - RegexLexer.get_tokens_unprocessed(self, text): - if token is Name and value in ASYFUNCNAME: - token = Name.Function - elif token is Name and value in ASYVARNAME: - token = Name.Variable - yield index, token, value - - -class PostScriptLexer(RegexLexer): - """ - Lexer for PostScript files. - - The PostScript Language Reference published by Adobe at - - is the authority for this. - - *New in Pygments 1.4.* - """ - name = 'PostScript' - aliases = ['postscript', 'postscr'] - filenames = ['*.ps', '*.eps'] - mimetypes = ['application/postscript'] - - delimiter = r'\(\)\<\>\[\]\{\}\/\%\s' - delimiter_end = r'(?=[%s])' % delimiter - - valid_name_chars = r'[^%s]' % delimiter - valid_name = r"%s+%s" % (valid_name_chars, delimiter_end) - - tokens = { - 'root': [ - # All comment types - (r'^%!.+\n', Comment.Preproc), - (r'%%.*\n', Comment.Special), - (r'(^%.*\n){2,}', Comment.Multiline), - (r'%.*\n', Comment.Single), - - # String literals are awkward; enter separate state. - (r'\(', String, 'stringliteral'), - - (r'[\{\}(\<\<)(\>\>)\[\]]', Punctuation), - - # Numbers - (r'<[0-9A-Fa-f]+>' + delimiter_end, Number.Hex), - # Slight abuse: use Oct to signify any explicit base system - (r'[0-9]+\#(\-|\+)?([0-9]+\.?|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)' - r'((e|E)[0-9]+)?' + delimiter_end, Number.Oct), - (r'(\-|\+)?([0-9]+\.?|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)((e|E)[0-9]+)?' 
- + delimiter_end, Number.Float), - (r'(\-|\+)?[0-9]+' + delimiter_end, Number.Integer), - - # References - (r'\/%s' % valid_name, Name.Variable), - - # Names - (valid_name, Name.Function), # Anything else is executed - - # These keywords taken from - # - # Is there an authoritative list anywhere that doesn't involve - # trawling documentation? - - (r'(false|true)' + delimiter_end, Keyword.Constant), - - # Conditionals / flow control - (r'(eq|ne|ge|gt|le|lt|and|or|not|if|ifelse|for|forall)' - + delimiter_end, Keyword.Reserved), - - ('(abs|add|aload|arc|arcn|array|atan|begin|bind|ceiling|charpath|' - 'clip|closepath|concat|concatmatrix|copy|cos|currentlinewidth|' - 'currentmatrix|currentpoint|curveto|cvi|cvs|def|defaultmatrix|' - 'dict|dictstackoverflow|div|dtransform|dup|end|exch|exec|exit|exp|' - 'fill|findfont|floor|get|getinterval|grestore|gsave|gt|' - 'identmatrix|idiv|idtransform|index|invertmatrix|itransform|' - 'length|lineto|ln|load|log|loop|matrix|mod|moveto|mul|neg|newpath|' - 'pathforall|pathbbox|pop|print|pstack|put|quit|rand|rangecheck|' - 'rcurveto|repeat|restore|rlineto|rmoveto|roll|rotate|round|run|' - 'save|scale|scalefont|setdash|setfont|setgray|setlinecap|' - 'setlinejoin|setlinewidth|setmatrix|setrgbcolor|shfill|show|' - 'showpage|sin|sqrt|stack|stringwidth|stroke|strokepath|sub|' - 'syntaxerror|transform|translate|truncate|typecheck|undefined|' - 'undefinedfilename|undefinedresult)' + delimiter_end, - Name.Builtin), - - (r'\s+', Text), - ], - - 'stringliteral': [ - (r'[^\(\)\\]+', String), - (r'\\', String.Escape, 'escape'), - (r'\(', String, '#push'), - (r'\)', String, '#pop'), - ], - - 'escape': [ - (r'([0-8]{3}|n|r|t|b|f|\\|\(|\))?', String.Escape, '#pop'), - ], - } - - -class AutohotkeyLexer(RegexLexer): - """ - For `autohotkey `_ source code. 
- - *New in Pygments 1.4.* - """ - name = 'autohotkey' - aliases = ['ahk', 'autohotkey'] - filenames = ['*.ahk', '*.ahkl'] - mimetypes = ['text/x-autohotkey'] - - tokens = { - 'root': [ - (r'^(\s*)(/\*)', bygroups(Text, Comment.Multiline), - 'incomment'), - (r'^(\s*)(\()', bygroups(Text, Generic), 'incontinuation'), - (r'\s+;.*?$', Comment.Singleline), - (r'^;.*?$', Comment.Singleline), - (r'[]{}(),;[]', Punctuation), - (r'(in|is|and|or|not)\b', Operator.Word), - (r'\%[a-zA-Z_#@$][a-zA-Z0-9_#@$]*\%', Name.Variable), - (r'!=|==|:=|\.=|<<|>>|[-~+/*%=<>&^|?:!.]', Operator), - include('commands'), - include('labels'), - include('builtInFunctions'), - include('builtInVariables'), - (r'"', String, combined('stringescape', 'dqs')), - include('numbers'), - (r'[a-zA-Z_#@$][a-zA-Z0-9_#@$]*', Name), - (r'\\|\'', Text), - (r'\`([\,\%\`abfnrtv\-\+;])', String.Escape), - include('garbage'), - ], - 'incomment': [ - (r'^\s*\*/', Comment.Multiline, '#pop'), - (r'[^*/]', Comment.Multiline), - (r'[*/]', Comment.Multiline) - ], - 'incontinuation': [ - (r'^\s*\)', Generic, '#pop'), - (r'[^)]', Generic), - (r'[)]', Generic), - ], - 'commands': [ - (r'(?i)^(\s*)(global|local|static|' - r'#AllowSameLineComments|#ClipboardTimeout|#CommentFlag|' - r'#ErrorStdOut|#EscapeChar|#HotkeyInterval|#HotkeyModifierTimeout|' - r'#Hotstring|#IfWinActive|#IfWinExist|#IfWinNotActive|' - r'#IfWinNotExist|#IncludeAgain|#Include|#InstallKeybdHook|' - r'#InstallMouseHook|#KeyHistory|#LTrim|#MaxHotkeysPerInterval|' - r'#MaxMem|#MaxThreads|#MaxThreadsBuffer|#MaxThreadsPerHotkey|' - r'#NoEnv|#NoTrayIcon|#Persistent|#SingleInstance|#UseHook|' - r'#WinActivateForce|AutoTrim|BlockInput|Break|Click|ClipWait|' - r'Continue|Control|ControlClick|ControlFocus|ControlGetFocus|' - r'ControlGetPos|ControlGetText|ControlGet|ControlMove|ControlSend|' - r'ControlSendRaw|ControlSetText|CoordMode|Critical|' - r'DetectHiddenText|DetectHiddenWindows|Drive|DriveGet|' - 
r'DriveSpaceFree|Edit|Else|EnvAdd|EnvDiv|EnvGet|EnvMult|EnvSet|' - r'EnvSub|EnvUpdate|Exit|ExitApp|FileAppend|' - r'FileCopy|FileCopyDir|FileCreateDir|FileCreateShortcut|' - r'FileDelete|FileGetAttrib|FileGetShortcut|FileGetSize|' - r'FileGetTime|FileGetVersion|FileInstall|FileMove|FileMoveDir|' - r'FileRead|FileReadLine|FileRecycle|FileRecycleEmpty|' - r'FileRemoveDir|FileSelectFile|FileSelectFolder|FileSetAttrib|' - r'FileSetTime|FormatTime|GetKeyState|Gosub|Goto|GroupActivate|' - r'GroupAdd|GroupClose|GroupDeactivate|Gui|GuiControl|' - r'GuiControlGet|Hotkey|IfEqual|IfExist|IfGreaterOrEqual|IfGreater|' - r'IfInString|IfLess|IfLessOrEqual|IfMsgBox|IfNotEqual|IfNotExist|' - r'IfNotInString|IfWinActive|IfWinExist|IfWinNotActive|' - r'IfWinNotExist|If |ImageSearch|IniDelete|IniRead|IniWrite|' - r'InputBox|Input|KeyHistory|KeyWait|ListHotkeys|ListLines|' - r'ListVars|Loop|Menu|MouseClickDrag|MouseClick|MouseGetPos|' - r'MouseMove|MsgBox|OnExit|OutputDebug|Pause|PixelGetColor|' - r'PixelSearch|PostMessage|Process|Progress|Random|RegDelete|' - r'RegRead|RegWrite|Reload|Repeat|Return|RunAs|RunWait|Run|' - r'SendEvent|SendInput|SendMessage|SendMode|SendPlay|SendRaw|Send|' - r'SetBatchLines|SetCapslockState|SetControlDelay|' - r'SetDefaultMouseSpeed|SetEnv|SetFormat|SetKeyDelay|' - r'SetMouseDelay|SetNumlockState|SetScrollLockState|' - r'SetStoreCapslockMode|SetTimer|SetTitleMatchMode|' - r'SetWinDelay|SetWorkingDir|Shutdown|Sleep|Sort|SoundBeep|' - r'SoundGet|SoundGetWaveVolume|SoundPlay|SoundSet|' - r'SoundSetWaveVolume|SplashImage|SplashTextOff|SplashTextOn|' - r'SplitPath|StatusBarGetText|StatusBarWait|StringCaseSense|' - r'StringGetPos|StringLeft|StringLen|StringLower|StringMid|' - r'StringReplace|StringRight|StringSplit|StringTrimLeft|' - r'StringTrimRight|StringUpper|Suspend|SysGet|Thread|ToolTip|' - r'Transform|TrayTip|URLDownloadToFile|While|WinActivate|' - r'WinActivateBottom|WinClose|WinGetActiveStats|WinGetActiveTitle|' - 
r'WinGetClass|WinGetPos|WinGetText|WinGetTitle|WinGet|WinHide|' - r'WinKill|WinMaximize|WinMenuSelectItem|WinMinimizeAllUndo|' - r'WinMinimizeAll|WinMinimize|WinMove|WinRestore|WinSetTitle|' - r'WinSet|WinShow|WinWaitActive|WinWaitClose|WinWaitNotActive|' - r'WinWait)\b', bygroups(Text, Name.Builtin)), - ], - 'builtInFunctions': [ - (r'(?i)(Abs|ACos|Asc|ASin|ATan|Ceil|Chr|Cos|DllCall|Exp|FileExist|' - r'Floor|GetKeyState|IL_Add|IL_Create|IL_Destroy|InStr|IsFunc|' - r'IsLabel|Ln|Log|LV_Add|LV_Delete|LV_DeleteCol|LV_GetCount|' - r'LV_GetNext|LV_GetText|LV_Insert|LV_InsertCol|LV_Modify|' - r'LV_ModifyCol|LV_SetImageList|Mod|NumGet|NumPut|OnMessage|' - r'RegExMatch|RegExReplace|RegisterCallback|Round|SB_SetIcon|' - r'SB_SetParts|SB_SetText|Sin|Sqrt|StrLen|SubStr|Tan|TV_Add|' - r'TV_Delete|TV_GetChild|TV_GetCount|TV_GetNext|TV_Get|' - r'TV_GetParent|TV_GetPrev|TV_GetSelection|TV_GetText|TV_Modify|' - r'VarSetCapacity|WinActive|WinExist|Object|ComObjActive|' - r'ComObjArray|ComObjEnwrap|ComObjUnwrap|ComObjParameter|' - r'ComObjType|ComObjConnect|ComObjCreate|ComObjGet|ComObjError|' - r'ComObjValue|Insert|MinIndex|MaxIndex|Remove|SetCapacity|' - r'GetCapacity|GetAddress|_NewEnum|FileOpen|Read|Write|ReadLine|' - r'WriteLine|ReadNumType|WriteNumType|RawRead|RawWrite|Seek|Tell|' - r'Close|Next|IsObject|StrPut|StrGet|Trim|LTrim|RTrim)\b', - Name.Function), - ], - 'builtInVariables': [ - (r'(?i)(A_AhkPath|A_AhkVersion|A_AppData|A_AppDataCommon|' - r'A_AutoTrim|A_BatchLines|A_CaretX|A_CaretY|A_ComputerName|' - r'A_ControlDelay|A_Cursor|A_DDDD|A_DDD|A_DD|A_DefaultMouseSpeed|' - r'A_Desktop|A_DesktopCommon|A_DetectHiddenText|' - r'A_DetectHiddenWindows|A_EndChar|A_EventInfo|A_ExitReason|' - r'A_FormatFloat|A_FormatInteger|A_Gui|A_GuiEvent|A_GuiControl|' - r'A_GuiControlEvent|A_GuiHeight|A_GuiWidth|A_GuiX|A_GuiY|A_Hour|' - r'A_IconFile|A_IconHidden|A_IconNumber|A_IconTip|A_Index|' - r'A_IPAddress1|A_IPAddress2|A_IPAddress3|A_IPAddress4|A_ISAdmin|' - 
r'A_IsCompiled|A_IsCritical|A_IsPaused|A_IsSuspended|A_KeyDelay|' - r'A_Language|A_LastError|A_LineFile|A_LineNumber|A_LoopField|' - r'A_LoopFileAttrib|A_LoopFileDir|A_LoopFileExt|A_LoopFileFullPath|' - r'A_LoopFileLongPath|A_LoopFileName|A_LoopFileShortName|' - r'A_LoopFileShortPath|A_LoopFileSize|A_LoopFileSizeKB|' - r'A_LoopFileSizeMB|A_LoopFileTimeAccessed|A_LoopFileTimeCreated|' - r'A_LoopFileTimeModified|A_LoopReadLine|A_LoopRegKey|' - r'A_LoopRegName|A_LoopRegSubkey|A_LoopRegTimeModified|' - r'A_LoopRegType|A_MDAY|A_Min|A_MM|A_MMM|A_MMMM|A_Mon|A_MouseDelay|' - r'A_MSec|A_MyDocuments|A_Now|A_NowUTC|A_NumBatchLines|A_OSType|' - r'A_OSVersion|A_PriorHotkey|A_ProgramFiles|A_Programs|' - r'A_ProgramsCommon|A_ScreenHeight|A_ScreenWidth|A_ScriptDir|' - r'A_ScriptFullPath|A_ScriptName|A_Sec|A_Space|A_StartMenu|' - r'A_StartMenuCommon|A_Startup|A_StartupCommon|A_StringCaseSense|' - r'A_Tab|A_Temp|A_ThisFunc|A_ThisHotkey|A_ThisLabel|A_ThisMenu|' - r'A_ThisMenuItem|A_ThisMenuItemPos|A_TickCount|A_TimeIdle|' - r'A_TimeIdlePhysical|A_TimeSincePriorHotkey|A_TimeSinceThisHotkey|' - r'A_TitleMatchMode|A_TitleMatchModeSpeed|A_UserName|A_WDay|' - r'A_WinDelay|A_WinDir|A_WorkingDir|A_YDay|A_YEAR|A_YWeek|A_YYYY|' - r'Clipboard|ClipboardAll|ComSpec|ErrorLevel|ProgramFiles|True|' - r'False|A_IsUnicode|A_FileEncoding|A_OSVersion|A_PtrSize)\b', - Name.Variable), - ], - 'labels': [ - # hotkeys and labels - # technically, hotkey names are limited to named keys and buttons - (r'(^\s*)([^:\s\(\"]+?:{1,2})', bygroups(Text, Name.Label)), - (r'(^\s*)(::[^:\s]+?::)', bygroups(Text, Name.Label)), - ], - 'numbers': [ - (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float), - (r'\d+[eE][+-]?[0-9]+', Number.Float), - (r'0\d+', Number.Oct), - (r'0[xX][a-fA-F0-9]+', Number.Hex), - (r'\d+L', Number.Integer.Long), - (r'\d+', Number.Integer) - ], - 'stringescape': [ - (r'\"\"|\`([\,\%\`abfnrtv])', String.Escape), - ], - 'strings': [ - (r'[^"\n]+', String), - ], - 'dqs': [ - (r'"', String, 
'#pop'), - include('strings') - ], - 'garbage': [ - (r'[^\S\n]', Text), - # (r'.', Text), # no cheating - ], - } - - -class MaqlLexer(RegexLexer): - """ - Lexer for `GoodData MAQL - `_ - scripts. - - *New in Pygments 1.4.* - """ - - name = 'MAQL' - aliases = ['maql'] - filenames = ['*.maql'] - mimetypes = ['text/x-gooddata-maql','application/x-gooddata-maql'] - - flags = re.IGNORECASE - tokens = { - 'root': [ - # IDENTITY - (r'IDENTIFIER\b', Name.Builtin), - # IDENTIFIER - (r'\{[^}]+\}', Name.Variable), - # NUMBER - (r'[0-9]+(?:\.[0-9]+)?(?:[eE][+-]?[0-9]{1,3})?', Literal.Number), - # STRING - (r'"', Literal.String, 'string-literal'), - # RELATION - (r'\<\>|\!\=', Operator), - (r'\=|\>\=|\>|\<\=|\<', Operator), - # := - (r'\:\=', Operator), - # OBJECT - (r'\[[^]]+\]', Name.Variable.Class), - # keywords - (r'(DIMENSIONS?|BOTTOM|METRIC|COUNT|OTHER|FACT|WITH|TOP|OR|' - r'ATTRIBUTE|CREATE|PARENT|FALSE|ROWS?|FROM|ALL|AS|PF|' - r'COLUMNS?|DEFINE|REPORT|LIMIT|TABLE|LIKE|AND|BY|' - r'BETWEEN|EXCEPT|SELECT|MATCH|WHERE|TRUE|FOR|IN|' - r'WITHOUT|FILTER|ALIAS|ORDER|FACT|WHEN|NOT|ON|' - r'KEYS|KEY|FULLSET|PRIMARY|LABELS|LABEL|VISUAL|' - r'TITLE|DESCRIPTION|FOLDER|ALTER|DROP|ADD|DATASET|' - r'DATATYPE|INT|BIGINT|DOUBLE|DATE|VARCHAR|DECIMAL|' - r'SYNCHRONIZE|TYPE|DEFAULT|ORDER|ASC|DESC|HYPERLINK|' - r'INCLUDE|TEMPLATE|MODIFY)\b', Keyword), - # FUNCNAME - (r'[a-zA-Z]\w*\b', Name.Function), - # Comments - (r'#.*', Comment.Single), - # Punctuation - (r'[,;\(\)]', Token.Punctuation), - # Space is not significant - (r'\s+', Text) - ], - 'string-literal': [ - (r'\\[tnrfbae"\\]', String.Escape), - (r'"', Literal.String, '#pop'), - (r'[^\\"]+', Literal.String) - ], - } - - -class GoodDataCLLexer(RegexLexer): - """ - Lexer for `GoodData-CL `_ - script files. 
- - *New in Pygments 1.4.* - """ - - name = 'GoodData-CL' - aliases = ['gooddata-cl'] - filenames = ['*.gdc'] - mimetypes = ['text/x-gooddata-cl'] - - flags = re.IGNORECASE - tokens = { - 'root': [ - # Comments - (r'#.*', Comment.Single), - # Function call - (r'[a-zA-Z]\w*', Name.Function), - # Argument list - (r'\(', Token.Punctuation, 'args-list'), - # Punctuation - (r';', Token.Punctuation), - # Space is not significant - (r'\s+', Text) - ], - 'args-list': [ - (r'\)', Token.Punctuation, '#pop'), - (r',', Token.Punctuation), - (r'[a-zA-Z]\w*', Name.Variable), - (r'=', Operator), - (r'"', Literal.String, 'string-literal'), - (r'[0-9]+(?:\.[0-9]+)?(?:[eE][+-]?[0-9]{1,3})?', Literal.Number), - # Space is not significant - (r'\s', Text) - ], - 'string-literal': [ - (r'\\[tnrfbae"\\]', String.Escape), - (r'"', Literal.String, '#pop'), - (r'[^\\"]+', Literal.String) - ] - } - - -class ProtoBufLexer(RegexLexer): - """ - Lexer for `Protocol Buffer `_ - definition files. - - *New in Pygments 1.4.* - """ - - name = 'Protocol Buffer' - aliases = ['protobuf', 'proto'] - filenames = ['*.proto'] - - tokens = { - 'root': [ - (r'[ \t]+', Text), - (r'[,;{}\[\]\(\)]', Punctuation), - (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single), - (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline), - (r'\b(import|option|optional|required|repeated|default|packed|' - r'ctype|extensions|to|max|rpc|returns)\b', Keyword), - (r'(int32|int64|uint32|uint64|sint32|sint64|' - r'fixed32|fixed64|sfixed32|sfixed64|' - r'float|double|bool|string|bytes)\b', Keyword.Type), - (r'(true|false)\b', Keyword.Constant), - (r'(package)(\s+)', bygroups(Keyword.Namespace, Text), 'package'), - (r'(message|extend)(\s+)', - bygroups(Keyword.Declaration, Text), 'message'), - (r'(enum|group|service)(\s+)', - bygroups(Keyword.Declaration, Text), 'type'), - (r'\".*\"', String), - (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float), - (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), - (r'(\-?(inf|nan))', 
Number.Float), - (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex), - (r'0[0-7]+[LlUu]*', Number.Oct), - (r'\d+[LlUu]*', Number.Integer), - (r'[+-=]', Operator), - (r'([a-zA-Z_][a-zA-Z0-9_\.]*)([ \t]*)(=)', - bygroups(Name.Attribute, Text, Operator)), - ('[a-zA-Z_][a-zA-Z0-9_\.]*', Name), - ], - 'package': [ - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Namespace, '#pop') - ], - 'message': [ - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop') - ], - 'type': [ - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name, '#pop') - ], - } - - -class HybrisLexer(RegexLexer): - """ - For `Hybris `_ source code. - - *New in Pygments 1.4.* - """ - - name = 'Hybris' - aliases = ['hybris', 'hy'] - filenames = ['*.hy', '*.hyb'] - mimetypes = ['text/x-hybris', 'application/x-hybris'] - - flags = re.MULTILINE | re.DOTALL - - tokens = { - 'root': [ - # method names - (r'^(\s*(?:function|method|operator\s+)+?)' - r'([a-zA-Z_][a-zA-Z0-9_]*)' - r'(\s*)(\()', bygroups(Keyword, Name.Function, Text, Operator)), - (r'[^\S\n]+', Text), - (r'//.*?\n', Comment.Single), - (r'/\*.*?\*/', Comment.Multiline), - (r'@[a-zA-Z_][a-zA-Z0-9_\.]*', Name.Decorator), - (r'(break|case|catch|next|default|do|else|finally|for|foreach|of|' - r'unless|if|new|return|switch|me|throw|try|while)\b', Keyword), - (r'(extends|private|protected|public|static|throws|function|method|' - r'operator)\b', Keyword.Declaration), - (r'(true|false|null|__FILE__|__LINE__|__VERSION__|__LIB_PATH__|' - r'__INC_PATH__)\b', Keyword.Constant), - (r'(class|struct)(\s+)', - bygroups(Keyword.Declaration, Text), 'class'), - (r'(import|include)(\s+)', - bygroups(Keyword.Namespace, Text), 'import'), - (r'(gc_collect|gc_mm_items|gc_mm_usage|gc_collect_threshold|' - r'urlencode|urldecode|base64encode|base64decode|sha1|crc32|sha2|' - r'md5|md5_file|acos|asin|atan|atan2|ceil|cos|cosh|exp|fabs|floor|' - r'fmod|log|log10|pow|sin|sinh|sqrt|tan|tanh|isint|isfloat|ischar|' - r'isstring|isarray|ismap|isalias|typeof|sizeof|toint|tostring|' - 
r'fromxml|toxml|binary|pack|load|eval|var_names|var_values|' - r'user_functions|dyn_functions|methods|call|call_method|mknod|' - r'mkfifo|mount|umount2|umount|ticks|usleep|sleep|time|strtime|' - r'strdate|dllopen|dlllink|dllcall|dllcall_argv|dllclose|env|exec|' - r'fork|getpid|wait|popen|pclose|exit|kill|pthread_create|' - r'pthread_create_argv|pthread_exit|pthread_join|pthread_kill|' - r'smtp_send|http_get|http_post|http_download|socket|bind|listen|' - r'accept|getsockname|getpeername|settimeout|connect|server|recv|' - r'send|close|print|println|printf|input|readline|serial_open|' - r'serial_fcntl|serial_get_attr|serial_get_ispeed|serial_get_ospeed|' - r'serial_set_attr|serial_set_ispeed|serial_set_ospeed|serial_write|' - r'serial_read|serial_close|xml_load|xml_parse|fopen|fseek|ftell|' - r'fsize|fread|fwrite|fgets|fclose|file|readdir|pcre_replace|size|' - r'pop|unmap|has|keys|values|length|find|substr|replace|split|trim|' - r'remove|contains|join)\b', Name.Builtin), - (r'(MethodReference|Runner|Dll|Thread|Pipe|Process|Runnable|' - r'CGI|ClientSocket|Socket|ServerSocket|File|Console|Directory|' - r'Exception)\b', Keyword.Type), - (r'"(\\\\|\\"|[^"])*"', String), - (r"'\\.'|'[^\\]'|'\\u[0-9a-f]{4}'", String.Char), - (r'(\.)([a-zA-Z_][a-zA-Z0-9_]*)', - bygroups(Operator, Name.Attribute)), - (r'[a-zA-Z_][a-zA-Z0-9_]*:', Name.Label), - (r'[a-zA-Z_\$][a-zA-Z0-9_]*', Name), - (r'[~\^\*!%&\[\]\(\)\{\}<>\|+=:;,./?\-@]+', Operator), - (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), - (r'0x[0-9a-f]+', Number.Hex), - (r'[0-9]+L?', Number.Integer), - (r'\n', Text), - ], - 'class': [ - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop') - ], - 'import': [ - (r'[a-zA-Z0-9_.]+\*?', Name.Namespace, '#pop') - ], - } - - -class AwkLexer(RegexLexer): - """ - For Awk scripts. 
- - *New in Pygments 1.5.* - """ - - name = 'Awk' - aliases = ['awk', 'gawk', 'mawk', 'nawk'] - filenames = ['*.awk'] - mimetypes = ['application/x-awk'] - - tokens = { - 'commentsandwhitespace': [ - (r'\s+', Text), - (r'#.*$', Comment.Single) - ], - 'slashstartsregex': [ - include('commentsandwhitespace'), - (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' - r'\B', String.Regex, '#pop'), - (r'(?=/)', Text, ('#pop', 'badregex')), - (r'', Text, '#pop') - ], - 'badregex': [ - (r'\n', Text, '#pop') - ], - 'root': [ - (r'^(?=\s|/)', Text, 'slashstartsregex'), - include('commentsandwhitespace'), - (r'\+\+|--|\|\||&&|in|\$|!?~|' - r'(\*\*|[-<>+*%\^/!=])=?', Operator, 'slashstartsregex'), - (r'[{(\[;,]', Punctuation, 'slashstartsregex'), - (r'[})\].]', Punctuation), - (r'(break|continue|do|while|exit|for|if|' - r'return)\b', Keyword, 'slashstartsregex'), - (r'function\b', Keyword.Declaration, 'slashstartsregex'), - (r'(atan2|cos|exp|int|log|rand|sin|sqrt|srand|gensub|gsub|index|' - r'length|match|split|sprintf|sub|substr|tolower|toupper|close|' - r'fflush|getline|next|nextfile|print|printf|strftime|systime|' - r'delete|system)\b', Keyword.Reserved), - (r'(ARGC|ARGIND|ARGV|CONVFMT|ENVIRON|ERRNO|FIELDWIDTHS|FILENAME|FNR|FS|' - r'IGNORECASE|NF|NR|OFMT|OFS|ORFS|RLENGTH|RS|RSTART|RT|' - r'SUBSEP)\b', Name.Builtin), - (r'[$a-zA-Z_][a-zA-Z0-9_]*', Name.Other), - (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), - (r'0x[0-9a-fA-F]+', Number.Hex), - (r'[0-9]+', Number.Integer), - (r'"(\\\\|\\"|[^"])*"', String.Double), - (r"'(\\\\|\\'|[^'])*'", String.Single), - ] - } - - -class Cfengine3Lexer(RegexLexer): - """ - Lexer for `CFEngine3 `_ policy files. 
- - *New in Pygments 1.5.* - """ - - name = 'CFEngine3' - aliases = ['cfengine3', 'cf3'] - filenames = ['*.cf'] - mimetypes = [] - - tokens = { - 'root': [ - (r'#.*?\n', Comment), - (r'(body)(\s+)(\S+)(\s+)(control)', - bygroups(Keyword, Text, Keyword, Text, Keyword)), - (r'(body|bundle)(\s+)(\S+)(\s+)(\w+)(\()', - bygroups(Keyword, Text, Keyword, Text, Name.Function, Punctuation), - 'arglist'), - (r'(body|bundle)(\s+)(\S+)(\s+)(\w+)', - bygroups(Keyword, Text, Keyword, Text, Name.Function)), - (r'(")([^"]+)(")(\s+)(string|slist|int|real)(\s*)(=>)(\s*)', - bygroups(Punctuation,Name.Variable,Punctuation, - Text,Keyword.Type,Text,Operator,Text)), - (r'(\S+)(\s*)(=>)(\s*)', - bygroups(Keyword.Reserved,Text,Operator,Text)), - (r'"', String, 'string'), - (r'(\w+)(\()', bygroups(Name.Function, Punctuation)), - (r'([\w.!&|\(\)]+)(::)', bygroups(Name.Class, Punctuation)), - (r'(\w+)(:)', bygroups(Keyword.Declaration,Punctuation)), - (r'@[\{\(][^\)\}]+[\}\)]', Name.Variable), - (r'[(){},;]', Punctuation), - (r'=>', Operator), - (r'->', Operator), - (r'\d+\.\d+', Number.Float), - (r'\d+', Number.Integer), - (r'\w+', Name.Function), - (r'\s+', Text), - ], - 'string': [ - (r'\$[\{\(]', String.Interpol, 'interpol'), - (r'\\.', String.Escape), - (r'"', String, '#pop'), - (r'\n', String), - (r'.', String), - ], - 'interpol': [ - (r'\$[\{\(]', String.Interpol, '#push'), - (r'[\}\)]', String.Interpol, '#pop'), - (r'[^\$\{\(\)\}]+', String.Interpol), - ], - 'arglist': [ - (r'\)', Punctuation, '#pop'), - (r',', Punctuation), - (r'\w+', Name.Variable), - (r'\s+', Text), - ], - } - - -class SnobolLexer(RegexLexer): - """ - Lexer for the SNOBOL4 programming language. - - Recognizes the common ASCII equivalents of the original SNOBOL4 operators. - Does not require spaces around binary operators. 
- - *New in Pygments 1.5.* - """ - - name = "Snobol" - aliases = ["snobol"] - filenames = ['*.snobol'] - mimetypes = ['text/x-snobol'] - - tokens = { - # root state, start of line - # comments, continuation lines, and directives start in column 1 - # as do labels - 'root': [ - (r'\*.*\n', Comment), - (r'[\+\.] ', Punctuation, 'statement'), - (r'-.*\n', Comment), - (r'END\s*\n', Name.Label, 'heredoc'), - (r'[A-Za-z\$][\w$]*', Name.Label, 'statement'), - (r'\s+', Text, 'statement'), - ], - # statement state, line after continuation or label - 'statement': [ - (r'\s*\n', Text, '#pop'), - (r'\s+', Text), - (r'(?<=[^\w.])(LT|LE|EQ|NE|GE|GT|INTEGER|IDENT|DIFFER|LGT|SIZE|' - r'REPLACE|TRIM|DUPL|REMDR|DATE|TIME|EVAL|APPLY|OPSYN|LOAD|UNLOAD|' - r'LEN|SPAN|BREAK|ANY|NOTANY|TAB|RTAB|REM|POS|RPOS|FAIL|FENCE|' - r'ABORT|ARB|ARBNO|BAL|SUCCEED|INPUT|OUTPUT|TERMINAL)(?=[^\w.])', - Name.Builtin), - (r'[A-Za-z][\w\.]*', Name), - # ASCII equivalents of original operators - # | for the EBCDIC equivalent, ! likewise - # \ for EBCDIC negation - (r'\*\*|[\?\$\.!%\*/#+\-@\|&\\=]', Operator), - (r'"[^"]*"', String), - (r"'[^']*'", String), - # Accept SPITBOL syntax for real numbers - # as well as Macro SNOBOL4 - (r'[0-9]+(?=[^\.EeDd])', Number.Integer), - (r'[0-9]+(\.[0-9]*)?([EDed][-+]?[0-9]+)?', Number.Float), - # Goto - (r':', Punctuation, 'goto'), - (r'[\(\)<>,;]', Punctuation), - ], - # Goto block - 'goto': [ - (r'\s*\n', Text, "#pop:2"), - (r'\s+', Text), - (r'F|S', Keyword), - (r'(\()([A-Za-z][\w.]*)(\))', - bygroups(Punctuation, Name.Label, Punctuation)) - ], - # everything after the END statement is basically one - # big heredoc. - 'heredoc': [ - (r'.*\n', String.Heredoc) - ] - } - - -class UrbiscriptLexer(ExtendedRegexLexer): - """ - For UrbiScript source code. 
- - *New in Pygments 1.5.* - """ - - name = 'UrbiScript' - aliases = ['urbiscript'] - filenames = ['*.u'] - mimetypes = ['application/x-urbiscript'] - - flags = re.DOTALL - - ## TODO - # - handle Experimental and deprecated tags with specific tokens - # - handle Angles and Durations with specific tokens - - def blob_callback(lexer, match, ctx): - text_before_blob = match.group(1) - blob_start = match.group(2) - blob_size_str = match.group(3) - blob_size = int(blob_size_str) - yield match.start(), String, text_before_blob - ctx.pos += len(text_before_blob) - - # if blob size doesn't match blob format (example : "\B(2)(aaa)") - # yield blob as a string - if ctx.text[match.end() + blob_size] != ")": - result = "\\B(" + blob_size_str + ")(" - yield match.start(), String, result - ctx.pos += len(result) - return - - # if blob is well formated, yield as Escape - blob_text = blob_start + ctx.text[match.end():match.end()+blob_size] + ")" - yield match.start(), String.Escape, blob_text - ctx.pos = match.end() + blob_size + 1 # +1 is the ending ")" - - tokens = { - 'root': [ - (r'\s+', Text), - # comments - (r'//.*?\n', Comment), - (r'/\*', Comment.Multiline, 'comment'), - (r'(?:every|for|loop|while)(?:;|&|\||,)',Keyword), - (r'(?:assert|at|break|case|catch|closure|compl|continue|' - r'default|else|enum|every|external|finally|for|freezeif|if|new|' - r'onleave|return|stopif|switch|this|throw|timeout|try|' - r'waituntil|whenever|while)\b', Keyword), - (r'(?:asm|auto|bool|char|const_cast|delete|double|dynamic_cast|' - r'explicit|export|extern|float|friend|goto|inline|int|' - r'long|mutable|namespace|register|reinterpret_cast|short|' - r'signed|sizeof|static_cast|struct|template|typedef|typeid|' - r'typename|union|unsigned|using|virtual|volatile|' - r'wchar_t)\b', Keyword.Reserved), - # deprecated keywords, use a meaningfull token when available - (r'(?:emit|foreach|internal|loopn|static)\b', Keyword), - # ignored keywords, use a meaningfull token when available - 
(r'(?:private|protected|public)\b', Keyword), - (r'(?:var|do|const|function|class)\b', Keyword.Declaration), - (r'(?:true|false|nil|void)\b', Keyword.Constant), - (r'(?:Barrier|Binary|Boolean|CallMessage|Channel|Code|' - r'Comparable|Container|Control|Date|Dictionary|Directory|' - r'Duration|Enumeration|Event|Exception|Executable|File|Finalizable|' - r'Float|FormatInfo|Formatter|Global|Group|Hash|InputStream|' - r'IoService|Job|Kernel|Lazy|List|Loadable|Lobby|Location|Logger|Math|' - r'Mutex|nil|Object|Orderable|OutputStream|Pair|Path|Pattern|Position|' - r'Primitive|Process|Profile|PseudoLazy|PubSub|RangeIterable|Regexp|' - r'Semaphore|Server|Singleton|Socket|StackFrame|Stream|String|System|' - r'Tag|Timeout|Traceable|TrajectoryGenerator|Triplet|Tuple' - r'|UObject|UValue|UVar)\b', Name.Builtin), - (r'(?:this)\b', Name.Builtin.Pseudo), - # don't match single | and & - (r'(?:[-=+*%/<>~^:]+|\.&?|\|\||&&)', Operator), - (r'(?:and_eq|and|bitand|bitor|in|not|not_eq|or_eq|or|xor_eq|xor)\b', - Operator.Word), - (r'[{}\[\]()]+', Punctuation), - (r'(?:;|\||,|&|\?|!)+', Punctuation), - (r'[$a-zA-Z_][a-zA-Z0-9_]*', Name.Other), - (r'0x[0-9a-fA-F]+', Number.Hex), - # Float, Integer, Angle and Duration - (r'(?:[0-9]+(?:(?:\.[0-9]+)?(?:[eE][+-]?[0-9]+)?)?' 
- r'((?:rad|deg|grad)|(?:ms|s|min|h|d))?)\b', Number.Float), - # handle binary blob in strings - (r'"', String.Double, "string.double"), - (r"'", String.Single, "string.single"), - ], - 'string.double': [ - (r'((?:\\\\|\\"|[^"])*?)(\\B\((\d+)\)\()', blob_callback), - (r'(\\\\|\\"|[^"])*?"', String.Double, '#pop'), - ], - 'string.single': [ - (r"((?:\\\\|\\'|[^'])*?)(\\B\((\d+)\)\()", blob_callback), - (r"(\\\\|\\'|[^'])*?'", String.Single, '#pop'), - ], - # from http://pygments.org/docs/lexerdevelopment/#changing-states - 'comment': [ - (r'[^*/]', Comment.Multiline), - (r'/\*', Comment.Multiline, '#push'), - (r'\*/', Comment.Multiline, '#pop'), - (r'[*/]', Comment.Multiline), - ] - } - - -class OpenEdgeLexer(RegexLexer): - """ - Lexer for `OpenEdge ABL (formerly Progress) - `_ source code. - - *New in Pygments 1.5.* - """ - name = 'OpenEdge ABL' - aliases = ['openedge', 'abl', 'progress'] - filenames = ['*.p', '*.cls'] - mimetypes = ['text/x-openedge', 'application/x-openedge'] - - types = (r'(?i)(^|(?<=[^0-9a-z_\-]))(CHARACTER|CHAR|CHARA|CHARAC|CHARACT|CHARACTE|' - r'COM-HANDLE|DATE|DATETIME|DATETIME-TZ|' - r'DECIMAL|DEC|DECI|DECIM|DECIMA|HANDLE|' - r'INT64|INTEGER|INT|INTE|INTEG|INTEGE|' - r'LOGICAL|LONGCHAR|MEMPTR|RAW|RECID|ROWID)\s*($|(?=[^0-9a-z_\-]))') - - keywords = (r'(?i)(^|(?<=[^0-9a-z_\-]))(' + - r'|'.join(OPENEDGEKEYWORDS) + - r')\s*($|(?=[^0-9a-z_\-]))') - tokens = { - 'root': [ - (r'/\*', Comment.Multiline, 'comment'), - (r'\{', Comment.Preproc, 'preprocessor'), - (r'\s*&.*', Comment.Preproc), - (r'0[xX][0-9a-fA-F]+[LlUu]*', Number.Hex), - (r'(?i)(DEFINE|DEF|DEFI|DEFIN)\b', Keyword.Declaration), - (types, Keyword.Type), - (keywords, Name.Builtin), - (r'"(\\\\|\\"|[^"])*"', String.Double), - (r"'(\\\\|\\'|[^'])*'", String.Single), - (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), - (r'[0-9]+', Number.Integer), - (r'\s+', Text), - (r'[+*/=-]', Operator), - (r'[.:()]', Punctuation), - (r'.', Name.Variable), # Lazy catch-all - ], - 'comment': [ 
- (r'[^*/]', Comment.Multiline), - (r'/\*', Comment.Multiline, '#push'), - (r'\*/', Comment.Multiline, '#pop'), - (r'[*/]', Comment.Multiline) - ], - 'preprocessor': [ - (r'[^{}]', Comment.Preproc), - (r'{', Comment.Preproc, '#push'), - (r'}', Comment.Preproc, '#pop'), - ], - } - - -class BroLexer(RegexLexer): - """ - For `Bro `_ scripts. - - *New in Pygments 1.5.* - """ - name = 'Bro' - aliases = ['bro'] - filenames = ['*.bro'] - - _hex = r'[0-9a-fA-F_]+' - _float = r'((\d*\.?\d+)|(\d+\.?\d*))([eE][-+]?\d+)?' - _h = r'[A-Za-z0-9][-A-Za-z0-9]*' - - tokens = { - 'root': [ - # Whitespace - (r'^@.*?\n', Comment.Preproc), - (r'#.*?\n', Comment.Single), - (r'\n', Text), - (r'\s+', Text), - (r'\\\n', Text), - # Keywords - (r'(add|alarm|break|case|const|continue|delete|do|else|enum|event' - r'|export|for|function|if|global|hook|local|module|next' - r'|of|print|redef|return|schedule|switch|type|when|while)\b', Keyword), - (r'(addr|any|bool|count|counter|double|file|int|interval|net' - r'|pattern|port|record|set|string|subnet|table|time|timer' - r'|vector)\b', Keyword.Type), - (r'(T|F)\b', Keyword.Constant), - (r'(&)((?:add|delete|expire)_func|attr|(?:create|read|write)_expire' - r'|default|disable_print_hook|raw_output|encrypt|group|log' - r'|mergeable|optional|persistent|priority|redef' - r'|rotate_(?:interval|size)|synchronized)\b', bygroups(Punctuation, - Keyword)), - (r'\s+module\b', Keyword.Namespace), - # Addresses, ports and networks - (r'\d+/(tcp|udp|icmp|unknown)\b', Number), - (r'(\d+\.){3}\d+', Number), - (r'(' + _hex + r'){7}' + _hex, Number), - (r'0x' + _hex + r'(' + _hex + r'|:)*::(' + _hex + r'|:)*', Number), - (r'((\d+|:)(' + _hex + r'|:)*)?::(' + _hex + r'|:)*', Number), - (r'(\d+\.\d+\.|(\d+\.){2}\d+)', Number), - # Hostnames - (_h + r'(\.' 
+ _h + r')+', String), - # Numeric - (_float + r'\s+(day|hr|min|sec|msec|usec)s?\b', Literal.Date), - (r'0[xX]' + _hex, Number.Hex), - (_float, Number.Float), - (r'\d+', Number.Integer), - (r'/', String.Regex, 'regex'), - (r'"', String, 'string'), - # Operators - (r'[!%*/+:<=>?~|-]', Operator), - (r'([-+=&|]{2}|[+=!><-]=)', Operator), - (r'(in|match)\b', Operator.Word), - (r'[{}()\[\]$.,;]', Punctuation), - # Identfier - (r'([_a-zA-Z]\w*)(::)', bygroups(Name, Name.Namespace)), - (r'[a-zA-Z_][a-zA-Z_0-9]*', Name) - ], - 'string': [ - (r'"', String, '#pop'), - (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), - (r'[^\\"\n]+', String), - (r'\\\n', String), - (r'\\', String) - ], - 'regex': [ - (r'/', String.Regex, '#pop'), - (r'\\[\\nt/]', String.Regex), # String.Escape is too intense here. - (r'[^\\/\n]+', String.Regex), - (r'\\\n', String.Regex), - (r'\\', String.Regex) - ] - } - - -class CbmBasicV2Lexer(RegexLexer): - """ - For CBM BASIC V2 sources. - - *New in Pygments 1.6.* - """ - name = 'CBM BASIC V2' - aliases = ['cbmbas'] - filenames = ['*.bas'] - - flags = re.IGNORECASE - - tokens = { - 'root': [ - (r'rem.*\n', Comment.Single), - (r'\s+', Text), - (r'new|run|end|for|to|next|step|go(to|sub)?|on|return|stop|cont' - r'|if|then|input#?|read|wait|load|save|verify|poke|sys|print#?' 
- r'|list|clr|cmd|open|close|get#?', Keyword.Reserved), - (r'data|restore|dim|let|def|fn', Keyword.Declaration), - (r'tab|spc|sgn|int|abs|usr|fre|pos|sqr|rnd|log|exp|cos|sin|tan|atn' - r'|peek|len|val|asc|(str|chr|left|right|mid)\$', Name.Builtin), - (r'[-+*/^<>=]', Operator), - (r'not|and|or', Operator.Word), - (r'"[^"\n]*.', String), - (r'\d+|[-+]?\d*\.\d*(e[-+]?\d+)?', Number.Float), - (r'[\(\),:;]', Punctuation), - (r'\w+[$%]?', Name), - ] - } - - def analyse_text(self, text): - # if it starts with a line number, it shouldn't be a "modern" Basic - # like VB.net - if re.match(r'\d+', text): - return True - - -class MscgenLexer(RegexLexer): - """ - For `Mscgen `_ files. - - *New in Pygments 1.6.* - """ - name = 'Mscgen' - aliases = ['mscgen', 'msc'] - filenames = ['*.msc'] - - _var = r'([a-zA-Z0-9_]+|"(?:\\"|[^"])*")' - - tokens = { - 'root': [ - (r'msc\b', Keyword.Type), - # Options - (r'(hscale|HSCALE|width|WIDTH|wordwraparcs|WORDWRAPARCS' - r'|arcgradient|ARCGRADIENT)\b', Name.Property), - # Operators - (r'(abox|ABOX|rbox|RBOX|box|BOX|note|NOTE)\b', Operator.Word), - (r'(\.|-|\|){3}', Keyword), - (r'(?:-|=|\.|:){2}' - r'|<<=>>|<->|<=>|<<>>|<:>' - r'|->|=>>|>>|=>|:>|-x|-X' - r'|<-|<<=|<<|<=|<:|x-|X-|=', Operator), - # Names - (r'\*', Name.Builtin), - (_var, Name.Variable), - # Other - (r'\[', Punctuation, 'attrs'), - (r'\{|\}|,|;', Punctuation), - include('comments') - ], - 'attrs': [ - (r'\]', Punctuation, '#pop'), - (_var + r'(\s*)(=)(\s*)' + _var, - bygroups(Name.Attribute, Text.Whitespace, Operator, Text.Whitespace, - String)), - (r',', Punctuation), - include('comments') - ], - 'comments': [ - (r'(?://|#).*?\n', Comment.Single), - (r'/\*(?:.|\n)*?\*/', Comment.Multiline), - (r'[ \t\r\n]+', Text.Whitespace) - ] - } - - -def _rx_indent(level): - # Kconfig *always* interprets a tab as 8 spaces, so this is the default. 
- # Edit this if you are in an environment where KconfigLexer gets expanded - # input (tabs expanded to spaces) and the expansion tab width is != 8, - # e.g. in connection with Trac (trac.ini, [mimeviewer], tab_width). - # Value range here is 2 <= {tab_width} <= 8. - tab_width = 8 - # Regex matching a given indentation {level}, assuming that indentation is - # a multiple of {tab_width}. In other cases there might be problems. - return r'(?:\t| {1,%s}\t| {%s}){%s}.*\n' % (tab_width-1, tab_width, level) - - -class KconfigLexer(RegexLexer): - """ - For Linux-style Kconfig files. - - *New in Pygments 1.6.* - """ - - name = 'Kconfig' - aliases = ['kconfig', 'menuconfig', 'linux-config', 'kernel-config'] - # Adjust this if new kconfig file names appear in your environment - filenames = ['Kconfig', '*Config.in*', 'external.in*', - 'standard-modules.in'] - mimetypes = ['text/x-kconfig'] - # No re.MULTILINE, indentation-aware help text needs line-by-line handling - flags = 0 - - def call_indent(level): - # If indentation >= {level} is detected, enter state 'indent{level}' - return (_rx_indent(level), String.Doc, 'indent%s' % level) - - def do_indent(level): - # Print paragraphs of indentation level >= {level} as String.Doc, - # ignoring blank lines. Then return to 'root' state. 
- return [ - (_rx_indent(level), String.Doc), - (r'\s*\n', Text), - (r'', Generic, '#pop:2') - ] - - tokens = { - 'root': [ - (r'\s+', Text), - (r'#.*?\n', Comment.Single), - (r'(mainmenu|config|menuconfig|choice|endchoice|comment|menu|' - r'endmenu|visible if|if|endif|source|prompt|select|depends on|' - r'default|range|option)\b', Keyword), - (r'(---help---|help)[\t ]*\n', Keyword, 'help'), - (r'(bool|tristate|string|hex|int|defconfig_list|modules|env)\b', - Name.Builtin), - (r'[!=&|]', Operator), - (r'[()]', Punctuation), - (r'[0-9]+', Number.Integer), - (r"'(''|[^'])*'", String.Single), - (r'"(""|[^"])*"', String.Double), - (r'\S+', Text), - ], - # Help text is indented, multi-line and ends when a lower indentation - # level is detected. - 'help': [ - # Skip blank lines after help token, if any - (r'\s*\n', Text), - # Determine the first help line's indentation level heuristically(!). - # Attention: this is not perfect, but works for 99% of "normal" - # indentation schemes up to a max. indentation level of 7. - call_indent(7), - call_indent(6), - call_indent(5), - call_indent(4), - call_indent(3), - call_indent(2), - call_indent(1), - ('', Text, '#pop'), # for incomplete help sections without text - ], - # Handle text for indentation levels 7 to 1 - 'indent7': do_indent(7), - 'indent6': do_indent(6), - 'indent5': do_indent(5), - 'indent4': do_indent(4), - 'indent3': do_indent(3), - 'indent2': do_indent(2), - 'indent1': do_indent(1), - } - - -class VGLLexer(RegexLexer): - """ - For `SampleManager VGL `_ - source code. 
- - *New in Pygments 1.6.* - """ - name = 'VGL' - aliases = ['vgl'] - filenames = ['*.rpf'] - - flags = re.MULTILINE | re.DOTALL | re.IGNORECASE - - tokens = { - 'root': [ - (r'\{[^\}]*\}', Comment.Multiline), - (r'declare', Keyword.Constant), - (r'(if|then|else|endif|while|do|endwhile|and|or|prompt|object' - r'|create|on|line|with|global|routine|value|endroutine|constant' - r'|global|set|join|library|compile_option|file|exists|create|copy' - r'|delete|enable|windows|name|notprotected)(?! *[=<>.,()])', - Keyword), - (r'(true|false|null|empty|error|locked)', Keyword.Constant), - (r'[~\^\*\#!%&\[\]\(\)<>\|+=:;,./?-]', Operator), - (r'"[^"]*"', String), - (r'(\.)([a-z_\$][a-z0-9_\$]*)', bygroups(Operator, Name.Attribute)), - (r'[0-9][0-9]*(\.[0-9]+(e[+\-]?[0-9]+)?)?', Number), - (r'[a-z_\$][a-z0-9_\$]*', Name), - (r'[\r\n]+', Text), - (r'\s+', Text) - ] - } - - -class SourcePawnLexer(RegexLexer): - """ - For SourcePawn source code with preprocessor directives. - - *New in Pygments 1.6.* - """ - name = 'SourcePawn' - aliases = ['sp'] - filenames = ['*.sp'] - mimetypes = ['text/x-sourcepawn'] - - #: optional Comment or Whitespace - _ws = r'(?:\s|//.*?\n|/\*.*?\*/)+' - - tokens = { - 'root': [ - # preprocessor directives: without whitespace - ('^#if\s+0', Comment.Preproc, 'if0'), - ('^#', Comment.Preproc, 'macro'), - # or with whitespace - ('^' + _ws + r'#if\s+0', Comment.Preproc, 'if0'), - ('^' + _ws + '#', Comment.Preproc, 'macro'), - (r'\n', Text), - (r'\s+', Text), - (r'\\\n', Text), # line continuation - (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single), - (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline), - (r'[{}]', Punctuation), - (r'L?"', String, 'string'), - (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char), - (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float), - (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), - (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex), - (r'0[0-7]+[LlUu]*', Number.Oct), - (r'\d+[LlUu]*', Number.Integer), - 
(r'\*/', Error), - (r'[~!%^&*+=|?:<>/-]', Operator), - (r'[()\[\],.;]', Punctuation), - (r'(case|const|continue|native|' - r'default|else|enum|for|if|new|operator|' - r'public|return|sizeof|static|decl|struct|switch)\b', Keyword), - (r'(bool|Float)\b', Keyword.Type), - (r'(true|false)\b', Keyword.Constant), - ('[a-zA-Z_][a-zA-Z0-9_]*', Name), - ], - 'string': [ - (r'"', String, '#pop'), - (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), - (r'[^\\"\n]+', String), # all other characters - (r'\\\n', String), # line continuation - (r'\\', String), # stray backslash - ], - 'macro': [ - (r'[^/\n]+', Comment.Preproc), - (r'/\*(.|\n)*?\*/', Comment.Multiline), - (r'//.*?\n', Comment.Single, '#pop'), - (r'/', Comment.Preproc), - (r'(?<=\\)\n', Comment.Preproc), - (r'\n', Comment.Preproc, '#pop'), - ], - 'if0': [ - (r'^\s*#if.*?(?`__ configuration DSL. - - *New in Pygments 1.6.* - """ - name = 'Puppet' - aliases = ['puppet'] - filenames = ['*.pp'] - - tokens = { - 'root': [ - include('comments'), - include('keywords'), - include('names'), - include('numbers'), - include('operators'), - include('strings'), - - (r'[]{}:(),;[]', Punctuation), - (r'[^\S\n]+', Text), - ], - - 'comments': [ - (r'\s*#.*$', Comment), - (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), - ], - - 'operators': [ - (r'(=>|\?|<|>|=|\+|-|/|\*|~|!|\|)', Operator), - (r'(in|and|or|not)\b', Operator.Word), - ], - - 'names': [ - ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Attribute), - (r'(\$\S+)(\[)(\S+)(\])', bygroups(Name.Variable, Punctuation, - String, Punctuation)), - (r'\$\S+', Name.Variable), - ], - - 'numbers': [ - # Copypasta from the Python lexer - (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float), - (r'\d+[eE][+-]?[0-9]+j?', Number.Float), - (r'0[0-7]+j?', Number.Oct), - (r'0[xX][a-fA-F0-9]+', Number.Hex), - (r'\d+L', Number.Integer.Long), - (r'\d+j?', Number.Integer) - ], - - 'keywords': [ - # Left out 'group' and 'require' - # Since they're often used as attributes - 
(r'(?i)(absent|alert|alias|audit|augeas|before|case|check|class|' - r'computer|configured|contained|create_resources|crit|cron|debug|' - r'default|define|defined|directory|else|elsif|emerg|err|exec|' - r'extlookup|fail|false|file|filebucket|fqdn_rand|generate|host|if|' - r'import|include|info|inherits|inline_template|installed|' - r'interface|k5login|latest|link|loglevel|macauthorization|' - r'mailalias|maillist|mcx|md5|mount|mounted|nagios_command|' - r'nagios_contact|nagios_contactgroup|nagios_host|' - r'nagios_hostdependency|nagios_hostescalation|nagios_hostextinfo|' - r'nagios_hostgroup|nagios_service|nagios_servicedependency|' - r'nagios_serviceescalation|nagios_serviceextinfo|' - r'nagios_servicegroup|nagios_timeperiod|node|noop|notice|notify|' - r'package|present|purged|realize|regsubst|resources|role|router|' - r'running|schedule|scheduled_task|search|selboolean|selmodule|' - r'service|sha1|shellquote|split|sprintf|ssh_authorized_key|sshkey|' - r'stage|stopped|subscribe|tag|tagged|template|tidy|true|undef|' - r'unmounted|user|versioncmp|vlan|warning|yumrepo|zfs|zone|' - r'zpool)\b', Keyword), - ], - - 'strings': [ - (r'"([^"])*"', String), - (r'\'([^\'])*\'', String), - ], - - } - - -class NSISLexer(RegexLexer): - """ - For `NSIS `_ scripts. 
- - *New in Pygments 1.6.* - """ - name = 'NSIS' - aliases = ['nsis', 'nsi', 'nsh'] - filenames = ['*.nsi', '*.nsh'] - mimetypes = ['text/x-nsis'] - - flags = re.IGNORECASE - - tokens = { - 'root': [ - (r'[;\#].*\n', Comment), - (r"'.*?'", String.Single), - (r'"', String.Double, 'str_double'), - (r'`', String.Backtick, 'str_backtick'), - include('macro'), - include('interpol'), - include('basic'), - (r'\$\{[a-z_|][\w|]*\}', Keyword.Pseudo), - (r'/[a-z_]\w*', Name.Attribute), - ('.', Text), - ], - 'basic': [ - (r'(\n)(Function)(\s+)([._a-z][.\w]*)\b', - bygroups(Text, Keyword, Text, Name.Function)), - (r'\b([_a-z]\w*)(::)([a-z][a-z0-9]*)\b', - bygroups(Keyword.Namespace, Punctuation, Name.Function)), - (r'\b([_a-z]\w*)(:)', bygroups(Name.Label, Punctuation)), - (r'(\b[ULS]|\B)([\!\<\>=]?=|\<\>?|\>)\B', Operator), - (r'[|+-]', Operator), - (r'\\', Punctuation), - (r'\b(Abort|Add(?:BrandingImage|Size)|' - r'Allow(?:RootDirInstall|SkipFiles)|AutoCloseWindow|' - r'BG(?:Font|Gradient)|BrandingText|BringToFront|Call(?:InstDLL)?|' - r'(?:Sub)?Caption|ChangeUI|CheckBitmap|ClearErrors|CompletedText|' - r'ComponentText|CopyFiles|CRCCheck|' - r'Create(?:Directory|Font|Shortcut)|Delete(?:INI(?:Sec|Str)|' - r'Reg(?:Key|Value))?|DetailPrint|DetailsButtonText|' - r'Dir(?:Show|Text|Var|Verify)|(?:Disabled|Enabled)Bitmap|' - r'EnableWindow|EnumReg(?:Key|Value)|Exch|Exec(?:Shell|Wait)?|' - r'ExpandEnvStrings|File(?:BufSize|Close|ErrorText|Open|' - r'Read(?:Byte)?|Seek|Write(?:Byte)?)?|' - r'Find(?:Close|First|Next|Window)|FlushINI|Function(?:End)?|' - r'Get(?:CurInstType|CurrentAddress|DlgItem|DLLVersion(?:Local)?|' - r'ErrorLevel|FileTime(?:Local)?|FullPathName|FunctionAddress|' - r'InstDirError|LabelAddress|TempFileName)|' - r'Goto|HideWindow|Icon|' - r'If(?:Abort|Errors|FileExists|RebootFlag|Silent)|' - r'InitPluginsDir|Install(?:ButtonText|Colors|Dir(?:RegKey)?)|' - r'Inst(?:ProgressFlags|Type(?:[GS]etText)?)|Int(?:CmpU?|Fmt|Op)|' - r'IsWindow|LangString(?:UP)?|' - 
r'License(?:BkColor|Data|ForceSelection|LangString|Text)|' - r'LoadLanguageFile|LockWindow|Log(?:Set|Text)|MessageBox|' - r'MiscButtonText|Name|Nop|OutFile|(?:Uninst)?Page(?:Ex(?:End)?)?|' - r'PluginDir|Pop|Push|Quit|Read(?:(?:Env|INI|Reg)Str|RegDWORD)|' - r'Reboot|(?:Un)?RegDLL|Rename|RequestExecutionLevel|ReserveFile|' - r'Return|RMDir|SearchPath|Section(?:Divider|End|' - r'(?:(?:Get|Set)(?:Flags|InstTypes|Size|Text))|Group(?:End)?|In)?|' - r'SendMessage|Set(?:AutoClose|BrandingImage|Compress(?:ionLevel|' - r'or(?:DictSize)?)?|CtlColors|CurInstType|DatablockOptimize|' - r'DateSave|Details(?:Print|View)|Error(?:s|Level)|FileAttributes|' - r'Font|OutPath|Overwrite|PluginUnload|RebootFlag|ShellVarContext|' - r'Silent|StaticBkColor)|' - r'Show(?:(?:I|Uni)nstDetails|Window)|Silent(?:Un)?Install|Sleep|' - r'SpaceTexts|Str(?:CmpS?|Cpy|Len)|SubSection(?:End)?|' - r'Uninstall(?:ButtonText|(?:Sub)?Caption|EXEName|Icon|Text)|' - r'UninstPage|Var|VI(?:AddVersionKey|ProductVersion)|WindowIcon|' - r'Write(?:INIStr|Reg(:?Bin|DWORD|(?:Expand)?Str)|Uninstaller)|' - r'XPStyle)\b', Keyword), - (r'\b(CUR|END|(?:FILE_ATTRIBUTE_)?' 
- r'(?:ARCHIVE|HIDDEN|NORMAL|OFFLINE|READONLY|SYSTEM|TEMPORARY)|' - r'HK(CC|CR|CU|DD|LM|PD|U)|' - r'HKEY_(?:CLASSES_ROOT|CURRENT_(?:CONFIG|USER)|DYN_DATA|' - r'LOCAL_MACHINE|PERFORMANCE_DATA|USERS)|' - r'ID(?:ABORT|CANCEL|IGNORE|NO|OK|RETRY|YES)|' - r'MB_(?:ABORTRETRYIGNORE|DEFBUTTON[1-4]|' - r'ICON(?:EXCLAMATION|INFORMATION|QUESTION|STOP)|' - r'OK(?:CANCEL)?|RETRYCANCEL|RIGHT|SETFOREGROUND|TOPMOST|USERICON|' - r'YESNO(?:CANCEL)?)|SET|SHCTX|' - r'SW_(?:HIDE|SHOW(?:MAXIMIZED|MINIMIZED|NORMAL))|' - r'admin|all|auto|both|bottom|bzip2|checkbox|colored|current|false|' - r'force|hide|highest|if(?:diff|newer)|lastused|leave|left|' - r'listonly|lzma|nevershow|none|normal|off|on|pop|push|' - r'radiobuttons|right|show|silent|silentlog|smooth|textonly|top|' - r'true|try|user|zlib)\b', Name.Constant), - ], - 'macro': [ - (r'\!(addincludedir(?:dir)?|addplugindir|appendfile|cd|define|' - r'delfilefile|echo(?:message)?|else|endif|error|execute|' - r'if(?:macro)?n?(?:def)?|include|insertmacro|macro(?:end)?|packhdr|' - r'search(?:parse|replace)|system|tempfilesymbol|undef|verbose|' - r'warning)\b', Comment.Preproc), - ], - 'interpol': [ - (r'\$(R?[0-9])', Name.Builtin.Pseudo), # registers - (r'\$(ADMINTOOLS|APPDATA|CDBURN_AREA|COOKIES|COMMONFILES(?:32|64)|' - r'DESKTOP|DOCUMENTS|EXE(?:DIR|FILE|PATH)|FAVORITES|FONTS|HISTORY|' - r'HWNDPARENT|INTERNET_CACHE|LOCALAPPDATA|MUSIC|NETHOOD|PICTURES|' - r'PLUGINSDIR|PRINTHOOD|PROFILE|PROGRAMFILES(?:32|64)|QUICKLAUNCH|' - r'RECENT|RESOURCES(?:_LOCALIZED)?|SENDTO|SM(?:PROGRAMS|STARTUP)|' - r'STARTMENU|SYSDIR|TEMP(?:LATES)?|VIDEOS|WINDIR|\{NSISDIR\})', - Name.Builtin), - (r'\$(CMDLINE|INSTDIR|OUTDIR|LANGUAGE)', Name.Variable.Global), - (r'\$[a-z_]\w*', Name.Variable), - ], - 'str_double': [ - (r'"', String, '#pop'), - (r'\$(\\[nrt"]|\$)', String.Escape), - include('interpol'), - (r'.', String.Double), - ], - 'str_backtick': [ - (r'`', String, '#pop'), - (r'\$(\\[nrt"]|\$)', String.Escape), - include('interpol'), - (r'.', String.Double), - ], - 
} - - -class RPMSpecLexer(RegexLexer): - """ - For RPM *.spec files - - *New in Pygments 1.6.* - """ - - name = 'RPMSpec' - aliases = ['spec'] - filenames = ['*.spec'] - mimetypes = ['text/x-rpm-spec'] - - _directives = ('(?:package|prep|build|install|clean|check|pre[a-z]*|' - 'post[a-z]*|trigger[a-z]*|files)') - - tokens = { - 'root': [ - (r'#.*\n', Comment), - include('basic'), - ], - 'description': [ - (r'^(%' + _directives + ')(.*)$', - bygroups(Name.Decorator, Text), '#pop'), - (r'\n', Text), - (r'.', Text), - ], - 'changelog': [ - (r'\*.*\n', Generic.Subheading), - (r'^(%' + _directives + ')(.*)$', - bygroups(Name.Decorator, Text), '#pop'), - (r'\n', Text), - (r'.', Text), - ], - 'string': [ - (r'"', String.Double, '#pop'), - (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), - include('interpol'), - (r'.', String.Double), - ], - 'basic': [ - include('macro'), - (r'(?i)^(Name|Version|Release|Epoch|Summary|Group|License|Packager|' - r'Vendor|Icon|URL|Distribution|Prefix|Patch[0-9]*|Source[0-9]*|' - r'Requires\(?[a-z]*\)?|[a-z]+Req|Obsoletes|Suggests|Provides|Conflicts|' - r'Build[a-z]+|[a-z]+Arch|Auto[a-z]+)(:)(.*)$', - bygroups(Generic.Heading, Punctuation, using(this))), - (r'^%description', Name.Decorator, 'description'), - (r'^%changelog', Name.Decorator, 'changelog'), - (r'^(%' + _directives + ')(.*)$', bygroups(Name.Decorator, Text)), - (r'%(attr|defattr|dir|doc(?:dir)?|setup|config(?:ure)?|' - r'make(?:install)|ghost|patch[0-9]+|find_lang|exclude|verify)', - Keyword), - include('interpol'), - (r"'.*?'", String.Single), - (r'"', String.Double, 'string'), - (r'.', Text), - ], - 'macro': [ - (r'%define.*\n', Comment.Preproc), - (r'%\{\!\?.*%define.*\}', Comment.Preproc), - (r'(%(?:if(?:n?arch)?|else(?:if)?|endif))(.*)$', - bygroups(Comment.Preproc, Text)), - ], - 'interpol': [ - (r'%\{?__[a-z_]+\}?', Name.Function), - (r'%\{?_([a-z_]+dir|[a-z_]+path|prefix)\}?', Keyword.Pseudo), - (r'%\{\?[A-Za-z0-9_]+\}', Name.Variable), - 
(r'\$\{?RPM_[A-Z0-9_]+\}?', Name.Variable.Global), - (r'%\{[a-zA-Z][a-zA-Z0-9_]+\}', Keyword.Constant), - ] - } - - -class AutoItLexer(RegexLexer): - """ - For `AutoIt `_ files. - - AutoIt is a freeware BASIC-like scripting language - designed for automating the Windows GUI and general scripting - - *New in Pygments 1.6.* - """ - name = 'AutoIt' - aliases = ['autoit', 'Autoit'] - filenames = ['*.au3'] - mimetypes = ['text/x-autoit'] - - # Keywords, functions, macros from au3.keywords.properties - # which can be found in AutoIt installed directory, e.g. - # c:\Program Files (x86)\AutoIt3\SciTE\au3.keywords.properties - - keywords = """\ - #include-once #include #endregion #forcedef #forceref #region - and byref case continueloop dim do else elseif endfunc endif - endselect exit exitloop for func global - if local next not or return select step - then to until wend while exit""".split() - - functions = """\ - abs acos adlibregister adlibunregister asc ascw asin assign atan - autoitsetoption autoitwingettitle autoitwinsettitle beep binary binarylen - binarymid binarytostring bitand bitnot bitor bitrotate bitshift bitxor - blockinput break call cdtray ceiling chr chrw clipget clipput consoleread - consolewrite consolewriteerror controlclick controlcommand controldisable - controlenable controlfocus controlgetfocus controlgethandle controlgetpos - controlgettext controlhide controllistview controlmove controlsend - controlsettext controlshow controltreeview cos dec dircopy dircreate - dirgetsize dirmove dirremove dllcall dllcalladdress dllcallbackfree - dllcallbackgetptr dllcallbackregister dllclose dllopen dllstructcreate - dllstructgetdata dllstructgetptr dllstructgetsize dllstructsetdata - drivegetdrive drivegetfilesystem drivegetlabel drivegetserial drivegettype - drivemapadd drivemapdel drivemapget drivesetlabel drivespacefree - drivespacetotal drivestatus envget envset envupdate eval execute exp - filechangedir fileclose filecopy filecreatentfslink 
filecreateshortcut - filedelete fileexists filefindfirstfile filefindnextfile fileflush - filegetattrib filegetencoding filegetlongname filegetpos filegetshortcut - filegetshortname filegetsize filegettime filegetversion fileinstall filemove - fileopen fileopendialog fileread filereadline filerecycle filerecycleempty - filesavedialog fileselectfolder filesetattrib filesetpos filesettime - filewrite filewriteline floor ftpsetproxy guicreate guictrlcreateavi - guictrlcreatebutton guictrlcreatecheckbox guictrlcreatecombo - guictrlcreatecontextmenu guictrlcreatedate guictrlcreatedummy - guictrlcreateedit guictrlcreategraphic guictrlcreategroup guictrlcreateicon - guictrlcreateinput guictrlcreatelabel guictrlcreatelist - guictrlcreatelistview guictrlcreatelistviewitem guictrlcreatemenu - guictrlcreatemenuitem guictrlcreatemonthcal guictrlcreateobj - guictrlcreatepic guictrlcreateprogress guictrlcreateradio - guictrlcreateslider guictrlcreatetab guictrlcreatetabitem - guictrlcreatetreeview guictrlcreatetreeviewitem guictrlcreateupdown - guictrldelete guictrlgethandle guictrlgetstate guictrlread guictrlrecvmsg - guictrlregisterlistviewsort guictrlsendmsg guictrlsendtodummy - guictrlsetbkcolor guictrlsetcolor guictrlsetcursor guictrlsetdata - guictrlsetdefbkcolor guictrlsetdefcolor guictrlsetfont guictrlsetgraphic - guictrlsetimage guictrlsetlimit guictrlsetonevent guictrlsetpos - guictrlsetresizing guictrlsetstate guictrlsetstyle guictrlsettip guidelete - guigetcursorinfo guigetmsg guigetstyle guiregistermsg guisetaccelerators - guisetbkcolor guisetcoord guisetcursor guisetfont guisethelp guiseticon - guisetonevent guisetstate guisetstyle guistartgroup guiswitch hex hotkeyset - httpsetproxy httpsetuseragent hwnd inetclose inetget inetgetinfo inetgetsize - inetread inidelete iniread inireadsection inireadsectionnames - inirenamesection iniwrite iniwritesection inputbox int isadmin isarray - isbinary isbool isdeclared isdllstruct isfloat ishwnd isint iskeyword - isnumber 
isobj isptr isstring log memgetstats mod mouseclick mouseclickdrag - mousedown mousegetcursor mousegetpos mousemove mouseup mousewheel msgbox - number objcreate objcreateinterface objevent objevent objget objname - onautoitexitregister onautoitexitunregister opt ping pixelchecksum - pixelgetcolor pixelsearch pluginclose pluginopen processclose processexists - processgetstats processlist processsetpriority processwait processwaitclose - progressoff progresson progressset ptr random regdelete regenumkey - regenumval regread regwrite round run runas runaswait runwait send - sendkeepactive seterror setextended shellexecute shellexecutewait shutdown - sin sleep soundplay soundsetwavevolume splashimageon splashoff splashtexton - sqrt srandom statusbargettext stderrread stdinwrite stdioclose stdoutread - string stringaddcr stringcompare stringformat stringfromasciiarray - stringinstr stringisalnum stringisalpha stringisascii stringisdigit - stringisfloat stringisint stringislower stringisspace stringisupper - stringisxdigit stringleft stringlen stringlower stringmid stringregexp - stringregexpreplace stringreplace stringright stringsplit stringstripcr - stringstripws stringtoasciiarray stringtobinary stringtrimleft - stringtrimright stringupper tan tcpaccept tcpclosesocket tcpconnect - tcplisten tcpnametoip tcprecv tcpsend tcpshutdown tcpstartup timerdiff - timerinit tooltip traycreateitem traycreatemenu traygetmsg trayitemdelete - trayitemgethandle trayitemgetstate trayitemgettext trayitemsetonevent - trayitemsetstate trayitemsettext traysetclick trayseticon traysetonevent - traysetpauseicon traysetstate traysettooltip traytip ubound udpbind - udpclosesocket udpopen udprecv udpsend udpshutdown udpstartup vargettype - winactivate winactive winclose winexists winflash wingetcaretpos - wingetclasslist wingetclientsize wingethandle wingetpos wingetprocess - wingetstate wingettext wingettitle winkill winlist winmenuselectitem - winminimizeall winminimizeallundo winmove 
winsetontop winsetstate - winsettitle winsettrans winwait winwaitactive winwaitclose - winwaitnotactive""".split() - - macros = """\ - @appdatacommondir @appdatadir @autoitexe @autoitpid @autoitversion - @autoitx64 @com_eventobj @commonfilesdir @compiled @computername @comspec - @cpuarch @cr @crlf @desktopcommondir @desktopdepth @desktopdir - @desktopheight @desktoprefresh @desktopwidth @documentscommondir @error - @exitcode @exitmethod @extended @favoritescommondir @favoritesdir - @gui_ctrlhandle @gui_ctrlid @gui_dragfile @gui_dragid @gui_dropid - @gui_winhandle @homedrive @homepath @homeshare @hotkeypressed @hour - @ipaddress1 @ipaddress2 @ipaddress3 @ipaddress4 @kblayout @lf - @logondnsdomain @logondomain @logonserver @mday @min @mon @msec @muilang - @mydocumentsdir @numparams @osarch @osbuild @oslang @osservicepack @ostype - @osversion @programfilesdir @programscommondir @programsdir @scriptdir - @scriptfullpath @scriptlinenumber @scriptname @sec @startmenucommondir - @startmenudir @startupcommondir @startupdir @sw_disable @sw_enable @sw_hide - @sw_lock @sw_maximize @sw_minimize @sw_restore @sw_show @sw_showdefault - @sw_showmaximized @sw_showminimized @sw_showminnoactive @sw_showna - @sw_shownoactivate @sw_shownormal @sw_unlock @systemdir @tab @tempdir - @tray_id @trayiconflashing @trayiconvisible @username @userprofiledir @wday - @windowsdir @workingdir @yday @year""".split() - - tokens = { - 'root': [ - (r';.*\n', Comment.Single), - (r'(#comments-start|#cs).*?(#comments-end|#ce)', Comment.Multiline), - (r'[\[\]{}(),;]', Punctuation), - (r'(and|or|not)\b', Operator.Word), - (r'[\$|@][a-zA-Z_][a-zA-Z0-9_]*', Name.Variable), - (r'!=|==|:=|\.=|<<|>>|[-~+/*%=<>&^|?:!.]', Operator), - include('commands'), - include('labels'), - include('builtInFunctions'), - include('builtInMarcros'), - (r'"', String, combined('stringescape', 'dqs')), - include('numbers'), - (r'[a-zA-Z_#@$][a-zA-Z0-9_#@$]*', Name), - (r'\\|\'', Text), - (r'\`([\,\%\`abfnrtv\-\+;])', 
String.Escape), - (r'_\n', Text), # Line continuation - include('garbage'), - ], - 'commands': [ - (r'(?i)(\s*)(%s)\b' % '|'.join(keywords), - bygroups(Text, Name.Builtin)), - ], - 'builtInFunctions': [ - (r'(?i)(%s)\b' % '|'.join(functions), - Name.Function), - ], - 'builtInMarcros': [ - (r'(?i)(%s)\b' % '|'.join(macros), - Name.Variable.Global), - ], - 'labels': [ - # sendkeys - (r'(^\s*)({\S+?})', bygroups(Text, Name.Label)), - ], - 'numbers': [ - (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float), - (r'\d+[eE][+-]?[0-9]+', Number.Float), - (r'0\d+', Number.Oct), - (r'0[xX][a-fA-F0-9]+', Number.Hex), - (r'\d+L', Number.Integer.Long), - (r'\d+', Number.Integer) - ], - 'stringescape': [ - (r'\"\"|\`([\,\%\`abfnrtv])', String.Escape), - ], - 'strings': [ - (r'[^"\n]+', String), - ], - 'dqs': [ - (r'"', String, '#pop'), - include('strings') - ], - 'garbage': [ - (r'[^\S\n]', Text), - ], - } - - -class RexxLexer(RegexLexer): - """ - `Rexx `_ is a scripting language available for - a wide range of different platforms with its roots found on mainframe - systems. It is popular for I/O- and data based tasks and can act as glue - language to bind different applications together. 
- - *New in Pygments 1.7.* - """ - name = 'Rexx' - aliases = ['rexx', 'ARexx', 'arexx'] - filenames = ['*.rexx', '*.rex', '*.rx', '*.arexx'] - mimetypes = ['text/x-rexx'] - flags = re.IGNORECASE - - tokens = { - 'root': [ - (r'\s', Whitespace), - (r'/\*', Comment.Multiline, 'comment'), - (r'"', String, 'string_double'), - (r"'", String, 'string_single'), - (r'[0-9]+(\.[0-9]+)?(e[+-]?[0-9])?', Number), - (r'([a-z_][a-z0-9_]*)(\s*)(:)(\s*)(procedure)\b', - bygroups(Name.Function, Whitespace, Operator, Whitespace, - Keyword.Declaration)), - (r'([a-z_][a-z0-9_]*)(\s*)(:)', - bygroups(Name.Label, Whitespace, Operator)), - include('function'), - include('keyword'), - include('operator'), - (r'[a-z_][a-z0-9_]*', Text), - ], - 'function': [ - (r'(abbrev|abs|address|arg|b2x|bitand|bitor|bitxor|c2d|c2x|' - r'center|charin|charout|chars|compare|condition|copies|d2c|' - r'd2x|datatype|date|delstr|delword|digits|errortext|form|' - r'format|fuzz|insert|lastpos|left|length|linein|lineout|lines|' - r'max|min|overlay|pos|queued|random|reverse|right|sign|' - r'sourceline|space|stream|strip|substr|subword|symbol|time|' - r'trace|translate|trunc|value|verify|word|wordindex|' - r'wordlength|wordpos|words|x2b|x2c|x2d|xrange)(\s*)(\()', - bygroups(Name.Builtin, Whitespace, Operator)), - ], - 'keyword': [ - (r'(address|arg|by|call|do|drop|else|end|exit|for|forever|if|' - r'interpret|iterate|leave|nop|numeric|off|on|options|parse|' - r'pull|push|queue|return|say|select|signal|to|then|trace|until|' - r'while)\b', Keyword.Reserved), - ], - 'operator': [ - (r'(-|//|/|\(|\)|\*\*|\*|\\<<|\\<|\\==|\\=|\\>>|\\>|\\|\|\||\||' - r'&&|&|%|\+|<<=|<<|<=|<>|<|==|=|><|>=|>>=|>>|>|¬<<|¬<|¬==|¬=|' - r'¬>>|¬>|¬|\.|,)', Operator), - ], - 'string_double': [ - (r'[^"\n]+', String), - (r'""', String), - (r'"', String, '#pop'), - (r'\n', Text, '#pop'), # Stray linefeed also terminates strings. 
- ], - 'string_single': [ - (r'[^\'\n]', String), - (r'\'\'', String), - (r'\'', String, '#pop'), - (r'\n', Text, '#pop'), # Stray linefeed also terminates strings. - ], - 'comment': [ - (r'[^*]+', Comment.Multiline), - (r'\*/', Comment.Multiline, '#pop'), - (r'\*', Comment.Multiline), - ] - } - - _c = lambda s: re.compile(s, re.MULTILINE) - _ADDRESS_COMMAND_PATTERN = _c(r'^\s*address\s+command\b') - _ADDRESS_PATTERN = _c(r'^\s*address\s+') - _DO_WHILE_PATTERN = _c(r'^\s*do\s+while\b') - _IF_THEN_DO_PATTERN = _c(r'^\s*if\b.+\bthen\s+do\s*$') - _PROCEDURE_PATTERN = _c(r'^\s*([a-z_][a-z0-9_]*)(\s*)(:)(\s*)(procedure)\b') - _ELSE_DO_PATTERN = _c(r'\belse\s+do\s*$') - _PARSE_ARG_PATTERN = _c(r'^\s*parse\s+(upper\s+)?(arg|value)\b') - PATTERNS_AND_WEIGHTS = ( - (_ADDRESS_COMMAND_PATTERN, 0.2), - (_ADDRESS_PATTERN, 0.05), - (_DO_WHILE_PATTERN, 0.1), - (_ELSE_DO_PATTERN, 0.1), - (_IF_THEN_DO_PATTERN, 0.1), - (_PROCEDURE_PATTERN, 0.5), - (_PARSE_ARG_PATTERN, 0.2), - ) - - def analyse_text(text): - """ - Check for inital comment and patterns that distinguish Rexx from other - C-like languages. - """ - if re.search(r'/\*\**\s*rexx', text, re.IGNORECASE): - # Header matches MVS Rexx requirements, this is certainly a Rexx - # script. - return 1.0 - elif text.startswith('/*'): - # Header matches general Rexx requirements; the source code might - # still be any language using C comments such as C++, C# or Java. 
- lowerText = text.lower() - result = sum(weight - for (pattern, weight) in RexxLexer.PATTERNS_AND_WEIGHTS - if pattern.search(lowerText)) + 0.01 - return min(result, 1.0) diff --git a/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/text.py b/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/text.py deleted file mode 100644 index 610fcf0..0000000 --- a/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/text.py +++ /dev/null @@ -1,1893 +0,0 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.text - ~~~~~~~~~~~~~~~~~~~~ - - Lexers for non-source code file types. - - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import re -from bisect import bisect - -from pygments.lexer import Lexer, LexerContext, RegexLexer, ExtendedRegexLexer, \ - bygroups, include, using, this, do_insertions -from pygments.token import Punctuation, Text, Comment, Keyword, Name, String, \ - Generic, Operator, Number, Whitespace, Literal -from pygments.util import get_bool_opt, ClassNotFound -from pygments.lexers.other import BashLexer - -__all__ = ['IniLexer', 'PropertiesLexer', 'SourcesListLexer', 'BaseMakefileLexer', - 'MakefileLexer', 'DiffLexer', 'IrcLogsLexer', 'TexLexer', - 'GroffLexer', 'ApacheConfLexer', 'BBCodeLexer', 'MoinWikiLexer', - 'RstLexer', 'VimLexer', 'GettextLexer', 'SquidConfLexer', - 'DebianControlLexer', 'DarcsPatchLexer', 'YamlLexer', - 'LighttpdConfLexer', 'NginxConfLexer', 'CMakeLexer', 'HttpLexer', - 'PyPyLogLexer', 'RegeditLexer', 'HxmlLexer', 'EbnfLexer'] - - -class IniLexer(RegexLexer): - """ - Lexer for configuration files in INI style. 
- """ - - name = 'INI' - aliases = ['ini', 'cfg', 'dosini'] - filenames = ['*.ini', '*.cfg'] - mimetypes = ['text/x-ini'] - - tokens = { - 'root': [ - (r'\s+', Text), - (r'[;#].*', Comment.Single), - (r'\[.*?\]$', Keyword), - (r'(.*?)([ \t]*)(=)([ \t]*)(.*(?:\n[ \t].+)*)', - bygroups(Name.Attribute, Text, Operator, Text, String)) - ] - } - - def analyse_text(text): - npos = text.find('\n') - if npos < 3: - return False - return text[0] == '[' and text[npos-1] == ']' - - -class RegeditLexer(RegexLexer): - """ - Lexer for `Windows Registry - `_ files produced - by regedit. - - *New in Pygments 1.6.* - """ - - name = 'reg' - aliases = ['registry'] - filenames = ['*.reg'] - mimetypes = ['text/x-windows-registry'] - - tokens = { - 'root': [ - (r'Windows Registry Editor.*', Text), - (r'\s+', Text), - (r'[;#].*', Comment.Single), - (r'(\[)(-?)(HKEY_[A-Z_]+)(.*?\])$', - bygroups(Keyword, Operator, Name.Builtin, Keyword)), - # String keys, which obey somewhat normal escaping - (r'("(?:\\"|\\\\|[^"])+")([ \t]*)(=)([ \t]*)', - bygroups(Name.Attribute, Text, Operator, Text), - 'value'), - # Bare keys (includes @) - (r'(.*?)([ \t]*)(=)([ \t]*)', - bygroups(Name.Attribute, Text, Operator, Text), - 'value'), - ], - 'value': [ - (r'-', Operator, '#pop'), # delete value - (r'(dword|hex(?:\([0-9a-fA-F]\))?)(:)([0-9a-fA-F,]+)', - bygroups(Name.Variable, Punctuation, Number), '#pop'), - # As far as I know, .reg files do not support line continuation. - (r'.*', String, '#pop'), - ] - } - - def analyse_text(text): - return text.startswith('Windows Registry Editor') - - -class PropertiesLexer(RegexLexer): - """ - Lexer for configuration files in Java's properties format. 
- - *New in Pygments 1.4.* - """ - - name = 'Properties' - aliases = ['properties', 'jproperties'] - filenames = ['*.properties'] - mimetypes = ['text/x-java-properties'] - - tokens = { - 'root': [ - (r'\s+', Text), - (r'(?:[;#]|//).*$', Comment), - (r'(.*?)([ \t]*)([=:])([ \t]*)(.*(?:(?<=\\)\n.*)*)', - bygroups(Name.Attribute, Text, Operator, Text, String)), - ], - } - - -class SourcesListLexer(RegexLexer): - """ - Lexer that highlights debian sources.list files. - - *New in Pygments 0.7.* - """ - - name = 'Debian Sourcelist' - aliases = ['sourceslist', 'sources.list', 'debsources'] - filenames = ['sources.list'] - mimetype = ['application/x-debian-sourceslist'] - - tokens = { - 'root': [ - (r'\s+', Text), - (r'#.*?$', Comment), - (r'^(deb(?:-src)?)(\s+)', - bygroups(Keyword, Text), 'distribution') - ], - 'distribution': [ - (r'#.*?$', Comment, '#pop'), - (r'\$\(ARCH\)', Name.Variable), - (r'[^\s$[]+', String), - (r'\[', String.Other, 'escaped-distribution'), - (r'\$', String), - (r'\s+', Text, 'components') - ], - 'escaped-distribution': [ - (r'\]', String.Other, '#pop'), - (r'\$\(ARCH\)', Name.Variable), - (r'[^\]$]+', String.Other), - (r'\$', String.Other) - ], - 'components': [ - (r'#.*?$', Comment, '#pop:2'), - (r'$', Text, '#pop:2'), - (r'\s+', Text), - (r'\S+', Keyword.Pseudo), - ] - } - - def analyse_text(text): - for line in text.split('\n'): - line = line.strip() - if not (line.startswith('#') or line.startswith('deb ') or - line.startswith('deb-src ') or not line): - return False - return True - - -class MakefileLexer(Lexer): - """ - Lexer for BSD and GNU make extensions (lenient enough to handle both in - the same file even). 
- - *Rewritten in Pygments 0.10.* - """ - - name = 'Makefile' - aliases = ['make', 'makefile', 'mf', 'bsdmake'] - filenames = ['*.mak', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'] - mimetypes = ['text/x-makefile'] - - r_special = re.compile(r'^(?:' - # BSD Make - r'\.\s*(include|undef|error|warning|if|else|elif|endif|for|endfor)|' - # GNU Make - r'\s*(ifeq|ifneq|ifdef|ifndef|else|endif|-?include|define|endef|:))(?=\s)') - r_comment = re.compile(r'^\s*@?#') - - def get_tokens_unprocessed(self, text): - ins = [] - lines = text.splitlines(True) - done = '' - lex = BaseMakefileLexer(**self.options) - backslashflag = False - for line in lines: - if self.r_special.match(line) or backslashflag: - ins.append((len(done), [(0, Comment.Preproc, line)])) - backslashflag = line.strip().endswith('\\') - elif self.r_comment.match(line): - ins.append((len(done), [(0, Comment, line)])) - else: - done += line - for item in do_insertions(ins, lex.get_tokens_unprocessed(done)): - yield item - - -class BaseMakefileLexer(RegexLexer): - """ - Lexer for simple Makefiles (no preprocessing). 
- - *New in Pygments 0.10.* - """ - - name = 'Base Makefile' - aliases = ['basemake'] - filenames = [] - mimetypes = [] - - tokens = { - 'root': [ - (r'^(?:[\t ]+.*\n|\n)+', using(BashLexer)), - (r'\$\((?:.*\\\n|.*\n)+', using(BashLexer)), - (r'\s+', Text), - (r'#.*?\n', Comment), - (r'(export)(\s+)(?=[a-zA-Z0-9_${}\t -]+\n)', - bygroups(Keyword, Text), 'export'), - (r'export\s+', Keyword), - # assignment - (r'([a-zA-Z0-9_${}.-]+)(\s*)([!?:+]?=)([ \t]*)((?:.*\\\n)+|.*\n)', - bygroups(Name.Variable, Text, Operator, Text, using(BashLexer))), - # strings - (r'(?s)"(\\\\|\\.|[^"\\])*"', String.Double), - (r"(?s)'(\\\\|\\.|[^'\\])*'", String.Single), - # targets - (r'([^\n:]+)(:+)([ \t]*)', bygroups(Name.Function, Operator, Text), - 'block-header'), - # TODO: add paren handling (grr) - ], - 'export': [ - (r'[a-zA-Z0-9_${}-]+', Name.Variable), - (r'\n', Text, '#pop'), - (r'\s+', Text), - ], - 'block-header': [ - (r'[^,\\\n#]+', Number), - (r',', Punctuation), - (r'#.*?\n', Comment), - (r'\\\n', Text), # line continuation - (r'\\.', Text), - (r'(?:[\t ]+.*\n|\n)+', using(BashLexer), '#pop'), - ], - } - - -class DiffLexer(RegexLexer): - """ - Lexer for unified or context-style diffs or patches. - """ - - name = 'Diff' - aliases = ['diff', 'udiff'] - filenames = ['*.diff', '*.patch'] - mimetypes = ['text/x-diff', 'text/x-patch'] - - tokens = { - 'root': [ - (r' .*\n', Text), - (r'\+.*\n', Generic.Inserted), - (r'-.*\n', Generic.Deleted), - (r'!.*\n', Generic.Strong), - (r'@.*\n', Generic.Subheading), - (r'([Ii]ndex|diff).*\n', Generic.Heading), - (r'=.*\n', Generic.Heading), - (r'.*\n', Text), - ] - } - - def analyse_text(text): - if text[:7] == 'Index: ': - return True - if text[:5] == 'diff ': - return True - if text[:4] == '--- ': - return 0.9 - - -DPATCH_KEYWORDS = ['hunk', 'addfile', 'adddir', 'rmfile', 'rmdir', 'move', - 'replace'] - -class DarcsPatchLexer(RegexLexer): - """ - DarcsPatchLexer is a lexer for the various versions of the darcs patch - format. 
Examples of this format are derived by commands such as - ``darcs annotate --patch`` and ``darcs send``. - - *New in Pygments 0.10.* - """ - name = 'Darcs Patch' - aliases = ['dpatch'] - filenames = ['*.dpatch', '*.darcspatch'] - - tokens = { - 'root': [ - (r'<', Operator), - (r'>', Operator), - (r'{', Operator), - (r'}', Operator), - (r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)(\])', - bygroups(Operator, Keyword, Name, Text, Name, Operator, - Literal.Date, Text, Operator)), - (r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)', - bygroups(Operator, Keyword, Name, Text, Name, Operator, - Literal.Date, Text), 'comment'), - (r'New patches:', Generic.Heading), - (r'Context:', Generic.Heading), - (r'Patch bundle hash:', Generic.Heading), - (r'(\s*)(%s)(.*\n)' % '|'.join(DPATCH_KEYWORDS), - bygroups(Text, Keyword, Text)), - (r'\+', Generic.Inserted, "insert"), - (r'-', Generic.Deleted, "delete"), - (r'.*\n', Text), - ], - 'comment': [ - (r'[^\]].*\n', Comment), - (r'\]', Operator, "#pop"), - ], - 'specialText': [ # darcs add [_CODE_] special operators for clarity - (r'\n', Text, "#pop"), # line-based - (r'\[_[^_]*_]', Operator), - ], - 'insert': [ - include('specialText'), - (r'\[', Generic.Inserted), - (r'[^\n\[]+', Generic.Inserted), - ], - 'delete': [ - include('specialText'), - (r'\[', Generic.Deleted), - (r'[^\n\[]+', Generic.Deleted), - ], - } - - -class IrcLogsLexer(RegexLexer): - """ - Lexer for IRC logs in *irssi*, *xchat* or *weechat* style. - """ - - name = 'IRC logs' - aliases = ['irc'] - filenames = ['*.weechatlog'] - mimetypes = ['text/x-irclog'] - - flags = re.VERBOSE | re.MULTILINE - timestamp = r""" - ( - # irssi / xchat and others - (?: \[|\()? # Opening bracket or paren for the timestamp - (?: # Timestamp - (?: (?:\d{1,4} [-/]?)+ # Date as - or /-separated groups of digits - [T ])? 
# Date/time separator: T or space - (?: \d?\d [:.]?)+ # Time as :/.-separated groups of 1 or 2 digits - ) - (?: \]|\))?\s+ # Closing bracket or paren for the timestamp - | - # weechat - \d{4}\s\w{3}\s\d{2}\s # Date - \d{2}:\d{2}:\d{2}\s+ # Time + Whitespace - | - # xchat - \w{3}\s\d{2}\s # Date - \d{2}:\d{2}:\d{2}\s+ # Time + Whitespace - )? - """ - tokens = { - 'root': [ - # log start/end - (r'^\*\*\*\*(.*)\*\*\*\*$', Comment), - # hack - ("^" + timestamp + r'(\s*<[^>]*>\s*)$', bygroups(Comment.Preproc, Name.Tag)), - # normal msgs - ("^" + timestamp + r""" - (\s*<.*?>\s*) # Nick """, - bygroups(Comment.Preproc, Name.Tag), 'msg'), - # /me msgs - ("^" + timestamp + r""" - (\s*[*]\s+) # Star - (\S+\s+.*?\n) # Nick + rest of message """, - bygroups(Comment.Preproc, Keyword, Generic.Inserted)), - # join/part msgs - ("^" + timestamp + r""" - (\s*(?:\*{3}|?)\s*) # Star(s) or symbols - (\S+\s+) # Nick + Space - (.*?\n) # Rest of message """, - bygroups(Comment.Preproc, Keyword, String, Comment)), - (r"^.*?\n", Text), - ], - 'msg': [ - (r"\S+:(?!//)", Name.Attribute), # Prefix - (r".*\n", Text, '#pop'), - ], - } - - -class BBCodeLexer(RegexLexer): - """ - A lexer that highlights BBCode(-like) syntax. - - *New in Pygments 0.6.* - """ - - name = 'BBCode' - aliases = ['bbcode'] - mimetypes = ['text/x-bbcode'] - - tokens = { - 'root': [ - (r'[^[]+', Text), - # tag/end tag begin - (r'\[/?\w+', Keyword, 'tag'), - # stray bracket - (r'\[', Text), - ], - 'tag': [ - (r'\s+', Text), - # attribute with value - (r'(\w+)(=)("?[^\s"\]]+"?)', - bygroups(Name.Attribute, Operator, String)), - # tag argument (a la [color=green]) - (r'(=)("?[^\s"\]]+"?)', - bygroups(Operator, String)), - # tag end - (r'\]', Keyword, '#pop'), - ], - } - - -class TexLexer(RegexLexer): - """ - Lexer for the TeX and LaTeX typesetting languages. 
- """ - - name = 'TeX' - aliases = ['tex', 'latex'] - filenames = ['*.tex', '*.aux', '*.toc'] - mimetypes = ['text/x-tex', 'text/x-latex'] - - tokens = { - 'general': [ - (r'%.*?\n', Comment), - (r'[{}]', Name.Builtin), - (r'[&_^]', Name.Builtin), - ], - 'root': [ - (r'\\\[', String.Backtick, 'displaymath'), - (r'\\\(', String, 'inlinemath'), - (r'\$\$', String.Backtick, 'displaymath'), - (r'\$', String, 'inlinemath'), - (r'\\([a-zA-Z]+|.)', Keyword, 'command'), - include('general'), - (r'[^\\$%&_^{}]+', Text), - ], - 'math': [ - (r'\\([a-zA-Z]+|.)', Name.Variable), - include('general'), - (r'[0-9]+', Number), - (r'[-=!+*/()\[\]]', Operator), - (r'[^=!+*/()\[\]\\$%&_^{}0-9-]+', Name.Builtin), - ], - 'inlinemath': [ - (r'\\\)', String, '#pop'), - (r'\$', String, '#pop'), - include('math'), - ], - 'displaymath': [ - (r'\\\]', String, '#pop'), - (r'\$\$', String, '#pop'), - (r'\$', Name.Builtin), - include('math'), - ], - 'command': [ - (r'\[.*?\]', Name.Attribute), - (r'\*', Keyword), - (r'', Text, '#pop'), - ], - } - - def analyse_text(text): - for start in ("\\documentclass", "\\input", "\\documentstyle", - "\\relax"): - if text[:len(start)] == start: - return True - - -class GroffLexer(RegexLexer): - """ - Lexer for the (g)roff typesetting language, supporting groff - extensions. Mainly useful for highlighting manpage sources. - - *New in Pygments 0.6.* - """ - - name = 'Groff' - aliases = ['groff', 'nroff', 'man'] - filenames = ['*.[1234567]', '*.man'] - mimetypes = ['application/x-troff', 'text/troff'] - - tokens = { - 'root': [ - (r'(\.)(\w+)', bygroups(Text, Keyword), 'request'), - (r'\.', Punctuation, 'request'), - # Regular characters, slurp till we find a backslash or newline - (r'[^\\\n]*', Text, 'textline'), - ], - 'textline': [ - include('escapes'), - (r'[^\\\n]+', Text), - (r'\n', Text, '#pop'), - ], - 'escapes': [ - # groff has many ways to write escapes. 
- (r'\\"[^\n]*', Comment), - (r'\\[fn]\w', String.Escape), - (r'\\\(.{2}', String.Escape), - (r'\\.\[.*\]', String.Escape), - (r'\\.', String.Escape), - (r'\\\n', Text, 'request'), - ], - 'request': [ - (r'\n', Text, '#pop'), - include('escapes'), - (r'"[^\n"]+"', String.Double), - (r'\d+', Number), - (r'\S+', String), - (r'\s+', Text), - ], - } - - def analyse_text(text): - if text[:1] != '.': - return False - if text[:3] == '.\\"': - return True - if text[:4] == '.TH ': - return True - if text[1:3].isalnum() and text[3].isspace(): - return 0.9 - - -class ApacheConfLexer(RegexLexer): - """ - Lexer for configuration files following the Apache config file - format. - - *New in Pygments 0.6.* - """ - - name = 'ApacheConf' - aliases = ['apacheconf', 'aconf', 'apache'] - filenames = ['.htaccess', 'apache.conf', 'apache2.conf'] - mimetypes = ['text/x-apacheconf'] - flags = re.MULTILINE | re.IGNORECASE - - tokens = { - 'root': [ - (r'\s+', Text), - (r'(#.*?)$', Comment), - (r'(<[^\s>]+)(?:(\s+)(.*?))?(>)', - bygroups(Name.Tag, Text, String, Name.Tag)), - (r'([a-zA-Z][a-zA-Z0-9_]*)(\s+)', - bygroups(Name.Builtin, Text), 'value'), - (r'\.+', Text), - ], - 'value': [ - (r'$', Text, '#pop'), - (r'[^\S\n]+', Text), - (r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number), - (r'\d+', Number), - (r'/([a-zA-Z0-9][a-zA-Z0-9_./-]+)', String.Other), - (r'(on|off|none|any|all|double|email|dns|min|minimal|' - r'os|productonly|full|emerg|alert|crit|error|warn|' - r'notice|info|debug|registry|script|inetd|standalone|' - r'user|group)\b', Keyword), - (r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double), - (r'[^\s"]+', Text) - ] - } - - -class MoinWikiLexer(RegexLexer): - """ - For MoinMoin (and Trac) Wiki markup. 
- - *New in Pygments 0.7.* - """ - - name = 'MoinMoin/Trac Wiki markup' - aliases = ['trac-wiki', 'moin'] - filenames = [] - mimetypes = ['text/x-trac-wiki'] - flags = re.MULTILINE | re.IGNORECASE - - tokens = { - 'root': [ - (r'^#.*$', Comment), - (r'(!)(\S+)', bygroups(Keyword, Text)), # Ignore-next - # Titles - (r'^(=+)([^=]+)(=+)(\s*#.+)?$', - bygroups(Generic.Heading, using(this), Generic.Heading, String)), - # Literal code blocks, with optional shebang - (r'({{{)(\n#!.+)?', bygroups(Name.Builtin, Name.Namespace), 'codeblock'), - (r'(\'\'\'?|\|\||`|__|~~|\^|,,|::)', Comment), # Formatting - # Lists - (r'^( +)([.*-])( )', bygroups(Text, Name.Builtin, Text)), - (r'^( +)([a-z]{1,5}\.)( )', bygroups(Text, Name.Builtin, Text)), - # Other Formatting - (r'\[\[\w+.*?\]\]', Keyword), # Macro - (r'(\[[^\s\]]+)(\s+[^\]]+?)?(\])', - bygroups(Keyword, String, Keyword)), # Link - (r'^----+$', Keyword), # Horizontal rules - (r'[^\n\'\[{!_~^,|]+', Text), - (r'\n', Text), - (r'.', Text), - ], - 'codeblock': [ - (r'}}}', Name.Builtin, '#pop'), - # these blocks are allowed to be nested in Trac, but not MoinMoin - (r'{{{', Text, '#push'), - (r'[^{}]+', Comment.Preproc), # slurp boring text - (r'.', Comment.Preproc), # allow loose { or } - ], - } - - -class RstLexer(RegexLexer): - """ - For `reStructuredText `_ markup. - - *New in Pygments 0.7.* - - Additional options accepted: - - `handlecodeblocks` - Highlight the contents of ``.. sourcecode:: langauge`` and - ``.. code:: language`` directives with a lexer for the given - language (default: ``True``). 
*New in Pygments 0.8.* - """ - name = 'reStructuredText' - aliases = ['rst', 'rest', 'restructuredtext'] - filenames = ['*.rst', '*.rest'] - mimetypes = ["text/x-rst", "text/prs.fallenstein.rst"] - flags = re.MULTILINE - - def _handle_sourcecode(self, match): - from pygments.lexers import get_lexer_by_name - - # section header - yield match.start(1), Punctuation, match.group(1) - yield match.start(2), Text, match.group(2) - yield match.start(3), Operator.Word, match.group(3) - yield match.start(4), Punctuation, match.group(4) - yield match.start(5), Text, match.group(5) - yield match.start(6), Keyword, match.group(6) - yield match.start(7), Text, match.group(7) - - # lookup lexer if wanted and existing - lexer = None - if self.handlecodeblocks: - try: - lexer = get_lexer_by_name(match.group(6).strip()) - except ClassNotFound: - pass - indention = match.group(8) - indention_size = len(indention) - code = (indention + match.group(9) + match.group(10) + match.group(11)) - - # no lexer for this language. handle it like it was a code block - if lexer is None: - yield match.start(8), String, code - return - - # highlight the lines with the lexer. - ins = [] - codelines = code.splitlines(True) - code = '' - for line in codelines: - if len(line) > indention_size: - ins.append((len(code), [(0, Text, line[:indention_size])])) - code += line[indention_size:] - else: - code += line - for item in do_insertions(ins, lexer.get_tokens_unprocessed(code)): - yield item - - # from docutils.parsers.rst.states - closers = '\'")]}>\u2019\u201d\xbb!?' 
- unicode_delimiters = '\u2010\u2011\u2012\u2013\u2014\u00a0' - end_string_suffix = (r'((?=$)|(?=[-/:.,; \n\x00%s%s]))' - % (re.escape(unicode_delimiters), - re.escape(closers))) - - tokens = { - 'root': [ - # Heading with overline - (r'^(=+|-+|`+|:+|\.+|\'+|"+|~+|\^+|_+|\*+|\++|#+)([ \t]*\n)' - r'(.+)(\n)(\1)(\n)', - bygroups(Generic.Heading, Text, Generic.Heading, - Text, Generic.Heading, Text)), - # Plain heading - (r'^(\S.*)(\n)(={3,}|-{3,}|`{3,}|:{3,}|\.{3,}|\'{3,}|"{3,}|' - r'~{3,}|\^{3,}|_{3,}|\*{3,}|\+{3,}|#{3,})(\n)', - bygroups(Generic.Heading, Text, Generic.Heading, Text)), - # Bulleted lists - (r'^(\s*)([-*+])( .+\n(?:\1 .+\n)*)', - bygroups(Text, Number, using(this, state='inline'))), - # Numbered lists - (r'^(\s*)([0-9#ivxlcmIVXLCM]+\.)( .+\n(?:\1 .+\n)*)', - bygroups(Text, Number, using(this, state='inline'))), - (r'^(\s*)(\(?[0-9#ivxlcmIVXLCM]+\))( .+\n(?:\1 .+\n)*)', - bygroups(Text, Number, using(this, state='inline'))), - # Numbered, but keep words at BOL from becoming lists - (r'^(\s*)([A-Z]+\.)( .+\n(?:\1 .+\n)+)', - bygroups(Text, Number, using(this, state='inline'))), - (r'^(\s*)(\(?[A-Za-z]+\))( .+\n(?:\1 .+\n)+)', - bygroups(Text, Number, using(this, state='inline'))), - # Line blocks - (r'^(\s*)(\|)( .+\n(?:\| .+\n)*)', - bygroups(Text, Operator, using(this, state='inline'))), - # Sourcecode directives - (r'^( *\.\.)(\s*)((?:source)?code)(::)([ \t]*)([^\n]+)' - r'(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\8.*|)\n)+)', - _handle_sourcecode), - # A directive - (r'^( *\.\.)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))', - bygroups(Punctuation, Text, Operator.Word, Punctuation, Text, - using(this, state='inline'))), - # A reference target - (r'^( *\.\.)(\s*)(_(?:[^:\\]|\\.)+:)(.*?)$', - bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))), - # A footnote/citation target - (r'^( *\.\.)(\s*)(\[.+\])(.*?)$', - bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))), - # A substitution def - (r'^( 
*\.\.)(\s*)(\|.+\|)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))', - bygroups(Punctuation, Text, Name.Tag, Text, Operator.Word, - Punctuation, Text, using(this, state='inline'))), - # Comments - (r'^ *\.\..*(\n( +.*\n|\n)+)?', Comment.Preproc), - # Field list - (r'^( *)(:[a-zA-Z-]+:)(\s*)$', bygroups(Text, Name.Class, Text)), - (r'^( *)(:.*?:)([ \t]+)(.*?)$', - bygroups(Text, Name.Class, Text, Name.Function)), - # Definition list - (r'^([^ ].*(?)(`__?)', # reference with inline target - bygroups(String, String.Interpol, String)), - (r'`.+?`__?', String), # reference - (r'(`.+?`)(:[a-zA-Z0-9:-]+?:)?', - bygroups(Name.Variable, Name.Attribute)), # role - (r'(:[a-zA-Z0-9:-]+?:)(`.+?`)', - bygroups(Name.Attribute, Name.Variable)), # role (content first) - (r'\*\*.+?\*\*', Generic.Strong), # Strong emphasis - (r'\*.+?\*', Generic.Emph), # Emphasis - (r'\[.*?\]_', String), # Footnote or citation - (r'<.+?>', Name.Tag), # Hyperlink - (r'[^\\\n\[*`:]+', Text), - (r'.', Text), - ], - 'literal': [ - (r'[^`]+', String), - (r'``' + end_string_suffix, String, '#pop'), - (r'`', String), - ] - } - - def __init__(self, **options): - self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True) - RegexLexer.__init__(self, **options) - - def analyse_text(text): - if text[:2] == '..' and text[2:3] != '.': - return 0.3 - p1 = text.find("\n") - p2 = text.find("\n", p1 + 1) - if (p2 > -1 and # has two lines - p1 * 2 + 1 == p2 and # they are the same length - text[p1+1] in '-=' and # the next line both starts and ends with - text[p1+1] == text[p2-1]): # ...a sufficiently high header - return 0.5 - - -class VimLexer(RegexLexer): - """ - Lexer for VimL script files. 
- - *New in Pygments 0.8.* - """ - name = 'VimL' - aliases = ['vim'] - filenames = ['*.vim', '.vimrc', '.exrc', '.gvimrc', - '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'] - mimetypes = ['text/x-vim'] - flags = re.MULTILINE - - tokens = { - 'root': [ - (r'^\s*".*', Comment), - - (r'[ \t]+', Text), - # TODO: regexes can have other delims - (r'/(\\\\|\\/|[^\n/])*/', String.Regex), - (r'"(\\\\|\\"|[^\n"])*"', String.Double), - (r"'(\\\\|\\'|[^\n'])*'", String.Single), - - # Who decided that doublequote was a good comment character?? - (r'(?<=\s)"[^\-:.%#=*].*', Comment), - (r'-?\d+', Number), - (r'#[0-9a-f]{6}', Number.Hex), - (r'^:', Punctuation), - (r'[()<>+=!|,~-]', Punctuation), # Inexact list. Looks decent. - (r'\b(let|if|else|endif|elseif|fun|function|endfunction)\b', - Keyword), - (r'\b(NONE|bold|italic|underline|dark|light)\b', Name.Builtin), - (r'\b\w+\b', Name.Other), # These are postprocessed below - (r'.', Text), - ], - } - def __init__(self, **options): - from pygments.lexers._vimbuiltins import command, option, auto - self._cmd = command - self._opt = option - self._aut = auto - - RegexLexer.__init__(self, **options) - - def is_in(self, w, mapping): - r""" - It's kind of difficult to decide if something might be a keyword - in VimL because it allows you to abbreviate them. In fact, - 'ab[breviate]' is a good example. :ab, :abbre, or :abbreviate are - valid ways to call it so rather than making really awful regexps - like:: - - \bab(?:b(?:r(?:e(?:v(?:i(?:a(?:t(?:e)?)?)?)?)?)?)?)?\b - - we match `\b\w+\b` and then call is_in() on those tokens. See - `scripts/get_vimkw.py` for how the lists are extracted. 
- """ - p = bisect(mapping, (w,)) - if p > 0: - if mapping[p-1][0] == w[:len(mapping[p-1][0])] and \ - mapping[p-1][1][:len(w)] == w: return True - if p < len(mapping): - return mapping[p][0] == w[:len(mapping[p][0])] and \ - mapping[p][1][:len(w)] == w - return False - - def get_tokens_unprocessed(self, text): - # TODO: builtins are only subsequent tokens on lines - # and 'keywords' only happen at the beginning except - # for :au ones - for index, token, value in \ - RegexLexer.get_tokens_unprocessed(self, text): - if token is Name.Other: - if self.is_in(value, self._cmd): - yield index, Keyword, value - elif self.is_in(value, self._opt) or \ - self.is_in(value, self._aut): - yield index, Name.Builtin, value - else: - yield index, Text, value - else: - yield index, token, value - - -class GettextLexer(RegexLexer): - """ - Lexer for Gettext catalog files. - - *New in Pygments 0.9.* - """ - name = 'Gettext Catalog' - aliases = ['pot', 'po'] - filenames = ['*.pot', '*.po'] - mimetypes = ['application/x-gettext', 'text/x-gettext', 'text/gettext'] - - tokens = { - 'root': [ - (r'^#,\s.*?$', Keyword.Type), - (r'^#:\s.*?$', Keyword.Declaration), - #(r'^#$', Comment), - (r'^(#|#\.\s|#\|\s|#~\s|#\s).*$', Comment.Single), - (r'^(")([A-Za-z-]+:)(.*")$', - bygroups(String, Name.Property, String)), - (r'^".*"$', String), - (r'^(msgid|msgid_plural|msgstr)(\s+)(".*")$', - bygroups(Name.Variable, Text, String)), - (r'^(msgstr\[)(\d)(\])(\s+)(".*")$', - bygroups(Name.Variable, Number.Integer, Name.Variable, Text, String)), - ] - } - - -class SquidConfLexer(RegexLexer): - """ - Lexer for `squid `_ configuration files. 
- - *New in Pygments 0.9.* - """ - - name = 'SquidConf' - aliases = ['squidconf', 'squid.conf', 'squid'] - filenames = ['squid.conf'] - mimetypes = ['text/x-squidconf'] - flags = re.IGNORECASE - - keywords = [ - "access_log", "acl", "always_direct", "announce_host", - "announce_period", "announce_port", "announce_to", "anonymize_headers", - "append_domain", "as_whois_server", "auth_param_basic", - "authenticate_children", "authenticate_program", "authenticate_ttl", - "broken_posts", "buffered_logs", "cache_access_log", "cache_announce", - "cache_dir", "cache_dns_program", "cache_effective_group", - "cache_effective_user", "cache_host", "cache_host_acl", - "cache_host_domain", "cache_log", "cache_mem", "cache_mem_high", - "cache_mem_low", "cache_mgr", "cachemgr_passwd", "cache_peer", - "cache_peer_access", "cahce_replacement_policy", "cache_stoplist", - "cache_stoplist_pattern", "cache_store_log", "cache_swap", - "cache_swap_high", "cache_swap_log", "cache_swap_low", "client_db", - "client_lifetime", "client_netmask", "connect_timeout", "coredump_dir", - "dead_peer_timeout", "debug_options", "delay_access", "delay_class", - "delay_initial_bucket_level", "delay_parameters", "delay_pools", - "deny_info", "dns_children", "dns_defnames", "dns_nameservers", - "dns_testnames", "emulate_httpd_log", "err_html_text", - "fake_user_agent", "firewall_ip", "forwarded_for", "forward_snmpd_port", - "fqdncache_size", "ftpget_options", "ftpget_program", "ftp_list_width", - "ftp_passive", "ftp_user", "half_closed_clients", "header_access", - "header_replace", "hierarchy_stoplist", "high_response_time_warning", - "high_page_fault_warning", "hosts_file", "htcp_port", "http_access", - "http_anonymizer", "httpd_accel", "httpd_accel_host", - "httpd_accel_port", "httpd_accel_uses_host_header", - "httpd_accel_with_proxy", "http_port", "http_reply_access", - "icp_access", "icp_hit_stale", "icp_port", "icp_query_timeout", - "ident_lookup", "ident_lookup_access", "ident_timeout", - 
"incoming_http_average", "incoming_icp_average", "inside_firewall", - "ipcache_high", "ipcache_low", "ipcache_size", "local_domain", - "local_ip", "logfile_rotate", "log_fqdn", "log_icp_queries", - "log_mime_hdrs", "maximum_object_size", "maximum_single_addr_tries", - "mcast_groups", "mcast_icp_query_timeout", "mcast_miss_addr", - "mcast_miss_encode_key", "mcast_miss_port", "memory_pools", - "memory_pools_limit", "memory_replacement_policy", "mime_table", - "min_http_poll_cnt", "min_icp_poll_cnt", "minimum_direct_hops", - "minimum_object_size", "minimum_retry_timeout", "miss_access", - "negative_dns_ttl", "negative_ttl", "neighbor_timeout", - "neighbor_type_domain", "netdb_high", "netdb_low", "netdb_ping_period", - "netdb_ping_rate", "never_direct", "no_cache", "passthrough_proxy", - "pconn_timeout", "pid_filename", "pinger_program", "positive_dns_ttl", - "prefer_direct", "proxy_auth", "proxy_auth_realm", "query_icmp", - "quick_abort", "quick_abort", "quick_abort_max", "quick_abort_min", - "quick_abort_pct", "range_offset_limit", "read_timeout", - "redirect_children", "redirect_program", - "redirect_rewrites_host_header", "reference_age", "reference_age", - "refresh_pattern", "reload_into_ims", "request_body_max_size", - "request_size", "request_timeout", "shutdown_lifetime", - "single_parent_bypass", "siteselect_timeout", "snmp_access", - "snmp_incoming_address", "snmp_port", "source_ping", "ssl_proxy", - "store_avg_object_size", "store_objects_per_bucket", - "strip_query_terms", "swap_level1_dirs", "swap_level2_dirs", - "tcp_incoming_address", "tcp_outgoing_address", "tcp_recv_bufsize", - "test_reachability", "udp_hit_obj", "udp_hit_obj_size", - "udp_incoming_address", "udp_outgoing_address", "unique_hostname", - "unlinkd_program", "uri_whitespace", "useragent_log", - "visible_hostname", "wais_relay", "wais_relay_host", "wais_relay_port", - ] - - opts = [ - "proxy-only", "weight", "ttl", "no-query", "default", "round-robin", - "multicast-responder", "on", "off", 
"all", "deny", "allow", "via", - "parent", "no-digest", "heap", "lru", "realm", "children", "q1", "q2", - "credentialsttl", "none", "disable", "offline_toggle", "diskd", - ] - - actions = [ - "shutdown", "info", "parameter", "server_list", "client_list", - r'squid\.conf', - ] - - actions_stats = [ - "objects", "vm_objects", "utilization", "ipcache", "fqdncache", "dns", - "redirector", "io", "reply_headers", "filedescriptors", "netdb", - ] - - actions_log = ["status", "enable", "disable", "clear"] - - acls = [ - "url_regex", "urlpath_regex", "referer_regex", "port", "proto", - "req_mime_type", "rep_mime_type", "method", "browser", "user", "src", - "dst", "time", "dstdomain", "ident", "snmp_community", - ] - - ip_re = ( - r'(?:(?:(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|0x0*[0-9a-f]{1,2}|' - r'0+[1-3]?[0-7]{0,2})(?:\.(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|' - r'0x0*[0-9a-f]{1,2}|0+[1-3]?[0-7]{0,2})){3})|(?!.*::.*::)(?:(?!:)|' - r':(?=:))(?:[0-9a-f]{0,4}(?:(?<=::)|(?`` outputs. - - *New in Pygments 0.9.* - """ - name = 'Debian Control file' - aliases = ['control', 'debcontrol'] - filenames = ['control'] - - tokens = { - 'root': [ - (r'^(Description)', Keyword, 'description'), - (r'^(Maintainer)(:\s*)', bygroups(Keyword, Text), 'maintainer'), - (r'^((Build-)?Depends)', Keyword, 'depends'), - (r'^((?:Python-)?Version)(:\s*)(\S+)$', - bygroups(Keyword, Text, Number)), - (r'^((?:Installed-)?Size)(:\s*)(\S+)$', - bygroups(Keyword, Text, Number)), - (r'^(MD5Sum|SHA1|SHA256)(:\s*)(\S+)$', - bygroups(Keyword, Text, Number)), - (r'^([a-zA-Z\-0-9\.]*?)(:\s*)(.*?)$', - bygroups(Keyword, Whitespace, String)), - ], - 'maintainer': [ - (r'<[^>]+>', Generic.Strong), - (r'<[^>]+>$', Generic.Strong, '#pop'), - (r',\n?', Text), - (r'.', Text), - ], - 'description': [ - (r'(.*)(Homepage)(: )(\S+)', - bygroups(Text, String, Name, Name.Class)), - (r':.*\n', Generic.Strong), - (r' .*\n', Text), - ('', Text, '#pop'), - ], - 'depends': [ - (r':\s*', Text), - 
(r'(\$)(\{)(\w+\s*:\s*\w+)', bygroups(Operator, Text, Name.Entity)), - (r'\(', Text, 'depend_vers'), - (r',', Text), - (r'\|', Operator), - (r'[\s]+', Text), - (r'[}\)]\s*$', Text, '#pop'), - (r'}', Text), - (r'[^,]$', Name.Function, '#pop'), - (r'([\+\.a-zA-Z0-9-])(\s*)', bygroups(Name.Function, Text)), - (r'\[.*?\]', Name.Entity), - ], - 'depend_vers': [ - (r'\),', Text, '#pop'), - (r'\)[^,]', Text, '#pop:2'), - (r'([><=]+)(\s*)([^\)]+)', bygroups(Operator, Text, Number)) - ] - } - - -class YamlLexerContext(LexerContext): - """Indentation context for the YAML lexer.""" - - def __init__(self, *args, **kwds): - super(YamlLexerContext, self).__init__(*args, **kwds) - self.indent_stack = [] - self.indent = -1 - self.next_indent = 0 - self.block_scalar_indent = None - - -class YamlLexer(ExtendedRegexLexer): - """ - Lexer for `YAML `_, a human-friendly data serialization - language. - - *New in Pygments 0.11.* - """ - - name = 'YAML' - aliases = ['yaml'] - filenames = ['*.yaml', '*.yml'] - mimetypes = ['text/x-yaml'] - - - def something(token_class): - """Do not produce empty tokens.""" - def callback(lexer, match, context): - text = match.group() - if not text: - return - yield match.start(), token_class, text - context.pos = match.end() - return callback - - def reset_indent(token_class): - """Reset the indentation levels.""" - def callback(lexer, match, context): - text = match.group() - context.indent_stack = [] - context.indent = -1 - context.next_indent = 0 - context.block_scalar_indent = None - yield match.start(), token_class, text - context.pos = match.end() - return callback - - def save_indent(token_class, start=False): - """Save a possible indentation level.""" - def callback(lexer, match, context): - text = match.group() - extra = '' - if start: - context.next_indent = len(text) - if context.next_indent < context.indent: - while context.next_indent < context.indent: - context.indent = context.indent_stack.pop() - if context.next_indent > context.indent: - 
extra = text[context.indent:] - text = text[:context.indent] - else: - context.next_indent += len(text) - if text: - yield match.start(), token_class, text - if extra: - yield match.start()+len(text), token_class.Error, extra - context.pos = match.end() - return callback - - def set_indent(token_class, implicit=False): - """Set the previously saved indentation level.""" - def callback(lexer, match, context): - text = match.group() - if context.indent < context.next_indent: - context.indent_stack.append(context.indent) - context.indent = context.next_indent - if not implicit: - context.next_indent += len(text) - yield match.start(), token_class, text - context.pos = match.end() - return callback - - def set_block_scalar_indent(token_class): - """Set an explicit indentation level for a block scalar.""" - def callback(lexer, match, context): - text = match.group() - context.block_scalar_indent = None - if not text: - return - increment = match.group(1) - if increment: - current_indent = max(context.indent, 0) - increment = int(increment) - context.block_scalar_indent = current_indent + increment - if text: - yield match.start(), token_class, text - context.pos = match.end() - return callback - - def parse_block_scalar_empty_line(indent_token_class, content_token_class): - """Process an empty line in a block scalar.""" - def callback(lexer, match, context): - text = match.group() - if (context.block_scalar_indent is None or - len(text) <= context.block_scalar_indent): - if text: - yield match.start(), indent_token_class, text - else: - indentation = text[:context.block_scalar_indent] - content = text[context.block_scalar_indent:] - yield match.start(), indent_token_class, indentation - yield (match.start()+context.block_scalar_indent, - content_token_class, content) - context.pos = match.end() - return callback - - def parse_block_scalar_indent(token_class): - """Process indentation spaces in a block scalar.""" - def callback(lexer, match, context): - text = 
match.group() - if context.block_scalar_indent is None: - if len(text) <= max(context.indent, 0): - context.stack.pop() - context.stack.pop() - return - context.block_scalar_indent = len(text) - else: - if len(text) < context.block_scalar_indent: - context.stack.pop() - context.stack.pop() - return - if text: - yield match.start(), token_class, text - context.pos = match.end() - return callback - - def parse_plain_scalar_indent(token_class): - """Process indentation spaces in a plain scalar.""" - def callback(lexer, match, context): - text = match.group() - if len(text) <= context.indent: - context.stack.pop() - context.stack.pop() - return - if text: - yield match.start(), token_class, text - context.pos = match.end() - return callback - - - - tokens = { - # the root rules - 'root': [ - # ignored whitespaces - (r'[ ]+(?=#|$)', Text), - # line breaks - (r'\n+', Text), - # a comment - (r'#[^\n]*', Comment.Single), - # the '%YAML' directive - (r'^%YAML(?=[ ]|$)', reset_indent(Name.Tag), 'yaml-directive'), - # the %TAG directive - (r'^%TAG(?=[ ]|$)', reset_indent(Name.Tag), 'tag-directive'), - # document start and document end indicators - (r'^(?:---|\.\.\.)(?=[ ]|$)', reset_indent(Name.Namespace), - 'block-line'), - # indentation spaces - (r'[ ]*(?![ \t\n\r\f\v]|$)', save_indent(Text, start=True), - ('block-line', 'indentation')), - ], - - # trailing whitespaces after directives or a block scalar indicator - 'ignored-line': [ - # ignored whitespaces - (r'[ ]+(?=#|$)', Text), - # a comment - (r'#[^\n]*', Comment.Single), - # line break - (r'\n', Text, '#pop:2'), - ], - - # the %YAML directive - 'yaml-directive': [ - # the version number - (r'([ ]+)([0-9]+\.[0-9]+)', - bygroups(Text, Number), 'ignored-line'), - ], - - # the %YAG directive - 'tag-directive': [ - # a tag handle and the corresponding prefix - (r'([ ]+)(!|![0-9A-Za-z_-]*!)' - r'([ ]+)(!|!?[0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+)', - bygroups(Text, Keyword.Type, Text, Keyword.Type), - 'ignored-line'), - ], - 
- # block scalar indicators and indentation spaces - 'indentation': [ - # trailing whitespaces are ignored - (r'[ ]*$', something(Text), '#pop:2'), - # whitespaces preceeding block collection indicators - (r'[ ]+(?=[?:-](?:[ ]|$))', save_indent(Text)), - # block collection indicators - (r'[?:-](?=[ ]|$)', set_indent(Punctuation.Indicator)), - # the beginning a block line - (r'[ ]*', save_indent(Text), '#pop'), - ], - - # an indented line in the block context - 'block-line': [ - # the line end - (r'[ ]*(?=#|$)', something(Text), '#pop'), - # whitespaces separating tokens - (r'[ ]+', Text), - # tags, anchors and aliases, - include('descriptors'), - # block collections and scalars - include('block-nodes'), - # flow collections and quoted scalars - include('flow-nodes'), - # a plain scalar - (r'(?=[^ \t\n\r\f\v?:,\[\]{}#&*!|>\'"%@`-]|[?:-][^ \t\n\r\f\v])', - something(Name.Variable), - 'plain-scalar-in-block-context'), - ], - - # tags, anchors, aliases - 'descriptors' : [ - # a full-form tag - (r'!<[0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+>', Keyword.Type), - # a tag in the form '!', '!suffix' or '!handle!suffix' - (r'!(?:[0-9A-Za-z_-]+)?' 
- r'(?:![0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+)?', Keyword.Type), - # an anchor - (r'&[0-9A-Za-z_-]+', Name.Label), - # an alias - (r'\*[0-9A-Za-z_-]+', Name.Variable), - ], - - # block collections and scalars - 'block-nodes': [ - # implicit key - (r':(?=[ ]|$)', set_indent(Punctuation.Indicator, implicit=True)), - # literal and folded scalars - (r'[|>]', Punctuation.Indicator, - ('block-scalar-content', 'block-scalar-header')), - ], - - # flow collections and quoted scalars - 'flow-nodes': [ - # a flow sequence - (r'\[', Punctuation.Indicator, 'flow-sequence'), - # a flow mapping - (r'\{', Punctuation.Indicator, 'flow-mapping'), - # a single-quoted scalar - (r'\'', String, 'single-quoted-scalar'), - # a double-quoted scalar - (r'\"', String, 'double-quoted-scalar'), - ], - - # the content of a flow collection - 'flow-collection': [ - # whitespaces - (r'[ ]+', Text), - # line breaks - (r'\n+', Text), - # a comment - (r'#[^\n]*', Comment.Single), - # simple indicators - (r'[?:,]', Punctuation.Indicator), - # tags, anchors and aliases - include('descriptors'), - # nested collections and quoted scalars - include('flow-nodes'), - # a plain scalar - (r'(?=[^ \t\n\r\f\v?:,\[\]{}#&*!|>\'"%@`])', - something(Name.Variable), - 'plain-scalar-in-flow-context'), - ], - - # a flow sequence indicated by '[' and ']' - 'flow-sequence': [ - # include flow collection rules - include('flow-collection'), - # the closing indicator - (r'\]', Punctuation.Indicator, '#pop'), - ], - - # a flow mapping indicated by '{' and '}' - 'flow-mapping': [ - # include flow collection rules - include('flow-collection'), - # the closing indicator - (r'\}', Punctuation.Indicator, '#pop'), - ], - - # block scalar lines - 'block-scalar-content': [ - # line break - (r'\n', Text), - # empty line - (r'^[ ]+$', - parse_block_scalar_empty_line(Text, Name.Constant)), - # indentation spaces (we may leave the state here) - (r'^[ ]*', parse_block_scalar_indent(Text)), - # line content - (r'[^\n\r\f\v]+', 
Name.Constant), - ], - - # the content of a literal or folded scalar - 'block-scalar-header': [ - # indentation indicator followed by chomping flag - (r'([1-9])?[+-]?(?=[ ]|$)', - set_block_scalar_indent(Punctuation.Indicator), - 'ignored-line'), - # chomping flag followed by indentation indicator - (r'[+-]?([1-9])?(?=[ ]|$)', - set_block_scalar_indent(Punctuation.Indicator), - 'ignored-line'), - ], - - # ignored and regular whitespaces in quoted scalars - 'quoted-scalar-whitespaces': [ - # leading and trailing whitespaces are ignored - (r'^[ ]+', Text), - (r'[ ]+$', Text), - # line breaks are ignored - (r'\n+', Text), - # other whitespaces are a part of the value - (r'[ ]+', Name.Variable), - ], - - # single-quoted scalars - 'single-quoted-scalar': [ - # include whitespace and line break rules - include('quoted-scalar-whitespaces'), - # escaping of the quote character - (r'\'\'', String.Escape), - # regular non-whitespace characters - (r'[^ \t\n\r\f\v\']+', String), - # the closing quote - (r'\'', String, '#pop'), - ], - - # double-quoted scalars - 'double-quoted-scalar': [ - # include whitespace and line break rules - include('quoted-scalar-whitespaces'), - # escaping of special characters - (r'\\[0abt\tn\nvfre "\\N_LP]', String), - # escape codes - (r'\\(?:x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|U[0-9A-Fa-f]{8})', - String.Escape), - # regular non-whitespace characters - (r'[^ \t\n\r\f\v\"\\]+', String), - # the closing quote - (r'"', String, '#pop'), - ], - - # the beginning of a new line while scanning a plain scalar - 'plain-scalar-in-block-context-new-line': [ - # empty lines - (r'^[ ]+$', Text), - # line breaks - (r'\n+', Text), - # document start and document end indicators - (r'^(?=---|\.\.\.)', something(Name.Namespace), '#pop:3'), - # indentation spaces (we may leave the block line state here) - (r'^[ ]*', parse_plain_scalar_indent(Text), '#pop'), - ], - - # a plain scalar in the block context - 'plain-scalar-in-block-context': [ - # the scalar ends with the 
':' indicator - (r'[ ]*(?=:[ ]|:$)', something(Text), '#pop'), - # the scalar ends with whitespaces followed by a comment - (r'[ ]+(?=#)', Text, '#pop'), - # trailing whitespaces are ignored - (r'[ ]+$', Text), - # line breaks are ignored - (r'\n+', Text, 'plain-scalar-in-block-context-new-line'), - # other whitespaces are a part of the value - (r'[ ]+', Literal.Scalar.Plain), - # regular non-whitespace characters - (r'(?::(?![ \t\n\r\f\v])|[^ \t\n\r\f\v:])+', Literal.Scalar.Plain), - ], - - # a plain scalar is the flow context - 'plain-scalar-in-flow-context': [ - # the scalar ends with an indicator character - (r'[ ]*(?=[,:?\[\]{}])', something(Text), '#pop'), - # the scalar ends with a comment - (r'[ ]+(?=#)', Text, '#pop'), - # leading and trailing whitespaces are ignored - (r'^[ ]+', Text), - (r'[ ]+$', Text), - # line breaks are ignored - (r'\n+', Text), - # other whitespaces are a part of the value - (r'[ ]+', Name.Variable), - # regular non-whitespace characters - (r'[^ \t\n\r\f\v,:?\[\]{}]+', Name.Variable), - ], - - } - - def get_tokens_unprocessed(self, text=None, context=None): - if context is None: - context = YamlLexerContext(text, 0) - return super(YamlLexer, self).get_tokens_unprocessed(text, context) - - -class LighttpdConfLexer(RegexLexer): - """ - Lexer for `Lighttpd `_ configuration files. - - *New in Pygments 0.11.* - """ - name = 'Lighttpd configuration file' - aliases = ['lighty', 'lighttpd'] - filenames = [] - mimetypes = ['text/x-lighttpd-conf'] - - tokens = { - 'root': [ - (r'#.*\n', Comment.Single), - (r'/\S*', Name), # pathname - (r'[a-zA-Z._-]+', Keyword), - (r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number), - (r'[0-9]+', Number), - (r'=>|=~|\+=|==|=|\+', Operator), - (r'\$[A-Z]+', Name.Builtin), - (r'[(){}\[\],]', Punctuation), - (r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double), - (r'\s+', Text), - ], - - } - - -class NginxConfLexer(RegexLexer): - """ - Lexer for `Nginx `_ configuration files. 
- - *New in Pygments 0.11.* - """ - name = 'Nginx configuration file' - aliases = ['nginx'] - filenames = [] - mimetypes = ['text/x-nginx-conf'] - - tokens = { - 'root': [ - (r'(include)(\s+)([^\s;]+)', bygroups(Keyword, Text, Name)), - (r'[^\s;#]+', Keyword, 'stmt'), - include('base'), - ], - 'block': [ - (r'}', Punctuation, '#pop:2'), - (r'[^\s;#]+', Keyword.Namespace, 'stmt'), - include('base'), - ], - 'stmt': [ - (r'{', Punctuation, 'block'), - (r';', Punctuation, '#pop'), - include('base'), - ], - 'base': [ - (r'#.*\n', Comment.Single), - (r'on|off', Name.Constant), - (r'\$[^\s;#()]+', Name.Variable), - (r'([a-z0-9.-]+)(:)([0-9]+)', - bygroups(Name, Punctuation, Number.Integer)), - (r'[a-z-]+/[a-z-+]+', String), # mimetype - #(r'[a-zA-Z._-]+', Keyword), - (r'[0-9]+[km]?\b', Number.Integer), - (r'(~)(\s*)([^\s{]+)', bygroups(Punctuation, Text, String.Regex)), - (r'[:=~]', Punctuation), - (r'[^\s;#{}$]+', String), # catch all - (r'/[^\s;#]*', Name), # pathname - (r'\s+', Text), - (r'[$;]', Text), # leftover characters - ], - } - - -class CMakeLexer(RegexLexer): - """ - Lexer for `CMake `_ files. 
- - *New in Pygments 1.2.* - """ - name = 'CMake' - aliases = ['cmake'] - filenames = ['*.cmake', 'CMakeLists.txt'] - mimetypes = ['text/x-cmake'] - - tokens = { - 'root': [ - #(r'(ADD_CUSTOM_COMMAND|ADD_CUSTOM_TARGET|ADD_DEFINITIONS|' - # r'ADD_DEPENDENCIES|ADD_EXECUTABLE|ADD_LIBRARY|ADD_SUBDIRECTORY|' - # r'ADD_TEST|AUX_SOURCE_DIRECTORY|BUILD_COMMAND|BUILD_NAME|' - # r'CMAKE_MINIMUM_REQUIRED|CONFIGURE_FILE|CREATE_TEST_SOURCELIST|' - # r'ELSE|ELSEIF|ENABLE_LANGUAGE|ENABLE_TESTING|ENDFOREACH|' - # r'ENDFUNCTION|ENDIF|ENDMACRO|ENDWHILE|EXEC_PROGRAM|' - # r'EXECUTE_PROCESS|EXPORT_LIBRARY_DEPENDENCIES|FILE|FIND_FILE|' - # r'FIND_LIBRARY|FIND_PACKAGE|FIND_PATH|FIND_PROGRAM|FLTK_WRAP_UI|' - # r'FOREACH|FUNCTION|GET_CMAKE_PROPERTY|GET_DIRECTORY_PROPERTY|' - # r'GET_FILENAME_COMPONENT|GET_SOURCE_FILE_PROPERTY|' - # r'GET_TARGET_PROPERTY|GET_TEST_PROPERTY|IF|INCLUDE|' - # r'INCLUDE_DIRECTORIES|INCLUDE_EXTERNAL_MSPROJECT|' - # r'INCLUDE_REGULAR_EXPRESSION|INSTALL|INSTALL_FILES|' - # r'INSTALL_PROGRAMS|INSTALL_TARGETS|LINK_DIRECTORIES|' - # r'LINK_LIBRARIES|LIST|LOAD_CACHE|LOAD_COMMAND|MACRO|' - # r'MAKE_DIRECTORY|MARK_AS_ADVANCED|MATH|MESSAGE|OPTION|' - # r'OUTPUT_REQUIRED_FILES|PROJECT|QT_WRAP_CPP|QT_WRAP_UI|REMOVE|' - # r'REMOVE_DEFINITIONS|SEPARATE_ARGUMENTS|SET|' - # r'SET_DIRECTORY_PROPERTIES|SET_SOURCE_FILES_PROPERTIES|' - # r'SET_TARGET_PROPERTIES|SET_TESTS_PROPERTIES|SITE_NAME|' - # r'SOURCE_GROUP|STRING|SUBDIR_DEPENDS|SUBDIRS|' - # r'TARGET_LINK_LIBRARIES|TRY_COMPILE|TRY_RUN|UNSET|' - # r'USE_MANGLED_MESA|UTILITY_SOURCE|VARIABLE_REQUIRES|' - # r'VTK_MAKE_INSTANTIATOR|VTK_WRAP_JAVA|VTK_WRAP_PYTHON|' - # r'VTK_WRAP_TCL|WHILE|WRITE_FILE|' - # r'COUNTARGS)\b', Name.Builtin, 'args'), - (r'\b(\w+)([ \t]*)(\()', bygroups(Name.Builtin, Text, - Punctuation), 'args'), - include('keywords'), - include('ws') - ], - 'args': [ - (r'\(', Punctuation, '#push'), - (r'\)', Punctuation, '#pop'), - (r'(\${)(.+?)(})', bygroups(Operator, Name.Variable, Operator)), - (r'(?s)".*?"', 
String.Double), - (r'\\\S+', String), - (r'[^\)$"# \t\n]+', String), - (r'\n', Text), # explicitly legal - include('keywords'), - include('ws') - ], - 'string': [ - - ], - 'keywords': [ - (r'\b(WIN32|UNIX|APPLE|CYGWIN|BORLAND|MINGW|MSVC|MSVC_IDE|MSVC60|' - r'MSVC70|MSVC71|MSVC80|MSVC90)\b', Keyword), - ], - 'ws': [ - (r'[ \t]+', Text), - (r'#.+\n', Comment), - ] - } - - -class HttpLexer(RegexLexer): - """ - Lexer for HTTP sessions. - - *New in Pygments 1.5.* - """ - - name = 'HTTP' - aliases = ['http'] - - flags = re.DOTALL - - def header_callback(self, match): - if match.group(1).lower() == 'content-type': - content_type = match.group(5).strip() - if ';' in content_type: - content_type = content_type[:content_type.find(';')].strip() - self.content_type = content_type - yield match.start(1), Name.Attribute, match.group(1) - yield match.start(2), Text, match.group(2) - yield match.start(3), Operator, match.group(3) - yield match.start(4), Text, match.group(4) - yield match.start(5), Literal, match.group(5) - yield match.start(6), Text, match.group(6) - - def continuous_header_callback(self, match): - yield match.start(1), Text, match.group(1) - yield match.start(2), Literal, match.group(2) - yield match.start(3), Text, match.group(3) - - def content_callback(self, match): - content_type = getattr(self, 'content_type', None) - content = match.group() - offset = match.start() - if content_type: - from pygments.lexers import get_lexer_for_mimetype - try: - lexer = get_lexer_for_mimetype(content_type) - except ClassNotFound: - pass - else: - for idx, token, value in lexer.get_tokens_unprocessed(content): - yield offset + idx, token, value - return - yield offset, Text, content - - tokens = { - 'root': [ - (r'(GET|POST|PUT|DELETE|HEAD|OPTIONS|TRACE|PATCH)( +)([^ ]+)( +)' - r'(HTTP)(/)(1\.[01])(\r?\n|$)', - bygroups(Name.Function, Text, Name.Namespace, Text, - Keyword.Reserved, Operator, Number, Text), - 'headers'), - (r'(HTTP)(/)(1\.[01])( +)(\d{3})( 
+)([^\r\n]+)(\r?\n|$)', - bygroups(Keyword.Reserved, Operator, Number, Text, Number, - Text, Name.Exception, Text), - 'headers'), - ], - 'headers': [ - (r'([^\s:]+)( *)(:)( *)([^\r\n]+)(\r?\n|$)', header_callback), - (r'([\t ]+)([^\r\n]+)(\r?\n|$)', continuous_header_callback), - (r'\r?\n', Text, 'content') - ], - 'content': [ - (r'.+', content_callback) - ] - } - - -class PyPyLogLexer(RegexLexer): - """ - Lexer for PyPy log files. - - *New in Pygments 1.5.* - """ - name = "PyPy Log" - aliases = ["pypylog", "pypy"] - filenames = ["*.pypylog"] - mimetypes = ['application/x-pypylog'] - - tokens = { - "root": [ - (r"\[\w+\] {jit-log-.*?$", Keyword, "jit-log"), - (r"\[\w+\] {jit-backend-counts$", Keyword, "jit-backend-counts"), - include("extra-stuff"), - ], - "jit-log": [ - (r"\[\w+\] jit-log-.*?}$", Keyword, "#pop"), - (r"^\+\d+: ", Comment), - (r"--end of the loop--", Comment), - (r"[ifp]\d+", Name), - (r"ptr\d+", Name), - (r"(\()(\w+(?:\.\w+)?)(\))", - bygroups(Punctuation, Name.Builtin, Punctuation)), - (r"[\[\]=,()]", Punctuation), - (r"(\d+\.\d+|inf|-inf)", Number.Float), - (r"-?\d+", Number.Integer), - (r"'.*'", String), - (r"(None|descr|ConstClass|ConstPtr|TargetToken)", Name), - (r"<.*?>+", Name.Builtin), - (r"(label|debug_merge_point|jump|finish)", Name.Class), - (r"(int_add_ovf|int_add|int_sub_ovf|int_sub|int_mul_ovf|int_mul|" - r"int_floordiv|int_mod|int_lshift|int_rshift|int_and|int_or|" - r"int_xor|int_eq|int_ne|int_ge|int_gt|int_le|int_lt|int_is_zero|" - r"int_is_true|" - r"uint_floordiv|uint_ge|uint_lt|" - r"float_add|float_sub|float_mul|float_truediv|float_neg|" - r"float_eq|float_ne|float_ge|float_gt|float_le|float_lt|float_abs|" - r"ptr_eq|ptr_ne|instance_ptr_eq|instance_ptr_ne|" - r"cast_int_to_float|cast_float_to_int|" - r"force_token|quasiimmut_field|same_as|virtual_ref_finish|" - r"virtual_ref|mark_opaque_ptr|" - r"call_may_force|call_assembler|call_loopinvariant|" - r"call_release_gil|call_pure|call|" - 
r"new_with_vtable|new_array|newstr|newunicode|new|" - r"arraylen_gc|" - r"getarrayitem_gc_pure|getarrayitem_gc|setarrayitem_gc|" - r"getarrayitem_raw|setarrayitem_raw|getfield_gc_pure|" - r"getfield_gc|getinteriorfield_gc|setinteriorfield_gc|" - r"getfield_raw|setfield_gc|setfield_raw|" - r"strgetitem|strsetitem|strlen|copystrcontent|" - r"unicodegetitem|unicodesetitem|unicodelen|" - r"guard_true|guard_false|guard_value|guard_isnull|" - r"guard_nonnull_class|guard_nonnull|guard_class|guard_no_overflow|" - r"guard_not_forced|guard_no_exception|guard_not_invalidated)", - Name.Builtin), - include("extra-stuff"), - ], - "jit-backend-counts": [ - (r"\[\w+\] jit-backend-counts}$", Keyword, "#pop"), - (r":", Punctuation), - (r"\d+", Number), - include("extra-stuff"), - ], - "extra-stuff": [ - (r"\s+", Text), - (r"#.*?$", Comment), - ], - } - - -class HxmlLexer(RegexLexer): - """ - Lexer for `haXe build `_ files. - - *New in Pygments 1.6.* - """ - name = 'Hxml' - aliases = ['haxeml', 'hxml'] - filenames = ['*.hxml'] - - tokens = { - 'root': [ - # Seperator - (r'(--)(next)', bygroups(Punctuation, Generic.Heading)), - # Compiler switches with one dash - (r'(-)(prompt|debug|v)', bygroups(Punctuation, Keyword.Keyword)), - # Compilerswitches with two dashes - (r'(--)(neko-source|flash-strict|flash-use-stage|no-opt|no-traces|' - r'no-inline|times|no-output)', bygroups(Punctuation, Keyword)), - # Targets and other options that take an argument - (r'(-)(cpp|js|neko|x|as3|swf9?|swf-lib|php|xml|main|lib|D|resource|' - r'cp|cmd)( +)(.+)', - bygroups(Punctuation, Keyword, Whitespace, String)), - # Options that take only numerical arguments - (r'(-)(swf-version)( +)(\d+)', - bygroups(Punctuation, Keyword, Number.Integer)), - # An Option that defines the size, the fps and the background - # color of an flash movie - (r'(-)(swf-header)( +)(\d+)(:)(\d+)(:)(\d+)(:)([A-Fa-f0-9]{6})', - bygroups(Punctuation, Keyword, Whitespace, Number.Integer, - Punctuation, Number.Integer, Punctuation, 
Number.Integer, - Punctuation, Number.Hex)), - # options with two dashes that takes arguments - (r'(--)(js-namespace|php-front|php-lib|remap|gen-hx-classes)( +)' - r'(.+)', bygroups(Punctuation, Keyword, Whitespace, String)), - # Single line comment, multiline ones are not allowed. - (r'#.*', Comment.Single) - ] - } - - -class EbnfLexer(RegexLexer): - """ - Lexer for `ISO/IEC 14977 EBNF - `_ - grammars. - - *New in Pygments 1.7.* - """ - - name = 'EBNF' - aliases = ['ebnf'] - filenames = ['*.ebnf'] - mimetypes = ['text/x-ebnf'] - - tokens = { - 'root': [ - include('whitespace'), - include('comment_start'), - include('identifier'), - (r'=', Operator, 'production'), - ], - 'production': [ - include('whitespace'), - include('comment_start'), - include('identifier'), - (r'"[^"]*"', String.Double), - (r"'[^']*'", String.Single), - (r'(\?[^?]*\?)', Name.Entity), - (r'[\[\]{}(),|]', Punctuation), - (r'-', Operator), - (r';', Punctuation, '#pop'), - ], - 'whitespace': [ - (r'\s+', Text), - ], - 'comment_start': [ - (r'\(\*', Comment.Multiline, 'comment'), - ], - 'comment': [ - (r'[^*)]', Comment.Multiline), - include('comment_start'), - (r'\*\)', Comment.Multiline, '#pop'), - (r'[*)]', Comment.Multiline), - ], - 'identifier': [ - (r'([a-zA-Z][a-zA-Z0-9 \-]*)', Keyword), - ], - } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/web.py b/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/web.py deleted file mode 100644 index 327f426..0000000 --- a/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/web.py +++ /dev/null @@ -1,4045 +0,0 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.web - ~~~~~~~~~~~~~~~~~~~ - - Lexers for web-related languages and markup. - - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. 
-""" - -import re -import copy - -from pygments.lexer import RegexLexer, ExtendedRegexLexer, bygroups, using, \ - include, this -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Other, Punctuation, Literal -from pygments.util import get_bool_opt, get_list_opt, looks_like_xml, \ - html_doctype_matches, unirange -from pygments.lexers.agile import RubyLexer -from pygments.lexers.compiled import ScalaLexer - - -__all__ = ['HtmlLexer', 'XmlLexer', 'JavascriptLexer', 'JsonLexer', 'CssLexer', - 'PhpLexer', 'ActionScriptLexer', 'XsltLexer', 'ActionScript3Lexer', - 'MxmlLexer', 'HaxeLexer', 'HamlLexer', 'SassLexer', 'ScssLexer', - 'ObjectiveJLexer', 'CoffeeScriptLexer', 'LiveScriptLexer', - 'DuelLexer', 'ScamlLexer', 'JadeLexer', 'XQueryLexer', - 'DtdLexer', 'DartLexer', 'LassoLexer', 'QmlLexer', 'TypeScriptLexer'] - - -class JavascriptLexer(RegexLexer): - """ - For JavaScript source code. - """ - - name = 'JavaScript' - aliases = ['js', 'javascript'] - filenames = ['*.js', ] - mimetypes = ['application/javascript', 'application/x-javascript', - 'text/x-javascript', 'text/javascript', ] - - flags = re.DOTALL - tokens = { - 'commentsandwhitespace': [ - (r'\s+', Text), - (r'', Comment, '#pop'), - ('-', Comment), - ], - 'tag': [ - (r'\s+', Text), - (r'[a-zA-Z0-9_:-]+\s*=', Name.Attribute, 'attr'), - (r'[a-zA-Z0-9_:-]+', Name.Attribute), - (r'/?\s*>', Name.Tag, '#pop'), - ], - 'script-content': [ - (r'<\s*/\s*script\s*>', Name.Tag, '#pop'), - (r'.+?(?=<\s*/\s*script\s*>)', using(JavascriptLexer)), - ], - 'style-content': [ - (r'<\s*/\s*style\s*>', Name.Tag, '#pop'), - (r'.+?(?=<\s*/\s*style\s*>)', using(CssLexer)), - ], - 'attr': [ - ('".*?"', String, '#pop'), - ("'.*?'", String, '#pop'), - (r'[^\s>]+', String, '#pop'), - ], - } - - def analyse_text(text): - if html_doctype_matches(text): - return 0.5 - - -class PhpLexer(RegexLexer): - """ - For `PHP `_ source code. - For PHP embedded in HTML, use the `HtmlPhpLexer`. 
- - Additional options accepted: - - `startinline` - If given and ``True`` the lexer starts highlighting with - php code (i.e.: no starting ``>> from pygments.lexers._phpbuiltins import MODULES - >>> MODULES.keys() - ['PHP Options/Info', 'Zip', 'dba', ...] - - In fact the names of those modules match the module names from - the php documentation. - """ - - name = 'PHP' - aliases = ['php', 'php3', 'php4', 'php5'] - filenames = ['*.php', '*.php[345]', '*.inc'] - mimetypes = ['text/x-php'] - - flags = re.IGNORECASE | re.DOTALL | re.MULTILINE - tokens = { - 'root': [ - (r'<\?(php)?', Comment.Preproc, 'php'), - (r'[^<]+', Other), - (r'<', Other) - ], - 'php': [ - (r'\?>', Comment.Preproc, '#pop'), - (r'<<<(\'?)([a-zA-Z_][a-zA-Z0-9_]*)\1\n.*?\n\2\;?\n', String), - (r'\s+', Text), - (r'#.*?\n', Comment.Single), - (r'//.*?\n', Comment.Single), - # put the empty comment here, it is otherwise seen as - # the start of a docstring - (r'/\*\*/', Comment.Multiline), - (r'/\*\*.*?\*/', String.Doc), - (r'/\*.*?\*/', Comment.Multiline), - (r'(->|::)(\s*)([a-zA-Z_][a-zA-Z0-9_]*)', - bygroups(Operator, Text, Name.Attribute)), - (r'[~!%^&*+=|:.<>/?@-]+', Operator), - (r'[\[\]{}();,]+', Punctuation), - (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'), - (r'(function)(\s*)(?=\()', bygroups(Keyword, Text)), - (r'(function)(\s+)(&?)(\s*)', - bygroups(Keyword, Text, Operator, Text), 'functionname'), - (r'(const)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)', - bygroups(Keyword, Text, Name.Constant)), - (r'(and|E_PARSE|old_function|E_ERROR|or|as|E_WARNING|parent|' - r'eval|PHP_OS|break|exit|case|extends|PHP_VERSION|cfunction|' - r'FALSE|print|for|require|continue|foreach|require_once|' - r'declare|return|default|static|do|switch|die|stdClass|' - r'echo|else|TRUE|elseif|var|empty|if|xor|enddeclare|include|' - r'virtual|endfor|include_once|while|endforeach|global|__FILE__|' - r'endif|list|__LINE__|endswitch|new|__sleep|endwhile|not|' - r'array|__wakeup|E_ALL|NULL|final|php_user_filter|interface|' - 
r'implements|public|private|protected|abstract|clone|try|' - r'catch|throw|this|use|namespace|trait)\b', Keyword), - (r'(true|false|null)\b', Keyword.Constant), - (r'\$\{\$+[a-zA-Z_][a-zA-Z0-9_]*\}', Name.Variable), - (r'\$+[a-zA-Z_][a-zA-Z0-9_]*', Name.Variable), - (r'[\\a-zA-Z_][\\a-zA-Z0-9_]*', Name.Other), - (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float), - (r'\d+[eE][+-]?[0-9]+', Number.Float), - (r'0[0-7]+', Number.Oct), - (r'0[xX][a-fA-F0-9]+', Number.Hex), - (r'\d+', Number.Integer), - (r"'([^'\\]*(?:\\.[^'\\]*)*)'", String.Single), - (r'`([^`\\]*(?:\\.[^`\\]*)*)`', String.Backtick), - (r'"', String.Double, 'string'), - ], - 'classname': [ - (r'[a-zA-Z_][\\a-zA-Z0-9_]*', Name.Class, '#pop') - ], - 'functionname': [ - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Function, '#pop') - ], - 'string': [ - (r'"', String.Double, '#pop'), - (r'[^{$"\\]+', String.Double), - (r'\\([nrt\"$\\]|[0-7]{1,3}|x[0-9A-Fa-f]{1,2})', String.Escape), - (r'\$[a-zA-Z_][a-zA-Z0-9_]*(\[\S+\]|->[a-zA-Z_][a-zA-Z0-9_]*)?', - String.Interpol), - (r'(\{\$\{)(.*?)(\}\})', - bygroups(String.Interpol, using(this, _startinline=True), - String.Interpol)), - (r'(\{)(\$.*?)(\})', - bygroups(String.Interpol, using(this, _startinline=True), - String.Interpol)), - (r'(\$\{)(\S+)(\})', - bygroups(String.Interpol, Name.Variable, String.Interpol)), - (r'[${\\]+', String.Double) - ], - } - - def __init__(self, **options): - self.funcnamehighlighting = get_bool_opt( - options, 'funcnamehighlighting', True) - self.disabledmodules = get_list_opt( - options, 'disabledmodules', ['unknown']) - self.startinline = get_bool_opt(options, 'startinline', False) - - # private option argument for the lexer itself - if '_startinline' in options: - self.startinline = options.pop('_startinline') - - # collect activated functions in a set - self._functions = set() - if self.funcnamehighlighting: - from pygments.lexers._phpbuiltins import MODULES - for key, value in MODULES.items(): - if key not in self.disabledmodules: - 
self._functions.update(value) - RegexLexer.__init__(self, **options) - - def get_tokens_unprocessed(self, text): - stack = ['root'] - if self.startinline: - stack.append('php') - for index, token, value in \ - RegexLexer.get_tokens_unprocessed(self, text, stack): - if token is Name.Other: - if value in self._functions: - yield index, Name.Builtin, value - continue - yield index, token, value - - def analyse_text(text): - rv = 0.0 - if re.search(r'<\?(?!xml)', text): - rv += 0.3 - if '?>' in text: - rv += 0.1 - return rv - - -class DtdLexer(RegexLexer): - """ - A lexer for DTDs (Document Type Definitions). - - *New in Pygments 1.5.* - """ - - flags = re.MULTILINE | re.DOTALL - - name = 'DTD' - aliases = ['dtd'] - filenames = ['*.dtd'] - mimetypes = ['application/xml-dtd'] - - tokens = { - 'root': [ - include('common'), - - (r'(\s]+)', - bygroups(Keyword, Text, Name.Tag)), - (r'PUBLIC|SYSTEM', Keyword.Constant), - (r'[\[\]>]', Keyword), - ], - - 'common': [ - (r'\s+', Text), - (r'(%|&)[^;]*;', Name.Entity), - ('', Comment, '#pop'), - ('-', Comment), - ], - - 'element': [ - include('common'), - (r'EMPTY|ANY|#PCDATA', Keyword.Constant), - (r'[^>\s\|()?+*,]+', Name.Tag), - (r'>', Keyword, '#pop'), - ], - - 'attlist': [ - include('common'), - (r'CDATA|IDREFS|IDREF|ID|NMTOKENS|NMTOKEN|ENTITIES|ENTITY|NOTATION', - Keyword.Constant), - (r'#REQUIRED|#IMPLIED|#FIXED', Keyword.Constant), - (r'xml:space|xml:lang', Keyword.Reserved), - (r'[^>\s\|()?+*,]+', Name.Attribute), - (r'>', Keyword, '#pop'), - ], - - 'entity': [ - include('common'), - (r'SYSTEM|PUBLIC|NDATA', Keyword.Constant), - (r'[^>\s\|()?+*,]+', Name.Entity), - (r'>', Keyword, '#pop'), - ], - - 'notation': [ - include('common'), - (r'SYSTEM|PUBLIC', Keyword.Constant), - (r'[^>\s\|()?+*,]+', Name.Attribute), - (r'>', Keyword, '#pop'), - ], - } - - def analyse_text(text): - if not looks_like_xml(text) and \ - ('', Comment.Preproc), - ('', Comment, '#pop'), - ('-', Comment), - ], - 'tag': [ - (r'\s+', Text), - 
(r'[\w.:-]+\s*=', Name.Attribute, 'attr'), - (r'/?\s*>', Name.Tag, '#pop'), - ], - 'attr': [ - ('\s+', Text), - ('".*?"', String, '#pop'), - ("'.*?'", String, '#pop'), - (r'[^\s>]+', String, '#pop'), - ], - } - - def analyse_text(text): - if looks_like_xml(text): - return 0.5 - - -class XsltLexer(XmlLexer): - ''' - A lexer for XSLT. - - *New in Pygments 0.10.* - ''' - - name = 'XSLT' - aliases = ['xslt'] - filenames = ['*.xsl', '*.xslt', '*.xpl'] # xpl is XProc - mimetypes = ['application/xsl+xml', 'application/xslt+xml'] - - EXTRA_KEYWORDS = set([ - 'apply-imports', 'apply-templates', 'attribute', - 'attribute-set', 'call-template', 'choose', 'comment', - 'copy', 'copy-of', 'decimal-format', 'element', 'fallback', - 'for-each', 'if', 'import', 'include', 'key', 'message', - 'namespace-alias', 'number', 'otherwise', 'output', 'param', - 'preserve-space', 'processing-instruction', 'sort', - 'strip-space', 'stylesheet', 'template', 'text', 'transform', - 'value-of', 'variable', 'when', 'with-param' - ]) - - def get_tokens_unprocessed(self, text): - for index, token, value in XmlLexer.get_tokens_unprocessed(self, text): - m = re.match(']*)/?>?', value) - - if token is Name.Tag and m and m.group(1) in self.EXTRA_KEYWORDS: - yield index, Keyword, value - else: - yield index, token, value - - def analyse_text(text): - if looks_like_xml(text) and ' tags is highlighted by the appropriate lexer. 
- - *New in Pygments 1.1.* - """ - flags = re.MULTILINE | re.DOTALL - name = 'MXML' - aliases = ['mxml'] - filenames = ['*.mxml'] - mimetimes = ['text/xml', 'application/xml'] - - tokens = { - 'root': [ - ('[^<&]+', Text), - (r'&\S*?;', Name.Entity), - (r'(\<\!\[CDATA\[)(.*?)(\]\]\>)', - bygroups(String, using(ActionScript3Lexer), String)), - ('', Comment, '#pop'), - ('-', Comment), - ], - 'tag': [ - (r'\s+', Text), - (r'[a-zA-Z0-9_.:-]+\s*=', Name.Attribute, 'attr'), - (r'/?\s*>', Name.Tag, '#pop'), - ], - 'attr': [ - ('\s+', Text), - ('".*?"', String, '#pop'), - ("'.*?'", String, '#pop'), - (r'[^\s>]+', String, '#pop'), - ], - } - - -class HaxeLexer(ExtendedRegexLexer): - """ - For Haxe source code (http://haxe.org/). - - *New in Pygments 1.3.* - """ - - name = 'Haxe' - aliases = ['hx', 'Haxe', 'haxe', 'haXe', 'hxsl'] - filenames = ['*.hx', '*.hxsl'] - mimetypes = ['text/haxe', 'text/x-haxe', 'text/x-hx'] - - # keywords extracted from lexer.mll in the haxe compiler source - keyword = (r'(?:function|class|static|var|if|else|while|do|for|' - r'break|return|continue|extends|implements|import|' - r'switch|case|default|public|private|try|untyped|' - r'catch|new|this|throw|extern|enum|in|interface|' - r'cast|override|dynamic|typedef|package|' - r'inline|using|null|true|false|abstract)\b') - - # idtype in lexer.mll - typeid = r'_*[A-Z][_a-zA-Z0-9]*' - - # combined ident and dollar and idtype - ident = r'(?:_*[a-z][_a-zA-Z0-9]*|_+[0-9][_a-zA-Z0-9]*|' + typeid + \ - '|_+|\$[_a-zA-Z0-9]+)' - - binop = (r'(?:%=|&=|\|=|\^=|\+=|\-=|\*=|/=|<<=|>\s*>\s*=|>\s*>\s*>\s*=|==|' - r'!=|<=|>\s*=|&&|\|\||<<|>>>|>\s*>|\.\.\.|<|>|%|&|\||\^|\+|\*|' - r'/|\-|=>|=)') - - # ident except keywords - ident_no_keyword = r'(?!' 
+ keyword + ')' + ident - - flags = re.DOTALL | re.MULTILINE - - preproc_stack = [] - - def preproc_callback(self, match, ctx): - proc = match.group(2) - - if proc == 'if': - # store the current stack - self.preproc_stack.append(ctx.stack[:]) - elif proc in ['else', 'elseif']: - # restore the stack back to right before #if - if self.preproc_stack: ctx.stack = self.preproc_stack[-1][:] - elif proc == 'end': - # remove the saved stack of previous #if - if self.preproc_stack: self.preproc_stack.pop() - - # #if and #elseif should follow by an expr - if proc in ['if', 'elseif']: - ctx.stack.append('preproc-expr') - - # #error can be optionally follow by the error msg - if proc in ['error']: - ctx.stack.append('preproc-error') - - yield match.start(), Comment.Preproc, '#' + proc - ctx.pos = match.end() - - - tokens = { - 'root': [ - include('spaces'), - include('meta'), - (r'(?:package)\b', Keyword.Namespace, ('semicolon', 'package')), - (r'(?:import)\b', Keyword.Namespace, ('semicolon', 'import')), - (r'(?:using)\b', Keyword.Namespace, ('semicolon', 'using')), - (r'(?:extern|private)\b', Keyword.Declaration), - (r'(?:abstract)\b', Keyword.Declaration, 'abstract'), - (r'(?:class|interface)\b', Keyword.Declaration, 'class'), - (r'(?:enum)\b', Keyword.Declaration, 'enum'), - (r'(?:typedef)\b', Keyword.Declaration, 'typedef'), - - # top-level expression - # although it is not supported in haxe, but it is common to write - # expression in web pages the positive lookahead here is to prevent - # an infinite loop at the EOF - (r'(?=.)', Text, 'expr-statement'), - ], - - # space/tab/comment/preproc - 'spaces': [ - (r'\s+', Text), - (r'//[^\n\r]*', Comment.Single), - (r'/\*.*?\*/', Comment.Multiline), - (r'(#)(if|elseif|else|end|error)\b', preproc_callback), - ], - - 'string-single-interpol': [ - (r'\$\{', String.Interpol, ('string-interpol-close', 'expr')), - (r'\$\$', String.Escape), - (r'\$(?=' + ident + ')', String.Interpol, 'ident'), - include('string-single'), - ], - - 
'string-single': [ - (r"'", String.Single, '#pop'), - (r'\\.', String.Escape), - (r'.', String.Single), - ], - - 'string-double': [ - (r'"', String.Double, '#pop'), - (r'\\.', String.Escape), - (r'.', String.Double), - ], - - 'string-interpol-close': [ - (r'\$'+ident, String.Interpol), - (r'\}', String.Interpol, '#pop'), - ], - - 'package': [ - include('spaces'), - (ident, Name.Namespace), - (r'\.', Punctuation, 'import-ident'), - (r'', Text, '#pop'), - ], - - 'import': [ - include('spaces'), - (ident, Name.Namespace), - (r'\*', Keyword), # wildcard import - (r'\.', Punctuation, 'import-ident'), - (r'in', Keyword.Namespace, 'ident'), - (r'', Text, '#pop'), - ], - - 'import-ident': [ - include('spaces'), - (r'\*', Keyword, '#pop'), # wildcard import - (ident, Name.Namespace, '#pop'), - ], - - 'using': [ - include('spaces'), - (ident, Name.Namespace), - (r'\.', Punctuation, 'import-ident'), - (r'', Text, '#pop'), - ], - - 'preproc-error': [ - (r'\s+', Comment.Preproc), - (r"'", String.Single, ('#pop', 'string-single')), - (r'"', String.Double, ('#pop', 'string-double')), - (r'', Text, '#pop'), - ], - - 'preproc-expr': [ - (r'\s+', Comment.Preproc), - (r'\!', Comment.Preproc), - (r'\(', Comment.Preproc, ('#pop', 'preproc-parenthesis')), - - (ident, Comment.Preproc, '#pop'), - (r"'", String.Single, ('#pop', 'string-single')), - (r'"', String.Double, ('#pop', 'string-double')), - ], - - 'preproc-parenthesis': [ - (r'\s+', Comment.Preproc), - (r'\)', Comment.Preproc, '#pop'), - ('', Text, 'preproc-expr-in-parenthesis'), - ], - - 'preproc-expr-chain': [ - (r'\s+', Comment.Preproc), - (binop, Comment.Preproc, ('#pop', 'preproc-expr-in-parenthesis')), - (r'', Text, '#pop'), - ], - - # same as 'preproc-expr' but able to chain 'preproc-expr-chain' - 'preproc-expr-in-parenthesis': [ - (r'\s+', Comment.Preproc), - (r'\!', Comment.Preproc), - (r'\(', Comment.Preproc, - ('#pop', 'preproc-expr-chain', 'preproc-parenthesis')), - - (ident, Comment.Preproc, ('#pop', 
'preproc-expr-chain')), - (r"'", String.Single, - ('#pop', 'preproc-expr-chain', 'string-single')), - (r'"', String.Double, - ('#pop', 'preproc-expr-chain', 'string-double')), - ], - - 'abstract' : [ - include('spaces'), - (r'', Text, ('#pop', 'abstract-body', 'abstract-relation', - 'abstract-opaque', 'type-param-constraint', 'type-name')), - ], - - 'abstract-body' : [ - include('spaces'), - (r'\{', Punctuation, ('#pop', 'class-body')), - ], - - 'abstract-opaque' : [ - include('spaces'), - (r'\(', Punctuation, ('#pop', 'parenthesis-close', 'type')), - (r'', Text, '#pop'), - ], - - 'abstract-relation': [ - include('spaces'), - (r'(?:to|from)', Keyword.Declaration, 'type'), - (r',', Punctuation), - (r'', Text, '#pop'), - ], - - 'meta': [ - include('spaces'), - (r'@', Name.Decorator, ('meta-body', 'meta-ident', 'meta-colon')), - ], - - # optional colon - 'meta-colon': [ - include('spaces'), - (r':', Name.Decorator, '#pop'), - (r'', Text, '#pop'), - ], - - # same as 'ident' but set token as Name.Decorator instead of Name - 'meta-ident': [ - include('spaces'), - (ident, Name.Decorator, '#pop'), - ], - - 'meta-body': [ - include('spaces'), - (r'\(', Name.Decorator, ('#pop', 'meta-call')), - (r'', Text, '#pop'), - ], - - 'meta-call': [ - include('spaces'), - (r'\)', Name.Decorator, '#pop'), - (r'', Text, ('#pop', 'meta-call-sep', 'expr')), - ], - - 'meta-call-sep': [ - include('spaces'), - (r'\)', Name.Decorator, '#pop'), - (r',', Punctuation, ('#pop', 'meta-call')), - ], - - 'typedef': [ - include('spaces'), - (r'', Text, ('#pop', 'typedef-body', 'type-param-constraint', - 'type-name')), - ], - - 'typedef-body': [ - include('spaces'), - (r'=', Operator, ('#pop', 'optional-semicolon', 'type')), - ], - - 'enum': [ - include('spaces'), - (r'', Text, ('#pop', 'enum-body', 'bracket-open', - 'type-param-constraint', 'type-name')), - ], - - 'enum-body': [ - include('spaces'), - include('meta'), - (r'\}', Punctuation, '#pop'), - (ident_no_keyword, Name, ('enum-member', 
'type-param-constraint')), - ], - - 'enum-member': [ - include('spaces'), - (r'\(', Punctuation, - ('#pop', 'semicolon', 'flag', 'function-param')), - (r'', Punctuation, ('#pop', 'semicolon', 'flag')), - ], - - 'class': [ - include('spaces'), - (r'', Text, ('#pop', 'class-body', 'bracket-open', 'extends', - 'type-param-constraint', 'type-name')), - ], - - 'extends': [ - include('spaces'), - (r'(?:extends|implements)\b', Keyword.Declaration, 'type'), - (r',', Punctuation), # the comma is made optional here, since haxe2 - # requires the comma but haxe3 does not allow it - (r'', Text, '#pop'), - ], - - 'bracket-open': [ - include('spaces'), - (r'\{', Punctuation, '#pop'), - ], - - 'bracket-close': [ - include('spaces'), - (r'\}', Punctuation, '#pop'), - ], - - 'class-body': [ - include('spaces'), - include('meta'), - (r'\}', Punctuation, '#pop'), - (r'(?:static|public|private|override|dynamic|inline|macro)\b', - Keyword.Declaration), - (r'', Text, 'class-member'), - ], - - 'class-member': [ - include('spaces'), - (r'(var)\b', Keyword.Declaration, - ('#pop', 'optional-semicolon', 'prop')), - (r'(function)\b', Keyword.Declaration, - ('#pop', 'optional-semicolon', 'class-method')), - ], - - # local function, anonymous or not - 'function-local': [ - include('spaces'), - (r'(' + ident_no_keyword + ')?', Name.Function, - ('#pop', 'expr', 'flag', 'function-param', - 'parenthesis-open', 'type-param-constraint')), - ], - - 'optional-expr': [ - include('spaces'), - include('expr'), - (r'', Text, '#pop'), - ], - - 'class-method': [ - include('spaces'), - (ident, Name.Function, ('#pop', 'optional-expr', 'flag', - 'function-param', 'parenthesis-open', - 'type-param-constraint')), - ], - - # function arguments - 'function-param': [ - include('spaces'), - (r'\)', Punctuation, '#pop'), - (r'\?', Punctuation), - (ident_no_keyword, Name, - ('#pop', 'function-param-sep', 'assign', 'flag')), - ], - - 'function-param-sep': [ - include('spaces'), - (r'\)', Punctuation, '#pop'), - (r',', 
Punctuation, ('#pop', 'function-param')), - ], - - # class property - # eg. var prop(default, null):String; - 'prop': [ - include('spaces'), - (ident_no_keyword, Name, ('#pop', 'assign', 'flag', 'prop-get-set')), - ], - - 'prop-get-set': [ - include('spaces'), - (r'\(', Punctuation, ('#pop', 'parenthesis-close', - 'prop-get-set-opt', 'comma', 'prop-get-set-opt')), - (r'', Text, '#pop'), - ], - - 'prop-get-set-opt': [ - include('spaces'), - (r'(?:default|null|never|dynamic|get|set)\b', Keyword, '#pop'), - (ident_no_keyword, Text, '#pop'), #custom getter/setter - ], - - 'expr-statement': [ - include('spaces'), - # makes semicolon optional here, just to avoid checking the last - # one is bracket or not. - (r'', Text, ('#pop', 'optional-semicolon', 'expr')), - ], - - 'expr': [ - include('spaces'), - (r'@', Name.Decorator, ('#pop', 'optional-expr', 'meta-body', - 'meta-ident', 'meta-colon')), - (r'(?:\+\+|\-\-|~(?!/)|!|\-)', Operator), - (r'\(', Punctuation, ('#pop', 'expr-chain', 'parenthesis')), - (r'(?:inline)\b', Keyword.Declaration), - (r'(?:function)\b', Keyword.Declaration, ('#pop', 'expr-chain', - 'function-local')), - (r'\{', Punctuation, ('#pop', 'expr-chain', 'bracket')), - (r'(?:true|false|null)\b', Keyword.Constant, ('#pop', 'expr-chain')), - (r'(?:this)\b', Keyword, ('#pop', 'expr-chain')), - (r'(?:cast)\b', Keyword, ('#pop', 'expr-chain', 'cast')), - (r'(?:try)\b', Keyword, ('#pop', 'catch', 'expr')), - (r'(?:var)\b', Keyword.Declaration, ('#pop', 'var')), - (r'(?:new)\b', Keyword, ('#pop', 'expr-chain', 'new')), - (r'(?:switch)\b', Keyword, ('#pop', 'switch')), - (r'(?:if)\b', Keyword, ('#pop', 'if')), - (r'(?:do)\b', Keyword, ('#pop', 'do')), - (r'(?:while)\b', Keyword, ('#pop', 'while')), - (r'(?:for)\b', Keyword, ('#pop', 'for')), - (r'(?:untyped|throw)\b', Keyword), - (r'(?:return)\b', Keyword, ('#pop', 'optional-expr')), - (r'(?:macro)\b', Keyword, ('#pop', 'macro')), - (r'(?:continue|break)\b', Keyword, '#pop'), - 
(r'(?:\$\s*[a-z]\b|\$(?!'+ident+'))', Name, ('#pop', 'dollar')), - (ident_no_keyword, Name, ('#pop', 'expr-chain')), - - # Float - (r'\.[0-9]+', Number.Float, ('#pop', 'expr-chain')), - (r'[0-9]+[eE][\+\-]?[0-9]+', Number.Float, ('#pop', 'expr-chain')), - (r'[0-9]+\.[0-9]*[eE][\+\-]?[0-9]+', Number.Float, ('#pop', 'expr-chain')), - (r'[0-9]+\.[0-9]+', Number.Float, ('#pop', 'expr-chain')), - (r'[0-9]+\.(?!' + ident + '|\.\.)', Number.Float, ('#pop', 'expr-chain')), - - # Int - (r'0x[0-9a-fA-F]+', Number.Hex, ('#pop', 'expr-chain')), - (r'[0-9]+', Number.Integer, ('#pop', 'expr-chain')), - - # String - (r"'", String.Single, ('#pop', 'expr-chain', 'string-single-interpol')), - (r'"', String.Double, ('#pop', 'expr-chain', 'string-double')), - - # EReg - (r'~/(\\\\|\\/|[^/\n])*/[gimsu]*', String.Regex, ('#pop', 'expr-chain')), - - # Array - (r'\[', Punctuation, ('#pop', 'expr-chain', 'array-decl')), - ], - - 'expr-chain': [ - include('spaces'), - (r'(?:\+\+|\-\-)', Operator), - (binop, Operator, ('#pop', 'expr')), - (r'(?:in)\b', Keyword, ('#pop', 'expr')), - (r'\?', Operator, ('#pop', 'expr', 'ternary', 'expr')), - (r'(\.)(' + ident_no_keyword + ')', bygroups(Punctuation, Name)), - (r'\[', Punctuation, 'array-access'), - (r'\(', Punctuation, 'call'), - (r'', Text, '#pop'), - ], - - # macro reification - 'macro': [ - include('spaces'), - (r':', Punctuation, ('#pop', 'type')), - (r'', Text, ('#pop', 'expr')), - ], - - # cast can be written as "cast expr" or "cast(expr, type)" - 'cast': [ - include('spaces'), - (r'\(', Punctuation, ('#pop', 'parenthesis-close', - 'cast-type', 'expr')), - (r'', Text, ('#pop', 'expr')), - ], - - # optionally give a type as the 2nd argument of cast() - 'cast-type': [ - include('spaces'), - (r',', Punctuation, ('#pop', 'type')), - (r'', Text, '#pop'), - ], - - 'catch': [ - include('spaces'), - (r'(?:catch)\b', Keyword, ('expr', 'function-param', - 'parenthesis-open')), - (r'', Text, '#pop'), - ], - - # do-while loop - 'do': [ - 
include('spaces'), - (r'', Punctuation, ('#pop', 'do-while', 'expr')), - ], - - # the while after do - 'do-while': [ - include('spaces'), - (r'(?:while)\b', Keyword, ('#pop', 'parenthesis', - 'parenthesis-open')), - ], - - 'while': [ - include('spaces'), - (r'\(', Punctuation, ('#pop', 'expr', 'parenthesis')), - ], - - 'for': [ - include('spaces'), - (r'\(', Punctuation, ('#pop', 'expr', 'parenthesis')), - ], - - 'if': [ - include('spaces'), - (r'\(', Punctuation, ('#pop', 'else', 'optional-semicolon', 'expr', - 'parenthesis')), - ], - - 'else': [ - include('spaces'), - (r'(?:else)\b', Keyword, ('#pop', 'expr')), - (r'', Text, '#pop'), - ], - - 'switch': [ - include('spaces'), - (r'', Text, ('#pop', 'switch-body', 'bracket-open', 'expr')), - ], - - 'switch-body': [ - include('spaces'), - (r'(?:case|default)\b', Keyword, ('case-block', 'case')), - (r'\}', Punctuation, '#pop'), - ], - - 'case': [ - include('spaces'), - (r':', Punctuation, '#pop'), - (r'', Text, ('#pop', 'case-sep', 'case-guard', 'expr')), - ], - - 'case-sep': [ - include('spaces'), - (r':', Punctuation, '#pop'), - (r',', Punctuation, ('#pop', 'case')), - ], - - 'case-guard': [ - include('spaces'), - (r'(?:if)\b', Keyword, ('#pop', 'parenthesis', 'parenthesis-open')), - (r'', Text, '#pop'), - ], - - # optional multiple expr under a case - 'case-block': [ - include('spaces'), - (r'(?!(?:case|default)\b|\})', Keyword, 'expr-statement'), - (r'', Text, '#pop'), - ], - - 'new': [ - include('spaces'), - (r'', Text, ('#pop', 'call', 'parenthesis-open', 'type')), - ], - - 'array-decl': [ - include('spaces'), - (r'\]', Punctuation, '#pop'), - (r'', Text, ('#pop', 'array-decl-sep', 'expr')), - ], - - 'array-decl-sep': [ - include('spaces'), - (r'\]', Punctuation, '#pop'), - (r',', Punctuation, ('#pop', 'array-decl')), - ], - - 'array-access': [ - include('spaces'), - (r'', Text, ('#pop', 'array-access-close', 'expr')), - ], - - 'array-access-close': [ - include('spaces'), - (r'\]', Punctuation, '#pop'), - ], - 
- 'comma': [ - include('spaces'), - (r',', Punctuation, '#pop'), - ], - - 'colon': [ - include('spaces'), - (r':', Punctuation, '#pop'), - ], - - 'semicolon': [ - include('spaces'), - (r';', Punctuation, '#pop'), - ], - - 'optional-semicolon': [ - include('spaces'), - (r';', Punctuation, '#pop'), - (r'', Text, '#pop'), - ], - - # identity that CAN be a Haxe keyword - 'ident': [ - include('spaces'), - (ident, Name, '#pop'), - ], - - 'dollar': [ - include('spaces'), - (r'\{', Keyword, ('#pop', 'bracket-close', 'expr')), - (r'', Text, ('#pop', 'expr-chain')), - ], - - 'type-name': [ - include('spaces'), - (typeid, Name, '#pop'), - ], - - 'type-full-name': [ - include('spaces'), - (r'\.', Punctuation, 'ident'), - (r'', Text, '#pop'), - ], - - 'type': [ - include('spaces'), - (r'\?', Punctuation), - (ident, Name, ('#pop', 'type-check', 'type-full-name')), - (r'\{', Punctuation, ('#pop', 'type-check', 'type-struct')), - (r'\(', Punctuation, ('#pop', 'type-check', 'type-parenthesis')), - ], - - 'type-parenthesis': [ - include('spaces'), - (r'', Text, ('#pop', 'parenthesis-close', 'type')), - ], - - 'type-check': [ - include('spaces'), - (r'->', Punctuation, ('#pop', 'type')), - (r'<(?!=)', Punctuation, 'type-param'), - (r'', Text, '#pop'), - ], - - 'type-struct': [ - include('spaces'), - (r'\}', Punctuation, '#pop'), - (r'\?', Punctuation), - (r'>', Punctuation, ('comma', 'type')), - (ident_no_keyword, Name, ('#pop', 'type-struct-sep', 'type', 'colon')), - include('class-body'), - ], - - 'type-struct-sep': [ - include('spaces'), - (r'\}', Punctuation, '#pop'), - (r',', Punctuation, ('#pop', 'type-struct')), - ], - - # type-param can be a normal type or a constant literal... - 'type-param-type': [ - # Float - (r'\.[0-9]+', Number.Float, '#pop'), - (r'[0-9]+[eE][\+\-]?[0-9]+', Number.Float, '#pop'), - (r'[0-9]+\.[0-9]*[eE][\+\-]?[0-9]+', Number.Float, '#pop'), - (r'[0-9]+\.[0-9]+', Number.Float, '#pop'), - (r'[0-9]+\.(?!' 
+ ident + '|\.\.)', Number.Float, '#pop'), - - # Int - (r'0x[0-9a-fA-F]+', Number.Hex, '#pop'), - (r'[0-9]+', Number.Integer, '#pop'), - - # String - (r"'", String.Single, ('#pop', 'string-single')), - (r'"', String.Double, ('#pop', 'string-double')), - - # EReg - (r'~/(\\\\|\\/|[^/\n])*/[gim]*', String.Regex, '#pop'), - - # Array - (r'\[', Operator, ('#pop', 'array-decl')), - - include('type'), - ], - - # type-param part of a type - # ie. the path in Map - 'type-param': [ - include('spaces'), - (r'', Text, ('#pop', 'type-param-sep', 'type-param-type')), - ], - - 'type-param-sep': [ - include('spaces'), - (r'>', Punctuation, '#pop'), - (r',', Punctuation, ('#pop', 'type-param')), - ], - - # optional type-param that may include constraint - # ie. - 'type-param-constraint': [ - include('spaces'), - (r'<(?!=)', Punctuation, ('#pop', 'type-param-constraint-sep', - 'type-param-constraint-flag', 'type-name')), - (r'', Text, '#pop'), - ], - - 'type-param-constraint-sep': [ - include('spaces'), - (r'>', Punctuation, '#pop'), - (r',', Punctuation, ('#pop', 'type-param-constraint-sep', - 'type-param-constraint-flag', 'type-name')), - ], - - # the optional constraint inside type-param - 'type-param-constraint-flag': [ - include('spaces'), - (r':', Punctuation, ('#pop', 'type-param-constraint-flag-type')), - (r'', Text, '#pop'), - ], - - 'type-param-constraint-flag-type': [ - include('spaces'), - (r'\(', Punctuation, ('#pop', 'type-param-constraint-flag-type-sep', - 'type')), - (r'', Text, ('#pop', 'type')), - ], - - 'type-param-constraint-flag-type-sep': [ - include('spaces'), - (r'\)', Punctuation, '#pop'), - (r',', Punctuation, 'type'), - ], - - # a parenthesis expr that contain exactly one expr - 'parenthesis': [ - include('spaces'), - (r'', Text, ('#pop', 'parenthesis-close', 'expr')), - ], - - 'parenthesis-open': [ - include('spaces'), - (r'\(', Punctuation, '#pop'), - ], - - 'parenthesis-close': [ - include('spaces'), - (r'\)', Punctuation, '#pop'), - ], - - 'var': [ - 
include('spaces'), - (ident_no_keyword, Text, ('#pop', 'var-sep', 'assign', 'flag')), - ], - - # optional more var decl. - 'var-sep': [ - include('spaces'), - (r',', Punctuation, ('#pop', 'var')), - (r'', Text, '#pop'), - ], - - # optional assignment - 'assign': [ - include('spaces'), - (r'=', Operator, ('#pop', 'expr')), - (r'', Text, '#pop'), - ], - - # optional type flag - 'flag': [ - include('spaces'), - (r':', Punctuation, ('#pop', 'type')), - (r'', Text, '#pop'), - ], - - # colon as part of a ternary operator (?:) - 'ternary': [ - include('spaces'), - (r':', Operator, '#pop'), - ], - - # function call - 'call': [ - include('spaces'), - (r'\)', Punctuation, '#pop'), - (r'', Text, ('#pop', 'call-sep', 'expr')), - ], - - # after a call param - 'call-sep': [ - include('spaces'), - (r'\)', Punctuation, '#pop'), - (r',', Punctuation, ('#pop', 'call')), - ], - - # bracket can be block or object - 'bracket': [ - include('spaces'), - (r'(?!(?:\$\s*[a-z]\b|\$(?!'+ident+')))' + ident_no_keyword, Name, - ('#pop', 'bracket-check')), - (r"'", String.Single, ('#pop', 'bracket-check', 'string-single')), - (r'"', String.Double, ('#pop', 'bracket-check', 'string-double')), - (r'', Text, ('#pop', 'block')), - ], - - 'bracket-check': [ - include('spaces'), - (r':', Punctuation, ('#pop', 'object-sep', 'expr')), #is object - (r'', Text, ('#pop', 'block', 'optional-semicolon', 'expr-chain')), #is block - ], - - # code block - 'block': [ - include('spaces'), - (r'\}', Punctuation, '#pop'), - (r'', Text, 'expr-statement'), - ], - - # object in key-value pairs - 'object': [ - include('spaces'), - (r'\}', Punctuation, '#pop'), - (r'', Text, ('#pop', 'object-sep', 'expr', 'colon', 'ident-or-string')) - ], - - # a key of an object - 'ident-or-string': [ - include('spaces'), - (ident_no_keyword, Name, '#pop'), - (r"'", String.Single, ('#pop', 'string-single')), - (r'"', String.Double, ('#pop', 'string-double')), - ], - - # after a key-value pair in object - 'object-sep': [ - 
include('spaces'), - (r'\}', Punctuation, '#pop'), - (r',', Punctuation, ('#pop', 'object')), - ], - - - - } - - def analyse_text(text): - if re.match(r'\w+\s*:\s*\w', text): return 0.3 - - -def _indentation(lexer, match, ctx): - indentation = match.group(0) - yield match.start(), Text, indentation - ctx.last_indentation = indentation - ctx.pos = match.end() - - if hasattr(ctx, 'block_state') and ctx.block_state and \ - indentation.startswith(ctx.block_indentation) and \ - indentation != ctx.block_indentation: - ctx.stack.append(ctx.block_state) - else: - ctx.block_state = None - ctx.block_indentation = None - ctx.stack.append('content') - -def _starts_block(token, state): - def callback(lexer, match, ctx): - yield match.start(), token, match.group(0) - - if hasattr(ctx, 'last_indentation'): - ctx.block_indentation = ctx.last_indentation - else: - ctx.block_indentation = '' - - ctx.block_state = state - ctx.pos = match.end() - - return callback - - -class HamlLexer(ExtendedRegexLexer): - """ - For Haml markup. - - *New in Pygments 1.3.* - """ - - name = 'Haml' - aliases = ['haml', 'HAML'] - filenames = ['*.haml'] - mimetypes = ['text/x-haml'] - - flags = re.IGNORECASE - # Haml can include " |\n" anywhere, - # which is ignored and used to wrap long lines. - # To accomodate this, use this custom faux dot instead. - _dot = r'(?: \|\n(?=.* \|)|.)' - - # In certain places, a comma at the end of the line - # allows line wrapping as well. - _comma_dot = r'(?:,\s*\n|' + _dot + ')' - tokens = { - 'root': [ - (r'[ \t]*\n', Text), - (r'[ \t]*', _indentation), - ], - - 'css': [ - (r'\.[a-z0-9_:-]+', Name.Class, 'tag'), - (r'\#[a-z0-9_:-]+', Name.Function, 'tag'), - ], - - 'eval-or-plain': [ - (r'[&!]?==', Punctuation, 'plain'), - (r'([&!]?[=~])(' + _comma_dot + r'*\n)', - bygroups(Punctuation, using(RubyLexer)), - 'root'), - (r'', Text, 'plain'), - ], - - 'content': [ - include('css'), - (r'%[a-z0-9_:-]+', Name.Tag, 'tag'), - (r'!!!' 
+ _dot + r'*\n', Name.Namespace, '#pop'), - (r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)', - bygroups(Comment, Comment.Special, Comment), - '#pop'), - (r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'), - '#pop'), - (r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc, - 'haml-comment-block'), '#pop'), - (r'(-)(' + _comma_dot + r'*\n)', - bygroups(Punctuation, using(RubyLexer)), - '#pop'), - (r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'), - '#pop'), - include('eval-or-plain'), - ], - - 'tag': [ - include('css'), - (r'\{(,\n|' + _dot + ')*?\}', using(RubyLexer)), - (r'\[' + _dot + '*?\]', using(RubyLexer)), - (r'\(', Text, 'html-attributes'), - (r'/[ \t]*\n', Punctuation, '#pop:2'), - (r'[<>]{1,2}(?=[ \t=])', Punctuation), - include('eval-or-plain'), - ], - - 'plain': [ - (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text), - (r'(#\{)(' + _dot + '*?)(\})', - bygroups(String.Interpol, using(RubyLexer), String.Interpol)), - (r'\n', Text, 'root'), - ], - - 'html-attributes': [ - (r'\s+', Text), - (r'[a-z0-9_:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'), - (r'[a-z0-9_:-]+', Name.Attribute), - (r'\)', Text, '#pop'), - ], - - 'html-attribute-value': [ - (r'[ \t]+', Text), - (r'[a-z0-9_]+', Name.Variable, '#pop'), - (r'@[a-z0-9_]+', Name.Variable.Instance, '#pop'), - (r'\$[a-z0-9_]+', Name.Variable.Global, '#pop'), - (r"'(\\\\|\\'|[^'\n])*'", String, '#pop'), - (r'"(\\\\|\\"|[^"\n])*"', String, '#pop'), - ], - - 'html-comment-block': [ - (_dot + '+', Comment), - (r'\n', Text, 'root'), - ], - - 'haml-comment-block': [ - (_dot + '+', Comment.Preproc), - (r'\n', Text, 'root'), - ], - - 'filter-block': [ - (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator), - (r'(#\{)(' + _dot + '*?)(\})', - bygroups(String.Interpol, using(RubyLexer), String.Interpol)), - (r'\n', Text, 'root'), - ], - } - - -common_sass_tokens = { - 'value': [ - (r'[ \t]+', Text), - (r'[!$][\w-]+', Name.Variable), - (r'url\(', String.Other, 'string-url'), - 
(r'[a-z_-][\w-]*(?=\()', Name.Function), - (r'(azimuth|background-attachment|background-color|' - r'background-image|background-position|background-repeat|' - r'background|border-bottom-color|border-bottom-style|' - r'border-bottom-width|border-left-color|border-left-style|' - r'border-left-width|border-right|border-right-color|' - r'border-right-style|border-right-width|border-top-color|' - r'border-top-style|border-top-width|border-bottom|' - r'border-collapse|border-left|border-width|border-color|' - r'border-spacing|border-style|border-top|border|caption-side|' - r'clear|clip|color|content|counter-increment|counter-reset|' - r'cue-after|cue-before|cue|cursor|direction|display|' - r'elevation|empty-cells|float|font-family|font-size|' - r'font-size-adjust|font-stretch|font-style|font-variant|' - r'font-weight|font|height|letter-spacing|line-height|' - r'list-style-type|list-style-image|list-style-position|' - r'list-style|margin-bottom|margin-left|margin-right|' - r'margin-top|margin|marker-offset|marks|max-height|max-width|' - r'min-height|min-width|opacity|orphans|outline|outline-color|' - r'outline-style|outline-width|overflow|padding-bottom|' - r'padding-left|padding-right|padding-top|padding|page|' - r'page-break-after|page-break-before|page-break-inside|' - r'pause-after|pause-before|pause|pitch|pitch-range|' - r'play-during|position|quotes|richness|right|size|' - r'speak-header|speak-numeral|speak-punctuation|speak|' - r'speech-rate|stress|table-layout|text-align|text-decoration|' - r'text-indent|text-shadow|text-transform|top|unicode-bidi|' - r'vertical-align|visibility|voice-family|volume|white-space|' - r'widows|width|word-spacing|z-index|bottom|left|' - r'above|absolute|always|armenian|aural|auto|avoid|baseline|' - r'behind|below|bidi-override|blink|block|bold|bolder|both|' - r'capitalize|center-left|center-right|center|circle|' - r'cjk-ideographic|close-quote|collapse|condensed|continuous|' - 
r'crop|crosshair|cross|cursive|dashed|decimal-leading-zero|' - r'decimal|default|digits|disc|dotted|double|e-resize|embed|' - r'extra-condensed|extra-expanded|expanded|fantasy|far-left|' - r'far-right|faster|fast|fixed|georgian|groove|hebrew|help|' - r'hidden|hide|higher|high|hiragana-iroha|hiragana|icon|' - r'inherit|inline-table|inline|inset|inside|invert|italic|' - r'justify|katakana-iroha|katakana|landscape|larger|large|' - r'left-side|leftwards|level|lighter|line-through|list-item|' - r'loud|lower-alpha|lower-greek|lower-roman|lowercase|ltr|' - r'lower|low|medium|message-box|middle|mix|monospace|' - r'n-resize|narrower|ne-resize|no-close-quote|no-open-quote|' - r'no-repeat|none|normal|nowrap|nw-resize|oblique|once|' - r'open-quote|outset|outside|overline|pointer|portrait|px|' - r'relative|repeat-x|repeat-y|repeat|rgb|ridge|right-side|' - r'rightwards|s-resize|sans-serif|scroll|se-resize|' - r'semi-condensed|semi-expanded|separate|serif|show|silent|' - r'slow|slower|small-caps|small-caption|smaller|soft|solid|' - r'spell-out|square|static|status-bar|super|sw-resize|' - r'table-caption|table-cell|table-column|table-column-group|' - r'table-footer-group|table-header-group|table-row|' - r'table-row-group|text|text-bottom|text-top|thick|thin|' - r'transparent|ultra-condensed|ultra-expanded|underline|' - r'upper-alpha|upper-latin|upper-roman|uppercase|url|' - r'visible|w-resize|wait|wider|x-fast|x-high|x-large|x-loud|' - r'x-low|x-small|x-soft|xx-large|xx-small|yes)\b', Name.Constant), - (r'(indigo|gold|firebrick|indianred|darkolivegreen|' - r'darkseagreen|mediumvioletred|mediumorchid|chartreuse|' - r'mediumslateblue|springgreen|crimson|lightsalmon|brown|' - r'turquoise|olivedrab|cyan|skyblue|darkturquoise|' - r'goldenrod|darkgreen|darkviolet|darkgray|lightpink|' - r'darkmagenta|lightgoldenrodyellow|lavender|yellowgreen|thistle|' - r'violet|orchid|ghostwhite|honeydew|cornflowerblue|' - r'darkblue|darkkhaki|mediumpurple|cornsilk|bisque|slategray|' - 
r'darkcyan|khaki|wheat|deepskyblue|darkred|steelblue|aliceblue|' - r'gainsboro|mediumturquoise|floralwhite|coral|lightgrey|' - r'lightcyan|darksalmon|beige|azure|lightsteelblue|oldlace|' - r'greenyellow|royalblue|lightseagreen|mistyrose|sienna|' - r'lightcoral|orangered|navajowhite|palegreen|burlywood|' - r'seashell|mediumspringgreen|papayawhip|blanchedalmond|' - r'peru|aquamarine|darkslategray|ivory|dodgerblue|' - r'lemonchiffon|chocolate|orange|forestgreen|slateblue|' - r'mintcream|antiquewhite|darkorange|cadetblue|moccasin|' - r'limegreen|saddlebrown|darkslateblue|lightskyblue|deeppink|' - r'plum|darkgoldenrod|sandybrown|magenta|tan|' - r'rosybrown|pink|lightblue|palevioletred|mediumseagreen|' - r'dimgray|powderblue|seagreen|snow|mediumblue|midnightblue|' - r'paleturquoise|palegoldenrod|whitesmoke|darkorchid|salmon|' - r'lightslategray|lawngreen|lightgreen|tomato|hotpink|' - r'lightyellow|lavenderblush|linen|mediumaquamarine|' - r'blueviolet|peachpuff)\b', Name.Entity), - (r'(black|silver|gray|white|maroon|red|purple|fuchsia|green|' - r'lime|olive|yellow|navy|blue|teal|aqua)\b', Name.Builtin), - (r'\!(important|default)', Name.Exception), - (r'(true|false)', Name.Pseudo), - (r'(and|or|not)', Operator.Word), - (r'/\*', Comment.Multiline, 'inline-comment'), - (r'//[^\n]*', Comment.Single), - (r'\#[a-z0-9]{1,6}', Number.Hex), - (r'(-?\d+)(\%|[a-z]+)?', bygroups(Number.Integer, Keyword.Type)), - (r'(-?\d*\.\d+)(\%|[a-z]+)?', bygroups(Number.Float, Keyword.Type)), - (r'#{', String.Interpol, 'interpolation'), - (r'[~\^\*!&%<>\|+=@:,./?-]+', Operator), - (r'[\[\]()]+', Punctuation), - (r'"', String.Double, 'string-double'), - (r"'", String.Single, 'string-single'), - (r'[a-z_-][\w-]*', Name), - ], - - 'interpolation': [ - (r'\}', String.Interpol, '#pop'), - include('value'), - ], - - 'selector': [ - (r'[ \t]+', Text), - (r'\:', Name.Decorator, 'pseudo-class'), - (r'\.', Name.Class, 'class'), - (r'\#', Name.Namespace, 'id'), - (r'[a-zA-Z0-9_-]+', Name.Tag), - (r'#\{', 
String.Interpol, 'interpolation'), - (r'&', Keyword), - (r'[~\^\*!&\[\]\(\)<>\|+=@:;,./?-]', Operator), - (r'"', String.Double, 'string-double'), - (r"'", String.Single, 'string-single'), - ], - - 'string-double': [ - (r'(\\.|#(?=[^\n{])|[^\n"#])+', String.Double), - (r'#\{', String.Interpol, 'interpolation'), - (r'"', String.Double, '#pop'), - ], - - 'string-single': [ - (r"(\\.|#(?=[^\n{])|[^\n'#])+", String.Double), - (r'#\{', String.Interpol, 'interpolation'), - (r"'", String.Double, '#pop'), - ], - - 'string-url': [ - (r'(\\#|#(?=[^\n{])|[^\n#)])+', String.Other), - (r'#\{', String.Interpol, 'interpolation'), - (r'\)', String.Other, '#pop'), - ], - - 'pseudo-class': [ - (r'[\w-]+', Name.Decorator), - (r'#\{', String.Interpol, 'interpolation'), - (r'', Text, '#pop'), - ], - - 'class': [ - (r'[\w-]+', Name.Class), - (r'#\{', String.Interpol, 'interpolation'), - (r'', Text, '#pop'), - ], - - 'id': [ - (r'[\w-]+', Name.Namespace), - (r'#\{', String.Interpol, 'interpolation'), - (r'', Text, '#pop'), - ], - - 'for': [ - (r'(from|to|through)', Operator.Word), - include('value'), - ], -} - -class SassLexer(ExtendedRegexLexer): - """ - For Sass stylesheets. 
- - *New in Pygments 1.3.* - """ - - name = 'Sass' - aliases = ['sass', 'SASS'] - filenames = ['*.sass'] - mimetypes = ['text/x-sass'] - - flags = re.IGNORECASE - tokens = { - 'root': [ - (r'[ \t]*\n', Text), - (r'[ \t]*', _indentation), - ], - - 'content': [ - (r'//[^\n]*', _starts_block(Comment.Single, 'single-comment'), - 'root'), - (r'/\*[^\n]*', _starts_block(Comment.Multiline, 'multi-comment'), - 'root'), - (r'@import', Keyword, 'import'), - (r'@for', Keyword, 'for'), - (r'@(debug|warn|if|while)', Keyword, 'value'), - (r'(@mixin)( [\w-]+)', bygroups(Keyword, Name.Function), 'value'), - (r'(@include)( [\w-]+)', bygroups(Keyword, Name.Decorator), 'value'), - (r'@extend', Keyword, 'selector'), - (r'@[a-z0-9_-]+', Keyword, 'selector'), - (r'=[\w-]+', Name.Function, 'value'), - (r'\+[\w-]+', Name.Decorator, 'value'), - (r'([!$][\w-]\w*)([ \t]*(?:(?:\|\|)?=|:))', - bygroups(Name.Variable, Operator), 'value'), - (r':', Name.Attribute, 'old-style-attr'), - (r'(?=.+?[=:]([^a-z]|$))', Name.Attribute, 'new-style-attr'), - (r'', Text, 'selector'), - ], - - 'single-comment': [ - (r'.+', Comment.Single), - (r'\n', Text, 'root'), - ], - - 'multi-comment': [ - (r'.+', Comment.Multiline), - (r'\n', Text, 'root'), - ], - - 'import': [ - (r'[ \t]+', Text), - (r'\S+', String), - (r'\n', Text, 'root'), - ], - - 'old-style-attr': [ - (r'[^\s:="\[]+', Name.Attribute), - (r'#{', String.Interpol, 'interpolation'), - (r'[ \t]*=', Operator, 'value'), - (r'', Text, 'value'), - ], - - 'new-style-attr': [ - (r'[^\s:="\[]+', Name.Attribute), - (r'#{', String.Interpol, 'interpolation'), - (r'[ \t]*[=:]', Operator, 'value'), - ], - - 'inline-comment': [ - (r"(\\#|#(?=[^\n{])|\*(?=[^\n/])|[^\n#*])+", Comment.Multiline), - (r'#\{', String.Interpol, 'interpolation'), - (r"\*/", Comment, '#pop'), - ], - } - for group, common in common_sass_tokens.items(): - tokens[group] = copy.copy(common) - tokens['value'].append((r'\n', Text, 'root')) - tokens['selector'].append((r'\n', Text, 'root')) - - 
-class ScssLexer(RegexLexer): - """ - For SCSS stylesheets. - """ - - name = 'SCSS' - aliases = ['scss'] - filenames = ['*.scss'] - mimetypes = ['text/x-scss'] - - flags = re.IGNORECASE | re.DOTALL - tokens = { - 'root': [ - (r'\s+', Text), - (r'//.*?\n', Comment.Single), - (r'/\*.*?\*/', Comment.Multiline), - (r'@import', Keyword, 'value'), - (r'@for', Keyword, 'for'), - (r'@(debug|warn|if|while)', Keyword, 'value'), - (r'(@mixin)( [\w-]+)', bygroups(Keyword, Name.Function), 'value'), - (r'(@include)( [\w-]+)', bygroups(Keyword, Name.Decorator), 'value'), - (r'@extend', Keyword, 'selector'), - (r'@[a-z0-9_-]+', Keyword, 'selector'), - (r'(\$[\w-]*\w)([ \t]*:)', bygroups(Name.Variable, Operator), 'value'), - (r'(?=[^;{}][;}])', Name.Attribute, 'attr'), - (r'(?=[^;{}:]+:[^a-z])', Name.Attribute, 'attr'), - (r'', Text, 'selector'), - ], - - 'attr': [ - (r'[^\s:="\[]+', Name.Attribute), - (r'#{', String.Interpol, 'interpolation'), - (r'[ \t]*:', Operator, 'value'), - ], - - 'inline-comment': [ - (r"(\\#|#(?=[^{])|\*(?=[^/])|[^#*])+", Comment.Multiline), - (r'#\{', String.Interpol, 'interpolation'), - (r"\*/", Comment, '#pop'), - ], - } - for group, common in common_sass_tokens.items(): - tokens[group] = copy.copy(common) - tokens['value'].extend([(r'\n', Text), (r'[;{}]', Punctuation, 'root')]) - tokens['selector'].extend([(r'\n', Text), (r'[;{}]', Punctuation, 'root')]) - - -class CoffeeScriptLexer(RegexLexer): - """ - For `CoffeeScript`_ source code. - - .. 
_CoffeeScript: http://coffeescript.org - - *New in Pygments 1.3.* - """ - - name = 'CoffeeScript' - aliases = ['coffee-script', 'coffeescript', 'coffee'] - filenames = ['*.coffee'] - mimetypes = ['text/coffeescript'] - - flags = re.DOTALL - tokens = { - 'commentsandwhitespace': [ - (r'\s+', Text), - (r'###[^#].*?###', Comment.Multiline), - (r'#(?!##[^#]).*?\n', Comment.Single), - ], - 'multilineregex': [ - (r'[^/#]+', String.Regex), - (r'///([gim]+\b|\B)', String.Regex, '#pop'), - (r'#{', String.Interpol, 'interpoling_string'), - (r'[/#]', String.Regex), - ], - 'slashstartsregex': [ - include('commentsandwhitespace'), - (r'///', String.Regex, ('#pop', 'multilineregex')), - (r'/(?! )(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' - r'([gim]+\b|\B)', String.Regex, '#pop'), - (r'', Text, '#pop'), - ], - 'root': [ - # this next expr leads to infinite loops root -> slashstartsregex - #(r'^(?=\s|/|)', popstate_xmlcomment_callback), - (r'[^-]{1,2}', Literal), - (r'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' + - unirange(0x10000, 0x10ffff), Literal), - ], - 'processing_instruction': [ - (r'\s+', Text, 'processing_instruction_content'), - (r'\?>', String.Doc, '#pop'), - (pitarget, Name), - ], - 'processing_instruction_content': [ - (r'\?>', String.Doc, '#pop'), - (r'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' + - unirange(0x10000, 0x10ffff), Literal), - ], - 'cdata_section': [ - (r']]>', String.Doc, '#pop'), - (r'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' + - unirange(0x10000, 0x10ffff), Literal), - ], - 'start_tag': [ - include('whitespace'), - (r'(/>)', popstate_tag_callback), - (r'>', Name.Tag, 'element_content'), - (r'"', Punctuation, 'quot_attribute_content'), - (r"'", Punctuation, 'apos_attribute_content'), - (r'=', Operator), - (qname, Name.Tag), - ], - 'quot_attribute_content': [ - (r'"', Punctuation, 'start_tag'), - (r'(\{)', pushstate_root_callback), - (r'""', Name.Attribute), - (quotattrcontentchar, Name.Attribute), - (entityref, Name.Attribute), - (charref, 
Name.Attribute), - (r'\{\{|\}\}', Name.Attribute), - ], - 'apos_attribute_content': [ - (r"'", Punctuation, 'start_tag'), - (r'\{', Punctuation, 'root'), - (r"''", Name.Attribute), - (aposattrcontentchar, Name.Attribute), - (entityref, Name.Attribute), - (charref, Name.Attribute), - (r'\{\{|\}\}', Name.Attribute), - ], - 'element_content': [ - (r')', popstate_tag_callback), - (qname, Name.Tag), - ], - 'xmlspace_decl': [ - (r'\(:', Comment, 'comment'), - (r'preserve|strip', Keyword, '#pop'), - ], - 'declareordering': [ - (r'\(:', Comment, 'comment'), - include('whitespace'), - (r'ordered|unordered', Keyword, '#pop'), - ], - 'xqueryversion': [ - include('whitespace'), - (r'\(:', Comment, 'comment'), - (stringdouble, String.Double), - (stringsingle, String.Single), - (r'encoding', Keyword), - (r';', Punctuation, '#pop'), - ], - 'pragma': [ - (qname, Name.Variable, 'pragmacontents'), - ], - 'pragmacontents': [ - (r'#\)', Punctuation, 'operator'), - (r'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' + - unirange(0x10000, 0x10ffff), Literal), - (r'(\s+)', Text), - ], - 'occurrenceindicator': [ - include('whitespace'), - (r'\(:', Comment, 'comment'), - (r'\*|\?|\+', Operator, 'operator'), - (r':=', Operator, 'root'), - (r'', Text, 'operator'), - ], - 'option': [ - include('whitespace'), - (qname, Name.Variable, '#pop'), - ], - 'qname_braren': [ - include('whitespace'), - (r'(\{)', pushstate_operator_root_callback), - (r'(\()', Punctuation, 'root'), - ], - 'element_qname': [ - (qname, Name.Variable, 'root'), - ], - 'attribute_qname': [ - (qname, Name.Variable, 'root'), - ], - 'root': [ - include('whitespace'), - (r'\(:', Comment, 'comment'), - - # handle operator state - # order on numbers matters - handle most complex first - (r'\d+(\.\d*)?[eE][\+\-]?\d+', Number.Double, 'operator'), - (r'(\.\d+)[eE][\+\-]?\d+', Number.Double, 'operator'), - (r'(\.\d+|\d+\.\d*)', Number, 'operator'), - (r'(\d+)', Number.Integer, 'operator'), - (r'(\.\.|\.|\))', Punctuation, 'operator'), - 
(r'(declare)(\s+)(construction)', - bygroups(Keyword, Text, Keyword), 'operator'), - (r'(declare)(\s+)(default)(\s+)(order)', - bygroups(Keyword, Text, Keyword, Text, Keyword), 'operator'), - (ncname + ':\*', Name, 'operator'), - ('\*:'+ncname, Name.Tag, 'operator'), - ('\*', Name.Tag, 'operator'), - (stringdouble, String.Double, 'operator'), - (stringsingle, String.Single, 'operator'), - - (r'(\})', popstate_callback), - - #NAMESPACE DECL - (r'(declare)(\s+)(default)(\s+)(collation)', - bygroups(Keyword, Text, Keyword, Text, Keyword)), - (r'(module|declare)(\s+)(namespace)', - bygroups(Keyword, Text, Keyword), 'namespacedecl'), - (r'(declare)(\s+)(base-uri)', - bygroups(Keyword, Text, Keyword), 'namespacedecl'), - - #NAMESPACE KEYWORD - (r'(declare)(\s+)(default)(\s+)(element|function)', - bygroups(Keyword, Text, Keyword, Text, Keyword), 'namespacekeyword'), - (r'(import)(\s+)(schema|module)', - bygroups(Keyword.Pseudo, Text, Keyword.Pseudo), 'namespacekeyword'), - (r'(declare)(\s+)(copy-namespaces)', - bygroups(Keyword, Text, Keyword), 'namespacekeyword'), - - #VARNAMEs - (r'(for|let|some|every)(\s+)(\$)', - bygroups(Keyword, Text, Name.Variable), 'varname'), - (r'\$', Name.Variable, 'varname'), - (r'(declare)(\s+)(variable)(\s+)(\$)', - bygroups(Keyword, Text, Keyword, Text, Name.Variable), 'varname'), - - #ITEMTYPE - (r'(\))(\s+)(as)', bygroups(Operator, Text, Keyword), 'itemtype'), - - (r'(element|attribute|schema-element|schema-attribute|comment|' - r'text|node|document-node|empty-sequence)(\s+)(\()', - pushstate_operator_kindtest_callback), - - (r'(processing-instruction)(\s+)(\()', - pushstate_operator_kindtestforpi_callback), - - (r'(', Comment, '#pop'), + ('-', Comment), + ], + 'tag': [ + (r'\s+', Text), + (r'[\w.:-]+\s*=', Name.Attribute, 'attr'), + (r'/?\s*>', Name.Tag, '#pop'), + ], + 'attr': [ + ('\s+', Text), + ('".*?"', String, '#pop'), + ("'.*?'", String, '#pop'), + (r'[^\s>]+', String, '#pop'), + ], + } diff --git 
a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/agile.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/agile.py new file mode 100644 index 0000000..7ad60c8 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/agile.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.agile + ~~~~~~~~~~~~~~~~~~~~~ + + Just export lexer classes previously contained in this module. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexers.lisp import SchemeLexer +from pygments.lexers.jvm import IokeLexer, ClojureLexer +from pygments.lexers.python import PythonLexer, PythonConsoleLexer, \ + PythonTracebackLexer, Python3Lexer, Python3TracebackLexer, DgLexer +from pygments.lexers.ruby import RubyLexer, RubyConsoleLexer, FancyLexer +from pygments.lexers.perl import PerlLexer, Perl6Lexer +from pygments.lexers.d import CrocLexer, MiniDLexer +from pygments.lexers.iolang import IoLexer +from pygments.lexers.tcl import TclLexer +from pygments.lexers.factor import FactorLexer +from pygments.lexers.scripting import LuaLexer, MoonScriptLexer + +__all__ = [] diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/algebra.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/algebra.py new file mode 100644 index 0000000..6bb1b08 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/algebra.py @@ -0,0 +1,187 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.algebra + ~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for computer algebra systems. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import re + +from pygments.lexer import RegexLexer, bygroups, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['GAPLexer', 'MathematicaLexer', 'MuPADLexer'] + + +class GAPLexer(RegexLexer): + """ + For `GAP `_ source code. + + .. versionadded:: 2.0 + """ + name = 'GAP' + aliases = ['gap'] + filenames = ['*.g', '*.gd', '*.gi', '*.gap'] + + tokens = { + 'root': [ + (r'#.*$', Comment.Single), + (r'"(?:[^"\\]|\\.)*"', String), + (r'\(|\)|\[|\]|\{|\}', Punctuation), + (r'''(?x)\b(?: + if|then|elif|else|fi| + for|while|do|od| + repeat|until| + break|continue| + function|local|return|end| + rec| + quit|QUIT| + IsBound|Unbind| + TryNextMethod| + Info|Assert + )\b''', Keyword), + (r'''(?x)\b(?: + true|false|fail|infinity + )\b''', + Name.Constant), + (r'''(?x)\b(?: + (Declare|Install)([A-Z][A-Za-z]+)| + BindGlobal|BIND_GLOBAL + )\b''', + Name.Builtin), + (r'\.|,|:=|;|=|\+|-|\*|/|\^|>|<', Operator), + (r'''(?x)\b(?: + and|or|not|mod|in + )\b''', + Operator.Word), + (r'''(?x) + (?:\w+|`[^`]*`) + (?:::\w+|`[^`]*`)*''', Name.Variable), + (r'[0-9]+(?:\.[0-9]*)?(?:e[0-9]+)?', Number), + (r'\.[0-9]+(?:e[0-9]+)?', Number), + (r'.', Text) + ] + } + + +class MathematicaLexer(RegexLexer): + """ + Lexer for `Mathematica `_ source code. + + .. 
versionadded:: 2.0 + """ + name = 'Mathematica' + aliases = ['mathematica', 'mma', 'nb'] + filenames = ['*.nb', '*.cdf', '*.nbp', '*.ma'] + mimetypes = ['application/mathematica', + 'application/vnd.wolfram.mathematica', + 'application/vnd.wolfram.mathematica.package', + 'application/vnd.wolfram.cdf'] + + # http://reference.wolfram.com/mathematica/guide/Syntax.html + operators = ( + ";;", "=", "=.", "!=" "==", ":=", "->", ":>", "/.", "+", "-", "*", "/", + "^", "&&", "||", "!", "<>", "|", "/;", "?", "@", "//", "/@", "@@", + "@@@", "~~", "===", "&", "<", ">", "<=", ">=", + ) + + punctuation = (",", ";", "(", ")", "[", "]", "{", "}") + + def _multi_escape(entries): + return '(%s)' % ('|'.join(re.escape(entry) for entry in entries)) + + tokens = { + 'root': [ + (r'(?s)\(\*.*?\*\)', Comment), + + (r'([a-zA-Z]+[A-Za-z0-9]*`)', Name.Namespace), + (r'([A-Za-z0-9]*_+[A-Za-z0-9]*)', Name.Variable), + (r'#\d*', Name.Variable), + (r'([a-zA-Z]+[a-zA-Z0-9]*)', Name), + + (r'-?[0-9]+\.[0-9]*', Number.Float), + (r'-?[0-9]*\.[0-9]+', Number.Float), + (r'-?[0-9]+', Number.Integer), + + (words(operators), Operator), + (words(punctuation), Punctuation), + (r'".*?"', String), + (r'\s+', Text.Whitespace), + ], + } + + +class MuPADLexer(RegexLexer): + """ + A `MuPAD `_ lexer. + Contributed by Christopher Creutzig . + + .. 
versionadded:: 0.8 + """ + name = 'MuPAD' + aliases = ['mupad'] + filenames = ['*.mu'] + + tokens = { + 'root': [ + (r'//.*?$', Comment.Single), + (r'/\*', Comment.Multiline, 'comment'), + (r'"(?:[^"\\]|\\.)*"', String), + (r'\(|\)|\[|\]|\{|\}', Punctuation), + (r'''(?x)\b(?: + next|break|end| + axiom|end_axiom|category|end_category|domain|end_domain|inherits| + if|%if|then|elif|else|end_if| + case|of|do|otherwise|end_case| + while|end_while| + repeat|until|end_repeat| + for|from|to|downto|step|end_for| + proc|local|option|save|begin|end_proc| + delete|frame + )\b''', Keyword), + (r'''(?x)\b(?: + DOM_ARRAY|DOM_BOOL|DOM_COMPLEX|DOM_DOMAIN|DOM_EXEC|DOM_EXPR| + DOM_FAIL|DOM_FLOAT|DOM_FRAME|DOM_FUNC_ENV|DOM_HFARRAY|DOM_IDENT| + DOM_INT|DOM_INTERVAL|DOM_LIST|DOM_NIL|DOM_NULL|DOM_POLY|DOM_PROC| + DOM_PROC_ENV|DOM_RAT|DOM_SET|DOM_STRING|DOM_TABLE|DOM_VAR + )\b''', Name.Class), + (r'''(?x)\b(?: + PI|EULER|E|CATALAN| + NIL|FAIL|undefined|infinity| + TRUE|FALSE|UNKNOWN + )\b''', + Name.Constant), + (r'\b(?:dom|procname)\b', Name.Builtin.Pseudo), + (r'\.|,|:|;|=|\+|-|\*|/|\^|@|>|<|\$|\||!|\'|%|~=', Operator), + (r'''(?x)\b(?: + and|or|not|xor| + assuming| + div|mod| + union|minus|intersect|in|subset + )\b''', + Operator.Word), + (r'\b(?:I|RDN_INF|RD_NINF|RD_NAN)\b', Number), + # (r'\b(?:adt|linalg|newDomain|hold)\b', Name.Builtin), + (r'''(?x) + ((?:[a-zA-Z_#][\w#]*|`[^`]*`) + (?:::[a-zA-Z_#][\w#]*|`[^`]*`)*)(\s*)([(])''', + bygroups(Name.Function, Text, Punctuation)), + (r'''(?x) + (?:[a-zA-Z_#][\w#]*|`[^`]*`) + (?:::[a-zA-Z_#][\w#]*|`[^`]*`)*''', Name.Variable), + (r'[0-9]+(?:\.[0-9]*)?(?:e[0-9]+)?', Number), + (r'\.[0-9]+(?:e[0-9]+)?', Number), + (r'.', Text) + ], + 'comment': [ + (r'[^*/]', Comment.Multiline), + (r'/\*', Comment.Multiline, '#push'), + (r'\*/', Comment.Multiline, '#pop'), + (r'[*/]', Comment.Multiline) + ] + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/ambient.py 
b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/ambient.py new file mode 100644 index 0000000..ff8a1f6 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/ambient.py @@ -0,0 +1,76 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.ambient + ~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for AmbientTalk language. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['AmbientTalkLexer'] + + +class AmbientTalkLexer(RegexLexer): + """ + Lexer for `AmbientTalk `_ source code. + + .. versionadded:: 2.0 + """ + name = 'AmbientTalk' + filenames = ['*.at'] + aliases = ['at', 'ambienttalk', 'ambienttalk/2'] + mimetypes = ['text/x-ambienttalk'] + + flags = re.MULTILINE | re.DOTALL + + builtin = words(('if:', 'then:', 'else:', 'when:', 'whenever:', 'discovered:', + 'disconnected:', 'reconnected:', 'takenOffline:', 'becomes:', + 'export:', 'as:', 'object:', 'actor:', 'mirror:', 'taggedAs:', + 'mirroredBy:', 'is:')) + tokens = { + 'root': [ + (r'\s+', Text), + (r'//.*?\n', Comment.Single), + (r'/\*.*?\*/', Comment.Multiline), + (r'(def|deftype|import|alias|exclude)\b', Keyword), + (builtin, Name.Builtin), + (r'(true|false|nil)\b', Keyword.Constant), + (r'(~|lobby|jlobby|/)\.', Keyword.Constant, 'namespace'), + (r'"(\\\\|\\"|[^"])*"', String), + (r'\|', Punctuation, 'arglist'), + (r'<:|[*^!%&<>+=,./?-]|:=', Operator), + (r"`[a-zA-Z_]\w*", String.Symbol), + (r"[a-zA-Z_]\w*:", Name.Function), + (r"[{}()\[\];`]", Punctuation), + (r'(self|super)\b', Name.Variable.Instance), + (r"[a-zA-Z_]\w*", Name.Variable), + (r"@[a-zA-Z_]\w*", Name.Class), + (r"@\[", Name.Class, 'annotations'), + include('numbers'), + ], + 'numbers': [ + (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float), + 
(r'\d+', Number.Integer) + ], + 'namespace': [ + (r'[a-zA-Z_]\w*\.', Name.Namespace), + (r'[a-zA-Z_]\w*:', Name.Function, '#pop'), + (r'[a-zA-Z_]\w*(?!\.)', Name.Function, '#pop') + ], + 'annotations': [ + (r"(.*?)\]", Name.Class, '#pop') + ], + 'arglist': [ + (r'\|', Punctuation, '#pop'), + (r'\s*(,)\s*', Punctuation), + (r'[a-zA-Z_]\w*', Name.Variable), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/apl.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/apl.py new file mode 100644 index 0000000..d29133e --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/apl.py @@ -0,0 +1,101 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.apl + ~~~~~~~~~~~~~~~~~~~ + + Lexers for APL. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['APLLexer'] + + +class APLLexer(RegexLexer): + """ + A simple APL lexer. + + .. 
versionadded:: 2.0 + """ + name = 'APL' + aliases = ['apl'] + filenames = ['*.apl'] + + tokens = { + 'root': [ + # Whitespace + # ========== + (r'\s+', Text), + # + # Comment + # ======= + # '⍝' is traditional; '#' is supported by GNU APL and NGN (but not Dyalog) + (u'[⍝#].*$', Comment.Single), + # + # Strings + # ======= + (r'\'((\'\')|[^\'])*\'', String.Single), + (r'"(("")|[^"])*"', String.Double), # supported by NGN APL + # + # Punctuation + # =========== + # This token type is used for diamond and parenthesis + # but not for bracket and ; (see below) + (u'[⋄◇()]', Punctuation), + # + # Array indexing + # ============== + # Since this token type is very important in APL, it is not included in + # the punctuation token type but rather in the following one + (r'[\[\];]', String.Regex), + # + # Distinguished names + # =================== + # following IBM APL2 standard + (u'⎕[A-Za-zΔ∆⍙][A-Za-zΔ∆⍙_¯0-9]*', Name.Function), + # + # Labels + # ====== + # following IBM APL2 standard + # (u'[A-Za-zΔ∆⍙][A-Za-zΔ∆⍙_¯0-9]*:', Name.Label), + # + # Variables + # ========= + # following IBM APL2 standard + (u'[A-Za-zΔ∆⍙][A-Za-zΔ∆⍙_¯0-9]*', Name.Variable), + # + # Numbers + # ======= + (u'¯?(0[Xx][0-9A-Fa-f]+|[0-9]*\.?[0-9]+([Ee][+¯]?[0-9]+)?|¯|∞)' + u'([Jj]¯?(0[Xx][0-9A-Fa-f]+|[0-9]*\.?[0-9]+([Ee][+¯]?[0-9]+)?|¯|∞))?', + Number), + # + # Operators + # ========== + (u'[\.\\\/⌿⍀¨⍣⍨⍠⍤∘]', Name.Attribute), # closest token type + (u'[+\-×÷⌈⌊∣|⍳?*⍟○!⌹<≤=>≥≠≡≢∊⍷∪∩~∨∧⍱⍲⍴,⍪⌽⊖⍉↑↓⊂⊃⌷⍋⍒⊤⊥⍕⍎⊣⊢⍁⍂≈⌸⍯↗]', + Operator), + # + # Constant + # ======== + (u'⍬', Name.Constant), + # + # Quad symbol + # =========== + (u'[⎕⍞]', Name.Variable.Global), + # + # Arrows left/right + # ================= + (u'[←→]', Keyword.Declaration), + # + # D-Fn + # ==== + (u'[⍺⍵⍶⍹∇:]', Name.Builtin.Pseudo), + (r'[{}]', Keyword.Type), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/asm.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/asm.py 
similarity index 79% rename from plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/asm.py rename to plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/asm.py index 3f67862..7745f9c 100644 --- a/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/asm.py +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/asm.py @@ -5,19 +5,21 @@ Lexers for assembly languages. - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, include, bygroups, using, DelegatingLexer -from pygments.lexers.compiled import DLexer, CppLexer, CLexer +from pygments.lexers.c_cpp import CppLexer, CLexer +from pygments.lexers.d import DLexer from pygments.token import Text, Name, Number, String, Comment, Punctuation, \ - Other, Keyword, Operator + Other, Keyword, Operator -__all__ = ['GasLexer', 'ObjdumpLexer','DObjdumpLexer', 'CppObjdumpLexer', - 'CObjdumpLexer', 'LlvmLexer', 'NasmLexer', 'Ca65Lexer'] +__all__ = ['GasLexer', 'ObjdumpLexer', 'DObjdumpLexer', 'CppObjdumpLexer', + 'CObjdumpLexer', 'LlvmLexer', 'NasmLexer', 'NasmObjdumpLexer', + 'Ca65Lexer'] class GasLexer(RegexLexer): @@ -31,7 +33,7 @@ class GasLexer(RegexLexer): #: optional Comment or Whitespace string = r'"(\\"|[^"])*"' - char = r'[a-zA-Z$._0-9@-]' + char = r'[\w$.@-]' identifier = r'(?:[a-zA-Z$_]' + char + '*|\.' + char + '+)' number = r'(?:0[xX][a-zA-Z0-9]+|\d+)' @@ -96,18 +98,12 @@ class GasLexer(RegexLexer): return 0.1 -class ObjdumpLexer(RegexLexer): +def _objdump_lexer_tokens(asm_lexer): """ - For the output of 'objdump -dr' + Common objdump lexer tokens to wrap an ASM lexer. 
""" - name = 'objdump' - aliases = ['objdump'] - filenames = ['*.objdump'] - mimetypes = ['text/x-objdump'] - - hex = r'[0-9A-Za-z]' - - tokens = { + hex_re = r'[0-9A-Za-z]' + return { 'root': [ # File name & format: ('(.*?)(:)( +file format )(.*?)$', @@ -117,33 +113,33 @@ class ObjdumpLexer(RegexLexer): bygroups(Text, Name.Label, Punctuation)), # Function labels # (With offset) - ('('+hex+'+)( )(<)(.*?)([-+])(0[xX][A-Za-z0-9]+)(>:)$', + ('('+hex_re+'+)( )(<)(.*?)([-+])(0[xX][A-Za-z0-9]+)(>:)$', bygroups(Number.Hex, Text, Punctuation, Name.Function, Punctuation, Number.Hex, Punctuation)), # (Without offset) - ('('+hex+'+)( )(<)(.*?)(>:)$', + ('('+hex_re+'+)( )(<)(.*?)(>:)$', bygroups(Number.Hex, Text, Punctuation, Name.Function, Punctuation)), # Code line with disassembled instructions - ('( *)('+hex+r'+:)(\t)((?:'+hex+hex+' )+)( *\t)([a-zA-Z].*?)$', + ('( *)('+hex_re+r'+:)(\t)((?:'+hex_re+hex_re+' )+)( *\t)([a-zA-Z].*?)$', bygroups(Text, Name.Label, Text, Number.Hex, Text, - using(GasLexer))), + using(asm_lexer))), # Code line with ascii - ('( *)('+hex+r'+:)(\t)((?:'+hex+hex+' )+)( *)(.*?)$', + ('( *)('+hex_re+r'+:)(\t)((?:'+hex_re+hex_re+' )+)( *)(.*?)$', bygroups(Text, Name.Label, Text, Number.Hex, Text, String)), # Continued code line, only raw opcodes without disassembled # instruction - ('( *)('+hex+r'+:)(\t)((?:'+hex+hex+' )+)$', + ('( *)('+hex_re+r'+:)(\t)((?:'+hex_re+hex_re+' )+)$', bygroups(Text, Name.Label, Text, Number.Hex)), # Skipped a few bytes (r'\t\.\.\.$', Text), # Relocation line # (With offset) - (r'(\t\t\t)('+hex+r'+:)( )([^\t]+)(\t)(.*?)([-+])(0x' + hex + '+)$', + (r'(\t\t\t)('+hex_re+r'+:)( )([^\t]+)(\t)(.*?)([-+])(0x'+hex_re+'+)$', bygroups(Text, Name.Label, Text, Name.Property, Text, Name.Constant, Punctuation, Number.Hex)), # (Without offset) - (r'(\t\t\t)('+hex+r'+:)( )([^\t]+)(\t)(.*?)$', + (r'(\t\t\t)('+hex_re+r'+:)( )([^\t]+)(\t)(.*?)$', bygroups(Text, Name.Label, Text, Name.Property, Text, Name.Constant)), (r'[^\n]+\n', Other) @@ 
-151,6 +147,18 @@ class ObjdumpLexer(RegexLexer): } +class ObjdumpLexer(RegexLexer): + """ + For the output of 'objdump -dr' + """ + name = 'objdump' + aliases = ['objdump'] + filenames = ['*.objdump'] + mimetypes = ['text/x-objdump'] + + tokens = _objdump_lexer_tokens(GasLexer) + + class DObjdumpLexer(DelegatingLexer): """ For the output of 'objdump -Sr on compiled D files' @@ -201,7 +209,7 @@ class LlvmLexer(RegexLexer): #: optional Comment or Whitespace string = r'"[^"]*?"' - identifier = r'([-a-zA-Z$._][-a-zA-Z$._0-9]*|' + string + ')' + identifier = r'([-a-zA-Z$._][\w\-$.]*|' + string + ')' tokens = { 'root': [ @@ -212,10 +220,11 @@ class LlvmLexer(RegexLexer): include('keyword'), - (r'%' + identifier, Name.Variable),#Name.Identifier.Local), - (r'@' + identifier, Name.Variable.Global),#Name.Identifier.Global), - (r'%\d+', Name.Variable.Anonymous),#Name.Identifier.Anonymous), - (r'@\d+', Name.Variable.Global),#Name.Identifier.Anonymous), + (r'%' + identifier, Name.Variable), + (r'@' + identifier, Name.Variable.Global), + (r'%\d+', Name.Variable.Anonymous), + (r'@\d+', Name.Variable.Global), + (r'#\d+', Name.Variable.Global), (r'!' + identifier, Name.Variable), (r'!\d+', Name.Variable.Anonymous), (r'c?' 
+ string, String), @@ -242,17 +251,24 @@ class LlvmLexer(RegexLexer): r'|thread_local|zeroinitializer|undef|null|to|tail|target|triple' r'|datalayout|volatile|nuw|nsw|nnan|ninf|nsz|arcp|fast|exact|inbounds' r'|align|addrspace|section|alias|module|asm|sideeffect|gc|dbg' + r'|linker_private_weak' + r'|attributes|blockaddress|initialexec|localdynamic|localexec' + r'|prefix|unnamed_addr' r'|ccc|fastcc|coldcc|x86_stdcallcc|x86_fastcallcc|arm_apcscc' r'|arm_aapcscc|arm_aapcs_vfpcc|ptx_device|ptx_kernel' + r'|intel_ocl_bicc|msp430_intrcc|spir_func|spir_kernel' + r'|x86_64_sysvcc|x86_64_win64cc|x86_thiscallcc' r'|cc|c' r'|signext|zeroext|inreg|sret|nounwind|noreturn|noalias|nocapture' r'|byval|nest|readnone|readonly' - r'|inlinehint|noinline|alwaysinline|optsize|ssp|sspreq|noredzone' r'|noimplicitfloat|naked' + r'|builtin|cold|nobuiltin|noduplicate|nonlazybind|optnone' + r'|returns_twice|sanitize_address|sanitize_memory|sanitize_thread' + r'|sspstrong|uwtable|returned' r'|type|opaque' @@ -261,24 +277,30 @@ class LlvmLexer(RegexLexer): r'|oeq|one|olt|ogt|ole' r'|oge|ord|uno|ueq|une' r'|x' + r'|acq_rel|acquire|alignstack|atomic|catch|cleanup|filter' + r'|inteldialect|max|min|monotonic|nand|personality|release' + r'|seq_cst|singlethread|umax|umin|unordered|xchg' # instructions r'|add|fadd|sub|fsub|mul|fmul|udiv|sdiv|fdiv|urem|srem|frem|shl' r'|lshr|ashr|and|or|xor|icmp|fcmp' r'|phi|call|trunc|zext|sext|fptrunc|fpext|uitofp|sitofp|fptoui' - r'fptosi|inttoptr|ptrtoint|bitcast|select|va_arg|ret|br|switch' + r'|fptosi|inttoptr|ptrtoint|bitcast|select|va_arg|ret|br|switch' r'|invoke|unwind|unreachable' + r'|indirectbr|landingpad|resume' r'|malloc|alloca|free|load|store|getelementptr' r'|extractelement|insertelement|shufflevector|getresult' r'|extractvalue|insertvalue' + r'|atomicrmw|cmpxchg|fence' + r')\b', Keyword), # Types - (r'void|float|double|x86_fp80|fp128|ppc_fp128|label|metadata', + (r'void|half|float|double|x86_fp80|fp128|ppc_fp128|label|metadata', Keyword.Type), # 
Integer types @@ -296,8 +318,8 @@ class NasmLexer(RegexLexer): filenames = ['*.asm', '*.ASM'] mimetypes = ['text/x-nasm'] - identifier = r'[a-zA-Z$._?][a-zA-Z0-9$._?#@~]*' - hexn = r'(?:0[xX][0-9a-fA-F]+|$0[0-9a-fA-F]*|[0-9]+[0-9a-fA-F]*h)' + identifier = r'[a-z$._?][\w$.?#@~]*' + hexn = r'(?:0x[0-9a-f]+|$0[0-9a-f]*|[0-9]+[0-9a-f]*h)' octn = r'[0-7]+q' binn = r'[01]+b' decn = r'[0-9]+' @@ -316,8 +338,8 @@ class NasmLexer(RegexLexer): flags = re.IGNORECASE | re.MULTILINE tokens = { 'root': [ - include('whitespace'), (r'^\s*%', Comment.Preproc, 'preproc'), + include('whitespace'), (identifier + ':', Name.Label), (r'(%s)(\s+)(equ)' % identifier, bygroups(Name.Constant, Keyword.Declaration, Keyword.Declaration), @@ -331,7 +353,7 @@ class NasmLexer(RegexLexer): (string, String), (hexn, Number.Hex), (octn, Number.Oct), - (binn, Number), + (binn, Number.Bin), (floatn, Number.Float), (decn, Number.Integer), include('punctuation'), @@ -360,13 +382,27 @@ class NasmLexer(RegexLexer): } +class NasmObjdumpLexer(ObjdumpLexer): + """ + For the output of 'objdump -d -M intel'. + + .. versionadded:: 2.0 + """ + name = 'objdump-nasm' + aliases = ['objdump-nasm'] + filenames = ['*.objdump-intel'] + mimetypes = ['text/x-nasm-objdump'] + + tokens = _objdump_lexer_tokens(NasmLexer) + + class Ca65Lexer(RegexLexer): """ For ca65 assembler sources. - *New in Pygments 1.6.* + .. 
versionadded:: 1.6 """ - name = 'ca65' + name = 'ca65 assembler' aliases = ['ca65'] filenames = ['*.s'] @@ -381,13 +417,14 @@ class Ca65Lexer(RegexLexer): r'|cl[cvdi]|se[cdi]|jmp|jsr|bne|beq|bpl|bmi|bvc|bvs|bcc|bcs' r'|p[lh][ap]|rt[is]|brk|nop|ta[xy]|t[xy]a|txs|tsx|and|ora|eor' r'|bit)\b', Keyword), - (r'\.[a-z0-9_]+', Keyword.Pseudo), + (r'\.\w+', Keyword.Pseudo), (r'[-+~*/^&|!<>=]', Operator), (r'"[^"\n]*.', String), (r"'[^'\n]*.", String.Char), (r'\$[0-9a-f]+|[0-9a-f]+h\b', Number.Hex), - (r'\d+|%[01]+', Number.Integer), - (r'[#,.:()=]', Punctuation), + (r'\d+', Number.Integer), + (r'%[01]+', Number.Bin), + (r'[#,.:()=\[\]]', Punctuation), (r'[a-z_.@$][\w.@$]*', Name), ] } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/automation.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/automation.py new file mode 100644 index 0000000..a66ceff --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/automation.py @@ -0,0 +1,373 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.automation + ~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for automation scripting languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, include, bygroups, combined +from pygments.token import Text, Comment, Operator, Name, String, \ + Number, Punctuation, Generic + +__all__ = ['AutohotkeyLexer', 'AutoItLexer'] + + +class AutohotkeyLexer(RegexLexer): + """ + For `autohotkey `_ source code. + + .. 
versionadded:: 1.4 + """ + name = 'autohotkey' + aliases = ['ahk', 'autohotkey'] + filenames = ['*.ahk', '*.ahkl'] + mimetypes = ['text/x-autohotkey'] + + tokens = { + 'root': [ + (r'^(\s*)(/\*)', bygroups(Text, Comment.Multiline), 'incomment'), + (r'^(\s*)(\()', bygroups(Text, Generic), 'incontinuation'), + (r'\s+;.*?$', Comment.Singleline), + (r'^;.*?$', Comment.Singleline), + (r'[]{}(),;[]', Punctuation), + (r'(in|is|and|or|not)\b', Operator.Word), + (r'\%[a-zA-Z_#@$][\w#@$]*\%', Name.Variable), + (r'!=|==|:=|\.=|<<|>>|[-~+/*%=<>&^|?:!.]', Operator), + include('commands'), + include('labels'), + include('builtInFunctions'), + include('builtInVariables'), + (r'"', String, combined('stringescape', 'dqs')), + include('numbers'), + (r'[a-zA-Z_#@$][\w#@$]*', Name), + (r'\\|\'', Text), + (r'\`([,%`abfnrtv\-+;])', String.Escape), + include('garbage'), + ], + 'incomment': [ + (r'^\s*\*/', Comment.Multiline, '#pop'), + (r'[^*/]', Comment.Multiline), + (r'[*/]', Comment.Multiline) + ], + 'incontinuation': [ + (r'^\s*\)', Generic, '#pop'), + (r'[^)]', Generic), + (r'[)]', Generic), + ], + 'commands': [ + (r'(?i)^(\s*)(global|local|static|' + r'#AllowSameLineComments|#ClipboardTimeout|#CommentFlag|' + r'#ErrorStdOut|#EscapeChar|#HotkeyInterval|#HotkeyModifierTimeout|' + r'#Hotstring|#IfWinActive|#IfWinExist|#IfWinNotActive|' + r'#IfWinNotExist|#IncludeAgain|#Include|#InstallKeybdHook|' + r'#InstallMouseHook|#KeyHistory|#LTrim|#MaxHotkeysPerInterval|' + r'#MaxMem|#MaxThreads|#MaxThreadsBuffer|#MaxThreadsPerHotkey|' + r'#NoEnv|#NoTrayIcon|#Persistent|#SingleInstance|#UseHook|' + r'#WinActivateForce|AutoTrim|BlockInput|Break|Click|ClipWait|' + r'Continue|Control|ControlClick|ControlFocus|ControlGetFocus|' + r'ControlGetPos|ControlGetText|ControlGet|ControlMove|ControlSend|' + r'ControlSendRaw|ControlSetText|CoordMode|Critical|' + r'DetectHiddenText|DetectHiddenWindows|Drive|DriveGet|' + r'DriveSpaceFree|Edit|Else|EnvAdd|EnvDiv|EnvGet|EnvMult|EnvSet|' + 
r'EnvSub|EnvUpdate|Exit|ExitApp|FileAppend|' + r'FileCopy|FileCopyDir|FileCreateDir|FileCreateShortcut|' + r'FileDelete|FileGetAttrib|FileGetShortcut|FileGetSize|' + r'FileGetTime|FileGetVersion|FileInstall|FileMove|FileMoveDir|' + r'FileRead|FileReadLine|FileRecycle|FileRecycleEmpty|' + r'FileRemoveDir|FileSelectFile|FileSelectFolder|FileSetAttrib|' + r'FileSetTime|FormatTime|GetKeyState|Gosub|Goto|GroupActivate|' + r'GroupAdd|GroupClose|GroupDeactivate|Gui|GuiControl|' + r'GuiControlGet|Hotkey|IfEqual|IfExist|IfGreaterOrEqual|IfGreater|' + r'IfInString|IfLess|IfLessOrEqual|IfMsgBox|IfNotEqual|IfNotExist|' + r'IfNotInString|IfWinActive|IfWinExist|IfWinNotActive|' + r'IfWinNotExist|If |ImageSearch|IniDelete|IniRead|IniWrite|' + r'InputBox|Input|KeyHistory|KeyWait|ListHotkeys|ListLines|' + r'ListVars|Loop|Menu|MouseClickDrag|MouseClick|MouseGetPos|' + r'MouseMove|MsgBox|OnExit|OutputDebug|Pause|PixelGetColor|' + r'PixelSearch|PostMessage|Process|Progress|Random|RegDelete|' + r'RegRead|RegWrite|Reload|Repeat|Return|RunAs|RunWait|Run|' + r'SendEvent|SendInput|SendMessage|SendMode|SendPlay|SendRaw|Send|' + r'SetBatchLines|SetCapslockState|SetControlDelay|' + r'SetDefaultMouseSpeed|SetEnv|SetFormat|SetKeyDelay|' + r'SetMouseDelay|SetNumlockState|SetScrollLockState|' + r'SetStoreCapslockMode|SetTimer|SetTitleMatchMode|' + r'SetWinDelay|SetWorkingDir|Shutdown|Sleep|Sort|SoundBeep|' + r'SoundGet|SoundGetWaveVolume|SoundPlay|SoundSet|' + r'SoundSetWaveVolume|SplashImage|SplashTextOff|SplashTextOn|' + r'SplitPath|StatusBarGetText|StatusBarWait|StringCaseSense|' + r'StringGetPos|StringLeft|StringLen|StringLower|StringMid|' + r'StringReplace|StringRight|StringSplit|StringTrimLeft|' + r'StringTrimRight|StringUpper|Suspend|SysGet|Thread|ToolTip|' + r'Transform|TrayTip|URLDownloadToFile|While|WinActivate|' + r'WinActivateBottom|WinClose|WinGetActiveStats|WinGetActiveTitle|' + r'WinGetClass|WinGetPos|WinGetText|WinGetTitle|WinGet|WinHide|' + 
r'WinKill|WinMaximize|WinMenuSelectItem|WinMinimizeAllUndo|' + r'WinMinimizeAll|WinMinimize|WinMove|WinRestore|WinSetTitle|' + r'WinSet|WinShow|WinWaitActive|WinWaitClose|WinWaitNotActive|' + r'WinWait)\b', bygroups(Text, Name.Builtin)), + ], + 'builtInFunctions': [ + (r'(?i)(Abs|ACos|Asc|ASin|ATan|Ceil|Chr|Cos|DllCall|Exp|FileExist|' + r'Floor|GetKeyState|IL_Add|IL_Create|IL_Destroy|InStr|IsFunc|' + r'IsLabel|Ln|Log|LV_Add|LV_Delete|LV_DeleteCol|LV_GetCount|' + r'LV_GetNext|LV_GetText|LV_Insert|LV_InsertCol|LV_Modify|' + r'LV_ModifyCol|LV_SetImageList|Mod|NumGet|NumPut|OnMessage|' + r'RegExMatch|RegExReplace|RegisterCallback|Round|SB_SetIcon|' + r'SB_SetParts|SB_SetText|Sin|Sqrt|StrLen|SubStr|Tan|TV_Add|' + r'TV_Delete|TV_GetChild|TV_GetCount|TV_GetNext|TV_Get|' + r'TV_GetParent|TV_GetPrev|TV_GetSelection|TV_GetText|TV_Modify|' + r'VarSetCapacity|WinActive|WinExist|Object|ComObjActive|' + r'ComObjArray|ComObjEnwrap|ComObjUnwrap|ComObjParameter|' + r'ComObjType|ComObjConnect|ComObjCreate|ComObjGet|ComObjError|' + r'ComObjValue|Insert|MinIndex|MaxIndex|Remove|SetCapacity|' + r'GetCapacity|GetAddress|_NewEnum|FileOpen|Read|Write|ReadLine|' + r'WriteLine|ReadNumType|WriteNumType|RawRead|RawWrite|Seek|Tell|' + r'Close|Next|IsObject|StrPut|StrGet|Trim|LTrim|RTrim)\b', + Name.Function), + ], + 'builtInVariables': [ + (r'(?i)(A_AhkPath|A_AhkVersion|A_AppData|A_AppDataCommon|' + r'A_AutoTrim|A_BatchLines|A_CaretX|A_CaretY|A_ComputerName|' + r'A_ControlDelay|A_Cursor|A_DDDD|A_DDD|A_DD|A_DefaultMouseSpeed|' + r'A_Desktop|A_DesktopCommon|A_DetectHiddenText|' + r'A_DetectHiddenWindows|A_EndChar|A_EventInfo|A_ExitReason|' + r'A_FormatFloat|A_FormatInteger|A_Gui|A_GuiEvent|A_GuiControl|' + r'A_GuiControlEvent|A_GuiHeight|A_GuiWidth|A_GuiX|A_GuiY|A_Hour|' + r'A_IconFile|A_IconHidden|A_IconNumber|A_IconTip|A_Index|' + r'A_IPAddress1|A_IPAddress2|A_IPAddress3|A_IPAddress4|A_ISAdmin|' + r'A_IsCompiled|A_IsCritical|A_IsPaused|A_IsSuspended|A_KeyDelay|' + 
r'A_Language|A_LastError|A_LineFile|A_LineNumber|A_LoopField|' + r'A_LoopFileAttrib|A_LoopFileDir|A_LoopFileExt|A_LoopFileFullPath|' + r'A_LoopFileLongPath|A_LoopFileName|A_LoopFileShortName|' + r'A_LoopFileShortPath|A_LoopFileSize|A_LoopFileSizeKB|' + r'A_LoopFileSizeMB|A_LoopFileTimeAccessed|A_LoopFileTimeCreated|' + r'A_LoopFileTimeModified|A_LoopReadLine|A_LoopRegKey|' + r'A_LoopRegName|A_LoopRegSubkey|A_LoopRegTimeModified|' + r'A_LoopRegType|A_MDAY|A_Min|A_MM|A_MMM|A_MMMM|A_Mon|A_MouseDelay|' + r'A_MSec|A_MyDocuments|A_Now|A_NowUTC|A_NumBatchLines|A_OSType|' + r'A_OSVersion|A_PriorHotkey|A_ProgramFiles|A_Programs|' + r'A_ProgramsCommon|A_ScreenHeight|A_ScreenWidth|A_ScriptDir|' + r'A_ScriptFullPath|A_ScriptName|A_Sec|A_Space|A_StartMenu|' + r'A_StartMenuCommon|A_Startup|A_StartupCommon|A_StringCaseSense|' + r'A_Tab|A_Temp|A_ThisFunc|A_ThisHotkey|A_ThisLabel|A_ThisMenu|' + r'A_ThisMenuItem|A_ThisMenuItemPos|A_TickCount|A_TimeIdle|' + r'A_TimeIdlePhysical|A_TimeSincePriorHotkey|A_TimeSinceThisHotkey|' + r'A_TitleMatchMode|A_TitleMatchModeSpeed|A_UserName|A_WDay|' + r'A_WinDelay|A_WinDir|A_WorkingDir|A_YDay|A_YEAR|A_YWeek|A_YYYY|' + r'Clipboard|ClipboardAll|ComSpec|ErrorLevel|ProgramFiles|True|' + r'False|A_IsUnicode|A_FileEncoding|A_OSVersion|A_PtrSize)\b', + Name.Variable), + ], + 'labels': [ + # hotkeys and labels + # technically, hotkey names are limited to named keys and buttons + (r'(^\s*)([^:\s("]+?:{1,2})', bygroups(Text, Name.Label)), + (r'(^\s*)(::[^:\s]+?::)', bygroups(Text, Name.Label)), + ], + 'numbers': [ + (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float), + (r'\d+[eE][+-]?[0-9]+', Number.Float), + (r'0\d+', Number.Oct), + (r'0[xX][a-fA-F0-9]+', Number.Hex), + (r'\d+L', Number.Integer.Long), + (r'\d+', Number.Integer) + ], + 'stringescape': [ + (r'\"\"|\`([,%`abfnrtv])', String.Escape), + ], + 'strings': [ + (r'[^"\n]+', String), + ], + 'dqs': [ + (r'"', String, '#pop'), + include('strings') + ], + 'garbage': [ + (r'[^\S\n]', Text), + # 
(r'.', Text), # no cheating + ], + } + + +class AutoItLexer(RegexLexer): + """ + For `AutoIt `_ files. + + AutoIt is a freeware BASIC-like scripting language + designed for automating the Windows GUI and general scripting + + .. versionadded:: 1.6 + """ + name = 'AutoIt' + aliases = ['autoit'] + filenames = ['*.au3'] + mimetypes = ['text/x-autoit'] + + # Keywords, functions, macros from au3.keywords.properties + # which can be found in AutoIt installed directory, e.g. + # c:\Program Files (x86)\AutoIt3\SciTE\au3.keywords.properties + + keywords = """\ + #include-once #include #endregion #forcedef #forceref #region + and byref case continueloop dim do else elseif endfunc endif + endselect exit exitloop for func global + if local next not or return select step + then to until wend while exit""".split() + + functions = """\ + abs acos adlibregister adlibunregister asc ascw asin assign atan + autoitsetoption autoitwingettitle autoitwinsettitle beep binary binarylen + binarymid binarytostring bitand bitnot bitor bitrotate bitshift bitxor + blockinput break call cdtray ceiling chr chrw clipget clipput consoleread + consolewrite consolewriteerror controlclick controlcommand controldisable + controlenable controlfocus controlgetfocus controlgethandle controlgetpos + controlgettext controlhide controllistview controlmove controlsend + controlsettext controlshow controltreeview cos dec dircopy dircreate + dirgetsize dirmove dirremove dllcall dllcalladdress dllcallbackfree + dllcallbackgetptr dllcallbackregister dllclose dllopen dllstructcreate + dllstructgetdata dllstructgetptr dllstructgetsize dllstructsetdata + drivegetdrive drivegetfilesystem drivegetlabel drivegetserial drivegettype + drivemapadd drivemapdel drivemapget drivesetlabel drivespacefree + drivespacetotal drivestatus envget envset envupdate eval execute exp + filechangedir fileclose filecopy filecreatentfslink filecreateshortcut + filedelete fileexists filefindfirstfile filefindnextfile fileflush + 
filegetattrib filegetencoding filegetlongname filegetpos filegetshortcut + filegetshortname filegetsize filegettime filegetversion fileinstall filemove + fileopen fileopendialog fileread filereadline filerecycle filerecycleempty + filesavedialog fileselectfolder filesetattrib filesetpos filesettime + filewrite filewriteline floor ftpsetproxy guicreate guictrlcreateavi + guictrlcreatebutton guictrlcreatecheckbox guictrlcreatecombo + guictrlcreatecontextmenu guictrlcreatedate guictrlcreatedummy + guictrlcreateedit guictrlcreategraphic guictrlcreategroup guictrlcreateicon + guictrlcreateinput guictrlcreatelabel guictrlcreatelist + guictrlcreatelistview guictrlcreatelistviewitem guictrlcreatemenu + guictrlcreatemenuitem guictrlcreatemonthcal guictrlcreateobj + guictrlcreatepic guictrlcreateprogress guictrlcreateradio + guictrlcreateslider guictrlcreatetab guictrlcreatetabitem + guictrlcreatetreeview guictrlcreatetreeviewitem guictrlcreateupdown + guictrldelete guictrlgethandle guictrlgetstate guictrlread guictrlrecvmsg + guictrlregisterlistviewsort guictrlsendmsg guictrlsendtodummy + guictrlsetbkcolor guictrlsetcolor guictrlsetcursor guictrlsetdata + guictrlsetdefbkcolor guictrlsetdefcolor guictrlsetfont guictrlsetgraphic + guictrlsetimage guictrlsetlimit guictrlsetonevent guictrlsetpos + guictrlsetresizing guictrlsetstate guictrlsetstyle guictrlsettip guidelete + guigetcursorinfo guigetmsg guigetstyle guiregistermsg guisetaccelerators + guisetbkcolor guisetcoord guisetcursor guisetfont guisethelp guiseticon + guisetonevent guisetstate guisetstyle guistartgroup guiswitch hex hotkeyset + httpsetproxy httpsetuseragent hwnd inetclose inetget inetgetinfo inetgetsize + inetread inidelete iniread inireadsection inireadsectionnames + inirenamesection iniwrite iniwritesection inputbox int isadmin isarray + isbinary isbool isdeclared isdllstruct isfloat ishwnd isint iskeyword + isnumber isobj isptr isstring log memgetstats mod mouseclick mouseclickdrag + mousedown 
mousegetcursor mousegetpos mousemove mouseup mousewheel msgbox + number objcreate objcreateinterface objevent objevent objget objname + onautoitexitregister onautoitexitunregister opt ping pixelchecksum + pixelgetcolor pixelsearch pluginclose pluginopen processclose processexists + processgetstats processlist processsetpriority processwait processwaitclose + progressoff progresson progressset ptr random regdelete regenumkey + regenumval regread regwrite round run runas runaswait runwait send + sendkeepactive seterror setextended shellexecute shellexecutewait shutdown + sin sleep soundplay soundsetwavevolume splashimageon splashoff splashtexton + sqrt srandom statusbargettext stderrread stdinwrite stdioclose stdoutread + string stringaddcr stringcompare stringformat stringfromasciiarray + stringinstr stringisalnum stringisalpha stringisascii stringisdigit + stringisfloat stringisint stringislower stringisspace stringisupper + stringisxdigit stringleft stringlen stringlower stringmid stringregexp + stringregexpreplace stringreplace stringright stringsplit stringstripcr + stringstripws stringtoasciiarray stringtobinary stringtrimleft + stringtrimright stringupper tan tcpaccept tcpclosesocket tcpconnect + tcplisten tcpnametoip tcprecv tcpsend tcpshutdown tcpstartup timerdiff + timerinit tooltip traycreateitem traycreatemenu traygetmsg trayitemdelete + trayitemgethandle trayitemgetstate trayitemgettext trayitemsetonevent + trayitemsetstate trayitemsettext traysetclick trayseticon traysetonevent + traysetpauseicon traysetstate traysettooltip traytip ubound udpbind + udpclosesocket udpopen udprecv udpsend udpshutdown udpstartup vargettype + winactivate winactive winclose winexists winflash wingetcaretpos + wingetclasslist wingetclientsize wingethandle wingetpos wingetprocess + wingetstate wingettext wingettitle winkill winlist winmenuselectitem + winminimizeall winminimizeallundo winmove winsetontop winsetstate + winsettitle winsettrans winwait winwaitactive winwaitclose 
+ winwaitnotactive""".split() + + macros = """\ + @appdatacommondir @appdatadir @autoitexe @autoitpid @autoitversion + @autoitx64 @com_eventobj @commonfilesdir @compiled @computername @comspec + @cpuarch @cr @crlf @desktopcommondir @desktopdepth @desktopdir + @desktopheight @desktoprefresh @desktopwidth @documentscommondir @error + @exitcode @exitmethod @extended @favoritescommondir @favoritesdir + @gui_ctrlhandle @gui_ctrlid @gui_dragfile @gui_dragid @gui_dropid + @gui_winhandle @homedrive @homepath @homeshare @hotkeypressed @hour + @ipaddress1 @ipaddress2 @ipaddress3 @ipaddress4 @kblayout @lf + @logondnsdomain @logondomain @logonserver @mday @min @mon @msec @muilang + @mydocumentsdir @numparams @osarch @osbuild @oslang @osservicepack @ostype + @osversion @programfilesdir @programscommondir @programsdir @scriptdir + @scriptfullpath @scriptlinenumber @scriptname @sec @startmenucommondir + @startmenudir @startupcommondir @startupdir @sw_disable @sw_enable @sw_hide + @sw_lock @sw_maximize @sw_minimize @sw_restore @sw_show @sw_showdefault + @sw_showmaximized @sw_showminimized @sw_showminnoactive @sw_showna + @sw_shownoactivate @sw_shownormal @sw_unlock @systemdir @tab @tempdir + @tray_id @trayiconflashing @trayiconvisible @username @userprofiledir @wday + @windowsdir @workingdir @yday @year""".split() + + tokens = { + 'root': [ + (r';.*\n', Comment.Single), + (r'(#comments-start|#cs).*?(#comments-end|#ce)', Comment.Multiline), + (r'[\[\]{}(),;]', Punctuation), + (r'(and|or|not)\b', Operator.Word), + (r'[$|@][a-zA-Z_]\w*', Name.Variable), + (r'!=|==|:=|\.=|<<|>>|[-~+/*%=<>&^|?:!.]', Operator), + include('commands'), + include('labels'), + include('builtInFunctions'), + include('builtInMarcros'), + (r'"', String, combined('stringescape', 'dqs')), + include('numbers'), + (r'[a-zA-Z_#@$][\w#@$]*', Name), + (r'\\|\'', Text), + (r'\`([,%`abfnrtv\-+;])', String.Escape), + (r'_\n', Text), # Line continuation + include('garbage'), + ], + 'commands': [ + (r'(?i)(\s*)(%s)\b' % 
'|'.join(keywords), + bygroups(Text, Name.Builtin)), + ], + 'builtInFunctions': [ + (r'(?i)(%s)\b' % '|'.join(functions), + Name.Function), + ], + 'builtInMarcros': [ + (r'(?i)(%s)\b' % '|'.join(macros), + Name.Variable.Global), + ], + 'labels': [ + # sendkeys + (r'(^\s*)(\{\S+?\})', bygroups(Text, Name.Label)), + ], + 'numbers': [ + (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float), + (r'\d+[eE][+-]?[0-9]+', Number.Float), + (r'0\d+', Number.Oct), + (r'0[xX][a-fA-F0-9]+', Number.Hex), + (r'\d+L', Number.Integer.Long), + (r'\d+', Number.Integer) + ], + 'stringescape': [ + (r'\"\"|\`([,%`abfnrtv])', String.Escape), + ], + 'strings': [ + (r'[^"\n]+', String), + ], + 'dqs': [ + (r'"', String, '#pop'), + include('strings') + ], + 'garbage': [ + (r'[^\S\n]', Text), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/basic.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/basic.py new file mode 100644 index 0000000..0ec459e --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/basic.py @@ -0,0 +1,500 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.basic + ~~~~~~~~~~~~~~~~~~~~~ + + Lexers for BASIC like languages (other than VB.net). + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, bygroups, default, words, include +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['BlitzBasicLexer', 'BlitzMaxLexer', 'MonkeyLexer', 'CbmBasicV2Lexer', + 'QBasicLexer'] + + +class BlitzMaxLexer(RegexLexer): + """ + For `BlitzMax `_ source code. + + .. 
versionadded:: 1.4 + """ + + name = 'BlitzMax' + aliases = ['blitzmax', 'bmax'] + filenames = ['*.bmx'] + mimetypes = ['text/x-bmx'] + + bmax_vopwords = r'\b(Shl|Shr|Sar|Mod)\b' + bmax_sktypes = r'@{1,2}|[!#$%]' + bmax_lktypes = r'\b(Int|Byte|Short|Float|Double|Long)\b' + bmax_name = r'[a-z_]\w*' + bmax_var = (r'(%s)(?:(?:([ \t]*)(%s)|([ \t]*:[ \t]*\b(?:Shl|Shr|Sar|Mod)\b)' + r'|([ \t]*)(:)([ \t]*)(?:%s|(%s)))(?:([ \t]*)(Ptr))?)') % \ + (bmax_name, bmax_sktypes, bmax_lktypes, bmax_name) + bmax_func = bmax_var + r'?((?:[ \t]|\.\.\n)*)([(])' + + flags = re.MULTILINE | re.IGNORECASE + tokens = { + 'root': [ + # Text + (r'[ \t]+', Text), + (r'\.\.\n', Text), # Line continuation + # Comments + (r"'.*?\n", Comment.Single), + (r'([ \t]*)\bRem\n(\n|.)*?\s*\bEnd([ \t]*)Rem', Comment.Multiline), + # Data types + ('"', String.Double, 'string'), + # Numbers + (r'[0-9]+\.[0-9]*(?!\.)', Number.Float), + (r'\.[0-9]*(?!\.)', Number.Float), + (r'[0-9]+', Number.Integer), + (r'\$[0-9a-f]+', Number.Hex), + (r'\%[10]+', Number.Bin), + # Other + (r'(?:(?:(:)?([ \t]*)(:?%s|([+\-*/&|~]))|Or|And|Not|[=<>^]))' % + (bmax_vopwords), Operator), + (r'[(),.:\[\]]', Punctuation), + (r'(?:#[\w \t]*)', Name.Label), + (r'(?:\?[\w \t]*)', Comment.Preproc), + # Identifiers + (r'\b(New)\b([ \t]?)([(]?)(%s)' % (bmax_name), + bygroups(Keyword.Reserved, Text, Punctuation, Name.Class)), + (r'\b(Import|Framework|Module)([ \t]+)(%s\.%s)' % + (bmax_name, bmax_name), + bygroups(Keyword.Reserved, Text, Keyword.Namespace)), + (bmax_func, bygroups(Name.Function, Text, Keyword.Type, + Operator, Text, Punctuation, Text, + Keyword.Type, Name.Class, Text, + Keyword.Type, Text, Punctuation)), + (bmax_var, bygroups(Name.Variable, Text, Keyword.Type, Operator, + Text, Punctuation, Text, Keyword.Type, + Name.Class, Text, Keyword.Type)), + (r'\b(Type|Extends)([ \t]+)(%s)' % (bmax_name), + bygroups(Keyword.Reserved, Text, Name.Class)), + # Keywords + (r'\b(Ptr)\b', Keyword.Type), + (r'\b(Pi|True|False|Null|Self|Super)\b', 
Keyword.Constant), + (r'\b(Local|Global|Const|Field)\b', Keyword.Declaration), + (words(( + 'TNullMethodException', 'TNullFunctionException', + 'TNullObjectException', 'TArrayBoundsException', + 'TRuntimeException'), prefix=r'\b', suffix=r'\b'), Name.Exception), + (words(( + 'Strict', 'SuperStrict', 'Module', 'ModuleInfo', + 'End', 'Return', 'Continue', 'Exit', 'Public', 'Private', + 'Var', 'VarPtr', 'Chr', 'Len', 'Asc', 'SizeOf', 'Sgn', 'Abs', 'Min', 'Max', + 'New', 'Release', 'Delete', 'Incbin', 'IncbinPtr', 'IncbinLen', + 'Framework', 'Include', 'Import', 'Extern', 'EndExtern', + 'Function', 'EndFunction', 'Type', 'EndType', 'Extends', 'Method', 'EndMethod', + 'Abstract', 'Final', 'If', 'Then', 'Else', 'ElseIf', 'EndIf', + 'For', 'To', 'Next', 'Step', 'EachIn', 'While', 'Wend', 'EndWhile', + 'Repeat', 'Until', 'Forever', 'Select', 'Case', 'Default', 'EndSelect', + 'Try', 'Catch', 'EndTry', 'Throw', 'Assert', 'Goto', 'DefData', 'ReadData', + 'RestoreData'), prefix=r'\b', suffix=r'\b'), + Keyword.Reserved), + # Final resolve (for variable names and such) + (r'(%s)' % (bmax_name), Name.Variable), + ], + 'string': [ + (r'""', String.Double), + (r'"C?', String.Double, '#pop'), + (r'[^"]+', String.Double), + ], + } + + +class BlitzBasicLexer(RegexLexer): + """ + For `BlitzBasic `_ source code. + + .. 
versionadded:: 2.0 + """ + + name = 'BlitzBasic' + aliases = ['blitzbasic', 'b3d', 'bplus'] + filenames = ['*.bb', '*.decls'] + mimetypes = ['text/x-bb'] + + bb_sktypes = r'@{1,2}|[#$%]' + bb_name = r'[a-z]\w*' + bb_var = (r'(%s)(?:([ \t]*)(%s)|([ \t]*)([.])([ \t]*)(?:(%s)))?') % \ + (bb_name, bb_sktypes, bb_name) + + flags = re.MULTILINE | re.IGNORECASE + tokens = { + 'root': [ + # Text + (r'[ \t]+', Text), + # Comments + (r";.*?\n", Comment.Single), + # Data types + ('"', String.Double, 'string'), + # Numbers + (r'[0-9]+\.[0-9]*(?!\.)', Number.Float), + (r'\.[0-9]+(?!\.)', Number.Float), + (r'[0-9]+', Number.Integer), + (r'\$[0-9a-f]+', Number.Hex), + (r'\%[10]+', Number.Bin), + # Other + (words(('Shl', 'Shr', 'Sar', 'Mod', 'Or', 'And', 'Not', + 'Abs', 'Sgn', 'Handle', 'Int', 'Float', 'Str', + 'First', 'Last', 'Before', 'After'), + prefix=r'\b', suffix=r'\b'), + Operator), + (r'([+\-*/~=<>^])', Operator), + (r'[(),:\[\]\\]', Punctuation), + (r'\.([ \t]*)(%s)' % bb_name, Name.Label), + # Identifiers + (r'\b(New)\b([ \t]+)(%s)' % (bb_name), + bygroups(Keyword.Reserved, Text, Name.Class)), + (r'\b(Gosub|Goto)\b([ \t]+)(%s)' % (bb_name), + bygroups(Keyword.Reserved, Text, Name.Label)), + (r'\b(Object)\b([ \t]*)([.])([ \t]*)(%s)\b' % (bb_name), + bygroups(Operator, Text, Punctuation, Text, Name.Class)), + (r'\b%s\b([ \t]*)(\()' % bb_var, + bygroups(Name.Function, Text, Keyword.Type, Text, Punctuation, + Text, Name.Class, Text, Punctuation)), + (r'\b(Function)\b([ \t]+)%s' % bb_var, + bygroups(Keyword.Reserved, Text, Name.Function, Text, Keyword.Type, + Text, Punctuation, Text, Name.Class)), + (r'\b(Type)([ \t]+)(%s)' % (bb_name), + bygroups(Keyword.Reserved, Text, Name.Class)), + # Keywords + (r'\b(Pi|True|False|Null)\b', Keyword.Constant), + (r'\b(Local|Global|Const|Field|Dim)\b', Keyword.Declaration), + (words(( + 'End', 'Return', 'Exit', 'Chr', 'Len', 'Asc', 'New', 'Delete', 'Insert', + 'Include', 'Function', 'Type', 'If', 'Then', 'Else', 'ElseIf', 'EndIf', + 
'For', 'To', 'Next', 'Step', 'Each', 'While', 'Wend', + 'Repeat', 'Until', 'Forever', 'Select', 'Case', 'Default', + 'Goto', 'Gosub', 'Data', 'Read', 'Restore'), prefix=r'\b', suffix=r'\b'), + Keyword.Reserved), + # Final resolve (for variable names and such) + # (r'(%s)' % (bb_name), Name.Variable), + (bb_var, bygroups(Name.Variable, Text, Keyword.Type, + Text, Punctuation, Text, Name.Class)), + ], + 'string': [ + (r'""', String.Double), + (r'"C?', String.Double, '#pop'), + (r'[^"]+', String.Double), + ], + } + + +class MonkeyLexer(RegexLexer): + """ + For + `Monkey `_ + source code. + + .. versionadded:: 1.6 + """ + + name = 'Monkey' + aliases = ['monkey'] + filenames = ['*.monkey'] + mimetypes = ['text/x-monkey'] + + name_variable = r'[a-z_]\w*' + name_function = r'[A-Z]\w*' + name_constant = r'[A-Z_][A-Z0-9_]*' + name_class = r'[A-Z]\w*' + name_module = r'[a-z0-9_]*' + + keyword_type = r'(?:Int|Float|String|Bool|Object|Array|Void)' + # ? == Bool // % == Int // # == Float // $ == String + keyword_type_special = r'[?%#$]' + + flags = re.MULTILINE + + tokens = { + 'root': [ + # Text + (r'\s+', Text), + # Comments + (r"'.*", Comment), + (r'(?i)^#rem\b', Comment.Multiline, 'comment'), + # preprocessor directives + (r'(?i)^(?:#If|#ElseIf|#Else|#EndIf|#End|#Print|#Error)\b', Comment.Preproc), + # preprocessor variable (any line starting with '#' that is not a directive) + (r'^#', Comment.Preproc, 'variables'), + # String + ('"', String.Double, 'string'), + # Numbers + (r'[0-9]+\.[0-9]*(?!\.)', Number.Float), + (r'\.[0-9]+(?!\.)', Number.Float), + (r'[0-9]+', Number.Integer), + (r'\$[0-9a-fA-Z]+', Number.Hex), + (r'\%[10]+', Number.Bin), + # Native data types + (r'\b%s\b' % keyword_type, Keyword.Type), + # Exception handling + (r'(?i)\b(?:Try|Catch|Throw)\b', Keyword.Reserved), + (r'Throwable', Name.Exception), + # Builtins + (r'(?i)\b(?:Null|True|False)\b', Name.Builtin), + (r'(?i)\b(?:Self|Super)\b', Name.Builtin.Pseudo), + (r'\b(?:HOST|LANG|TARGET|CONFIG)\b', 
Name.Constant), + # Keywords + (r'(?i)^(Import)(\s+)(.*)(\n)', + bygroups(Keyword.Namespace, Text, Name.Namespace, Text)), + (r'(?i)^Strict\b.*\n', Keyword.Reserved), + (r'(?i)(Const|Local|Global|Field)(\s+)', + bygroups(Keyword.Declaration, Text), 'variables'), + (r'(?i)(New|Class|Interface|Extends|Implements)(\s+)', + bygroups(Keyword.Reserved, Text), 'classname'), + (r'(?i)(Function|Method)(\s+)', + bygroups(Keyword.Reserved, Text), 'funcname'), + (r'(?i)(?:End|Return|Public|Private|Extern|Property|' + r'Final|Abstract)\b', Keyword.Reserved), + # Flow Control stuff + (r'(?i)(?:If|Then|Else|ElseIf|EndIf|' + r'Select|Case|Default|' + r'While|Wend|' + r'Repeat|Until|Forever|' + r'For|To|Until|Step|EachIn|Next|' + r'Exit|Continue)\s+', Keyword.Reserved), + # not used yet + (r'(?i)\b(?:Module|Inline)\b', Keyword.Reserved), + # Array + (r'[\[\]]', Punctuation), + # Other + (r'<=|>=|<>|\*=|/=|\+=|-=|&=|~=|\|=|[-&*/^+=<>|~]', Operator), + (r'(?i)(?:Not|Mod|Shl|Shr|And|Or)', Operator.Word), + (r'[(){}!#,.:]', Punctuation), + # catch the rest + (r'%s\b' % name_constant, Name.Constant), + (r'%s\b' % name_function, Name.Function), + (r'%s\b' % name_variable, Name.Variable), + ], + 'funcname': [ + (r'(?i)%s\b' % name_function, Name.Function), + (r':', Punctuation, 'classname'), + (r'\s+', Text), + (r'\(', Punctuation, 'variables'), + (r'\)', Punctuation, '#pop') + ], + 'classname': [ + (r'%s\.' 
% name_module, Name.Namespace), + (r'%s\b' % keyword_type, Keyword.Type), + (r'%s\b' % name_class, Name.Class), + # array (of given size) + (r'(\[)(\s*)(\d*)(\s*)(\])', + bygroups(Punctuation, Text, Number.Integer, Text, Punctuation)), + # generics + (r'\s+(?!<)', Text, '#pop'), + (r'<', Punctuation, '#push'), + (r'>', Punctuation, '#pop'), + (r'\n', Text, '#pop'), + default('#pop') + ], + 'variables': [ + (r'%s\b' % name_constant, Name.Constant), + (r'%s\b' % name_variable, Name.Variable), + (r'%s' % keyword_type_special, Keyword.Type), + (r'\s+', Text), + (r':', Punctuation, 'classname'), + (r',', Punctuation, '#push'), + default('#pop') + ], + 'string': [ + (r'[^"~]+', String.Double), + (r'~q|~n|~r|~t|~z|~~', String.Escape), + (r'"', String.Double, '#pop'), + ], + 'comment': [ + (r'(?i)^#rem.*?', Comment.Multiline, "#push"), + (r'(?i)^#end.*?', Comment.Multiline, "#pop"), + (r'\n', Comment.Multiline), + (r'.+', Comment.Multiline), + ], + } + + +class CbmBasicV2Lexer(RegexLexer): + """ + For CBM BASIC V2 sources. + + .. versionadded:: 1.6 + """ + name = 'CBM BASIC V2' + aliases = ['cbmbas'] + filenames = ['*.bas'] + + flags = re.IGNORECASE + + tokens = { + 'root': [ + (r'rem.*\n', Comment.Single), + (r'\s+', Text), + (r'new|run|end|for|to|next|step|go(to|sub)?|on|return|stop|cont' + r'|if|then|input#?|read|wait|load|save|verify|poke|sys|print#?' 
+ r'|list|clr|cmd|open|close|get#?', Keyword.Reserved), + (r'data|restore|dim|let|def|fn', Keyword.Declaration), + (r'tab|spc|sgn|int|abs|usr|fre|pos|sqr|rnd|log|exp|cos|sin|tan|atn' + r'|peek|len|val|asc|(str|chr|left|right|mid)\$', Name.Builtin), + (r'[-+*/^<>=]', Operator), + (r'not|and|or', Operator.Word), + (r'"[^"\n]*.', String), + (r'\d+|[-+]?\d*\.\d*(e[-+]?\d+)?', Number.Float), + (r'[(),:;]', Punctuation), + (r'\w+[$%]?', Name), + ] + } + + def analyse_text(self, text): + # if it starts with a line number, it shouldn't be a "modern" Basic + # like VB.net + if re.match(r'\d+', text): + return 0.2 + + +class QBasicLexer(RegexLexer): + """ + For + `QBasic `_ + source code. + + .. versionadded:: 2.0 + """ + + name = 'QBasic' + aliases = ['qbasic', 'basic'] + filenames = ['*.BAS', '*.bas'] + mimetypes = ['text/basic'] + + declarations = ('DATA', 'LET') + + functions = ( + 'ABS', 'ASC', 'ATN', 'CDBL', 'CHR$', 'CINT', 'CLNG', + 'COMMAND$', 'COS', 'CSNG', 'CSRLIN', 'CVD', 'CVDMBF', 'CVI', + 'CVL', 'CVS', 'CVSMBF', 'DATE$', 'ENVIRON$', 'EOF', 'ERDEV', + 'ERDEV$', 'ERL', 'ERR', 'EXP', 'FILEATTR', 'FIX', 'FRE', + 'FREEFILE', 'HEX$', 'INKEY$', 'INP', 'INPUT$', 'INSTR', 'INT', + 'IOCTL$', 'LBOUND', 'LCASE$', 'LEFT$', 'LEN', 'LOC', 'LOF', + 'LOG', 'LPOS', 'LTRIM$', 'MID$', 'MKD$', 'MKDMBF$', 'MKI$', + 'MKL$', 'MKS$', 'MKSMBF$', 'OCT$', 'PEEK', 'PEN', 'PLAY', + 'PMAP', 'POINT', 'POS', 'RIGHT$', 'RND', 'RTRIM$', 'SADD', + 'SCREEN', 'SEEK', 'SETMEM', 'SGN', 'SIN', 'SPACE$', 'SPC', + 'SQR', 'STICK', 'STR$', 'STRIG', 'STRING$', 'TAB', 'TAN', + 'TIME$', 'TIMER', 'UBOUND', 'UCASE$', 'VAL', 'VARPTR', + 'VARPTR$', 'VARSEG' + ) + + metacommands = ('$DYNAMIC', '$INCLUDE', '$STATIC') + + operators = ('AND', 'EQV', 'IMP', 'NOT', 'OR', 'XOR') + + statements = ( + 'BEEP', 'BLOAD', 'BSAVE', 'CALL', 'CALL ABSOLUTE', + 'CALL INTERRUPT', 'CALLS', 'CHAIN', 'CHDIR', 'CIRCLE', 'CLEAR', + 'CLOSE', 'CLS', 'COLOR', 'COM', 'COMMON', 'CONST', 'DATA', + 'DATE$', 'DECLARE', 'DEF FN', 'DEF SEG', 
'DEFDBL', 'DEFINT', + 'DEFLNG', 'DEFSNG', 'DEFSTR', 'DEF', 'DIM', 'DO', 'LOOP', + 'DRAW', 'END', 'ENVIRON', 'ERASE', 'ERROR', 'EXIT', 'FIELD', + 'FILES', 'FOR', 'NEXT', 'FUNCTION', 'GET', 'GOSUB', 'GOTO', + 'IF', 'THEN', 'INPUT', 'INPUT #', 'IOCTL', 'KEY', 'KEY', + 'KILL', 'LET', 'LINE', 'LINE INPUT', 'LINE INPUT #', 'LOCATE', + 'LOCK', 'UNLOCK', 'LPRINT', 'LSET', 'MID$', 'MKDIR', 'NAME', + 'ON COM', 'ON ERROR', 'ON KEY', 'ON PEN', 'ON PLAY', + 'ON STRIG', 'ON TIMER', 'ON UEVENT', 'ON', 'OPEN', 'OPEN COM', + 'OPTION BASE', 'OUT', 'PAINT', 'PALETTE', 'PCOPY', 'PEN', + 'PLAY', 'POKE', 'PRESET', 'PRINT', 'PRINT #', 'PRINT USING', + 'PSET', 'PUT', 'PUT', 'RANDOMIZE', 'READ', 'REDIM', 'REM', + 'RESET', 'RESTORE', 'RESUME', 'RETURN', 'RMDIR', 'RSET', 'RUN', + 'SCREEN', 'SEEK', 'SELECT CASE', 'SHARED', 'SHELL', 'SLEEP', + 'SOUND', 'STATIC', 'STOP', 'STRIG', 'SUB', 'SWAP', 'SYSTEM', + 'TIME$', 'TIMER', 'TROFF', 'TRON', 'TYPE', 'UEVENT', 'UNLOCK', + 'VIEW', 'WAIT', 'WHILE', 'WEND', 'WIDTH', 'WINDOW', 'WRITE' + ) + + keywords = ( + 'ACCESS', 'ALIAS', 'ANY', 'APPEND', 'AS', 'BASE', 'BINARY', + 'BYVAL', 'CASE', 'CDECL', 'DOUBLE', 'ELSE', 'ELSEIF', 'ENDIF', + 'INTEGER', 'IS', 'LIST', 'LOCAL', 'LONG', 'LOOP', 'MOD', + 'NEXT', 'OFF', 'ON', 'OUTPUT', 'RANDOM', 'SIGNAL', 'SINGLE', + 'STEP', 'STRING', 'THEN', 'TO', 'UNTIL', 'USING', 'WEND' + ) + + tokens = { + 'root': [ + (r'\n+', Text), + (r'\s+', Text.Whitespace), + (r'^(\s*)(\d*)(\s*)(REM .*)$', + bygroups(Text.Whitespace, Name.Label, Text.Whitespace, + Comment.Single)), + (r'^(\s*)(\d+)(\s*)', + bygroups(Text.Whitespace, Name.Label, Text.Whitespace)), + (r'(?=[\s]*)(\w+)(?=[\s]*=)', Name.Variable.Global), + (r'(?=[^"]*)\'.*$', Comment.Single), + (r'"[^\n"]*"', String.Double), + (r'(END)(\s+)(FUNCTION|IF|SELECT|SUB)', + bygroups(Keyword.Reserved, Text.Whitespace, Keyword.Reserved)), + (r'(DECLARE)(\s+)([A-Z]+)(\s+)(\S+)', + bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable, + Text.Whitespace, Name)), + 
(r'(DIM)(\s+)(SHARED)(\s+)([^\s(]+)', + bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable, + Text.Whitespace, Name.Variable.Global)), + (r'(DIM)(\s+)([^\s(]+)', + bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable.Global)), + (r'^(\s*)([a-zA-Z_]+)(\s*)(\=)', + bygroups(Text.Whitespace, Name.Variable.Global, Text.Whitespace, + Operator)), + (r'(GOTO|GOSUB)(\s+)(\w+\:?)', + bygroups(Keyword.Reserved, Text.Whitespace, Name.Label)), + (r'(SUB)(\s+)(\w+\:?)', + bygroups(Keyword.Reserved, Text.Whitespace, Name.Label)), + include('declarations'), + include('functions'), + include('metacommands'), + include('operators'), + include('statements'), + include('keywords'), + (r'[a-zA-Z_]\w*[$@#&!]', Name.Variable.Global), + (r'[a-zA-Z_]\w*\:', Name.Label), + (r'\-?\d*\.\d+[@|#]?', Number.Float), + (r'\-?\d+[@|#]', Number.Float), + (r'\-?\d+#?', Number.Integer.Long), + (r'\-?\d+#?', Number.Integer), + (r'!=|==|:=|\.=|<<|>>|[-~+/\\*%=<>&^|?:!.]', Operator), + (r'[\[\]{}(),;]', Punctuation), + (r'[\w]+', Name.Variable.Global), + ], + # can't use regular \b because of X$() + # XXX: use words() here + 'declarations': [ + (r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, declarations)), + Keyword.Declaration), + ], + 'functions': [ + (r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, functions)), + Keyword.Reserved), + ], + 'metacommands': [ + (r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, metacommands)), + Keyword.Constant), + ], + 'operators': [ + (r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, operators)), Operator.Word), + ], + 'statements': [ + (r'\b(%s)\b' % '|'.join(map(re.escape, statements)), + Keyword.Reserved), + ], + 'keywords': [ + (r'\b(%s)\b' % '|'.join(keywords), Keyword), + ], + } + + def analyse_text(text): + if '$DYNAMIC' in text or '$STATIC' in text: + return 0.9 diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/business.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/business.py new file 
mode 100644 index 0000000..11f0093 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/business.py @@ -0,0 +1,592 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.business + ~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for "business-oriented" languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include, words, bygroups +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Error + +from pygments.lexers._openedge_builtins import OPENEDGEKEYWORDS + +__all__ = ['CobolLexer', 'CobolFreeformatLexer', 'ABAPLexer', 'OpenEdgeLexer', + 'GoodDataCLLexer', 'MaqlLexer'] + + +class CobolLexer(RegexLexer): + """ + Lexer for OpenCOBOL code. + + .. versionadded:: 1.6 + """ + name = 'COBOL' + aliases = ['cobol'] + filenames = ['*.cob', '*.COB', '*.cpy', '*.CPY'] + mimetypes = ['text/x-cobol'] + flags = re.IGNORECASE | re.MULTILINE + + # Data Types: by PICTURE and USAGE + # Operators: **, *, +, -, /, <, >, <=, >=, =, <> + # Logical (?): NOT, AND, OR + + # Reserved words: + # http://opencobol.add1tocobol.com/#reserved-words + # Intrinsics: + # http://opencobol.add1tocobol.com/#does-opencobol-implement-any-intrinsic-functions + + tokens = { + 'root': [ + include('comment'), + include('strings'), + include('core'), + include('nums'), + (r'[a-z0-9]([\w\-]*[a-z0-9]+)?', Name.Variable), + # (r'[\s]+', Text), + (r'[ \t]+', Text), + ], + 'comment': [ + (r'(^.{6}[*/].*\n|^.{6}|\*>.*\n)', Comment), + ], + 'core': [ + # Figurative constants + (r'(^|(?<=[^0-9a-z_\-]))(ALL\s+)?' 
+ r'((ZEROES)|(HIGH-VALUE|LOW-VALUE|QUOTE|SPACE|ZERO)(S)?)' + r'\s*($|(?=[^0-9a-z_\-]))', + Name.Constant), + + # Reserved words STATEMENTS and other bolds + (words(( + 'ACCEPT', 'ADD', 'ALLOCATE', 'CALL', 'CANCEL', 'CLOSE', 'COMPUTE', + 'CONFIGURATION', 'CONTINUE', 'DATA', 'DELETE', 'DISPLAY', 'DIVIDE', + 'DIVISION', 'ELSE', 'END', 'END-ACCEPT', + 'END-ADD', 'END-CALL', 'END-COMPUTE', 'END-DELETE', 'END-DISPLAY', + 'END-DIVIDE', 'END-EVALUATE', 'END-IF', 'END-MULTIPLY', 'END-OF-PAGE', + 'END-PERFORM', 'END-READ', 'END-RETURN', 'END-REWRITE', 'END-SEARCH', + 'END-START', 'END-STRING', 'END-SUBTRACT', 'END-UNSTRING', 'END-WRITE', + 'ENVIRONMENT', 'EVALUATE', 'EXIT', 'FD', 'FILE', 'FILE-CONTROL', 'FOREVER', + 'FREE', 'GENERATE', 'GO', 'GOBACK', 'IDENTIFICATION', 'IF', 'INITIALIZE', + 'INITIATE', 'INPUT-OUTPUT', 'INSPECT', 'INVOKE', 'I-O-CONTROL', 'LINKAGE', + 'LOCAL-STORAGE', 'MERGE', 'MOVE', 'MULTIPLY', 'OPEN', 'PERFORM', + 'PROCEDURE', 'PROGRAM-ID', 'RAISE', 'READ', 'RELEASE', 'RESUME', + 'RETURN', 'REWRITE', 'SCREEN', 'SD', 'SEARCH', 'SECTION', 'SET', + 'SORT', 'START', 'STOP', 'STRING', 'SUBTRACT', 'SUPPRESS', + 'TERMINATE', 'THEN', 'UNLOCK', 'UNSTRING', 'USE', 'VALIDATE', + 'WORKING-STORAGE', 'WRITE'), prefix=r'(^|(?<=[^0-9a-z_\-]))', + suffix=r'\s*($|(?=[^0-9a-z_\-]))'), + Keyword.Reserved), + + # Reserved words + (words(( + 'ACCESS', 'ADDRESS', 'ADVANCING', 'AFTER', 'ALL', + 'ALPHABET', 'ALPHABETIC', 'ALPHABETIC-LOWER', 'ALPHABETIC-UPPER', + 'ALPHANUMERIC', 'ALPHANUMERIC-EDITED', 'ALSO', 'ALTER', 'ALTERNATE' + 'ANY', 'ARE', 'AREA', 'AREAS', 'ARGUMENT-NUMBER', 'ARGUMENT-VALUE', 'AS', + 'ASCENDING', 'ASSIGN', 'AT', 'AUTO', 'AUTO-SKIP', 'AUTOMATIC', 'AUTOTERMINATE', + 'BACKGROUND-COLOR', 'BASED', 'BEEP', 'BEFORE', 'BELL', + 'BLANK', 'BLINK', 'BLOCK', 'BOTTOM', 'BY', 'BYTE-LENGTH', 'CHAINING', + 'CHARACTER', 'CHARACTERS', 'CLASS', 'CODE', 'CODE-SET', 'COL', 'COLLATING', + 'COLS', 'COLUMN', 'COLUMNS', 'COMMA', 'COMMAND-LINE', 'COMMIT', 'COMMON', + 'CONSTANT', 
'CONTAINS', 'CONTENT', 'CONTROL', + 'CONTROLS', 'CONVERTING', 'COPY', 'CORR', 'CORRESPONDING', 'COUNT', 'CRT', + 'CURRENCY', 'CURSOR', 'CYCLE', 'DATE', 'DAY', 'DAY-OF-WEEK', 'DE', 'DEBUGGING', + 'DECIMAL-POINT', 'DECLARATIVES', 'DEFAULT', 'DELIMITED', + 'DELIMITER', 'DEPENDING', 'DESCENDING', 'DETAIL', 'DISK', + 'DOWN', 'DUPLICATES', 'DYNAMIC', 'EBCDIC', + 'ENTRY', 'ENVIRONMENT-NAME', 'ENVIRONMENT-VALUE', 'EOL', 'EOP', + 'EOS', 'ERASE', 'ERROR', 'ESCAPE', 'EXCEPTION', + 'EXCLUSIVE', 'EXTEND', 'EXTERNAL', + 'FILE-ID', 'FILLER', 'FINAL', 'FIRST', 'FIXED', 'FLOAT-LONG', 'FLOAT-SHORT', + 'FOOTING', 'FOR', 'FOREGROUND-COLOR', 'FORMAT', 'FROM', 'FULL', 'FUNCTION', + 'FUNCTION-ID', 'GIVING', 'GLOBAL', 'GROUP', + 'HEADING', 'HIGHLIGHT', 'I-O', 'ID', + 'IGNORE', 'IGNORING', 'IN', 'INDEX', 'INDEXED', 'INDICATE', + 'INITIAL', 'INITIALIZED', 'INPUT', + 'INTO', 'INTRINSIC', 'INVALID', 'IS', 'JUST', 'JUSTIFIED', 'KEY', 'LABEL', + 'LAST', 'LEADING', 'LEFT', 'LENGTH', 'LIMIT', 'LIMITS', 'LINAGE', + 'LINAGE-COUNTER', 'LINE', 'LINES', 'LOCALE', 'LOCK', + 'LOWLIGHT', 'MANUAL', 'MEMORY', 'MINUS', 'MODE', + 'MULTIPLE', 'NATIONAL', 'NATIONAL-EDITED', 'NATIVE', + 'NEGATIVE', 'NEXT', 'NO', 'NULL', 'NULLS', 'NUMBER', 'NUMBERS', 'NUMERIC', + 'NUMERIC-EDITED', 'OBJECT-COMPUTER', 'OCCURS', 'OF', 'OFF', 'OMITTED', 'ON', 'ONLY', + 'OPTIONAL', 'ORDER', 'ORGANIZATION', 'OTHER', 'OUTPUT', 'OVERFLOW', + 'OVERLINE', 'PACKED-DECIMAL', 'PADDING', 'PAGE', 'PARAGRAPH', + 'PLUS', 'POINTER', 'POSITION', 'POSITIVE', 'PRESENT', 'PREVIOUS', + 'PRINTER', 'PRINTING', 'PROCEDURE-POINTER', 'PROCEDURES', + 'PROCEED', 'PROGRAM', 'PROGRAM-POINTER', 'PROMPT', 'QUOTE', + 'QUOTES', 'RANDOM', 'RD', 'RECORD', 'RECORDING', 'RECORDS', 'RECURSIVE', + 'REDEFINES', 'REEL', 'REFERENCE', 'RELATIVE', 'REMAINDER', 'REMOVAL', + 'RENAMES', 'REPLACING', 'REPORT', 'REPORTING', 'REPORTS', 'REPOSITORY', + 'REQUIRED', 'RESERVE', 'RETURNING', 'REVERSE-VIDEO', 'REWIND', + 'RIGHT', 'ROLLBACK', 'ROUNDED', 'RUN', 'SAME', 'SCROLL', + 
'SECURE', 'SEGMENT-LIMIT', 'SELECT', 'SENTENCE', 'SEPARATE', + 'SEQUENCE', 'SEQUENTIAL', 'SHARING', 'SIGN', 'SIGNED', 'SIGNED-INT', + 'SIGNED-LONG', 'SIGNED-SHORT', 'SIZE', 'SORT-MERGE', 'SOURCE', + 'SOURCE-COMPUTER', 'SPECIAL-NAMES', 'STANDARD', + 'STANDARD-1', 'STANDARD-2', 'STATUS', 'SUM', + 'SYMBOLIC', 'SYNC', 'SYNCHRONIZED', 'TALLYING', 'TAPE', + 'TEST', 'THROUGH', 'THRU', 'TIME', 'TIMES', 'TO', 'TOP', 'TRAILING', + 'TRANSFORM', 'TYPE', 'UNDERLINE', 'UNIT', 'UNSIGNED', + 'UNSIGNED-INT', 'UNSIGNED-LONG', 'UNSIGNED-SHORT', 'UNTIL', 'UP', + 'UPDATE', 'UPON', 'USAGE', 'USING', 'VALUE', 'VALUES', 'VARYING', + 'WAIT', 'WHEN', 'WITH', 'WORDS', 'YYYYDDD', 'YYYYMMDD'), + prefix=r'(^|(?<=[^0-9a-z_\-]))', suffix=r'\s*($|(?=[^0-9a-z_\-]))'), + Keyword.Pseudo), + + # inactive reserved words + (words(( + 'ACTIVE-CLASS', 'ALIGNED', 'ANYCASE', 'ARITHMETIC', 'ATTRIBUTE', 'B-AND', + 'B-NOT', 'B-OR', 'B-XOR', 'BIT', 'BOOLEAN', 'CD', 'CENTER', 'CF', 'CH', 'CHAIN', 'CLASS-ID', + 'CLASSIFICATION', 'COMMUNICATION', 'CONDITION', 'DATA-POINTER', + 'DESTINATION', 'DISABLE', 'EC', 'EGI', 'EMI', 'ENABLE', 'END-RECEIVE', + 'ENTRY-CONVENTION', 'EO', 'ESI', 'EXCEPTION-OBJECT', 'EXPANDS', 'FACTORY', + 'FLOAT-BINARY-16', 'FLOAT-BINARY-34', 'FLOAT-BINARY-7', + 'FLOAT-DECIMAL-16', 'FLOAT-DECIMAL-34', 'FLOAT-EXTENDED', 'FORMAT', + 'FUNCTION-POINTER', 'GET', 'GROUP-USAGE', 'IMPLEMENTS', 'INFINITY', + 'INHERITS', 'INTERFACE', 'INTERFACE-ID', 'INVOKE', 'LC_ALL', 'LC_COLLATE', + 'LC_CTYPE', 'LC_MESSAGES', 'LC_MONETARY', 'LC_NUMERIC', 'LC_TIME', + 'LINE-COUNTER', 'MESSAGE', 'METHOD', 'METHOD-ID', 'NESTED', 'NONE', 'NORMAL', + 'OBJECT', 'OBJECT-REFERENCE', 'OPTIONS', 'OVERRIDE', 'PAGE-COUNTER', 'PF', 'PH', + 'PROPERTY', 'PROTOTYPE', 'PURGE', 'QUEUE', 'RAISE', 'RAISING', 'RECEIVE', + 'RELATION', 'REPLACE', 'REPRESENTS-NOT-A-NUMBER', 'RESET', 'RESUME', 'RETRY', + 'RF', 'RH', 'SECONDS', 'SEGMENT', 'SELF', 'SEND', 'SOURCES', 'STATEMENT', 'STEP', + 'STRONG', 'SUB-QUEUE-1', 'SUB-QUEUE-2', 'SUB-QUEUE-3', 
'SUPER', 'SYMBOL', + 'SYSTEM-DEFAULT', 'TABLE', 'TERMINAL', 'TEXT', 'TYPEDEF', 'UCS-4', 'UNIVERSAL', + 'USER-DEFAULT', 'UTF-16', 'UTF-8', 'VAL-STATUS', 'VALID', 'VALIDATE', + 'VALIDATE-STATUS'), + prefix=r'(^|(?<=[^0-9a-z_\-]))', suffix=r'\s*($|(?=[^0-9a-z_\-]))'), + Error), + + # Data Types + (r'(^|(?<=[^0-9a-z_\-]))' + r'(PIC\s+.+?(?=(\s|\.\s))|PICTURE\s+.+?(?=(\s|\.\s))|' + r'(COMPUTATIONAL)(-[1-5X])?|(COMP)(-[1-5X])?|' + r'BINARY-C-LONG|' + r'BINARY-CHAR|BINARY-DOUBLE|BINARY-LONG|BINARY-SHORT|' + r'BINARY)\s*($|(?=[^0-9a-z_\-]))', Keyword.Type), + + # Operators + (r'(\*\*|\*|\+|-|/|<=|>=|<|>|==|/=|=)', Operator), + + # (r'(::)', Keyword.Declaration), + + (r'([(),;:&%.])', Punctuation), + + # Intrinsics + (r'(^|(?<=[^0-9a-z_\-]))(ABS|ACOS|ANNUITY|ASIN|ATAN|BYTE-LENGTH|' + r'CHAR|COMBINED-DATETIME|CONCATENATE|COS|CURRENT-DATE|' + r'DATE-OF-INTEGER|DATE-TO-YYYYMMDD|DAY-OF-INTEGER|DAY-TO-YYYYDDD|' + r'EXCEPTION-(?:FILE|LOCATION|STATEMENT|STATUS)|EXP10|EXP|E|' + r'FACTORIAL|FRACTION-PART|INTEGER-OF-(?:DATE|DAY|PART)|INTEGER|' + r'LENGTH|LOCALE-(?:DATE|TIME(?:-FROM-SECONDS)?)|LOG(?:10)?|' + r'LOWER-CASE|MAX|MEAN|MEDIAN|MIDRANGE|MIN|MOD|NUMVAL(?:-C)?|' + r'ORD(?:-MAX|-MIN)?|PI|PRESENT-VALUE|RANDOM|RANGE|REM|REVERSE|' + r'SECONDS-FROM-FORMATTED-TIME|SECONDS-PAST-MIDNIGHT|SIGN|SIN|SQRT|' + r'STANDARD-DEVIATION|STORED-CHAR-LENGTH|SUBSTITUTE(?:-CASE)?|' + r'SUM|TAN|TEST-DATE-YYYYMMDD|TEST-DAY-YYYYDDD|TRIM|' + r'UPPER-CASE|VARIANCE|WHEN-COMPILED|YEAR-TO-YYYY)\s*' + r'($|(?=[^0-9a-z_\-]))', Name.Function), + + # Booleans + (r'(^|(?<=[^0-9a-z_\-]))(true|false)\s*($|(?=[^0-9a-z_\-]))', Name.Builtin), + # Comparing Operators + (r'(^|(?<=[^0-9a-z_\-]))(equal|equals|ne|lt|le|gt|ge|' + r'greater|less|than|not|and|or)\s*($|(?=[^0-9a-z_\-]))', Operator.Word), + ], + + # \"[^\"\n]*\"|\'[^\'\n]*\' + 'strings': [ + # apparently strings can be delimited by EOL if they are continued + # in the next line + (r'"[^"\n]*("|\n)', String.Double), + (r"'[^'\n]*('|\n)", String.Single), + ], + + 
'nums': [ + (r'\d+(\s*|\.$|$)', Number.Integer), + (r'[+-]?\d*\.\d+(E[-+]?\d+)?', Number.Float), + (r'[+-]?\d+\.\d*(E[-+]?\d+)?', Number.Float), + ], + } + + +class CobolFreeformatLexer(CobolLexer): + """ + Lexer for Free format OpenCOBOL code. + + .. versionadded:: 1.6 + """ + name = 'COBOLFree' + aliases = ['cobolfree'] + filenames = ['*.cbl', '*.CBL'] + mimetypes = [] + flags = re.IGNORECASE | re.MULTILINE + + tokens = { + 'comment': [ + (r'(\*>.*\n|^\w*\*.*$)', Comment), + ], + } + + +class ABAPLexer(RegexLexer): + """ + Lexer for ABAP, SAP's integrated language. + + .. versionadded:: 1.1 + """ + name = 'ABAP' + aliases = ['abap'] + filenames = ['*.abap'] + mimetypes = ['text/x-abap'] + + flags = re.IGNORECASE | re.MULTILINE + + tokens = { + 'common': [ + (r'\s+', Text), + (r'^\*.*$', Comment.Single), + (r'\".*?\n', Comment.Single), + ], + 'variable-names': [ + (r'<\S+>', Name.Variable), + (r'\w[\w~]*(?:(\[\])|->\*)?', Name.Variable), + ], + 'root': [ + include('common'), + # function calls + (r'(CALL\s+(?:BADI|CUSTOMER-FUNCTION|FUNCTION))(\s+)(\'?\S+\'?)', + bygroups(Keyword, Text, Name.Function)), + (r'(CALL\s+(?:DIALOG|SCREEN|SUBSCREEN|SELECTION-SCREEN|' + r'TRANSACTION|TRANSFORMATION))\b', + Keyword), + (r'(FORM|PERFORM)(\s+)(\w+)', + bygroups(Keyword, Text, Name.Function)), + (r'(PERFORM)(\s+)(\()(\w+)(\))', + bygroups(Keyword, Text, Punctuation, Name.Variable, Punctuation)), + (r'(MODULE)(\s+)(\S+)(\s+)(INPUT|OUTPUT)', + bygroups(Keyword, Text, Name.Function, Text, Keyword)), + + # method implementation + (r'(METHOD)(\s+)([\w~]+)', + bygroups(Keyword, Text, Name.Function)), + # method calls + (r'(\s+)([\w\-]+)([=\-]>)([\w\-~]+)', + bygroups(Text, Name.Variable, Operator, Name.Function)), + # call methodnames returning style + (r'(?<=(=|-)>)([\w\-~]+)(?=\()', Name.Function), + + # keywords with dashes in them. 
+ # these need to be first, because for instance the -ID part + # of MESSAGE-ID wouldn't get highlighted if MESSAGE was + # first in the list of keywords. + (r'(ADD-CORRESPONDING|AUTHORITY-CHECK|' + r'CLASS-DATA|CLASS-EVENTS|CLASS-METHODS|CLASS-POOL|' + r'DELETE-ADJACENT|DIVIDE-CORRESPONDING|' + r'EDITOR-CALL|ENHANCEMENT-POINT|ENHANCEMENT-SECTION|EXIT-COMMAND|' + r'FIELD-GROUPS|FIELD-SYMBOLS|FUNCTION-POOL|' + r'INTERFACE-POOL|INVERTED-DATE|' + r'LOAD-OF-PROGRAM|LOG-POINT|' + r'MESSAGE-ID|MOVE-CORRESPONDING|MULTIPLY-CORRESPONDING|' + r'NEW-LINE|NEW-PAGE|NEW-SECTION|NO-EXTENSION|' + r'OUTPUT-LENGTH|PRINT-CONTROL|' + r'SELECT-OPTIONS|START-OF-SELECTION|SUBTRACT-CORRESPONDING|' + r'SYNTAX-CHECK|SYSTEM-EXCEPTIONS|' + r'TYPE-POOL|TYPE-POOLS' + r')\b', Keyword), + + # keyword kombinations + (r'CREATE\s+(PUBLIC|PRIVATE|DATA|OBJECT)|' + r'((PUBLIC|PRIVATE|PROTECTED)\s+SECTION|' + r'(TYPE|LIKE)(\s+(LINE\s+OF|REF\s+TO|' + r'(SORTED|STANDARD|HASHED)\s+TABLE\s+OF))?|' + r'FROM\s+(DATABASE|MEMORY)|CALL\s+METHOD|' + r'(GROUP|ORDER) BY|HAVING|SEPARATED BY|' + r'GET\s+(BADI|BIT|CURSOR|DATASET|LOCALE|PARAMETER|' + r'PF-STATUS|(PROPERTY|REFERENCE)\s+OF|' + r'RUN\s+TIME|TIME\s+(STAMP)?)?|' + r'SET\s+(BIT|BLANK\s+LINES|COUNTRY|CURSOR|DATASET|EXTENDED\s+CHECK|' + r'HANDLER|HOLD\s+DATA|LANGUAGE|LEFT\s+SCROLL-BOUNDARY|' + r'LOCALE|MARGIN|PARAMETER|PF-STATUS|PROPERTY\s+OF|' + r'RUN\s+TIME\s+(ANALYZER|CLOCK\s+RESOLUTION)|SCREEN|' + r'TITLEBAR|UPADTE\s+TASK\s+LOCAL|USER-COMMAND)|' + r'CONVERT\s+((INVERTED-)?DATE|TIME|TIME\s+STAMP|TEXT)|' + r'(CLOSE|OPEN)\s+(DATASET|CURSOR)|' + r'(TO|FROM)\s+(DATA BUFFER|INTERNAL TABLE|MEMORY ID|' + r'DATABASE|SHARED\s+(MEMORY|BUFFER))|' + r'DESCRIBE\s+(DISTANCE\s+BETWEEN|FIELD|LIST|TABLE)|' + r'FREE\s(MEMORY|OBJECT)?|' + r'PROCESS\s+(BEFORE\s+OUTPUT|AFTER\s+INPUT|' + r'ON\s+(VALUE-REQUEST|HELP-REQUEST))|' + r'AT\s+(LINE-SELECTION|USER-COMMAND|END\s+OF|NEW)|' + r'AT\s+SELECTION-SCREEN(\s+(ON(\s+(BLOCK|(HELP|VALUE)-REQUEST\s+FOR|' + 
r'END\s+OF|RADIOBUTTON\s+GROUP))?|OUTPUT))?|' + r'SELECTION-SCREEN:?\s+((BEGIN|END)\s+OF\s+((TABBED\s+)?BLOCK|LINE|' + r'SCREEN)|COMMENT|FUNCTION\s+KEY|' + r'INCLUDE\s+BLOCKS|POSITION|PUSHBUTTON|' + r'SKIP|ULINE)|' + r'LEAVE\s+(LIST-PROCESSING|PROGRAM|SCREEN|' + r'TO LIST-PROCESSING|TO TRANSACTION)' + r'(ENDING|STARTING)\s+AT|' + r'FORMAT\s+(COLOR|INTENSIFIED|INVERSE|HOTSPOT|INPUT|FRAMES|RESET)|' + r'AS\s+(CHECKBOX|SUBSCREEN|WINDOW)|' + r'WITH\s+(((NON-)?UNIQUE)?\s+KEY|FRAME)|' + r'(BEGIN|END)\s+OF|' + r'DELETE(\s+ADJACENT\s+DUPLICATES\sFROM)?|' + r'COMPARING(\s+ALL\s+FIELDS)?|' + r'INSERT(\s+INITIAL\s+LINE\s+INTO|\s+LINES\s+OF)?|' + r'IN\s+((BYTE|CHARACTER)\s+MODE|PROGRAM)|' + r'END-OF-(DEFINITION|PAGE|SELECTION)|' + r'WITH\s+FRAME(\s+TITLE)|' + + # simple kombinations + r'AND\s+(MARK|RETURN)|CLIENT\s+SPECIFIED|CORRESPONDING\s+FIELDS\s+OF|' + r'IF\s+FOUND|FOR\s+EVENT|INHERITING\s+FROM|LEAVE\s+TO\s+SCREEN|' + r'LOOP\s+AT\s+(SCREEN)?|LOWER\s+CASE|MATCHCODE\s+OBJECT|MODIF\s+ID|' + r'MODIFY\s+SCREEN|NESTING\s+LEVEL|NO\s+INTERVALS|OF\s+STRUCTURE|' + r'RADIOBUTTON\s+GROUP|RANGE\s+OF|REF\s+TO|SUPPRESS DIALOG|' + r'TABLE\s+OF|UPPER\s+CASE|TRANSPORTING\s+NO\s+FIELDS|' + r'VALUE\s+CHECK|VISIBLE\s+LENGTH|HEADER\s+LINE)\b', Keyword), + + # single word keywords. 
+ (r'(^|(?<=(\s|\.)))(ABBREVIATED|ADD|ALIASES|APPEND|ASSERT|' + r'ASSIGN(ING)?|AT(\s+FIRST)?|' + r'BACK|BLOCK|BREAK-POINT|' + r'CASE|CATCH|CHANGING|CHECK|CLASS|CLEAR|COLLECT|COLOR|COMMIT|' + r'CREATE|COMMUNICATION|COMPONENTS?|COMPUTE|CONCATENATE|CONDENSE|' + r'CONSTANTS|CONTEXTS|CONTINUE|CONTROLS|' + r'DATA|DECIMALS|DEFAULT|DEFINE|DEFINITION|DEFERRED|DEMAND|' + r'DETAIL|DIRECTORY|DIVIDE|DO|' + r'ELSE(IF)?|ENDAT|ENDCASE|ENDCLASS|ENDDO|ENDFORM|ENDFUNCTION|' + r'ENDIF|ENDLOOP|ENDMETHOD|ENDMODULE|ENDSELECT|ENDTRY|' + r'ENHANCEMENT|EVENTS|EXCEPTIONS|EXIT|EXPORT|EXPORTING|EXTRACT|' + r'FETCH|FIELDS?|FIND|FOR|FORM|FORMAT|FREE|FROM|' + r'HIDE|' + r'ID|IF|IMPORT|IMPLEMENTATION|IMPORTING|IN|INCLUDE|INCLUDING|' + r'INDEX|INFOTYPES|INITIALIZATION|INTERFACE|INTERFACES|INTO|' + r'LENGTH|LINES|LOAD|LOCAL|' + r'JOIN|' + r'KEY|' + r'MAXIMUM|MESSAGE|METHOD[S]?|MINIMUM|MODULE|MODIFY|MOVE|MULTIPLY|' + r'NODES|' + r'OBLIGATORY|OF|OFF|ON|OVERLAY|' + r'PACK|PARAMETERS|PERCENTAGE|POSITION|PROGRAM|PROVIDE|PUBLIC|PUT|' + r'RAISE|RAISING|RANGES|READ|RECEIVE|REFRESH|REJECT|REPORT|RESERVE|' + r'RESUME|RETRY|RETURN|RETURNING|RIGHT|ROLLBACK|' + r'SCROLL|SEARCH|SELECT|SHIFT|SINGLE|SKIP|SORT|SPLIT|STATICS|STOP|' + r'SUBMIT|SUBTRACT|SUM|SUMMARY|SUMMING|SUPPLY|' + r'TABLE|TABLES|TIMES|TITLE|TO|TOP-OF-PAGE|TRANSFER|TRANSLATE|TRY|TYPES|' + r'ULINE|UNDER|UNPACK|UPDATE|USING|' + r'VALUE|VALUES|VIA|' + r'WAIT|WHEN|WHERE|WHILE|WITH|WINDOW|WRITE)\b', Keyword), + + # builtins + (r'(abs|acos|asin|atan|' + r'boolc|boolx|bit_set|' + r'char_off|charlen|ceil|cmax|cmin|condense|contains|' + r'contains_any_of|contains_any_not_of|concat_lines_of|cos|cosh|' + r'count|count_any_of|count_any_not_of|' + r'dbmaxlen|distance|' + r'escape|exp|' + r'find|find_end|find_any_of|find_any_not_of|floor|frac|from_mixed|' + r'insert|' + r'lines|log|log10|' + r'match|matches|' + r'nmax|nmin|numofchar|' + r'repeat|replace|rescale|reverse|round|' + r'segment|shift_left|shift_right|sign|sin|sinh|sqrt|strlen|' + 
r'substring|substring_after|substring_from|substring_before|substring_to|' + r'tan|tanh|to_upper|to_lower|to_mixed|translate|trunc|' + r'xstrlen)(\()\b', bygroups(Name.Builtin, Punctuation)), + + (r'&[0-9]', Name), + (r'[0-9]+', Number.Integer), + + # operators which look like variable names before + # parsing variable names. + (r'(?<=(\s|.))(AND|EQ|NE|GT|LT|GE|LE|CO|CN|CA|NA|CS|NOT|NS|CP|NP|' + r'BYTE-CO|BYTE-CN|BYTE-CA|BYTE-NA|BYTE-CS|BYTE-NS|' + r'IS\s+(NOT\s+)?(INITIAL|ASSIGNED|REQUESTED|BOUND))\b', Operator), + + include('variable-names'), + + # standard oparators after variable names, + # because < and > are part of field symbols. + (r'[?*<>=\-+]', Operator), + (r"'(''|[^'])*'", String.Single), + (r"`([^`])*`", String.Single), + (r'[/;:()\[\],.]', Punctuation) + ], + } + + +class OpenEdgeLexer(RegexLexer): + """ + Lexer for `OpenEdge ABL (formerly Progress) + `_ source code. + + .. versionadded:: 1.5 + """ + name = 'OpenEdge ABL' + aliases = ['openedge', 'abl', 'progress'] + filenames = ['*.p', '*.cls'] + mimetypes = ['text/x-openedge', 'application/x-openedge'] + + types = (r'(?i)(^|(?<=[^0-9a-z_\-]))(CHARACTER|CHAR|CHARA|CHARAC|CHARACT|CHARACTE|' + r'COM-HANDLE|DATE|DATETIME|DATETIME-TZ|' + r'DECIMAL|DEC|DECI|DECIM|DECIMA|HANDLE|' + r'INT64|INTEGER|INT|INTE|INTEG|INTEGE|' + r'LOGICAL|LONGCHAR|MEMPTR|RAW|RECID|ROWID)\s*($|(?=[^0-9a-z_\-]))') + + keywords = words(OPENEDGEKEYWORDS, + prefix=r'(?i)(^|(?<=[^0-9a-z_\-]))', + suffix=r'\s*($|(?=[^0-9a-z_\-]))') + + tokens = { + 'root': [ + (r'/\*', Comment.Multiline, 'comment'), + (r'\{', Comment.Preproc, 'preprocessor'), + (r'\s*&.*', Comment.Preproc), + (r'0[xX][0-9a-fA-F]+[LlUu]*', Number.Hex), + (r'(?i)(DEFINE|DEF|DEFI|DEFIN)\b', Keyword.Declaration), + (types, Keyword.Type), + (keywords, Name.Builtin), + (r'"(\\\\|\\"|[^"])*"', String.Double), + (r"'(\\\\|\\'|[^'])*'", String.Single), + (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), + (r'[0-9]+', Number.Integer), + (r'\s+', Text), + (r'[+*/=-]', 
Operator), + (r'[.:()]', Punctuation), + (r'.', Name.Variable), # Lazy catch-all + ], + 'comment': [ + (r'[^*/]', Comment.Multiline), + (r'/\*', Comment.Multiline, '#push'), + (r'\*/', Comment.Multiline, '#pop'), + (r'[*/]', Comment.Multiline) + ], + 'preprocessor': [ + (r'[^{}]', Comment.Preproc), + (r'\{', Comment.Preproc, '#push'), + (r'\}', Comment.Preproc, '#pop'), + ], + } + + +class GoodDataCLLexer(RegexLexer): + """ + Lexer for `GoodData-CL + `_ + script files. + + .. versionadded:: 1.4 + """ + + name = 'GoodData-CL' + aliases = ['gooddata-cl'] + filenames = ['*.gdc'] + mimetypes = ['text/x-gooddata-cl'] + + flags = re.IGNORECASE + tokens = { + 'root': [ + # Comments + (r'#.*', Comment.Single), + # Function call + (r'[a-z]\w*', Name.Function), + # Argument list + (r'\(', Punctuation, 'args-list'), + # Punctuation + (r';', Punctuation), + # Space is not significant + (r'\s+', Text) + ], + 'args-list': [ + (r'\)', Punctuation, '#pop'), + (r',', Punctuation), + (r'[a-z]\w*', Name.Variable), + (r'=', Operator), + (r'"', String, 'string-literal'), + (r'[0-9]+(?:\.[0-9]+)?(?:e[+-]?[0-9]{1,3})?', Number), + # Space is not significant + (r'\s', Text) + ], + 'string-literal': [ + (r'\\[tnrfbae"\\]', String.Escape), + (r'"', String, '#pop'), + (r'[^\\"]+', String) + ] + } + + +class MaqlLexer(RegexLexer): + """ + Lexer for `GoodData MAQL + `_ + scripts. + + .. 
versionadded:: 1.4 + """ + + name = 'MAQL' + aliases = ['maql'] + filenames = ['*.maql'] + mimetypes = ['text/x-gooddata-maql', 'application/x-gooddata-maql'] + + flags = re.IGNORECASE + tokens = { + 'root': [ + # IDENTITY + (r'IDENTIFIER\b', Name.Builtin), + # IDENTIFIER + (r'\{[^}]+\}', Name.Variable), + # NUMBER + (r'[0-9]+(?:\.[0-9]+)?(?:e[+-]?[0-9]{1,3})?', Number), + # STRING + (r'"', String, 'string-literal'), + # RELATION + (r'\<\>|\!\=', Operator), + (r'\=|\>\=|\>|\<\=|\<', Operator), + # := + (r'\:\=', Operator), + # OBJECT + (r'\[[^]]+\]', Name.Variable.Class), + # keywords + (words(( + 'DIMENSION', 'DIMENSIONS', 'BOTTOM', 'METRIC', 'COUNT', 'OTHER', + 'FACT', 'WITH', 'TOP', 'OR', 'ATTRIBUTE', 'CREATE', 'PARENT', + 'FALSE', 'ROW', 'ROWS', 'FROM', 'ALL', 'AS', 'PF', 'COLUMN', + 'COLUMNS', 'DEFINE', 'REPORT', 'LIMIT', 'TABLE', 'LIKE', 'AND', + 'BY', 'BETWEEN', 'EXCEPT', 'SELECT', 'MATCH', 'WHERE', 'TRUE', + 'FOR', 'IN', 'WITHOUT', 'FILTER', 'ALIAS', 'WHEN', 'NOT', 'ON', + 'KEYS', 'KEY', 'FULLSET', 'PRIMARY', 'LABELS', 'LABEL', + 'VISUAL', 'TITLE', 'DESCRIPTION', 'FOLDER', 'ALTER', 'DROP', + 'ADD', 'DATASET', 'DATATYPE', 'INT', 'BIGINT', 'DOUBLE', 'DATE', + 'VARCHAR', 'DECIMAL', 'SYNCHRONIZE', 'TYPE', 'DEFAULT', 'ORDER', + 'ASC', 'DESC', 'HYPERLINK', 'INCLUDE', 'TEMPLATE', 'MODIFY'), + suffix=r'\b'), + Keyword), + # FUNCNAME + (r'[a-z]\w*\b', Name.Function), + # Comments + (r'#.*', Comment.Single), + # Punctuation + (r'[,;()]', Punctuation), + # Space is not significant + (r'\s+', Text) + ], + 'string-literal': [ + (r'\\[tnrfbae"\\]', String.Escape), + (r'"', String, '#pop'), + (r'[^\\"]+', String) + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/c_cpp.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/c_cpp.py new file mode 100644 index 0000000..128cc6c --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/c_cpp.py @@ -0,0 +1,233 @@ +# -*- coding: 
utf-8 -*- +""" + pygments.lexers.c_cpp + ~~~~~~~~~~~~~~~~~~~~~ + + Lexers for C/C++ languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include, bygroups, using, \ + this, inherit, default, words +from pygments.util import get_bool_opt +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Error + +__all__ = ['CLexer', 'CppLexer'] + + +class CFamilyLexer(RegexLexer): + """ + For C family source code. This is used as a base class to avoid repetitious + definitions. + """ + + #: optional Comment or Whitespace + _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+' + #: only one /* */ style comment + _ws1 = r'\s*(?:/[*].*?[*]/\s*)*' + + tokens = { + 'whitespace': [ + # preprocessor directives: without whitespace + ('^#if\s+0', Comment.Preproc, 'if0'), + ('^#', Comment.Preproc, 'macro'), + # or with whitespace + ('^(' + _ws1 + r')(#if\s+0)', + bygroups(using(this), Comment.Preproc), 'if0'), + ('^(' + _ws1 + ')(#)', + bygroups(using(this), Comment.Preproc), 'macro'), + (r'\n', Text), + (r'\s+', Text), + (r'\\\n', Text), # line continuation + (r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single), + (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), + ], + 'statements': [ + (r'L?"', String, 'string'), + (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char), + (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float), + (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), + (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex), + (r'0[0-7]+[LlUu]*', Number.Oct), + (r'\d+[LlUu]*', Number.Integer), + (r'\*/', Error), + (r'[~!%^&*+=|?:<>/-]', Operator), + (r'[()\[\],.]', Punctuation), + (words(('auto', 'break', 'case', 'const', 'continue', 'default', 'do', + 'else', 'enum', 'extern', 'for', 'goto', 'if', 'register', + 'restricted', 'return', 'sizeof', 'static', 'struct', + 'switch', 'typedef', 'union', 'volatile', 
'while'), + suffix=r'\b'), Keyword), + (r'(bool|int|long|float|short|double|char|unsigned|signed|void|' + r'[a-z_][a-z0-9_]*_t)\b', + Keyword.Type), + (words(('inline', '_inline', '__inline', 'naked', 'restrict', + 'thread', 'typename'), suffix=r'\b'), Keyword.Reserved), + # Vector intrinsics + (r'(__m(128i|128d|128|64))\b', Keyword.Reserved), + # Microsoft-isms + (words(( + 'asm', 'int8', 'based', 'except', 'int16', 'stdcall', 'cdecl', + 'fastcall', 'int32', 'declspec', 'finally', 'int64', 'try', + 'leave', 'wchar_t', 'w64', 'unaligned', 'raise', 'noop', + 'identifier', 'forceinline', 'assume'), + prefix=r'__', suffix=r'\b'), Keyword.Reserved), + (r'(true|false|NULL)\b', Name.Builtin), + (r'([a-zA-Z_]\w*)(\s*)(:)(?!:)', bygroups(Name.Label, Text, Punctuation)), + ('[a-zA-Z_]\w*', Name), + ], + 'root': [ + include('whitespace'), + # functions + (r'((?:[\w*\s])+?(?:\s|[*]))' # return arguments + r'([a-zA-Z_]\w*)' # method name + r'(\s*\([^;]*?\))' # signature + r'(' + _ws + r')?(\{)', + bygroups(using(this), Name.Function, using(this), using(this), + Punctuation), + 'function'), + # function declarations + (r'((?:[\w*\s])+?(?:\s|[*]))' # return arguments + r'([a-zA-Z_]\w*)' # method name + r'(\s*\([^;]*?\))' # signature + r'(' + _ws + r')?(;)', + bygroups(using(this), Name.Function, using(this), using(this), + Punctuation)), + default('statement'), + ], + 'statement': [ + include('whitespace'), + include('statements'), + ('[{}]', Punctuation), + (';', Punctuation, '#pop'), + ], + 'function': [ + include('whitespace'), + include('statements'), + (';', Punctuation), + (r'\{', Punctuation, '#push'), + (r'\}', Punctuation, '#pop'), + ], + 'string': [ + (r'"', String, '#pop'), + (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|' + r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape), + (r'[^\\"\n]+', String), # all other characters + (r'\\\n', String), # line continuation + (r'\\', String), # stray backslash + ], + 'macro': [ + (r'[^/\n]+', Comment.Preproc), + 
(r'/[*](.|\n)*?[*]/', Comment.Multiline), + (r'//.*?\n', Comment.Single, '#pop'), + (r'/', Comment.Preproc), + (r'(?<=\\)\n', Comment.Preproc), + (r'\n', Comment.Preproc, '#pop'), + ], + 'if0': [ + (r'^\s*#if.*?(?)', Text, '#pop'), + ], + } + + def analyse_text(text): + if re.search('#include <[a-z]+>', text): + return 0.2 + if re.search('using namespace ', text): + return 0.4 diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/c_like.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/c_like.py new file mode 100644 index 0000000..9617975 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/c_like.py @@ -0,0 +1,413 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.c_like + ~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for other C-like languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include, bygroups, inherit, words, \ + default +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +from pygments.lexers.c_cpp import CLexer, CppLexer +from pygments.lexers import _mql_builtins + +__all__ = ['PikeLexer', 'NesCLexer', 'ClayLexer', 'ECLexer', 'ValaLexer', + 'CudaLexer', 'SwigLexer', 'MqlLexer'] + + +class PikeLexer(CppLexer): + """ + For `Pike `_ source code. + + .. 
versionadded:: 2.0 + """ + name = 'Pike' + aliases = ['pike'] + filenames = ['*.pike', '*.pmod'] + mimetypes = ['text/x-pike'] + + tokens = { + 'statements': [ + (words(( + 'catch', 'new', 'private', 'protected', 'public', 'gauge', + 'throw', 'throws', 'class', 'interface', 'implement', 'abstract', 'extends', 'from', + 'this', 'super', 'constant', 'final', 'static', 'import', 'use', 'extern', + 'inline', 'proto', 'break', 'continue', 'if', 'else', 'for', + 'while', 'do', 'switch', 'case', 'as', 'in', 'version', 'return', 'true', 'false', 'null', + '__VERSION__', '__MAJOR__', '__MINOR__', '__BUILD__', '__REAL_VERSION__', + '__REAL_MAJOR__', '__REAL_MINOR__', '__REAL_BUILD__', '__DATE__', '__TIME__', + '__FILE__', '__DIR__', '__LINE__', '__AUTO_BIGNUM__', '__NT__', '__PIKE__', + '__amigaos__', '_Pragma', 'static_assert', 'defined', 'sscanf'), suffix=r'\b'), + Keyword), + (r'(bool|int|long|float|short|double|char|string|object|void|mapping|' + r'array|multiset|program|function|lambda|mixed|' + r'[a-z_][a-z0-9_]*_t)\b', + Keyword.Type), + (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'), + (r'[~!%^&*+=|?:<>/@-]', Operator), + inherit, + ], + 'classname': [ + (r'[a-zA-Z_]\w*', Name.Class, '#pop'), + # template specification + (r'\s*(?=>)', Text, '#pop'), + ], + } + + +class NesCLexer(CLexer): + """ + For `nesC `_ source code with preprocessor + directives. + + .. 
versionadded:: 2.0 + """ + name = 'nesC' + aliases = ['nesc'] + filenames = ['*.nc'] + mimetypes = ['text/x-nescsrc'] + + tokens = { + 'statements': [ + (words(( + 'abstract', 'as', 'async', 'atomic', 'call', 'command', 'component', + 'components', 'configuration', 'event', 'extends', 'generic', + 'implementation', 'includes', 'interface', 'module', 'new', 'norace', + 'post', 'provides', 'signal', 'task', 'uses'), suffix=r'\b'), + Keyword), + (words(('nx_struct', 'nx_union', 'nx_int8_t', 'nx_int16_t', 'nx_int32_t', + 'nx_int64_t', 'nx_uint8_t', 'nx_uint16_t', 'nx_uint32_t', + 'nx_uint64_t'), suffix=r'\b'), + Keyword.Type), + inherit, + ], + } + + +class ClayLexer(RegexLexer): + """ + For `Clay `_ source. + + .. versionadded:: 2.0 + """ + name = 'Clay' + filenames = ['*.clay'] + aliases = ['clay'] + mimetypes = ['text/x-clay'] + tokens = { + 'root': [ + (r'\s', Text), + (r'//.*?$', Comment.Singleline), + (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), + (r'\b(public|private|import|as|record|variant|instance' + r'|define|overload|default|external|alias' + r'|rvalue|ref|forward|inline|noinline|forceinline' + r'|enum|var|and|or|not|if|else|goto|return|while' + r'|switch|case|break|continue|for|in|true|false|try|catch|throw' + r'|finally|onerror|staticassert|eval|when|newtype' + r'|__FILE__|__LINE__|__COLUMN__|__ARG__' + r')\b', Keyword), + (r'[~!%^&*+=|:<>/-]', Operator), + (r'[#(){}\[\],;.]', Punctuation), + (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex), + (r'\d+[LlUu]*', Number.Integer), + (r'\b(true|false)\b', Name.Builtin), + (r'(?i)[a-z_?][\w?]*', Name), + (r'"""', String, 'tdqs'), + (r'"', String, 'dqs'), + ], + 'strings': [ + (r'(?i)\\(x[0-9a-f]{2}|.)', String.Escape), + (r'.', String), + ], + 'nl': [ + (r'\n', String), + ], + 'dqs': [ + (r'"', String, '#pop'), + include('strings'), + ], + 'tdqs': [ + (r'"""', String, '#pop'), + include('strings'), + include('nl'), + ], + } + + +class ECLexer(CLexer): + """ + For eC source code with preprocessor directives. 
+ + .. versionadded:: 1.5 + """ + name = 'eC' + aliases = ['ec'] + filenames = ['*.ec', '*.eh'] + mimetypes = ['text/x-echdr', 'text/x-ecsrc'] + + tokens = { + 'statements': [ + (words(( + 'virtual', 'class', 'private', 'public', 'property', 'import', + 'delete', 'new', 'new0', 'renew', 'renew0', 'define', 'get', + 'set', 'remote', 'dllexport', 'dllimport', 'stdcall', 'subclass', + '__on_register_module', 'namespace', 'using', 'typed_object', + 'any_object', 'incref', 'register', 'watch', 'stopwatching', 'firewatchers', + 'watchable', 'class_designer', 'class_fixed', 'class_no_expansion', 'isset', + 'class_default_property', 'property_category', 'class_data', + 'class_property', 'thisclass', 'dbtable', 'dbindex', + 'database_open', 'dbfield'), suffix=r'\b'), Keyword), + (words(('uint', 'uint16', 'uint32', 'uint64', 'bool', 'byte', + 'unichar', 'int64'), suffix=r'\b'), + Keyword.Type), + (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'), + (r'(null|value|this)\b', Name.Builtin), + inherit, + ], + 'classname': [ + (r'[a-zA-Z_]\w*', Name.Class, '#pop'), + # template specification + (r'\s*(?=>)', Text, '#pop'), + ], + } + + +class ValaLexer(RegexLexer): + """ + For Vala source code with preprocessor directives. + + .. 
versionadded:: 1.1 + """ + name = 'Vala' + aliases = ['vala', 'vapi'] + filenames = ['*.vala', '*.vapi'] + mimetypes = ['text/x-vala'] + + tokens = { + 'whitespace': [ + (r'^\s*#if\s+0', Comment.Preproc, 'if0'), + (r'\n', Text), + (r'\s+', Text), + (r'\\\n', Text), # line continuation + (r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single), + (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), + ], + 'statements': [ + (r'[L@]?"', String, 'string'), + (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", + String.Char), + (r'(?s)""".*?"""', String), # verbatim strings + (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float), + (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), + (r'0x[0-9a-fA-F]+[Ll]?', Number.Hex), + (r'0[0-7]+[Ll]?', Number.Oct), + (r'\d+[Ll]?', Number.Integer), + (r'[~!%^&*+=|?:<>/-]', Operator), + (r'(\[)(Compact|Immutable|(?:Boolean|Simple)Type)(\])', + bygroups(Punctuation, Name.Decorator, Punctuation)), + # TODO: "correctly" parse complex code attributes + (r'(\[)(CCode|(?:Integer|Floating)Type)', + bygroups(Punctuation, Name.Decorator)), + (r'[()\[\],.]', Punctuation), + (words(( + 'as', 'base', 'break', 'case', 'catch', 'construct', 'continue', + 'default', 'delete', 'do', 'else', 'enum', 'finally', 'for', + 'foreach', 'get', 'if', 'in', 'is', 'lock', 'new', 'out', 'params', + 'return', 'set', 'sizeof', 'switch', 'this', 'throw', 'try', + 'typeof', 'while', 'yield'), suffix=r'\b'), + Keyword), + (words(( + 'abstract', 'const', 'delegate', 'dynamic', 'ensures', 'extern', + 'inline', 'internal', 'override', 'owned', 'private', 'protected', + 'public', 'ref', 'requires', 'signal', 'static', 'throws', 'unowned', + 'var', 'virtual', 'volatile', 'weak', 'yields'), suffix=r'\b'), + Keyword.Declaration), + (r'(namespace|using)(\s+)', bygroups(Keyword.Namespace, Text), + 'namespace'), + (r'(class|errordomain|interface|struct)(\s+)', + bygroups(Keyword.Declaration, Text), 'class'), + (r'(\.)([a-zA-Z_]\w*)', + bygroups(Operator, Name.Attribute)), + # 
void is an actual keyword, others are in glib-2.0.vapi + (words(( + 'void', 'bool', 'char', 'double', 'float', 'int', 'int8', 'int16', + 'int32', 'int64', 'long', 'short', 'size_t', 'ssize_t', 'string', + 'time_t', 'uchar', 'uint', 'uint8', 'uint16', 'uint32', 'uint64', + 'ulong', 'unichar', 'ushort'), suffix=r'\b'), + Keyword.Type), + (r'(true|false|null)\b', Name.Builtin), + ('[a-zA-Z_]\w*', Name), + ], + 'root': [ + include('whitespace'), + default('statement'), + ], + 'statement': [ + include('whitespace'), + include('statements'), + ('[{}]', Punctuation), + (';', Punctuation, '#pop'), + ], + 'string': [ + (r'"', String, '#pop'), + (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), + (r'[^\\"\n]+', String), # all other characters + (r'\\\n', String), # line continuation + (r'\\', String), # stray backslash + ], + 'if0': [ + (r'^\s*#if.*?(?`_ + source. + + .. versionadded:: 1.6 + """ + name = 'CUDA' + filenames = ['*.cu', '*.cuh'] + aliases = ['cuda', 'cu'] + mimetypes = ['text/x-cuda'] + + function_qualifiers = set(('__device__', '__global__', '__host__', + '__noinline__', '__forceinline__')) + variable_qualifiers = set(('__device__', '__constant__', '__shared__', + '__restrict__')) + vector_types = set(('char1', 'uchar1', 'char2', 'uchar2', 'char3', 'uchar3', + 'char4', 'uchar4', 'short1', 'ushort1', 'short2', 'ushort2', + 'short3', 'ushort3', 'short4', 'ushort4', 'int1', 'uint1', + 'int2', 'uint2', 'int3', 'uint3', 'int4', 'uint4', 'long1', + 'ulong1', 'long2', 'ulong2', 'long3', 'ulong3', 'long4', + 'ulong4', 'longlong1', 'ulonglong1', 'longlong2', + 'ulonglong2', 'float1', 'float2', 'float3', 'float4', + 'double1', 'double2', 'dim3')) + variables = set(('gridDim', 'blockIdx', 'blockDim', 'threadIdx', 'warpSize')) + functions = set(('__threadfence_block', '__threadfence', '__threadfence_system', + '__syncthreads', '__syncthreads_count', '__syncthreads_and', + '__syncthreads_or')) + execution_confs = set(('<<<', '>>>')) + + def 
get_tokens_unprocessed(self, text): + for index, token, value in CLexer.get_tokens_unprocessed(self, text): + if token is Name: + if value in self.variable_qualifiers: + token = Keyword.Type + elif value in self.vector_types: + token = Keyword.Type + elif value in self.variables: + token = Name.Builtin + elif value in self.execution_confs: + token = Keyword.Pseudo + elif value in self.function_qualifiers: + token = Keyword.Reserved + elif value in self.functions: + token = Name.Function + yield index, token, value + + +class SwigLexer(CppLexer): + """ + For `SWIG `_ source code. + + .. versionadded:: 2.0 + """ + name = 'SWIG' + aliases = ['swig'] + filenames = ['*.swg', '*.i'] + mimetypes = ['text/swig'] + priority = 0.04 # Lower than C/C++ and Objective C/C++ + + tokens = { + 'statements': [ + # SWIG directives + (r'(%[a-z_][a-z0-9_]*)', Name.Function), + # Special variables + ('\$\**\&?\w+', Name), + # Stringification / additional preprocessor directives + (r'##*[a-zA-Z_]\w*', Comment.Preproc), + inherit, + ], + } + + # This is a far from complete set of SWIG directives + swig_directives = set(( + # Most common directives + '%apply', '%define', '%director', '%enddef', '%exception', '%extend', + '%feature', '%fragment', '%ignore', '%immutable', '%import', '%include', + '%inline', '%insert', '%module', '%newobject', '%nspace', '%pragma', + '%rename', '%shared_ptr', '%template', '%typecheck', '%typemap', + # Less common directives + '%arg', '%attribute', '%bang', '%begin', '%callback', '%catches', '%clear', + '%constant', '%copyctor', '%csconst', '%csconstvalue', '%csenum', + '%csmethodmodifiers', '%csnothrowexception', '%default', '%defaultctor', + '%defaultdtor', '%defined', '%delete', '%delobject', '%descriptor', + '%exceptionclass', '%exceptionvar', '%extend_smart_pointer', '%fragments', + '%header', '%ifcplusplus', '%ignorewarn', '%implicit', '%implicitconv', + '%init', '%javaconst', '%javaconstvalue', '%javaenum', '%javaexception', + '%javamethodmodifiers', 
'%kwargs', '%luacode', '%mutable', '%naturalvar', + '%nestedworkaround', '%perlcode', '%pythonabc', '%pythonappend', + '%pythoncallback', '%pythoncode', '%pythondynamic', '%pythonmaybecall', + '%pythonnondynamic', '%pythonprepend', '%refobject', '%shadow', '%sizeof', + '%trackobjects', '%types', '%unrefobject', '%varargs', '%warn', + '%warnfilter')) + + def analyse_text(text): + rv = 0 + # Search for SWIG directives, which are conventionally at the beginning of + # a line. The probability of them being within a line is low, so let another + # lexer win in this case. + matches = re.findall(r'^\s*(%[a-z_][a-z0-9_]*)', text, re.M) + for m in matches: + if m in SwigLexer.swig_directives: + rv = 0.98 + break + else: + rv = 0.91 # Fraction higher than MatlabLexer + return rv + + +class MqlLexer(CppLexer): + """ + For `MQL4 `_ and + `MQL5 `_ source code. + + .. versionadded:: 2.0 + """ + name = 'MQL' + aliases = ['mql', 'mq4', 'mq5', 'mql4', 'mql5'] + filenames = ['*.mq4', '*.mq5', '*.mqh'] + mimetypes = ['text/x-mql'] + + tokens = { + 'statements': [ + (words(_mql_builtins.keywords, suffix=r'\b'), Keyword), + (words(_mql_builtins.c_types, suffix=r'\b'), Keyword.Type), + (words(_mql_builtins.types, suffix=r'\b'), Name.Function), + (words(_mql_builtins.constants, suffix=r'\b'), Name.Constant), + (words(_mql_builtins.colors, prefix='(clr)?', suffix=r'\b'), + Name.Constant), + inherit, + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/chapel.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/chapel.py new file mode 100644 index 0000000..417301d --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/chapel.py @@ -0,0 +1,98 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.chapel + ~~~~~~~~~~~~~~~~~~~~~~ + + Lexer for the Chapel language. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +from pygments.lexer import RegexLexer, bygroups, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['ChapelLexer'] + + +class ChapelLexer(RegexLexer): + """ + For `Chapel `_ source. + + .. versionadded:: 2.0 + """ + name = 'Chapel' + filenames = ['*.chpl'] + aliases = ['chapel', 'chpl'] + # mimetypes = ['text/x-chapel'] + + tokens = { + 'root': [ + (r'\n', Text), + (r'\s+', Text), + (r'\\\n', Text), + + (r'//(.*?)\n', Comment.Single), + (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), + + (r'(config|const|in|inout|out|param|ref|type|var)\b', + Keyword.Declaration), + (r'(false|nil|true)\b', Keyword.Constant), + (r'(bool|complex|imag|int|opaque|range|real|string|uint)\b', + Keyword.Type), + (words(( + 'align', 'atomic', 'begin', 'break', 'by', 'cobegin', 'coforall', + 'continue', 'delete', 'dmapped', 'do', 'domain', 'else', 'enum', + 'export', 'extern', 'for', 'forall', 'if', 'index', 'inline', + 'iter', 'label', 'lambda', 'let', 'local', 'new', 'noinit', 'on', + 'otherwise', 'pragma', 'reduce', 'return', 'scan', 'select', + 'serial', 'single', 'sparse', 'subdomain', 'sync', 'then', 'use', + 'when', 'where', 'while', 'with', 'yield', 'zip'), suffix=r'\b'), + Keyword), + (r'(proc)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'procname'), + (r'(class|module|record|union)(\s+)', bygroups(Keyword, Text), + 'classname'), + + # imaginary integers + (r'\d+i', Number), + (r'\d+\.\d*([Ee][-+]\d+)?i', Number), + (r'\.\d+([Ee][-+]\d+)?i', Number), + (r'\d+[Ee][-+]\d+i', Number), + + # reals cannot end with a period due to lexical ambiguity with + # .. operator. See reference for rationale. 
+ (r'(\d*\.\d+)([eE][+-]?[0-9]+)?i?', Number.Float), + (r'\d+[eE][+-]?[0-9]+i?', Number.Float), + + # integer literals + # -- binary + (r'0[bB][01]+', Number.Bin), + # -- hex + (r'0[xX][0-9a-fA-F]+', Number.Hex), + # -- octal + (r'0[oO][0-7]+', Number.Oct), + # -- decimal + (r'[0-9]+', Number.Integer), + + # strings + (r'["\'](\\\\|\\"|[^"\'])*["\']', String), + + # tokens + (r'(=|\+=|-=|\*=|/=|\*\*=|%=|&=|\|=|\^=|&&=|\|\|=|<<=|>>=|' + r'<=>|<~>|\.\.|by|#|\.\.\.|' + r'&&|\|\||!|&|\||\^|~|<<|>>|' + r'==|!=|<=|>=|<|>|' + r'[+\-*/%]|\*\*)', Operator), + (r'[:;,.?()\[\]{}]', Punctuation), + + # identifiers + (r'[a-zA-Z_][\w$]*', Name.Other), + ], + 'classname': [ + (r'[a-zA-Z_][\w$]*', Name.Class, '#pop'), + ], + 'procname': [ + (r'[a-zA-Z_][\w$]*', Name.Function, '#pop'), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/compiled.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/compiled.py new file mode 100644 index 0000000..74326c3 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/compiled.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.compiled + ~~~~~~~~~~~~~~~~~~~~~~~~ + + Just export lexer classes previously contained in this module. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +from pygments.lexers.jvm import JavaLexer, ScalaLexer +from pygments.lexers.c_cpp import CLexer, CppLexer +from pygments.lexers.d import DLexer +from pygments.lexers.objective import ObjectiveCLexer, \ + ObjectiveCppLexer, LogosLexer +from pygments.lexers.go import GoLexer +from pygments.lexers.rust import RustLexer +from pygments.lexers.c_like import ECLexer, ValaLexer, CudaLexer +from pygments.lexers.pascal import DelphiLexer, Modula2Lexer, AdaLexer +from pygments.lexers.business import CobolLexer, CobolFreeformatLexer +from pygments.lexers.fortran import FortranLexer +from pygments.lexers.prolog import PrologLexer +from pygments.lexers.python import CythonLexer +from pygments.lexers.graphics import GLShaderLexer +from pygments.lexers.ml import OcamlLexer +from pygments.lexers.basic import BlitzBasicLexer, BlitzMaxLexer, MonkeyLexer +from pygments.lexers.dylan import DylanLexer, DylanLidLexer, DylanConsoleLexer +from pygments.lexers.ooc import OocLexer +from pygments.lexers.felix import FelixLexer +from pygments.lexers.nimrod import NimrodLexer + +__all__ = [] diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/configs.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/configs.py new file mode 100644 index 0000000..e621c84 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/configs.py @@ -0,0 +1,546 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.configs + ~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for configuration file formats. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import re + +from pygments.lexer import RegexLexer, default, words, bygroups, include, using +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Whitespace +from pygments.lexers.shell import BashLexer + +__all__ = ['IniLexer', 'RegeditLexer', 'PropertiesLexer', 'KconfigLexer', + 'Cfengine3Lexer', 'ApacheConfLexer', 'SquidConfLexer', + 'NginxConfLexer', 'LighttpdConfLexer', 'DockerLexer'] + + +class IniLexer(RegexLexer): + """ + Lexer for configuration files in INI style. + """ + + name = 'INI' + aliases = ['ini', 'cfg', 'dosini'] + filenames = ['*.ini', '*.cfg'] + mimetypes = ['text/x-ini'] + + tokens = { + 'root': [ + (r'\s+', Text), + (r'[;#].*', Comment.Single), + (r'\[.*?\]$', Keyword), + (r'(.*?)([ \t]*)(=)([ \t]*)(.*(?:\n[ \t].+)*)', + bygroups(Name.Attribute, Text, Operator, Text, String)) + ] + } + + def analyse_text(text): + npos = text.find('\n') + if npos < 3: + return False + return text[0] == '[' and text[npos-1] == ']' + + +class RegeditLexer(RegexLexer): + """ + Lexer for `Windows Registry + `_ files produced + by regedit. + + .. versionadded:: 1.6 + """ + + name = 'reg' + aliases = ['registry'] + filenames = ['*.reg'] + mimetypes = ['text/x-windows-registry'] + + tokens = { + 'root': [ + (r'Windows Registry Editor.*', Text), + (r'\s+', Text), + (r'[;#].*', Comment.Single), + (r'(\[)(-?)(HKEY_[A-Z_]+)(.*?\])$', + bygroups(Keyword, Operator, Name.Builtin, Keyword)), + # String keys, which obey somewhat normal escaping + (r'("(?:\\"|\\\\|[^"])+")([ \t]*)(=)([ \t]*)', + bygroups(Name.Attribute, Text, Operator, Text), + 'value'), + # Bare keys (includes @) + (r'(.*?)([ \t]*)(=)([ \t]*)', + bygroups(Name.Attribute, Text, Operator, Text), + 'value'), + ], + 'value': [ + (r'-', Operator, '#pop'), # delete value + (r'(dword|hex(?:\([0-9a-fA-F]\))?)(:)([0-9a-fA-F,]+)', + bygroups(Name.Variable, Punctuation, Number), '#pop'), + # As far as I know, .reg files do not support line continuation. 
+ (r'.+', String, '#pop'), + default('#pop'), + ] + } + + def analyse_text(text): + return text.startswith('Windows Registry Editor') + + +class PropertiesLexer(RegexLexer): + """ + Lexer for configuration files in Java's properties format. + + .. versionadded:: 1.4 + """ + + name = 'Properties' + aliases = ['properties', 'jproperties'] + filenames = ['*.properties'] + mimetypes = ['text/x-java-properties'] + + tokens = { + 'root': [ + (r'\s+', Text), + (r'(?:[;#]|//).*$', Comment), + (r'(.*?)([ \t]*)([=:])([ \t]*)(.*(?:(?<=\\)\n.*)*)', + bygroups(Name.Attribute, Text, Operator, Text, String)), + ], + } + + +def _rx_indent(level): + # Kconfig *always* interprets a tab as 8 spaces, so this is the default. + # Edit this if you are in an environment where KconfigLexer gets expanded + # input (tabs expanded to spaces) and the expansion tab width is != 8, + # e.g. in connection with Trac (trac.ini, [mimeviewer], tab_width). + # Value range here is 2 <= {tab_width} <= 8. + tab_width = 8 + # Regex matching a given indentation {level}, assuming that indentation is + # a multiple of {tab_width}. In other cases there might be problems. + if tab_width == 2: + space_repeat = '+' + else: + space_repeat = '{1,%d}' % (tab_width - 1) + if level == 1: + level_repeat = '' + else: + level_repeat = '{%s}' % level + return r'(?:\t| %s\t| {%s})%s.*\n' % (space_repeat, tab_width, level_repeat) + + +class KconfigLexer(RegexLexer): + """ + For Linux-style Kconfig files. + + .. 
versionadded:: 1.6 + """ + + name = 'Kconfig' + aliases = ['kconfig', 'menuconfig', 'linux-config', 'kernel-config'] + # Adjust this if new kconfig file names appear in your environment + filenames = ['Kconfig', '*Config.in*', 'external.in*', + 'standard-modules.in'] + mimetypes = ['text/x-kconfig'] + # No re.MULTILINE, indentation-aware help text needs line-by-line handling + flags = 0 + + def call_indent(level): + # If indentation >= {level} is detected, enter state 'indent{level}' + return (_rx_indent(level), String.Doc, 'indent%s' % level) + + def do_indent(level): + # Print paragraphs of indentation level >= {level} as String.Doc, + # ignoring blank lines. Then return to 'root' state. + return [ + (_rx_indent(level), String.Doc), + (r'\s*\n', Text), + default('#pop:2') + ] + + tokens = { + 'root': [ + (r'\s+', Text), + (r'#.*?\n', Comment.Single), + (words(( + 'mainmenu', 'config', 'menuconfig', 'choice', 'endchoice', + 'comment', 'menu', 'endmenu', 'visible if', 'if', 'endif', + 'source', 'prompt', 'select', 'depends on', 'default', + 'range', 'option'), suffix=r'\b'), + Keyword), + (r'(---help---|help)[\t ]*\n', Keyword, 'help'), + (r'(bool|tristate|string|hex|int|defconfig_list|modules|env)\b', + Name.Builtin), + (r'[!=&|]', Operator), + (r'[()]', Punctuation), + (r'[0-9]+', Number.Integer), + (r"'(''|[^'])*'", String.Single), + (r'"(""|[^"])*"', String.Double), + (r'\S+', Text), + ], + # Help text is indented, multi-line and ends when a lower indentation + # level is detected. + 'help': [ + # Skip blank lines after help token, if any + (r'\s*\n', Text), + # Determine the first help line's indentation level heuristically(!). + # Attention: this is not perfect, but works for 99% of "normal" + # indentation schemes up to a max. indentation level of 7. 
+ call_indent(7), + call_indent(6), + call_indent(5), + call_indent(4), + call_indent(3), + call_indent(2), + call_indent(1), + default('#pop'), # for incomplete help sections without text + ], + # Handle text for indentation levels 7 to 1 + 'indent7': do_indent(7), + 'indent6': do_indent(6), + 'indent5': do_indent(5), + 'indent4': do_indent(4), + 'indent3': do_indent(3), + 'indent2': do_indent(2), + 'indent1': do_indent(1), + } + + +class Cfengine3Lexer(RegexLexer): + """ + Lexer for `CFEngine3 `_ policy files. + + .. versionadded:: 1.5 + """ + + name = 'CFEngine3' + aliases = ['cfengine3', 'cf3'] + filenames = ['*.cf'] + mimetypes = [] + + tokens = { + 'root': [ + (r'#.*?\n', Comment), + (r'(body)(\s+)(\S+)(\s+)(control)', + bygroups(Keyword, Text, Keyword, Text, Keyword)), + (r'(body|bundle)(\s+)(\S+)(\s+)(\w+)(\()', + bygroups(Keyword, Text, Keyword, Text, Name.Function, Punctuation), + 'arglist'), + (r'(body|bundle)(\s+)(\S+)(\s+)(\w+)', + bygroups(Keyword, Text, Keyword, Text, Name.Function)), + (r'(")([^"]+)(")(\s+)(string|slist|int|real)(\s*)(=>)(\s*)', + bygroups(Punctuation, Name.Variable, Punctuation, + Text, Keyword.Type, Text, Operator, Text)), + (r'(\S+)(\s*)(=>)(\s*)', + bygroups(Keyword.Reserved, Text, Operator, Text)), + (r'"', String, 'string'), + (r'(\w+)(\()', bygroups(Name.Function, Punctuation)), + (r'([\w.!&|()]+)(::)', bygroups(Name.Class, Punctuation)), + (r'(\w+)(:)', bygroups(Keyword.Declaration, Punctuation)), + (r'@[{(][^)}]+[})]', Name.Variable), + (r'[(){},;]', Punctuation), + (r'=>', Operator), + (r'->', Operator), + (r'\d+\.\d+', Number.Float), + (r'\d+', Number.Integer), + (r'\w+', Name.Function), + (r'\s+', Text), + ], + 'string': [ + (r'\$[{(]', String.Interpol, 'interpol'), + (r'\\.', String.Escape), + (r'"', String, '#pop'), + (r'\n', String), + (r'.', String), + ], + 'interpol': [ + (r'\$[{(]', String.Interpol, '#push'), + (r'[})]', String.Interpol, '#pop'), + (r'[^${()}]+', String.Interpol), + ], + 'arglist': [ + (r'\)', 
Punctuation, '#pop'), + (r',', Punctuation), + (r'\w+', Name.Variable), + (r'\s+', Text), + ], + } + + +class ApacheConfLexer(RegexLexer): + """ + Lexer for configuration files following the Apache config file + format. + + .. versionadded:: 0.6 + """ + + name = 'ApacheConf' + aliases = ['apacheconf', 'aconf', 'apache'] + filenames = ['.htaccess', 'apache.conf', 'apache2.conf'] + mimetypes = ['text/x-apacheconf'] + flags = re.MULTILINE | re.IGNORECASE + + tokens = { + 'root': [ + (r'\s+', Text), + (r'(#.*?)$', Comment), + (r'(<[^\s>]+)(?:(\s+)(.*?))?(>)', + bygroups(Name.Tag, Text, String, Name.Tag)), + (r'([a-z]\w*)(\s+)', + bygroups(Name.Builtin, Text), 'value'), + (r'\.+', Text), + ], + 'value': [ + (r'\\\n', Text), + (r'$', Text, '#pop'), + (r'\\', Text), + (r'[^\S\n]+', Text), + (r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number), + (r'\d+', Number), + (r'/([a-z0-9][\w./-]+)', String.Other), + (r'(on|off|none|any|all|double|email|dns|min|minimal|' + r'os|productonly|full|emerg|alert|crit|error|warn|' + r'notice|info|debug|registry|script|inetd|standalone|' + r'user|group)\b', Keyword), + (r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double), + (r'[^\s"\\]+', Text) + ], + } + + +class SquidConfLexer(RegexLexer): + """ + Lexer for `squid `_ configuration files. + + .. 
versionadded:: 0.9 + """ + + name = 'SquidConf' + aliases = ['squidconf', 'squid.conf', 'squid'] + filenames = ['squid.conf'] + mimetypes = ['text/x-squidconf'] + flags = re.IGNORECASE + + keywords = ( + "access_log", "acl", "always_direct", "announce_host", + "announce_period", "announce_port", "announce_to", "anonymize_headers", + "append_domain", "as_whois_server", "auth_param_basic", + "authenticate_children", "authenticate_program", "authenticate_ttl", + "broken_posts", "buffered_logs", "cache_access_log", "cache_announce", + "cache_dir", "cache_dns_program", "cache_effective_group", + "cache_effective_user", "cache_host", "cache_host_acl", + "cache_host_domain", "cache_log", "cache_mem", "cache_mem_high", + "cache_mem_low", "cache_mgr", "cachemgr_passwd", "cache_peer", + "cache_peer_access", "cahce_replacement_policy", "cache_stoplist", + "cache_stoplist_pattern", "cache_store_log", "cache_swap", + "cache_swap_high", "cache_swap_log", "cache_swap_low", "client_db", + "client_lifetime", "client_netmask", "connect_timeout", "coredump_dir", + "dead_peer_timeout", "debug_options", "delay_access", "delay_class", + "delay_initial_bucket_level", "delay_parameters", "delay_pools", + "deny_info", "dns_children", "dns_defnames", "dns_nameservers", + "dns_testnames", "emulate_httpd_log", "err_html_text", + "fake_user_agent", "firewall_ip", "forwarded_for", "forward_snmpd_port", + "fqdncache_size", "ftpget_options", "ftpget_program", "ftp_list_width", + "ftp_passive", "ftp_user", "half_closed_clients", "header_access", + "header_replace", "hierarchy_stoplist", "high_response_time_warning", + "high_page_fault_warning", "hosts_file", "htcp_port", "http_access", + "http_anonymizer", "httpd_accel", "httpd_accel_host", + "httpd_accel_port", "httpd_accel_uses_host_header", + "httpd_accel_with_proxy", "http_port", "http_reply_access", + "icp_access", "icp_hit_stale", "icp_port", "icp_query_timeout", + "ident_lookup", "ident_lookup_access", "ident_timeout", + 
"incoming_http_average", "incoming_icp_average", "inside_firewall", + "ipcache_high", "ipcache_low", "ipcache_size", "local_domain", + "local_ip", "logfile_rotate", "log_fqdn", "log_icp_queries", + "log_mime_hdrs", "maximum_object_size", "maximum_single_addr_tries", + "mcast_groups", "mcast_icp_query_timeout", "mcast_miss_addr", + "mcast_miss_encode_key", "mcast_miss_port", "memory_pools", + "memory_pools_limit", "memory_replacement_policy", "mime_table", + "min_http_poll_cnt", "min_icp_poll_cnt", "minimum_direct_hops", + "minimum_object_size", "minimum_retry_timeout", "miss_access", + "negative_dns_ttl", "negative_ttl", "neighbor_timeout", + "neighbor_type_domain", "netdb_high", "netdb_low", "netdb_ping_period", + "netdb_ping_rate", "never_direct", "no_cache", "passthrough_proxy", + "pconn_timeout", "pid_filename", "pinger_program", "positive_dns_ttl", + "prefer_direct", "proxy_auth", "proxy_auth_realm", "query_icmp", + "quick_abort", "quick_abort_max", "quick_abort_min", + "quick_abort_pct", "range_offset_limit", "read_timeout", + "redirect_children", "redirect_program", + "redirect_rewrites_host_header", "reference_age", + "refresh_pattern", "reload_into_ims", "request_body_max_size", + "request_size", "request_timeout", "shutdown_lifetime", + "single_parent_bypass", "siteselect_timeout", "snmp_access", + "snmp_incoming_address", "snmp_port", "source_ping", "ssl_proxy", + "store_avg_object_size", "store_objects_per_bucket", + "strip_query_terms", "swap_level1_dirs", "swap_level2_dirs", + "tcp_incoming_address", "tcp_outgoing_address", "tcp_recv_bufsize", + "test_reachability", "udp_hit_obj", "udp_hit_obj_size", + "udp_incoming_address", "udp_outgoing_address", "unique_hostname", + "unlinkd_program", "uri_whitespace", "useragent_log", + "visible_hostname", "wais_relay", "wais_relay_host", "wais_relay_port", + ) + + opts = ( + "proxy-only", "weight", "ttl", "no-query", "default", "round-robin", + "multicast-responder", "on", "off", "all", "deny", "allow", "via", + 
"parent", "no-digest", "heap", "lru", "realm", "children", "q1", "q2", + "credentialsttl", "none", "disable", "offline_toggle", "diskd", + ) + + actions = ( + "shutdown", "info", "parameter", "server_list", "client_list", + r'squid.conf', + ) + + actions_stats = ( + "objects", "vm_objects", "utilization", "ipcache", "fqdncache", "dns", + "redirector", "io", "reply_headers", "filedescriptors", "netdb", + ) + + actions_log = ("status", "enable", "disable", "clear") + + acls = ( + "url_regex", "urlpath_regex", "referer_regex", "port", "proto", + "req_mime_type", "rep_mime_type", "method", "browser", "user", "src", + "dst", "time", "dstdomain", "ident", "snmp_community", + ) + + ip_re = ( + r'(?:(?:(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|0x0*[0-9a-f]{1,2}|' + r'0+[1-3]?[0-7]{0,2})(?:\.(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|' + r'0x0*[0-9a-f]{1,2}|0+[1-3]?[0-7]{0,2})){3})|(?!.*::.*::)(?:(?!:)|' + r':(?=:))(?:[0-9a-f]{0,4}(?:(?<=::)|(?`_ configuration files. + + .. versionadded:: 0.11 + """ + name = 'Nginx configuration file' + aliases = ['nginx'] + filenames = [] + mimetypes = ['text/x-nginx-conf'] + + tokens = { + 'root': [ + (r'(include)(\s+)([^\s;]+)', bygroups(Keyword, Text, Name)), + (r'[^\s;#]+', Keyword, 'stmt'), + include('base'), + ], + 'block': [ + (r'\}', Punctuation, '#pop:2'), + (r'[^\s;#]+', Keyword.Namespace, 'stmt'), + include('base'), + ], + 'stmt': [ + (r'\{', Punctuation, 'block'), + (r';', Punctuation, '#pop'), + include('base'), + ], + 'base': [ + (r'#.*\n', Comment.Single), + (r'on|off', Name.Constant), + (r'\$[^\s;#()]+', Name.Variable), + (r'([a-z0-9.-]+)(:)([0-9]+)', + bygroups(Name, Punctuation, Number.Integer)), + (r'[a-z-]+/[a-z-+]+', String), # mimetype + # (r'[a-zA-Z._-]+', Keyword), + (r'[0-9]+[km]?\b', Number.Integer), + (r'(~)(\s*)([^\s{]+)', bygroups(Punctuation, Text, String.Regex)), + (r'[:=~]', Punctuation), + (r'[^\s;#{}$]+', String), # catch all + (r'/[^\s;#]*', Name), # pathname + (r'\s+', Text), + (r'[$;]', Text), # 
leftover characters + ], + } + + +class LighttpdConfLexer(RegexLexer): + """ + Lexer for `Lighttpd `_ configuration files. + + .. versionadded:: 0.11 + """ + name = 'Lighttpd configuration file' + aliases = ['lighty', 'lighttpd'] + filenames = [] + mimetypes = ['text/x-lighttpd-conf'] + + tokens = { + 'root': [ + (r'#.*\n', Comment.Single), + (r'/\S*', Name), # pathname + (r'[a-zA-Z._-]+', Keyword), + (r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number), + (r'[0-9]+', Number), + (r'=>|=~|\+=|==|=|\+', Operator), + (r'\$[A-Z]+', Name.Builtin), + (r'[(){}\[\],]', Punctuation), + (r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double), + (r'\s+', Text), + ], + + } + + +class DockerLexer(RegexLexer): + """ + Lexer for `Docker `_ configuration files. + + .. versionadded:: 2.0 + """ + name = 'Docker' + aliases = ['docker', 'dockerfile'] + filenames = ['Dockerfile', '*.docker'] + mimetypes = ['text/x-dockerfile-config'] + + _keywords = (r'(?:FROM|MAINTAINER|CMD|EXPOSE|ENV|ADD|ENTRYPOINT|' + r'VOLUME|WORKDIR)') + + flags = re.IGNORECASE | re.MULTILINE + + tokens = { + 'root': [ + (r'^(ONBUILD)(\s+)(%s)\b' % (_keywords,), + bygroups(Name.Keyword, Whitespace, Keyword)), + (r'^(%s)\b(.*)' % (_keywords,), bygroups(Keyword, String)), + (r'#.*', Comment), + (r'RUN', Keyword), # Rest of line falls through + (r'(.*\\\n)*.+', using(BashLexer)), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/console.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/console.py new file mode 100644 index 0000000..5612058 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/console.py @@ -0,0 +1,114 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.console + ~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for misc console output. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +from pygments.lexer import RegexLexer, include, bygroups +from pygments.token import Generic, Comment, String, Text, Keyword, Name, \ + Punctuation, Number + +__all__ = ['VCTreeStatusLexer', 'PyPyLogLexer'] + + +class VCTreeStatusLexer(RegexLexer): + """ + For colorizing output of version control status commans, like "hg + status" or "svn status". + + .. versionadded:: 2.0 + """ + name = 'VCTreeStatus' + aliases = ['vctreestatus'] + filenames = [] + mimetypes = [] + + tokens = { + 'root': [ + (r'^A \+ C\s+', Generic.Error), + (r'^A\s+\+?\s+', String), + (r'^M\s+', Generic.Inserted), + (r'^C\s+', Generic.Error), + (r'^D\s+', Generic.Deleted), + (r'^[?!]\s+', Comment.Preproc), + (r' >\s+.*\n', Comment.Preproc), + (r'.*\n', Text) + ] + } + + +class PyPyLogLexer(RegexLexer): + """ + Lexer for PyPy log files. + + .. versionadded:: 1.5 + """ + name = "PyPy Log" + aliases = ["pypylog", "pypy"] + filenames = ["*.pypylog"] + mimetypes = ['application/x-pypylog'] + + tokens = { + "root": [ + (r"\[\w+\] \{jit-log-.*?$", Keyword, "jit-log"), + (r"\[\w+\] \{jit-backend-counts$", Keyword, "jit-backend-counts"), + include("extra-stuff"), + ], + "jit-log": [ + (r"\[\w+\] jit-log-.*?}$", Keyword, "#pop"), + (r"^\+\d+: ", Comment), + (r"--end of the loop--", Comment), + (r"[ifp]\d+", Name), + (r"ptr\d+", Name), + (r"(\()(\w+(?:\.\w+)?)(\))", + bygroups(Punctuation, Name.Builtin, Punctuation)), + (r"[\[\]=,()]", Punctuation), + (r"(\d+\.\d+|inf|-inf)", Number.Float), + (r"-?\d+", Number.Integer), + (r"'.*'", String), + (r"(None|descr|ConstClass|ConstPtr|TargetToken)", Name), + (r"<.*?>+", Name.Builtin), + (r"(label|debug_merge_point|jump|finish)", Name.Class), + (r"(int_add_ovf|int_add|int_sub_ovf|int_sub|int_mul_ovf|int_mul|" + r"int_floordiv|int_mod|int_lshift|int_rshift|int_and|int_or|" + r"int_xor|int_eq|int_ne|int_ge|int_gt|int_le|int_lt|int_is_zero|" + r"int_is_true|" + r"uint_floordiv|uint_ge|uint_lt|" + r"float_add|float_sub|float_mul|float_truediv|float_neg|" + 
r"float_eq|float_ne|float_ge|float_gt|float_le|float_lt|float_abs|" + r"ptr_eq|ptr_ne|instance_ptr_eq|instance_ptr_ne|" + r"cast_int_to_float|cast_float_to_int|" + r"force_token|quasiimmut_field|same_as|virtual_ref_finish|" + r"virtual_ref|mark_opaque_ptr|" + r"call_may_force|call_assembler|call_loopinvariant|" + r"call_release_gil|call_pure|call|" + r"new_with_vtable|new_array|newstr|newunicode|new|" + r"arraylen_gc|" + r"getarrayitem_gc_pure|getarrayitem_gc|setarrayitem_gc|" + r"getarrayitem_raw|setarrayitem_raw|getfield_gc_pure|" + r"getfield_gc|getinteriorfield_gc|setinteriorfield_gc|" + r"getfield_raw|setfield_gc|setfield_raw|" + r"strgetitem|strsetitem|strlen|copystrcontent|" + r"unicodegetitem|unicodesetitem|unicodelen|" + r"guard_true|guard_false|guard_value|guard_isnull|" + r"guard_nonnull_class|guard_nonnull|guard_class|guard_no_overflow|" + r"guard_not_forced|guard_no_exception|guard_not_invalidated)", + Name.Builtin), + include("extra-stuff"), + ], + "jit-backend-counts": [ + (r"\[\w+\] jit-backend-counts}$", Keyword, "#pop"), + (r":", Punctuation), + (r"\d+", Number), + include("extra-stuff"), + ], + "extra-stuff": [ + (r"\s+", Text), + (r"#.*?$", Comment), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/css.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/css.py new file mode 100644 index 0000000..eeb0a4a --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/css.py @@ -0,0 +1,498 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.css + ~~~~~~~~~~~~~~~~~~~ + + Lexers for CSS and related stylesheet formats. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import re +import copy + +from pygments.lexer import ExtendedRegexLexer, RegexLexer, include, bygroups, \ + default, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation +from pygments.util import iteritems + +__all__ = ['CssLexer', 'SassLexer', 'ScssLexer'] + + +class CssLexer(RegexLexer): + """ + For CSS (Cascading Style Sheets). + """ + + name = 'CSS' + aliases = ['css'] + filenames = ['*.css'] + mimetypes = ['text/css'] + + tokens = { + 'root': [ + include('basics'), + ], + 'basics': [ + (r'\s+', Text), + (r'/\*(?:.|\n)*?\*/', Comment), + (r'\{', Punctuation, 'content'), + (r'\:[\w-]+', Name.Decorator), + (r'\.[\w-]+', Name.Class), + (r'\#[\w-]+', Name.Function), + (r'@[\w-]+', Keyword, 'atrule'), + (r'[\w-]+', Name.Tag), + (r'[~^*!%&$\[\]()<>|+=@:;,./?-]', Operator), + (r'"(\\\\|\\"|[^"])*"', String.Double), + (r"'(\\\\|\\'|[^'])*'", String.Single) + ], + 'atrule': [ + (r'\{', Punctuation, 'atcontent'), + (r';', Punctuation, '#pop'), + include('basics'), + ], + 'atcontent': [ + include('basics'), + (r'\}', Punctuation, '#pop:2'), + ], + 'content': [ + (r'\s+', Text), + (r'\}', Punctuation, '#pop'), + (r'url\(.*?\)', String.Other), + (r'^@.*?$', Comment.Preproc), + (words(( + 'azimuth', 'background-attachment', 'background-color', + 'background-image', 'background-position', 'background-repeat', + 'background', 'border-bottom-color', 'border-bottom-style', + 'border-bottom-width', 'border-left-color', 'border-left-style', + 'border-left-width', 'border-right', 'border-right-color', + 'border-right-style', 'border-right-width', 'border-top-color', + 'border-top-style', 'border-top-width', 'border-bottom', + 'border-collapse', 'border-left', 'border-width', 'border-color', + 'border-spacing', 'border-style', 'border-top', 'border', 'caption-side', + 'clear', 'clip', 'color', 'content', 'counter-increment', 'counter-reset', + 'cue-after', 'cue-before', 'cue', 'cursor', 'direction', 'display', + 
'elevation', 'empty-cells', 'float', 'font-family', 'font-size', + 'font-size-adjust', 'font-stretch', 'font-style', 'font-variant', + 'font-weight', 'font', 'height', 'letter-spacing', 'line-height', + 'list-style-type', 'list-style-image', 'list-style-position', + 'list-style', 'margin-bottom', 'margin-left', 'margin-right', + 'margin-top', 'margin', 'marker-offset', 'marks', 'max-height', 'max-width', + 'min-height', 'min-width', 'opacity', 'orphans', 'outline-color', + 'outline-style', 'outline-width', 'outline', 'overflow', 'overflow-x', + 'overflow-y', 'padding-bottom', 'padding-left', 'padding-right', 'padding-top', + 'padding', 'page', 'page-break-after', 'page-break-before', 'page-break-inside', + 'pause-after', 'pause-before', 'pause', 'pitch-range', 'pitch', + 'play-during', 'position', 'quotes', 'richness', 'right', 'size', + 'speak-header', 'speak-numeral', 'speak-punctuation', 'speak', + 'speech-rate', 'stress', 'table-layout', 'text-align', 'text-decoration', + 'text-indent', 'text-shadow', 'text-transform', 'top', 'unicode-bidi', + 'vertical-align', 'visibility', 'voice-family', 'volume', 'white-space', + 'widows', 'width', 'word-spacing', 'z-index', 'bottom', + 'above', 'absolute', 'always', 'armenian', 'aural', 'auto', 'avoid', 'baseline', + 'behind', 'below', 'bidi-override', 'blink', 'block', 'bolder', 'bold', 'both', + 'capitalize', 'center-left', 'center-right', 'center', 'circle', + 'cjk-ideographic', 'close-quote', 'collapse', 'condensed', 'continuous', + 'crop', 'crosshair', 'cross', 'cursive', 'dashed', 'decimal-leading-zero', + 'decimal', 'default', 'digits', 'disc', 'dotted', 'double', 'e-resize', 'embed', + 'extra-condensed', 'extra-expanded', 'expanded', 'fantasy', 'far-left', + 'far-right', 'faster', 'fast', 'fixed', 'georgian', 'groove', 'hebrew', 'help', + 'hidden', 'hide', 'higher', 'high', 'hiragana-iroha', 'hiragana', 'icon', + 'inherit', 'inline-table', 'inline', 'inset', 'inside', 'invert', 'italic', + 'justify', 
'katakana-iroha', 'katakana', 'landscape', 'larger', 'large', + 'left-side', 'leftwards', 'left', 'level', 'lighter', 'line-through', 'list-item', + 'loud', 'lower-alpha', 'lower-greek', 'lower-roman', 'lowercase', 'ltr', + 'lower', 'low', 'medium', 'message-box', 'middle', 'mix', 'monospace', + 'n-resize', 'narrower', 'ne-resize', 'no-close-quote', 'no-open-quote', + 'no-repeat', 'none', 'normal', 'nowrap', 'nw-resize', 'oblique', 'once', + 'open-quote', 'outset', 'outside', 'overline', 'pointer', 'portrait', 'px', + 'relative', 'repeat-x', 'repeat-y', 'repeat', 'rgb', 'ridge', 'right-side', + 'rightwards', 's-resize', 'sans-serif', 'scroll', 'se-resize', + 'semi-condensed', 'semi-expanded', 'separate', 'serif', 'show', 'silent', + 'slower', 'slow', 'small-caps', 'small-caption', 'smaller', 'soft', 'solid', + 'spell-out', 'square', 'static', 'status-bar', 'super', 'sw-resize', + 'table-caption', 'table-cell', 'table-column', 'table-column-group', + 'table-footer-group', 'table-header-group', 'table-row', + 'table-row-group', 'text-bottom', 'text-top', 'text', 'thick', 'thin', + 'transparent', 'ultra-condensed', 'ultra-expanded', 'underline', + 'upper-alpha', 'upper-latin', 'upper-roman', 'uppercase', 'url', + 'visible', 'w-resize', 'wait', 'wider', 'x-fast', 'x-high', 'x-large', 'x-loud', + 'x-low', 'x-small', 'x-soft', 'xx-large', 'xx-small', 'yes'), suffix=r'\b'), + Keyword), + (words(( + 'indigo', 'gold', 'firebrick', 'indianred', 'yellow', 'darkolivegreen', + 'darkseagreen', 'mediumvioletred', 'mediumorchid', 'chartreuse', + 'mediumslateblue', 'black', 'springgreen', 'crimson', 'lightsalmon', 'brown', + 'turquoise', 'olivedrab', 'cyan', 'silver', 'skyblue', 'gray', 'darkturquoise', + 'goldenrod', 'darkgreen', 'darkviolet', 'darkgray', 'lightpink', 'teal', + 'darkmagenta', 'lightgoldenrodyellow', 'lavender', 'yellowgreen', 'thistle', + 'violet', 'navy', 'orchid', 'blue', 'ghostwhite', 'honeydew', 'cornflowerblue', + 'darkblue', 'darkkhaki', 'mediumpurple', 
'cornsilk', 'red', 'bisque', 'slategray', + 'darkcyan', 'khaki', 'wheat', 'deepskyblue', 'darkred', 'steelblue', 'aliceblue', + 'gainsboro', 'mediumturquoise', 'floralwhite', 'coral', 'purple', 'lightgrey', + 'lightcyan', 'darksalmon', 'beige', 'azure', 'lightsteelblue', 'oldlace', + 'greenyellow', 'royalblue', 'lightseagreen', 'mistyrose', 'sienna', + 'lightcoral', 'orangered', 'navajowhite', 'lime', 'palegreen', 'burlywood', + 'seashell', 'mediumspringgreen', 'fuchsia', 'papayawhip', 'blanchedalmond', + 'peru', 'aquamarine', 'white', 'darkslategray', 'ivory', 'dodgerblue', + 'lemonchiffon', 'chocolate', 'orange', 'forestgreen', 'slateblue', 'olive', + 'mintcream', 'antiquewhite', 'darkorange', 'cadetblue', 'moccasin', + 'limegreen', 'saddlebrown', 'darkslateblue', 'lightskyblue', 'deeppink', + 'plum', 'aqua', 'darkgoldenrod', 'maroon', 'sandybrown', 'magenta', 'tan', + 'rosybrown', 'pink', 'lightblue', 'palevioletred', 'mediumseagreen', + 'dimgray', 'powderblue', 'seagreen', 'snow', 'mediumblue', 'midnightblue', + 'paleturquoise', 'palegoldenrod', 'whitesmoke', 'darkorchid', 'salmon', + 'lightslategray', 'lawngreen', 'lightgreen', 'tomato', 'hotpink', + 'lightyellow', 'lavenderblush', 'linen', 'mediumaquamarine', 'green', + 'blueviolet', 'peachpuff'), suffix=r'\b'), + Name.Builtin), + (r'\!important', Comment.Preproc), + (r'/\*(?:.|\n)*?\*/', Comment), + (r'\#[a-zA-Z0-9]{1,6}', Number), + (r'[.-]?[0-9]*[.]?[0-9]+(em|px|pt|pc|in|mm|cm|ex|s)\b', Number), + # Separate regex for percentages, as can't do word boundaries with % + (r'[.-]?[0-9]*[.]?[0-9]+%', Number), + (r'-?[0-9]+', Number), + (r'[~^*!%&<>|+=@:,./?-]+', Operator), + (r'[\[\]();]+', Punctuation), + (r'"(\\\\|\\"|[^"])*"', String.Double), + (r"'(\\\\|\\'|[^'])*'", String.Single), + (r'[a-zA-Z_]\w*', Name) + ] + } + + +common_sass_tokens = { + 'value': [ + (r'[ \t]+', Text), + (r'[!$][\w-]+', Name.Variable), + (r'url\(', String.Other, 'string-url'), + (r'[a-z_-][\w-]*(?=\()', Name.Function), + (words(( + 
'azimuth', 'background-attachment', 'background-color', + 'background-image', 'background-position', 'background-repeat', + 'background', 'border-bottom-color', 'border-bottom-style', + 'border-bottom-width', 'border-left-color', 'border-left-style', + 'border-left-width', 'border-right', 'border-right-color', + 'border-right-style', 'border-right-width', 'border-top-color', + 'border-top-style', 'border-top-width', 'border-bottom', + 'border-collapse', 'border-left', 'border-width', 'border-color', + 'border-spacing', 'border-style', 'border-top', 'border', 'caption-side', + 'clear', 'clip', 'color', 'content', 'counter-increment', 'counter-reset', + 'cue-after', 'cue-before', 'cue', 'cursor', 'direction', 'display', + 'elevation', 'empty-cells', 'float', 'font-family', 'font-size', + 'font-size-adjust', 'font-stretch', 'font-style', 'font-variant', + 'font-weight', 'font', 'height', 'letter-spacing', 'line-height', + 'list-style-type', 'list-style-image', 'list-style-position', + 'list-style', 'margin-bottom', 'margin-left', 'margin-right', + 'margin-top', 'margin', 'marker-offset', 'marks', 'max-height', 'max-width', + 'min-height', 'min-width', 'opacity', 'orphans', 'outline', 'outline-color', + 'outline-style', 'outline-width', 'overflow', 'padding-bottom', + 'padding-left', 'padding-right', 'padding-top', 'padding', 'page', + 'page-break-after', 'page-break-before', 'page-break-inside', + 'pause-after', 'pause-before', 'pause', 'pitch', 'pitch-range', + 'play-during', 'position', 'quotes', 'richness', 'right', 'size', + 'speak-header', 'speak-numeral', 'speak-punctuation', 'speak', + 'speech-rate', 'stress', 'table-layout', 'text-align', 'text-decoration', + 'text-indent', 'text-shadow', 'text-transform', 'top', 'unicode-bidi', + 'vertical-align', 'visibility', 'voice-family', 'volume', 'white-space', + 'widows', 'width', 'word-spacing', 'z-index', 'bottom', 'left', + 'above', 'absolute', 'always', 'armenian', 'aural', 'auto', 'avoid', 'baseline', + 'behind', 
'below', 'bidi-override', 'blink', 'block', 'bold', 'bolder', 'both', + 'capitalize', 'center-left', 'center-right', 'center', 'circle', + 'cjk-ideographic', 'close-quote', 'collapse', 'condensed', 'continuous', + 'crop', 'crosshair', 'cross', 'cursive', 'dashed', 'decimal-leading-zero', + 'decimal', 'default', 'digits', 'disc', 'dotted', 'double', 'e-resize', 'embed', + 'extra-condensed', 'extra-expanded', 'expanded', 'fantasy', 'far-left', + 'far-right', 'faster', 'fast', 'fixed', 'georgian', 'groove', 'hebrew', 'help', + 'hidden', 'hide', 'higher', 'high', 'hiragana-iroha', 'hiragana', 'icon', + 'inherit', 'inline-table', 'inline', 'inset', 'inside', 'invert', 'italic', + 'justify', 'katakana-iroha', 'katakana', 'landscape', 'larger', 'large', + 'left-side', 'leftwards', 'level', 'lighter', 'line-through', 'list-item', + 'loud', 'lower-alpha', 'lower-greek', 'lower-roman', 'lowercase', 'ltr', + 'lower', 'low', 'medium', 'message-box', 'middle', 'mix', 'monospace', + 'n-resize', 'narrower', 'ne-resize', 'no-close-quote', 'no-open-quote', + 'no-repeat', 'none', 'normal', 'nowrap', 'nw-resize', 'oblique', 'once', + 'open-quote', 'outset', 'outside', 'overline', 'pointer', 'portrait', 'px', + 'relative', 'repeat-x', 'repeat-y', 'repeat', 'rgb', 'ridge', 'right-side', + 'rightwards', 's-resize', 'sans-serif', 'scroll', 'se-resize', + 'semi-condensed', 'semi-expanded', 'separate', 'serif', 'show', 'silent', + 'slow', 'slower', 'small-caps', 'small-caption', 'smaller', 'soft', 'solid', + 'spell-out', 'square', 'static', 'status-bar', 'super', 'sw-resize', + 'table-caption', 'table-cell', 'table-column', 'table-column-group', + 'table-footer-group', 'table-header-group', 'table-row', + 'table-row-group', 'text', 'text-bottom', 'text-top', 'thick', 'thin', + 'transparent', 'ultra-condensed', 'ultra-expanded', 'underline', + 'upper-alpha', 'upper-latin', 'upper-roman', 'uppercase', 'url', + 'visible', 'w-resize', 'wait', 'wider', 'x-fast', 'x-high', 'x-large', 'x-loud', + 
'x-low', 'x-small', 'x-soft', 'xx-large', 'xx-small', 'yes'), suffix=r'\b'), + Name.Constant), + (words(( + 'indigo', 'gold', 'firebrick', 'indianred', 'darkolivegreen', + 'darkseagreen', 'mediumvioletred', 'mediumorchid', 'chartreuse', + 'mediumslateblue', 'springgreen', 'crimson', 'lightsalmon', 'brown', + 'turquoise', 'olivedrab', 'cyan', 'skyblue', 'darkturquoise', + 'goldenrod', 'darkgreen', 'darkviolet', 'darkgray', 'lightpink', + 'darkmagenta', 'lightgoldenrodyellow', 'lavender', 'yellowgreen', 'thistle', + 'violet', 'orchid', 'ghostwhite', 'honeydew', 'cornflowerblue', + 'darkblue', 'darkkhaki', 'mediumpurple', 'cornsilk', 'bisque', 'slategray', + 'darkcyan', 'khaki', 'wheat', 'deepskyblue', 'darkred', 'steelblue', 'aliceblue', + 'gainsboro', 'mediumturquoise', 'floralwhite', 'coral', 'lightgrey', + 'lightcyan', 'darksalmon', 'beige', 'azure', 'lightsteelblue', 'oldlace', + 'greenyellow', 'royalblue', 'lightseagreen', 'mistyrose', 'sienna', + 'lightcoral', 'orangered', 'navajowhite', 'palegreen', 'burlywood', + 'seashell', 'mediumspringgreen', 'papayawhip', 'blanchedalmond', + 'peru', 'aquamarine', 'darkslategray', 'ivory', 'dodgerblue', + 'lemonchiffon', 'chocolate', 'orange', 'forestgreen', 'slateblue', + 'mintcream', 'antiquewhite', 'darkorange', 'cadetblue', 'moccasin', + 'limegreen', 'saddlebrown', 'darkslateblue', 'lightskyblue', 'deeppink', + 'plum', 'darkgoldenrod', 'sandybrown', 'magenta', 'tan', + 'rosybrown', 'pink', 'lightblue', 'palevioletred', 'mediumseagreen', + 'dimgray', 'powderblue', 'seagreen', 'snow', 'mediumblue', 'midnightblue', + 'paleturquoise', 'palegoldenrod', 'whitesmoke', 'darkorchid', 'salmon', + 'lightslategray', 'lawngreen', 'lightgreen', 'tomato', 'hotpink', + 'lightyellow', 'lavenderblush', 'linen', 'mediumaquamarine', + 'blueviolet', 'peachpuff'), suffix=r'\b'), + Name.Entity), + (words(( + 'black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', + 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 
'aqua'), suffix=r'\b'), + Name.Builtin), + (r'\!(important|default)', Name.Exception), + (r'(true|false)', Name.Pseudo), + (r'(and|or|not)', Operator.Word), + (r'/\*', Comment.Multiline, 'inline-comment'), + (r'//[^\n]*', Comment.Single), + (r'\#[a-z0-9]{1,6}', Number.Hex), + (r'(-?\d+)(\%|[a-z]+)?', bygroups(Number.Integer, Keyword.Type)), + (r'(-?\d*\.\d+)(\%|[a-z]+)?', bygroups(Number.Float, Keyword.Type)), + (r'#\{', String.Interpol, 'interpolation'), + (r'[~^*!&%<>|+=@:,./?-]+', Operator), + (r'[\[\]()]+', Punctuation), + (r'"', String.Double, 'string-double'), + (r"'", String.Single, 'string-single'), + (r'[a-z_-][\w-]*', Name), + ], + + 'interpolation': [ + (r'\}', String.Interpol, '#pop'), + include('value'), + ], + + 'selector': [ + (r'[ \t]+', Text), + (r'\:', Name.Decorator, 'pseudo-class'), + (r'\.', Name.Class, 'class'), + (r'\#', Name.Namespace, 'id'), + (r'[\w-]+', Name.Tag), + (r'#\{', String.Interpol, 'interpolation'), + (r'&', Keyword), + (r'[~^*!&\[\]()<>|+=@:;,./?-]', Operator), + (r'"', String.Double, 'string-double'), + (r"'", String.Single, 'string-single'), + ], + + 'string-double': [ + (r'(\\.|#(?=[^\n{])|[^\n"#])+', String.Double), + (r'#\{', String.Interpol, 'interpolation'), + (r'"', String.Double, '#pop'), + ], + + 'string-single': [ + (r"(\\.|#(?=[^\n{])|[^\n'#])+", String.Double), + (r'#\{', String.Interpol, 'interpolation'), + (r"'", String.Double, '#pop'), + ], + + 'string-url': [ + (r'(\\#|#(?=[^\n{])|[^\n#)])+', String.Other), + (r'#\{', String.Interpol, 'interpolation'), + (r'\)', String.Other, '#pop'), + ], + + 'pseudo-class': [ + (r'[\w-]+', Name.Decorator), + (r'#\{', String.Interpol, 'interpolation'), + default('#pop'), + ], + + 'class': [ + (r'[\w-]+', Name.Class), + (r'#\{', String.Interpol, 'interpolation'), + default('#pop'), + ], + + 'id': [ + (r'[\w-]+', Name.Namespace), + (r'#\{', String.Interpol, 'interpolation'), + default('#pop'), + ], + + 'for': [ + (r'(from|to|through)', Operator.Word), + include('value'), + ], +} 
+ + +def _indentation(lexer, match, ctx): + indentation = match.group(0) + yield match.start(), Text, indentation + ctx.last_indentation = indentation + ctx.pos = match.end() + + if hasattr(ctx, 'block_state') and ctx.block_state and \ + indentation.startswith(ctx.block_indentation) and \ + indentation != ctx.block_indentation: + ctx.stack.append(ctx.block_state) + else: + ctx.block_state = None + ctx.block_indentation = None + ctx.stack.append('content') + + +def _starts_block(token, state): + def callback(lexer, match, ctx): + yield match.start(), token, match.group(0) + + if hasattr(ctx, 'last_indentation'): + ctx.block_indentation = ctx.last_indentation + else: + ctx.block_indentation = '' + + ctx.block_state = state + ctx.pos = match.end() + + return callback + + +class SassLexer(ExtendedRegexLexer): + """ + For Sass stylesheets. + + .. versionadded:: 1.3 + """ + + name = 'Sass' + aliases = ['sass'] + filenames = ['*.sass'] + mimetypes = ['text/x-sass'] + + flags = re.IGNORECASE | re.MULTILINE + + tokens = { + 'root': [ + (r'[ \t]*\n', Text), + (r'[ \t]*', _indentation), + ], + + 'content': [ + (r'//[^\n]*', _starts_block(Comment.Single, 'single-comment'), + 'root'), + (r'/\*[^\n]*', _starts_block(Comment.Multiline, 'multi-comment'), + 'root'), + (r'@import', Keyword, 'import'), + (r'@for', Keyword, 'for'), + (r'@(debug|warn|if|while)', Keyword, 'value'), + (r'(@mixin)( [\w-]+)', bygroups(Keyword, Name.Function), 'value'), + (r'(@include)( [\w-]+)', bygroups(Keyword, Name.Decorator), 'value'), + (r'@extend', Keyword, 'selector'), + (r'@[\w-]+', Keyword, 'selector'), + (r'=[\w-]+', Name.Function, 'value'), + (r'\+[\w-]+', Name.Decorator, 'value'), + (r'([!$][\w-]\w*)([ \t]*(?:(?:\|\|)?=|:))', + bygroups(Name.Variable, Operator), 'value'), + (r':', Name.Attribute, 'old-style-attr'), + (r'(?=.+?[=:]([^a-z]|$))', Name.Attribute, 'new-style-attr'), + default('selector'), + ], + + 'single-comment': [ + (r'.+', Comment.Single), + (r'\n', Text, 'root'), + ], + + 
'multi-comment': [ + (r'.+', Comment.Multiline), + (r'\n', Text, 'root'), + ], + + 'import': [ + (r'[ \t]+', Text), + (r'\S+', String), + (r'\n', Text, 'root'), + ], + + 'old-style-attr': [ + (r'[^\s:="\[]+', Name.Attribute), + (r'#\{', String.Interpol, 'interpolation'), + (r'[ \t]*=', Operator, 'value'), + default('value'), + ], + + 'new-style-attr': [ + (r'[^\s:="\[]+', Name.Attribute), + (r'#\{', String.Interpol, 'interpolation'), + (r'[ \t]*[=:]', Operator, 'value'), + ], + + 'inline-comment': [ + (r"(\\#|#(?=[^\n{])|\*(?=[^\n/])|[^\n#*])+", Comment.Multiline), + (r'#\{', String.Interpol, 'interpolation'), + (r"\*/", Comment, '#pop'), + ], + } + for group, common in iteritems(common_sass_tokens): + tokens[group] = copy.copy(common) + tokens['value'].append((r'\n', Text, 'root')) + tokens['selector'].append((r'\n', Text, 'root')) + + +class ScssLexer(RegexLexer): + """ + For SCSS stylesheets. + """ + + name = 'SCSS' + aliases = ['scss'] + filenames = ['*.scss'] + mimetypes = ['text/x-scss'] + + flags = re.IGNORECASE | re.DOTALL + tokens = { + 'root': [ + (r'\s+', Text), + (r'//.*?\n', Comment.Single), + (r'/\*.*?\*/', Comment.Multiline), + (r'@import', Keyword, 'value'), + (r'@for', Keyword, 'for'), + (r'@(debug|warn|if|while)', Keyword, 'value'), + (r'(@mixin)( [\w-]+)', bygroups(Keyword, Name.Function), 'value'), + (r'(@include)( [\w-]+)', bygroups(Keyword, Name.Decorator), 'value'), + (r'@extend', Keyword, 'selector'), + (r'(@media)(\s+)', bygroups(Keyword, Text), 'value'), + (r'@[\w-]+', Keyword, 'selector'), + (r'(\$[\w-]*\w)([ \t]*:)', bygroups(Name.Variable, Operator), 'value'), + (r'(?=[^;{}][;}])', Name.Attribute, 'attr'), + (r'(?=[^;{}:]+:[^a-z])', Name.Attribute, 'attr'), + default('selector'), + ], + + 'attr': [ + (r'[^\s:="\[]+', Name.Attribute), + (r'#\{', String.Interpol, 'interpolation'), + (r'[ \t]*:', Operator, 'value'), + ], + + 'inline-comment': [ + (r"(\\#|#(?=[^{])|\*(?=[^/])|[^#*])+", Comment.Multiline), + (r'#\{', String.Interpol, 
'interpolation'), + (r"\*/", Comment, '#pop'), + ], + } + for group, common in iteritems(common_sass_tokens): + tokens[group] = copy.copy(common) + tokens['value'].extend([(r'\n', Text), (r'[;{}]', Punctuation, '#pop')]) + tokens['selector'].extend([(r'\n', Text), (r'[;{}]', Punctuation, '#pop')]) diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/d.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/d.py new file mode 100644 index 0000000..efd97be --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/d.py @@ -0,0 +1,251 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.d + ~~~~~~~~~~~~~~~~~ + + Lexers for D languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, include, words +from pygments.token import Text, Comment, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['DLexer', 'CrocLexer', 'MiniDLexer'] + + +class DLexer(RegexLexer): + """ + For D source. + + .. 
versionadded:: 1.2 + """ + name = 'D' + filenames = ['*.d', '*.di'] + aliases = ['d'] + mimetypes = ['text/x-dsrc'] + + tokens = { + 'root': [ + (r'\n', Text), + (r'\s+', Text), + # (r'\\\n', Text), # line continuations + # Comments + (r'//(.*?)\n', Comment.Single), + (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), + (r'/\+', Comment.Multiline, 'nested_comment'), + # Keywords + (words(( + 'abstract', 'alias', 'align', 'asm', 'assert', 'auto', 'body', + 'break', 'case', 'cast', 'catch', 'class', 'const', 'continue', + 'debug', 'default', 'delegate', 'delete', 'deprecated', 'do', 'else', + 'enum', 'export', 'extern', 'finally', 'final', 'foreach_reverse', + 'foreach', 'for', 'function', 'goto', 'if', 'immutable', 'import', + 'interface', 'invariant', 'inout', 'in', 'is', 'lazy', 'mixin', + 'module', 'new', 'nothrow', 'out', 'override', 'package', 'pragma', + 'private', 'protected', 'public', 'pure', 'ref', 'return', 'scope', + 'shared', 'static', 'struct', 'super', 'switch', 'synchronized', + 'template', 'this', 'throw', 'try', 'typedef', 'typeid', 'typeof', + 'union', 'unittest', 'version', 'volatile', 'while', 'with', + '__gshared', '__traits', '__vector', '__parameters'), + suffix=r'\b'), + Keyword), + (words(( + 'bool', 'byte', 'cdouble', 'cent', 'cfloat', 'char', 'creal', + 'dchar', 'double', 'float', 'idouble', 'ifloat', 'int', 'ireal', + 'long', 'real', 'short', 'ubyte', 'ucent', 'uint', 'ulong', + 'ushort', 'void', 'wchar'), suffix=r'\b'), + Keyword.Type), + (r'(false|true|null)\b', Keyword.Constant), + (words(( + '__FILE__', '__MODULE__', '__LINE__', '__FUNCTION__', '__PRETTY_FUNCTION__' + '', '__DATE__', '__EOF__', '__TIME__', '__TIMESTAMP__', '__VENDOR__', + '__VERSION__'), suffix=r'\b'), + Keyword.Pseudo), + (r'macro\b', Keyword.Reserved), + (r'(string|wstring|dstring|size_t|ptrdiff_t)\b', Name.Builtin), + # FloatLiteral + # -- HexFloat + (r'0[xX]([0-9a-fA-F_]*\.[0-9a-fA-F_]+|[0-9a-fA-F_]+)' + r'[pP][+\-]?[0-9_]+[fFL]?[i]?', Number.Float), + # -- 
DecimalFloat + (r'[0-9_]+(\.[0-9_]+[eE][+\-]?[0-9_]+|' + r'\.[0-9_]*|[eE][+\-]?[0-9_]+)[fFL]?[i]?', Number.Float), + (r'\.(0|[1-9][0-9_]*)([eE][+\-]?[0-9_]+)?[fFL]?[i]?', Number.Float), + # IntegerLiteral + # -- Binary + (r'0[Bb][01_]+', Number.Bin), + # -- Octal + (r'0[0-7_]+', Number.Oct), + # -- Hexadecimal + (r'0[xX][0-9a-fA-F_]+', Number.Hex), + # -- Decimal + (r'(0|[1-9][0-9_]*)([LUu]|Lu|LU|uL|UL)?', Number.Integer), + # CharacterLiteral + (r"""'(\\['"?\\abfnrtv]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}""" + r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\&\w+;|.)'""", + String.Char), + # StringLiteral + # -- WysiwygString + (r'r"[^"]*"[cwd]?', String), + # -- AlternateWysiwygString + (r'`[^`]*`[cwd]?', String), + # -- DoubleQuotedString + (r'"(\\\\|\\"|[^"])*"[cwd]?', String), + # -- EscapeSequence + (r"\\(['\"?\\abfnrtv]|x[0-9a-fA-F]{2}|[0-7]{1,3}" + r"|u[0-9a-fA-F]{4}|U[0-9a-fA-F]{8}|&\w+;)", + String), + # -- HexString + (r'x"[0-9a-fA-F_\s]*"[cwd]?', String), + # -- DelimitedString + (r'q"\[', String, 'delimited_bracket'), + (r'q"\(', String, 'delimited_parenthesis'), + (r'q"<', String, 'delimited_angle'), + (r'q"\{', String, 'delimited_curly'), + (r'q"([a-zA-Z_]\w*)\n.*?\n\1"', String), + (r'q"(.).*?\1"', String), + # -- TokenString + (r'q\{', String, 'token_string'), + # Attributes + (r'@([a-zA-Z_]\w*)?', Name.Decorator), + # Tokens + (r'(~=|\^=|%=|\*=|==|!>=|!<=|!<>=|!<>|!<|!>|!=|>>>=|>>>|>>=|>>|>=' + r'|<>=|<>|<<=|<<|<=|\+\+|\+=|--|-=|\|\||\|=|&&|&=|\.\.\.|\.\.|/=)' + r'|[/.&|\-+<>!()\[\]{}?,;:$=*%^~]', Punctuation), + # Identifier + (r'[a-zA-Z_]\w*', Name), + # Line + (r'#line\s.*\n', Comment.Special), + ], + 'nested_comment': [ + (r'[^+/]+', Comment.Multiline), + (r'/\+', Comment.Multiline, '#push'), + (r'\+/', Comment.Multiline, '#pop'), + (r'[+/]', Comment.Multiline), + ], + 'token_string': [ + (r'\{', Punctuation, 'token_string_nest'), + (r'\}', String, '#pop'), + include('root'), + ], + 'token_string_nest': [ + (r'\{', Punctuation, '#push'), + (r'\}', 
Punctuation, '#pop'), + include('root'), + ], + 'delimited_bracket': [ + (r'[^\[\]]+', String), + (r'\[', String, 'delimited_inside_bracket'), + (r'\]"', String, '#pop'), + ], + 'delimited_inside_bracket': [ + (r'[^\[\]]+', String), + (r'\[', String, '#push'), + (r'\]', String, '#pop'), + ], + 'delimited_parenthesis': [ + (r'[^()]+', String), + (r'\(', String, 'delimited_inside_parenthesis'), + (r'\)"', String, '#pop'), + ], + 'delimited_inside_parenthesis': [ + (r'[^()]+', String), + (r'\(', String, '#push'), + (r'\)', String, '#pop'), + ], + 'delimited_angle': [ + (r'[^<>]+', String), + (r'<', String, 'delimited_inside_angle'), + (r'>"', String, '#pop'), + ], + 'delimited_inside_angle': [ + (r'[^<>]+', String), + (r'<', String, '#push'), + (r'>', String, '#pop'), + ], + 'delimited_curly': [ + (r'[^{}]+', String), + (r'\{', String, 'delimited_inside_curly'), + (r'\}"', String, '#pop'), + ], + 'delimited_inside_curly': [ + (r'[^{}]+', String), + (r'\{', String, '#push'), + (r'\}', String, '#pop'), + ], + } + + +class CrocLexer(RegexLexer): + """ + For `Croc `_ source. 
+ """ + name = 'Croc' + filenames = ['*.croc'] + aliases = ['croc'] + mimetypes = ['text/x-crocsrc'] + + tokens = { + 'root': [ + (r'\n', Text), + (r'\s+', Text), + # Comments + (r'//(.*?)\n', Comment.Single), + (r'/\*', Comment.Multiline, 'nestedcomment'), + # Keywords + (words(( + 'as', 'assert', 'break', 'case', 'catch', 'class', 'continue', + 'default', 'do', 'else', 'finally', 'for', 'foreach', 'function', + 'global', 'namespace', 'if', 'import', 'in', 'is', 'local', + 'module', 'return', 'scope', 'super', 'switch', 'this', 'throw', + 'try', 'vararg', 'while', 'with', 'yield'), suffix=r'\b'), + Keyword), + (r'(false|true|null)\b', Keyword.Constant), + # FloatLiteral + (r'([0-9][0-9_]*)(?=[.eE])(\.[0-9][0-9_]*)?([eE][+\-]?[0-9_]+)?', + Number.Float), + # IntegerLiteral + # -- Binary + (r'0[bB][01][01_]*', Number.Bin), + # -- Hexadecimal + (r'0[xX][0-9a-fA-F][0-9a-fA-F_]*', Number.Hex), + # -- Decimal + (r'([0-9][0-9_]*)(?![.eE])', Number.Integer), + # CharacterLiteral + (r"""'(\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\[0-9]{1,3}""" + r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|.)'""", + String.Char), + # StringLiteral + # -- WysiwygString + (r'@"(""|[^"])*"', String), + (r'@`(``|[^`])*`', String), + (r"@'(''|[^'])*'", String), + # -- DoubleQuotedString + (r'"(\\\\|\\"|[^"])*"', String), + # Tokens + (r'(~=|\^=|%=|\*=|==|!=|>>>=|>>>|>>=|>>|>=|<=>|\?=|-\>' + r'|<<=|<<|<=|\+\+|\+=|--|-=|\|\||\|=|&&|&=|\.\.|/=)' + r'|[-/.&$@|\+<>!()\[\]{}?,;:=*%^~#\\]', Punctuation), + # Identifier + (r'[a-zA-Z_]\w*', Name), + ], + 'nestedcomment': [ + (r'[^*/]+', Comment.Multiline), + (r'/\*', Comment.Multiline, '#push'), + (r'\*/', Comment.Multiline, '#pop'), + (r'[*/]', Comment.Multiline), + ], + } + + +class MiniDLexer(CrocLexer): + """ + For MiniD source. MiniD is now known as Croc. 
+ """ + name = 'MiniD' + filenames = [] # don't lex .md as MiniD, reserve for Markdown + aliases = ['minid'] + mimetypes = ['text/x-minidsrc'] diff --git a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/dalvik.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/dalvik.py similarity index 65% rename from plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/dalvik.py rename to plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/dalvik.py index de9b11f..5f5c6c7 100644 --- a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/dalvik.py +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/dalvik.py @@ -5,13 +5,15 @@ Pygments lexers for Dalvik VM-related languages. - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ +import re + from pygments.lexer import RegexLexer, include, bygroups from pygments.token import Keyword, Text, Comment, Name, String, Number, \ - Punctuation + Punctuation __all__ = ['SmaliLexer'] @@ -21,7 +23,7 @@ class SmaliLexer(RegexLexer): For `Smali `_ (Android/Dalvik) assembly code. - *New in Pygments 1.6.* + .. 
versionadded:: 1.6 """ name = 'Smali' aliases = ['smali'] @@ -63,8 +65,8 @@ class SmaliLexer(RegexLexer): (r'\s+', Text), ], 'instruction': [ - (r'\b[vp]\d+\b', Name.Builtin), # registers - (r'\b[a-z][A-Za-z0-9/-]+\s+', Text), # instructions + (r'\b[vp]\d+\b', Name.Builtin), # registers + (r'\b[a-z][A-Za-z0-9/-]+\s+', Text), # instructions ], 'literal': [ (r'".*"', String), @@ -73,27 +75,27 @@ class SmaliLexer(RegexLexer): (r'[0-9]+L?', Number.Integer), ], 'field': [ - (r'(\$?\b)([A-Za-z0-9_$]*)(:)', + (r'(\$?\b)([\w$]*)(:)', bygroups(Punctuation, Name.Variable, Punctuation)), ], 'method': [ - (r'<(?:cl)?init>', Name.Function), # constructor - (r'(\$?\b)([A-Za-z0-9_$]*)(\()', + (r'<(?:cl)?init>', Name.Function), # constructor + (r'(\$?\b)([\w$]*)(\()', bygroups(Punctuation, Name.Function, Punctuation)), ], 'label': [ - (r':[A-Za-z0-9_]+', Name.Label), + (r':\w+', Name.Label), ], 'class': [ # class names in the form Lcom/namespace/ClassName; # I only want to color the ClassName part, so the namespace part is # treated as 'Text' - (r'(L)((?:[A-Za-z0-9_$]+/)*)([A-Za-z0-9_$]+)(;)', + (r'(L)((?:[\w$]+/)*)([\w$]+)(;)', bygroups(Keyword.Type, Text, Name.Class, Text)), ], 'punctuation': [ (r'->', Punctuation), - (r'[{},\(\):=\.-]', Punctuation), + (r'[{},():=.-]', Punctuation), ], 'type': [ (r'[ZBSCIJFDV\[]+', Keyword.Type), @@ -102,3 +104,22 @@ class SmaliLexer(RegexLexer): (r'#.*?\n', Comment), ], } + + def analyse_text(text): + score = 0 + if re.search(r'^\s*\.class\s', text, re.MULTILINE): + score += 0.5 + if re.search(r'\b((check-cast|instance-of|throw-verification-error' + r')\b|(-to|add|[ais]get|[ais]put|and|cmpl|const|div|' + r'if|invoke|move|mul|neg|not|or|rem|return|rsub|shl|' + r'shr|sub|ushr)[-/])|{|}', text, re.MULTILINE): + score += 0.3 + if re.search(r'(\.(catchall|epilogue|restart local|prologue)|' + r'\b(array-data|class-change-error|declared-synchronized|' + r'(field|inline|vtable)@0x[0-9a-fA-F]|generic-error|' + 
r'illegal-class-access|illegal-field-access|' + r'illegal-method-access|instantiation-error|no-error|' + r'no-such-class|no-such-field|no-such-method|' + r'packed-switch|sparse-switch))\b', text, re.MULTILINE): + score += 0.6 + return score diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/data.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/data.py new file mode 100644 index 0000000..f41a51e --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/data.py @@ -0,0 +1,530 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.data + ~~~~~~~~~~~~~~~~~~~~ + + Lexers for data file format. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, ExtendedRegexLexer, LexerContext, \ + include, bygroups, inherit +from pygments.token import Text, Comment, Keyword, Name, String, Number, \ + Punctuation, Literal + +__all__ = ['YamlLexer', 'JsonLexer', 'JsonLdLexer'] + + +class YamlLexerContext(LexerContext): + """Indentation context for the YAML lexer.""" + + def __init__(self, *args, **kwds): + super(YamlLexerContext, self).__init__(*args, **kwds) + self.indent_stack = [] + self.indent = -1 + self.next_indent = 0 + self.block_scalar_indent = None + + +class YamlLexer(ExtendedRegexLexer): + """ + Lexer for `YAML `_, a human-friendly data serialization + language. + + .. 
versionadded:: 0.11 + """ + + name = 'YAML' + aliases = ['yaml'] + filenames = ['*.yaml', '*.yml'] + mimetypes = ['text/x-yaml'] + + def something(token_class): + """Do not produce empty tokens.""" + def callback(lexer, match, context): + text = match.group() + if not text: + return + yield match.start(), token_class, text + context.pos = match.end() + return callback + + def reset_indent(token_class): + """Reset the indentation levels.""" + def callback(lexer, match, context): + text = match.group() + context.indent_stack = [] + context.indent = -1 + context.next_indent = 0 + context.block_scalar_indent = None + yield match.start(), token_class, text + context.pos = match.end() + return callback + + def save_indent(token_class, start=False): + """Save a possible indentation level.""" + def callback(lexer, match, context): + text = match.group() + extra = '' + if start: + context.next_indent = len(text) + if context.next_indent < context.indent: + while context.next_indent < context.indent: + context.indent = context.indent_stack.pop() + if context.next_indent > context.indent: + extra = text[context.indent:] + text = text[:context.indent] + else: + context.next_indent += len(text) + if text: + yield match.start(), token_class, text + if extra: + yield match.start()+len(text), token_class.Error, extra + context.pos = match.end() + return callback + + def set_indent(token_class, implicit=False): + """Set the previously saved indentation level.""" + def callback(lexer, match, context): + text = match.group() + if context.indent < context.next_indent: + context.indent_stack.append(context.indent) + context.indent = context.next_indent + if not implicit: + context.next_indent += len(text) + yield match.start(), token_class, text + context.pos = match.end() + return callback + + def set_block_scalar_indent(token_class): + """Set an explicit indentation level for a block scalar.""" + def callback(lexer, match, context): + text = match.group() + 
context.block_scalar_indent = None + if not text: + return + increment = match.group(1) + if increment: + current_indent = max(context.indent, 0) + increment = int(increment) + context.block_scalar_indent = current_indent + increment + if text: + yield match.start(), token_class, text + context.pos = match.end() + return callback + + def parse_block_scalar_empty_line(indent_token_class, content_token_class): + """Process an empty line in a block scalar.""" + def callback(lexer, match, context): + text = match.group() + if (context.block_scalar_indent is None or + len(text) <= context.block_scalar_indent): + if text: + yield match.start(), indent_token_class, text + else: + indentation = text[:context.block_scalar_indent] + content = text[context.block_scalar_indent:] + yield match.start(), indent_token_class, indentation + yield (match.start()+context.block_scalar_indent, + content_token_class, content) + context.pos = match.end() + return callback + + def parse_block_scalar_indent(token_class): + """Process indentation spaces in a block scalar.""" + def callback(lexer, match, context): + text = match.group() + if context.block_scalar_indent is None: + if len(text) <= max(context.indent, 0): + context.stack.pop() + context.stack.pop() + return + context.block_scalar_indent = len(text) + else: + if len(text) < context.block_scalar_indent: + context.stack.pop() + context.stack.pop() + return + if text: + yield match.start(), token_class, text + context.pos = match.end() + return callback + + def parse_plain_scalar_indent(token_class): + """Process indentation spaces in a plain scalar.""" + def callback(lexer, match, context): + text = match.group() + if len(text) <= context.indent: + context.stack.pop() + context.stack.pop() + return + if text: + yield match.start(), token_class, text + context.pos = match.end() + return callback + + tokens = { + # the root rules + 'root': [ + # ignored whitespaces + (r'[ ]+(?=#|$)', Text), + # line breaks + (r'\n+', Text), + # a 
comment + (r'#[^\n]*', Comment.Single), + # the '%YAML' directive + (r'^%YAML(?=[ ]|$)', reset_indent(Name.Tag), 'yaml-directive'), + # the %TAG directive + (r'^%TAG(?=[ ]|$)', reset_indent(Name.Tag), 'tag-directive'), + # document start and document end indicators + (r'^(?:---|\.\.\.)(?=[ ]|$)', reset_indent(Name.Namespace), + 'block-line'), + # indentation spaces + (r'[ ]*(?!\s|$)', save_indent(Text, start=True), + ('block-line', 'indentation')), + ], + + # trailing whitespaces after directives or a block scalar indicator + 'ignored-line': [ + # ignored whitespaces + (r'[ ]+(?=#|$)', Text), + # a comment + (r'#[^\n]*', Comment.Single), + # line break + (r'\n', Text, '#pop:2'), + ], + + # the %YAML directive + 'yaml-directive': [ + # the version number + (r'([ ]+)([0-9]+\.[0-9]+)', + bygroups(Text, Number), 'ignored-line'), + ], + + # the %YAG directive + 'tag-directive': [ + # a tag handle and the corresponding prefix + (r'([ ]+)(!|![\w-]*!)' + r'([ ]+)(!|!?[\w;/?:@&=+$,.!~*\'()\[\]%-]+)', + bygroups(Text, Keyword.Type, Text, Keyword.Type), + 'ignored-line'), + ], + + # block scalar indicators and indentation spaces + 'indentation': [ + # trailing whitespaces are ignored + (r'[ ]*$', something(Text), '#pop:2'), + # whitespaces preceeding block collection indicators + (r'[ ]+(?=[?:-](?:[ ]|$))', save_indent(Text)), + # block collection indicators + (r'[?:-](?=[ ]|$)', set_indent(Punctuation.Indicator)), + # the beginning a block line + (r'[ ]*', save_indent(Text), '#pop'), + ], + + # an indented line in the block context + 'block-line': [ + # the line end + (r'[ ]*(?=#|$)', something(Text), '#pop'), + # whitespaces separating tokens + (r'[ ]+', Text), + # tags, anchors and aliases, + include('descriptors'), + # block collections and scalars + include('block-nodes'), + # flow collections and quoted scalars + include('flow-nodes'), + # a plain scalar + (r'(?=[^\s?:,\[\]{}#&*!|>\'"%@`-]|[?:-]\S)', + something(Name.Variable), + 'plain-scalar-in-block-context'), + ], + 
+ # tags, anchors, aliases + 'descriptors': [ + # a full-form tag + (r'!<[\w;/?:@&=+$,.!~*\'()\[\]%-]+>', Keyword.Type), + # a tag in the form '!', '!suffix' or '!handle!suffix' + (r'!(?:[\w-]+)?' + r'(?:![\w;/?:@&=+$,.!~*\'()\[\]%-]+)?', Keyword.Type), + # an anchor + (r'&[\w-]+', Name.Label), + # an alias + (r'\*[\w-]+', Name.Variable), + ], + + # block collections and scalars + 'block-nodes': [ + # implicit key + (r':(?=[ ]|$)', set_indent(Punctuation.Indicator, implicit=True)), + # literal and folded scalars + (r'[|>]', Punctuation.Indicator, + ('block-scalar-content', 'block-scalar-header')), + ], + + # flow collections and quoted scalars + 'flow-nodes': [ + # a flow sequence + (r'\[', Punctuation.Indicator, 'flow-sequence'), + # a flow mapping + (r'\{', Punctuation.Indicator, 'flow-mapping'), + # a single-quoted scalar + (r'\'', String, 'single-quoted-scalar'), + # a double-quoted scalar + (r'\"', String, 'double-quoted-scalar'), + ], + + # the content of a flow collection + 'flow-collection': [ + # whitespaces + (r'[ ]+', Text), + # line breaks + (r'\n+', Text), + # a comment + (r'#[^\n]*', Comment.Single), + # simple indicators + (r'[?:,]', Punctuation.Indicator), + # tags, anchors and aliases + include('descriptors'), + # nested collections and quoted scalars + include('flow-nodes'), + # a plain scalar + (r'(?=[^\s?:,\[\]{}#&*!|>\'"%@`])', + something(Name.Variable), + 'plain-scalar-in-flow-context'), + ], + + # a flow sequence indicated by '[' and ']' + 'flow-sequence': [ + # include flow collection rules + include('flow-collection'), + # the closing indicator + (r'\]', Punctuation.Indicator, '#pop'), + ], + + # a flow mapping indicated by '{' and '}' + 'flow-mapping': [ + # include flow collection rules + include('flow-collection'), + # the closing indicator + (r'\}', Punctuation.Indicator, '#pop'), + ], + + # block scalar lines + 'block-scalar-content': [ + # line break + (r'\n', Text), + # empty line + (r'^[ ]+$', + parse_block_scalar_empty_line(Text, 
Name.Constant)), + # indentation spaces (we may leave the state here) + (r'^[ ]*', parse_block_scalar_indent(Text)), + # line content + (r'[\S\t ]+', Name.Constant), + ], + + # the content of a literal or folded scalar + 'block-scalar-header': [ + # indentation indicator followed by chomping flag + (r'([1-9])?[+-]?(?=[ ]|$)', + set_block_scalar_indent(Punctuation.Indicator), + 'ignored-line'), + # chomping flag followed by indentation indicator + (r'[+-]?([1-9])?(?=[ ]|$)', + set_block_scalar_indent(Punctuation.Indicator), + 'ignored-line'), + ], + + # ignored and regular whitespaces in quoted scalars + 'quoted-scalar-whitespaces': [ + # leading and trailing whitespaces are ignored + (r'^[ ]+', Text), + (r'[ ]+$', Text), + # line breaks are ignored + (r'\n+', Text), + # other whitespaces are a part of the value + (r'[ ]+', Name.Variable), + ], + + # single-quoted scalars + 'single-quoted-scalar': [ + # include whitespace and line break rules + include('quoted-scalar-whitespaces'), + # escaping of the quote character + (r'\'\'', String.Escape), + # regular non-whitespace characters + (r'[^\s\']+', String), + # the closing quote + (r'\'', String, '#pop'), + ], + + # double-quoted scalars + 'double-quoted-scalar': [ + # include whitespace and line break rules + include('quoted-scalar-whitespaces'), + # escaping of special characters + (r'\\[0abt\tn\nvfre "\\N_LP]', String), + # escape codes + (r'\\(?:x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|U[0-9A-Fa-f]{8})', + String.Escape), + # regular non-whitespace characters + (r'[^\s"\\]+', String), + # the closing quote + (r'"', String, '#pop'), + ], + + # the beginning of a new line while scanning a plain scalar + 'plain-scalar-in-block-context-new-line': [ + # empty lines + (r'^[ ]+$', Text), + # line breaks + (r'\n+', Text), + # document start and document end indicators + (r'^(?=---|\.\.\.)', something(Name.Namespace), '#pop:3'), + # indentation spaces (we may leave the block line state here) + (r'^[ ]*', 
parse_plain_scalar_indent(Text), '#pop'), + ], + + # a plain scalar in the block context + 'plain-scalar-in-block-context': [ + # the scalar ends with the ':' indicator + (r'[ ]*(?=:[ ]|:$)', something(Text), '#pop'), + # the scalar ends with whitespaces followed by a comment + (r'[ ]+(?=#)', Text, '#pop'), + # trailing whitespaces are ignored + (r'[ ]+$', Text), + # line breaks are ignored + (r'\n+', Text, 'plain-scalar-in-block-context-new-line'), + # other whitespaces are a part of the value + (r'[ ]+', Literal.Scalar.Plain), + # regular non-whitespace characters + (r'(?::(?!\s)|[^\s:])+', Literal.Scalar.Plain), + ], + + # a plain scalar is the flow context + 'plain-scalar-in-flow-context': [ + # the scalar ends with an indicator character + (r'[ ]*(?=[,:?\[\]{}])', something(Text), '#pop'), + # the scalar ends with a comment + (r'[ ]+(?=#)', Text, '#pop'), + # leading and trailing whitespaces are ignored + (r'^[ ]+', Text), + (r'[ ]+$', Text), + # line breaks are ignored + (r'\n+', Text), + # other whitespaces are a part of the value + (r'[ ]+', Name.Variable), + # regular non-whitespace characters + (r'[^\s,:?\[\]{}]+', Name.Variable), + ], + + } + + def get_tokens_unprocessed(self, text=None, context=None): + if context is None: + context = YamlLexerContext(text, 0) + return super(YamlLexer, self).get_tokens_unprocessed(text, context) + + +class JsonLexer(RegexLexer): + """ + For JSON data structures. + + .. 
versionadded:: 1.5 + """ + + name = 'JSON' + aliases = ['json'] + filenames = ['*.json'] + mimetypes = ['application/json'] + + flags = re.DOTALL + + # integer part of a number + int_part = r'-?(0|[1-9]\d*)' + + # fractional part of a number + frac_part = r'\.\d+' + + # exponential part of a number + exp_part = r'[eE](\+|-)?\d+' + + tokens = { + 'whitespace': [ + (r'\s+', Text), + ], + + # represents a simple terminal value + 'simplevalue': [ + (r'(true|false|null)\b', Keyword.Constant), + (('%(int_part)s(%(frac_part)s%(exp_part)s|' + '%(exp_part)s|%(frac_part)s)') % vars(), + Number.Float), + (int_part, Number.Integer), + (r'"(\\\\|\\"|[^"])*"', String.Double), + ], + + + # the right hand side of an object, after the attribute name + 'objectattribute': [ + include('value'), + (r':', Punctuation), + # comma terminates the attribute but expects more + (r',', Punctuation, '#pop'), + # a closing bracket terminates the entire object, so pop twice + (r'\}', Punctuation, ('#pop', '#pop')), + ], + + # a json object - { attr, attr, ... } + 'objectvalue': [ + include('whitespace'), + (r'"(\\\\|\\"|[^"])*"', Name.Tag, 'objectattribute'), + (r'\}', Punctuation, '#pop'), + ], + + # json array - [ value, value, ... } + 'arrayvalue': [ + include('whitespace'), + include('value'), + (r',', Punctuation), + (r'\]', Punctuation, '#pop'), + ], + + # a json value - either a simple value or a complex value (object or array) + 'value': [ + include('whitespace'), + include('simplevalue'), + (r'\{', Punctuation, 'objectvalue'), + (r'\[', Punctuation, 'arrayvalue'), + ], + + # the root of a json document whould be a value + 'root': [ + include('value'), + ], + } + +class JsonLdLexer(JsonLexer): + """ + For `JSON-LD `_ linked data. + + .. 
versionadded:: 2.0 + """ + + name = 'JSON-LD' + aliases = ['jsonld', 'json-ld'] + filenames = ['*.jsonld'] + mimetypes = ['application/ld+json'] + + tokens = { + 'objectvalue': [ + (r'"@(context|id|value|language|type|container|list|set|' + r'reverse|index|base|vocab|graph)"', Name.Decorator, + 'objectattribute'), + inherit, + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/diff.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/diff.py new file mode 100644 index 0000000..a910a97 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/diff.py @@ -0,0 +1,106 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.diff + ~~~~~~~~~~~~~~~~~~~~ + + Lexers for diff/patch formats. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, include, bygroups +from pygments.token import Text, Comment, Operator, Keyword, Name, Generic, \ + Literal + +__all__ = ['DiffLexer', 'DarcsPatchLexer'] + + +class DiffLexer(RegexLexer): + """ + Lexer for unified or context-style diffs or patches. + """ + + name = 'Diff' + aliases = ['diff', 'udiff'] + filenames = ['*.diff', '*.patch'] + mimetypes = ['text/x-diff', 'text/x-patch'] + + tokens = { + 'root': [ + (r' .*\n', Text), + (r'\+.*\n', Generic.Inserted), + (r'-.*\n', Generic.Deleted), + (r'!.*\n', Generic.Strong), + (r'@.*\n', Generic.Subheading), + (r'([Ii]ndex|diff).*\n', Generic.Heading), + (r'=.*\n', Generic.Heading), + (r'.*\n', Text), + ] + } + + def analyse_text(text): + if text[:7] == 'Index: ': + return True + if text[:5] == 'diff ': + return True + if text[:4] == '--- ': + return 0.9 + + +class DarcsPatchLexer(RegexLexer): + """ + DarcsPatchLexer is a lexer for the various versions of the darcs patch + format. Examples of this format are derived by commands such as + ``darcs annotate --patch`` and ``darcs send``. + + .. 
versionadded:: 0.10 + """ + + name = 'Darcs Patch' + aliases = ['dpatch'] + filenames = ['*.dpatch', '*.darcspatch'] + + DPATCH_KEYWORDS = ('hunk', 'addfile', 'adddir', 'rmfile', 'rmdir', 'move', + 'replace') + + tokens = { + 'root': [ + (r'<', Operator), + (r'>', Operator), + (r'\{', Operator), + (r'\}', Operator), + (r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)(\])', + bygroups(Operator, Keyword, Name, Text, Name, Operator, + Literal.Date, Text, Operator)), + (r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)', + bygroups(Operator, Keyword, Name, Text, Name, Operator, + Literal.Date, Text), 'comment'), + (r'New patches:', Generic.Heading), + (r'Context:', Generic.Heading), + (r'Patch bundle hash:', Generic.Heading), + (r'(\s*)(%s)(.*\n)' % '|'.join(DPATCH_KEYWORDS), + bygroups(Text, Keyword, Text)), + (r'\+', Generic.Inserted, "insert"), + (r'-', Generic.Deleted, "delete"), + (r'.*\n', Text), + ], + 'comment': [ + (r'[^\]].*\n', Comment), + (r'\]', Operator, "#pop"), + ], + 'specialText': [ # darcs add [_CODE_] special operators for clarity + (r'\n', Text, "#pop"), # line-based + (r'\[_[^_]*_]', Operator), + ], + 'insert': [ + include('specialText'), + (r'\[', Generic.Inserted), + (r'[^\n\[]+', Generic.Inserted), + ], + 'delete': [ + include('specialText'), + (r'\[', Generic.Deleted), + (r'[^\n\[]+', Generic.Deleted), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/dotnet.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/dotnet.py similarity index 80% rename from plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/dotnet.py rename to plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/dotnet.py index 9f38922..39c03d4 100644 --- a/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/dotnet.py +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/dotnet.py @@ -5,19 +5,19 @@ Lexers for .net languages. 
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, DelegatingLexer, bygroups, include, \ - using, this + using, this, default from pygments.token import Punctuation, \ - Text, Comment, Operator, Keyword, Name, String, Number, Literal, Other -from pygments.util import get_choice_opt + Text, Comment, Operator, Keyword, Name, String, Number, Literal, Other +from pygments.util import get_choice_opt, iteritems from pygments import unistring as uni -from pygments.lexers.web import XmlLexer +from pygments.lexers.html import XmlLexer __all__ = ['CSharpLexer', 'NemerleLexer', 'BooLexer', 'VbNetLexer', 'CSharpAspxLexer', 'VbNetAspxLexer', 'FSharpLexer'] @@ -44,24 +44,24 @@ class CSharpLexer(RegexLexer): The default value is ``basic``. - *New in Pygments 0.8.* + .. versionadded:: 0.8 """ name = 'C#' aliases = ['csharp', 'c#'] filenames = ['*.cs'] - mimetypes = ['text/x-csharp'] # inferred + mimetypes = ['text/x-csharp'] # inferred flags = re.MULTILINE | re.DOTALL | re.UNICODE - # for the range of allowed unicode characters in identifiers, - # see http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf + # for the range of allowed unicode characters in identifiers, see + # http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf levels = { - 'none': '@?[_a-zA-Z][a-zA-Z0-9_]*', - 'basic': ('@?[_' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl + ']' + - '[' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl + - uni.Nd + uni.Pc + uni.Cf + uni.Mn + uni.Mc + ']*'), + 'none': '@?[_a-zA-Z]\w*', + 'basic': ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' + + '[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc', + 'Cf', 'Mn', 'Mc') + ']*'), 'full': ('@?(?:_|[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])' + '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 
'Lo', 'Nl', @@ -71,17 +71,17 @@ class CSharpLexer(RegexLexer): tokens = {} token_variants = True - for levelname, cs_ident in list(levels.items()): + for levelname, cs_ident in iteritems(levels): tokens[levelname] = { 'root': [ # method names - (r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)' # return type - r'(' + cs_ident + ')' # method name + (r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)' # return type + r'(' + cs_ident + ')' # method name r'(\s*)(\()', # signature start bygroups(using(this), Name.Function, Text, Punctuation)), (r'^\s*\[.*?\]', Name.Attribute), (r'[^\S\n]+', Text), - (r'\\\n', Text), # line continuation + (r'\\\n', Text), # line continuation (r'//.*?\n', Comment.Single), (r'/[*].*?[*]/', Comment.Multiline), (r'\n', Text), @@ -117,16 +117,17 @@ class CSharpLexer(RegexLexer): (cs_ident, Name), ], 'class': [ - (cs_ident, Name.Class, '#pop') + (cs_ident, Name.Class, '#pop'), + default('#pop'), ], 'namespace': [ - (r'(?=\()', Text, '#pop'), # using (resource) - ('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop') + (r'(?=\()', Text, '#pop'), # using (resource) + ('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop'), ] } def __init__(self, **options): - level = get_choice_opt(options, 'unicodelevel', list(self.tokens.keys()), 'basic') + level = get_choice_opt(options, 'unicodelevel', list(self.tokens), 'basic') if level not in self._all_tokens: # compile the regexes now self._tokens = self.__class__.process_tokendef(level) @@ -156,44 +157,44 @@ class NemerleLexer(RegexLexer): The default value is ``basic``. - *New in Pygments 1.5.* + .. 
versionadded:: 1.5 """ name = 'Nemerle' aliases = ['nemerle'] filenames = ['*.n'] - mimetypes = ['text/x-nemerle'] # inferred + mimetypes = ['text/x-nemerle'] # inferred flags = re.MULTILINE | re.DOTALL | re.UNICODE # for the range of allowed unicode characters in identifiers, see # http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf - levels = dict( - none = '@?[_a-zA-Z][a-zA-Z0-9_]*', - basic = ('@?[_' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl + ']' + - '[' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl + - uni.Nd + uni.Pc + uni.Cf + uni.Mn + uni.Mc + ']*'), - full = ('@?(?:_|[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', - 'Nl') + '])' - + '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl', - 'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'), - ) + levels = { + 'none': '@?[_a-zA-Z]\w*', + 'basic': ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' + + '[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc', + 'Cf', 'Mn', 'Mc') + ']*'), + 'full': ('@?(?:_|[^' + + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])' + + '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl', + 'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'), + } tokens = {} token_variants = True - for levelname, cs_ident in list(levels.items()): + for levelname, cs_ident in iteritems(levels): tokens[levelname] = { 'root': [ # method names - (r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)' # return type - r'(' + cs_ident + ')' # method name + (r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)' # return type + r'(' + cs_ident + ')' # method name r'(\s*)(\()', # signature start bygroups(using(this), Name.Function, Text, Punctuation)), (r'^\s*\[.*?\]', Name.Attribute), (r'[^\S\n]+', Text), - (r'\\\n', Text), # line continuation + (r'\\\n', Text), # line continuation (r'//.*?\n', Comment.Single), (r'/[*].*?[*]/', Comment.Multiline), (r'\n', Text), @@ -249,7 +250,7 @@ class NemerleLexer(RegexLexer): (cs_ident, Name.Class, '#pop') ], 'namespace': [ - (r'(?=\()', Text, '#pop'), # 
using (resource) + (r'(?=\()', Text, '#pop'), # using (resource) ('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop') ], 'splice-string': [ @@ -284,7 +285,7 @@ class NemerleLexer(RegexLexer): } def __init__(self, **options): - level = get_choice_opt(options, 'unicodelevel', list(self.tokens.keys()), + level = get_choice_opt(options, 'unicodelevel', list(self.tokens), 'basic') if level not in self._all_tokens: # compile the regexes now @@ -336,9 +337,9 @@ class BooLexer(RegexLexer): (r'"""(\\\\|\\"|.*?)"""', String.Double), (r'"(\\\\|\\"|[^"]*?)"', String.Double), (r"'(\\\\|\\'|[^']*?)'", String.Single), - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name), + (r'[a-zA-Z_]\w*', Name), (r'(\d+\.\d*|\d*\.\d+)([fF][+-]?[0-9]+)?', Number.Float), - (r'[0-9][0-9\.]*(ms?|d|h|s)', Number), + (r'[0-9][0-9.]*(ms?|d|h|s)', Number), (r'0\d+', Number.Oct), (r'0x[a-fA-F0-9]+', Number.Hex), (r'\d+L', Number.Integer.Long), @@ -351,13 +352,13 @@ class BooLexer(RegexLexer): ('[*/]', Comment.Multiline) ], 'funcname': [ - ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Function, '#pop') + ('[a-zA-Z_]\w*', Name.Function, '#pop') ], 'classname': [ - ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop') + ('[a-zA-Z_]\w*', Name.Class, '#pop') ], 'namespace': [ - ('[a-zA-Z_][a-zA-Z0-9_.]*', Name.Namespace, '#pop') + ('[a-zA-Z_][\w.]*', Name.Namespace, '#pop') ] } @@ -372,7 +373,11 @@ class VbNetLexer(RegexLexer): name = 'VB.net' aliases = ['vb.net', 'vbnet'] filenames = ['*.vb', '*.bas'] - mimetypes = ['text/x-vbnet', 'text/x-vba'] # (?) + mimetypes = ['text/x-vbnet', 'text/x-vba'] # (?) 
+ + uni_name = '[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' + \ + '[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc', + 'Cf', 'Mn', 'Mc') + ']*' flags = re.MULTILINE | re.IGNORECASE tokens = { @@ -382,11 +387,11 @@ class VbNetLexer(RegexLexer): (r'\n', Text), (r'rem\b.*?\n', Comment), (r"'.*?\n", Comment), - (r'#If\s.*?\sThen|#ElseIf\s.*?\sThen|#End\s+If|#Const|' + (r'#If\s.*?\sThen|#ElseIf\s.*?\sThen|#Else|#End\s+If|#Const|' r'#ExternalSource.*?\n|#End\s+ExternalSource|' r'#Region.*?\n|#End\s+Region|#ExternalChecksum', Comment.Preproc), - (r'[\(\){}!#,.:]', Punctuation), + (r'[(){}!#,.:]', Punctuation), (r'Option\s+(Strict|Explicit|Compare)\s+' r'(On|Off|Binary|Text)', Keyword.Declaration), (r'(?>=|<<|>>|:=|' - r'<=|>=|<>|[-&*/\\^+=<>]', + r'<=|>=|<>|[-&*/\\^+=<>\[\]]', Operator), ('"', String, 'string'), - ('[a-zA-Z_][a-zA-Z0-9_]*[%&@!#$]?', Name), + (r'_\n', Text), # Line continuation (must be before Name) + (uni_name + '[%&@!#$]?', Name), ('#.*?#', Literal.Date), - (r'(\d+\.\d*|\d*\.\d+)([fF][+-]?[0-9]+)?', Number.Float), + (r'(\d+\.\d*|\d*\.\d+)(F[+-]?[0-9]+)?', Number.Float), (r'\d+([SILDFR]|US|UI|UL)?', Number.Integer), (r'&H[0-9a-f]+([SILDFR]|US|UI|UL)?', Number.Integer), (r'&O[0-7]+([SILDFR]|US|UI|UL)?', Number.Integer), - (r'_\n', Text), # Line continuation ], 'string': [ (r'""', String), @@ -439,26 +444,32 @@ class VbNetLexer(RegexLexer): (r'[^"]+', String), ], 'dim': [ - (r'[a-z_][a-z0-9_]*', Name.Variable, '#pop'), - (r'', Text, '#pop'), # any other syntax + (uni_name, Name.Variable, '#pop'), + default('#pop'), # any other syntax ], 'funcname': [ - (r'[a-z_][a-z0-9_]*', Name.Function, '#pop'), + (uni_name, Name.Function, '#pop'), ], 'classname': [ - (r'[a-z_][a-z0-9_]*', Name.Class, '#pop'), + (uni_name, Name.Class, '#pop'), ], 'namespace': [ - (r'[a-z_][a-z0-9_.]*', Name.Namespace, '#pop'), + (uni_name, Name.Namespace), + (r'\.', Name.Namespace), + default('#pop'), ], 'end': [ (r'\s+', Text), 
(r'(Function|Sub|Property|Class|Structure|Enum|Module|Namespace)\b', Keyword, '#pop'), - (r'', Text, '#pop'), + default('#pop'), ] } + def analyse_text(text): + if re.search(r'^\s*(#If|Module|Namespace)', text, re.MULTILINE): + return 0.5 + class GenericAspxLexer(RegexLexer): """ @@ -483,7 +494,7 @@ class GenericAspxLexer(RegexLexer): } -#TODO support multiple languages within the same source file +# TODO support multiple languages within the same source file class CSharpAspxLexer(DelegatingLexer): """ Lexer for highligting C# within ASP.NET pages. @@ -495,7 +506,7 @@ class CSharpAspxLexer(DelegatingLexer): mimetypes = [] def __init__(self, **options): - super(CSharpAspxLexer, self).__init__(CSharpLexer,GenericAspxLexer, + super(CSharpAspxLexer, self).__init__(CSharpLexer, GenericAspxLexer, **options) def analyse_text(text): @@ -516,8 +527,8 @@ class VbNetAspxLexer(DelegatingLexer): mimetypes = [] def __init__(self, **options): - super(VbNetAspxLexer, self).__init__(VbNetLexer,GenericAspxLexer, - **options) + super(VbNetAspxLexer, self).__init__(VbNetLexer, GenericAspxLexer, + **options) def analyse_text(text): if re.search(r'Page\s*Language="Vb"', text, re.I) is not None: @@ -531,7 +542,10 @@ class FSharpLexer(RegexLexer): """ For the F# language (version 3.0). - *New in Pygments 1.5.* + AAAAACK Strings + http://research.microsoft.com/en-us/um/cambridge/projects/fsharp/manual/spec.html#_Toc335818775 + + .. 
versionadded:: 1.5 """ name = 'FSharp' @@ -540,15 +554,15 @@ class FSharpLexer(RegexLexer): mimetypes = ['text/x-fsharp'] keywords = [ - 'abstract', 'as', 'assert', 'base', 'begin', 'class', 'default', - 'delegate', 'do!', 'do', 'done', 'downcast', 'downto', 'elif', 'else', - 'end', 'exception', 'extern', 'false', 'finally', 'for', 'function', - 'fun', 'global', 'if', 'inherit', 'inline', 'interface', 'internal', - 'in', 'lazy', 'let!', 'let', 'match', 'member', 'module', 'mutable', - 'namespace', 'new', 'null', 'of', 'open', 'override', 'private', 'public', - 'rec', 'return!', 'return', 'select', 'static', 'struct', 'then', 'to', - 'true', 'try', 'type', 'upcast', 'use!', 'use', 'val', 'void', 'when', - 'while', 'with', 'yield!', 'yield', + 'abstract', 'as', 'assert', 'base', 'begin', 'class', 'default', + 'delegate', 'do!', 'do', 'done', 'downcast', 'downto', 'elif', 'else', + 'end', 'exception', 'extern', 'false', 'finally', 'for', 'function', + 'fun', 'global', 'if', 'inherit', 'inline', 'interface', 'internal', + 'in', 'lazy', 'let!', 'let', 'match', 'member', 'module', 'mutable', + 'namespace', 'new', 'null', 'of', 'open', 'override', 'private', 'public', + 'rec', 'return!', 'return', 'select', 'static', 'struct', 'then', 'to', + 'true', 'try', 'type', 'upcast', 'use!', 'use', 'val', 'void', 'when', + 'while', 'with', 'yield!', 'yield', ] # Reserved words; cannot hurt to color them as keywords too. 
keywords += [ @@ -559,10 +573,10 @@ class FSharpLexer(RegexLexer): 'virtual', 'volatile', ] keyopts = [ - '!=', '#', '&&', '&', '\(', '\)', '\*', '\+', ',', '-\.', - '->', '-', '\.\.', '\.', '::', ':=', ':>', ':', ';;', ';', '<-', - '<\]', '<', '>\]', '>', '\?\?', '\?', '\[<', '\[\|', '\[', '\]', - '_', '`', '{', '\|\]', '\|', '}', '~', '<@@', '<@', '=', '@>', '@@>', + '!=', '#', '&&', '&', '\(', '\)', '\*', '\+', ',', '-\.', + '->', '-', '\.\.', '\.', '::', ':=', ':>', ':', ';;', ';', '<-', + '<\]', '<', '>\]', '>', '\?\?', '\?', '\[<', '\[\|', '\[', '\]', + '_', '`', '\{', '\|\]', '\|', '\}', '~', '<@@', '<@', '=', '@>', '@@>', ] operators = r'[!$%&*+\./:<=>?@^|~-]' @@ -581,7 +595,7 @@ class FSharpLexer(RegexLexer): tokens = { 'escape-sequence': [ - (r'\\[\\\"\'ntbrafv]', String.Escape), + (r'\\[\\"\'ntbrafv]', String.Escape), (r'\\[0-9]{3}', String.Escape), (r'\\u[0-9a-fA-F]{4}', String.Escape), (r'\\U[0-9a-fA-F]{8}', String.Escape), @@ -589,9 +603,9 @@ class FSharpLexer(RegexLexer): 'root': [ (r'\s+', Text), (r'\(\)|\[\]', Name.Builtin.Pseudo), - (r'\b(?`_ + definition files. + + .. 
versionadded:: 1.4 + """ + + name = 'Protocol Buffer' + aliases = ['protobuf', 'proto'] + filenames = ['*.proto'] + + tokens = { + 'root': [ + (r'[ \t]+', Text), + (r'[,;{}\[\]()]', Punctuation), + (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single), + (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline), + (words(( + 'import', 'option', 'optional', 'required', 'repeated', 'default', + 'packed', 'ctype', 'extensions', 'to', 'max', 'rpc', 'returns', + 'oneof'), prefix=r'\b', suffix=r'\b'), + Keyword), + (words(( + 'int32', 'int64', 'uint32', 'uint64', 'sint32', 'sint64', + 'fixed32', 'fixed64', 'sfixed32', 'sfixed64', + 'float', 'double', 'bool', 'string', 'bytes'), suffix=r'\b'), + Keyword.Type), + (r'(true|false)\b', Keyword.Constant), + (r'(package)(\s+)', bygroups(Keyword.Namespace, Text), 'package'), + (r'(message|extend)(\s+)', + bygroups(Keyword.Declaration, Text), 'message'), + (r'(enum|group|service)(\s+)', + bygroups(Keyword.Declaration, Text), 'type'), + (r'\".*?\"', String), + (r'\'.*?\'', String), + (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float), + (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), + (r'(\-?(inf|nan))\b', Number.Float), + (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex), + (r'0[0-7]+[LlUu]*', Number.Oct), + (r'\d+[LlUu]*', Number.Integer), + (r'[+-=]', Operator), + (r'([a-zA-Z_][\w.]*)([ \t]*)(=)', + bygroups(Name.Attribute, Text, Operator)), + ('[a-zA-Z_][\w.]*', Name), + ], + 'package': [ + (r'[a-zA-Z_]\w*', Name.Namespace, '#pop'), + default('#pop'), + ], + 'message': [ + (r'[a-zA-Z_]\w*', Name.Class, '#pop'), + default('#pop'), + ], + 'type': [ + (r'[a-zA-Z_]\w*', Name, '#pop'), + default('#pop'), + ], + } + + +class BroLexer(RegexLexer): + """ + For `Bro `_ scripts. + + .. versionadded:: 1.5 + """ + name = 'Bro' + aliases = ['bro'] + filenames = ['*.bro'] + + _hex = r'[0-9a-fA-F_]' + _float = r'((\d*\.?\d+)|(\d+\.?\d*))([eE][-+]?\d+)?' 
+ _h = r'[A-Za-z0-9][-A-Za-z0-9]*' + + tokens = { + 'root': [ + # Whitespace + (r'^@.*?\n', Comment.Preproc), + (r'#.*?\n', Comment.Single), + (r'\n', Text), + (r'\s+', Text), + (r'\\\n', Text), + # Keywords + (r'(add|alarm|break|case|const|continue|delete|do|else|enum|event' + r'|export|for|function|if|global|hook|local|module|next' + r'|of|print|redef|return|schedule|switch|type|when|while)\b', Keyword), + (r'(addr|any|bool|count|counter|double|file|int|interval|net' + r'|pattern|port|record|set|string|subnet|table|time|timer' + r'|vector)\b', Keyword.Type), + (r'(T|F)\b', Keyword.Constant), + (r'(&)((?:add|delete|expire)_func|attr|(?:create|read|write)_expire' + r'|default|disable_print_hook|raw_output|encrypt|group|log' + r'|mergeable|optional|persistent|priority|redef' + r'|rotate_(?:interval|size)|synchronized)\b', + bygroups(Punctuation, Keyword)), + (r'\s+module\b', Keyword.Namespace), + # Addresses, ports and networks + (r'\d+/(tcp|udp|icmp|unknown)\b', Number), + (r'(\d+\.){3}\d+', Number), + (r'(' + _hex + r'){7}' + _hex, Number), + (r'0x' + _hex + r'(' + _hex + r'|:)*::(' + _hex + r'|:)*', Number), + (r'((\d+|:)(' + _hex + r'|:)*)?::(' + _hex + r'|:)*', Number), + (r'(\d+\.\d+\.|(\d+\.){2}\d+)', Number), + # Hostnames + (_h + r'(\.' 
+ _h + r')+', String), + # Numeric + (_float + r'\s+(day|hr|min|sec|msec|usec)s?\b', Literal.Date), + (r'0[xX]' + _hex, Number.Hex), + (_float, Number.Float), + (r'\d+', Number.Integer), + (r'/', String.Regex, 'regex'), + (r'"', String, 'string'), + # Operators + (r'[!%*/+:<=>?~|-]', Operator), + (r'([-+=&|]{2}|[+=!><-]=)', Operator), + (r'(in|match)\b', Operator.Word), + (r'[{}()\[\]$.,;]', Punctuation), + # Identfier + (r'([_a-zA-Z]\w*)(::)', bygroups(Name, Name.Namespace)), + (r'[a-zA-Z_]\w*', Name) + ], + 'string': [ + (r'"', String, '#pop'), + (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), + (r'[^\\"\n]+', String), + (r'\\\n', String), + (r'\\', String) + ], + 'regex': [ + (r'/', String.Regex, '#pop'), + (r'\\[\\nt/]', String.Regex), # String.Escape is too intense here. + (r'[^\\/\n]+', String.Regex), + (r'\\\n', String.Regex), + (r'\\', String.Regex) + ] + } + + +class PuppetLexer(RegexLexer): + """ + For `Puppet `__ configuration DSL. + + .. versionadded:: 1.6 + """ + name = 'Puppet' + aliases = ['puppet'] + filenames = ['*.pp'] + + tokens = { + 'root': [ + include('comments'), + include('keywords'), + include('names'), + include('numbers'), + include('operators'), + include('strings'), + + (r'[]{}:(),;[]', Punctuation), + (r'[^\S\n]+', Text), + ], + + 'comments': [ + (r'\s*#.*$', Comment), + (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), + ], + + 'operators': [ + (r'(=>|\?|<|>|=|\+|-|/|\*|~|!|\|)', Operator), + (r'(in|and|or|not)\b', Operator.Word), + ], + + 'names': [ + ('[a-zA-Z_]\w*', Name.Attribute), + (r'(\$\S+)(\[)(\S+)(\])', bygroups(Name.Variable, Punctuation, + String, Punctuation)), + (r'\$\S+', Name.Variable), + ], + + 'numbers': [ + # Copypasta from the Python lexer + (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float), + (r'\d+[eE][+-]?[0-9]+j?', Number.Float), + (r'0[0-7]+j?', Number.Oct), + (r'0[xX][a-fA-F0-9]+', Number.Hex), + (r'\d+L', Number.Integer.Long), + (r'\d+j?', Number.Integer) + ], + + 
'keywords': [ + # Left out 'group' and 'require' + # Since they're often used as attributes + (words(( + 'absent', 'alert', 'alias', 'audit', 'augeas', 'before', 'case', + 'check', 'class', 'computer', 'configured', 'contained', + 'create_resources', 'crit', 'cron', 'debug', 'default', + 'define', 'defined', 'directory', 'else', 'elsif', 'emerg', + 'err', 'exec', 'extlookup', 'fail', 'false', 'file', + 'filebucket', 'fqdn_rand', 'generate', 'host', 'if', 'import', + 'include', 'info', 'inherits', 'inline_template', 'installed', + 'interface', 'k5login', 'latest', 'link', 'loglevel', + 'macauthorization', 'mailalias', 'maillist', 'mcx', 'md5', + 'mount', 'mounted', 'nagios_command', 'nagios_contact', + 'nagios_contactgroup', 'nagios_host', 'nagios_hostdependency', + 'nagios_hostescalation', 'nagios_hostextinfo', 'nagios_hostgroup', + 'nagios_service', 'nagios_servicedependency', 'nagios_serviceescalation', + 'nagios_serviceextinfo', 'nagios_servicegroup', 'nagios_timeperiod', + 'node', 'noop', 'notice', 'notify', 'package', 'present', 'purged', + 'realize', 'regsubst', 'resources', 'role', 'router', 'running', + 'schedule', 'scheduled_task', 'search', 'selboolean', 'selmodule', + 'service', 'sha1', 'shellquote', 'split', 'sprintf', + 'ssh_authorized_key', 'sshkey', 'stage', 'stopped', 'subscribe', + 'tag', 'tagged', 'template', 'tidy', 'true', 'undef', 'unmounted', + 'user', 'versioncmp', 'vlan', 'warning', 'yumrepo', 'zfs', 'zone', + 'zpool'), prefix='(?i)', suffix=r'\b'), + Keyword), + ], + + 'strings': [ + (r'"([^"])*"', String), + (r"'(\\'|[^'])*'", String), + ], + + } + + +class RslLexer(RegexLexer): + """ + `RSL `_ is the formal specification + language used in RAISE (Rigorous Approach to Industrial Software Engineering) + method. + + .. 
versionadded:: 2.0 + """ + name = 'RSL' + aliases = ['rsl'] + filenames = ['*.rsl'] + mimetypes = ['text/rsl'] + + flags = re.MULTILINE | re.DOTALL + + tokens = { + 'root': [ + (words(( + 'Bool', 'Char', 'Int', 'Nat', 'Real', 'Text', 'Unit', 'abs', + 'all', 'always', 'any', 'as', 'axiom', 'card', 'case', 'channel', + 'chaos', 'class', 'devt_relation', 'dom', 'elems', 'else', 'elif', + 'end', 'exists', 'extend', 'false', 'for', 'hd', 'hide', 'if', + 'in', 'is', 'inds', 'initialise', 'int', 'inter', 'isin', 'len', + 'let', 'local', 'ltl_assertion', 'object', 'of', 'out', 'post', + 'pre', 'read', 'real', 'rng', 'scheme', 'skip', 'stop', 'swap', + 'then', 'theory', 'test_case', 'tl', 'transition_system', 'true', + 'type', 'union', 'until', 'use', 'value', 'variable', 'while', + 'with', 'write', '~isin', '-inflist', '-infset', '-list', + '-set'), prefix=r'\b', suffix=r'\b'), + Keyword), + (r'(variable|value)\b', Keyword.Declaration), + (r'--.*?\n', Comment), + (r'<:.*?:>', Comment), + (r'\{!.*?!\}', Comment), + (r'/\*.*?\*/', Comment), + (r'^[ \t]*([\w]+)[ \t]*:[^:]', Name.Function), + (r'(^[ \t]*)([\w]+)([ \t]*\([\w\s,]*\)[ \t]*)(is|as)', + bygroups(Text, Name.Function, Text, Keyword)), + (r'\b[A-Z]\w*\b', Keyword.Type), + (r'(true|false)\b', Keyword.Constant), + (r'".*"', String), + (r'\'.\'', String.Char), + (r'(><|->|-m->|/\\|<=|<<=|<\.|\|\||\|\^\||-~->|-~m->|\\/|>=|>>|' + r'\.>|\+\+|-\\|<->|=>|:-|~=|\*\*|<<|>>=|\+>|!!|\|=\||#)', + Operator), + (r'[0-9]+\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), + (r'0x[0-9a-f]+', Number.Hex), + (r'[0-9]+', Number.Integer), + (r'.', Text), + ], + } + + def analyse_text(text): + """ + Check for the most common text in the beginning of a RSL file. + """ + if re.search(r'scheme\s*.*?=\s*class\s*type', text, re.I) is not None: + return 1.0 + + +class MscgenLexer(RegexLexer): + """ + For `Mscgen `_ files. + + .. 
versionadded:: 1.6 + """ + name = 'Mscgen' + aliases = ['mscgen', 'msc'] + filenames = ['*.msc'] + + _var = r'(\w+|"(?:\\"|[^"])*")' + + tokens = { + 'root': [ + (r'msc\b', Keyword.Type), + # Options + (r'(hscale|HSCALE|width|WIDTH|wordwraparcs|WORDWRAPARCS' + r'|arcgradient|ARCGRADIENT)\b', Name.Property), + # Operators + (r'(abox|ABOX|rbox|RBOX|box|BOX|note|NOTE)\b', Operator.Word), + (r'(\.|-|\|){3}', Keyword), + (r'(?:-|=|\.|:){2}' + r'|<<=>>|<->|<=>|<<>>|<:>' + r'|->|=>>|>>|=>|:>|-x|-X' + r'|<-|<<=|<<|<=|<:|x-|X-|=', Operator), + # Names + (r'\*', Name.Builtin), + (_var, Name.Variable), + # Other + (r'\[', Punctuation, 'attrs'), + (r'\{|\}|,|;', Punctuation), + include('comments') + ], + 'attrs': [ + (r'\]', Punctuation, '#pop'), + (_var + r'(\s*)(=)(\s*)' + _var, + bygroups(Name.Attribute, Text.Whitespace, Operator, Text.Whitespace, + String)), + (r',', Punctuation), + include('comments') + ], + 'comments': [ + (r'(?://|#).*?\n', Comment.Single), + (r'/\*(?:.|\n)*?\*/', Comment.Multiline), + (r'[ \t\r\n]+', Text.Whitespace) + ] + } + + +class VGLLexer(RegexLexer): + """ + For `SampleManager VGL `_ + source code. + + .. versionadded:: 1.6 + """ + name = 'VGL' + aliases = ['vgl'] + filenames = ['*.rpf'] + + flags = re.MULTILINE | re.DOTALL | re.IGNORECASE + + tokens = { + 'root': [ + (r'\{[^}]*\}', Comment.Multiline), + (r'declare', Keyword.Constant), + (r'(if|then|else|endif|while|do|endwhile|and|or|prompt|object' + r'|create|on|line|with|global|routine|value|endroutine|constant' + r'|global|set|join|library|compile_option|file|exists|create|copy' + r'|delete|enable|windows|name|notprotected)(?! 
*[=<>.,()])', + Keyword), + (r'(true|false|null|empty|error|locked)', Keyword.Constant), + (r'[~^*#!%&\[\]()<>|+=:;,./?-]', Operator), + (r'"[^"]*"', String), + (r'(\.)([a-z_$][\w$]*)', bygroups(Operator, Name.Attribute)), + (r'[0-9][0-9]*(\.[0-9]+(e[+\-]?[0-9]+)?)?', Number), + (r'[a-z_$][\w$]*', Name), + (r'[\r\n]+', Text), + (r'\s+', Text) + ] + } + + +class AlloyLexer(RegexLexer): + """ + For `Alloy `_ source code. + + .. versionadded:: 2.0 + """ + + name = 'Alloy' + aliases = ['alloy'] + filenames = ['*.als'] + mimetypes = ['text/x-alloy'] + + flags = re.MULTILINE | re.DOTALL + + iden_rex = r'[a-zA-Z_][\w\']*' + text_tuple = (r'[^\S\n]+', Text) + + tokens = { + 'sig': [ + (r'(extends)\b', Keyword, '#pop'), + (iden_rex, Name), + text_tuple, + (r',', Punctuation), + (r'\{', Operator, '#pop'), + ], + 'module': [ + text_tuple, + (iden_rex, Name, '#pop'), + ], + 'fun': [ + text_tuple, + (r'\{', Operator, '#pop'), + (iden_rex, Name, '#pop'), + ], + 'root': [ + (r'--.*?$', Comment.Single), + (r'//.*?$', Comment.Single), + (r'/\*.*?\*/', Comment.Multiline), + text_tuple, + (r'(module|open)(\s+)', bygroups(Keyword.Namespace, Text), + 'module'), + (r'(sig|enum)(\s+)', bygroups(Keyword.Declaration, Text), 'sig'), + (r'(iden|univ|none)\b', Keyword.Constant), + (r'(int|Int)\b', Keyword.Type), + (r'(this|abstract|extends|set|seq|one|lone|let)\b', Keyword), + (r'(all|some|no|sum|disj|when|else)\b', Keyword), + (r'(run|check|for|but|exactly|expect|as)\b', Keyword), + (r'(and|or|implies|iff|in)\b', Operator.Word), + (r'(fun|pred|fact|assert)(\s+)', bygroups(Keyword, Text), 'fun'), + (r'!|#|&&|\+\+|<<|>>|>=|<=>|<=|\.|->', Operator), + (r'[-+/*%=<>&!^|~{}\[\]().]', Operator), + (iden_rex, Name), + (r'[:,]', Punctuation), + (r'[0-9]+', Number.Integer), + (r'"(\\\\|\\"|[^"])*"', String), + (r'\n', Text), + ] + } + + +class PanLexer(RegexLexer): + """ + Lexer for `pan `_ source files. + + Based on tcsh lexer. + + .. 
versionadded:: 2.0 + """ + + name = 'Pan' + aliases = ['pan'] + filenames = ['*.pan'] + + tokens = { + 'root': [ + include('basic'), + (r'\(', Keyword, 'paren'), + (r'\{', Keyword, 'curly'), + include('data'), + ], + 'basic': [ + (words(( + 'if', 'for', 'with', 'else', 'type', 'bind', 'while', 'valid', 'final', 'prefix', + 'unique', 'object', 'foreach', 'include', 'template', 'function', 'variable', + 'structure', 'extensible', 'declaration'), prefix=r'\b', suffix=r'\s*\b'), + Keyword), + (words(( + 'file_contents', 'format', 'index', 'length', 'match', 'matches', 'replace', + 'splice', 'split', 'substr', 'to_lowercase', 'to_uppercase', 'debug', 'error', + 'traceback', 'deprecated', 'base64_decode', 'base64_encode', 'digest', 'escape', + 'unescape', 'append', 'create', 'first', 'nlist', 'key', 'list', 'merge', 'next', + 'prepend', 'is_boolean', 'is_defined', 'is_double', 'is_list', 'is_long', + 'is_nlist', 'is_null', 'is_number', 'is_property', 'is_resource', 'is_string', + 'to_boolean', 'to_double', 'to_long', 'to_string', 'clone', 'delete', 'exists', + 'path_exists', 'if_exists', 'return', 'value'), prefix=r'\b', suffix=r'\s*\b'), + Name.Builtin), + (r'#.*', Comment), + (r'\\[\w\W]', String.Escape), + (r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)), + (r'[\[\]{}()=]+', Operator), + (r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String), + (r';', Punctuation), + ], + 'data': [ + (r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double), + (r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single), + (r'\s+', Text), + (r'[^=\s\[\]{}()$"\'`\\;#]+', Text), + (r'\d+(?= |\Z)', Number), + ], + 'curly': [ + (r'\}', Keyword, '#pop'), + (r':-', Keyword), + (r'\w+', Name.Variable), + (r'[^}:"\'`$]+', Punctuation), + (r':', Punctuation), + include('root'), + ], + 'paren': [ + (r'\)', Keyword, '#pop'), + include('root'), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/dylan.py 
b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/dylan.py new file mode 100644 index 0000000..9875fc0 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/dylan.py @@ -0,0 +1,289 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.dylan + ~~~~~~~~~~~~~~~~~~~~~ + + Lexers for the Dylan language. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import Lexer, RegexLexer, bygroups, do_insertions, default +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Generic, Literal + +__all__ = ['DylanLexer', 'DylanConsoleLexer', 'DylanLidLexer'] + + +class DylanLexer(RegexLexer): + """ + For the `Dylan `_ language. + + .. versionadded:: 0.7 + """ + + name = 'Dylan' + aliases = ['dylan'] + filenames = ['*.dylan', '*.dyl', '*.intr'] + mimetypes = ['text/x-dylan'] + + flags = re.IGNORECASE + + builtins = set(( + 'subclass', 'abstract', 'block', 'concrete', 'constant', 'class', + 'compiler-open', 'compiler-sideways', 'domain', 'dynamic', + 'each-subclass', 'exception', 'exclude', 'function', 'generic', + 'handler', 'inherited', 'inline', 'inline-only', 'instance', + 'interface', 'import', 'keyword', 'library', 'macro', 'method', + 'module', 'open', 'primary', 'required', 'sealed', 'sideways', + 'singleton', 'slot', 'thread', 'variable', 'virtual')) + + keywords = set(( + 'above', 'afterwards', 'begin', 'below', 'by', 'case', 'cleanup', + 'create', 'define', 'else', 'elseif', 'end', 'export', 'finally', + 'for', 'from', 'if', 'in', 'let', 'local', 'otherwise', 'rename', + 'select', 'signal', 'then', 'to', 'unless', 'until', 'use', 'when', + 'while')) + + operators = set(( + '~', '+', '-', '*', '|', '^', '=', '==', '~=', '~==', '<', '<=', + '>', '>=', '&', '|')) + + functions = set(( + 'abort', 'abs', 'add', 'add!', 'add-method', 'add-new', 'add-new!', + 
'all-superclasses', 'always', 'any?', 'applicable-method?', 'apply', + 'aref', 'aref-setter', 'as', 'as-lowercase', 'as-lowercase!', + 'as-uppercase', 'as-uppercase!', 'ash', 'backward-iteration-protocol', + 'break', 'ceiling', 'ceiling/', 'cerror', 'check-type', 'choose', + 'choose-by', 'complement', 'compose', 'concatenate', 'concatenate-as', + 'condition-format-arguments', 'condition-format-string', 'conjoin', + 'copy-sequence', 'curry', 'default-handler', 'dimension', 'dimensions', + 'direct-subclasses', 'direct-superclasses', 'disjoin', 'do', + 'do-handlers', 'element', 'element-setter', 'empty?', 'error', 'even?', + 'every?', 'false-or', 'fill!', 'find-key', 'find-method', 'first', + 'first-setter', 'floor', 'floor/', 'forward-iteration-protocol', + 'function-arguments', 'function-return-values', + 'function-specializers', 'gcd', 'generic-function-mandatory-keywords', + 'generic-function-methods', 'head', 'head-setter', 'identity', + 'initialize', 'instance?', 'integral?', 'intersection', + 'key-sequence', 'key-test', 'last', 'last-setter', 'lcm', 'limited', + 'list', 'logand', 'logbit?', 'logior', 'lognot', 'logxor', 'make', + 'map', 'map-as', 'map-into', 'max', 'member?', 'merge-hash-codes', + 'min', 'modulo', 'negative', 'negative?', 'next-method', + 'object-class', 'object-hash', 'odd?', 'one-of', 'pair', 'pop', + 'pop-last', 'positive?', 'push', 'push-last', 'range', 'rank', + 'rcurry', 'reduce', 'reduce1', 'remainder', 'remove', 'remove!', + 'remove-duplicates', 'remove-duplicates!', 'remove-key!', + 'remove-method', 'replace-elements!', 'replace-subsequence!', + 'restart-query', 'return-allowed?', 'return-description', + 'return-query', 'reverse', 'reverse!', 'round', 'round/', + 'row-major-index', 'second', 'second-setter', 'shallow-copy', + 'signal', 'singleton', 'size', 'size-setter', 'slot-initialized?', + 'sort', 'sort!', 'sorted-applicable-methods', 'subsequence-position', + 'subtype?', 'table-protocol', 'tail', 'tail-setter', 'third', + 
'third-setter', 'truncate', 'truncate/', 'type-error-expected-type', + 'type-error-value', 'type-for-copy', 'type-union', 'union', 'values', + 'vector', 'zero?')) + + valid_name = '\\\\?[\\w!&*<>|^$%@\\-+~?/=]+' + + def get_tokens_unprocessed(self, text): + for index, token, value in RegexLexer.get_tokens_unprocessed(self, text): + if token is Name: + lowercase_value = value.lower() + if lowercase_value in self.builtins: + yield index, Name.Builtin, value + continue + if lowercase_value in self.keywords: + yield index, Keyword, value + continue + if lowercase_value in self.functions: + yield index, Name.Builtin, value + continue + if lowercase_value in self.operators: + yield index, Operator, value + continue + yield index, token, value + + tokens = { + 'root': [ + # Whitespace + (r'\s+', Text), + + # single line comment + (r'//.*?\n', Comment.Single), + + # lid header + (r'([a-z0-9-]+)(:)([ \t]*)(.*(?:\n[ \t].+)*)', + bygroups(Name.Attribute, Operator, Text, String)), + + default('code') # no header match, switch to code + ], + 'code': [ + # Whitespace + (r'\s+', Text), + + # single line comment + (r'//.*?\n', Comment.Single), + + # multi-line comment + (r'/\*', Comment.Multiline, 'comment'), + + # strings and characters + (r'"', String, 'string'), + (r"'(\\.|\\[0-7]{1,3}|\\x[a-f0-9]{1,2}|[^\\\'\n])'", String.Char), + + # binary integer + (r'#b[01]+', Number.Bin), + + # octal integer + (r'#o[0-7]+', Number.Oct), + + # floating point + (r'[-+]?(\d*\.\d+(e[-+]?\d+)?|\d+(\.\d*)?e[-+]?\d+)', Number.Float), + + # decimal integer + (r'[-+]?\d+', Number.Integer), + + # hex integer + (r'#x[0-9a-f]+', Number.Hex), + + # Macro parameters + (r'(\?' + valid_name + ')(:)' + r'(token|name|variable|expression|body|case-body|\*)', + bygroups(Name.Tag, Operator, Name.Builtin)), + (r'(\?)(:)(token|name|variable|expression|body|case-body|\*)', + bygroups(Name.Tag, Operator, Name.Builtin)), + (r'\?' 
+ valid_name, Name.Tag), + + # Punctuation + (r'(=>|::|#\(|#\[|##|\?\?|\?=|\?|[(){}\[\],.;])', Punctuation), + + # Most operators are picked up as names and then re-flagged. + # This one isn't valid in a name though, so we pick it up now. + (r':=', Operator), + + # Pick up #t / #f before we match other stuff with #. + (r'#[tf]', Literal), + + # #"foo" style keywords + (r'#"', String.Symbol, 'keyword'), + + # #rest, #key, #all-keys, etc. + (r'#[a-z0-9-]+', Keyword), + + # required-init-keyword: style keywords. + (valid_name + ':', Keyword), + + # class names + (r'<' + valid_name + '>', Name.Class), + + # define variable forms. + (r'\*' + valid_name + '\*', Name.Variable.Global), + + # define constant forms. + (r'\$' + valid_name, Name.Constant), + + # everything else. We re-flag some of these in the method above. + (valid_name, Name), + ], + 'comment': [ + (r'[^*/]', Comment.Multiline), + (r'/\*', Comment.Multiline, '#push'), + (r'\*/', Comment.Multiline, '#pop'), + (r'[*/]', Comment.Multiline) + ], + 'keyword': [ + (r'"', String.Symbol, '#pop'), + (r'[^\\"]+', String.Symbol), # all other characters + ], + 'string': [ + (r'"', String, '#pop'), + (r'\\([\\abfnrtv"\']|x[a-f0-9]{2,4}|[0-7]{1,3})', String.Escape), + (r'[^\\"\n]+', String), # all other characters + (r'\\\n', String), # line continuation + (r'\\', String), # stray backslash + ] + } + + +class DylanLidLexer(RegexLexer): + """ + For Dylan LID (Library Interchange Definition) files. + + .. versionadded:: 1.6 + """ + + name = 'DylanLID' + aliases = ['dylan-lid', 'lid'] + filenames = ['*.lid', '*.hdp'] + mimetypes = ['text/x-dylan-lid'] + + flags = re.IGNORECASE + + tokens = { + 'root': [ + # Whitespace + (r'\s+', Text), + + # single line comment + (r'//.*?\n', Comment.Single), + + # lid header + (r'(.*?)(:)([ \t]*)(.*(?:\n[ \t].+)*)', + bygroups(Name.Attribute, Operator, Text, String)), + ] + } + + +class DylanConsoleLexer(Lexer): + """ + For Dylan interactive console output like: + + .. 
sourcecode:: dylan-console + + ? let a = 1; + => 1 + ? a + => 1 + + This is based on a copy of the RubyConsoleLexer. + + .. versionadded:: 1.6 + """ + name = 'Dylan session' + aliases = ['dylan-console', 'dylan-repl'] + filenames = ['*.dylan-console'] + mimetypes = ['text/x-dylan-console'] + + _line_re = re.compile('.*?\n') + _prompt_re = re.compile('\?| ') + + def get_tokens_unprocessed(self, text): + dylexer = DylanLexer(**self.options) + + curcode = '' + insertions = [] + for match in self._line_re.finditer(text): + line = match.group() + m = self._prompt_re.match(line) + if m is not None: + end = m.end() + insertions.append((len(curcode), + [(0, Generic.Prompt, line[:end])])) + curcode += line[end:] + else: + if curcode: + for item in do_insertions(insertions, + dylexer.get_tokens_unprocessed(curcode)): + yield item + curcode = '' + insertions = [] + yield match.start(), Generic.Output, line + if curcode: + for item in do_insertions(insertions, + dylexer.get_tokens_unprocessed(curcode)): + yield item diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/ecl.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/ecl.py new file mode 100644 index 0000000..5c9b3bd --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/ecl.py @@ -0,0 +1,125 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.ecl + ~~~~~~~~~~~~~~~~~~~ + + Lexers for the ECL language. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include, bygroups, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Error + +__all__ = ['ECLLexer'] + + +class ECLLexer(RegexLexer): + """ + Lexer for the declarative big-data `ECL + `_ + language. + + .. 
versionadded:: 1.5 + """ + + name = 'ECL' + aliases = ['ecl'] + filenames = ['*.ecl'] + mimetypes = ['application/x-ecl'] + + flags = re.IGNORECASE | re.MULTILINE + + tokens = { + 'root': [ + include('whitespace'), + include('statements'), + ], + 'whitespace': [ + (r'\s+', Text), + (r'\/\/.*', Comment.Single), + (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline), + ], + 'statements': [ + include('types'), + include('keywords'), + include('functions'), + include('hash'), + (r'"', String, 'string'), + (r'\'', String, 'string'), + (r'(\d+\.\d*|\.\d+|\d+)e[+-]?\d+[lu]*', Number.Float), + (r'(\d+\.\d*|\.\d+|\d+f)f?', Number.Float), + (r'0x[0-9a-f]+[lu]*', Number.Hex), + (r'0[0-7]+[lu]*', Number.Oct), + (r'\d+[lu]*', Number.Integer), + (r'\*/', Error), + (r'[~!%^&*+=|?:<>/-]+', Operator), + (r'[{}()\[\],.;]', Punctuation), + (r'[a-z_]\w*', Name), + ], + 'hash': [ + (r'^#.*$', Comment.Preproc), + ], + 'types': [ + (r'(RECORD|END)\D', Keyword.Declaration), + (r'((?:ASCII|BIG_ENDIAN|BOOLEAN|DATA|DECIMAL|EBCDIC|INTEGER|PATTERN|' + r'QSTRING|REAL|RECORD|RULE|SET OF|STRING|TOKEN|UDECIMAL|UNICODE|' + r'UNSIGNED|VARSTRING|VARUNICODE)\d*)(\s+)', + bygroups(Keyword.Type, Text)), + ], + 'keywords': [ + (words(( + 'APPLY', 'ASSERT', 'BUILD', 'BUILDINDEX', 'EVALUATE', 'FAIL', + 'KEYDIFF', 'KEYPATCH', 'LOADXML', 'NOTHOR', 'NOTIFY', 'OUTPUT', + 'PARALLEL', 'SEQUENTIAL', 'SOAPCALL', 'CHECKPOINT', 'DEPRECATED', + 'FAILCODE', 'FAILMESSAGE', 'FAILURE', 'GLOBAL', 'INDEPENDENT', + 'ONWARNING', 'PERSIST', 'PRIORITY', 'RECOVERY', 'STORED', 'SUCCESS', + 'WAIT', 'WHEN'), suffix=r'\b'), + Keyword.Reserved), + # These are classed differently, check later + (words(( + 'ALL', 'AND', 'ANY', 'AS', 'ATMOST', 'BEFORE', 'BEGINC++', 'BEST', 'BETWEEN', 'CASE', + 'CONST', 'COUNTER', 'CSV', 'DESCEND', 'ENCRYPT', 'ENDC++', 'ENDMACRO', 'EXCEPT', + 'EXCLUSIVE', 'EXPIRE', 'EXPORT', 'EXTEND', 'FALSE', 'FEW', 'FIRST', 'FLAT', 'FULL', + 'FUNCTION', 'GROUP', 'HEADER', 'HEADING', 'HOLE', 'IFBLOCK', 'IMPORT', 'IN', 
'JOINED', + 'KEEP', 'KEYED', 'LAST', 'LEFT', 'LIMIT', 'LOAD', 'LOCAL', 'LOCALE', 'LOOKUP', 'MACRO', + 'MANY', 'MAXCOUNT', 'MAXLENGTH', 'MIN SKEW', 'MODULE', 'INTERFACE', 'NAMED', 'NOCASE', + 'NOROOT', 'NOSCAN', 'NOSORT', 'NOT', 'OF', 'ONLY', 'OPT', 'OR', 'OUTER', 'OVERWRITE', + 'PACKED', 'PARTITION', 'PENALTY', 'PHYSICALLENGTH', 'PIPE', 'QUOTE', 'RELATIONSHIP', + 'REPEAT', 'RETURN', 'RIGHT', 'SCAN', 'SELF', 'SEPARATOR', 'SERVICE', 'SHARED', 'SKEW', + 'SKIP', 'SQL', 'STORE', 'TERMINATOR', 'THOR', 'THRESHOLD', 'TOKEN', 'TRANSFORM', 'TRIM', + 'TRUE', 'TYPE', 'UNICODEORDER', 'UNSORTED', 'VALIDATE', 'VIRTUAL', 'WHOLE', 'WILD', + 'WITHIN', 'XML', 'XPATH', '__COMPRESSED__'), suffix=r'\b'), + Keyword.Reserved), + ], + 'functions': [ + (words(( + 'ABS', 'ACOS', 'ALLNODES', 'ASCII', 'ASIN', 'ASSTRING', 'ATAN', 'ATAN2', 'AVE', 'CASE', + 'CHOOSE', 'CHOOSEN', 'CHOOSESETS', 'CLUSTERSIZE', 'COMBINE', 'CORRELATION', 'COS', + 'COSH', 'COUNT', 'COVARIANCE', 'CRON', 'DATASET', 'DEDUP', 'DEFINE', 'DENORMALIZE', + 'DISTRIBUTE', 'DISTRIBUTED', 'DISTRIBUTION', 'EBCDIC', 'ENTH', 'ERROR', 'EVALUATE', + 'EVENT', 'EVENTEXTRA', 'EVENTNAME', 'EXISTS', 'EXP', 'FAILCODE', 'FAILMESSAGE', + 'FETCH', 'FROMUNICODE', 'GETISVALID', 'GLOBAL', 'GRAPH', 'GROUP', 'HASH', 'HASH32', + 'HASH64', 'HASHCRC', 'HASHMD5', 'HAVING', 'IF', 'INDEX', 'INTFORMAT', 'ISVALID', + 'ITERATE', 'JOIN', 'KEYUNICODE', 'LENGTH', 'LIBRARY', 'LIMIT', 'LN', 'LOCAL', 'LOG', 'LOOP', + 'MAP', 'MATCHED', 'MATCHLENGTH', 'MATCHPOSITION', 'MATCHTEXT', 'MATCHUNICODE', + 'MAX', 'MERGE', 'MERGEJOIN', 'MIN', 'NOLOCAL', 'NONEMPTY', 'NORMALIZE', 'PARSE', 'PIPE', + 'POWER', 'PRELOAD', 'PROCESS', 'PROJECT', 'PULL', 'RANDOM', 'RANGE', 'RANK', 'RANKED', + 'REALFORMAT', 'RECORDOF', 'REGEXFIND', 'REGEXREPLACE', 'REGROUP', 'REJECTED', + 'ROLLUP', 'ROUND', 'ROUNDUP', 'ROW', 'ROWDIFF', 'SAMPLE', 'SET', 'SIN', 'SINH', 'SIZEOF', + 'SOAPCALL', 'SORT', 'SORTED', 'SQRT', 'STEPPED', 'STORED', 'SUM', 'TABLE', 'TAN', 'TANH', + 'THISNODE', 'TOPN', 'TOUNICODE', 
'TRANSFER', 'TRIM', 'TRUNCATE', 'TYPEOF', 'UNGROUP', + 'UNICODEORDER', 'VARIANCE', 'WHICH', 'WORKUNIT', 'XMLDECODE', 'XMLENCODE', + 'XMLTEXT', 'XMLUNICODE'), suffix=r'\b'), + Name.Function), + ], + 'string': [ + (r'"', String, '#pop'), + (r'\'', String, '#pop'), + (r'[^"\']+', String), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/eiffel.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/eiffel.py new file mode 100644 index 0000000..e3bf81f --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/eiffel.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.eiffel + ~~~~~~~~~~~~~~~~~~~~~~ + + Lexer for the Eiffel language. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, include, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['EiffelLexer'] + + +class EiffelLexer(RegexLexer): + """ + For `Eiffel `_ source code. + + .. versionadded:: 2.0 + """ + name = 'Eiffel' + aliases = ['eiffel'] + filenames = ['*.e'] + mimetypes = ['text/x-eiffel'] + + tokens = { + 'root': [ + (r'[^\S\n]+', Text), + (r'--.*?\n', Comment.Single), + (r'[^\S\n]+', Text), + # Please note that keyword and operator are case insensitive. 
+ (r'(?i)(true|false|void|current|result|precursor)\b', Keyword.Constant), + (r'(?i)(and(\s+then)?|not|xor|implies|or(\s+else)?)\b', Operator.Word), + (words(( + 'across', 'agent', 'alias', 'all', 'as', 'assign', 'attached', + 'attribute', 'check', 'class', 'convert', 'create', 'debug', + 'deferred', 'detachable', 'do', 'else', 'elseif', 'end', 'ensure', + 'expanded', 'export', 'external', 'feature', 'from', 'frozen', 'if', + 'inherit', 'inspect', 'invariant', 'like', 'local', 'loop', 'none', + 'note', 'obsolete', 'old', 'once', 'only', 'redefine', 'rename', + 'require', 'rescue', 'retry', 'select', 'separate', 'then', + 'undefine', 'until', 'variant', 'when'), prefix=r'(?i)\b', suffix=r'\b'), + Keyword.Reserved), + (r'"\[(([^\]%]|\n)|%(.|\n)|\][^"])*?\]"', String), + (r'"([^"%\n]|%.)*?"', String), + include('numbers'), + (r"'([^'%]|%'|%%)'", String.Char), + (r"(//|\\\\|>=|<=|:=|/=|~|/~|[\\?!#%&@|+/\-=>*$<^\[\]])", Operator), + (r"([{}():;,.])", Punctuation), + (r'([a-z]\w*)|([A-Z][A-Z0-9_]*[a-z]\w*)', Name), + (r'([A-Z][A-Z0-9_]*)', Name.Class), + (r'\n+', Text), + ], + 'numbers': [ + (r'0[xX][a-fA-F0-9]+', Number.Hex), + (r'0[bB][01]+', Number.Bin), + (r'0[cC][0-7]+', Number.Oct), + (r'([0-9]+\.[0-9]*)|([0-9]*\.[0-9]+)', Number.Float), + (r'[0-9]+', Number.Integer), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/erlang.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/erlang.py new file mode 100644 index 0000000..563f774 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/erlang.py @@ -0,0 +1,511 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.erlang + ~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for Erlang. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import re + +from pygments.lexer import Lexer, RegexLexer, bygroups, words, do_insertions, \ + include, default +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Generic + +__all__ = ['ErlangLexer', 'ErlangShellLexer', 'ElixirConsoleLexer', + 'ElixirLexer'] + + +line_re = re.compile('.*?\n') + + +class ErlangLexer(RegexLexer): + """ + For the Erlang functional programming language. + + Blame Jeremy Thurgood (http://jerith.za.net/). + + .. versionadded:: 0.9 + """ + + name = 'Erlang' + aliases = ['erlang'] + filenames = ['*.erl', '*.hrl', '*.es', '*.escript'] + mimetypes = ['text/x-erlang'] + + keywords = ( + 'after', 'begin', 'case', 'catch', 'cond', 'end', 'fun', 'if', + 'let', 'of', 'query', 'receive', 'try', 'when', + ) + + builtins = ( # See erlang(3) man page + 'abs', 'append_element', 'apply', 'atom_to_list', 'binary_to_list', + 'bitstring_to_list', 'binary_to_term', 'bit_size', 'bump_reductions', + 'byte_size', 'cancel_timer', 'check_process_code', 'delete_module', + 'demonitor', 'disconnect_node', 'display', 'element', 'erase', 'exit', + 'float', 'float_to_list', 'fun_info', 'fun_to_list', + 'function_exported', 'garbage_collect', 'get', 'get_keys', + 'group_leader', 'hash', 'hd', 'integer_to_list', 'iolist_to_binary', + 'iolist_size', 'is_atom', 'is_binary', 'is_bitstring', 'is_boolean', + 'is_builtin', 'is_float', 'is_function', 'is_integer', 'is_list', + 'is_number', 'is_pid', 'is_port', 'is_process_alive', 'is_record', + 'is_reference', 'is_tuple', 'length', 'link', 'list_to_atom', + 'list_to_binary', 'list_to_bitstring', 'list_to_existing_atom', + 'list_to_float', 'list_to_integer', 'list_to_pid', 'list_to_tuple', + 'load_module', 'localtime_to_universaltime', 'make_tuple', 'md5', + 'md5_final', 'md5_update', 'memory', 'module_loaded', 'monitor', + 'monitor_node', 'node', 'nodes', 'open_port', 'phash', 'phash2', + 'pid_to_list', 'port_close', 'port_command', 'port_connect', + 'port_control', 
'port_call', 'port_info', 'port_to_list', + 'process_display', 'process_flag', 'process_info', 'purge_module', + 'put', 'read_timer', 'ref_to_list', 'register', 'resume_process', + 'round', 'send', 'send_after', 'send_nosuspend', 'set_cookie', + 'setelement', 'size', 'spawn', 'spawn_link', 'spawn_monitor', + 'spawn_opt', 'split_binary', 'start_timer', 'statistics', + 'suspend_process', 'system_flag', 'system_info', 'system_monitor', + 'system_profile', 'term_to_binary', 'tl', 'trace', 'trace_delivered', + 'trace_info', 'trace_pattern', 'trunc', 'tuple_size', 'tuple_to_list', + 'universaltime_to_localtime', 'unlink', 'unregister', 'whereis' + ) + + operators = r'(\+\+?|--?|\*|/|<|>|/=|=:=|=/=|=<|>=|==?|<-|!|\?)' + word_operators = ( + 'and', 'andalso', 'band', 'bnot', 'bor', 'bsl', 'bsr', 'bxor', + 'div', 'not', 'or', 'orelse', 'rem', 'xor' + ) + + atom_re = r"(?:[a-z]\w*|'[^\n']*[^\\]')" + + variable_re = r'(?:[A-Z_]\w*)' + + escape_re = r'(?:\\(?:[bdefnrstv\'"\\/]|[0-7][0-7]?[0-7]?|\^[a-zA-Z]))' + + macro_re = r'(?:'+variable_re+r'|'+atom_re+r')' + + base_re = r'(?:[2-9]|[12][0-9]|3[0-6])' + + tokens = { + 'root': [ + (r'\s+', Text), + (r'%.*\n', Comment), + (words(keywords, suffix=r'\b'), Keyword), + (words(builtins, suffix=r'\b'), Name.Builtin), + (words(word_operators, suffix=r'\b'), Operator.Word), + (r'^-', Punctuation, 'directive'), + (operators, Operator), + (r'"', String, 'string'), + (r'<<', Name.Label), + (r'>>', Name.Label), + ('(' + atom_re + ')(:)', bygroups(Name.Namespace, Punctuation)), + ('(?:^|(?<=:))(' + atom_re + r')(\s*)(\()', + bygroups(Name.Function, Text, Punctuation)), + (r'[+-]?' 
+ base_re + r'#[0-9a-zA-Z]+', Number.Integer), + (r'[+-]?\d+', Number.Integer), + (r'[+-]?\d+.\d+', Number.Float), + (r'[]\[:_@\".{}()|;,]', Punctuation), + (variable_re, Name.Variable), + (atom_re, Name), + (r'\?'+macro_re, Name.Constant), + (r'\$(?:'+escape_re+r'|\\[ %]|[^\\])', String.Char), + (r'#'+atom_re+r'(:?\.'+atom_re+r')?', Name.Label), + ], + 'string': [ + (escape_re, String.Escape), + (r'"', String, '#pop'), + (r'~[0-9.*]*[~#+bBcdefginpPswWxX]', String.Interpol), + (r'[^"\\~]+', String), + (r'~', String), + ], + 'directive': [ + (r'(define)(\s*)(\()('+macro_re+r')', + bygroups(Name.Entity, Text, Punctuation, Name.Constant), '#pop'), + (r'(record)(\s*)(\()('+macro_re+r')', + bygroups(Name.Entity, Text, Punctuation, Name.Label), '#pop'), + (atom_re, Name.Entity, '#pop'), + ], + } + + +class ErlangShellLexer(Lexer): + """ + Shell sessions in erl (for Erlang code). + + .. versionadded:: 1.1 + """ + name = 'Erlang erl session' + aliases = ['erl'] + filenames = ['*.erl-sh'] + mimetypes = ['text/x-erl-shellsession'] + + _prompt_re = re.compile(r'\d+>(?=\s|\Z)') + + def get_tokens_unprocessed(self, text): + erlexer = ErlangLexer(**self.options) + + curcode = '' + insertions = [] + for match in line_re.finditer(text): + line = match.group() + m = self._prompt_re.match(line) + if m is not None: + end = m.end() + insertions.append((len(curcode), + [(0, Generic.Prompt, line[:end])])) + curcode += line[end:] + else: + if curcode: + for item in do_insertions(insertions, + erlexer.get_tokens_unprocessed(curcode)): + yield item + curcode = '' + insertions = [] + if line.startswith('*'): + yield match.start(), Generic.Traceback, line + else: + yield match.start(), Generic.Output, line + if curcode: + for item in do_insertions(insertions, + erlexer.get_tokens_unprocessed(curcode)): + yield item + + +def gen_elixir_string_rules(name, symbol, token): + states = {} + states['string_' + name] = [ + (r'[^#%s\\]+' % (symbol,), token), + include('escapes'), + (r'\\.', token), + 
(r'(%s)' % (symbol,), bygroups(token), "#pop"), + include('interpol') + ] + return states + + +def gen_elixir_sigstr_rules(term, token, interpol=True): + if interpol: + return [ + (r'[^#%s\\]+' % (term,), token), + include('escapes'), + (r'\\.', token), + (r'%s[a-zA-Z]*' % (term,), token, '#pop'), + include('interpol') + ] + else: + return [ + (r'[^%s\\]+' % (term,), token), + (r'\\.', token), + (r'%s[a-zA-Z]*' % (term,), token, '#pop'), + ] + + +class ElixirLexer(RegexLexer): + """ + For the `Elixir language `_. + + .. versionadded:: 1.5 + """ + + name = 'Elixir' + aliases = ['elixir', 'ex', 'exs'] + filenames = ['*.ex', '*.exs'] + mimetypes = ['text/x-elixir'] + + KEYWORD = ('fn', 'do', 'end', 'after', 'else', 'rescue', 'catch') + KEYWORD_OPERATOR = ('not', 'and', 'or', 'when', 'in') + BUILTIN = ( + 'case', 'cond', 'for', 'if', 'unless', 'try', 'receive', 'raise', + 'quote', 'unquote', 'unquote_splicing', 'throw', 'super' + ) + BUILTIN_DECLARATION = ( + 'def', 'defp', 'defmodule', 'defprotocol', 'defmacro', 'defmacrop', + 'defdelegate', 'defexception', 'defstruct', 'defimpl', 'defcallback' + ) + + BUILTIN_NAMESPACE = ('import', 'require', 'use', 'alias') + CONSTANT = ('nil', 'true', 'false') + + PSEUDO_VAR = ('_', '__MODULE__', '__DIR__', '__ENV__', '__CALLER__') + + OPERATORS3 = ( + '<<<', '>>>', '|||', '&&&', '^^^', '~~~', '===', '!==', + '~>>', '<~>', '|~>', '<|>', + ) + OPERATORS2 = ( + '==', '!=', '<=', '>=', '&&', '||', '<>', '++', '--', '|>', '=~', + '->', '<-', '|', '.', '=', '~>', '<~', + ) + OPERATORS1 = ('<', '>', '+', '-', '*', '/', '!', '^', '&') + + PUNCTUATION = ( + '\\\\', '<<', '>>', '=>', '(', ')', ':', ';', ',', '[', ']' + ) + + def get_tokens_unprocessed(self, text): + for index, token, value in RegexLexer.get_tokens_unprocessed(self, text): + if token is Name: + if value in self.KEYWORD: + yield index, Keyword, value + elif value in self.KEYWORD_OPERATOR: + yield index, Operator.Word, value + elif value in self.BUILTIN: + yield index, 
Keyword, value + elif value in self.BUILTIN_DECLARATION: + yield index, Keyword.Declaration, value + elif value in self.BUILTIN_NAMESPACE: + yield index, Keyword.Namespace, value + elif value in self.CONSTANT: + yield index, Name.Constant, value + elif value in self.PSEUDO_VAR: + yield index, Name.Builtin.Pseudo, value + else: + yield index, token, value + else: + yield index, token, value + + def gen_elixir_sigil_rules(): + # all valid sigil terminators (excluding heredocs) + terminators = [ + (r'\{', r'\}', 'cb'), + (r'\[', r'\]', 'sb'), + (r'\(', r'\)', 'pa'), + (r'<', r'>', 'ab'), + (r'/', r'/', 'slas'), + (r'\|', r'\|', 'pipe'), + ('"', '"', 'quot'), + ("'", "'", 'apos'), + ] + + # heredocs have slightly different rules + triquotes = [(r'"""', 'triquot'), (r"'''", 'triapos')] + + token = String.Other + states = {'sigils': []} + + for term, name in triquotes: + states['sigils'] += [ + (r'(~[a-z])(%s)' % (term,), bygroups(token, String.Heredoc), + (name + '-end', name + '-intp')), + (r'(~[A-Z])(%s)' % (term,), bygroups(token, String.Heredoc), + (name + '-end', name + '-no-intp')), + ] + + states[name + '-end'] = [ + (r'[a-zA-Z]+', token, '#pop'), + default('#pop'), + ] + states[name + '-intp'] = [ + (r'^\s*' + term, String.Heredoc, '#pop'), + include('heredoc_interpol'), + ] + states[name + '-no-intp'] = [ + (r'^\s*' + term, String.Heredoc, '#pop'), + include('heredoc_no_interpol'), + ] + + for lterm, rterm, name in terminators: + states['sigils'] += [ + (r'~[a-z]' + lterm, token, name + '-intp'), + (r'~[A-Z]' + lterm, token, name + '-no-intp'), + ] + states[name + '-intp'] = gen_elixir_sigstr_rules(rterm, token) + states[name + '-no-intp'] = \ + gen_elixir_sigstr_rules(rterm, token, interpol=False) + + return states + + op3_re = "|".join(re.escape(s) for s in OPERATORS3) + op2_re = "|".join(re.escape(s) for s in OPERATORS2) + op1_re = "|".join(re.escape(s) for s in OPERATORS1) + ops_re = r'(?:%s|%s|%s)' % (op3_re, op2_re, op1_re) + punctuation_re = 
"|".join(re.escape(s) for s in PUNCTUATION) + alnum = '\w' + name_re = r'(?:\.\.\.|[a-z_]%s*[!?]?)' % alnum + modname_re = r'[A-Z]%(alnum)s*(?:\.[A-Z]%(alnum)s*)*' % {'alnum': alnum} + complex_name_re = r'(?:%s|%s|%s)' % (name_re, modname_re, ops_re) + special_atom_re = r'(?:\.\.\.|<<>>|%\{\}|%|\{\})' + + long_hex_char_re = r'(\\x\{)([\da-fA-F]+)(\})' + hex_char_re = r'(\\x[\da-fA-F]{1,2})' + escape_char_re = r'(\\[abdefnrstv])' + + tokens = { + 'root': [ + (r'\s+', Text), + (r'#.*$', Comment.Single), + + # Various kinds of characters + (r'(\?)' + long_hex_char_re, + bygroups(String.Char, + String.Escape, Number.Hex, String.Escape)), + (r'(\?)' + hex_char_re, + bygroups(String.Char, String.Escape)), + (r'(\?)' + escape_char_re, + bygroups(String.Char, String.Escape)), + (r'\?\\?.', String.Char), + + # '::' has to go before atoms + (r':::', String.Symbol), + (r'::', Operator), + + # atoms + (r':' + special_atom_re, String.Symbol), + (r':' + complex_name_re, String.Symbol), + (r':"', String.Symbol, 'string_double_atom'), + (r":'", String.Symbol, 'string_single_atom'), + + # [keywords: ...] 
+ (r'(%s|%s)(:)(?=\s|\n)' % (special_atom_re, complex_name_re), + bygroups(String.Symbol, Punctuation)), + + # @attributes + (r'@' + name_re, Name.Attribute), + + # identifiers + (name_re, Name), + (r'(%%?)(%s)' % (modname_re,), bygroups(Punctuation, Name.Class)), + + # operators and punctuation + (op3_re, Operator), + (op2_re, Operator), + (punctuation_re, Punctuation), + (r'&\d', Name.Entity), # anon func arguments + (op1_re, Operator), + + # numbers + (r'0b[01]+', Number.Bin), + (r'0o[0-7]+', Number.Oct), + (r'0x[\da-fA-F]+', Number.Hex), + (r'\d(_?\d)*\.\d(_?\d)*([eE][-+]?\d(_?\d)*)?', Number.Float), + (r'\d(_?\d)*', Number.Integer), + + # strings and heredocs + (r'"""\s*', String.Heredoc, 'heredoc_double'), + (r"'''\s*$", String.Heredoc, 'heredoc_single'), + (r'"', String.Double, 'string_double'), + (r"'", String.Single, 'string_single'), + + include('sigils'), + + (r'%\{', Punctuation, 'map_key'), + (r'\{', Punctuation, 'tuple'), + ], + 'heredoc_double': [ + (r'^\s*"""', String.Heredoc, '#pop'), + include('heredoc_interpol'), + ], + 'heredoc_single': [ + (r"^\s*'''", String.Heredoc, '#pop'), + include('heredoc_interpol'), + ], + 'heredoc_interpol': [ + (r'[^#\\\n]+', String.Heredoc), + include('escapes'), + (r'\\.', String.Heredoc), + (r'\n+', String.Heredoc), + include('interpol'), + ], + 'heredoc_no_interpol': [ + (r'[^\\\n]+', String.Heredoc), + (r'\\.', String.Heredoc), + (r'\n+', String.Heredoc), + ], + 'escapes': [ + (long_hex_char_re, + bygroups(String.Escape, Number.Hex, String.Escape)), + (hex_char_re, String.Escape), + (escape_char_re, String.Escape), + ], + 'interpol': [ + (r'#\{', String.Interpol, 'interpol_string'), + ], + 'interpol_string': [ + (r'\}', String.Interpol, "#pop"), + include('root') + ], + 'map_key': [ + include('root'), + (r':', Punctuation, 'map_val'), + (r'=>', Punctuation, 'map_val'), + (r'\}', Punctuation, '#pop'), + ], + 'map_val': [ + include('root'), + (r',', Punctuation, '#pop'), + (r'(?=\})', Punctuation, '#pop'), + ], + 
'tuple': [ + include('root'), + (r'\}', Punctuation, '#pop'), + ], + } + tokens.update(gen_elixir_string_rules('double', '"', String.Double)) + tokens.update(gen_elixir_string_rules('single', "'", String.Single)) + tokens.update(gen_elixir_string_rules('double_atom', '"', String.Symbol)) + tokens.update(gen_elixir_string_rules('single_atom', "'", String.Symbol)) + tokens.update(gen_elixir_sigil_rules()) + + +class ElixirConsoleLexer(Lexer): + """ + For Elixir interactive console (iex) output like: + + .. sourcecode:: iex + + iex> [head | tail] = [1,2,3] + [1,2,3] + iex> head + 1 + iex> tail + [2,3] + iex> [head | tail] + [1,2,3] + iex> length [head | tail] + 3 + + .. versionadded:: 1.5 + """ + + name = 'Elixir iex session' + aliases = ['iex'] + mimetypes = ['text/x-elixir-shellsession'] + + _prompt_re = re.compile('(iex|\.{3})(\(\d+\))?> ') + + def get_tokens_unprocessed(self, text): + exlexer = ElixirLexer(**self.options) + + curcode = '' + in_error = False + insertions = [] + for match in line_re.finditer(text): + line = match.group() + if line.startswith(u'** '): + in_error = True + insertions.append((len(curcode), + [(0, Generic.Error, line[:-1])])) + curcode += line[-1:] + else: + m = self._prompt_re.match(line) + if m is not None: + in_error = False + end = m.end() + insertions.append((len(curcode), + [(0, Generic.Prompt, line[:end])])) + curcode += line[end:] + else: + if curcode: + for item in do_insertions( + insertions, exlexer.get_tokens_unprocessed(curcode)): + yield item + curcode = '' + insertions = [] + token = Generic.Error if in_error else Generic.Output + yield match.start(), token, line + if curcode: + for item in do_insertions( + insertions, exlexer.get_tokens_unprocessed(curcode)): + yield item diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/esoteric.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/esoteric.py new file mode 100644 index 0000000..7a026ae --- /dev/null +++ 
b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/esoteric.py @@ -0,0 +1,114 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.esoteric + ~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for esoteric languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, include +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Error + +__all__ = ['BrainfuckLexer', 'BefungeLexer', 'RedcodeLexer'] + + +class BrainfuckLexer(RegexLexer): + """ + Lexer for the esoteric `BrainFuck `_ + language. + """ + + name = 'Brainfuck' + aliases = ['brainfuck', 'bf'] + filenames = ['*.bf', '*.b'] + mimetypes = ['application/x-brainfuck'] + + tokens = { + 'common': [ + # use different colors for different instruction types + (r'[.,]+', Name.Tag), + (r'[+-]+', Name.Builtin), + (r'[<>]+', Name.Variable), + (r'[^.,+\-<>\[\]]+', Comment), + ], + 'root': [ + (r'\[', Keyword, 'loop'), + (r'\]', Error), + include('common'), + ], + 'loop': [ + (r'\[', Keyword, '#push'), + (r'\]', Keyword, '#pop'), + include('common'), + ] + } + + +class BefungeLexer(RegexLexer): + """ + Lexer for the esoteric `Befunge `_ + language. + + .. versionadded:: 0.7 + """ + name = 'Befunge' + aliases = ['befunge'] + filenames = ['*.befunge'] + mimetypes = ['application/x-befunge'] + + tokens = { + 'root': [ + (r'[0-9a-f]', Number), + (r'[+*/%!`-]', Operator), # Traditional math + (r'[<>^v?\[\]rxjk]', Name.Variable), # Move, imperatives + (r'[:\\$.,n]', Name.Builtin), # Stack ops, imperatives + (r'[|_mw]', Keyword), + (r'[{}]', Name.Tag), # Befunge-98 stack ops + (r'".*?"', String.Double), # Strings don't appear to allow escapes + (r'\'.', String.Single), # Single character + (r'[#;]', Comment), # Trampoline... 
depends on direction hit + (r'[pg&~=@iotsy]', Keyword), # Misc + (r'[()A-Z]', Comment), # Fingerprints + (r'\s+', Text), # Whitespace doesn't matter + ], + } + + +class RedcodeLexer(RegexLexer): + """ + A simple Redcode lexer based on ICWS'94. + Contributed by Adam Blinkinsop . + + .. versionadded:: 0.8 + """ + name = 'Redcode' + aliases = ['redcode'] + filenames = ['*.cw'] + + opcodes = ('DAT', 'MOV', 'ADD', 'SUB', 'MUL', 'DIV', 'MOD', + 'JMP', 'JMZ', 'JMN', 'DJN', 'CMP', 'SLT', 'SPL', + 'ORG', 'EQU', 'END') + modifiers = ('A', 'B', 'AB', 'BA', 'F', 'X', 'I') + + tokens = { + 'root': [ + # Whitespace: + (r'\s+', Text), + (r';.*$', Comment.Single), + # Lexemes: + # Identifiers + (r'\b(%s)\b' % '|'.join(opcodes), Name.Function), + (r'\b(%s)\b' % '|'.join(modifiers), Name.Decorator), + (r'[A-Za-z_]\w+', Name), + # Operators + (r'[-+*/%]', Operator), + (r'[#$@<>]', Operator), # mode + (r'[.,]', Punctuation), # mode + # Numbers + (r'[-+]?\d+', Number.Integer), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/factor.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/factor.py new file mode 100644 index 0000000..402fc12 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/factor.py @@ -0,0 +1,344 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.factor + ~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for the Factor language. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, bygroups, default, words +from pygments.token import Text, Comment, Keyword, Name, String, Number + +__all__ = ['FactorLexer'] + + +class FactorLexer(RegexLexer): + """ + Lexer for the `Factor `_ language. + + .. 
versionadded:: 1.4 + """ + name = 'Factor' + aliases = ['factor'] + filenames = ['*.factor'] + mimetypes = ['text/x-factor'] + + flags = re.MULTILINE | re.UNICODE + + builtin_kernel = words(( + '-rot', '2bi', '2bi@', '2bi*', '2curry', '2dip', '2drop', '2dup', '2keep', '2nip', + '2over', '2tri', '2tri@', '2tri*', '3bi', '3curry', '3dip', '3drop', '3dup', '3keep', + '3tri', '4dip', '4drop', '4dup', '4keep', '', '=', '>boolean', 'clone', + '?', '?execute', '?if', 'and', 'assert', 'assert=', 'assert?', 'bi', 'bi-curry', + 'bi-curry@', 'bi-curry*', 'bi@', 'bi*', 'boa', 'boolean', 'boolean?', 'both?', + 'build', 'call', 'callstack', 'callstack>array', 'callstack?', 'clear', '(clone)', + 'compose', 'compose?', 'curry', 'curry?', 'datastack', 'die', 'dip', 'do', 'drop', + 'dup', 'dupd', 'either?', 'eq?', 'equal?', 'execute', 'hashcode', 'hashcode*', + 'identity-hashcode', 'identity-tuple', 'identity-tuple?', 'if', 'if*', + 'keep', 'loop', 'most', 'new', 'nip', 'not', 'null', 'object', 'or', 'over', + 'pick', 'prepose', 'retainstack', 'rot', 'same?', 'swap', 'swapd', 'throw', + 'tri', 'tri-curry', 'tri-curry@', 'tri-curry*', 'tri@', 'tri*', 'tuple', + 'tuple?', 'unless', 'unless*', 'until', 'when', 'when*', 'while', 'with', + 'wrapper', 'wrapper?', 'xor'), suffix=r'\s') + + builtin_assocs = words(( + '2cache', '', '>alist', '?at', '?of', 'assoc', 'assoc-all?', + 'assoc-any?', 'assoc-clone-like', 'assoc-combine', 'assoc-diff', + 'assoc-diff!', 'assoc-differ', 'assoc-each', 'assoc-empty?', + 'assoc-filter', 'assoc-filter!', 'assoc-filter-as', 'assoc-find', + 'assoc-hashcode', 'assoc-intersect', 'assoc-like', 'assoc-map', + 'assoc-map-as', 'assoc-partition', 'assoc-refine', 'assoc-size', + 'assoc-stack', 'assoc-subset?', 'assoc-union', 'assoc-union!', + 'assoc=', 'assoc>map', 'assoc?', 'at', 'at+', 'at*', 'cache', 'change-at', + 'clear-assoc', 'delete-at', 'delete-at*', 'enum', 'enum?', 'extract-keys', + 'inc-at', 'key?', 'keys', 'map>assoc', 'maybe-set-at', 'new-assoc', 'of', 
+ 'push-at', 'rename-at', 'set-at', 'sift-keys', 'sift-values', 'substitute', + 'unzip', 'value-at', 'value-at*', 'value?', 'values', 'zip'), suffix=r'\s') + + builtin_combinators = words(( + '2cleave', '2cleave>quot', '3cleave', '3cleave>quot', '4cleave', + '4cleave>quot', 'alist>quot', 'call-effect', 'case', 'case-find', + 'case>quot', 'cleave', 'cleave>quot', 'cond', 'cond>quot', 'deep-spread>quot', + 'execute-effect', 'linear-case-quot', 'no-case', 'no-case?', 'no-cond', + 'no-cond?', 'recursive-hashcode', 'shallow-spread>quot', 'spread', + 'to-fixed-point', 'wrong-values', 'wrong-values?'), suffix=r'\s') + + builtin_math = words(( + '-', '/', '/f', '/i', '/mod', '2/', '2^', '<', '<=', '', '>', + '>=', '>bignum', '>fixnum', '>float', '>integer', '(all-integers?)', + '(each-integer)', '(find-integer)', '*', '+', '?1+', + 'abs', 'align', 'all-integers?', 'bignum', 'bignum?', 'bit?', 'bitand', + 'bitnot', 'bitor', 'bits>double', 'bits>float', 'bitxor', 'complex', + 'complex?', 'denominator', 'double>bits', 'each-integer', 'even?', + 'find-integer', 'find-last-integer', 'fixnum', 'fixnum?', 'float', + 'float>bits', 'float?', 'fp-bitwise=', 'fp-infinity?', 'fp-nan-payload', + 'fp-nan?', 'fp-qnan?', 'fp-sign', 'fp-snan?', 'fp-special?', + 'if-zero', 'imaginary-part', 'integer', 'integer>fixnum', + 'integer>fixnum-strict', 'integer?', 'log2', 'log2-expects-positive', + 'log2-expects-positive?', 'mod', 'neg', 'neg?', 'next-float', + 'next-power-of-2', 'number', 'number=', 'number?', 'numerator', 'odd?', + 'out-of-fixnum-range', 'out-of-fixnum-range?', 'power-of-2?', + 'prev-float', 'ratio', 'ratio?', 'rational', 'rational?', 'real', + 'real-part', 'real?', 'recip', 'rem', 'sgn', 'shift', 'sq', 'times', + 'u<', 'u<=', 'u>', 'u>=', 'unless-zero', 'unordered?', 'when-zero', + 'zero?'), suffix=r'\s') + + builtin_sequences = words(( + '1sequence', '2all?', '2each', '2map', '2map-as', '2map-reduce', '2reduce', + '2selector', '2sequence', '3append', '3append-as', '3each', 
'3map', '3map-as', + '3sequence', '4sequence', '', '', '', '?first', + '?last', '?nth', '?second', '?set-nth', 'accumulate', 'accumulate!', + 'accumulate-as', 'all?', 'any?', 'append', 'append!', 'append-as', + 'assert-sequence', 'assert-sequence=', 'assert-sequence?', + 'binary-reduce', 'bounds-check', 'bounds-check?', 'bounds-error', + 'bounds-error?', 'but-last', 'but-last-slice', 'cartesian-each', + 'cartesian-map', 'cartesian-product', 'change-nth', 'check-slice', + 'check-slice-error', 'clone-like', 'collapse-slice', 'collector', + 'collector-for', 'concat', 'concat-as', 'copy', 'count', 'cut', 'cut-slice', + 'cut*', 'delete-all', 'delete-slice', 'drop-prefix', 'each', 'each-from', + 'each-index', 'empty?', 'exchange', 'filter', 'filter!', 'filter-as', 'find', + 'find-from', 'find-index', 'find-index-from', 'find-last', 'find-last-from', + 'first', 'first2', 'first3', 'first4', 'flip', 'follow', 'fourth', 'glue', 'halves', + 'harvest', 'head', 'head-slice', 'head-slice*', 'head*', 'head?', + 'if-empty', 'immutable', 'immutable-sequence', 'immutable-sequence?', + 'immutable?', 'index', 'index-from', 'indices', 'infimum', 'infimum-by', + 'insert-nth', 'interleave', 'iota', 'iota-tuple', 'iota-tuple?', 'join', + 'join-as', 'last', 'last-index', 'last-index-from', 'length', 'lengthen', + 'like', 'longer', 'longer?', 'longest', 'map', 'map!', 'map-as', 'map-find', + 'map-find-last', 'map-index', 'map-integers', 'map-reduce', 'map-sum', + 'max-length', 'member-eq?', 'member?', 'midpoint@', 'min-length', + 'mismatch', 'move', 'new-like', 'new-resizable', 'new-sequence', + 'non-negative-integer-expected', 'non-negative-integer-expected?', + 'nth', 'nths', 'pad-head', 'pad-tail', 'padding', 'partition', 'pop', 'pop*', + 'prefix', 'prepend', 'prepend-as', 'produce', 'produce-as', 'product', 'push', + 'push-all', 'push-either', 'push-if', 'reduce', 'reduce-index', 'remove', + 'remove!', 'remove-eq', 'remove-eq!', 'remove-nth', 'remove-nth!', 'repetition', + 
'repetition?', 'replace-slice', 'replicate', 'replicate-as', 'rest', + 'rest-slice', 'reverse', 'reverse!', 'reversed', 'reversed?', 'second', + 'selector', 'selector-for', 'sequence', 'sequence-hashcode', 'sequence=', + 'sequence?', 'set-first', 'set-fourth', 'set-last', 'set-length', 'set-nth', + 'set-second', 'set-third', 'short', 'shorten', 'shorter', 'shorter?', + 'shortest', 'sift', 'slice', 'slice-error', 'slice-error?', 'slice?', + 'snip', 'snip-slice', 'start', 'start*', 'subseq', 'subseq?', 'suffix', + 'suffix!', 'sum', 'sum-lengths', 'supremum', 'supremum-by', 'surround', 'tail', + 'tail-slice', 'tail-slice*', 'tail*', 'tail?', 'third', 'trim', + 'trim-head', 'trim-head-slice', 'trim-slice', 'trim-tail', 'trim-tail-slice', + 'unclip', 'unclip-last', 'unclip-last-slice', 'unclip-slice', 'unless-empty', + 'virtual-exemplar', 'virtual-sequence', 'virtual-sequence?', 'virtual@', + 'when-empty'), suffix=r'\s') + + builtin_namespaces = words(( + '+@', 'change', 'change-global', 'counter', 'dec', 'get', 'get-global', + 'global', 'inc', 'init-namespaces', 'initialize', 'is-global', 'make-assoc', + 'namespace', 'namestack', 'off', 'on', 'set', 'set-global', 'set-namestack', + 'toggle', 'with-global', 'with-scope', 'with-variable', 'with-variables'), + suffix=r'\s') + + builtin_arrays = words(( + '1array', '2array', '3array', '4array', '', '>array', 'array', + 'array?', 'pair', 'pair?', 'resize-array'), suffix=r'\s') + + builtin_io = words(( + '(each-stream-block-slice)', '(each-stream-block)', + '(stream-contents-by-block)', '(stream-contents-by-element)', + '(stream-contents-by-length-or-block)', + '(stream-contents-by-length)', '+byte+', '+character+', + 'bad-seek-type', 'bad-seek-type?', 'bl', 'contents', 'each-block', + 'each-block-size', 'each-block-slice', 'each-line', 'each-morsel', + 'each-stream-block', 'each-stream-block-slice', 'each-stream-line', + 'error-stream', 'flush', 'input-stream', 'input-stream?', + 'invalid-read-buffer', 
'invalid-read-buffer?', 'lines', 'nl', + 'output-stream', 'output-stream?', 'print', 'read', 'read-into', + 'read-partial', 'read-partial-into', 'read-until', 'read1', 'readln', + 'seek-absolute', 'seek-absolute?', 'seek-end', 'seek-end?', + 'seek-input', 'seek-output', 'seek-relative', 'seek-relative?', + 'stream-bl', 'stream-contents', 'stream-contents*', 'stream-copy', + 'stream-copy*', 'stream-element-type', 'stream-flush', + 'stream-length', 'stream-lines', 'stream-nl', 'stream-print', + 'stream-read', 'stream-read-into', 'stream-read-partial', + 'stream-read-partial-into', 'stream-read-partial-unsafe', + 'stream-read-unsafe', 'stream-read-until', 'stream-read1', + 'stream-readln', 'stream-seek', 'stream-seekable?', 'stream-tell', + 'stream-write', 'stream-write1', 'tell-input', 'tell-output', + 'with-error-stream', 'with-error-stream*', 'with-error>output', + 'with-input-output+error-streams', + 'with-input-output+error-streams*', 'with-input-stream', + 'with-input-stream*', 'with-output-stream', 'with-output-stream*', + 'with-output>error', 'with-output+error-stream', + 'with-output+error-stream*', 'with-streams', 'with-streams*', + 'write', 'write1'), suffix=r'\s') + + builtin_strings = words(( + '1string', '', '>string', 'resize-string', 'string', + 'string?'), suffix=r'\s') + + builtin_vectors = words(( + '1vector', '', '>vector', '?push', 'vector', 'vector?'), + suffix=r'\s') + + builtin_continuations = words(( + '', '', '', 'attempt-all', + 'attempt-all-error', 'attempt-all-error?', 'callback-error-hook', + 'callcc0', 'callcc1', 'cleanup', 'compute-restarts', 'condition', + 'condition?', 'continuation', 'continuation?', 'continue', + 'continue-restart', 'continue-with', 'current-continuation', + 'error', 'error-continuation', 'error-in-thread', 'error-thread', + 'ifcc', 'ignore-errors', 'in-callback?', 'original-error', 'recover', + 'restart', 'restart?', 'restarts', 'rethrow', 'rethrow-restarts', + 'return', 'return-continuation', 'thread-error-hook', 
'throw-continue', + 'throw-restarts', 'with-datastack', 'with-return'), suffix=r'\s') + + tokens = { + 'root': [ + # factor allows a file to start with a shebang + (r'#!.*$', Comment.Preproc), + default('base'), + ], + 'base': [ + (r'\s+', Text), + + # defining words + (r'((?:MACRO|MEMO|TYPED)?:[:]?)(\s+)(\S+)', + bygroups(Keyword, Text, Name.Function)), + (r'(M:[:]?)(\s+)(\S+)(\s+)(\S+)', + bygroups(Keyword, Text, Name.Class, Text, Name.Function)), + (r'(C:)(\s+)(\S+)(\s+)(\S+)', + bygroups(Keyword, Text, Name.Function, Text, Name.Class)), + (r'(GENERIC:)(\s+)(\S+)', + bygroups(Keyword, Text, Name.Function)), + (r'(HOOK:|GENERIC#)(\s+)(\S+)(\s+)(\S+)', + bygroups(Keyword, Text, Name.Function, Text, Name.Function)), + (r'\(\s', Name.Function, 'stackeffect'), + (r';\s', Keyword), + + # imports and namespaces + (r'(USING:)(\s+)', + bygroups(Keyword.Namespace, Text), 'vocabs'), + (r'(USE:|UNUSE:|IN:|QUALIFIED:)(\s+)(\S+)', + bygroups(Keyword.Namespace, Text, Name.Namespace)), + (r'(QUALIFIED-WITH:)(\s+)(\S+)(\s+)(\S+)', + bygroups(Keyword.Namespace, Text, Name.Namespace, Text, Name.Namespace)), + (r'(FROM:|EXCLUDE:)(\s+)(\S+)(\s+=>\s)', + bygroups(Keyword.Namespace, Text, Name.Namespace, Text), 'words'), + (r'(RENAME:)(\s+)(\S+)(\s+)(\S+)(\s+=>\s+)(\S+)', + bygroups(Keyword.Namespace, Text, Name.Function, Text, Name.Namespace, Text, Name.Function)), + (r'(ALIAS:|TYPEDEF:)(\s+)(\S+)(\s+)(\S+)', + bygroups(Keyword.Namespace, Text, Name.Function, Text, Name.Function)), + (r'(DEFER:|FORGET:|POSTPONE:)(\s+)(\S+)', + bygroups(Keyword.Namespace, Text, Name.Function)), + + # tuples and classes + (r'(TUPLE:|ERROR:)(\s+)(\S+)(\s+<\s+)(\S+)', + bygroups(Keyword, Text, Name.Class, Text, Name.Class), 'slots'), + (r'(TUPLE:|ERROR:|BUILTIN:)(\s+)(\S+)', + bygroups(Keyword, Text, Name.Class), 'slots'), + (r'(MIXIN:|UNION:|INTERSECTION:)(\s+)(\S+)', + bygroups(Keyword, Text, Name.Class)), + (r'(PREDICATE:)(\s+)(\S+)(\s+<\s+)(\S+)', + bygroups(Keyword, Text, Name.Class, Text, 
Name.Class)), + (r'(C:)(\s+)(\S+)(\s+)(\S+)', + bygroups(Keyword, Text, Name.Function, Text, Name.Class)), + (r'(INSTANCE:)(\s+)(\S+)(\s+)(\S+)', + bygroups(Keyword, Text, Name.Class, Text, Name.Class)), + (r'(SLOT:)(\s+)(\S+)', bygroups(Keyword, Text, Name.Function)), + (r'(SINGLETON:)(\s+)(\S+)', bygroups(Keyword, Text, Name.Class)), + (r'SINGLETONS:', Keyword, 'classes'), + + # other syntax + (r'(CONSTANT:|SYMBOL:|MAIN:|HELP:)(\s+)(\S+)', + bygroups(Keyword, Text, Name.Function)), + (r'SYMBOLS:\s', Keyword, 'words'), + (r'SYNTAX:\s', Keyword), + (r'ALIEN:\s', Keyword), + (r'(STRUCT:)(\s+)(\S+)', bygroups(Keyword, Text, Name.Class)), + (r'(FUNCTION:)(\s+\S+\s+)(\S+)(\s+\(\s+[^)]+\)\s)', + bygroups(Keyword.Namespace, Text, Name.Function, Text)), + (r'(FUNCTION-ALIAS:)(\s+)(\S+)(\s+\S+\s+)(\S+)(\s+\(\s+[^)]+\)\s)', + bygroups(Keyword.Namespace, Text, Name.Function, Text, Name.Function, Text)), + + # vocab.private + (r'(?:)\s', Keyword.Namespace), + + # strings + (r'"""\s+(?:.|\n)*?\s+"""', String), + (r'"(?:\\\\|\\"|[^"])*"', String), + (r'\S+"\s+(?:\\\\|\\"|[^"])*"', String), + (r'CHAR:\s+(?:\\[\\abfnrstv]|[^\\]\S*)\s', String.Char), + + # comments + (r'!\s+.*$', Comment), + (r'#!\s+.*$', Comment), + (r'/\*\s+(?:.|\n)*?\s\*/\s', Comment), + + # boolean constants + (r'[tf]\s', Name.Constant), + + # symbols and literals + (r'[\\$]\s+\S+', Name.Constant), + (r'M\\\s+\S+\s+\S+', Name.Constant), + + # numbers + (r'[+-]?(?:[\d,]*\d)?\.(?:\d([\d,]*\d)?)?(?:[eE][+-]?\d+)?\s', Number), + (r'[+-]?\d(?:[\d,]*\d)?(?:[eE][+-]?\d+)?\s', Number), + (r'0x[a-fA-F\d](?:[a-fA-F\d,]*[a-fA-F\d])?(?:p\d([\d,]*\d)?)?\s', Number), + (r'NAN:\s+[a-fA-F\d](?:[a-fA-F\d,]*[a-fA-F\d])?(?:p\d([\d,]*\d)?)?\s', Number), + (r'0b[01]+\s', Number.Bin), + (r'0o[0-7]+\s', Number.Oct), + (r'(?:\d([\d,]*\d)?)?\+\d(?:[\d,]*\d)?/\d(?:[\d,]*\d)?\s', Number), + (r'(?:\-\d([\d,]*\d)?)?\-\d(?:[\d,]*\d)?/\d(?:[\d,]*\d)?\s', Number), + + # keywords + 
(r'(?:deprecated|final|foldable|flushable|inline|recursive)\s', + Keyword), + + # builtins + (builtin_kernel, Name.Builtin), + (builtin_assocs, Name.Builtin), + (builtin_combinators, Name.Builtin), + (builtin_math, Name.Builtin), + (builtin_sequences, Name.Builtin), + (builtin_namespaces, Name.Builtin), + (builtin_arrays, Name.Builtin), + (builtin_io, Name.Builtin), + (builtin_strings, Name.Builtin), + (builtin_vectors, Name.Builtin), + (builtin_continuations, Name.Builtin), + + # everything else is text + (r'\S+', Text), + ], + 'stackeffect': [ + (r'\s+', Text), + (r'\(\s+', Name.Function, 'stackeffect'), + (r'\)\s', Name.Function, '#pop'), + (r'--\s', Name.Function), + (r'\S+', Name.Variable), + ], + 'slots': [ + (r'\s+', Text), + (r';\s', Keyword, '#pop'), + (r'(\{\s+)(\S+)(\s+[^}]+\s+\}\s)', + bygroups(Text, Name.Variable, Text)), + (r'\S+', Name.Variable), + ], + 'vocabs': [ + (r'\s+', Text), + (r';\s', Keyword, '#pop'), + (r'\S+', Name.Namespace), + ], + 'classes': [ + (r'\s+', Text), + (r';\s', Keyword, '#pop'), + (r'\S+', Name.Class), + ], + 'words': [ + (r'\s+', Text), + (r';\s', Keyword, '#pop'), + (r'\S+', Name.Function), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/fantom.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/fantom.py new file mode 100644 index 0000000..953b324 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/fantom.py @@ -0,0 +1,250 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.fantom + ~~~~~~~~~~~~~~~~~~~~~~ + + Lexer for the Fantom language. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +from string import Template + +from pygments.lexer import RegexLexer, include, bygroups, using, \ + this, default, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Literal + +__all__ = ['FantomLexer'] + + +class FantomLexer(RegexLexer): + """ + For Fantom source code. + + .. versionadded:: 1.5 + """ + name = 'Fantom' + aliases = ['fan'] + filenames = ['*.fan'] + mimetypes = ['application/x-fantom'] + + # often used regexes + def s(str): + return Template(str).substitute( + dict( + pod=r'[\"\w\.]+', + eos=r'\n|;', + id=r'[a-zA-Z_]\w*', + # all chars which can be part of type definition. Starts with + # either letter, or [ (maps), or | (funcs) + type=r'(?:\[|[a-zA-Z_]|\|)[:\w\[\]|\->?]*?', + ) + ) + + tokens = { + 'comments': [ + (r'(?s)/\*.*?\*/', Comment.Multiline), # Multiline + (r'//.*?\n', Comment.Single), # Single line + # TODO: highlight references in fandocs + (r'\*\*.*?\n', Comment.Special), # Fandoc + (r'#.*\n', Comment.Single) # Shell-style + ], + 'literals': [ + (r'\b-?[\d_]+(ns|ms|sec|min|hr|day)', Number), # Duration + (r'\b-?[\d_]*\.[\d_]+(ns|ms|sec|min|hr|day)', Number), # Duration with dot + (r'\b-?(\d+)?\.\d+(f|F|d|D)?', Number.Float), # Float/Decimal + (r'\b-?0x[0-9a-fA-F_]+', Number.Hex), # Hex + (r'\b-?[\d_]+', Number.Integer), # Int + (r"'\\.'|'[^\\]'|'\\u[0-9a-f]{4}'", String.Char), # Char + (r'"', Punctuation, 'insideStr'), # Opening quote + (r'`', Punctuation, 'insideUri'), # Opening accent + (r'\b(true|false|null)\b', Keyword.Constant), # Bool & null + (r'(?:(\w+)(::))?(\w+)(<\|)(.*?)(\|>)', # DSL + bygroups(Name.Namespace, Punctuation, Name.Class, + Punctuation, String, Punctuation)), + (r'(?:(\w+)(::))?(\w+)?(#)(\w+)?', # Type/slot literal + bygroups(Name.Namespace, Punctuation, Name.Class, + Punctuation, Name.Function)), + (r'\[,\]', Literal), # Empty list + (s(r'($type)(\[,\])'), # Typed empty list + bygroups(using(this, state='inType'), Literal)), + (r'\[:\]', Literal), # 
Empty Map + (s(r'($type)(\[:\])'), + bygroups(using(this, state='inType'), Literal)), + ], + 'insideStr': [ + (r'\\\\', String.Escape), # Escaped backslash + (r'\\"', String.Escape), # Escaped " + (r'\\`', String.Escape), # Escaped ` + (r'\$\w+', String.Interpol), # Subst var + (r'\$\{.*?\}', String.Interpol), # Subst expr + (r'"', Punctuation, '#pop'), # Closing quot + (r'.', String) # String content + ], + 'insideUri': [ # TODO: remove copy/paste str/uri + (r'\\\\', String.Escape), # Escaped backslash + (r'\\"', String.Escape), # Escaped " + (r'\\`', String.Escape), # Escaped ` + (r'\$\w+', String.Interpol), # Subst var + (r'\$\{.*?\}', String.Interpol), # Subst expr + (r'`', Punctuation, '#pop'), # Closing tick + (r'.', String.Backtick) # URI content + ], + 'protectionKeywords': [ + (r'\b(public|protected|private|internal)\b', Keyword), + ], + 'typeKeywords': [ + (r'\b(abstract|final|const|native|facet|enum)\b', Keyword), + ], + 'methodKeywords': [ + (r'\b(abstract|native|once|override|static|virtual|final)\b', + Keyword), + ], + 'fieldKeywords': [ + (r'\b(abstract|const|final|native|override|static|virtual|' + r'readonly)\b', Keyword) + ], + 'otherKeywords': [ + (words(( + 'try', 'catch', 'throw', 'finally', 'for', 'if', 'else', 'while', + 'as', 'is', 'isnot', 'switch', 'case', 'default', 'continue', + 'break', 'do', 'return', 'get', 'set'), prefix=r'\b', suffix=r'\b'), + Keyword), + (r'\b(it|this|super)\b', Name.Builtin.Pseudo), + ], + 'operators': [ + (r'\+\+|\-\-|\+|\-|\*|/|\|\||&&|<=>|<=|<|>=|>|=|!|\[|\]', Operator) + ], + 'inType': [ + (r'[\[\]|\->:?]', Punctuation), + (s(r'$id'), Name.Class), + default('#pop'), + + ], + 'root': [ + include('comments'), + include('protectionKeywords'), + include('typeKeywords'), + include('methodKeywords'), + include('fieldKeywords'), + include('literals'), + include('otherKeywords'), + include('operators'), + (r'using\b', Keyword.Namespace, 'using'), # Using stmt + (r'@\w+', Name.Decorator, 'facet'), # Symbol + 
(r'(class|mixin)(\s+)(\w+)', bygroups(Keyword, Text, Name.Class), + 'inheritance'), # Inheritance list + + # Type var := val + (s(r'($type)([ \t]+)($id)(\s*)(:=)'), + bygroups(using(this, state='inType'), Text, + Name.Variable, Text, Operator)), + + # var := val + (s(r'($id)(\s*)(:=)'), + bygroups(Name.Variable, Text, Operator)), + + # .someId( or ->someId( ### + (s(r'(\.|(?:\->))($id)(\s*)(\()'), + bygroups(Operator, Name.Function, Text, Punctuation), + 'insideParen'), + + # .someId or ->someId + (s(r'(\.|(?:\->))($id)'), + bygroups(Operator, Name.Function)), + + # new makeXXX ( + (r'(new)(\s+)(make\w*)(\s*)(\()', + bygroups(Keyword, Text, Name.Function, Text, Punctuation), + 'insideMethodDeclArgs'), + + # Type name ( + (s(r'($type)([ \t]+)' # Return type and whitespace + r'($id)(\s*)(\()'), # method name + open brace + bygroups(using(this, state='inType'), Text, + Name.Function, Text, Punctuation), + 'insideMethodDeclArgs'), + + # ArgType argName, + (s(r'($type)(\s+)($id)(\s*)(,)'), + bygroups(using(this, state='inType'), Text, Name.Variable, + Text, Punctuation)), + + # ArgType argName) + # Covered in 'insideParen' state + + # ArgType argName -> ArgType| + (s(r'($type)(\s+)($id)(\s*)(\->)(\s*)($type)(\|)'), + bygroups(using(this, state='inType'), Text, Name.Variable, + Text, Punctuation, Text, using(this, state='inType'), + Punctuation)), + + # ArgType argName| + (s(r'($type)(\s+)($id)(\s*)(\|)'), + bygroups(using(this, state='inType'), Text, Name.Variable, + Text, Punctuation)), + + # Type var + (s(r'($type)([ \t]+)($id)'), + bygroups(using(this, state='inType'), Text, + Name.Variable)), + + (r'\(', Punctuation, 'insideParen'), + (r'\{', Punctuation, 'insideBrace'), + (r'.', Text) + ], + 'insideParen': [ + (r'\)', Punctuation, '#pop'), + include('root'), + ], + 'insideMethodDeclArgs': [ + (r'\)', Punctuation, '#pop'), + (s(r'($type)(\s+)($id)(\s*)(\))'), + bygroups(using(this, state='inType'), Text, Name.Variable, + Text, Punctuation), '#pop'), + 
include('root'), + ], + 'insideBrace': [ + (r'\}', Punctuation, '#pop'), + include('root'), + ], + 'inheritance': [ + (r'\s+', Text), # Whitespace + (r':|,', Punctuation), + (r'(?:(\w+)(::))?(\w+)', + bygroups(Name.Namespace, Punctuation, Name.Class)), + (r'\{', Punctuation, '#pop') + ], + 'using': [ + (r'[ \t]+', Text), # consume whitespaces + (r'(\[)(\w+)(\])', + bygroups(Punctuation, Comment.Special, Punctuation)), # ffi + (r'(\")?([\w.]+)(\")?', + bygroups(Punctuation, Name.Namespace, Punctuation)), # podname + (r'::', Punctuation, 'usingClass'), + default('#pop') + ], + 'usingClass': [ + (r'[ \t]+', Text), # consume whitespaces + (r'(as)(\s+)(\w+)', + bygroups(Keyword.Declaration, Text, Name.Class), '#pop:2'), + (r'[\w$]+', Name.Class), + default('#pop:2') # jump out to root state + ], + 'facet': [ + (r'\s+', Text), + (r'\{', Punctuation, 'facetFields'), + default('#pop') + ], + 'facetFields': [ + include('comments'), + include('literals'), + include('operators'), + (r'\s+', Text), + (r'(\s*)(\w+)(\s*)(=)', bygroups(Text, Name, Text, Operator)), + (r'\}', Punctuation, '#pop'), + (r'.', Text) + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/felix.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/felix.py new file mode 100644 index 0000000..ca8df57 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/felix.py @@ -0,0 +1,273 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.felix + ~~~~~~~~~~~~~~~~~~~~~ + + Lexer for the Felix language. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, include, bygroups, default, words, \ + combined +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['FelixLexer'] + + +class FelixLexer(RegexLexer): + """ + For `Felix `_ source code. + + .. 
versionadded:: 1.2 + """ + + name = 'Felix' + aliases = ['felix', 'flx'] + filenames = ['*.flx', '*.flxh'] + mimetypes = ['text/x-felix'] + + preproc = ( + 'elif', 'else', 'endif', 'if', 'ifdef', 'ifndef', + ) + + keywords = ( + '_', '_deref', 'all', 'as', + 'assert', 'attempt', 'call', 'callback', 'case', 'caseno', 'cclass', + 'code', 'compound', 'ctypes', 'do', 'done', 'downto', 'elif', 'else', + 'endattempt', 'endcase', 'endif', 'endmatch', 'enum', 'except', + 'exceptions', 'expect', 'finally', 'for', 'forall', 'forget', 'fork', + 'functor', 'goto', 'ident', 'if', 'incomplete', 'inherit', 'instance', + 'interface', 'jump', 'lambda', 'loop', 'match', 'module', 'namespace', + 'new', 'noexpand', 'nonterm', 'obj', 'of', 'open', 'parse', 'raise', + 'regexp', 'reglex', 'regmatch', 'rename', 'return', 'the', 'then', + 'to', 'type', 'typecase', 'typedef', 'typematch', 'typeof', 'upto', + 'when', 'whilst', 'with', 'yield', + ) + + keyword_directives = ( + '_gc_pointer', '_gc_type', 'body', 'comment', 'const', 'export', + 'header', 'inline', 'lval', 'macro', 'noinline', 'noreturn', + 'package', 'private', 'pod', 'property', 'public', 'publish', + 'requires', 'todo', 'virtual', 'use', + ) + + keyword_declarations = ( + 'def', 'let', 'ref', 'val', 'var', + ) + + keyword_types = ( + 'unit', 'void', 'any', 'bool', + 'byte', 'offset', + 'address', 'caddress', 'cvaddress', 'vaddress', + 'tiny', 'short', 'int', 'long', 'vlong', + 'utiny', 'ushort', 'vshort', 'uint', 'ulong', 'uvlong', + 'int8', 'int16', 'int32', 'int64', + 'uint8', 'uint16', 'uint32', 'uint64', + 'float', 'double', 'ldouble', + 'complex', 'dcomplex', 'lcomplex', + 'imaginary', 'dimaginary', 'limaginary', + 'char', 'wchar', 'uchar', + 'charp', 'charcp', 'ucharp', 'ucharcp', + 'string', 'wstring', 'ustring', + 'cont', + 'array', 'varray', 'list', + 'lvalue', 'opt', 'slice', + ) + + keyword_constants = ( + 'false', 'true', + ) + + operator_words = ( + 'and', 'not', 'in', 'is', 'isin', 'or', 'xor', + ) + + 
name_builtins = ( + '_svc', 'while', + ) + + name_pseudo = ( + 'root', 'self', 'this', + ) + + decimal_suffixes = '([tTsSiIlLvV]|ll|LL|([iIuU])(8|16|32|64))?' + + tokens = { + 'root': [ + include('whitespace'), + + # Keywords + (words(('axiom', 'ctor', 'fun', 'gen', 'proc', 'reduce', + 'union'), suffix=r'\b'), + Keyword, 'funcname'), + (words(('class', 'cclass', 'cstruct', 'obj', 'struct'), suffix=r'\b'), + Keyword, 'classname'), + (r'(instance|module|typeclass)\b', Keyword, 'modulename'), + + (words(keywords, suffix=r'\b'), Keyword), + (words(keyword_directives, suffix=r'\b'), Name.Decorator), + (words(keyword_declarations, suffix=r'\b'), Keyword.Declaration), + (words(keyword_types, suffix=r'\b'), Keyword.Type), + (words(keyword_constants, suffix=r'\b'), Keyword.Constant), + + # Operators + include('operators'), + + # Float Literal + # -- Hex Float + (r'0[xX]([0-9a-fA-F_]*\.[0-9a-fA-F_]+|[0-9a-fA-F_]+)' + r'[pP][+\-]?[0-9_]+[lLfFdD]?', Number.Float), + # -- DecimalFloat + (r'[0-9_]+(\.[0-9_]+[eE][+\-]?[0-9_]+|' + r'\.[0-9_]*|[eE][+\-]?[0-9_]+)[lLfFdD]?', Number.Float), + (r'\.(0|[1-9][0-9_]*)([eE][+\-]?[0-9_]+)?[lLfFdD]?', + Number.Float), + + # IntegerLiteral + # -- Binary + (r'0[Bb][01_]+%s' % decimal_suffixes, Number.Bin), + # -- Octal + (r'0[0-7_]+%s' % decimal_suffixes, Number.Oct), + # -- Hexadecimal + (r'0[xX][0-9a-fA-F_]+%s' % decimal_suffixes, Number.Hex), + # -- Decimal + (r'(0|[1-9][0-9_]*)%s' % decimal_suffixes, Number.Integer), + + # Strings + ('([rR][cC]?|[cC][rR])"""', String, 'tdqs'), + ("([rR][cC]?|[cC][rR])'''", String, 'tsqs'), + ('([rR][cC]?|[cC][rR])"', String, 'dqs'), + ("([rR][cC]?|[cC][rR])'", String, 'sqs'), + ('[cCfFqQwWuU]?"""', String, combined('stringescape', 'tdqs')), + ("[cCfFqQwWuU]?'''", String, combined('stringescape', 'tsqs')), + ('[cCfFqQwWuU]?"', String, combined('stringescape', 'dqs')), + ("[cCfFqQwWuU]?'", String, combined('stringescape', 'sqs')), + + # Punctuation + (r'[\[\]{}:(),;?]', Punctuation), + + # Labels + 
(r'[a-zA-Z_]\w*:>', Name.Label), + + # Identifiers + (r'(%s)\b' % '|'.join(name_builtins), Name.Builtin), + (r'(%s)\b' % '|'.join(name_pseudo), Name.Builtin.Pseudo), + (r'[a-zA-Z_]\w*', Name), + ], + 'whitespace': [ + (r'\n', Text), + (r'\s+', Text), + + include('comment'), + + # Preprocessor + (r'#\s*if\s+0', Comment.Preproc, 'if0'), + (r'#', Comment.Preproc, 'macro'), + ], + 'operators': [ + (r'(%s)\b' % '|'.join(operator_words), Operator.Word), + (r'!=|==|<<|>>|\|\||&&|[-~+/*%=<>&^|.$]', Operator), + ], + 'comment': [ + (r'//(.*?)\n', Comment.Single), + (r'/[*]', Comment.Multiline, 'comment2'), + ], + 'comment2': [ + (r'[^/*]', Comment.Multiline), + (r'/[*]', Comment.Multiline, '#push'), + (r'[*]/', Comment.Multiline, '#pop'), + (r'[/*]', Comment.Multiline), + ], + 'if0': [ + (r'^\s*#if.*?(?]*?>)', + bygroups(Comment.Preproc, Text, String), '#pop'), + (r'(import|include)(\s+)("[^"]*?")', + bygroups(Comment.Preproc, Text, String), '#pop'), + (r"(import|include)(\s+)('[^']*?')", + bygroups(Comment.Preproc, Text, String), '#pop'), + (r'[^/\n]+', Comment.Preproc), + # (r'/[*](.|\n)*?[*]/', Comment), + # (r'//.*?\n', Comment, '#pop'), + (r'/', Comment.Preproc), + (r'(?<=\\)\n', Comment.Preproc), + (r'\n', Comment.Preproc, '#pop'), + ], + 'funcname': [ + include('whitespace'), + (r'[a-zA-Z_]\w*', Name.Function, '#pop'), + # anonymous functions + (r'(?=\()', Text, '#pop'), + ], + 'classname': [ + include('whitespace'), + (r'[a-zA-Z_]\w*', Name.Class, '#pop'), + # anonymous classes + (r'(?=\{)', Text, '#pop'), + ], + 'modulename': [ + include('whitespace'), + (r'\[', Punctuation, ('modulename2', 'tvarlist')), + default('modulename2'), + ], + 'modulename2': [ + include('whitespace'), + (r'([a-zA-Z_]\w*)', Name.Namespace, '#pop:2'), + ], + 'tvarlist': [ + include('whitespace'), + include('operators'), + (r'\[', Punctuation, '#push'), + (r'\]', Punctuation, '#pop'), + (r',', Punctuation), + (r'(with|where)\b', Keyword), + (r'[a-zA-Z_]\w*', Name), + ], + 'stringescape': [ + 
(r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|' + r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape) + ], + 'strings': [ + (r'%(\([a-zA-Z0-9]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?' + '[hlL]?[diouxXeEfFgGcrs%]', String.Interpol), + (r'[^\\\'"%\n]+', String), + # quotes, percents and backslashes must be parsed one at a time + (r'[\'"\\]', String), + # unhandled string formatting sign + (r'%', String) + # newlines are an error (use "nl" state) + ], + 'nl': [ + (r'\n', String) + ], + 'dqs': [ + (r'"', String, '#pop'), + # included here again for raw strings + (r'\\\\|\\"|\\\n', String.Escape), + include('strings') + ], + 'sqs': [ + (r"'", String, '#pop'), + # included here again for raw strings + (r"\\\\|\\'|\\\n", String.Escape), + include('strings') + ], + 'tdqs': [ + (r'"""', String, '#pop'), + include('strings'), + include('nl') + ], + 'tsqs': [ + (r"'''", String, '#pop'), + include('strings'), + include('nl') + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/fortran.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/fortran.py new file mode 100644 index 0000000..3ef6ff4 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/fortran.py @@ -0,0 +1,161 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.fortran + ~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for Fortran languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['FortranLexer'] + + +class FortranLexer(RegexLexer): + """ + Lexer for FORTRAN 90 code. + + .. 
versionadded:: 0.10 + """ + name = 'Fortran' + aliases = ['fortran'] + filenames = ['*.f', '*.f90', '*.F', '*.F90'] + mimetypes = ['text/x-fortran'] + flags = re.IGNORECASE | re.MULTILINE + + # Data Types: INTEGER, REAL, COMPLEX, LOGICAL, CHARACTER and DOUBLE PRECISION + # Operators: **, *, +, -, /, <, >, <=, >=, ==, /= + # Logical (?): NOT, AND, OR, EQV, NEQV + + # Builtins: + # http://gcc.gnu.org/onlinedocs/gcc-3.4.6/g77/Table-of-Intrinsic-Functions.html + + tokens = { + 'root': [ + (r'^#.*\n', Comment.Preproc), + (r'!.*\n', Comment), + include('strings'), + include('core'), + (r'[a-z][\w$]*', Name), + include('nums'), + (r'[\s]+', Text), + ], + 'core': [ + # Statements + (words(( + 'ABSTRACT', 'ACCEPT', 'ALL', 'ALLSTOP', 'ALLOCATABLE', 'ALLOCATE', + 'ARRAY', 'ASSIGN', 'ASSOCIATE', 'ASYNCHRONOUS', 'BACKSPACE', 'BIND', + 'BLOCK', 'BLOCKDATA', 'BYTE', 'CALL', 'CASE', 'CLASS', 'CLOSE', + 'CODIMENSION', 'COMMON', 'CONCURRRENT', 'CONTIGUOUS', 'CONTAINS', + 'CONTINUE', 'CRITICAL', 'CYCLE', 'DATA', 'DEALLOCATE', 'DECODE', + 'DEFERRED', 'DIMENSION', 'DO', 'ELEMENTAL', 'ELSE', 'ENCODE', 'END', + 'ENTRY', 'ENUM', 'ENUMERATOR', 'EQUIVALENCE', 'EXIT', 'EXTENDS', + 'EXTERNAL', 'EXTRINSIC', 'FILE', 'FINAL', 'FORALL', 'FORMAT', + 'FUNCTION', 'GENERIC', 'GOTO', 'IF', 'IMAGES', 'IMPLICIT', + 'IMPORT', 'IMPURE', 'INCLUDE', 'INQUIRE', 'INTENT', 'INTERFACE', + 'INTRINSIC', 'IS', 'LOCK', 'MEMORY', 'MODULE', 'NAMELIST', 'NULLIFY', + 'NONE', 'NON_INTRINSIC', 'NON_OVERRIDABLE', 'NOPASS', 'OPEN', 'OPTIONAL', + 'OPTIONS', 'PARAMETER', 'PASS', 'PAUSE', 'POINTER', 'PRINT', 'PRIVATE', + 'PROGRAM', 'PROCEDURE', 'PROTECTED', 'PUBLIC', 'PURE', 'READ', + 'RECURSIVE', 'RESULT', 'RETURN', 'REWIND', 'SAVE', 'SELECT', 'SEQUENCE', + 'STOP', 'SUBMODULE', 'SUBROUTINE', 'SYNC', 'SYNCALL', 'SYNCIMAGES', + 'SYNCMEMORY', 'TARGET', 'THEN', 'TYPE', 'UNLOCK', 'USE', 'VALUE', + 'VOLATILE', 'WHERE', 'WRITE', 'WHILE'), prefix=r'\b', suffix=r'\s*\b'), + Keyword), + + # Data Types + (words(( + 'CHARACTER', 
'COMPLEX', 'DOUBLE PRECISION', 'DOUBLE COMPLEX', 'INTEGER', + 'LOGICAL', 'REAL', 'C_INT', 'C_SHORT', 'C_LONG', 'C_LONG_LONG', 'C_SIGNED_CHAR', + 'C_SIZE_T', 'C_INT8_T', 'C_INT16_T', 'C_INT32_T', 'C_INT64_T', 'C_INT_LEAST8_T', + 'C_INT_LEAST16_T', 'C_INT_LEAST32_T', 'C_INT_LEAST64_T', 'C_INT_FAST8_T', + 'C_INT_FAST16_T', 'C_INT_FAST32_T', 'C_INT_FAST64_T', 'C_INTMAX_T', + 'C_INTPTR_T', 'C_FLOAT', 'C_DOUBLE', 'C_LONG_DOUBLE', 'C_FLOAT_COMPLEX', + 'C_DOUBLE_COMPLEX', 'C_LONG_DOUBLE_COMPLEX', 'C_BOOL', 'C_CHAR', 'C_PTR', + 'C_FUNPTR'), prefix=r'\b', suffix=r'\s*\b'), + Keyword.Type), + + # Operators + (r'(\*\*|\*|\+|-|\/|<|>|<=|>=|==|\/=|=)', Operator), + + (r'(::)', Keyword.Declaration), + + (r'[()\[\],:&%;.]', Punctuation), + # Intrinsics + (words(( + 'Abort', 'Abs', 'Access', 'AChar', 'ACos', 'ACosH', 'AdjustL', + 'AdjustR', 'AImag', 'AInt', 'Alarm', 'All', 'Allocated', 'ALog', + 'AMax', 'AMin', 'AMod', 'And', 'ANInt', 'Any', 'ASin', 'ASinH', + 'Associated', 'ATan', 'ATanH', 'Atomic_Define', 'Atomic_Ref', + 'BesJ', 'BesJN', 'Bessel_J0', 'Bessel_J1', 'Bessel_JN', 'Bessel_Y0', + 'Bessel_Y1', 'Bessel_YN', 'BesY', 'BesYN', 'BGE', 'BGT', 'BLE', + 'BLT', 'Bit_Size', 'BTest', 'CAbs', 'CCos', 'Ceiling', 'CExp', + 'Char', 'ChDir', 'ChMod', 'CLog', 'Cmplx', 'Command_Argument_Count', + 'Complex', 'Conjg', 'Cos', 'CosH', 'Count', 'CPU_Time', 'CShift', + 'CSin', 'CSqRt', 'CTime', 'C_Loc', 'C_Associated', + 'C_Null_Ptr', 'C_Null_Funptr', 'C_F_Pointer', 'C_F_ProcPointer', + 'C_Null_Char', 'C_Alert', 'C_Backspace', 'C_Form_Feed', 'C_FunLoc', + 'C_Sizeof', 'C_New_Line', 'C_Carriage_Return', + 'C_Horizontal_Tab', 'C_Vertical_Tab', 'DAbs', 'DACos', 'DASin', + 'DATan', 'Date_and_Time', 'DbesJ', 'DbesJN', 'DbesY', + 'DbesYN', 'Dble', 'DCos', 'DCosH', 'DDiM', 'DErF', + 'DErFC', 'DExp', 'Digits', 'DiM', 'DInt', 'DLog', 'DMax', + 'DMin', 'DMod', 'DNInt', 'Dot_Product', 'DProd', 'DSign', 'DSinH', + 'DShiftL', 'DShiftR', 'DSin', 'DSqRt', 'DTanH', 'DTan', 'DTime', + 'EOShift', 'Epsilon', 
'ErF', 'ErFC', 'ErFC_Scaled', 'ETime', + 'Execute_Command_Line', 'Exit', 'Exp', 'Exponent', 'Extends_Type_Of', + 'FDate', 'FGet', 'FGetC', 'FindLoc', 'Float', 'Floor', 'Flush', + 'FNum', 'FPutC', 'FPut', 'Fraction', 'FSeek', 'FStat', 'FTell', + 'Gamma', 'GError', 'GetArg', 'Get_Command', 'Get_Command_Argument', + 'Get_Environment_Variable', 'GetCWD', 'GetEnv', 'GetGId', 'GetLog', + 'GetPId', 'GetUId', 'GMTime', 'HostNm', 'Huge', 'Hypot', 'IAbs', + 'IAChar', 'IAll', 'IAnd', 'IAny', 'IArgC', 'IBClr', 'IBits', + 'IBSet', 'IChar', 'IDate', 'IDiM', 'IDInt', 'IDNInt', 'IEOr', + 'IErrNo', 'IFix', 'Imag', 'ImagPart', 'Image_Index', 'Index', + 'Int', 'IOr', 'IParity', 'IRand', 'IsaTty', 'IShft', 'IShftC', + 'ISign', 'Iso_C_Binding', 'Is_Contiguous', 'Is_Iostat_End', + 'Is_Iostat_Eor', 'ITime', 'Kill', 'Kind', 'LBound', 'LCoBound', + 'Len', 'Len_Trim', 'LGe', 'LGt', 'Link', 'LLe', 'LLt', 'LnBlnk', + 'Loc', 'Log', 'Log_Gamma', 'Logical', 'Long', 'LShift', 'LStat', + 'LTime', 'MaskL', 'MaskR', 'MatMul', 'Max', 'MaxExponent', + 'MaxLoc', 'MaxVal', 'MClock', 'Merge', 'Merge_Bits', 'Move_Alloc', + 'Min', 'MinExponent', 'MinLoc', 'MinVal', 'Mod', 'Modulo', 'MvBits', + 'Nearest', 'New_Line', 'NInt', 'Norm2', 'Not', 'Null', 'Num_Images', + 'Or', 'Pack', 'Parity', 'PError', 'Precision', 'Present', 'Product', + 'Radix', 'Rand', 'Random_Number', 'Random_Seed', 'Range', 'Real', + 'RealPart', 'Rename', 'Repeat', 'Reshape', 'RRSpacing', 'RShift', + 'Same_Type_As', 'Scale', 'Scan', 'Second', 'Selected_Char_Kind', + 'Selected_Int_Kind', 'Selected_Real_Kind', 'Set_Exponent', 'Shape', + 'ShiftA', 'ShiftL', 'ShiftR', 'Short', 'Sign', 'Signal', 'SinH', + 'Sin', 'Sleep', 'Sngl', 'Spacing', 'Spread', 'SqRt', 'SRand', + 'Stat', 'Storage_Size', 'Sum', 'SymLnk', 'System', 'System_Clock', + 'Tan', 'TanH', 'Time', 'This_Image', 'Tiny', 'TrailZ', 'Transfer', + 'Transpose', 'Trim', 'TtyNam', 'UBound', 'UCoBound', 'UMask', + 'Unlink', 'Unpack', 'Verify', 'XOr', 'ZAbs', 'ZCos', 'ZExp', + 'ZLog', 'ZSin', 
'ZSqRt'), prefix=r'\b', suffix=r'\s*\b'), + Name.Builtin), + + # Booleans + (r'\.(true|false)\.', Name.Builtin), + # Comparing Operators + (r'\.(eq|ne|lt|le|gt|ge|not|and|or|eqv|neqv)\.', Operator.Word), + ], + + 'strings': [ + (r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double), + (r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single), + ], + + 'nums': [ + (r'\d+(?![.e])(_[a-z]\w+)?', Number.Integer), + (r'[+-]?\d*\.\d+(e[-+]?\d+)?(_[a-z]\w+)?', Number.Float), + (r'[+-]?\d+\.\d*(e[-+]?\d+)?(_[a-z]\w+)?', Number.Float), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/foxpro.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/foxpro.py similarity index 99% rename from plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/foxpro.py rename to plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/foxpro.py index 51cd499..99a65ce 100644 --- a/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/foxpro.py +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/foxpro.py @@ -5,7 +5,7 @@ Simple lexer for Microsoft Visual FoxPro source code. - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ @@ -24,11 +24,11 @@ class FoxProLexer(RegexLexer): FoxPro syntax allows to shorten all keywords and function names to 4 characters. Shortened forms are not recognized by this lexer. - *New in Pygments 1.6.* + .. 
versionadded:: 1.6 """ name = 'FoxPro' - aliases = ['Clipper', 'XBase'] + aliases = ['foxpro', 'vfp', 'clipper', 'xbase'] filenames = ['*.PRG', '*.prg'] mimetype = [] diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/functional.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/functional.py new file mode 100644 index 0000000..791e8b6 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/functional.py @@ -0,0 +1,21 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.functional + ~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Just export lexer classes previously contained in this module. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexers.lisp import SchemeLexer, CommonLispLexer, RacketLexer, \ + NewLispLexer +from pygments.lexers.haskell import HaskellLexer, LiterateHaskellLexer, \ + KokaLexer +from pygments.lexers.theorem import CoqLexer +from pygments.lexers.erlang import ErlangLexer, ErlangShellLexer, \ + ElixirConsoleLexer, ElixirLexer +from pygments.lexers.ml import SMLLexer, OcamlLexer, OpaLexer + +__all__ = [] diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/go.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/go.py new file mode 100644 index 0000000..11e2935 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/go.py @@ -0,0 +1,101 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.go + ~~~~~~~~~~~~~~~~~~ + + Lexers for the Google Go language. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import re + +from pygments.lexer import RegexLexer, bygroups, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['GoLexer'] + + +class GoLexer(RegexLexer): + """ + For `Go `_ source. + + .. versionadded:: 1.2 + """ + name = 'Go' + filenames = ['*.go'] + aliases = ['go'] + mimetypes = ['text/x-gosrc'] + + flags = re.MULTILINE | re.UNICODE + + tokens = { + 'root': [ + (r'\n', Text), + (r'\s+', Text), + (r'\\\n', Text), # line continuations + (r'//(.*?)\n', Comment.Single), + (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), + (r'(import|package)\b', Keyword.Namespace), + (r'(var|func|struct|map|chan|type|interface|const)\b', + Keyword.Declaration), + (words(( + 'break', 'default', 'select', 'case', 'defer', 'go', + 'else', 'goto', 'switch', 'fallthrough', 'if', 'range', + 'continue', 'for', 'return'), suffix=r'\b'), + Keyword), + (r'(true|false|iota|nil)\b', Keyword.Constant), + # It seems the builtin types aren't actually keywords, but + # can be used as functions. So we need two declarations. 
+ (words(( + 'uint', 'uint8', 'uint16', 'uint32', 'uint64', + 'int', 'int8', 'int16', 'int32', 'int64', + 'float', 'float32', 'float64', + 'complex64', 'complex128', 'byte', 'rune', + 'string', 'bool', 'error', 'uintptr', + 'print', 'println', 'panic', 'recover', 'close', 'complex', + 'real', 'imag', 'len', 'cap', 'append', 'copy', 'delete', + 'new', 'make'), suffix=r'\b(\()'), + bygroups(Name.Builtin, Punctuation)), + (words(( + 'uint', 'uint8', 'uint16', 'uint32', 'uint64', + 'int', 'int8', 'int16', 'int32', 'int64', + 'float', 'float32', 'float64', + 'complex64', 'complex128', 'byte', 'rune', + 'string', 'bool', 'error', 'uintptr'), suffix=r'\b'), + Keyword.Type), + # imaginary_lit + (r'\d+i', Number), + (r'\d+\.\d*([Ee][-+]\d+)?i', Number), + (r'\.\d+([Ee][-+]\d+)?i', Number), + (r'\d+[Ee][-+]\d+i', Number), + # float_lit + (r'\d+(\.\d+[eE][+\-]?\d+|' + r'\.\d*|[eE][+\-]?\d+)', Number.Float), + (r'\.\d+([eE][+\-]?\d+)?', Number.Float), + # int_lit + # -- octal_lit + (r'0[0-7]+', Number.Oct), + # -- hex_lit + (r'0[xX][0-9a-fA-F]+', Number.Hex), + # -- decimal_lit + (r'(0|[1-9][0-9]*)', Number.Integer), + # char_lit + (r"""'(\\['"\\abfnrtv]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}""" + r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|[^\\])'""", + String.Char), + # StringLiteral + # -- raw_string_lit + (r'`[^`]*`', String), + # -- interpreted_string_lit + (r'"(\\\\|\\"|[^"])*"', String), + # Tokens + (r'(<<=|>>=|<<|>>|<=|>=|&\^=|&\^|\+=|-=|\*=|/=|%=|&=|\|=|&&|\|\|' + r'|<-|\+\+|--|==|!=|:=|\.\.\.|[+\-*/%&])', Operator), + (r'[|^<>=!()\[\]{}.,;:]', Punctuation), + # identifier + (r'[^\W\d]\w*', Name.Other), + ] + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/graph.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/graph.py new file mode 100644 index 0000000..6137363 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/graph.py @@ -0,0 +1,79 @@ +# -*- coding: utf-8 -*- +""" + 
pygments.lexers.graph + ~~~~~~~~~~~~~~~~~~~~~ + + Lexers for graph query languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include, bygroups, using, this +from pygments.token import Keyword, Punctuation, Comment, Operator, Name,\ + String, Number, Whitespace + + +__all__ = ['CypherLexer'] + + +class CypherLexer(RegexLexer): + """ + For `Cypher Query Language + `_ + + For the Cypher version in Neo4J 2.0 + + .. versionadded:: 2.0 + """ + name = 'Cypher' + aliases = ['cypher'] + filenames = ['*.cyp', '*.cypher'] + + flags = re.MULTILINE | re.IGNORECASE + + tokens = { + 'root': [ + include('comment'), + include('keywords'), + include('clauses'), + include('relations'), + include('strings'), + include('whitespace'), + include('barewords'), + ], + 'comment': [ + (r'^.*//.*\n', Comment.Single), + ], + 'keywords': [ + (r'(create|order|match|limit|set|skip|start|return|with|where|' + r'delete|foreach|not|by)\b', Keyword), + ], + 'clauses': [ + # TODO: many missing ones, see http://docs.neo4j.org/refcard/2.0/ + (r'(all|any|as|asc|create|create\s+unique|delete|' + r'desc|distinct|foreach|in|is\s+null|limit|match|none|' + r'order\s+by|return|set|skip|single|start|union|where|with)\b', + Keyword), + ], + 'relations': [ + (r'(-\[)(.*?)(\]->)', bygroups(Operator, using(this), Operator)), + (r'(<-\[)(.*?)(\]-)', bygroups(Operator, using(this), Operator)), + (r'-->|<--|\[|\]', Operator), + (r'<|>|<>|=|<=|=>|\(|\)|\||:|,|;', Punctuation), + (r'[.*{}]', Punctuation), + ], + 'strings': [ + (r'"(?:\\[tbnrf\'"\\]|[^\\"])*"', String), + (r'`(?:``|[^`])+`', Name.Variable), + ], + 'whitespace': [ + (r'\s+', Whitespace), + ], + 'barewords': [ + (r'[a-z]\w*', Name), + (r'\d+', Number), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/graphics.py 
b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/graphics.py new file mode 100644 index 0000000..0b8bf5d --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/graphics.py @@ -0,0 +1,553 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.graphics + ~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for computer graphics and plotting related languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, words, include, bygroups, using, \ + this, default +from pygments.token import Text, Comment, Operator, Keyword, Name, \ + Number, Punctuation, String + +__all__ = ['GLShaderLexer', 'PostScriptLexer', 'AsymptoteLexer', 'GnuplotLexer', + 'PovrayLexer'] + + +class GLShaderLexer(RegexLexer): + """ + GLSL (OpenGL Shader) lexer. + + .. versionadded:: 1.1 + """ + name = 'GLSL' + aliases = ['glsl'] + filenames = ['*.vert', '*.frag', '*.geo'] + mimetypes = ['text/x-glslsrc'] + + tokens = { + 'root': [ + (r'^#.*', Comment.Preproc), + (r'//.*', Comment.Single), + (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), + (r'\+|-|~|!=?|\*|/|%|<<|>>|<=?|>=?|==?|&&?|\^|\|\|?', + Operator), + (r'[?:]', Operator), # quick hack for ternary + (r'\bdefined\b', Operator), + (r'[;{}(),\[\]]', Punctuation), + # FIXME when e is present, no decimal point needed + (r'[+-]?\d*\.\d+([eE][-+]?\d+)?', Number.Float), + (r'[+-]?\d+\.\d*([eE][-+]?\d+)?', Number.Float), + (r'0[xX][0-9a-fA-F]*', Number.Hex), + (r'0[0-7]*', Number.Oct), + (r'[1-9][0-9]*', Number.Integer), + (words(( + 'attribute', 'const', 'uniform', 'varying', 'centroid', 'break', + 'continue', 'do', 'for', 'while', 'if', 'else', 'in', 'out', + 'inout', 'float', 'int', 'void', 'bool', 'true', 'false', + 'invariant', 'discard', 'return', 'mat2', 'mat3' 'mat4', + 'mat2x2', 'mat3x2', 'mat4x2', 'mat2x3', 'mat3x3', 'mat4x3', + 'mat2x4', 'mat3x4', 'mat4x4', 'vec2', 'vec3', 'vec4', + 
'ivec2', 'ivec3', 'ivec4', 'bvec2', 'bvec3', 'bvec4', + 'sampler1D', 'sampler2D', 'sampler3D' 'samplerCube', + 'sampler1DShadow', 'sampler2DShadow', 'struct'), + prefix=r'\b', suffix=r'\b'), + Keyword), + (words(( + 'asm', 'class', 'union', 'enum', 'typedef', 'template', 'this', + 'packed', 'goto', 'switch', 'default', 'inline', 'noinline', + 'volatile', 'public', 'static', 'extern', 'external', 'interface', + 'long', 'short', 'double', 'half', 'fixed', 'unsigned', 'lowp', + 'mediump', 'highp', 'precision', 'input', 'output', + 'hvec2', 'hvec3', 'hvec4', 'dvec2', 'dvec3', 'dvec4', + 'fvec2', 'fvec3', 'fvec4', 'sampler2DRect', 'sampler3DRect', + 'sampler2DRectShadow', 'sizeof', 'cast', 'namespace', 'using'), + prefix=r'\b', suffix=r'\b'), + Keyword), # future use + (r'[a-zA-Z_]\w*', Name), + (r'\.', Punctuation), + (r'\s+', Text), + ], + } + + +class PostScriptLexer(RegexLexer): + """ + Lexer for PostScript files. + + The PostScript Language Reference published by Adobe at + + is the authority for this. + + .. versionadded:: 1.4 + """ + name = 'PostScript' + aliases = ['postscript', 'postscr'] + filenames = ['*.ps', '*.eps'] + mimetypes = ['application/postscript'] + + delimiter = r'()<>\[\]{}/%\s' + delimiter_end = r'(?=[%s])' % delimiter + + valid_name_chars = r'[^%s]' % delimiter + valid_name = r"%s+%s" % (valid_name_chars, delimiter_end) + + tokens = { + 'root': [ + # All comment types + (r'^%!.+\n', Comment.Preproc), + (r'%%.*\n', Comment.Special), + (r'(^%.*\n){2,}', Comment.Multiline), + (r'%.*\n', Comment.Single), + + # String literals are awkward; enter separate state. + (r'\(', String, 'stringliteral'), + + (r'[{}<>\[\]]', Punctuation), + + # Numbers + (r'<[0-9A-Fa-f]+>' + delimiter_end, Number.Hex), + # Slight abuse: use Oct to signify any explicit base system + (r'[0-9]+\#(\-|\+)?([0-9]+\.?|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)' + r'((e|E)[0-9]+)?' + delimiter_end, Number.Oct), + (r'(\-|\+)?([0-9]+\.?|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)((e|E)[0-9]+)?' 
+ + delimiter_end, Number.Float), + (r'(\-|\+)?[0-9]+' + delimiter_end, Number.Integer), + + # References + (r'\/%s' % valid_name, Name.Variable), + + # Names + (valid_name, Name.Function), # Anything else is executed + + # These keywords taken from + # + # Is there an authoritative list anywhere that doesn't involve + # trawling documentation? + + (r'(false|true)' + delimiter_end, Keyword.Constant), + + # Conditionals / flow control + (r'(eq|ne|g[et]|l[et]|and|or|not|if(?:else)?|for(?:all)?)' + + delimiter_end, Keyword.Reserved), + + (words(( + 'abs', 'add', 'aload', 'arc', 'arcn', 'array', 'atan', 'begin', + 'bind', 'ceiling', 'charpath', 'clip', 'closepath', 'concat', + 'concatmatrix', 'copy', 'cos', 'currentlinewidth', 'currentmatrix', + 'currentpoint', 'curveto', 'cvi', 'cvs', 'def', 'defaultmatrix', + 'dict', 'dictstackoverflow', 'div', 'dtransform', 'dup', 'end', + 'exch', 'exec', 'exit', 'exp', 'fill', 'findfont', 'floor', 'get', + 'getinterval', 'grestore', 'gsave', 'gt', 'identmatrix', 'idiv', + 'idtransform', 'index', 'invertmatrix', 'itransform', 'length', + 'lineto', 'ln', 'load', 'log', 'loop', 'matrix', 'mod', 'moveto', + 'mul', 'neg', 'newpath', 'pathforall', 'pathbbox', 'pop', 'print', + 'pstack', 'put', 'quit', 'rand', 'rangecheck', 'rcurveto', 'repeat', + 'restore', 'rlineto', 'rmoveto', 'roll', 'rotate', 'round', 'run', + 'save', 'scale', 'scalefont', 'setdash', 'setfont', 'setgray', + 'setlinecap', 'setlinejoin', 'setlinewidth', 'setmatrix', + 'setrgbcolor', 'shfill', 'show', 'showpage', 'sin', 'sqrt', + 'stack', 'stringwidth', 'stroke', 'strokepath', 'sub', 'syntaxerror', + 'transform', 'translate', 'truncate', 'typecheck', 'undefined', + 'undefinedfilename', 'undefinedresult'), suffix=delimiter_end), + Name.Builtin), + + (r'\s+', Text), + ], + + 'stringliteral': [ + (r'[^()\\]+', String), + (r'\\', String.Escape, 'escape'), + (r'\(', String, '#push'), + (r'\)', String, '#pop'), + ], + + 'escape': [ + (r'[0-8]{3}|n|r|t|b|f|\\|\(|\)', 
String.Escape, '#pop'), + default('#pop'), + ], + } + + +class AsymptoteLexer(RegexLexer): + """ + For `Asymptote `_ source code. + + .. versionadded:: 1.2 + """ + name = 'Asymptote' + aliases = ['asy', 'asymptote'] + filenames = ['*.asy'] + mimetypes = ['text/x-asymptote'] + + #: optional Comment or Whitespace + _ws = r'(?:\s|//.*?\n|/\*.*?\*/)+' + + tokens = { + 'whitespace': [ + (r'\n', Text), + (r'\s+', Text), + (r'\\\n', Text), # line continuation + (r'//(\n|(.|\n)*?[^\\]\n)', Comment), + (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment), + ], + 'statements': [ + # simple string (TeX friendly) + (r'"(\\\\|\\"|[^"])*"', String), + # C style string (with character escapes) + (r"'", String, 'string'), + (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float), + (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), + (r'0x[0-9a-fA-F]+[Ll]?', Number.Hex), + (r'0[0-7]+[Ll]?', Number.Oct), + (r'\d+[Ll]?', Number.Integer), + (r'[~!%^&*+=|?:<>/-]', Operator), + (r'[()\[\],.]', Punctuation), + (r'\b(case)(.+?)(:)', bygroups(Keyword, using(this), Text)), + (r'(and|controls|tension|atleast|curl|if|else|while|for|do|' + r'return|break|continue|struct|typedef|new|access|import|' + r'unravel|from|include|quote|static|public|private|restricted|' + r'this|explicit|true|false|null|cycle|newframe|operator)\b', Keyword), + # Since an asy-type-name can be also an asy-function-name, + # in the following we test if the string " [a-zA-Z]" follows + # the Keyword.Type. + # Of course it is not perfect ! 
+ (r'(Braid|FitResult|Label|Legend|TreeNode|abscissa|arc|arrowhead|' + r'binarytree|binarytreeNode|block|bool|bool3|bounds|bqe|circle|' + r'conic|coord|coordsys|cputime|ellipse|file|filltype|frame|grid3|' + r'guide|horner|hsv|hyperbola|indexedTransform|int|inversion|key|' + r'light|line|linefit|marginT|marker|mass|object|pair|parabola|path|' + r'path3|pen|picture|point|position|projection|real|revolution|' + r'scaleT|scientific|segment|side|slice|splitface|string|surface|' + r'tensionSpecifier|ticklocate|ticksgridT|tickvalues|transform|' + r'transformation|tree|triangle|trilinear|triple|vector|' + r'vertex|void)(?=\s+[a-zA-Z])', Keyword.Type), + # Now the asy-type-name which are not asy-function-name + # except yours ! + # Perhaps useless + (r'(Braid|FitResult|TreeNode|abscissa|arrowhead|block|bool|bool3|' + r'bounds|coord|frame|guide|horner|int|linefit|marginT|pair|pen|' + r'picture|position|real|revolution|slice|splitface|ticksgridT|' + r'tickvalues|tree|triple|vertex|void)\b', Keyword.Type), + ('[a-zA-Z_]\w*:(?!:)', Name.Label), + ('[a-zA-Z_]\w*', Name), + ], + 'root': [ + include('whitespace'), + # functions + (r'((?:[\w*\s])+?(?:\s|\*))' # return arguments + r'([a-zA-Z_]\w*)' # method name + r'(\s*\([^;]*?\))' # signature + r'(' + _ws + r')(\{)', + bygroups(using(this), Name.Function, using(this), using(this), + Punctuation), + 'function'), + # function declarations + (r'((?:[\w*\s])+?(?:\s|\*))' # return arguments + r'([a-zA-Z_]\w*)' # method name + r'(\s*\([^;]*?\))' # signature + r'(' + _ws + r')(;)', + bygroups(using(this), Name.Function, using(this), using(this), + Punctuation)), + default('statement'), + ], + 'statement': [ + include('whitespace'), + include('statements'), + ('[{}]', Punctuation), + (';', Punctuation, '#pop'), + ], + 'function': [ + include('whitespace'), + include('statements'), + (';', Punctuation), + (r'\{', Punctuation, '#push'), + (r'\}', Punctuation, '#pop'), + ], + 'string': [ + (r"'", String, '#pop'), + 
(r'\\([\\abfnrtv"\'?]|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), + (r'\n', String), + (r"[^\\'\n]+", String), # all other characters + (r'\\\n', String), + (r'\\n', String), # line continuation + (r'\\', String), # stray backslash + ], + } + + def get_tokens_unprocessed(self, text): + from pygments.lexers._asy_builtins import ASYFUNCNAME, ASYVARNAME + for index, token, value in \ + RegexLexer.get_tokens_unprocessed(self, text): + if token is Name and value in ASYFUNCNAME: + token = Name.Function + elif token is Name and value in ASYVARNAME: + token = Name.Variable + yield index, token, value + + +def _shortened(word): + dpos = word.find('$') + return '|'.join(word[:dpos] + word[dpos+1:i] + r'\b' + for i in range(len(word), dpos, -1)) + + +def _shortened_many(*words): + return '|'.join(map(_shortened, words)) + + +class GnuplotLexer(RegexLexer): + """ + For `Gnuplot `_ plotting scripts. + + .. versionadded:: 0.11 + """ + + name = 'Gnuplot' + aliases = ['gnuplot'] + filenames = ['*.plot', '*.plt'] + mimetypes = ['text/x-gnuplot'] + + tokens = { + 'root': [ + include('whitespace'), + (_shortened('bi$nd'), Keyword, 'bind'), + (_shortened_many('ex$it', 'q$uit'), Keyword, 'quit'), + (_shortened('f$it'), Keyword, 'fit'), + (r'(if)(\s*)(\()', bygroups(Keyword, Text, Punctuation), 'if'), + (r'else\b', Keyword), + (_shortened('pa$use'), Keyword, 'pause'), + (_shortened_many('p$lot', 'rep$lot', 'sp$lot'), Keyword, 'plot'), + (_shortened('sa$ve'), Keyword, 'save'), + (_shortened('se$t'), Keyword, ('genericargs', 'optionarg')), + (_shortened_many('sh$ow', 'uns$et'), + Keyword, ('noargs', 'optionarg')), + (_shortened_many('low$er', 'ra$ise', 'ca$ll', 'cd$', 'cl$ear', + 'h$elp', '\\?$', 'hi$story', 'l$oad', 'pr$int', + 'pwd$', 're$read', 'res$et', 'scr$eendump', + 'she$ll', 'sy$stem', 'up$date'), + Keyword, 'genericargs'), + (_shortened_many('pwd$', 're$read', 'res$et', 'scr$eendump', + 'she$ll', 'test$'), + Keyword, 'noargs'), + ('([a-zA-Z_]\w*)(\s*)(=)', + 
bygroups(Name.Variable, Text, Operator), 'genericargs'), + ('([a-zA-Z_]\w*)(\s*\(.*?\)\s*)(=)', + bygroups(Name.Function, Text, Operator), 'genericargs'), + (r'@[a-zA-Z_]\w*', Name.Constant), # macros + (r';', Keyword), + ], + 'comment': [ + (r'[^\\\n]', Comment), + (r'\\\n', Comment), + (r'\\', Comment), + # don't add the newline to the Comment token + default('#pop'), + ], + 'whitespace': [ + ('#', Comment, 'comment'), + (r'[ \t\v\f]+', Text), + ], + 'noargs': [ + include('whitespace'), + # semicolon and newline end the argument list + (r';', Punctuation, '#pop'), + (r'\n', Text, '#pop'), + ], + 'dqstring': [ + (r'"', String, '#pop'), + (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), + (r'[^\\"\n]+', String), # all other characters + (r'\\\n', String), # line continuation + (r'\\', String), # stray backslash + (r'\n', String, '#pop'), # newline ends the string too + ], + 'sqstring': [ + (r"''", String), # escaped single quote + (r"'", String, '#pop'), + (r"[^\\'\n]+", String), # all other characters + (r'\\\n', String), # line continuation + (r'\\', String), # normal backslash + (r'\n', String, '#pop'), # newline ends the string too + ], + 'genericargs': [ + include('noargs'), + (r'"', String, 'dqstring'), + (r"'", String, 'sqstring'), + (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float), + (r'(\d+\.\d*|\.\d+)', Number.Float), + (r'-?\d+', Number.Integer), + ('[,.~!%^&*+=|?:<>/-]', Operator), + ('[{}()\[\]]', Punctuation), + (r'(eq|ne)\b', Operator.Word), + (r'([a-zA-Z_]\w*)(\s*)(\()', + bygroups(Name.Function, Text, Punctuation)), + (r'[a-zA-Z_]\w*', Name), + (r'@[a-zA-Z_]\w*', Name.Constant), # macros + (r'\\\n', Text), + ], + 'optionarg': [ + include('whitespace'), + (_shortened_many( + "a$ll", "an$gles", "ar$row", "au$toscale", "b$ars", "bor$der", + "box$width", "cl$abel", "c$lip", "cn$trparam", "co$ntour", "da$ta", + "data$file", "dg$rid3d", "du$mmy", "enc$oding", "dec$imalsign", + "fit$", "font$path", "fo$rmat", "fu$nction", 
"fu$nctions", "g$rid", + "hid$den3d", "his$torysize", "is$osamples", "k$ey", "keyt$itle", + "la$bel", "li$nestyle", "ls$", "loa$dpath", "loc$ale", "log$scale", + "mac$ros", "map$ping", "map$ping3d", "mar$gin", "lmar$gin", + "rmar$gin", "tmar$gin", "bmar$gin", "mo$use", "multi$plot", + "mxt$ics", "nomxt$ics", "mx2t$ics", "nomx2t$ics", "myt$ics", + "nomyt$ics", "my2t$ics", "nomy2t$ics", "mzt$ics", "nomzt$ics", + "mcbt$ics", "nomcbt$ics", "of$fsets", "or$igin", "o$utput", + "pa$rametric", "pm$3d", "pal$ette", "colorb$ox", "p$lot", + "poi$ntsize", "pol$ar", "pr$int", "obj$ect", "sa$mples", "si$ze", + "st$yle", "su$rface", "table$", "t$erminal", "termo$ptions", "ti$cs", + "ticsc$ale", "ticsl$evel", "timef$mt", "tim$estamp", "tit$le", + "v$ariables", "ve$rsion", "vi$ew", "xyp$lane", "xda$ta", "x2da$ta", + "yda$ta", "y2da$ta", "zda$ta", "cbda$ta", "xl$abel", "x2l$abel", + "yl$abel", "y2l$abel", "zl$abel", "cbl$abel", "xti$cs", "noxti$cs", + "x2ti$cs", "nox2ti$cs", "yti$cs", "noyti$cs", "y2ti$cs", "noy2ti$cs", + "zti$cs", "nozti$cs", "cbti$cs", "nocbti$cs", "xdti$cs", "noxdti$cs", + "x2dti$cs", "nox2dti$cs", "ydti$cs", "noydti$cs", "y2dti$cs", + "noy2dti$cs", "zdti$cs", "nozdti$cs", "cbdti$cs", "nocbdti$cs", + "xmti$cs", "noxmti$cs", "x2mti$cs", "nox2mti$cs", "ymti$cs", + "noymti$cs", "y2mti$cs", "noy2mti$cs", "zmti$cs", "nozmti$cs", + "cbmti$cs", "nocbmti$cs", "xr$ange", "x2r$ange", "yr$ange", + "y2r$ange", "zr$ange", "cbr$ange", "rr$ange", "tr$ange", "ur$ange", + "vr$ange", "xzeroa$xis", "x2zeroa$xis", "yzeroa$xis", "y2zeroa$xis", + "zzeroa$xis", "zeroa$xis", "z$ero"), Name.Builtin, '#pop'), + ], + 'bind': [ + ('!', Keyword, '#pop'), + (_shortened('all$windows'), Name.Builtin), + include('genericargs'), + ], + 'quit': [ + (r'gnuplot\b', Keyword), + include('noargs'), + ], + 'fit': [ + (r'via\b', Name.Builtin), + include('plot'), + ], + 'if': [ + (r'\)', Punctuation, '#pop'), + include('genericargs'), + ], + 'pause': [ + (r'(mouse|any|button1|button2|button3)\b', 
Name.Builtin), + (_shortened('key$press'), Name.Builtin), + include('genericargs'), + ], + 'plot': [ + (_shortened_many('ax$es', 'axi$s', 'bin$ary', 'ev$ery', 'i$ndex', + 'mat$rix', 's$mooth', 'thru$', 't$itle', + 'not$itle', 'u$sing', 'w$ith'), + Name.Builtin), + include('genericargs'), + ], + 'save': [ + (_shortened_many('f$unctions', 's$et', 't$erminal', 'v$ariables'), + Name.Builtin), + include('genericargs'), + ], + } + + +class PovrayLexer(RegexLexer): + """ + For `Persistence of Vision Raytracer `_ files. + + .. versionadded:: 0.11 + """ + name = 'POVRay' + aliases = ['pov'] + filenames = ['*.pov', '*.inc'] + mimetypes = ['text/x-povray'] + + tokens = { + 'root': [ + (r'/\*[\w\W]*?\*/', Comment.Multiline), + (r'//.*\n', Comment.Single), + (r'(?s)"(?:\\.|[^"\\])+"', String.Double), + (words(( + 'break', 'case', 'debug', 'declare', 'default', 'define', 'else', + 'elseif', 'end', 'error', 'fclose', 'fopen', 'for', 'if', 'ifdef', + 'ifndef', 'include', 'local', 'macro', 'range', 'read', 'render', + 'statistics', 'switch', 'undef', 'version', 'warning', 'while', + 'write'), prefix=r'#', suffix=r'\b'), + Comment.Preproc), + (words(( + 'aa_level', 'aa_threshold', 'abs', 'acos', 'acosh', 'adaptive', 'adc_bailout', + 'agate', 'agate_turb', 'all', 'alpha', 'ambient', 'ambient_light', 'angle', + 'aperture', 'arc_angle', 'area_light', 'asc', 'asin', 'asinh', 'assumed_gamma', + 'atan', 'atan2', 'atanh', 'atmosphere', 'atmospheric_attenuation', + 'attenuating', 'average', 'background', 'black_hole', 'blue', 'blur_samples', + 'bounded_by', 'box_mapping', 'bozo', 'break', 'brick', 'brick_size', + 'brightness', 'brilliance', 'bumps', 'bumpy1', 'bumpy2', 'bumpy3', 'bump_map', + 'bump_size', 'case', 'caustics', 'ceil', 'checker', 'chr', 'clipped_by', 'clock', + 'color', 'color_map', 'colour', 'colour_map', 'component', 'composite', 'concat', + 'confidence', 'conic_sweep', 'constant', 'control0', 'control1', 'cos', 'cosh', + 'count', 'crackle', 'crand', 'cube', 'cubic_spline', 
'cylindrical_mapping', + 'debug', 'declare', 'default', 'degrees', 'dents', 'diffuse', 'direction', + 'distance', 'distance_maximum', 'div', 'dust', 'dust_type', 'eccentricity', + 'else', 'emitting', 'end', 'error', 'error_bound', 'exp', 'exponent', + 'fade_distance', 'fade_power', 'falloff', 'falloff_angle', 'false', + 'file_exists', 'filter', 'finish', 'fisheye', 'flatness', 'flip', 'floor', + 'focal_point', 'fog', 'fog_alt', 'fog_offset', 'fog_type', 'frequency', 'gif', + 'global_settings', 'glowing', 'gradient', 'granite', 'gray_threshold', + 'green', 'halo', 'hexagon', 'hf_gray_16', 'hierarchy', 'hollow', 'hypercomplex', + 'if', 'ifdef', 'iff', 'image_map', 'incidence', 'include', 'int', 'interpolate', + 'inverse', 'ior', 'irid', 'irid_wavelength', 'jitter', 'lambda', 'leopard', + 'linear', 'linear_spline', 'linear_sweep', 'location', 'log', 'looks_like', + 'look_at', 'low_error_factor', 'mandel', 'map_type', 'marble', 'material_map', + 'matrix', 'max', 'max_intersections', 'max_iteration', 'max_trace_level', + 'max_value', 'metallic', 'min', 'minimum_reuse', 'mod', 'mortar', + 'nearest_count', 'no', 'normal', 'normal_map', 'no_shadow', 'number_of_waves', + 'octaves', 'off', 'offset', 'omega', 'omnimax', 'on', 'once', 'onion', 'open', + 'orthographic', 'panoramic', 'pattern1', 'pattern2', 'pattern3', + 'perspective', 'pgm', 'phase', 'phong', 'phong_size', 'pi', 'pigment', + 'pigment_map', 'planar_mapping', 'png', 'point_at', 'pot', 'pow', 'ppm', + 'precision', 'pwr', 'quadratic_spline', 'quaternion', 'quick_color', + 'quick_colour', 'quilted', 'radial', 'radians', 'radiosity', 'radius', 'rainbow', + 'ramp_wave', 'rand', 'range', 'reciprocal', 'recursion_limit', 'red', + 'reflection', 'refraction', 'render', 'repeat', 'rgb', 'rgbf', 'rgbft', 'rgbt', + 'right', 'ripples', 'rotate', 'roughness', 'samples', 'scale', 'scallop_wave', + 'scattering', 'seed', 'shadowless', 'sin', 'sine_wave', 'sinh', 'sky', 'sky_sphere', + 'slice', 'slope_map', 'smooth', 'specular', 
'spherical_mapping', 'spiral', + 'spiral1', 'spiral2', 'spotlight', 'spotted', 'sqr', 'sqrt', 'statistics', 'str', + 'strcmp', 'strength', 'strlen', 'strlwr', 'strupr', 'sturm', 'substr', 'switch', 'sys', + 't', 'tan', 'tanh', 'test_camera_1', 'test_camera_2', 'test_camera_3', + 'test_camera_4', 'texture', 'texture_map', 'tga', 'thickness', 'threshold', + 'tightness', 'tile2', 'tiles', 'track', 'transform', 'translate', 'transmit', + 'triangle_wave', 'true', 'ttf', 'turbulence', 'turb_depth', 'type', + 'ultra_wide_angle', 'up', 'use_color', 'use_colour', 'use_index', 'u_steps', + 'val', 'variance', 'vaxis_rotate', 'vcross', 'vdot', 'version', 'vlength', + 'vnormalize', 'volume_object', 'volume_rendered', 'vol_with_light', + 'vrotate', 'v_steps', 'warning', 'warp', 'water_level', 'waves', 'while', 'width', + 'wood', 'wrinkles', 'yes'), prefix=r'\b', suffix=r'\b'), + Keyword), + (words(( + 'bicubic_patch', 'blob', 'box', 'camera', 'cone', 'cubic', 'cylinder', 'difference', + 'disc', 'height_field', 'intersection', 'julia_fractal', 'lathe', + 'light_source', 'merge', 'mesh', 'object', 'plane', 'poly', 'polygon', 'prism', + 'quadric', 'quartic', 'smooth_triangle', 'sor', 'sphere', 'superellipsoid', + 'text', 'torus', 'triangle', 'union'), suffix=r'\b'), + Name.Builtin), + # TODO: <=, etc + (r'[\[\](){}<>;,]', Punctuation), + (r'[-+*/=]', Operator), + (r'\b(x|y|z|u|v)\b', Name.Builtin.Pseudo), + (r'[a-zA-Z_]\w*', Name), + (r'[0-9]+\.[0-9]*', Number.Float), + (r'\.[0-9]+', Number.Float), + (r'[0-9]+', Number.Integer), + (r'"(\\\\|\\"|[^"])*"', String), + (r'\s+', Text), + ] + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/haskell.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/haskell.py new file mode 100644 index 0000000..089cdf4 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/haskell.py @@ -0,0 +1,840 @@ +# -*- coding: utf-8 -*- +""" + 
pygments.lexers.haskell + ~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for Haskell and related languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import Lexer, RegexLexer, bygroups, do_insertions, \ + default, include +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Generic +from pygments import unistring as uni + +__all__ = ['HaskellLexer', 'IdrisLexer', 'AgdaLexer', 'CryptolLexer', + 'LiterateHaskellLexer', 'LiterateIdrisLexer', 'LiterateAgdaLexer', + 'LiterateCryptolLexer', 'KokaLexer'] + + +line_re = re.compile('.*?\n') + + +class HaskellLexer(RegexLexer): + """ + A Haskell lexer based on the lexemes defined in the Haskell 98 Report. + + .. versionadded:: 0.8 + """ + name = 'Haskell' + aliases = ['haskell', 'hs'] + filenames = ['*.hs'] + mimetypes = ['text/x-haskell'] + + flags = re.MULTILINE | re.UNICODE + + reserved = ('case', 'class', 'data', 'default', 'deriving', 'do', 'else', + 'if', 'in', 'infix[lr]?', 'instance', + 'let', 'newtype', 'of', 'then', 'type', 'where', '_') + ascii = ('NUL', 'SOH', '[SE]TX', 'EOT', 'ENQ', 'ACK', + 'BEL', 'BS', 'HT', 'LF', 'VT', 'FF', 'CR', 'S[OI]', 'DLE', + 'DC[1-4]', 'NAK', 'SYN', 'ETB', 'CAN', + 'EM', 'SUB', 'ESC', '[FGRU]S', 'SP', 'DEL') + + tokens = { + 'root': [ + # Whitespace: + (r'\s+', Text), + # (r'--\s*|.*$', Comment.Doc), + (r'--(?![!#$%&*+./<=>?@^|_~:\\]).*?$', Comment.Single), + (r'\{-', Comment.Multiline, 'comment'), + # Lexemes: + # Identifiers + (r'\bimport\b', Keyword.Reserved, 'import'), + (r'\bmodule\b', Keyword.Reserved, 'module'), + (r'\berror\b', Name.Exception), + (r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved), + (r"'[^\\]'", String.Char), # this has to come before the TH quote + (r'^[_' + uni.Ll + r'][\w\']*', Name.Function), + (r"'?[_" + uni.Ll + r"][\w']*", Name), + (r"('')?[" + uni.Lu + r"][\w\']*", Keyword.Type), + # Operators + 
(r'\\(?![:!#$%&*+.\\/<=>?@^|~-]+)', Name.Function), # lambda operator + (r'(<-|::|->|=>|=)(?![:!#$%&*+.\\/<=>?@^|~-]+)', Operator.Word), # specials + (r':[:!#$%&*+.\\/<=>?@^|~-]*', Keyword.Type), # Constructor operators + (r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator), # Other operators + # Numbers + (r'\d+[eE][+-]?\d+', Number.Float), + (r'\d+\.\d+([eE][+-]?\d+)?', Number.Float), + (r'0[oO][0-7]+', Number.Oct), + (r'0[xX][\da-fA-F]+', Number.Hex), + (r'\d+', Number.Integer), + # Character/String Literals + (r"'", String.Char, 'character'), + (r'"', String, 'string'), + # Special + (r'\[\]', Keyword.Type), + (r'\(\)', Name.Builtin), + (r'[][(),;`{}]', Punctuation), + ], + 'import': [ + # Import statements + (r'\s+', Text), + (r'"', String, 'string'), + # after "funclist" state + (r'\)', Punctuation, '#pop'), + (r'qualified\b', Keyword), + # import X as Y + (r'([' + uni.Lu + r'][\w.]*)(\s+)(as)(\s+)([' + uni.Lu + r'][\w.]*)', + bygroups(Name.Namespace, Text, Keyword, Text, Name), '#pop'), + # import X hiding (functions) + (r'([' + uni.Lu + r'][\w.]*)(\s+)(hiding)(\s+)(\()', + bygroups(Name.Namespace, Text, Keyword, Text, Punctuation), 'funclist'), + # import X (functions) + (r'([' + uni.Lu + r'][\w.]*)(\s+)(\()', + bygroups(Name.Namespace, Text, Punctuation), 'funclist'), + # import X + (r'[\w.]+', Name.Namespace, '#pop'), + ], + 'module': [ + (r'\s+', Text), + (r'([' + uni.Lu + r'][\w.]*)(\s+)(\()', + bygroups(Name.Namespace, Text, Punctuation), 'funclist'), + (r'[' + uni.Lu + r'][\w.]*', Name.Namespace, '#pop'), + ], + 'funclist': [ + (r'\s+', Text), + (r'[' + uni.Lu + r']\w*', Keyword.Type), + (r'(_[\w\']+|[' + uni.Ll + r'][\w\']*)', Name.Function), + (r'--(?![!#$%&*+./<=>?@^|_~:\\]).*?$', Comment.Single), + (r'\{-', Comment.Multiline, 'comment'), + (r',', Punctuation), + (r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator), + # (HACK, but it makes sense to push two instances, believe me) + (r'\(', Punctuation, ('funclist', 'funclist')), + (r'\)', Punctuation, '#pop:2'), + ], + # 
NOTE: the next four states are shared in the AgdaLexer; make sure + # any change is compatible with Agda as well or copy over and change + 'comment': [ + # Multiline Comments + (r'[^-{}]+', Comment.Multiline), + (r'\{-', Comment.Multiline, '#push'), + (r'-\}', Comment.Multiline, '#pop'), + (r'[-{}]', Comment.Multiline), + ], + 'character': [ + # Allows multi-chars, incorrectly. + (r"[^\\']'", String.Char, '#pop'), + (r"\\", String.Escape, 'escape'), + ("'", String.Char, '#pop'), + ], + 'string': [ + (r'[^\\"]+', String), + (r"\\", String.Escape, 'escape'), + ('"', String, '#pop'), + ], + 'escape': [ + (r'[abfnrtv"\'&\\]', String.Escape, '#pop'), + (r'\^[][' + uni.Lu + r'@^_]', String.Escape, '#pop'), + ('|'.join(ascii), String.Escape, '#pop'), + (r'o[0-7]+', String.Escape, '#pop'), + (r'x[\da-fA-F]+', String.Escape, '#pop'), + (r'\d+', String.Escape, '#pop'), + (r'\s+\\', String.Escape, '#pop'), + ], + } + + +class IdrisLexer(RegexLexer): + """ + A lexer for the dependently typed programming language Idris. + + Based on the Haskell and Agda Lexer. + + .. 
versionadded:: 2.0 + """ + name = 'Idris' + aliases = ['idris', 'idr'] + filenames = ['*.idr'] + mimetypes = ['text/x-idris'] + + reserved = ('case', 'class', 'data', 'default', 'using', 'do', 'else', + 'if', 'in', 'infix[lr]?', 'instance', 'rewrite', 'auto', + 'namespace', 'codata', 'mutual', 'private', 'public', 'abstract', + 'total', 'partial', + 'let', 'proof', 'of', 'then', 'static', 'where', '_', 'with', + 'pattern', 'term', 'syntax', 'prefix', + 'postulate', 'parameters', 'record', 'dsl', 'impossible', 'implicit', + 'tactics', 'intros', 'intro', 'compute', 'refine', 'exact', 'trivial') + + ascii = ('NUL', 'SOH', '[SE]TX', 'EOT', 'ENQ', 'ACK', + 'BEL', 'BS', 'HT', 'LF', 'VT', 'FF', 'CR', 'S[OI]', 'DLE', + 'DC[1-4]', 'NAK', 'SYN', 'ETB', 'CAN', + 'EM', 'SUB', 'ESC', '[FGRU]S', 'SP', 'DEL') + + directives = ('lib', 'link', 'flag', 'include', 'hide', 'freeze', 'access', + 'default', 'logging', 'dynamic', 'name', 'error_handlers', 'language') + + tokens = { + 'root': [ + # Comments + (r'^(\s*)(%%%s)' % '|'.join(directives), + bygroups(Text, Keyword.Reserved)), + (r'(\s*)(--(?![!#$%&*+./<=>?@^|_~:\\]).*?)$', bygroups(Text, Comment.Single)), + (r'(\s*)(\|{3}.*?)$', bygroups(Text, Comment.Single)), + (r'(\s*)(\{-)', bygroups(Text, Comment.Multiline), 'comment'), + # Declaration + (r'^(\s*)([^\s(){}]+)(\s*)(:)(\s*)', + bygroups(Text, Name.Function, Text, Operator.Word, Text)), + # Identifiers + (r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved), + (r'(import|module)(\s+)', bygroups(Keyword.Reserved, Text), 'module'), + (r"('')?[A-Z][\w\']*", Keyword.Type), + (r'[a-z][\w\']*', Text), + # Special Symbols + (r'(<-|::|->|=>|=)', Operator.Word), # specials + (r'([(){}\[\]:!#$%&*+.\\/<=>?@^|~-]+)', Operator.Word), # specials + # Numbers + (r'\d+[eE][+-]?\d+', Number.Float), + (r'\d+\.\d+([eE][+-]?\d+)?', Number.Float), + (r'0[xX][\da-fA-F]+', Number.Hex), + (r'\d+', Number.Integer), + # Strings + (r"'", String.Char, 'character'), + (r'"', String, 'string'), + 
(r'[^\s(){}]+', Text), + (r'\s+?', Text), # Whitespace + ], + 'module': [ + (r'\s+', Text), + (r'([A-Z][\w.]*)(\s+)(\()', + bygroups(Name.Namespace, Text, Punctuation), 'funclist'), + (r'[A-Z][\w.]*', Name.Namespace, '#pop'), + ], + 'funclist': [ + (r'\s+', Text), + (r'[A-Z]\w*', Keyword.Type), + (r'(_[\w\']+|[a-z][\w\']*)', Name.Function), + (r'--.*$', Comment.Single), + (r'\{-', Comment.Multiline, 'comment'), + (r',', Punctuation), + (r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator), + # (HACK, but it makes sense to push two instances, believe me) + (r'\(', Punctuation, ('funclist', 'funclist')), + (r'\)', Punctuation, '#pop:2'), + ], + # NOTE: the next four states are shared in the AgdaLexer; make sure + # any change is compatible with Agda as well or copy over and change + 'comment': [ + # Multiline Comments + (r'[^-{}]+', Comment.Multiline), + (r'\{-', Comment.Multiline, '#push'), + (r'-\}', Comment.Multiline, '#pop'), + (r'[-{}]', Comment.Multiline), + ], + 'character': [ + # Allows multi-chars, incorrectly. + (r"[^\\']", String.Char), + (r"\\", String.Escape, 'escape'), + ("'", String.Char, '#pop'), + ], + 'string': [ + (r'[^\\"]+', String), + (r"\\", String.Escape, 'escape'), + ('"', String, '#pop'), + ], + 'escape': [ + (r'[abfnrtv"\'&\\]', String.Escape, '#pop'), + (r'\^[][A-Z@^_]', String.Escape, '#pop'), + ('|'.join(ascii), String.Escape, '#pop'), + (r'o[0-7]+', String.Escape, '#pop'), + (r'x[\da-fA-F]+', String.Escape, '#pop'), + (r'\d+', String.Escape, '#pop'), + (r'\s+\\', String.Escape, '#pop') + ], + } + + +class AgdaLexer(RegexLexer): + """ + For the `Agda `_ + dependently typed functional programming language and proof assistant. + + .. 
versionadded:: 2.0 + """ + + name = 'Agda' + aliases = ['agda'] + filenames = ['*.agda'] + mimetypes = ['text/x-agda'] + + reserved = ['abstract', 'codata', 'coinductive', 'constructor', 'data', + 'field', 'forall', 'hiding', 'in', 'inductive', 'infix', + 'infixl', 'infixr', 'instance', 'let', 'mutual', 'open', + 'pattern', 'postulate', 'primitive', 'private', + 'quote', 'quoteGoal', 'quoteTerm', + 'record', 'renaming', 'rewrite', 'syntax', 'tactic', + 'unquote', 'unquoteDecl', 'using', 'where', 'with'] + + tokens = { + 'root': [ + # Declaration + (r'^(\s*)([^\s(){}]+)(\s*)(:)(\s*)', + bygroups(Text, Name.Function, Text, Operator.Word, Text)), + # Comments + (r'--(?![!#$%&*+./<=>?@^|_~:\\]).*?$', Comment.Single), + (r'\{-', Comment.Multiline, 'comment'), + # Holes + (r'\{!', Comment.Directive, 'hole'), + # Lexemes: + # Identifiers + (r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved), + (r'(import|module)(\s+)', bygroups(Keyword.Reserved, Text), 'module'), + (r'\b(Set|Prop)\b', Keyword.Type), + # Special Symbols + (r'(\(|\)|\{|\})', Operator), + (u'(\\.{1,3}|\\||\u039B|\u2200|\u2192|:|=|->)', Operator.Word), + # Numbers + (r'\d+[eE][+-]?\d+', Number.Float), + (r'\d+\.\d+([eE][+-]?\d+)?', Number.Float), + (r'0[xX][\da-fA-F]+', Number.Hex), + (r'\d+', Number.Integer), + # Strings + (r"'", String.Char, 'character'), + (r'"', String, 'string'), + (r'[^\s(){}]+', Text), + (r'\s+?', Text), # Whitespace + ], + 'hole': [ + # Holes + (r'[^!{}]+', Comment.Directive), + (r'\{!', Comment.Directive, '#push'), + (r'!\}', Comment.Directive, '#pop'), + (r'[!{}]', Comment.Directive), + ], + 'module': [ + (r'\{-', Comment.Multiline, 'comment'), + (r'[a-zA-Z][\w.]*', Name, '#pop'), + (r'[^a-zA-Z]+', Text) + ], + 'comment': HaskellLexer.tokens['comment'], + 'character': HaskellLexer.tokens['character'], + 'string': HaskellLexer.tokens['string'], + 'escape': HaskellLexer.tokens['escape'] + } + + +class CryptolLexer(RegexLexer): + """ + FIXME: A Cryptol2 lexer based on the 
lexemes defined in the Haskell 98 Report. + + .. versionadded:: 2.0 + """ + name = 'Cryptol' + aliases = ['cryptol', 'cry'] + filenames = ['*.cry'] + mimetypes = ['text/x-cryptol'] + + reserved = ('Arith', 'Bit', 'Cmp', 'False', 'Inf', 'True', 'else', + 'export', 'extern', 'fin', 'if', 'import', 'inf', 'lg2', + 'max', 'min', 'module', 'newtype', 'pragma', 'property', + 'then', 'type', 'where', 'width') + ascii = ('NUL', 'SOH', '[SE]TX', 'EOT', 'ENQ', 'ACK', + 'BEL', 'BS', 'HT', 'LF', 'VT', 'FF', 'CR', 'S[OI]', 'DLE', + 'DC[1-4]', 'NAK', 'SYN', 'ETB', 'CAN', + 'EM', 'SUB', 'ESC', '[FGRU]S', 'SP', 'DEL') + + tokens = { + 'root': [ + # Whitespace: + (r'\s+', Text), + # (r'--\s*|.*$', Comment.Doc), + (r'//.*$', Comment.Single), + (r'/\*', Comment.Multiline, 'comment'), + # Lexemes: + # Identifiers + (r'\bimport\b', Keyword.Reserved, 'import'), + (r'\bmodule\b', Keyword.Reserved, 'module'), + (r'\berror\b', Name.Exception), + (r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved), + (r'^[_a-z][\w\']*', Name.Function), + (r"'?[_a-z][\w']*", Name), + (r"('')?[A-Z][\w\']*", Keyword.Type), + # Operators + (r'\\(?![:!#$%&*+.\\/<=>?@^|~-]+)', Name.Function), # lambda operator + (r'(<-|::|->|=>|=)(?![:!#$%&*+.\\/<=>?@^|~-]+)', Operator.Word), # specials + (r':[:!#$%&*+.\\/<=>?@^|~-]*', Keyword.Type), # Constructor operators + (r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator), # Other operators + # Numbers + (r'\d+[eE][+-]?\d+', Number.Float), + (r'\d+\.\d+([eE][+-]?\d+)?', Number.Float), + (r'0[oO][0-7]+', Number.Oct), + (r'0[xX][\da-fA-F]+', Number.Hex), + (r'\d+', Number.Integer), + # Character/String Literals + (r"'", String.Char, 'character'), + (r'"', String, 'string'), + # Special + (r'\[\]', Keyword.Type), + (r'\(\)', Name.Builtin), + (r'[][(),;`{}]', Punctuation), + ], + 'import': [ + # Import statements + (r'\s+', Text), + (r'"', String, 'string'), + # after "funclist" state + (r'\)', Punctuation, '#pop'), + (r'qualified\b', Keyword), + # import X as Y + 
(r'([A-Z][\w.]*)(\s+)(as)(\s+)([A-Z][\w.]*)', + bygroups(Name.Namespace, Text, Keyword, Text, Name), '#pop'), + # import X hiding (functions) + (r'([A-Z][\w.]*)(\s+)(hiding)(\s+)(\()', + bygroups(Name.Namespace, Text, Keyword, Text, Punctuation), 'funclist'), + # import X (functions) + (r'([A-Z][\w.]*)(\s+)(\()', + bygroups(Name.Namespace, Text, Punctuation), 'funclist'), + # import X + (r'[\w.]+', Name.Namespace, '#pop'), + ], + 'module': [ + (r'\s+', Text), + (r'([A-Z][\w.]*)(\s+)(\()', + bygroups(Name.Namespace, Text, Punctuation), 'funclist'), + (r'[A-Z][\w.]*', Name.Namespace, '#pop'), + ], + 'funclist': [ + (r'\s+', Text), + (r'[A-Z]\w*', Keyword.Type), + (r'(_[\w\']+|[a-z][\w\']*)', Name.Function), + # TODO: these don't match the comments in docs, remove. + #(r'--(?![!#$%&*+./<=>?@^|_~:\\]).*?$', Comment.Single), + #(r'{-', Comment.Multiline, 'comment'), + (r',', Punctuation), + (r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator), + # (HACK, but it makes sense to push two instances, believe me) + (r'\(', Punctuation, ('funclist', 'funclist')), + (r'\)', Punctuation, '#pop:2'), + ], + 'comment': [ + # Multiline Comments + (r'[^/*]+', Comment.Multiline), + (r'/\*', Comment.Multiline, '#push'), + (r'\*/', Comment.Multiline, '#pop'), + (r'[*/]', Comment.Multiline), + ], + 'character': [ + # Allows multi-chars, incorrectly. 
+ (r"[^\\']'", String.Char, '#pop'), + (r"\\", String.Escape, 'escape'), + ("'", String.Char, '#pop'), + ], + 'string': [ + (r'[^\\"]+', String), + (r"\\", String.Escape, 'escape'), + ('"', String, '#pop'), + ], + 'escape': [ + (r'[abfnrtv"\'&\\]', String.Escape, '#pop'), + (r'\^[][A-Z@^_]', String.Escape, '#pop'), + ('|'.join(ascii), String.Escape, '#pop'), + (r'o[0-7]+', String.Escape, '#pop'), + (r'x[\da-fA-F]+', String.Escape, '#pop'), + (r'\d+', String.Escape, '#pop'), + (r'\s+\\', String.Escape, '#pop'), + ], + } + + EXTRA_KEYWORDS = set(('join', 'split', 'reverse', 'transpose', 'width', + 'length', 'tail', '<<', '>>', '<<<', '>>>', 'const', + 'reg', 'par', 'seq', 'ASSERT', 'undefined', 'error', + 'trace')) + + def get_tokens_unprocessed(self, text): + stack = ['root'] + for index, token, value in \ + RegexLexer.get_tokens_unprocessed(self, text, stack): + if token is Name and value in self.EXTRA_KEYWORDS: + yield index, Name.Builtin, value + else: + yield index, token, value + + +class LiterateLexer(Lexer): + """ + Base class for lexers of literate file formats based on LaTeX or Bird-style + (prefixing each code line with ">"). + + Additional options accepted: + + `litstyle` + If given, must be ``"bird"`` or ``"latex"``. If not given, the style + is autodetected: if the first non-whitespace character in the source + is a backslash or percent character, LaTeX is assumed, else Bird. 
+ """ + + bird_re = re.compile(r'(>[ \t]*)(.*\n)') + + def __init__(self, baselexer, **options): + self.baselexer = baselexer + Lexer.__init__(self, **options) + + def get_tokens_unprocessed(self, text): + style = self.options.get('litstyle') + if style is None: + style = (text.lstrip()[0:1] in '%\\') and 'latex' or 'bird' + + code = '' + insertions = [] + if style == 'bird': + # bird-style + for match in line_re.finditer(text): + line = match.group() + m = self.bird_re.match(line) + if m: + insertions.append((len(code), + [(0, Comment.Special, m.group(1))])) + code += m.group(2) + else: + insertions.append((len(code), [(0, Text, line)])) + else: + # latex-style + from pygments.lexers.markup import TexLexer + lxlexer = TexLexer(**self.options) + codelines = 0 + latex = '' + for match in line_re.finditer(text): + line = match.group() + if codelines: + if line.lstrip().startswith('\\end{code}'): + codelines = 0 + latex += line + else: + code += line + elif line.lstrip().startswith('\\begin{code}'): + codelines = 1 + latex += line + insertions.append((len(code), + list(lxlexer.get_tokens_unprocessed(latex)))) + latex = '' + else: + latex += line + insertions.append((len(code), + list(lxlexer.get_tokens_unprocessed(latex)))) + for item in do_insertions(insertions, self.baselexer.get_tokens_unprocessed(code)): + yield item + + +class LiterateHaskellLexer(LiterateLexer): + """ + For Literate Haskell (Bird-style or LaTeX) source. + + Additional options accepted: + + `litstyle` + If given, must be ``"bird"`` or ``"latex"``. If not given, the style + is autodetected: if the first non-whitespace character in the source + is a backslash or percent character, LaTeX is assumed, else Bird. + + .. 
versionadded:: 0.9 + """ + name = 'Literate Haskell' + aliases = ['lhs', 'literate-haskell', 'lhaskell'] + filenames = ['*.lhs'] + mimetypes = ['text/x-literate-haskell'] + + def __init__(self, **options): + hslexer = HaskellLexer(**options) + LiterateLexer.__init__(self, hslexer, **options) + + +class LiterateIdrisLexer(LiterateLexer): + """ + For Literate Idris (Bird-style or LaTeX) source. + + Additional options accepted: + + `litstyle` + If given, must be ``"bird"`` or ``"latex"``. If not given, the style + is autodetected: if the first non-whitespace character in the source + is a backslash or percent character, LaTeX is assumed, else Bird. + + .. versionadded:: 2.0 + """ + name = 'Literate Idris' + aliases = ['lidr', 'literate-idris', 'lidris'] + filenames = ['*.lidr'] + mimetypes = ['text/x-literate-idris'] + + def __init__(self, **options): + hslexer = IdrisLexer(**options) + LiterateLexer.__init__(self, hslexer, **options) + + +class LiterateAgdaLexer(LiterateLexer): + """ + For Literate Agda source. + + Additional options accepted: + + `litstyle` + If given, must be ``"bird"`` or ``"latex"``. If not given, the style + is autodetected: if the first non-whitespace character in the source + is a backslash or percent character, LaTeX is assumed, else Bird. + + .. versionadded:: 2.0 + """ + name = 'Literate Agda' + aliases = ['lagda', 'literate-agda'] + filenames = ['*.lagda'] + mimetypes = ['text/x-literate-agda'] + + def __init__(self, **options): + agdalexer = AgdaLexer(**options) + LiterateLexer.__init__(self, agdalexer, litstyle='latex', **options) + + +class LiterateCryptolLexer(LiterateLexer): + """ + For Literate Cryptol (Bird-style or LaTeX) source. + + Additional options accepted: + + `litstyle` + If given, must be ``"bird"`` or ``"latex"``. If not given, the style + is autodetected: if the first non-whitespace character in the source + is a backslash or percent character, LaTeX is assumed, else Bird. + + .. 
versionadded:: 2.0 + """ + name = 'Literate Cryptol' + aliases = ['lcry', 'literate-cryptol', 'lcryptol'] + filenames = ['*.lcry'] + mimetypes = ['text/x-literate-cryptol'] + + def __init__(self, **options): + crylexer = CryptolLexer(**options) + LiterateLexer.__init__(self, crylexer, **options) + + +class KokaLexer(RegexLexer): + """ + Lexer for the `Koka `_ + language. + + .. versionadded:: 1.6 + """ + + name = 'Koka' + aliases = ['koka'] + filenames = ['*.kk', '*.kki'] + mimetypes = ['text/x-koka'] + + keywords = [ + 'infix', 'infixr', 'infixl', + 'type', 'cotype', 'rectype', 'alias', + 'struct', 'con', + 'fun', 'function', 'val', 'var', + 'external', + 'if', 'then', 'else', 'elif', 'return', 'match', + 'private', 'public', 'private', + 'module', 'import', 'as', + 'include', 'inline', + 'rec', + 'try', 'yield', 'enum', + 'interface', 'instance', + ] + + # keywords that are followed by a type + typeStartKeywords = [ + 'type', 'cotype', 'rectype', 'alias', 'struct', 'enum', + ] + + # keywords valid in a type + typekeywords = [ + 'forall', 'exists', 'some', 'with', + ] + + # builtin names and special names + builtin = [ + 'for', 'while', 'repeat', + 'foreach', 'foreach-indexed', + 'error', 'catch', 'finally', + 'cs', 'js', 'file', 'ref', 'assigned', + ] + + # symbols that can be in an operator + symbols = r'[$%&*+@!/\\^~=.:\-?|<>]+' + + # symbol boundary: an operator keyword should not be followed by any of these + sboundary = '(?!'+symbols+')' + + # name boundary: a keyword should not be followed by any of these + boundary = '(?![\w/])' + + # koka token abstractions + tokenType = Name.Attribute + tokenTypeDef = Name.Class + tokenConstructor = Generic.Emph + + # main lexer + tokens = { + 'root': [ + include('whitespace'), + + # go into type mode + (r'::?' 
+ sboundary, tokenType, 'type'), + (r'(alias)(\s+)([a-z]\w*)?', bygroups(Keyword, Text, tokenTypeDef), + 'alias-type'), + (r'(struct)(\s+)([a-z]\w*)?', bygroups(Keyword, Text, tokenTypeDef), + 'struct-type'), + ((r'(%s)' % '|'.join(typeStartKeywords)) + + r'(\s+)([a-z]\w*)?', bygroups(Keyword, Text, tokenTypeDef), + 'type'), + + # special sequences of tokens (we use ?: for non-capturing group as + # required by 'bygroups') + (r'(module)(\s+)(interface\s+)?((?:[a-z]\w*/)*[a-z]\w*)', + bygroups(Keyword, Text, Keyword, Name.Namespace)), + (r'(import)(\s+)((?:[a-z]\w*/)*[a-z]\w*)' + r'(?:(\s*)(=)(\s*)((?:qualified\s*)?)' + r'((?:[a-z]\w*/)*[a-z]\w*))?', + bygroups(Keyword, Text, Name.Namespace, Text, Keyword, Text, + Keyword, Name.Namespace)), + + (r'(^(?:(?:public|private)\s*)?(?:function|fun|val))' + r'(\s+)([a-z]\w*|\((?:' + symbols + r'|/)\))', + bygroups(Keyword, Text, Name.Function)), + (r'(^(?:(?:public|private)\s*)?external)(\s+)(inline\s+)?' + r'([a-z]\w*|\((?:' + symbols + r'|/)\))', + bygroups(Keyword, Text, Keyword, Name.Function)), + + # keywords + (r'(%s)' % '|'.join(typekeywords) + boundary, Keyword.Type), + (r'(%s)' % '|'.join(keywords) + boundary, Keyword), + (r'(%s)' % '|'.join(builtin) + boundary, Keyword.Pseudo), + (r'::?|:=|\->|[=.]' + sboundary, Keyword), + + # names + (r'((?:[a-z]\w*/)*)([A-Z]\w*)', + bygroups(Name.Namespace, tokenConstructor)), + (r'((?:[a-z]\w*/)*)([a-z]\w*)', bygroups(Name.Namespace, Name)), + (r'((?:[a-z]\w*/)*)(\((?:' + symbols + r'|/)\))', + bygroups(Name.Namespace, Name)), + (r'_\w*', Name.Variable), + + # literal string + (r'@"', String.Double, 'litstring'), + + # operators + (symbols + "|/(?![*/])", Operator), + (r'`', Operator), + (r'[{}()\[\];,]', Punctuation), + + # literals. 
No check for literal characters with len > 1 + (r'[0-9]+\.[0-9]+([eE][\-+]?[0-9]+)?', Number.Float), + (r'0[xX][0-9a-fA-F]+', Number.Hex), + (r'[0-9]+', Number.Integer), + + (r"'", String.Char, 'char'), + (r'"', String.Double, 'string'), + ], + + # type started by alias + 'alias-type': [ + (r'=', Keyword), + include('type') + ], + + # type started by struct + 'struct-type': [ + (r'(?=\((?!,*\)))', Punctuation, '#pop'), + include('type') + ], + + # type started by colon + 'type': [ + (r'[(\[<]', tokenType, 'type-nested'), + include('type-content') + ], + + # type nested in brackets: can contain parameters, comma etc. + 'type-nested': [ + (r'[)\]>]', tokenType, '#pop'), + (r'[(\[<]', tokenType, 'type-nested'), + (r',', tokenType), + (r'([a-z]\w*)(\s*)(:)(?!:)', + bygroups(Name, Text, tokenType)), # parameter name + include('type-content') + ], + + # shared contents of a type + 'type-content': [ + include('whitespace'), + + # keywords + (r'(%s)' % '|'.join(typekeywords) + boundary, Keyword), + (r'(?=((%s)' % '|'.join(keywords) + boundary + '))', + Keyword, '#pop'), # need to match because names overlap... 
+ + # kinds + (r'[EPHVX]' + boundary, tokenType), + + # type names + (r'[a-z][0-9]*(?![\w/])', tokenType), + (r'_\w*', tokenType.Variable), # Generic.Emph + (r'((?:[a-z]\w*/)*)([A-Z]\w*)', + bygroups(Name.Namespace, tokenType)), + (r'((?:[a-z]\w*/)*)([a-z]\w+)', + bygroups(Name.Namespace, tokenType)), + + # type keyword operators + (r'::|->|[.:|]', tokenType), + + # catchall + default('#pop') + ], + + # comments and literals + 'whitespace': [ + (r'\n\s*#.*$', Comment.Preproc), + (r'\s+', Text), + (r'/\*', Comment.Multiline, 'comment'), + (r'//.*$', Comment.Single) + ], + 'comment': [ + (r'[^/*]+', Comment.Multiline), + (r'/\*', Comment.Multiline, '#push'), + (r'\*/', Comment.Multiline, '#pop'), + (r'[*/]', Comment.Multiline), + ], + 'litstring': [ + (r'[^"]+', String.Double), + (r'""', String.Escape), + (r'"', String.Double, '#pop'), + ], + 'string': [ + (r'[^\\"\n]+', String.Double), + include('escape-sequence'), + (r'["\n]', String.Double, '#pop'), + ], + 'char': [ + (r'[^\\\'\n]+', String.Char), + include('escape-sequence'), + (r'[\'\n]', String.Char, '#pop'), + ], + 'escape-sequence': [ + (r'\\[nrt\\"\']', String.Escape), + (r'\\x[0-9a-fA-F]{2}', String.Escape), + (r'\\u[0-9a-fA-F]{4}', String.Escape), + # Yes, \U literals are 6 hex digits. + (r'\\U[0-9a-fA-F]{6}', String.Escape) + ] + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/haxe.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/haxe.py new file mode 100644 index 0000000..69c0add --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/haxe.py @@ -0,0 +1,936 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.haxe + ~~~~~~~~~~~~~~~~~~~~ + + Lexers for Haxe and related stuff. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import re + +from pygments.lexer import ExtendedRegexLexer, RegexLexer, include, bygroups, \ + default +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Generic, Whitespace + +__all__ = ['HaxeLexer', 'HxmlLexer'] + + +class HaxeLexer(ExtendedRegexLexer): + """ + For Haxe source code (http://haxe.org/). + + .. versionadded:: 1.3 + """ + + name = 'Haxe' + aliases = ['hx', 'haxe', 'hxsl'] + filenames = ['*.hx', '*.hxsl'] + mimetypes = ['text/haxe', 'text/x-haxe', 'text/x-hx'] + + # keywords extracted from lexer.mll in the haxe compiler source + keyword = (r'(?:function|class|static|var|if|else|while|do|for|' + r'break|return|continue|extends|implements|import|' + r'switch|case|default|public|private|try|untyped|' + r'catch|new|this|throw|extern|enum|in|interface|' + r'cast|override|dynamic|typedef|package|' + r'inline|using|null|true|false|abstract)\b') + + # idtype in lexer.mll + typeid = r'_*[A-Z]\w*' + + # combined ident and dollar and idtype + ident = r'(?:_*[a-z]\w*|_+[0-9]\w*|' + typeid + '|_+|\$\w+)' + + binop = (r'(?:%=|&=|\|=|\^=|\+=|\-=|\*=|/=|<<=|>\s*>\s*=|>\s*>\s*>\s*=|==|' + r'!=|<=|>\s*=|&&|\|\||<<|>>>|>\s*>|\.\.\.|<|>|%|&|\||\^|\+|\*|' + r'/|\-|=>|=)') + + # ident except keywords + ident_no_keyword = r'(?!' 
+ keyword + ')' + ident + + flags = re.DOTALL | re.MULTILINE + + preproc_stack = [] + + def preproc_callback(self, match, ctx): + proc = match.group(2) + + if proc == 'if': + # store the current stack + self.preproc_stack.append(ctx.stack[:]) + elif proc in ['else', 'elseif']: + # restore the stack back to right before #if + if self.preproc_stack: + ctx.stack = self.preproc_stack[-1][:] + elif proc == 'end': + # remove the saved stack of previous #if + if self.preproc_stack: + self.preproc_stack.pop() + + # #if and #elseif should follow by an expr + if proc in ['if', 'elseif']: + ctx.stack.append('preproc-expr') + + # #error can be optionally follow by the error msg + if proc in ['error']: + ctx.stack.append('preproc-error') + + yield match.start(), Comment.Preproc, '#' + proc + ctx.pos = match.end() + + tokens = { + 'root': [ + include('spaces'), + include('meta'), + (r'(?:package)\b', Keyword.Namespace, ('semicolon', 'package')), + (r'(?:import)\b', Keyword.Namespace, ('semicolon', 'import')), + (r'(?:using)\b', Keyword.Namespace, ('semicolon', 'using')), + (r'(?:extern|private)\b', Keyword.Declaration), + (r'(?:abstract)\b', Keyword.Declaration, 'abstract'), + (r'(?:class|interface)\b', Keyword.Declaration, 'class'), + (r'(?:enum)\b', Keyword.Declaration, 'enum'), + (r'(?:typedef)\b', Keyword.Declaration, 'typedef'), + + # top-level expression + # although it is not supported in haxe, but it is common to write + # expression in web pages the positive lookahead here is to prevent + # an infinite loop at the EOF + (r'(?=.)', Text, 'expr-statement'), + ], + + # space/tab/comment/preproc + 'spaces': [ + (r'\s+', Text), + (r'//[^\n\r]*', Comment.Single), + (r'/\*.*?\*/', Comment.Multiline), + (r'(#)(if|elseif|else|end|error)\b', preproc_callback), + ], + + 'string-single-interpol': [ + (r'\$\{', String.Interpol, ('string-interpol-close', 'expr')), + (r'\$\$', String.Escape), + (r'\$(?=' + ident + ')', String.Interpol, 'ident'), + include('string-single'), + ], + + 
'string-single': [ + (r"'", String.Single, '#pop'), + (r'\\.', String.Escape), + (r'.', String.Single), + ], + + 'string-double': [ + (r'"', String.Double, '#pop'), + (r'\\.', String.Escape), + (r'.', String.Double), + ], + + 'string-interpol-close': [ + (r'\$'+ident, String.Interpol), + (r'\}', String.Interpol, '#pop'), + ], + + 'package': [ + include('spaces'), + (ident, Name.Namespace), + (r'\.', Punctuation, 'import-ident'), + default('#pop'), + ], + + 'import': [ + include('spaces'), + (ident, Name.Namespace), + (r'\*', Keyword), # wildcard import + (r'\.', Punctuation, 'import-ident'), + (r'in', Keyword.Namespace, 'ident'), + default('#pop'), + ], + + 'import-ident': [ + include('spaces'), + (r'\*', Keyword, '#pop'), # wildcard import + (ident, Name.Namespace, '#pop'), + ], + + 'using': [ + include('spaces'), + (ident, Name.Namespace), + (r'\.', Punctuation, 'import-ident'), + default('#pop'), + ], + + 'preproc-error': [ + (r'\s+', Comment.Preproc), + (r"'", String.Single, ('#pop', 'string-single')), + (r'"', String.Double, ('#pop', 'string-double')), + default('#pop'), + ], + + 'preproc-expr': [ + (r'\s+', Comment.Preproc), + (r'\!', Comment.Preproc), + (r'\(', Comment.Preproc, ('#pop', 'preproc-parenthesis')), + + (ident, Comment.Preproc, '#pop'), + + # Float + (r'\.[0-9]+', Number.Float), + (r'[0-9]+[eE][+\-]?[0-9]+', Number.Float), + (r'[0-9]+\.[0-9]*[eE][+\-]?[0-9]+', Number.Float), + (r'[0-9]+\.[0-9]+', Number.Float), + (r'[0-9]+\.(?!' 
+ ident + '|\.\.)', Number.Float), + + # Int + (r'0x[0-9a-fA-F]+', Number.Hex), + (r'[0-9]+', Number.Integer), + + # String + (r"'", String.Single, ('#pop', 'string-single')), + (r'"', String.Double, ('#pop', 'string-double')), + ], + + 'preproc-parenthesis': [ + (r'\s+', Comment.Preproc), + (r'\)', Comment.Preproc, '#pop'), + default('preproc-expr-in-parenthesis'), + ], + + 'preproc-expr-chain': [ + (r'\s+', Comment.Preproc), + (binop, Comment.Preproc, ('#pop', 'preproc-expr-in-parenthesis')), + default('#pop'), + ], + + # same as 'preproc-expr' but able to chain 'preproc-expr-chain' + 'preproc-expr-in-parenthesis': [ + (r'\s+', Comment.Preproc), + (r'\!', Comment.Preproc), + (r'\(', Comment.Preproc, + ('#pop', 'preproc-expr-chain', 'preproc-parenthesis')), + + (ident, Comment.Preproc, ('#pop', 'preproc-expr-chain')), + + # Float + (r'\.[0-9]+', Number.Float, ('#pop', 'preproc-expr-chain')), + (r'[0-9]+[eE][+\-]?[0-9]+', Number.Float, ('#pop', 'preproc-expr-chain')), + (r'[0-9]+\.[0-9]*[eE][+\-]?[0-9]+', Number.Float, ('#pop', 'preproc-expr-chain')), + (r'[0-9]+\.[0-9]+', Number.Float, ('#pop', 'preproc-expr-chain')), + (r'[0-9]+\.(?!' 
+ ident + '|\.\.)', Number.Float, ('#pop', 'preproc-expr-chain')), + + # Int + (r'0x[0-9a-fA-F]+', Number.Hex, ('#pop', 'preproc-expr-chain')), + (r'[0-9]+', Number.Integer, ('#pop', 'preproc-expr-chain')), + + # String + (r"'", String.Single, + ('#pop', 'preproc-expr-chain', 'string-single')), + (r'"', String.Double, + ('#pop', 'preproc-expr-chain', 'string-double')), + ], + + 'abstract': [ + include('spaces'), + default(('#pop', 'abstract-body', 'abstract-relation', + 'abstract-opaque', 'type-param-constraint', 'type-name')), + ], + + 'abstract-body': [ + include('spaces'), + (r'\{', Punctuation, ('#pop', 'class-body')), + ], + + 'abstract-opaque': [ + include('spaces'), + (r'\(', Punctuation, ('#pop', 'parenthesis-close', 'type')), + default('#pop'), + ], + + 'abstract-relation': [ + include('spaces'), + (r'(?:to|from)', Keyword.Declaration, 'type'), + (r',', Punctuation), + default('#pop'), + ], + + 'meta': [ + include('spaces'), + (r'@', Name.Decorator, ('meta-body', 'meta-ident', 'meta-colon')), + ], + + # optional colon + 'meta-colon': [ + include('spaces'), + (r':', Name.Decorator, '#pop'), + default('#pop'), + ], + + # same as 'ident' but set token as Name.Decorator instead of Name + 'meta-ident': [ + include('spaces'), + (ident, Name.Decorator, '#pop'), + ], + + 'meta-body': [ + include('spaces'), + (r'\(', Name.Decorator, ('#pop', 'meta-call')), + default('#pop'), + ], + + 'meta-call': [ + include('spaces'), + (r'\)', Name.Decorator, '#pop'), + default(('#pop', 'meta-call-sep', 'expr')), + ], + + 'meta-call-sep': [ + include('spaces'), + (r'\)', Name.Decorator, '#pop'), + (r',', Punctuation, ('#pop', 'meta-call')), + ], + + 'typedef': [ + include('spaces'), + default(('#pop', 'typedef-body', 'type-param-constraint', + 'type-name')), + ], + + 'typedef-body': [ + include('spaces'), + (r'=', Operator, ('#pop', 'optional-semicolon', 'type')), + ], + + 'enum': [ + include('spaces'), + default(('#pop', 'enum-body', 'bracket-open', + 'type-param-constraint', 
'type-name')), + ], + + 'enum-body': [ + include('spaces'), + include('meta'), + (r'\}', Punctuation, '#pop'), + (ident_no_keyword, Name, ('enum-member', 'type-param-constraint')), + ], + + 'enum-member': [ + include('spaces'), + (r'\(', Punctuation, + ('#pop', 'semicolon', 'flag', 'function-param')), + default(('#pop', 'semicolon', 'flag')), + ], + + 'class': [ + include('spaces'), + default(('#pop', 'class-body', 'bracket-open', 'extends', + 'type-param-constraint', 'type-name')), + ], + + 'extends': [ + include('spaces'), + (r'(?:extends|implements)\b', Keyword.Declaration, 'type'), + (r',', Punctuation), # the comma is made optional here, since haxe2 + # requires the comma but haxe3 does not allow it + default('#pop'), + ], + + 'bracket-open': [ + include('spaces'), + (r'\{', Punctuation, '#pop'), + ], + + 'bracket-close': [ + include('spaces'), + (r'\}', Punctuation, '#pop'), + ], + + 'class-body': [ + include('spaces'), + include('meta'), + (r'\}', Punctuation, '#pop'), + (r'(?:static|public|private|override|dynamic|inline|macro)\b', + Keyword.Declaration), + default('class-member'), + ], + + 'class-member': [ + include('spaces'), + (r'(var)\b', Keyword.Declaration, + ('#pop', 'optional-semicolon', 'var')), + (r'(function)\b', Keyword.Declaration, + ('#pop', 'optional-semicolon', 'class-method')), + ], + + # local function, anonymous or not + 'function-local': [ + include('spaces'), + (ident_no_keyword, Name.Function, + ('#pop', 'optional-expr', 'flag', 'function-param', + 'parenthesis-open', 'type-param-constraint')), + default(('#pop', 'optional-expr', 'flag', 'function-param', + 'parenthesis-open', 'type-param-constraint')), + ], + + 'optional-expr': [ + include('spaces'), + include('expr'), + default('#pop'), + ], + + 'class-method': [ + include('spaces'), + (ident, Name.Function, ('#pop', 'optional-expr', 'flag', + 'function-param', 'parenthesis-open', + 'type-param-constraint')), + ], + + # function arguments + 'function-param': [ + include('spaces'), + 
(r'\)', Punctuation, '#pop'), + (r'\?', Punctuation), + (ident_no_keyword, Name, + ('#pop', 'function-param-sep', 'assign', 'flag')), + ], + + 'function-param-sep': [ + include('spaces'), + (r'\)', Punctuation, '#pop'), + (r',', Punctuation, ('#pop', 'function-param')), + ], + + 'prop-get-set': [ + include('spaces'), + (r'\(', Punctuation, ('#pop', 'parenthesis-close', + 'prop-get-set-opt', 'comma', 'prop-get-set-opt')), + default('#pop'), + ], + + 'prop-get-set-opt': [ + include('spaces'), + (r'(?:default|null|never|dynamic|get|set)\b', Keyword, '#pop'), + (ident_no_keyword, Text, '#pop'), # custom getter/setter + ], + + 'expr-statement': [ + include('spaces'), + # makes semicolon optional here, just to avoid checking the last + # one is bracket or not. + default(('#pop', 'optional-semicolon', 'expr')), + ], + + 'expr': [ + include('spaces'), + (r'@', Name.Decorator, ('#pop', 'optional-expr', 'meta-body', + 'meta-ident', 'meta-colon')), + (r'(?:\+\+|\-\-|~(?!/)|!|\-)', Operator), + (r'\(', Punctuation, ('#pop', 'expr-chain', 'parenthesis')), + (r'(?:static|public|private|override|dynamic|inline)\b', + Keyword.Declaration), + (r'(?:function)\b', Keyword.Declaration, ('#pop', 'expr-chain', + 'function-local')), + (r'\{', Punctuation, ('#pop', 'expr-chain', 'bracket')), + (r'(?:true|false|null)\b', Keyword.Constant, ('#pop', 'expr-chain')), + (r'(?:this)\b', Keyword, ('#pop', 'expr-chain')), + (r'(?:cast)\b', Keyword, ('#pop', 'expr-chain', 'cast')), + (r'(?:try)\b', Keyword, ('#pop', 'catch', 'expr')), + (r'(?:var)\b', Keyword.Declaration, ('#pop', 'var')), + (r'(?:new)\b', Keyword, ('#pop', 'expr-chain', 'new')), + (r'(?:switch)\b', Keyword, ('#pop', 'switch')), + (r'(?:if)\b', Keyword, ('#pop', 'if')), + (r'(?:do)\b', Keyword, ('#pop', 'do')), + (r'(?:while)\b', Keyword, ('#pop', 'while')), + (r'(?:for)\b', Keyword, ('#pop', 'for')), + (r'(?:untyped|throw)\b', Keyword), + (r'(?:return)\b', Keyword, ('#pop', 'optional-expr')), + (r'(?:macro)\b', Keyword, ('#pop', 
'macro')), + (r'(?:continue|break)\b', Keyword, '#pop'), + (r'(?:\$\s*[a-z]\b|\$(?!'+ident+'))', Name, ('#pop', 'dollar')), + (ident_no_keyword, Name, ('#pop', 'expr-chain')), + + # Float + (r'\.[0-9]+', Number.Float, ('#pop', 'expr-chain')), + (r'[0-9]+[eE][+\-]?[0-9]+', Number.Float, ('#pop', 'expr-chain')), + (r'[0-9]+\.[0-9]*[eE][+\-]?[0-9]+', Number.Float, ('#pop', 'expr-chain')), + (r'[0-9]+\.[0-9]+', Number.Float, ('#pop', 'expr-chain')), + (r'[0-9]+\.(?!' + ident + '|\.\.)', Number.Float, ('#pop', 'expr-chain')), + + # Int + (r'0x[0-9a-fA-F]+', Number.Hex, ('#pop', 'expr-chain')), + (r'[0-9]+', Number.Integer, ('#pop', 'expr-chain')), + + # String + (r"'", String.Single, ('#pop', 'expr-chain', 'string-single-interpol')), + (r'"', String.Double, ('#pop', 'expr-chain', 'string-double')), + + # EReg + (r'~/(\\\\|\\/|[^/\n])*/[gimsu]*', String.Regex, ('#pop', 'expr-chain')), + + # Array + (r'\[', Punctuation, ('#pop', 'expr-chain', 'array-decl')), + ], + + 'expr-chain': [ + include('spaces'), + (r'(?:\+\+|\-\-)', Operator), + (binop, Operator, ('#pop', 'expr')), + (r'(?:in)\b', Keyword, ('#pop', 'expr')), + (r'\?', Operator, ('#pop', 'expr', 'ternary', 'expr')), + (r'(\.)(' + ident_no_keyword + ')', bygroups(Punctuation, Name)), + (r'\[', Punctuation, 'array-access'), + (r'\(', Punctuation, 'call'), + default('#pop'), + ], + + # macro reification + 'macro': [ + include('spaces'), + include('meta'), + (r':', Punctuation, ('#pop', 'type')), + + (r'(?:extern|private)\b', Keyword.Declaration), + (r'(?:abstract)\b', Keyword.Declaration, ('#pop', 'optional-semicolon', 'abstract')), + (r'(?:class|interface)\b', Keyword.Declaration, ('#pop', 'optional-semicolon', 'macro-class')), + (r'(?:enum)\b', Keyword.Declaration, ('#pop', 'optional-semicolon', 'enum')), + (r'(?:typedef)\b', Keyword.Declaration, ('#pop', 'optional-semicolon', 'typedef')), + + default(('#pop', 'expr')), + ], + + 'macro-class': [ + (r'\{', Punctuation, ('#pop', 'class-body')), + include('class') + ], 
+ + # cast can be written as "cast expr" or "cast(expr, type)" + 'cast': [ + include('spaces'), + (r'\(', Punctuation, ('#pop', 'parenthesis-close', + 'cast-type', 'expr')), + default(('#pop', 'expr')), + ], + + # optionally give a type as the 2nd argument of cast() + 'cast-type': [ + include('spaces'), + (r',', Punctuation, ('#pop', 'type')), + default('#pop'), + ], + + 'catch': [ + include('spaces'), + (r'(?:catch)\b', Keyword, ('expr', 'function-param', + 'parenthesis-open')), + default('#pop'), + ], + + # do-while loop + 'do': [ + include('spaces'), + default(('#pop', 'do-while', 'expr')), + ], + + # the while after do + 'do-while': [ + include('spaces'), + (r'(?:while)\b', Keyword, ('#pop', 'parenthesis', + 'parenthesis-open')), + ], + + 'while': [ + include('spaces'), + (r'\(', Punctuation, ('#pop', 'expr', 'parenthesis')), + ], + + 'for': [ + include('spaces'), + (r'\(', Punctuation, ('#pop', 'expr', 'parenthesis')), + ], + + 'if': [ + include('spaces'), + (r'\(', Punctuation, ('#pop', 'else', 'optional-semicolon', 'expr', + 'parenthesis')), + ], + + 'else': [ + include('spaces'), + (r'(?:else)\b', Keyword, ('#pop', 'expr')), + default('#pop'), + ], + + 'switch': [ + include('spaces'), + default(('#pop', 'switch-body', 'bracket-open', 'expr')), + ], + + 'switch-body': [ + include('spaces'), + (r'(?:case|default)\b', Keyword, ('case-block', 'case')), + (r'\}', Punctuation, '#pop'), + ], + + 'case': [ + include('spaces'), + (r':', Punctuation, '#pop'), + default(('#pop', 'case-sep', 'case-guard', 'expr')), + ], + + 'case-sep': [ + include('spaces'), + (r':', Punctuation, '#pop'), + (r',', Punctuation, ('#pop', 'case')), + ], + + 'case-guard': [ + include('spaces'), + (r'(?:if)\b', Keyword, ('#pop', 'parenthesis', 'parenthesis-open')), + default('#pop'), + ], + + # optional multiple expr under a case + 'case-block': [ + include('spaces'), + (r'(?!(?:case|default)\b|\})', Keyword, 'expr-statement'), + default('#pop'), + ], + + 'new': [ + include('spaces'), + 
default(('#pop', 'call', 'parenthesis-open', 'type')), + ], + + 'array-decl': [ + include('spaces'), + (r'\]', Punctuation, '#pop'), + default(('#pop', 'array-decl-sep', 'expr')), + ], + + 'array-decl-sep': [ + include('spaces'), + (r'\]', Punctuation, '#pop'), + (r',', Punctuation, ('#pop', 'array-decl')), + ], + + 'array-access': [ + include('spaces'), + default(('#pop', 'array-access-close', 'expr')), + ], + + 'array-access-close': [ + include('spaces'), + (r'\]', Punctuation, '#pop'), + ], + + 'comma': [ + include('spaces'), + (r',', Punctuation, '#pop'), + ], + + 'colon': [ + include('spaces'), + (r':', Punctuation, '#pop'), + ], + + 'semicolon': [ + include('spaces'), + (r';', Punctuation, '#pop'), + ], + + 'optional-semicolon': [ + include('spaces'), + (r';', Punctuation, '#pop'), + default('#pop'), + ], + + # identity that CAN be a Haxe keyword + 'ident': [ + include('spaces'), + (ident, Name, '#pop'), + ], + + 'dollar': [ + include('spaces'), + (r'\{', Punctuation, ('#pop', 'expr-chain', 'bracket-close', 'expr')), + default(('#pop', 'expr-chain')), + ], + + 'type-name': [ + include('spaces'), + (typeid, Name, '#pop'), + ], + + 'type-full-name': [ + include('spaces'), + (r'\.', Punctuation, 'ident'), + default('#pop'), + ], + + 'type': [ + include('spaces'), + (r'\?', Punctuation), + (ident, Name, ('#pop', 'type-check', 'type-full-name')), + (r'\{', Punctuation, ('#pop', 'type-check', 'type-struct')), + (r'\(', Punctuation, ('#pop', 'type-check', 'type-parenthesis')), + ], + + 'type-parenthesis': [ + include('spaces'), + default(('#pop', 'parenthesis-close', 'type')), + ], + + 'type-check': [ + include('spaces'), + (r'->', Punctuation, ('#pop', 'type')), + (r'<(?!=)', Punctuation, 'type-param'), + default('#pop'), + ], + + 'type-struct': [ + include('spaces'), + (r'\}', Punctuation, '#pop'), + (r'\?', Punctuation), + (r'>', Punctuation, ('comma', 'type')), + (ident_no_keyword, Name, ('#pop', 'type-struct-sep', 'type', 'colon')), + include('class-body'), + 
], + + 'type-struct-sep': [ + include('spaces'), + (r'\}', Punctuation, '#pop'), + (r',', Punctuation, ('#pop', 'type-struct')), + ], + + # type-param can be a normal type or a constant literal... + 'type-param-type': [ + # Float + (r'\.[0-9]+', Number.Float, '#pop'), + (r'[0-9]+[eE][+\-]?[0-9]+', Number.Float, '#pop'), + (r'[0-9]+\.[0-9]*[eE][+\-]?[0-9]+', Number.Float, '#pop'), + (r'[0-9]+\.[0-9]+', Number.Float, '#pop'), + (r'[0-9]+\.(?!' + ident + '|\.\.)', Number.Float, '#pop'), + + # Int + (r'0x[0-9a-fA-F]+', Number.Hex, '#pop'), + (r'[0-9]+', Number.Integer, '#pop'), + + # String + (r"'", String.Single, ('#pop', 'string-single')), + (r'"', String.Double, ('#pop', 'string-double')), + + # EReg + (r'~/(\\\\|\\/|[^/\n])*/[gim]*', String.Regex, '#pop'), + + # Array + (r'\[', Operator, ('#pop', 'array-decl')), + + include('type'), + ], + + # type-param part of a type + # ie. the path in Map + 'type-param': [ + include('spaces'), + default(('#pop', 'type-param-sep', 'type-param-type')), + ], + + 'type-param-sep': [ + include('spaces'), + (r'>', Punctuation, '#pop'), + (r',', Punctuation, ('#pop', 'type-param')), + ], + + # optional type-param that may include constraint + # ie. 
+ 'type-param-constraint': [ + include('spaces'), + (r'<(?!=)', Punctuation, ('#pop', 'type-param-constraint-sep', + 'type-param-constraint-flag', 'type-name')), + default('#pop'), + ], + + 'type-param-constraint-sep': [ + include('spaces'), + (r'>', Punctuation, '#pop'), + (r',', Punctuation, ('#pop', 'type-param-constraint-sep', + 'type-param-constraint-flag', 'type-name')), + ], + + # the optional constraint inside type-param + 'type-param-constraint-flag': [ + include('spaces'), + (r':', Punctuation, ('#pop', 'type-param-constraint-flag-type')), + default('#pop'), + ], + + 'type-param-constraint-flag-type': [ + include('spaces'), + (r'\(', Punctuation, ('#pop', 'type-param-constraint-flag-type-sep', + 'type')), + default(('#pop', 'type')), + ], + + 'type-param-constraint-flag-type-sep': [ + include('spaces'), + (r'\)', Punctuation, '#pop'), + (r',', Punctuation, 'type'), + ], + + # a parenthesis expr that contain exactly one expr + 'parenthesis': [ + include('spaces'), + default(('#pop', 'parenthesis-close', 'flag', 'expr')), + ], + + 'parenthesis-open': [ + include('spaces'), + (r'\(', Punctuation, '#pop'), + ], + + 'parenthesis-close': [ + include('spaces'), + (r'\)', Punctuation, '#pop'), + ], + + 'var': [ + include('spaces'), + (ident_no_keyword, Text, ('#pop', 'var-sep', 'assign', 'flag', 'prop-get-set')), + ], + + # optional more var decl. 
+ 'var-sep': [ + include('spaces'), + (r',', Punctuation, ('#pop', 'var')), + default('#pop'), + ], + + # optional assignment + 'assign': [ + include('spaces'), + (r'=', Operator, ('#pop', 'expr')), + default('#pop'), + ], + + # optional type flag + 'flag': [ + include('spaces'), + (r':', Punctuation, ('#pop', 'type')), + default('#pop'), + ], + + # colon as part of a ternary operator (?:) + 'ternary': [ + include('spaces'), + (r':', Operator, '#pop'), + ], + + # function call + 'call': [ + include('spaces'), + (r'\)', Punctuation, '#pop'), + default(('#pop', 'call-sep', 'expr')), + ], + + # after a call param + 'call-sep': [ + include('spaces'), + (r'\)', Punctuation, '#pop'), + (r',', Punctuation, ('#pop', 'call')), + ], + + # bracket can be block or object + 'bracket': [ + include('spaces'), + (r'(?!(?:\$\s*[a-z]\b|\$(?!'+ident+')))' + ident_no_keyword, Name, + ('#pop', 'bracket-check')), + (r"'", String.Single, ('#pop', 'bracket-check', 'string-single')), + (r'"', String.Double, ('#pop', 'bracket-check', 'string-double')), + default(('#pop', 'block')), + ], + + 'bracket-check': [ + include('spaces'), + (r':', Punctuation, ('#pop', 'object-sep', 'expr')), # is object + default(('#pop', 'block', 'optional-semicolon', 'expr-chain')), # is block + ], + + # code block + 'block': [ + include('spaces'), + (r'\}', Punctuation, '#pop'), + default('expr-statement'), + ], + + # object in key-value pairs + 'object': [ + include('spaces'), + (r'\}', Punctuation, '#pop'), + default(('#pop', 'object-sep', 'expr', 'colon', 'ident-or-string')) + ], + + # a key of an object + 'ident-or-string': [ + include('spaces'), + (ident_no_keyword, Name, '#pop'), + (r"'", String.Single, ('#pop', 'string-single')), + (r'"', String.Double, ('#pop', 'string-double')), + ], + + # after a key-value pair in object + 'object-sep': [ + include('spaces'), + (r'\}', Punctuation, '#pop'), + (r',', Punctuation, ('#pop', 'object')), + ], + + + + } + + def analyse_text(text): + if 
re.match(r'\w+\s*:\s*\w', text): + return 0.3 + + +class HxmlLexer(RegexLexer): + """ + Lexer for `haXe build `_ files. + + .. versionadded:: 1.6 + """ + name = 'Hxml' + aliases = ['haxeml', 'hxml'] + filenames = ['*.hxml'] + + tokens = { + 'root': [ + # Seperator + (r'(--)(next)', bygroups(Punctuation, Generic.Heading)), + # Compiler switches with one dash + (r'(-)(prompt|debug|v)', bygroups(Punctuation, Keyword.Keyword)), + # Compilerswitches with two dashes + (r'(--)(neko-source|flash-strict|flash-use-stage|no-opt|no-traces|' + r'no-inline|times|no-output)', bygroups(Punctuation, Keyword)), + # Targets and other options that take an argument + (r'(-)(cpp|js|neko|x|as3|swf9?|swf-lib|php|xml|main|lib|D|resource|' + r'cp|cmd)( +)(.+)', + bygroups(Punctuation, Keyword, Whitespace, String)), + # Options that take only numerical arguments + (r'(-)(swf-version)( +)(\d+)', + bygroups(Punctuation, Keyword, Number.Integer)), + # An Option that defines the size, the fps and the background + # color of an flash movie + (r'(-)(swf-header)( +)(\d+)(:)(\d+)(:)(\d+)(:)([A-Fa-f0-9]{6})', + bygroups(Punctuation, Keyword, Whitespace, Number.Integer, + Punctuation, Number.Integer, Punctuation, Number.Integer, + Punctuation, Number.Hex)), + # options with two dashes that takes arguments + (r'(--)(js-namespace|php-front|php-lib|remap|gen-hx-classes)( +)' + r'(.+)', bygroups(Punctuation, Keyword, Whitespace, String)), + # Single line comment, multiline ones are not allowed. + (r'#.*', Comment.Single) + ] + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/hdl.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/hdl.py new file mode 100644 index 0000000..c8e3471 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/hdl.py @@ -0,0 +1,375 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.hdl + ~~~~~~~~~~~~~~~~~~~ + + Lexers for hardware descriptor languages. 
+ + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, bygroups, include, using, this, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Error + +__all__ = ['VerilogLexer', 'SystemVerilogLexer', 'VhdlLexer'] + + +class VerilogLexer(RegexLexer): + """ + For verilog source code with preprocessor directives. + + .. versionadded:: 1.4 + """ + name = 'verilog' + aliases = ['verilog', 'v'] + filenames = ['*.v'] + mimetypes = ['text/x-verilog'] + + #: optional Comment or Whitespace + _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+' + + tokens = { + 'root': [ + (r'^\s*`define', Comment.Preproc, 'macro'), + (r'\n', Text), + (r'\s+', Text), + (r'\\\n', Text), # line continuation + (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single), + (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), + (r'[{}#@]', Punctuation), + (r'L?"', String, 'string'), + (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char), + (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float), + (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), + (r'([0-9]+)|(\'h)[0-9a-fA-F]+', Number.Hex), + (r'([0-9]+)|(\'b)[01]+', Number.Bin), + (r'([0-9]+)|(\'d)[0-9]+', Number.Integer), + (r'([0-9]+)|(\'o)[0-7]+', Number.Oct), + (r'\'[01xz]', Number), + (r'\d+[Ll]?', Number.Integer), + (r'\*/', Error), + (r'[~!%^&*+=|?:<>/-]', Operator), + (r'[()\[\],.;\']', Punctuation), + (r'`[a-zA-Z_]\w*', Name.Constant), + + (r'^(\s*)(package)(\s+)', bygroups(Text, Keyword.Namespace, Text)), + (r'^(\s*)(import)(\s+)', bygroups(Text, Keyword.Namespace, Text), + 'import'), + + (words(( + 'always', 'always_comb', 'always_ff', 'always_latch', 'and', + 'assign', 'automatic', 'begin', 'break', 'buf', 'bufif0', 'bufif1', + 'case', 'casex', 'casez', 'cmos', 'const', 'continue', 'deassign', + 'default', 'defparam', 'disable', 'do', 'edge', 'else', 'end', 'endcase', 
+ 'endfunction', 'endgenerate', 'endmodule', 'endpackage', 'endprimitive', + 'endspecify', 'endtable', 'endtask', 'enum', 'event', 'final', 'for', + 'force', 'forever', 'fork', 'function', 'generate', 'genvar', 'highz0', + 'highz1', 'if', 'initial', 'inout', 'input', 'integer', 'join', 'large', + 'localparam', 'macromodule', 'medium', 'module', 'nand', 'negedge', + 'nmos', 'nor', 'not', 'notif0', 'notif1', 'or', 'output', 'packed', + 'parameter', 'pmos', 'posedge', 'primitive', 'pull0', 'pull1', + 'pulldown', 'pullup', 'rcmos', 'ref', 'release', 'repeat', 'return', + 'rnmos', 'rpmos', 'rtran', 'rtranif0', 'rtranif1', 'scalared', 'signed', + 'small', 'specify', 'specparam', 'strength', 'string', 'strong0', + 'strong1', 'struct', 'table', 'task', 'tran', 'tranif0', 'tranif1', + 'type', 'typedef', 'unsigned', 'var', 'vectored', 'void', 'wait', + 'weak0', 'weak1', 'while', 'xnor', 'xor'), suffix=r'\b'), + Keyword), + + (words(( + 'accelerate', 'autoexpand_vectornets', 'celldefine', 'default_nettype', + 'else', 'elsif', 'endcelldefine', 'endif', 'endprotect', 'endprotected', + 'expand_vectornets', 'ifdef', 'ifndef', 'include', 'noaccelerate', + 'noexpand_vectornets', 'noremove_gatenames', 'noremove_netnames', + 'nounconnected_drive', 'protect', 'protected', 'remove_gatenames', + 'remove_netnames', 'resetall', 'timescale', 'unconnected_drive', + 'undef'), prefix=r'`', suffix=r'\b'), + Comment.Preproc), + + (words(( + 'bits', 'bitstoreal', 'bitstoshortreal', 'countdrivers', 'display', 'fclose', + 'fdisplay', 'finish', 'floor', 'fmonitor', 'fopen', 'fstrobe', 'fwrite', + 'getpattern', 'history', 'incsave', 'input', 'itor', 'key', 'list', 'log', + 'monitor', 'monitoroff', 'monitoron', 'nokey', 'nolog', 'printtimescale', + 'random', 'readmemb', 'readmemh', 'realtime', 'realtobits', 'reset', + 'reset_count', 'reset_value', 'restart', 'rtoi', 'save', 'scale', 'scope', + 'shortrealtobits', 'showscopes', 'showvariables', 'showvars', 'sreadmemb', + 'sreadmemh', 'stime', 'stop', 
'strobe', 'time', 'timeformat', 'write'), + prefix=r'\$', suffix=r'\b'), + Name.Builtin), + + (words(( + 'byte', 'shortint', 'int', 'longint', 'integer', 'time', + 'bit', 'logic', 'reg', 'supply0', 'supply1', 'tri', 'triand', + 'trior', 'tri0', 'tri1', 'trireg', 'uwire', 'wire', 'wand', 'wo' + 'shortreal', 'real', 'realtime'), suffix=r'\b'), + Keyword.Type), + ('[a-zA-Z_]\w*:(?!:)', Name.Label), + ('[a-zA-Z_]\w*', Name), + ], + 'string': [ + (r'"', String, '#pop'), + (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), + (r'[^\\"\n]+', String), # all other characters + (r'\\\n', String), # line continuation + (r'\\', String), # stray backslash + ], + 'macro': [ + (r'[^/\n]+', Comment.Preproc), + (r'/[*](.|\n)*?[*]/', Comment.Multiline), + (r'//.*?\n', Comment.Single, '#pop'), + (r'/', Comment.Preproc), + (r'(?<=\\)\n', Comment.Preproc), + (r'\n', Comment.Preproc, '#pop'), + ], + 'import': [ + (r'[\w:]+\*?', Name.Namespace, '#pop') + ] + } + + def get_tokens_unprocessed(self, text): + for index, token, value in \ + RegexLexer.get_tokens_unprocessed(self, text): + # Convention: mark all upper case names as constants + if token is Name: + if value.isupper(): + token = Name.Constant + yield index, token, value + + +class SystemVerilogLexer(RegexLexer): + """ + Extends verilog lexer to recognise all SystemVerilog keywords from IEEE + 1800-2009 standard. + + .. 
versionadded:: 1.5 + """ + name = 'systemverilog' + aliases = ['systemverilog', 'sv'] + filenames = ['*.sv', '*.svh'] + mimetypes = ['text/x-systemverilog'] + + #: optional Comment or Whitespace + _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+' + + tokens = { + 'root': [ + (r'^\s*`define', Comment.Preproc, 'macro'), + (r'^(\s*)(package)(\s+)', bygroups(Text, Keyword.Namespace, Text)), + (r'^(\s*)(import)(\s+)', bygroups(Text, Keyword.Namespace, Text), 'import'), + + (r'\n', Text), + (r'\s+', Text), + (r'\\\n', Text), # line continuation + (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single), + (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), + (r'[{}#@]', Punctuation), + (r'L?"', String, 'string'), + (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char), + (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float), + (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), + (r'([0-9]+)|(\'h)[0-9a-fA-F]+', Number.Hex), + (r'([0-9]+)|(\'b)[01]+', Number.Bin), + (r'([0-9]+)|(\'d)[0-9]+', Number.Integer), + (r'([0-9]+)|(\'o)[0-7]+', Number.Oct), + (r'\'[01xz]', Number), + (r'\d+[Ll]?', Number.Integer), + (r'\*/', Error), + (r'[~!%^&*+=|?:<>/-]', Operator), + (r'[()\[\],.;\']', Punctuation), + (r'`[a-zA-Z_]\w*', Name.Constant), + + (words(( + 'accept_on', 'alias', 'always', 'always_comb', 'always_ff', 'always_latch', + 'and', 'assert', 'assign', 'assume', 'automatic', 'before', 'begin', 'bind', 'bins', + 'binsof', 'bit', 'break', 'buf', 'bufif0', 'bufif1', 'byte', 'case', 'casex', 'casez', + 'cell', 'chandle', 'checker', 'class', 'clocking', 'cmos', 'config', 'const', 'constraint', + 'context', 'continue', 'cover', 'covergroup', 'coverpoint', 'cross', 'deassign', + 'default', 'defparam', 'design', 'disable', 'dist', 'do', 'edge', 'else', 'end', 'endcase', + 'endchecker', 'endclass', 'endclocking', 'endconfig', 'endfunction', 'endgenerate', + 'endgroup', 'endinterface', 'endmodule', 'endpackage', 'endprimitive', + 'endprogram', 'endproperty', 'endsequence', 
'endspecify', 'endtable', + 'endtask', 'enum', 'event', 'eventually', 'expect', 'export', 'extends', 'extern', + 'final', 'first_match', 'for', 'force', 'foreach', 'forever', 'fork', 'forkjoin', + 'function', 'generate', 'genvar', 'global', 'highz0', 'highz1', 'if', 'iff', 'ifnone', + 'ignore_bins', 'illegal_bins', 'implies', 'import', 'incdir', 'include', + 'initial', 'inout', 'input', 'inside', 'instance', 'int', 'integer', 'interface', + 'intersect', 'join', 'join_any', 'join_none', 'large', 'let', 'liblist', 'library', + 'local', 'localparam', 'logic', 'longint', 'macromodule', 'matches', 'medium', + 'modport', 'module', 'nand', 'negedge', 'new', 'nexttime', 'nmos', 'nor', 'noshowcancelled', + 'not', 'notif0', 'notif1', 'null', 'or', 'output', 'package', 'packed', 'parameter', + 'pmos', 'posedge', 'primitive', 'priority', 'program', 'property', 'protected', + 'pull0', 'pull1', 'pulldown', 'pullup', 'pulsestyle_ondetect', 'pulsestyle_onevent', + 'pure', 'rand', 'randc', 'randcase', 'randsequence', 'rcmos', 'real', 'realtime', + 'ref', 'reg', 'reject_on', 'release', 'repeat', 'restrict', 'return', 'rnmos', + 'rpmos', 'rtran', 'rtranif0', 'rtranif1', 's_always', 's_eventually', 's_nexttime', + 's_until', 's_until_with', 'scalared', 'sequence', 'shortint', 'shortreal', + 'showcancelled', 'signed', 'small', 'solve', 'specify', 'specparam', 'static', + 'string', 'strong', 'strong0', 'strong1', 'struct', 'super', 'supply0', 'supply1', + 'sync_accept_on', 'sync_reject_on', 'table', 'tagged', 'task', 'this', 'throughout', + 'time', 'timeprecision', 'timeunit', 'tran', 'tranif0', 'tranif1', 'tri', 'tri0', + 'tri1', 'triand', 'trior', 'trireg', 'type', 'typedef', 'union', 'unique', 'unique0', + 'unsigned', 'until', 'until_with', 'untyped', 'use', 'uwire', 'var', 'vectored', + 'virtual', 'void', 'wait', 'wait_order', 'wand', 'weak', 'weak0', 'weak1', 'while', + 'wildcard', 'wire', 'with', 'within', 'wor', 'xnor', 'xor'), suffix=r'\b'), + Keyword), + + (words(( + 
'`__FILE__', '`__LINE__', '`begin_keywords', '`celldefine', '`default_nettype', + '`define', '`else', '`elsif', '`end_keywords', '`endcelldefine', '`endif', + '`ifdef', '`ifndef', '`include', '`line', '`nounconnected_drive', '`pragma', + '`resetall', '`timescale', '`unconnected_drive', '`undef', '`undefineall'), + suffix=r'\b'), + Comment.Preproc), + + (words(( + '$display', '$displayb', '$displayh', '$displayo', '$dumpall', '$dumpfile', + '$dumpflush', '$dumplimit', '$dumpoff', '$dumpon', '$dumpports', + '$dumpportsall', '$dumpportsflush', '$dumpportslimit', '$dumpportsoff', + '$dumpportson', '$dumpvars', '$fclose', '$fdisplay', '$fdisplayb', + '$fdisplayh', '$fdisplayo', '$feof', '$ferror', '$fflush', '$fgetc', + '$fgets', '$finish', '$fmonitor', '$fmonitorb', '$fmonitorh', '$fmonitoro', + '$fopen', '$fread', '$fscanf', '$fseek', '$fstrobe', '$fstrobeb', '$fstrobeh', + '$fstrobeo', '$ftell', '$fwrite', '$fwriteb', '$fwriteh', '$fwriteo', + '$monitor', '$monitorb', '$monitorh', '$monitoro', '$monitoroff', + '$monitoron', '$plusargs', '$random', '$readmemb', '$readmemh', '$rewind', + '$sformat', '$sformatf', '$sscanf', '$strobe', '$strobeb', '$strobeh', '$strobeo', + '$swrite', '$swriteb', '$swriteh', '$swriteo', '$test', '$ungetc', + '$value$plusargs', '$write', '$writeb', '$writeh', '$writememb', + '$writememh', '$writeo'), suffix=r'\b'), + Name.Builtin), + + (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'), + (words(( + 'byte', 'shortint', 'int', 'longint', 'integer', 'time', + 'bit', 'logic', 'reg', 'supply0', 'supply1', 'tri', 'triand', + 'trior', 'tri0', 'tri1', 'trireg', 'uwire', 'wire', 'wand', 'wo' + 'shortreal', 'real', 'realtime'), suffix=r'\b'), + Keyword.Type), + ('[a-zA-Z_]\w*:(?!:)', Name.Label), + ('[a-zA-Z_]\w*', Name), + ], + 'classname': [ + (r'[a-zA-Z_]\w*', Name.Class, '#pop'), + ], + 'string': [ + (r'"', String, '#pop'), + (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), + (r'[^\\"\n]+', String), # all other 
characters + (r'\\\n', String), # line continuation + (r'\\', String), # stray backslash + ], + 'macro': [ + (r'[^/\n]+', Comment.Preproc), + (r'/[*](.|\n)*?[*]/', Comment.Multiline), + (r'//.*?\n', Comment.Single, '#pop'), + (r'/', Comment.Preproc), + (r'(?<=\\)\n', Comment.Preproc), + (r'\n', Comment.Preproc, '#pop'), + ], + 'import': [ + (r'[\w:]+\*?', Name.Namespace, '#pop') + ] + } + + def get_tokens_unprocessed(self, text): + for index, token, value in \ + RegexLexer.get_tokens_unprocessed(self, text): + # Convention: mark all upper case names as constants + if token is Name: + if value.isupper(): + token = Name.Constant + yield index, token, value + + +class VhdlLexer(RegexLexer): + """ + For VHDL source code. + + .. versionadded:: 1.5 + """ + name = 'vhdl' + aliases = ['vhdl'] + filenames = ['*.vhdl', '*.vhd'] + mimetypes = ['text/x-vhdl'] + flags = re.MULTILINE | re.IGNORECASE + + tokens = { + 'root': [ + (r'\n', Text), + (r'\s+', Text), + (r'\\\n', Text), # line continuation + (r'--.*?$', Comment.Single), + (r"'(U|X|0|1|Z|W|L|H|-)'", String.Char), + (r'[~!%^&*+=|?:<>/-]', Operator), + (r"'[a-z_]\w*", Name.Attribute), + (r'[()\[\],.;\']', Punctuation), + (r'"[^\n\\]*"', String), + + (r'(library)(\s+)([a-z_]\w*)', + bygroups(Keyword, Text, Name.Namespace)), + (r'(use)(\s+)(entity)', bygroups(Keyword, Text, Keyword)), + (r'(use)(\s+)([a-z_][\w.]*)', + bygroups(Keyword, Text, Name.Namespace)), + (r'(entity|component)(\s+)([a-z_]\w*)', + bygroups(Keyword, Text, Name.Class)), + (r'(architecture|configuration)(\s+)([a-z_]\w*)(\s+)' + r'(of)(\s+)([a-z_]\w*)(\s+)(is)', + bygroups(Keyword, Text, Name.Class, Text, Keyword, Text, + Name.Class, Text, Keyword)), + + (r'(end)(\s+)', bygroups(using(this), Text), 'endblock'), + + include('types'), + include('keywords'), + include('numbers'), + + (r'[a-z_]\w*', Name), + ], + 'endblock': [ + include('keywords'), + (r'[a-z_]\w*', Name.Class), + (r'(\s+)', Text), + (r';', Punctuation, '#pop'), + ], + 'types': [ + (words(( + 
'boolean', 'bit', 'character', 'severity_level', 'integer', 'time', + 'delay_length', 'natural', 'positive', 'string', 'bit_vector', + 'file_open_kind', 'file_open_status', 'std_ulogic', 'std_ulogic_vector', + 'std_logic', 'std_logic_vector'), suffix=r'\b'), + Keyword.Type), + ], + 'keywords': [ + (words(( + 'abs', 'access', 'after', 'alias', 'all', 'and', + 'architecture', 'array', 'assert', 'attribute', 'begin', 'block', + 'body', 'buffer', 'bus', 'case', 'component', 'configuration', + 'constant', 'disconnect', 'downto', 'else', 'elsif', 'end', + 'entity', 'exit', 'file', 'for', 'function', 'generate', + 'generic', 'group', 'guarded', 'if', 'impure', 'in', + 'inertial', 'inout', 'is', 'label', 'library', 'linkage', + 'literal', 'loop', 'map', 'mod', 'nand', 'new', + 'next', 'nor', 'not', 'null', 'of', 'on', + 'open', 'or', 'others', 'out', 'package', 'port', + 'postponed', 'procedure', 'process', 'pure', 'range', 'record', + 'register', 'reject', 'return', 'rol', 'ror', 'select', + 'severity', 'signal', 'shared', 'sla', 'sli', 'sra', + 'srl', 'subtype', 'then', 'to', 'transport', 'type', + 'units', 'until', 'use', 'variable', 'wait', 'when', + 'while', 'with', 'xnor', 'xor'), suffix=r'\b'), + Keyword), + ], + 'numbers': [ + (r'\d{1,2}#[0-9a-f_]+#?', Number.Integer), + (r'\d+', Number.Integer), + (r'(\d+\.\d*|\.\d+|\d+)E[+-]?\d+', Number.Float), + (r'X"[0-9a-f_]+"', Number.Hex), + (r'O"[0-7_]+"', Number.Oct), + (r'B"[01_]+"', Number.Bin), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/html.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/html.py new file mode 100644 index 0000000..8cf331d --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/html.py @@ -0,0 +1,589 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.html + ~~~~~~~~~~~~~~~~~~~~ + + Lexers for HTML, XML and related markup. 
+ + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, bygroups, \ + default, using +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Punctuation +from pygments.util import looks_like_xml, html_doctype_matches + +from pygments.lexers.javascript import JavascriptLexer +from pygments.lexers.jvm import ScalaLexer +from pygments.lexers.css import CssLexer, _indentation, _starts_block +from pygments.lexers.ruby import RubyLexer + +__all__ = ['HtmlLexer', 'DtdLexer', 'XmlLexer', 'XsltLexer', 'HamlLexer', + 'ScamlLexer', 'JadeLexer'] + + +class HtmlLexer(RegexLexer): + """ + For HTML 4 and XHTML 1 markup. Nested JavaScript and CSS is highlighted + by the appropriate lexer. + """ + + name = 'HTML' + aliases = ['html'] + filenames = ['*.html', '*.htm', '*.xhtml', '*.xslt'] + mimetypes = ['text/html', 'application/xhtml+xml'] + + flags = re.IGNORECASE | re.DOTALL + tokens = { + 'root': [ + ('[^<&]+', Text), + (r'&\S*?;', Name.Entity), + (r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc), + ('', Comment, '#pop'), + ('-', Comment), + ], + 'tag': [ + (r'\s+', Text), + (r'([\w:-]+\s*=)(\s*)', bygroups(Name.Attribute, Text), 'attr'), + (r'[\w:-]+', Name.Attribute), + (r'/?\s*>', Name.Tag, '#pop'), + ], + 'script-content': [ + (r'<\s*/\s*script\s*>', Name.Tag, '#pop'), + (r'.+?(?=<\s*/\s*script\s*>)', using(JavascriptLexer)), + ], + 'style-content': [ + (r'<\s*/\s*style\s*>', Name.Tag, '#pop'), + (r'.+?(?=<\s*/\s*style\s*>)', using(CssLexer)), + ], + 'attr': [ + ('".*?"', String, '#pop'), + ("'.*?'", String, '#pop'), + (r'[^\s>]+', String, '#pop'), + ], + } + + def analyse_text(text): + if html_doctype_matches(text): + return 0.5 + + +class DtdLexer(RegexLexer): + """ + A lexer for DTDs (Document Type Definitions). + + .. 
versionadded:: 1.5 + """ + + flags = re.MULTILINE | re.DOTALL + + name = 'DTD' + aliases = ['dtd'] + filenames = ['*.dtd'] + mimetypes = ['application/xml-dtd'] + + tokens = { + 'root': [ + include('common'), + + (r'(\s]+)', + bygroups(Keyword, Text, Name.Tag)), + (r'PUBLIC|SYSTEM', Keyword.Constant), + (r'[\[\]>]', Keyword), + ], + + 'common': [ + (r'\s+', Text), + (r'(%|&)[^;]*;', Name.Entity), + ('', Comment, '#pop'), + ('-', Comment), + ], + + 'element': [ + include('common'), + (r'EMPTY|ANY|#PCDATA', Keyword.Constant), + (r'[^>\s|()?+*,]+', Name.Tag), + (r'>', Keyword, '#pop'), + ], + + 'attlist': [ + include('common'), + (r'CDATA|IDREFS|IDREF|ID|NMTOKENS|NMTOKEN|ENTITIES|ENTITY|NOTATION', + Keyword.Constant), + (r'#REQUIRED|#IMPLIED|#FIXED', Keyword.Constant), + (r'xml:space|xml:lang', Keyword.Reserved), + (r'[^>\s|()?+*,]+', Name.Attribute), + (r'>', Keyword, '#pop'), + ], + + 'entity': [ + include('common'), + (r'SYSTEM|PUBLIC|NDATA', Keyword.Constant), + (r'[^>\s|()?+*,]+', Name.Entity), + (r'>', Keyword, '#pop'), + ], + + 'notation': [ + include('common'), + (r'SYSTEM|PUBLIC', Keyword.Constant), + (r'[^>\s|()?+*,]+', Name.Attribute), + (r'>', Keyword, '#pop'), + ], + } + + def analyse_text(text): + if not looks_like_xml(text) and \ + ('', Comment.Preproc), + ('', Comment, '#pop'), + ('-', Comment), + ], + 'tag': [ + (r'\s+', Text), + (r'[\w.:-]+\s*=', Name.Attribute, 'attr'), + (r'/?\s*>', Name.Tag, '#pop'), + ], + 'attr': [ + ('\s+', Text), + ('".*?"', String, '#pop'), + ("'.*?'", String, '#pop'), + (r'[^\s>]+', String, '#pop'), + ], + } + + def analyse_text(text): + if looks_like_xml(text): + return 0.45 # less than HTML + + +class XsltLexer(XmlLexer): + """ + A lexer for XSLT. + + .. 
versionadded:: 0.10 + """ + + name = 'XSLT' + aliases = ['xslt'] + filenames = ['*.xsl', '*.xslt', '*.xpl'] # xpl is XProc + mimetypes = ['application/xsl+xml', 'application/xslt+xml'] + + EXTRA_KEYWORDS = set(( + 'apply-imports', 'apply-templates', 'attribute', + 'attribute-set', 'call-template', 'choose', 'comment', + 'copy', 'copy-of', 'decimal-format', 'element', 'fallback', + 'for-each', 'if', 'import', 'include', 'key', 'message', + 'namespace-alias', 'number', 'otherwise', 'output', 'param', + 'preserve-space', 'processing-instruction', 'sort', + 'strip-space', 'stylesheet', 'template', 'text', 'transform', + 'value-of', 'variable', 'when', 'with-param' + )) + + def get_tokens_unprocessed(self, text): + for index, token, value in XmlLexer.get_tokens_unprocessed(self, text): + m = re.match(']*)/?>?', value) + + if token is Name.Tag and m and m.group(1) in self.EXTRA_KEYWORDS: + yield index, Keyword, value + else: + yield index, token, value + + def analyse_text(text): + if looks_like_xml(text) and ']{1,2}(?=[ \t=])', Punctuation), + include('eval-or-plain'), + ], + + 'plain': [ + (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text), + (r'(#\{)(' + _dot + '*?)(\})', + bygroups(String.Interpol, using(RubyLexer), String.Interpol)), + (r'\n', Text, 'root'), + ], + + 'html-attributes': [ + (r'\s+', Text), + (r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'), + (r'[\w:-]+', Name.Attribute), + (r'\)', Text, '#pop'), + ], + + 'html-attribute-value': [ + (r'[ \t]+', Text), + (r'\w+', Name.Variable, '#pop'), + (r'@\w+', Name.Variable.Instance, '#pop'), + (r'\$\w+', Name.Variable.Global, '#pop'), + (r"'(\\\\|\\'|[^'\n])*'", String, '#pop'), + (r'"(\\\\|\\"|[^"\n])*"', String, '#pop'), + ], + + 'html-comment-block': [ + (_dot + '+', Comment), + (r'\n', Text, 'root'), + ], + + 'haml-comment-block': [ + (_dot + '+', Comment.Preproc), + (r'\n', Text, 'root'), + ], + + 'filter-block': [ + (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator), + (r'(#\{)(' + _dot + '*?)(\})', + 
bygroups(String.Interpol, using(RubyLexer), String.Interpol)), + (r'\n', Text, 'root'), + ], + } + + +class ScamlLexer(ExtendedRegexLexer): + """ + For `Scaml markup `_. Scaml is Haml for Scala. + + .. versionadded:: 1.4 + """ + + name = 'Scaml' + aliases = ['scaml'] + filenames = ['*.scaml'] + mimetypes = ['text/x-scaml'] + + flags = re.IGNORECASE + # Scaml does not yet support the " |\n" notation to + # wrap long lines. Once it does, use the custom faux + # dot instead. + # _dot = r'(?: \|\n(?=.* \|)|.)' + _dot = r'.' + + tokens = { + 'root': [ + (r'[ \t]*\n', Text), + (r'[ \t]*', _indentation), + ], + + 'css': [ + (r'\.[\w:-]+', Name.Class, 'tag'), + (r'\#[\w:-]+', Name.Function, 'tag'), + ], + + 'eval-or-plain': [ + (r'[&!]?==', Punctuation, 'plain'), + (r'([&!]?[=~])(' + _dot + r'*\n)', + bygroups(Punctuation, using(ScalaLexer)), + 'root'), + default('plain'), + ], + + 'content': [ + include('css'), + (r'%[\w:-]+', Name.Tag, 'tag'), + (r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'), + (r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)', + bygroups(Comment, Comment.Special, Comment), + '#pop'), + (r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'), + '#pop'), + (r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc, + 'scaml-comment-block'), '#pop'), + (r'(-@\s*)(import)?(' + _dot + r'*\n)', + bygroups(Punctuation, Keyword, using(ScalaLexer)), + '#pop'), + (r'(-)(' + _dot + r'*\n)', + bygroups(Punctuation, using(ScalaLexer)), + '#pop'), + (r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'), + '#pop'), + include('eval-or-plain'), + ], + + 'tag': [ + include('css'), + (r'\{(,\n|' + _dot + ')*?\}', using(ScalaLexer)), + (r'\[' + _dot + '*?\]', using(ScalaLexer)), + (r'\(', Text, 'html-attributes'), + (r'/[ \t]*\n', Punctuation, '#pop:2'), + (r'[<>]{1,2}(?=[ \t=])', Punctuation), + include('eval-or-plain'), + ], + + 'plain': [ + (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text), + (r'(#\{)(' + _dot + '*?)(\})', + bygroups(String.Interpol, 
using(ScalaLexer), String.Interpol)), + (r'\n', Text, 'root'), + ], + + 'html-attributes': [ + (r'\s+', Text), + (r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'), + (r'[\w:-]+', Name.Attribute), + (r'\)', Text, '#pop'), + ], + + 'html-attribute-value': [ + (r'[ \t]+', Text), + (r'\w+', Name.Variable, '#pop'), + (r'@\w+', Name.Variable.Instance, '#pop'), + (r'\$\w+', Name.Variable.Global, '#pop'), + (r"'(\\\\|\\'|[^'\n])*'", String, '#pop'), + (r'"(\\\\|\\"|[^"\n])*"', String, '#pop'), + ], + + 'html-comment-block': [ + (_dot + '+', Comment), + (r'\n', Text, 'root'), + ], + + 'scaml-comment-block': [ + (_dot + '+', Comment.Preproc), + (r'\n', Text, 'root'), + ], + + 'filter-block': [ + (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator), + (r'(#\{)(' + _dot + '*?)(\})', + bygroups(String.Interpol, using(ScalaLexer), String.Interpol)), + (r'\n', Text, 'root'), + ], + } + + +class JadeLexer(ExtendedRegexLexer): + """ + For Jade markup. + Jade is a variant of Scaml, see: + http://scalate.fusesource.org/documentation/scaml-reference.html + + .. versionadded:: 1.4 + """ + + name = 'Jade' + aliases = ['jade'] + filenames = ['*.jade'] + mimetypes = ['text/x-jade'] + + flags = re.IGNORECASE + _dot = r'.' + + tokens = { + 'root': [ + (r'[ \t]*\n', Text), + (r'[ \t]*', _indentation), + ], + + 'css': [ + (r'\.[\w:-]+', Name.Class, 'tag'), + (r'\#[\w:-]+', Name.Function, 'tag'), + ], + + 'eval-or-plain': [ + (r'[&!]?==', Punctuation, 'plain'), + (r'([&!]?[=~])(' + _dot + r'*\n)', + bygroups(Punctuation, using(ScalaLexer)), 'root'), + default('plain'), + ], + + 'content': [ + include('css'), + (r'!!!' 
+ _dot + r'*\n', Name.Namespace, '#pop'), + (r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)', + bygroups(Comment, Comment.Special, Comment), + '#pop'), + (r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'), + '#pop'), + (r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc, + 'scaml-comment-block'), '#pop'), + (r'(-@\s*)(import)?(' + _dot + r'*\n)', + bygroups(Punctuation, Keyword, using(ScalaLexer)), + '#pop'), + (r'(-)(' + _dot + r'*\n)', + bygroups(Punctuation, using(ScalaLexer)), + '#pop'), + (r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'), + '#pop'), + (r'[\w:-]+', Name.Tag, 'tag'), + (r'\|', Text, 'eval-or-plain'), + ], + + 'tag': [ + include('css'), + (r'\{(,\n|' + _dot + ')*?\}', using(ScalaLexer)), + (r'\[' + _dot + '*?\]', using(ScalaLexer)), + (r'\(', Text, 'html-attributes'), + (r'/[ \t]*\n', Punctuation, '#pop:2'), + (r'[<>]{1,2}(?=[ \t=])', Punctuation), + include('eval-or-plain'), + ], + + 'plain': [ + (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text), + (r'(#\{)(' + _dot + '*?)(\})', + bygroups(String.Interpol, using(ScalaLexer), String.Interpol)), + (r'\n', Text, 'root'), + ], + + 'html-attributes': [ + (r'\s+', Text), + (r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'), + (r'[\w:-]+', Name.Attribute), + (r'\)', Text, '#pop'), + ], + + 'html-attribute-value': [ + (r'[ \t]+', Text), + (r'\w+', Name.Variable, '#pop'), + (r'@\w+', Name.Variable.Instance, '#pop'), + (r'\$\w+', Name.Variable.Global, '#pop'), + (r"'(\\\\|\\'|[^'\n])*'", String, '#pop'), + (r'"(\\\\|\\"|[^"\n])*"', String, '#pop'), + ], + + 'html-comment-block': [ + (_dot + '+', Comment), + (r'\n', Text, 'root'), + ], + + 'scaml-comment-block': [ + (_dot + '+', Comment.Preproc), + (r'\n', Text, 'root'), + ], + + 'filter-block': [ + (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator), + (r'(#\{)(' + _dot + '*?)(\})', + bygroups(String.Interpol, using(ScalaLexer), String.Interpol)), + (r'\n', Text, 'root'), + ], + } diff --git 
a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/idl.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/idl.py new file mode 100644 index 0000000..7941028 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/idl.py @@ -0,0 +1,262 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.idl + ~~~~~~~~~~~~~~~~~~~ + + Lexers for IDL. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, words +from pygments.token import Text, Comment, Operator, Keyword, Name, Number + +__all__ = ['IDLLexer'] + + +class IDLLexer(RegexLexer): + """ + Pygments Lexer for IDL (Interactive Data Language). + + .. versionadded:: 1.6 + """ + name = 'IDL' + aliases = ['idl'] + filenames = ['*.pro'] + mimetypes = ['text/idl'] + + flags = re.IGNORECASE | re.MULTILINE + + _RESERVED = ( + 'and', 'begin', 'break', 'case', 'common', 'compile_opt', + 'continue', 'do', 'else', 'end', 'endcase', 'elseelse', + 'endfor', 'endforeach', 'endif', 'endrep', 'endswitch', + 'endwhile', 'eq', 'for', 'foreach', 'forward_function', + 'function', 'ge', 'goto', 'gt', 'if', 'inherits', 'le', + 'lt', 'mod', 'ne', 'not', 'of', 'on_ioerror', 'or', 'pro', + 'repeat', 'switch', 'then', 'until', 'while', 'xor') + """Reserved words from: http://www.exelisvis.com/docs/reswords.html""" + + _BUILTIN_LIB = ( + 'abs', 'acos', 'adapt_hist_equal', 'alog', 'alog10', + 'amoeba', 'annotate', 'app_user_dir', 'app_user_dir_query', + 'arg_present', 'array_equal', 'array_indices', 'arrow', + 'ascii_template', 'asin', 'assoc', 'atan', 'axis', + 'a_correlate', 'bandpass_filter', 'bandreject_filter', + 'barplot', 'bar_plot', 'beseli', 'beselj', 'beselk', + 'besely', 'beta', 'bilinear', 'binary_template', 'bindgen', + 'binomial', 'bin_date', 'bit_ffs', 'bit_population', + 'blas_axpy', 'blk_con', 'box_cursor', 'breakpoint', + 'broyden', 
'butterworth', 'bytarr', 'byte', 'byteorder', + 'bytscl', 'caldat', 'calendar', 'call_external', + 'call_function', 'call_method', 'call_procedure', 'canny', + 'catch', 'cd', 'cdf_\w*', 'ceil', 'chebyshev', + 'check_math', + 'chisqr_cvf', 'chisqr_pdf', 'choldc', 'cholsol', 'cindgen', + 'cir_3pnt', 'close', 'cluster', 'cluster_tree', 'clust_wts', + 'cmyk_convert', 'colorbar', 'colorize_sample', + 'colormap_applicable', 'colormap_gradient', + 'colormap_rotation', 'colortable', 'color_convert', + 'color_exchange', 'color_quan', 'color_range_map', 'comfit', + 'command_line_args', 'complex', 'complexarr', 'complexround', + 'compute_mesh_normals', 'cond', 'congrid', 'conj', + 'constrained_min', 'contour', 'convert_coord', 'convol', + 'convol_fft', 'coord2to3', 'copy_lun', 'correlate', 'cos', + 'cosh', 'cpu', 'cramer', 'create_cursor', 'create_struct', + 'create_view', 'crossp', 'crvlength', 'cti_test', + 'ct_luminance', 'cursor', 'curvefit', 'cvttobm', 'cv_coord', + 'cw_animate', 'cw_animate_getp', 'cw_animate_load', + 'cw_animate_run', 'cw_arcball', 'cw_bgroup', 'cw_clr_index', + 'cw_colorsel', 'cw_defroi', 'cw_field', 'cw_filesel', + 'cw_form', 'cw_fslider', 'cw_light_editor', + 'cw_light_editor_get', 'cw_light_editor_set', 'cw_orient', + 'cw_palette_editor', 'cw_palette_editor_get', + 'cw_palette_editor_set', 'cw_pdmenu', 'cw_rgbslider', + 'cw_tmpl', 'cw_zoom', 'c_correlate', 'dblarr', 'db_exists', + 'dcindgen', 'dcomplex', 'dcomplexarr', 'define_key', + 'define_msgblk', 'define_msgblk_from_file', 'defroi', + 'defsysv', 'delvar', 'dendrogram', 'dendro_plot', 'deriv', + 'derivsig', 'determ', 'device', 'dfpmin', 'diag_matrix', + 'dialog_dbconnect', 'dialog_message', 'dialog_pickfile', + 'dialog_printersetup', 'dialog_printjob', + 'dialog_read_image', 'dialog_write_image', 'digital_filter', + 'dilate', 'dindgen', 'dissolve', 'dist', 'distance_measure', + 'dlm_load', 'dlm_register', 'doc_library', 'double', + 'draw_roi', 'edge_dog', 'efont', 'eigenql', 'eigenvec', + 
'ellipse', 'elmhes', 'emboss', 'empty', 'enable_sysrtn', + 'eof', 'eos_\w*', 'erase', 'erf', 'erfc', 'erfcx', + 'erode', 'errorplot', 'errplot', 'estimator_filter', + 'execute', 'exit', 'exp', 'expand', 'expand_path', 'expint', + 'extrac', 'extract_slice', 'factorial', 'fft', 'filepath', + 'file_basename', 'file_chmod', 'file_copy', 'file_delete', + 'file_dirname', 'file_expand_path', 'file_info', + 'file_lines', 'file_link', 'file_mkdir', 'file_move', + 'file_poll_input', 'file_readlink', 'file_same', + 'file_search', 'file_test', 'file_which', 'findgen', + 'finite', 'fix', 'flick', 'float', 'floor', 'flow3', + 'fltarr', 'flush', 'format_axis_values', 'free_lun', + 'fstat', 'fulstr', 'funct', 'fv_test', 'fx_root', + 'fz_roots', 'f_cvf', 'f_pdf', 'gamma', 'gamma_ct', + 'gauss2dfit', 'gaussfit', 'gaussian_function', 'gaussint', + 'gauss_cvf', 'gauss_pdf', 'gauss_smooth', 'getenv', + 'getwindows', 'get_drive_list', 'get_dxf_objects', + 'get_kbrd', 'get_login_info', 'get_lun', 'get_screen_size', + 'greg2jul', 'grib_\w*', 'grid3', 'griddata', + 'grid_input', 'grid_tps', 'gs_iter', + 'h5[adfgirst]_\w*', 'h5_browser', 'h5_close', + 'h5_create', 'h5_get_libversion', 'h5_open', 'h5_parse', + 'hanning', 'hash', 'hdf_\w*', 'heap_free', + 'heap_gc', 'heap_nosave', 'heap_refcount', 'heap_save', + 'help', 'hilbert', 'histogram', 'hist_2d', 'hist_equal', + 'hls', 'hough', 'hqr', 'hsv', 'h_eq_ct', 'h_eq_int', + 'i18n_multibytetoutf8', 'i18n_multibytetowidechar', + 'i18n_utf8tomultibyte', 'i18n_widechartomultibyte', + 'ibeta', 'icontour', 'iconvertcoord', 'idelete', 'identity', + 'idlexbr_assistant', 'idlitsys_createtool', 'idl_base64', + 'idl_validname', 'iellipse', 'igamma', 'igetcurrent', + 'igetdata', 'igetid', 'igetproperty', 'iimage', 'image', + 'image_cont', 'image_statistics', 'imaginary', 'imap', + 'indgen', 'intarr', 'interpol', 'interpolate', + 'interval_volume', 'int_2d', 'int_3d', 'int_tabulated', + 'invert', 'ioctl', 'iopen', 'iplot', 'ipolygon', + 'ipolyline', 
'iputdata', 'iregister', 'ireset', 'iresolve', + 'irotate', 'ir_filter', 'isa', 'isave', 'iscale', + 'isetcurrent', 'isetproperty', 'ishft', 'isocontour', + 'isosurface', 'isurface', 'itext', 'itranslate', 'ivector', + 'ivolume', 'izoom', 'i_beta', 'journal', 'json_parse', + 'json_serialize', 'jul2greg', 'julday', 'keyword_set', + 'krig2d', 'kurtosis', 'kw_test', 'l64indgen', 'label_date', + 'label_region', 'ladfit', 'laguerre', 'laplacian', + 'la_choldc', 'la_cholmprove', 'la_cholsol', 'la_determ', + 'la_eigenproblem', 'la_eigenql', 'la_eigenvec', 'la_elmhes', + 'la_gm_linear_model', 'la_hqr', 'la_invert', + 'la_least_squares', 'la_least_square_equality', + 'la_linear_equation', 'la_ludc', 'la_lumprove', 'la_lusol', + 'la_svd', 'la_tridc', 'la_trimprove', 'la_triql', + 'la_trired', 'la_trisol', 'least_squares_filter', 'leefilt', + 'legend', 'legendre', 'linbcg', 'lindgen', 'linfit', + 'linkimage', 'list', 'll_arc_distance', 'lmfit', 'lmgr', + 'lngamma', 'lnp_test', 'loadct', 'locale_get', + 'logical_and', 'logical_or', 'logical_true', 'lon64arr', + 'lonarr', 'long', 'long64', 'lsode', 'ludc', 'lumprove', + 'lusol', 'lu_complex', 'machar', 'make_array', 'make_dll', + 'make_rt', 'map', 'mapcontinents', 'mapgrid', 'map_2points', + 'map_continents', 'map_grid', 'map_image', 'map_patch', + 'map_proj_forward', 'map_proj_image', 'map_proj_info', + 'map_proj_init', 'map_proj_inverse', 'map_set', + 'matrix_multiply', 'matrix_power', 'max', 'md_test', + 'mean', 'meanabsdev', 'mean_filter', 'median', 'memory', + 'mesh_clip', 'mesh_decimate', 'mesh_issolid', 'mesh_merge', + 'mesh_numtriangles', 'mesh_obj', 'mesh_smooth', + 'mesh_surfacearea', 'mesh_validate', 'mesh_volume', + 'message', 'min', 'min_curve_surf', 'mk_html_help', + 'modifyct', 'moment', 'morph_close', 'morph_distance', + 'morph_gradient', 'morph_hitormiss', 'morph_open', + 'morph_thin', 'morph_tophat', 'multi', 'm_correlate', + 'ncdf_\w*', 'newton', 'noise_hurl', 'noise_pick', + 'noise_scatter', 'noise_slur', 
'norm', 'n_elements', + 'n_params', 'n_tags', 'objarr', 'obj_class', 'obj_destroy', + 'obj_hasmethod', 'obj_isa', 'obj_new', 'obj_valid', + 'online_help', 'on_error', 'open', 'oplot', 'oploterr', + 'parse_url', 'particle_trace', 'path_cache', 'path_sep', + 'pcomp', 'plot', 'plot3d', 'ploterr', 'plots', 'plot_3dbox', + 'plot_field', 'pnt_line', 'point_lun', 'polarplot', + 'polar_contour', 'polar_surface', 'poly', 'polyfill', + 'polyfillv', 'polygon', 'polyline', 'polyshade', 'polywarp', + 'poly_2d', 'poly_area', 'poly_fit', 'popd', 'powell', + 'pref_commit', 'pref_get', 'pref_set', 'prewitt', 'primes', + 'print', 'printd', 'product', 'profile', 'profiler', + 'profiles', 'project_vol', 'psafm', 'pseudo', + 'ps_show_fonts', 'ptrarr', 'ptr_free', 'ptr_new', + 'ptr_valid', 'pushd', 'p_correlate', 'qgrid3', 'qhull', + 'qromb', 'qromo', 'qsimp', 'query_ascii', 'query_bmp', + 'query_csv', 'query_dicom', 'query_gif', 'query_image', + 'query_jpeg', 'query_jpeg2000', 'query_mrsid', 'query_pict', + 'query_png', 'query_ppm', 'query_srf', 'query_tiff', + 'query_wav', 'radon', 'randomn', 'randomu', 'ranks', + 'rdpix', 'read', 'reads', 'readu', 'read_ascii', + 'read_binary', 'read_bmp', 'read_csv', 'read_dicom', + 'read_gif', 'read_image', 'read_interfile', 'read_jpeg', + 'read_jpeg2000', 'read_mrsid', 'read_pict', 'read_png', + 'read_ppm', 'read_spr', 'read_srf', 'read_sylk', + 'read_tiff', 'read_wav', 'read_wave', 'read_x11_bitmap', + 'read_xwd', 'real_part', 'rebin', 'recall_commands', + 'recon3', 'reduce_colors', 'reform', 'region_grow', + 'register_cursor', 'regress', 'replicate', + 'replicate_inplace', 'resolve_all', 'resolve_routine', + 'restore', 'retall', 'return', 'reverse', 'rk4', 'roberts', + 'rot', 'rotate', 'round', 'routine_filepath', + 'routine_info', 'rs_test', 'r_correlate', 'r_test', + 'save', 'savgol', 'scale3', 'scale3d', 'scope_level', + 'scope_traceback', 'scope_varfetch', 'scope_varname', + 'search2d', 'search3d', 'sem_create', 'sem_delete', + 'sem_lock', 
'sem_release', 'setenv', 'set_plot', + 'set_shading', 'sfit', 'shade_surf', 'shade_surf_irr', + 'shade_volume', 'shift', 'shift_diff', 'shmdebug', 'shmmap', + 'shmunmap', 'shmvar', 'show3', 'showfont', 'simplex', 'sin', + 'sindgen', 'sinh', 'size', 'skewness', 'skip_lun', + 'slicer3', 'slide_image', 'smooth', 'sobel', 'socket', + 'sort', 'spawn', 'spher_harm', 'sph_4pnt', 'sph_scat', + 'spline', 'spline_p', 'spl_init', 'spl_interp', 'sprsab', + 'sprsax', 'sprsin', 'sprstp', 'sqrt', 'standardize', + 'stddev', 'stop', 'strarr', 'strcmp', 'strcompress', + 'streamline', 'stregex', 'stretch', 'string', 'strjoin', + 'strlen', 'strlowcase', 'strmatch', 'strmessage', 'strmid', + 'strpos', 'strput', 'strsplit', 'strtrim', 'struct_assign', + 'struct_hide', 'strupcase', 'surface', 'surfr', 'svdc', + 'svdfit', 'svsol', 'swap_endian', 'swap_endian_inplace', + 'symbol', 'systime', 's_test', 't3d', 'tag_names', 'tan', + 'tanh', 'tek_color', 'temporary', 'tetra_clip', + 'tetra_surface', 'tetra_volume', 'text', 'thin', 'threed', + 'timegen', 'time_test2', 'tm_test', 'total', 'trace', + 'transpose', 'triangulate', 'trigrid', 'triql', 'trired', + 'trisol', 'tri_surf', 'truncate_lun', 'ts_coef', 'ts_diff', + 'ts_fcast', 'ts_smooth', 'tv', 'tvcrs', 'tvlct', 'tvrd', + 'tvscl', 'typename', 't_cvt', 't_pdf', 'uindgen', 'uint', + 'uintarr', 'ul64indgen', 'ulindgen', 'ulon64arr', 'ulonarr', + 'ulong', 'ulong64', 'uniq', 'unsharp_mask', 'usersym', + 'value_locate', 'variance', 'vector', 'vector_field', 'vel', + 'velovect', 'vert_t3d', 'voigt', 'voronoi', 'voxel_proj', + 'wait', 'warp_tri', 'watershed', 'wdelete', 'wf_draw', + 'where', 'widget_base', 'widget_button', 'widget_combobox', + 'widget_control', 'widget_displaycontextmen', 'widget_draw', + 'widget_droplist', 'widget_event', 'widget_info', + 'widget_label', 'widget_list', 'widget_propertysheet', + 'widget_slider', 'widget_tab', 'widget_table', + 'widget_text', 'widget_tree', 'widget_tree_move', + 'widget_window', 'wiener_filter', 
'window', 'writeu', + 'write_bmp', 'write_csv', 'write_gif', 'write_image', + 'write_jpeg', 'write_jpeg2000', 'write_nrif', 'write_pict', + 'write_png', 'write_ppm', 'write_spr', 'write_srf', + 'write_sylk', 'write_tiff', 'write_wav', 'write_wave', + 'wset', 'wshow', 'wtn', 'wv_applet', 'wv_cwt', + 'wv_cw_wavelet', 'wv_denoise', 'wv_dwt', 'wv_fn_coiflet', + 'wv_fn_daubechies', 'wv_fn_gaussian', 'wv_fn_haar', + 'wv_fn_morlet', 'wv_fn_paul', 'wv_fn_symlet', + 'wv_import_data', 'wv_import_wavelet', 'wv_plot3d_wps', + 'wv_plot_multires', 'wv_pwt', 'wv_tool_denoise', + 'xbm_edit', 'xdisplayfile', 'xdxf', 'xfont', + 'xinteranimate', 'xloadct', 'xmanager', 'xmng_tmpl', + 'xmtool', 'xobjview', 'xobjview_rotate', + 'xobjview_write_image', 'xpalette', 'xpcolor', 'xplot3d', + 'xregistered', 'xroi', 'xsq_test', 'xsurface', 'xvaredit', + 'xvolume', 'xvolume_rotate', 'xvolume_write_image', + 'xyouts', 'zoom', 'zoom_24') + """Functions from: http://www.exelisvis.com/docs/routines-1.html""" + + tokens = { + 'root': [ + (r'^\s*;.*?\n', Comment.Singleline), + (words(_RESERVED, prefix=r'\b', suffix=r'\b'), Keyword), + (words(_BUILTIN_LIB, prefix=r'\b', suffix=r'\b'), Name.Builtin), + (r'\+=|-=|\^=|\*=|/=|#=|##=|<=|>=|=', Operator), + (r'\+\+|--|->|\+|-|##|#|\*|/|<|>|&&|\^|~|\|\|\?|:', Operator), + (r'\b(mod=|lt=|le=|eq=|ne=|ge=|gt=|not=|and=|or=|xor=)', Operator), + (r'\b(mod|lt|le|eq|ne|ge|gt|not|and|or|xor)\b', Operator), + (r'\b[0-9](L|B|S|UL|ULL|LL)?\b', Number), + (r'.', Text), + ] + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/igor.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/igor.py new file mode 100644 index 0000000..dcf9770 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/igor.py @@ -0,0 +1,279 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.igor + ~~~~~~~~~~~~~~~~~~~~ + + Lexers for Igor Pro. 
+ + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, words +from pygments.token import Text, Comment, Keyword, Name, String + +__all__ = ['IgorLexer'] + + +class IgorLexer(RegexLexer): + """ + Pygments Lexer for Igor Pro procedure files (.ipf). + See http://www.wavemetrics.com/ and http://www.igorexchange.com/. + + .. versionadded:: 2.0 + """ + + name = 'Igor' + aliases = ['igor', 'igorpro'] + filenames = ['*.ipf'] + mimetypes = ['text/ipf'] + + flags = re.IGNORECASE | re.MULTILINE + + flowControl = ( + 'if', 'else', 'elseif', 'endif', 'for', 'endfor', 'strswitch', 'switch', + 'case', 'default', 'endswitch', 'do', 'while', 'try', 'catch', 'endtry', + 'break', 'continue', 'return', + ) + types = ( + 'variable', 'string', 'constant', 'strconstant', 'NVAR', 'SVAR', 'WAVE', + 'STRUCT', 'dfref' + ) + keywords = ( + 'override', 'ThreadSafe', 'static', 'FuncFit', 'Proc', 'Picture', + 'Prompt', 'DoPrompt', 'macro', 'window', 'graph', 'function', 'end', + 'Structure', 'EndStructure', 'EndMacro', 'Menu', 'SubMenu', + ) + operations = ( + 'Abort', 'AddFIFOData', 'AddFIFOVectData', 'AddMovieAudio', + 'AddMovieFrame', 'APMath', 'Append', 'AppendImage', + 'AppendLayoutObject', 'AppendMatrixContour', 'AppendText', + 'AppendToGraph', 'AppendToLayout', 'AppendToTable', 'AppendXYZContour', + 'AutoPositionWindow', 'BackgroundInfo', 'Beep', 'BoundingBall', + 'BrowseURL', 'BuildMenu', 'Button', 'cd', 'Chart', 'CheckBox', + 'CheckDisplayed', 'ChooseColor', 'Close', 'CloseMovie', 'CloseProc', + 'ColorScale', 'ColorTab2Wave', 'Concatenate', 'ControlBar', + 'ControlInfo', 'ControlUpdate', 'ConvexHull', 'Convolve', 'CopyFile', + 'CopyFolder', 'CopyScales', 'Correlate', 'CreateAliasShortcut', 'Cross', + 'CtrlBackground', 'CtrlFIFO', 'CtrlNamedBackground', 'Cursor', + 'CurveFit', 'CustomControl', 'CWT', 'Debugger', 'DebuggerOptions', + 'DefaultFont', 'DefaultGuiControls', 
'DefaultGuiFont', 'DefineGuide', + 'DelayUpdate', 'DeleteFile', 'DeleteFolder', 'DeletePoints', + 'Differentiate', 'dir', 'Display', 'DisplayHelpTopic', + 'DisplayProcedure', 'DoAlert', 'DoIgorMenu', 'DoUpdate', 'DoWindow', + 'DoXOPIdle', 'DrawAction', 'DrawArc', 'DrawBezier', 'DrawLine', + 'DrawOval', 'DrawPICT', 'DrawPoly', 'DrawRect', 'DrawRRect', 'DrawText', + 'DSPDetrend', 'DSPPeriodogram', 'Duplicate', 'DuplicateDataFolder', + 'DWT', 'EdgeStats', 'Edit', 'ErrorBars', 'Execute', 'ExecuteScriptText', + 'ExperimentModified', 'Extract', 'FastGaussTransform', 'FastOp', + 'FBinRead', 'FBinWrite', 'FFT', 'FIFO2Wave', 'FIFOStatus', 'FilterFIR', + 'FilterIIR', 'FindLevel', 'FindLevels', 'FindPeak', 'FindPointsInPoly', + 'FindRoots', 'FindSequence', 'FindValue', 'FPClustering', 'fprintf', + 'FReadLine', 'FSetPos', 'FStatus', 'FTPDelete', 'FTPDownload', + 'FTPUpload', 'FuncFit', 'FuncFitMD', 'GetAxis', 'GetFileFolderInfo', + 'GetLastUserMenuInfo', 'GetMarquee', 'GetSelection', 'GetWindow', + 'GraphNormal', 'GraphWaveDraw', 'GraphWaveEdit', 'Grep', 'GroupBox', + 'Hanning', 'HideIgorMenus', 'HideInfo', 'HideProcedures', 'HideTools', + 'HilbertTransform', 'Histogram', 'IFFT', 'ImageAnalyzeParticles', + 'ImageBlend', 'ImageBoundaryToMask', 'ImageEdgeDetection', + 'ImageFileInfo', 'ImageFilter', 'ImageFocus', 'ImageGenerateROIMask', + 'ImageHistModification', 'ImageHistogram', 'ImageInterpolate', + 'ImageLineProfile', 'ImageLoad', 'ImageMorphology', 'ImageRegistration', + 'ImageRemoveBackground', 'ImageRestore', 'ImageRotate', 'ImageSave', + 'ImageSeedFill', 'ImageSnake', 'ImageStats', 'ImageThreshold', + 'ImageTransform', 'ImageUnwrapPhase', 'ImageWindow', 'IndexSort', + 'InsertPoints', 'Integrate', 'IntegrateODE', 'Interp3DPath', + 'Interpolate3D', 'KillBackground', 'KillControl', 'KillDataFolder', + 'KillFIFO', 'KillFreeAxis', 'KillPath', 'KillPICTs', 'KillStrings', + 'KillVariables', 'KillWaves', 'KillWindow', 'KMeans', 'Label', 'Layout', + 'Legend', 
'LinearFeedbackShiftRegister', 'ListBox', 'LoadData', + 'LoadPackagePreferences', 'LoadPICT', 'LoadWave', 'Loess', + 'LombPeriodogram', 'Make', 'MakeIndex', 'MarkPerfTestTime', + 'MatrixConvolve', 'MatrixCorr', 'MatrixEigenV', 'MatrixFilter', + 'MatrixGaussJ', 'MatrixInverse', 'MatrixLinearSolve', + 'MatrixLinearSolveTD', 'MatrixLLS', 'MatrixLUBkSub', 'MatrixLUD', + 'MatrixMultiply', 'MatrixOP', 'MatrixSchur', 'MatrixSolve', + 'MatrixSVBkSub', 'MatrixSVD', 'MatrixTranspose', 'MeasureStyledText', + 'Modify', 'ModifyContour', 'ModifyControl', 'ModifyControlList', + 'ModifyFreeAxis', 'ModifyGraph', 'ModifyImage', 'ModifyLayout', + 'ModifyPanel', 'ModifyTable', 'ModifyWaterfall', 'MoveDataFolder', + 'MoveFile', 'MoveFolder', 'MoveString', 'MoveSubwindow', 'MoveVariable', + 'MoveWave', 'MoveWindow', 'NeuralNetworkRun', 'NeuralNetworkTrain', + 'NewDataFolder', 'NewFIFO', 'NewFIFOChan', 'NewFreeAxis', 'NewImage', + 'NewLayout', 'NewMovie', 'NewNotebook', 'NewPanel', 'NewPath', + 'NewWaterfall', 'Note', 'Notebook', 'NotebookAction', 'Open', + 'OpenNotebook', 'Optimize', 'ParseOperationTemplate', 'PathInfo', + 'PauseForUser', 'PauseUpdate', 'PCA', 'PlayMovie', 'PlayMovieAction', + 'PlaySnd', 'PlaySound', 'PopupContextualMenu', 'PopupMenu', + 'Preferences', 'PrimeFactors', 'Print', 'printf', 'PrintGraphs', + 'PrintLayout', 'PrintNotebook', 'PrintSettings', 'PrintTable', + 'Project', 'PulseStats', 'PutScrapText', 'pwd', 'Quit', + 'RatioFromNumber', 'Redimension', 'Remove', 'RemoveContour', + 'RemoveFromGraph', 'RemoveFromLayout', 'RemoveFromTable', 'RemoveImage', + 'RemoveLayoutObjects', 'RemovePath', 'Rename', 'RenameDataFolder', + 'RenamePath', 'RenamePICT', 'RenameWindow', 'ReorderImages', + 'ReorderTraces', 'ReplaceText', 'ReplaceWave', 'Resample', + 'ResumeUpdate', 'Reverse', 'Rotate', 'Save', 'SaveData', + 'SaveExperiment', 'SaveGraphCopy', 'SaveNotebook', + 'SavePackagePreferences', 'SavePICT', 'SaveTableCopy', + 'SetActiveSubwindow', 'SetAxis', 'SetBackground', 
'SetDashPattern', + 'SetDataFolder', 'SetDimLabel', 'SetDrawEnv', 'SetDrawLayer', + 'SetFileFolderInfo', 'SetFormula', 'SetIgorHook', 'SetIgorMenuMode', + 'SetIgorOption', 'SetMarquee', 'SetProcessSleep', 'SetRandomSeed', + 'SetScale', 'SetVariable', 'SetWaveLock', 'SetWindow', 'ShowIgorMenus', + 'ShowInfo', 'ShowTools', 'Silent', 'Sleep', 'Slider', 'Smooth', + 'SmoothCustom', 'Sort', 'SoundInRecord', 'SoundInSet', + 'SoundInStartChart', 'SoundInStatus', 'SoundInStopChart', + 'SphericalInterpolate', 'SphericalTriangulate', 'SplitString', + 'sprintf', 'sscanf', 'Stack', 'StackWindows', + 'StatsAngularDistanceTest', 'StatsANOVA1Test', 'StatsANOVA2NRTest', + 'StatsANOVA2RMTest', 'StatsANOVA2Test', 'StatsChiTest', + 'StatsCircularCorrelationTest', 'StatsCircularMeans', + 'StatsCircularMoments', 'StatsCircularTwoSampleTest', + 'StatsCochranTest', 'StatsContingencyTable', 'StatsDIPTest', + 'StatsDunnettTest', 'StatsFriedmanTest', 'StatsFTest', + 'StatsHodgesAjneTest', 'StatsJBTest', 'StatsKendallTauTest', + 'StatsKSTest', 'StatsKWTest', 'StatsLinearCorrelationTest', + 'StatsLinearRegression', 'StatsMultiCorrelationTest', + 'StatsNPMCTest', 'StatsNPNominalSRTest', 'StatsQuantiles', + 'StatsRankCorrelationTest', 'StatsResample', 'StatsSample', + 'StatsScheffeTest', 'StatsSignTest', 'StatsSRTest', 'StatsTTest', + 'StatsTukeyTest', 'StatsVariancesTest', 'StatsWatsonUSquaredTest', + 'StatsWatsonWilliamsTest', 'StatsWheelerWatsonTest', + 'StatsWilcoxonRankTest', 'StatsWRCorrelationTest', 'String', + 'StructGet', 'StructPut', 'TabControl', 'Tag', 'TextBox', 'Tile', + 'TileWindows', 'TitleBox', 'ToCommandLine', 'ToolsGrid', + 'Triangulate3d', 'Unwrap', 'ValDisplay', 'Variable', 'WaveMeanStdv', + 'WaveStats', 'WaveTransform', 'wfprintf', 'WignerTransform', + 'WindowFunction', + ) + functions = ( + 'abs', 'acos', 'acosh', 'AiryA', 'AiryAD', 'AiryB', 'AiryBD', 'alog', + 'area', 'areaXY', 'asin', 'asinh', 'atan', 'atan2', 'atanh', + 'AxisValFromPixel', 'Besseli', 'Besselj', 
'Besselk', 'Bessely', 'bessi', + 'bessj', 'bessk', 'bessy', 'beta', 'betai', 'BinarySearch', + 'BinarySearchInterp', 'binomial', 'binomialln', 'binomialNoise', 'cabs', + 'CaptureHistoryStart', 'ceil', 'cequal', 'char2num', 'chebyshev', + 'chebyshevU', 'CheckName', 'cmplx', 'cmpstr', 'conj', 'ContourZ', 'cos', + 'cosh', 'cot', 'CountObjects', 'CountObjectsDFR', 'cpowi', + 'CreationDate', 'csc', 'DataFolderExists', 'DataFolderRefsEqual', + 'DataFolderRefStatus', 'date2secs', 'datetime', 'DateToJulian', + 'Dawson', 'DDEExecute', 'DDEInitiate', 'DDEPokeString', 'DDEPokeWave', + 'DDERequestWave', 'DDEStatus', 'DDETerminate', 'deltax', 'digamma', + 'DimDelta', 'DimOffset', 'DimSize', 'ei', 'enoise', 'equalWaves', 'erf', + 'erfc', 'exists', 'exp', 'expInt', 'expNoise', 'factorial', 'fakedata', + 'faverage', 'faverageXY', 'FindDimLabel', 'FindListItem', 'floor', + 'FontSizeHeight', 'FontSizeStringWidth', 'FresnelCos', 'FresnelSin', + 'gamma', 'gammaInc', 'gammaNoise', 'gammln', 'gammp', 'gammq', 'Gauss', + 'Gauss1D', 'Gauss2D', 'gcd', 'GetDefaultFontSize', + 'GetDefaultFontStyle', 'GetKeyState', 'GetRTError', 'gnoise', + 'GrepString', 'hcsr', 'hermite', 'hermiteGauss', 'HyperG0F1', + 'HyperG1F1', 'HyperG2F1', 'HyperGNoise', 'HyperGPFQ', 'IgorVersion', + 'ilim', 'imag', 'Inf', 'Integrate1D', 'interp', 'Interp2D', 'Interp3D', + 'inverseERF', 'inverseERFC', 'ItemsInList', 'jlim', 'Laguerre', + 'LaguerreA', 'LaguerreGauss', 'leftx', 'LegendreA', 'limit', 'ln', + 'log', 'logNormalNoise', 'lorentzianNoise', 'magsqr', 'MandelbrotPoint', + 'MarcumQ', 'MatrixDet', 'MatrixDot', 'MatrixRank', 'MatrixTrace', 'max', + 'mean', 'min', 'mod', 'ModDate', 'NaN', 'norm', 'NumberByKey', + 'numpnts', 'numtype', 'NumVarOrDefault', 'NVAR_Exists', 'p2rect', + 'ParamIsDefault', 'pcsr', 'Pi', 'PixelFromAxisVal', 'pnt2x', + 'poissonNoise', 'poly', 'poly2D', 'PolygonArea', 'qcsr', 'r2polar', + 'real', 'rightx', 'round', 'sawtooth', 'ScreenResolution', 'sec', + 'SelectNumber', 'sign', 'sin', 'sinc', 
'sinh', 'SphericalBessJ', + 'SphericalBessJD', 'SphericalBessY', 'SphericalBessYD', + 'SphericalHarmonics', 'sqrt', 'StartMSTimer', 'StatsBetaCDF', + 'StatsBetaPDF', 'StatsBinomialCDF', 'StatsBinomialPDF', + 'StatsCauchyCDF', 'StatsCauchyPDF', 'StatsChiCDF', 'StatsChiPDF', + 'StatsCMSSDCDF', 'StatsCorrelation', 'StatsDExpCDF', 'StatsDExpPDF', + 'StatsErlangCDF', 'StatsErlangPDF', 'StatsErrorPDF', 'StatsEValueCDF', + 'StatsEValuePDF', 'StatsExpCDF', 'StatsExpPDF', 'StatsFCDF', + 'StatsFPDF', 'StatsFriedmanCDF', 'StatsGammaCDF', 'StatsGammaPDF', + 'StatsGeometricCDF', 'StatsGeometricPDF', 'StatsHyperGCDF', + 'StatsHyperGPDF', 'StatsInvBetaCDF', 'StatsInvBinomialCDF', + 'StatsInvCauchyCDF', 'StatsInvChiCDF', 'StatsInvCMSSDCDF', + 'StatsInvDExpCDF', 'StatsInvEValueCDF', 'StatsInvExpCDF', + 'StatsInvFCDF', 'StatsInvFriedmanCDF', 'StatsInvGammaCDF', + 'StatsInvGeometricCDF', 'StatsInvKuiperCDF', 'StatsInvLogisticCDF', + 'StatsInvLogNormalCDF', 'StatsInvMaxwellCDF', 'StatsInvMooreCDF', + 'StatsInvNBinomialCDF', 'StatsInvNCChiCDF', 'StatsInvNCFCDF', + 'StatsInvNormalCDF', 'StatsInvParetoCDF', 'StatsInvPoissonCDF', + 'StatsInvPowerCDF', 'StatsInvQCDF', 'StatsInvQpCDF', + 'StatsInvRayleighCDF', 'StatsInvRectangularCDF', 'StatsInvSpearmanCDF', + 'StatsInvStudentCDF', 'StatsInvTopDownCDF', 'StatsInvTriangularCDF', + 'StatsInvUsquaredCDF', 'StatsInvVonMisesCDF', 'StatsInvWeibullCDF', + 'StatsKuiperCDF', 'StatsLogisticCDF', 'StatsLogisticPDF', + 'StatsLogNormalCDF', 'StatsLogNormalPDF', 'StatsMaxwellCDF', + 'StatsMaxwellPDF', 'StatsMedian', 'StatsMooreCDF', 'StatsNBinomialCDF', + 'StatsNBinomialPDF', 'StatsNCChiCDF', 'StatsNCChiPDF', 'StatsNCFCDF', + 'StatsNCFPDF', 'StatsNCTCDF', 'StatsNCTPDF', 'StatsNormalCDF', + 'StatsNormalPDF', 'StatsParetoCDF', 'StatsParetoPDF', 'StatsPermute', + 'StatsPoissonCDF', 'StatsPoissonPDF', 'StatsPowerCDF', + 'StatsPowerNoise', 'StatsPowerPDF', 'StatsQCDF', 'StatsQpCDF', + 'StatsRayleighCDF', 'StatsRayleighPDF', 'StatsRectangularCDF', + 
'StatsRectangularPDF', 'StatsRunsCDF', 'StatsSpearmanRhoCDF', + 'StatsStudentCDF', 'StatsStudentPDF', 'StatsTopDownCDF', + 'StatsTriangularCDF', 'StatsTriangularPDF', 'StatsTrimmedMean', + 'StatsUSquaredCDF', 'StatsVonMisesCDF', 'StatsVonMisesNoise', + 'StatsVonMisesPDF', 'StatsWaldCDF', 'StatsWaldPDF', 'StatsWeibullCDF', + 'StatsWeibullPDF', 'StopMSTimer', 'str2num', 'stringCRC', 'stringmatch', + 'strlen', 'strsearch', 'StudentA', 'StudentT', 'sum', 'SVAR_Exists', + 'TagVal', 'tan', 'tanh', 'ThreadGroupCreate', 'ThreadGroupRelease', + 'ThreadGroupWait', 'ThreadProcessorCount', 'ThreadReturnValue', 'ticks', + 'trunc', 'Variance', 'vcsr', 'WaveCRC', 'WaveDims', 'WaveExists', + 'WaveMax', 'WaveMin', 'WaveRefsEqual', 'WaveType', 'WhichListItem', + 'WinType', 'WNoise', 'x', 'x2pnt', 'xcsr', 'y', 'z', 'zcsr', 'ZernikeR', + ) + functions += ( + 'AddListItem', 'AnnotationInfo', 'AnnotationList', 'AxisInfo', + 'AxisList', 'CaptureHistory', 'ChildWindowList', 'CleanupName', + 'ContourInfo', 'ContourNameList', 'ControlNameList', 'CsrInfo', + 'CsrWave', 'CsrXWave', 'CTabList', 'DataFolderDir', 'date', + 'DDERequestString', 'FontList', 'FuncRefInfo', 'FunctionInfo', + 'FunctionList', 'FunctionPath', 'GetDataFolder', 'GetDefaultFont', + 'GetDimLabel', 'GetErrMessage', 'GetFormula', + 'GetIndependentModuleName', 'GetIndexedObjName', 'GetIndexedObjNameDFR', + 'GetRTErrMessage', 'GetRTStackInfo', 'GetScrapText', 'GetUserData', + 'GetWavesDataFolder', 'GrepList', 'GuideInfo', 'GuideNameList', 'Hash', + 'IgorInfo', 'ImageInfo', 'ImageNameList', 'IndexedDir', 'IndexedFile', + 'JulianToDate', 'LayoutInfo', 'ListMatch', 'LowerStr', 'MacroList', + 'NameOfWave', 'note', 'num2char', 'num2istr', 'num2str', + 'OperationList', 'PadString', 'ParseFilePath', 'PathList', 'PICTInfo', + 'PICTList', 'PossiblyQuoteName', 'ProcedureText', 'RemoveByKey', + 'RemoveEnding', 'RemoveFromList', 'RemoveListItem', + 'ReplaceNumberByKey', 'ReplaceString', 'ReplaceStringByKey', + 'Secs2Date', 'Secs2Time', 
'SelectString', 'SortList', + 'SpecialCharacterInfo', 'SpecialCharacterList', 'SpecialDirPath', + 'StringByKey', 'StringFromList', 'StringList', 'StrVarOrDefault', + 'TableInfo', 'TextFile', 'ThreadGroupGetDF', 'time', 'TraceFromPixel', + 'TraceInfo', 'TraceNameList', 'UniqueName', 'UnPadString', 'UpperStr', + 'VariableList', 'WaveInfo', 'WaveList', 'WaveName', 'WaveUnits', + 'WinList', 'WinName', 'WinRecreation', 'XWaveName', + 'ContourNameToWaveRef', 'CsrWaveRef', 'CsrXWaveRef', + 'ImageNameToWaveRef', 'NewFreeWave', 'TagWaveRef', 'TraceNameToWaveRef', + 'WaveRefIndexed', 'XWaveRefFromTrace', 'GetDataFolderDFR', + 'GetWavesDataFolderDFR', 'NewFreeDataFolder', 'ThreadGroupGetDFR', + ) + + tokens = { + 'root': [ + (r'//.*$', Comment.Single), + (r'"([^"\\]|\\.)*"', String), + # Flow Control. + (words(flowControl, prefix=r'\b', suffix=r'\b'), Keyword), + # Types. + (words(types, prefix=r'\b', suffix=r'\b'), Keyword.Type), + # Keywords. + (words(keywords, prefix=r'\b', suffix=r'\b'), Keyword.Reserved), + # Built-in operations. + (words(operations, prefix=r'\b', suffix=r'\b'), Name.Class), + # Built-in functions. + (words(functions, prefix=r'\b', suffix=r'\b'), Name.Function), + # Compiler directives. + (r'^#(include|pragma|define|ifdef|ifndef|endif)', + Name.Decorator), + (r'[^a-z"/]+$', Text), + (r'.', Text), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/inferno.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/inferno.py new file mode 100644 index 0000000..706be0c --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/inferno.py @@ -0,0 +1,96 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.inferno + ~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for Inferno os and all the related stuff. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import re + +from pygments.lexer import RegexLexer, include, bygroups, default +from pygments.token import Punctuation, Text, Comment, Operator, Keyword, \ + Name, String, Number + +__all__ = ['LimboLexer'] + + +class LimboLexer(RegexLexer): + """ + Lexer for `Limbo programming language `_ + + TODO: + - maybe implement better var declaration highlighting + - some simple syntax error highlighting + + .. versionadded:: 2.0 + """ + name = 'Limbo' + aliases = ['limbo'] + filenames = ['*.b'] + mimetypes = ['text/limbo'] + + tokens = { + 'whitespace': [ + (r'^(\s*)([a-zA-Z_]\w*:(\s*)\n)', + bygroups(Text, Name.Label)), + (r'\n', Text), + (r'\s+', Text), + (r'#(\n|(.|\n)*?[^\\]\n)', Comment.Single), + ], + 'string': [ + (r'"', String, '#pop'), + (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|' + r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape), + (r'[^\\"\n]+', String), # all other characters + (r'\\', String), # stray backslash + ], + 'statements': [ + (r'"', String, 'string'), + (r"'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char), + (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float), + (r'(\d+\.\d*|\.\d+|\d+[fF])', Number.Float), + (r'16r[0-9a-fA-F]+', Number.Hex), + (r'8r[0-7]+', Number.Oct), + (r'((([1-3]\d)|([2-9]))r)?(\d+)', Number.Integer), + (r'[()\[\],.]', Punctuation), + (r'[~!%^&*+=|?:<>/-]|(->)|(<-)|(=>)|(::)', Operator), + (r'(alt|break|case|continue|cyclic|do|else|exit' + r'for|hd|if|implement|import|include|len|load|or' + r'pick|return|spawn|tagof|tl|to|while)\b', Keyword), + (r'(byte|int|big|real|string|array|chan|list|adt' + r'|fn|ref|of|module|self|type)\b', Keyword.Type), + (r'(con|iota|nil)\b', Keyword.Constant), + ('[a-zA-Z_]\w*', Name), + ], + 'statement' : [ + include('whitespace'), + include('statements'), + ('[{}]', Punctuation), + (';', Punctuation, '#pop'), + ], + 'root': [ + include('whitespace'), + default('statement'), + ], + } + + def analyse_text(text): + # Any limbo module implements something + if 
re.search(r'^implement \w+;', text, re.MULTILINE): + return 0.7 + +# TODO: +# - Make lexers for: +# - asm sources +# - man pages +# - mkfiles +# - module definitions +# - namespace definitions +# - shell scripts +# - maybe keyfiles and fonts +# they all seem to be quite similar to their equivalents +# from unix world, so there should not be a lot of problems diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/installers.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/installers.py new file mode 100644 index 0000000..769bce6 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/installers.py @@ -0,0 +1,322 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.installers + ~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for installer/packager DSLs and formats. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include, bygroups, using, this, default +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Punctuation, Generic, Number, Whitespace + +__all__ = ['NSISLexer', 'RPMSpecLexer', 'SourcesListLexer', + 'DebianControlLexer'] + + +class NSISLexer(RegexLexer): + """ + For `NSIS `_ scripts. + + .. 
versionadded:: 1.6 + """ + name = 'NSIS' + aliases = ['nsis', 'nsi', 'nsh'] + filenames = ['*.nsi', '*.nsh'] + mimetypes = ['text/x-nsis'] + + flags = re.IGNORECASE + + tokens = { + 'root': [ + (r'[;#].*\n', Comment), + (r"'.*?'", String.Single), + (r'"', String.Double, 'str_double'), + (r'`', String.Backtick, 'str_backtick'), + include('macro'), + include('interpol'), + include('basic'), + (r'\$\{[a-z_|][\w|]*\}', Keyword.Pseudo), + (r'/[a-z_]\w*', Name.Attribute), + ('.', Text), + ], + 'basic': [ + (r'(\n)(Function)(\s+)([._a-z][.\w]*)\b', + bygroups(Text, Keyword, Text, Name.Function)), + (r'\b([_a-z]\w*)(::)([a-z][a-z0-9]*)\b', + bygroups(Keyword.Namespace, Punctuation, Name.Function)), + (r'\b([_a-z]\w*)(:)', bygroups(Name.Label, Punctuation)), + (r'(\b[ULS]|\B)([!<>=]?=|\<\>?|\>)\B', Operator), + (r'[|+-]', Operator), + (r'\\', Punctuation), + (r'\b(Abort|Add(?:BrandingImage|Size)|' + r'Allow(?:RootDirInstall|SkipFiles)|AutoCloseWindow|' + r'BG(?:Font|Gradient)|BrandingText|BringToFront|Call(?:InstDLL)?|' + r'(?:Sub)?Caption|ChangeUI|CheckBitmap|ClearErrors|CompletedText|' + r'ComponentText|CopyFiles|CRCCheck|' + r'Create(?:Directory|Font|Shortcut)|Delete(?:INI(?:Sec|Str)|' + r'Reg(?:Key|Value))?|DetailPrint|DetailsButtonText|' + r'Dir(?:Show|Text|Var|Verify)|(?:Disabled|Enabled)Bitmap|' + r'EnableWindow|EnumReg(?:Key|Value)|Exch|Exec(?:Shell|Wait)?|' + r'ExpandEnvStrings|File(?:BufSize|Close|ErrorText|Open|' + r'Read(?:Byte)?|Seek|Write(?:Byte)?)?|' + r'Find(?:Close|First|Next|Window)|FlushINI|Function(?:End)?|' + r'Get(?:CurInstType|CurrentAddress|DlgItem|DLLVersion(?:Local)?|' + r'ErrorLevel|FileTime(?:Local)?|FullPathName|FunctionAddress|' + r'InstDirError|LabelAddress|TempFileName)|' + r'Goto|HideWindow|Icon|' + r'If(?:Abort|Errors|FileExists|RebootFlag|Silent)|' + r'InitPluginsDir|Install(?:ButtonText|Colors|Dir(?:RegKey)?)|' + r'Inst(?:ProgressFlags|Type(?:[GS]etText)?)|Int(?:CmpU?|Fmt|Op)|' + r'IsWindow|LangString(?:UP)?|' + 
r'License(?:BkColor|Data|ForceSelection|LangString|Text)|' + r'LoadLanguageFile|LockWindow|Log(?:Set|Text)|MessageBox|' + r'MiscButtonText|Name|Nop|OutFile|(?:Uninst)?Page(?:Ex(?:End)?)?|' + r'PluginDir|Pop|Push|Quit|Read(?:(?:Env|INI|Reg)Str|RegDWORD)|' + r'Reboot|(?:Un)?RegDLL|Rename|RequestExecutionLevel|ReserveFile|' + r'Return|RMDir|SearchPath|Section(?:Divider|End|' + r'(?:(?:Get|Set)(?:Flags|InstTypes|Size|Text))|Group(?:End)?|In)?|' + r'SendMessage|Set(?:AutoClose|BrandingImage|Compress(?:ionLevel|' + r'or(?:DictSize)?)?|CtlColors|CurInstType|DatablockOptimize|' + r'DateSave|Details(?:Print|View)|Error(?:s|Level)|FileAttributes|' + r'Font|OutPath|Overwrite|PluginUnload|RebootFlag|ShellVarContext|' + r'Silent|StaticBkColor)|' + r'Show(?:(?:I|Uni)nstDetails|Window)|Silent(?:Un)?Install|Sleep|' + r'SpaceTexts|Str(?:CmpS?|Cpy|Len)|SubSection(?:End)?|' + r'Uninstall(?:ButtonText|(?:Sub)?Caption|EXEName|Icon|Text)|' + r'UninstPage|Var|VI(?:AddVersionKey|ProductVersion)|WindowIcon|' + r'Write(?:INIStr|Reg(:?Bin|DWORD|(?:Expand)?Str)|Uninstaller)|' + r'XPStyle)\b', Keyword), + (r'\b(CUR|END|(?:FILE_ATTRIBUTE_)?' 
+ r'(?:ARCHIVE|HIDDEN|NORMAL|OFFLINE|READONLY|SYSTEM|TEMPORARY)|' + r'HK(CC|CR|CU|DD|LM|PD|U)|' + r'HKEY_(?:CLASSES_ROOT|CURRENT_(?:CONFIG|USER)|DYN_DATA|' + r'LOCAL_MACHINE|PERFORMANCE_DATA|USERS)|' + r'ID(?:ABORT|CANCEL|IGNORE|NO|OK|RETRY|YES)|' + r'MB_(?:ABORTRETRYIGNORE|DEFBUTTON[1-4]|' + r'ICON(?:EXCLAMATION|INFORMATION|QUESTION|STOP)|' + r'OK(?:CANCEL)?|RETRYCANCEL|RIGHT|SETFOREGROUND|TOPMOST|USERICON|' + r'YESNO(?:CANCEL)?)|SET|SHCTX|' + r'SW_(?:HIDE|SHOW(?:MAXIMIZED|MINIMIZED|NORMAL))|' + r'admin|all|auto|both|bottom|bzip2|checkbox|colored|current|false|' + r'force|hide|highest|if(?:diff|newer)|lastused|leave|left|' + r'listonly|lzma|nevershow|none|normal|off|on|pop|push|' + r'radiobuttons|right|show|silent|silentlog|smooth|textonly|top|' + r'true|try|user|zlib)\b', Name.Constant), + ], + 'macro': [ + (r'\!(addincludedir(?:dir)?|addplugindir|appendfile|cd|define|' + r'delfilefile|echo(?:message)?|else|endif|error|execute|' + r'if(?:macro)?n?(?:def)?|include|insertmacro|macro(?:end)?|packhdr|' + r'search(?:parse|replace)|system|tempfilesymbol|undef|verbose|' + r'warning)\b', Comment.Preproc), + ], + 'interpol': [ + (r'\$(R?[0-9])', Name.Builtin.Pseudo), # registers + (r'\$(ADMINTOOLS|APPDATA|CDBURN_AREA|COOKIES|COMMONFILES(?:32|64)|' + r'DESKTOP|DOCUMENTS|EXE(?:DIR|FILE|PATH)|FAVORITES|FONTS|HISTORY|' + r'HWNDPARENT|INTERNET_CACHE|LOCALAPPDATA|MUSIC|NETHOOD|PICTURES|' + r'PLUGINSDIR|PRINTHOOD|PROFILE|PROGRAMFILES(?:32|64)|QUICKLAUNCH|' + r'RECENT|RESOURCES(?:_LOCALIZED)?|SENDTO|SM(?:PROGRAMS|STARTUP)|' + r'STARTMENU|SYSDIR|TEMP(?:LATES)?|VIDEOS|WINDIR|\{NSISDIR\})', + Name.Builtin), + (r'\$(CMDLINE|INSTDIR|OUTDIR|LANGUAGE)', Name.Variable.Global), + (r'\$[a-z_]\w*', Name.Variable), + ], + 'str_double': [ + (r'"', String, '#pop'), + (r'\$(\\[nrt"]|\$)', String.Escape), + include('interpol'), + (r'.', String.Double), + ], + 'str_backtick': [ + (r'`', String, '#pop'), + (r'\$(\\[nrt"]|\$)', String.Escape), + include('interpol'), + (r'.', String.Double), + ], + 
} + + +class RPMSpecLexer(RegexLexer): + """ + For RPM ``.spec`` files. + + .. versionadded:: 1.6 + """ + + name = 'RPMSpec' + aliases = ['spec'] + filenames = ['*.spec'] + mimetypes = ['text/x-rpm-spec'] + + _directives = ('(?:package|prep|build|install|clean|check|pre[a-z]*|' + 'post[a-z]*|trigger[a-z]*|files)') + + tokens = { + 'root': [ + (r'#.*\n', Comment), + include('basic'), + ], + 'description': [ + (r'^(%' + _directives + ')(.*)$', + bygroups(Name.Decorator, Text), '#pop'), + (r'\n', Text), + (r'.', Text), + ], + 'changelog': [ + (r'\*.*\n', Generic.Subheading), + (r'^(%' + _directives + ')(.*)$', + bygroups(Name.Decorator, Text), '#pop'), + (r'\n', Text), + (r'.', Text), + ], + 'string': [ + (r'"', String.Double, '#pop'), + (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), + include('interpol'), + (r'.', String.Double), + ], + 'basic': [ + include('macro'), + (r'(?i)^(Name|Version|Release|Epoch|Summary|Group|License|Packager|' + r'Vendor|Icon|URL|Distribution|Prefix|Patch[0-9]*|Source[0-9]*|' + r'Requires\(?[a-z]*\)?|[a-z]+Req|Obsoletes|Suggests|Provides|Conflicts|' + r'Build[a-z]+|[a-z]+Arch|Auto[a-z]+)(:)(.*)$', + bygroups(Generic.Heading, Punctuation, using(this))), + (r'^%description', Name.Decorator, 'description'), + (r'^%changelog', Name.Decorator, 'changelog'), + (r'^(%' + _directives + ')(.*)$', bygroups(Name.Decorator, Text)), + (r'%(attr|defattr|dir|doc(?:dir)?|setup|config(?:ure)?|' + r'make(?:install)|ghost|patch[0-9]+|find_lang|exclude|verify)', + Keyword), + include('interpol'), + (r"'.*?'", String.Single), + (r'"', String.Double, 'string'), + (r'.', Text), + ], + 'macro': [ + (r'%define.*\n', Comment.Preproc), + (r'%\{\!\?.*%define.*\}', Comment.Preproc), + (r'(%(?:if(?:n?arch)?|else(?:if)?|endif))(.*)$', + bygroups(Comment.Preproc, Text)), + ], + 'interpol': [ + (r'%\{?__[a-z_]+\}?', Name.Function), + (r'%\{?_([a-z_]+dir|[a-z_]+path|prefix)\}?', Keyword.Pseudo), + (r'%\{\?\w+\}', Name.Variable), + 
(r'\$\{?RPM_[A-Z0-9_]+\}?', Name.Variable.Global), + (r'%\{[a-zA-Z]\w+\}', Keyword.Constant), + ] + } + + +class SourcesListLexer(RegexLexer): + """ + Lexer that highlights debian sources.list files. + + .. versionadded:: 0.7 + """ + + name = 'Debian Sourcelist' + aliases = ['sourceslist', 'sources.list', 'debsources'] + filenames = ['sources.list'] + mimetype = ['application/x-debian-sourceslist'] + + tokens = { + 'root': [ + (r'\s+', Text), + (r'#.*?$', Comment), + (r'^(deb(?:-src)?)(\s+)', + bygroups(Keyword, Text), 'distribution') + ], + 'distribution': [ + (r'#.*?$', Comment, '#pop'), + (r'\$\(ARCH\)', Name.Variable), + (r'[^\s$[]+', String), + (r'\[', String.Other, 'escaped-distribution'), + (r'\$', String), + (r'\s+', Text, 'components') + ], + 'escaped-distribution': [ + (r'\]', String.Other, '#pop'), + (r'\$\(ARCH\)', Name.Variable), + (r'[^\]$]+', String.Other), + (r'\$', String.Other) + ], + 'components': [ + (r'#.*?$', Comment, '#pop:2'), + (r'$', Text, '#pop:2'), + (r'\s+', Text), + (r'\S+', Keyword.Pseudo), + ] + } + + def analyse_text(text): + for line in text.splitlines(): + line = line.strip() + if line.startswith('deb ') or line.startswith('deb-src '): + return True + + +class DebianControlLexer(RegexLexer): + """ + Lexer for Debian ``control`` files and ``apt-cache show `` outputs. + + .. 
versionadded:: 0.9 + """ + name = 'Debian Control file' + aliases = ['control', 'debcontrol'] + filenames = ['control'] + + tokens = { + 'root': [ + (r'^(Description)', Keyword, 'description'), + (r'^(Maintainer)(:\s*)', bygroups(Keyword, Text), 'maintainer'), + (r'^((Build-)?Depends)', Keyword, 'depends'), + (r'^((?:Python-)?Version)(:\s*)(\S+)$', + bygroups(Keyword, Text, Number)), + (r'^((?:Installed-)?Size)(:\s*)(\S+)$', + bygroups(Keyword, Text, Number)), + (r'^(MD5Sum|SHA1|SHA256)(:\s*)(\S+)$', + bygroups(Keyword, Text, Number)), + (r'^([a-zA-Z\-0-9\.]*?)(:\s*)(.*?)$', + bygroups(Keyword, Whitespace, String)), + ], + 'maintainer': [ + (r'<[^>]+>', Generic.Strong), + (r'<[^>]+>$', Generic.Strong, '#pop'), + (r',\n?', Text), + (r'.', Text), + ], + 'description': [ + (r'(.*)(Homepage)(: )(\S+)', + bygroups(Text, String, Name, Name.Class)), + (r':.*\n', Generic.Strong), + (r' .*\n', Text), + default('#pop'), + ], + 'depends': [ + (r':\s*', Text), + (r'(\$)(\{)(\w+\s*:\s*\w+)', bygroups(Operator, Text, Name.Entity)), + (r'\(', Text, 'depend_vers'), + (r',', Text), + (r'\|', Operator), + (r'[\s]+', Text), + (r'[})]\s*$', Text, '#pop'), + (r'\}', Text), + (r'[^,]$', Name.Function, '#pop'), + (r'([+.a-zA-Z0-9-])(\s*)', bygroups(Name.Function, Text)), + (r'\[.*?\]', Name.Entity), + ], + 'depend_vers': [ + (r'\),', Text, '#pop'), + (r'\)[^,]', Text, '#pop:2'), + (r'([><=]+)(\s*)([^)]+)', bygroups(Operator, Text, Number)) + ] + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/int_fiction.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/int_fiction.py new file mode 100644 index 0000000..7b004c2 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/int_fiction.py @@ -0,0 +1,1342 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.int_fiction + ~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for interactive fiction languages. 
+ + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include, bygroups, using, \ + this, default, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Error, Generic + +__all__ = ['Inform6Lexer', 'Inform6TemplateLexer', 'Inform7Lexer', + 'Tads3Lexer'] + + +class Inform6Lexer(RegexLexer): + """ + For `Inform 6 `_ source code. + + .. versionadded:: 2.0 + """ + + name = 'Inform 6' + aliases = ['inform6', 'i6'] + filenames = ['*.inf'] + + flags = re.MULTILINE | re.DOTALL | re.UNICODE + + _name = r'[a-zA-Z_]\w*' + + # Inform 7 maps these four character classes to their ASCII + # equivalents. To support Inform 6 inclusions within Inform 7, + # Inform6Lexer maps them too. + _dash = u'\\-\u2010-\u2014' + _dquote = u'"\u201c\u201d' + _squote = u"'\u2018\u2019" + _newline = u'\\n\u0085\u2028\u2029' + + tokens = { + 'root': [ + (r'\A(!%%[^%s]*[%s])+' % (_newline, _newline), Comment.Preproc, + 'directive'), + default('directive') + ], + '_whitespace': [ + (r'\s+', Text), + (r'![^%s]*' % _newline, Comment.Single) + ], + 'default': [ + include('_whitespace'), + (r'\[', Punctuation, 'many-values'), # Array initialization + (r':|(?=;)', Punctuation, '#pop'), + (r'<', Punctuation), # Second angle bracket in an action statement + default(('expression', '_expression')) + ], + + # Expressions + '_expression': [ + include('_whitespace'), + (r'(?=sp\b)', Text, '#pop'), + (r'(?=[%s%s$0-9#a-zA-Z_])' % (_dquote, _squote), Text, + ('#pop', 'value')), + (r'\+\+|[%s]{1,2}(?!>)|~~?' 
% _dash, Operator), + (r'(?=[()\[%s,?@{:;])' % _dash, Text, '#pop') + ], + 'expression': [ + include('_whitespace'), + (r'\(', Punctuation, ('expression', '_expression')), + (r'\)', Punctuation, '#pop'), + (r'\[', Punctuation, ('#pop', 'statements', 'locals')), + (r'>(?=(\s+|(![^%s]*))*[>;])' % _newline, Punctuation), + (r'\+\+|[%s]{2}(?!>)' % _dash, Operator), + (r',', Punctuation, '_expression'), + (r'&&?|\|\|?|[=~><]?=|[%s]{1,2}>?|\.\.?[&#]?|::|[<>+*/%%]' % _dash, + Operator, '_expression'), + (r'(has|hasnt|in|notin|ofclass|or|provides)\b', Operator.Word, + '_expression'), + (r'sp\b', Name), + (r'\?~?', Name.Label, 'label?'), + (r'[@{]', Error), + default('#pop') + ], + '_assembly-expression': [ + (r'\(', Punctuation, ('#push', '_expression')), + (r'[\[\]]', Punctuation), + (r'[%s]>' % _dash, Punctuation, '_expression'), + (r'sp\b', Keyword.Pseudo), + (r';', Punctuation, '#pop:3'), + include('expression') + ], + '_for-expression': [ + (r'\)', Punctuation, '#pop:2'), + (r':', Punctuation, '#pop'), + include('expression') + ], + '_keyword-expression': [ + (r'(from|near|to)\b', Keyword, '_expression'), + include('expression') + ], + '_list-expression': [ + (r',', Punctuation, '#pop'), + include('expression') + ], + '_object-expression': [ + (r'has\b', Keyword.Declaration, '#pop'), + include('_list-expression') + ], + + # Values + 'value': [ + include('_whitespace'), + # Strings + (r'[%s][^@][%s]' % (_squote, _squote), String.Char, '#pop'), + (r'([%s])(@\{[0-9a-fA-F]{1,4}\})([%s])' % (_squote, _squote), + bygroups(String.Char, String.Escape, String.Char), '#pop'), + (r'([%s])(@.{2})([%s])' % (_squote, _squote), + bygroups(String.Char, String.Escape, String.Char), '#pop'), + (r'[%s]' % _squote, String.Single, ('#pop', 'dictionary-word')), + (r'[%s]' % _dquote, String.Double, ('#pop', 'string')), + # Numbers + (r'\$[+%s][0-9]*\.?[0-9]*([eE][+%s]?[0-9]+)?' 
% (_dash, _dash), + Number.Float, '#pop'), + (r'\$[0-9a-fA-F]+', Number.Hex, '#pop'), + (r'\$\$[01]+', Number.Bin, '#pop'), + (r'[0-9]+', Number.Integer, '#pop'), + # Values prefixed by hashes + (r'(##|#a\$)(%s)' % _name, bygroups(Operator, Name), '#pop'), + (r'(#g\$)(%s)' % _name, + bygroups(Operator, Name.Variable.Global), '#pop'), + (r'#[nw]\$', Operator, ('#pop', 'obsolete-dictionary-word')), + (r'(#r\$)(%s)' % _name, bygroups(Operator, Name.Function), '#pop'), + (r'#', Name.Builtin, ('#pop', 'system-constant')), + # System functions + (words(( + 'child', 'children', 'elder', 'eldest', 'glk', 'indirect', 'metaclass', + 'parent', 'random', 'sibling', 'younger', 'youngest'), suffix=r'\b'), + Name.Builtin, '#pop'), + # Metaclasses + (r'(?i)(Class|Object|Routine|String)\b', Name.Builtin, '#pop'), + # Veneer routines + (words(( + 'Box__Routine', 'CA__Pr', 'CDefArt', 'CInDefArt', 'Cl__Ms', + 'Copy__Primitive', 'CP__Tab', 'DA__Pr', 'DB__Pr', 'DefArt', 'Dynam__String', + 'EnglishNumber', 'Glk__Wrap', 'IA__Pr', 'IB__Pr', 'InDefArt', 'Main__', + 'Meta__class', 'OB__Move', 'OB__Remove', 'OC__Cl', 'OP__Pr', 'Print__Addr', + 'Print__PName', 'PrintShortName', 'RA__Pr', 'RA__Sc', 'RL__Pr', 'R_Process', + 'RT__ChG', 'RT__ChGt', 'RT__ChLDB', 'RT__ChLDW', 'RT__ChPR', 'RT__ChPrintA', + 'RT__ChPrintC', 'RT__ChPrintO', 'RT__ChPrintS', 'RT__ChPS', 'RT__ChR', + 'RT__ChSTB', 'RT__ChSTW', 'RT__ChT', 'RT__Err', 'RT__TrPS', 'RV__Pr', + 'Symb__Tab', 'Unsigned__Compare', 'WV__Pr', 'Z__Region'), + prefix='(?i)', suffix=r'\b'), + Name.Builtin, '#pop'), + # Other built-in symbols + (words(( + 'call', 'copy', 'create', 'DEBUG', 'destroy', 'DICT_CHAR_SIZE', + 'DICT_ENTRY_BYTES', 'DICT_IS_UNICODE', 'DICT_WORD_SIZE', 'false', + 'FLOAT_INFINITY', 'FLOAT_NAN', 'FLOAT_NINFINITY', 'GOBJFIELD_CHAIN', + 'GOBJFIELD_CHILD', 'GOBJFIELD_NAME', 'GOBJFIELD_PARENT', + 'GOBJFIELD_PROPTAB', 'GOBJFIELD_SIBLING', 'GOBJ_EXT_START', + 'GOBJ_TOTAL_LENGTH', 'Grammar__Version', 'INDIV_PROP_START', 'INFIX', + 
'infix__watching', 'MODULE_MODE', 'name', 'nothing', 'NUM_ATTR_BYTES', 'print', + 'print_to_array', 'recreate', 'remaining', 'self', 'sender', 'STRICT_MODE', + 'sw__var', 'sys__glob0', 'sys__glob1', 'sys__glob2', 'sys_statusline_flag', + 'TARGET_GLULX', 'TARGET_ZCODE', 'temp__global2', 'temp__global3', + 'temp__global4', 'temp_global', 'true', 'USE_MODULES', 'WORDSIZE'), + prefix='(?i)', suffix=r'\b'), + Name.Builtin, '#pop'), + # Other values + (_name, Name, '#pop') + ], + # Strings + 'dictionary-word': [ + (r'[~^]+', String.Escape), + (r'[^~^\\@({%s]+' % _squote, String.Single), + (r'[({]', String.Single), + (r'@\{[0-9a-fA-F]{,4}\}', String.Escape), + (r'@.{2}', String.Escape), + (r'[%s]' % _squote, String.Single, '#pop') + ], + 'string': [ + (r'[~^]+', String.Escape), + (r'[^~^\\@({%s]+' % _dquote, String.Double), + (r'[({]', String.Double), + (r'\\', String.Escape), + (r'@(\\\s*[%s]\s*)*@((\\\s*[%s]\s*)*[0-9])*' % + (_newline, _newline), String.Escape), + (r'@(\\\s*[%s]\s*)*\{((\\\s*[%s]\s*)*[0-9a-fA-F]){,4}' + r'(\\\s*[%s]\s*)*\}' % (_newline, _newline, _newline), + String.Escape), + (r'@(\\\s*[%s]\s*)*.(\\\s*[%s]\s*)*.' 
% (_newline, _newline), + String.Escape), + (r'[%s]' % _dquote, String.Double, '#pop') + ], + 'plain-string': [ + (r'[^~^\\({\[\]%s]+' % _dquote, String.Double), + (r'[~^({\[\]]', String.Double), + (r'\\', String.Escape), + (r'[%s]' % _dquote, String.Double, '#pop') + ], + # Names + '_constant': [ + include('_whitespace'), + (_name, Name.Constant, '#pop'), + include('value') + ], + '_global': [ + include('_whitespace'), + (_name, Name.Variable.Global, '#pop'), + include('value') + ], + 'label?': [ + include('_whitespace'), + (_name, Name.Label, '#pop'), + default('#pop') + ], + 'variable?': [ + include('_whitespace'), + (_name, Name.Variable, '#pop'), + default('#pop') + ], + # Values after hashes + 'obsolete-dictionary-word': [ + (r'\S\w*', String.Other, '#pop') + ], + 'system-constant': [ + include('_whitespace'), + (_name, Name.Builtin, '#pop') + ], + + # Directives + 'directive': [ + include('_whitespace'), + (r'#', Punctuation), + (r';', Punctuation, '#pop'), + (r'\[', Punctuation, + ('default', 'statements', 'locals', 'routine-name?')), + (words(( + 'abbreviate', 'endif', 'dictionary', 'ifdef', 'iffalse', 'ifndef', 'ifnot', + 'iftrue', 'ifv3', 'ifv5', 'release', 'serial', 'switches', 'system_file', + 'version'), prefix='(?i)', suffix=r'\b'), + Keyword, 'default'), + (r'(?i)(array|global)\b', Keyword, + ('default', 'directive-keyword?', '_global')), + (r'(?i)attribute\b', Keyword, ('default', 'alias?', '_constant')), + (r'(?i)class\b', Keyword, + ('object-body', 'duplicates', 'class-name')), + (r'(?i)(constant|default)\b', Keyword, + ('default', 'expression', '_constant')), + (r'(?i)(end\b)(.*)', bygroups(Keyword, Text)), + (r'(?i)(extend|verb)\b', Keyword, 'grammar'), + (r'(?i)fake_action\b', Keyword, ('default', '_constant')), + (r'(?i)import\b', Keyword, 'manifest'), + (r'(?i)(include|link)\b', Keyword, + ('default', 'before-plain-string')), + (r'(?i)(lowstring|undef)\b', Keyword, ('default', '_constant')), + (r'(?i)message\b', Keyword, ('default', 
'diagnostic')), + (r'(?i)(nearby|object)\b', Keyword, + ('object-body', '_object-head')), + (r'(?i)property\b', Keyword, + ('default', 'alias?', '_constant', 'property-keyword*')), + (r'(?i)replace\b', Keyword, + ('default', 'routine-name?', 'routine-name?')), + (r'(?i)statusline\b', Keyword, ('default', 'directive-keyword?')), + (r'(?i)stub\b', Keyword, ('default', 'routine-name?')), + (r'(?i)trace\b', Keyword, + ('default', 'trace-keyword?', 'trace-keyword?')), + (r'(?i)zcharacter\b', Keyword, + ('default', 'directive-keyword?', 'directive-keyword?')), + (_name, Name.Class, ('object-body', '_object-head')) + ], + # [, Replace, Stub + 'routine-name?': [ + include('_whitespace'), + (_name, Name.Function, '#pop'), + default('#pop') + ], + 'locals': [ + include('_whitespace'), + (r';', Punctuation, '#pop'), + (r'\*', Punctuation), + (_name, Name.Variable) + ], + # Array + 'many-values': [ + include('_whitespace'), + (r';', Punctuation), + (r'\]', Punctuation, '#pop'), + (r':', Error), + default(('expression', '_expression')) + ], + # Attribute, Property + 'alias?': [ + include('_whitespace'), + (r'alias\b', Keyword, ('#pop', '_constant')), + default('#pop') + ], + # Class, Object, Nearby + 'class-name': [ + include('_whitespace'), + (r'(?=[,;]|(class|has|private|with)\b)', Text, '#pop'), + (_name, Name.Class, '#pop') + ], + 'duplicates': [ + include('_whitespace'), + (r'\(', Punctuation, ('#pop', 'expression', '_expression')), + default('#pop') + ], + '_object-head': [ + (r'[%s]>' % _dash, Punctuation), + (r'(class|has|private|with)\b', Keyword.Declaration, '#pop'), + include('_global') + ], + 'object-body': [ + include('_whitespace'), + (r';', Punctuation, '#pop:2'), + (r',', Punctuation), + (r'class\b', Keyword.Declaration, 'class-segment'), + (r'(has|private|with)\b', Keyword.Declaration), + (r':', Error), + default(('_object-expression', '_expression')) + ], + 'class-segment': [ + include('_whitespace'), + (r'(?=[,;]|(class|has|private|with)\b)', Text, '#pop'), + 
(_name, Name.Class), + default('value') + ], + # Extend, Verb + 'grammar': [ + include('_whitespace'), + (r'=', Punctuation, ('#pop', 'default')), + (r'\*', Punctuation, ('#pop', 'grammar-line')), + default('_directive-keyword') + ], + 'grammar-line': [ + include('_whitespace'), + (r';', Punctuation, '#pop'), + (r'[/*]', Punctuation), + (r'[%s]>' % _dash, Punctuation, 'value'), + (r'(noun|scope)\b', Keyword, '=routine'), + default('_directive-keyword') + ], + '=routine': [ + include('_whitespace'), + (r'=', Punctuation, 'routine-name?'), + default('#pop') + ], + # Import + 'manifest': [ + include('_whitespace'), + (r';', Punctuation, '#pop'), + (r',', Punctuation), + (r'(?i)global\b', Keyword, '_global'), + default('_global') + ], + # Include, Link, Message + 'diagnostic': [ + include('_whitespace'), + (r'[%s]' % _dquote, String.Double, ('#pop', 'message-string')), + default(('#pop', 'before-plain-string', 'directive-keyword?')) + ], + 'before-plain-string': [ + include('_whitespace'), + (r'[%s]' % _dquote, String.Double, ('#pop', 'plain-string')) + ], + 'message-string': [ + (r'[~^]+', String.Escape), + include('plain-string') + ], + + # Keywords used in directives + '_directive-keyword!': [ + include('_whitespace'), + (words(( + 'additive', 'alias', 'buffer', 'class', 'creature', 'data', 'error', 'fatalerror', + 'first', 'has', 'held', 'initial', 'initstr', 'last', 'long', 'meta', 'multi', + 'multiexcept', 'multiheld', 'multiinside', 'noun', 'number', 'only', 'private', + 'replace', 'reverse', 'scope', 'score', 'special', 'string', 'table', 'terminating', + 'time', 'topic', 'warning', 'with'), suffix=r'\b'), + Keyword, '#pop'), + (r'[%s]{1,2}>|[+=]' % _dash, Punctuation, '#pop') + ], + '_directive-keyword': [ + include('_directive-keyword!'), + include('value') + ], + 'directive-keyword?': [ + include('_directive-keyword!'), + default('#pop') + ], + 'property-keyword*': [ + include('_whitespace'), + (r'(additive|long)\b', Keyword), + default('#pop') + ], + 
'trace-keyword?': [ + include('_whitespace'), + (words(( + 'assembly', 'dictionary', 'expressions', 'lines', 'linker', + 'objects', 'off', 'on', 'symbols', 'tokens', 'verbs'), suffix=r'\b'), + Keyword, '#pop'), + default('#pop') + ], + + # Statements + 'statements': [ + include('_whitespace'), + (r'\]', Punctuation, '#pop'), + (r'[;{}]', Punctuation), + (words(( + 'box', 'break', 'continue', 'default', 'give', 'inversion', + 'new_line', 'quit', 'read', 'remove', 'return', 'rfalse', 'rtrue', + 'spaces', 'string', 'until'), suffix=r'\b'), + Keyword, 'default'), + (r'(do|else)\b', Keyword), + (r'(font|style)\b', Keyword, + ('default', 'miscellaneous-keyword?')), + (r'for\b', Keyword, ('for', '(?')), + (r'(if|switch|while)', Keyword, + ('expression', '_expression', '(?')), + (r'(jump|save|restore)\b', Keyword, ('default', 'label?')), + (r'objectloop\b', Keyword, + ('_keyword-expression', 'variable?', '(?')), + (r'print(_ret)?\b|(?=[%s])' % _dquote, Keyword, 'print-list'), + (r'\.', Name.Label, 'label?'), + (r'@', Keyword, 'opcode'), + (r'#(?![agrnw]\$|#)', Punctuation, 'directive'), + (r'<', Punctuation, 'default'), + (r'move\b', Keyword, + ('default', '_keyword-expression', '_expression')), + default(('default', '_keyword-expression', '_expression')) + ], + 'miscellaneous-keyword?': [ + include('_whitespace'), + (r'(bold|fixed|from|near|off|on|reverse|roman|to|underline)\b', + Keyword, '#pop'), + (r'(a|A|an|address|char|name|number|object|property|string|the|' + r'The)\b(?=(\s+|(![^%s]*))*\))' % _newline, Keyword.Pseudo, + '#pop'), + (r'%s(?=(\s+|(![^%s]*))*\))' % (_name, _newline), Name.Function, + '#pop'), + default('#pop') + ], + '(?': [ + include('_whitespace'), + (r'\(', Punctuation, '#pop'), + default('#pop') + ], + 'for': [ + include('_whitespace'), + (r';', Punctuation, ('_for-expression', '_expression')), + default(('_for-expression', '_expression')) + ], + 'print-list': [ + include('_whitespace'), + (r';', Punctuation, '#pop'), + (r':', Error), + 
default(('_list-expression', '_expression', '_list-expression', 'form')) + ], + 'form': [ + include('_whitespace'), + (r'\(', Punctuation, ('#pop', 'miscellaneous-keyword?')), + default('#pop') + ], + + # Assembly + 'opcode': [ + include('_whitespace'), + (r'[%s]' % _dquote, String.Double, ('operands', 'plain-string')), + (_name, Keyword, 'operands') + ], + 'operands': [ + (r':', Error), + default(('_assembly-expression', '_expression')) + ] + } + + def get_tokens_unprocessed(self, text): + # 'in' is either a keyword or an operator. + # If the token two tokens after 'in' is ')', 'in' is a keyword: + # objectloop(a in b) + # Otherwise, it is an operator: + # objectloop(a in b && true) + objectloop_queue = [] + objectloop_token_count = -1 + previous_token = None + for index, token, value in RegexLexer.get_tokens_unprocessed(self, + text): + if previous_token is Name.Variable and value == 'in': + objectloop_queue = [[index, token, value]] + objectloop_token_count = 2 + elif objectloop_token_count > 0: + if token not in Comment and token not in Text: + objectloop_token_count -= 1 + objectloop_queue.append((index, token, value)) + else: + if objectloop_token_count == 0: + if objectloop_queue[-1][2] == ')': + objectloop_queue[0][1] = Keyword + while objectloop_queue: + yield objectloop_queue.pop(0) + objectloop_token_count = -1 + yield index, token, value + if token not in Comment and token not in Text: + previous_token = token + while objectloop_queue: + yield objectloop_queue.pop(0) + + +class Inform7Lexer(RegexLexer): + """ + For `Inform 7 `_ source code. + + .. versionadded:: 2.0 + """ + + name = 'Inform 7' + aliases = ['inform7', 'i7'] + filenames = ['*.ni', '*.i7x'] + + flags = re.MULTILINE | re.DOTALL | re.UNICODE + + _dash = Inform6Lexer._dash + _dquote = Inform6Lexer._dquote + _newline = Inform6Lexer._newline + _start = r'\A|(?<=[%s])' % _newline + + # There are three variants of Inform 7, differing in how to + # interpret at signs and braces in I6T. 
In top-level inclusions, at + # signs in the first column are inweb syntax. In phrase definitions + # and use options, tokens in braces are treated as I7. Use options + # also interpret "{N}". + tokens = {} + token_variants = ['+i6t-not-inline', '+i6t-inline', '+i6t-use-option'] + + for level in token_variants: + tokens[level] = { + '+i6-root': list(Inform6Lexer.tokens['root']), + '+i6t-root': [ # For Inform6TemplateLexer + (r'[^%s]*' % Inform6Lexer._newline, Comment.Preproc, + ('directive', '+p')) + ], + 'root': [ + (r'(\|?\s)+', Text), + (r'\[', Comment.Multiline, '+comment'), + (r'[%s]' % _dquote, Generic.Heading, + ('+main', '+titling', '+titling-string')), + default(('+main', '+heading?')) + ], + '+titling-string': [ + (r'[^%s]+' % _dquote, Generic.Heading), + (r'[%s]' % _dquote, Generic.Heading, '#pop') + ], + '+titling': [ + (r'\[', Comment.Multiline, '+comment'), + (r'[^%s.;:|%s]+' % (_dquote, _newline), Generic.Heading), + (r'[%s]' % _dquote, Generic.Heading, '+titling-string'), + (r'[%s]{2}|(?<=[\s%s])\|[\s%s]' % (_newline, _dquote, _dquote), + Text, ('#pop', '+heading?')), + (r'[.;:]|(?<=[\s%s])\|' % _dquote, Text, '#pop'), + (r'[|%s]' % _newline, Generic.Heading) + ], + '+main': [ + (r'(?i)[^%s:a\[(|%s]+' % (_dquote, _newline), Text), + (r'[%s]' % _dquote, String.Double, '+text'), + (r':', Text, '+phrase-definition'), + (r'(?i)\bas\b', Text, '+use-option'), + (r'\[', Comment.Multiline, '+comment'), + (r'(\([%s])(.*?)([%s]\))' % (_dash, _dash), + bygroups(Punctuation, + using(this, state=('+i6-root', 'directive'), + i6t='+i6t-not-inline'), Punctuation)), + (r'(%s|(?<=[\s;:.%s]))\|\s|[%s]{2,}' % + (_start, _dquote, _newline), Text, '+heading?'), + (r'(?i)[a(|%s]' % _newline, Text) + ], + '+phrase-definition': [ + (r'\s+', Text), + (r'\[', Comment.Multiline, '+comment'), + (r'(\([%s])(.*?)([%s]\))' % (_dash, _dash), + bygroups(Punctuation, + using(this, state=('+i6-root', 'directive', + 'default', 'statements'), + i6t='+i6t-inline'), Punctuation), '#pop'), 
+ default('#pop') + ], + '+use-option': [ + (r'\s+', Text), + (r'\[', Comment.Multiline, '+comment'), + (r'(\([%s])(.*?)([%s]\))' % (_dash, _dash), + bygroups(Punctuation, + using(this, state=('+i6-root', 'directive'), + i6t='+i6t-use-option'), Punctuation), '#pop'), + default('#pop') + ], + '+comment': [ + (r'[^\[\]]+', Comment.Multiline), + (r'\[', Comment.Multiline, '#push'), + (r'\]', Comment.Multiline, '#pop') + ], + '+text': [ + (r'[^\[%s]+' % _dquote, String.Double), + (r'\[.*?\]', String.Interpol), + (r'[%s]' % _dquote, String.Double, '#pop') + ], + '+heading?': [ + (r'(\|?\s)+', Text), + (r'\[', Comment.Multiline, '+comment'), + (r'[%s]{4}\s+' % _dash, Text, '+documentation-heading'), + (r'[%s]{1,3}' % _dash, Text), + (r'(?i)(volume|book|part|chapter|section)\b[^%s]*' % _newline, + Generic.Heading, '#pop'), + default('#pop') + ], + '+documentation-heading': [ + (r'\s+', Text), + (r'\[', Comment.Multiline, '+comment'), + (r'(?i)documentation\s+', Text, '+documentation-heading2'), + default('#pop') + ], + '+documentation-heading2': [ + (r'\s+', Text), + (r'\[', Comment.Multiline, '+comment'), + (r'[%s]{4}\s' % _dash, Text, '+documentation'), + default('#pop:2') + ], + '+documentation': [ + (r'(?i)(%s)\s*(chapter|example)\s*:[^%s]*' % + (_start, _newline), Generic.Heading), + (r'(?i)(%s)\s*section\s*:[^%s]*' % (_start, _newline), + Generic.Subheading), + (r'((%s)\t.*?[%s])+' % (_start, _newline), + using(this, state='+main')), + (r'[^%s\[]+|[%s\[]' % (_newline, _newline), Text), + (r'\[', Comment.Multiline, '+comment'), + ], + '+i6t-not-inline': [ + (r'(%s)@c( .*?)?([%s]|\Z)' % (_start, _newline), + Comment.Preproc), + (r'(%s)@([%s]+|Purpose:)[^%s]*' % (_start, _dash, _newline), + Comment.Preproc), + (r'(%s)@p( .*?)?([%s]|\Z)' % (_start, _newline), + Generic.Heading, '+p') + ], + '+i6t-use-option': [ + include('+i6t-not-inline'), + (r'(\{)(N)(\})', bygroups(Punctuation, Text, Punctuation)) + ], + '+i6t-inline': [ + (r'(\{)(\S[^}]*)?(\})', + 
bygroups(Punctuation, using(this, state='+main'), + Punctuation)) + ], + '+i6t': [ + (r'(\{[%s])(![^}]*)(\}?)' % _dash, + bygroups(Punctuation, Comment.Single, Punctuation)), + (r'(\{[%s])(lines)(:)([^}]*)(\}?)' % _dash, + bygroups(Punctuation, Keyword, Punctuation, Text, + Punctuation), '+lines'), + (r'(\{[%s])([^:}]*)(:?)([^}]*)(\}?)' % _dash, + bygroups(Punctuation, Keyword, Punctuation, Text, + Punctuation)), + (r'(\(\+)(.*?)(\+\)|\Z)', + bygroups(Punctuation, using(this, state='+main'), + Punctuation)) + ], + '+p': [ + (r'[^@]+', Comment.Preproc), + (r'(%s)@c( .*?)?([%s]|\Z)' % (_start, _newline), + Comment.Preproc, '#pop'), + (r'(%s)@([%s]|Purpose:)' % (_start, _dash), Comment.Preproc), + (r'(%s)@p( .*?)?([%s]|\Z)' % (_start, _newline), + Generic.Heading), + (r'@', Comment.Preproc) + ], + '+lines': [ + (r'(%s)@c( .*?)?([%s]|\Z)' % (_start, _newline), + Comment.Preproc), + (r'(%s)@([%s]|Purpose:)[^%s]*' % (_start, _dash, _newline), + Comment.Preproc), + (r'(%s)@p( .*?)?([%s]|\Z)' % (_start, _newline), + Generic.Heading, '+p'), + (r'(%s)@\w*[ %s]' % (_start, _newline), Keyword), + (r'![^%s]*' % _newline, Comment.Single), + (r'(\{)([%s]endlines)(\})' % _dash, + bygroups(Punctuation, Keyword, Punctuation), '#pop'), + (r'[^@!{]+?([%s]|\Z)|.' % _newline, Text) + ] + } + # Inform 7 can include snippets of Inform 6 template language, + # so all of Inform6Lexer's states are copied here, with + # modifications to account for template syntax. Inform7Lexer's + # own states begin with '+' to avoid name conflicts. Some of + # Inform6Lexer's states begin with '_': these are not modified. + # They deal with template syntax either by including modified + # states, or by matching r'' then pushing to modified states. 
+ for token in Inform6Lexer.tokens: + if token == 'root': + continue + tokens[level][token] = list(Inform6Lexer.tokens[token]) + if not token.startswith('_'): + tokens[level][token][:0] = [include('+i6t'), include(level)] + + def __init__(self, **options): + level = options.get('i6t', '+i6t-not-inline') + if level not in self._all_tokens: + self._tokens = self.__class__.process_tokendef(level) + else: + self._tokens = self._all_tokens[level] + RegexLexer.__init__(self, **options) + + +class Inform6TemplateLexer(Inform7Lexer): + """ + For `Inform 6 template + `_ code. + + .. versionadded:: 2.0 + """ + + name = 'Inform 6 template' + aliases = ['i6t'] + filenames = ['*.i6t'] + + def get_tokens_unprocessed(self, text, stack=('+i6t-root',)): + return Inform7Lexer.get_tokens_unprocessed(self, text, stack) + + +class Tads3Lexer(RegexLexer): + """ + For `TADS 3 `_ source code. + """ + + name = 'TADS 3' + aliases = ['tads3'] + filenames = ['*.t'] + + flags = re.DOTALL | re.MULTILINE + + _comment_single = r'(?://(?:[^\\\n]|\\+[\w\W])*$)' + _comment_multiline = r'(?:/\*(?:[^*]|\*(?!/))*\*/)' + _escape = (r'(?:\\(?:[\n\\<>"\'^v bnrt]|u[\da-fA-F]{,4}|x[\da-fA-F]{,2}|' + r'[0-3]?[0-7]{1,2}))') + _name = r'(?:[_a-zA-Z]\w*)' + _no_quote = r'(?=\s|\\?>)' + _operator = (r'(?:&&|\|\||\+\+|--|\?\?|::|[.,@\[\]~]|' + r'(?:[=+\-*/%!&|^]|<>?>?)=?)') + _ws = r'(?:\\|\s|%s|%s)' % (_comment_single, _comment_multiline) + _ws_pp = r'(?:\\\n|[^\S\n]|%s|%s)' % (_comment_single, _comment_multiline) + + def _make_string_state(triple, double, verbatim=None, _escape=_escape): + if verbatim: + verbatim = ''.join(['(?:%s|%s)' % (re.escape(c.lower()), + re.escape(c.upper())) + for c in verbatim]) + char = r'"' if double else r"'" + token = String.Double if double else String.Single + escaped_quotes = r'+|%s(?!%s{2})' % (char, char) if triple else r'' + prefix = '%s%s' % ('t' if triple else '', 'd' if double else 's') + tag_state_name = '%sqt' % prefix + state = [] + if triple: + state += [ + (r'%s{3,}' 
% char, token, '#pop'), + (r'\\%s+' % char, String.Escape), + (char, token) + ] + else: + state.append((char, token, '#pop')) + state += [ + include('s/verbatim'), + (r'[^\\<&{}%s]+' % char, token) + ] + if verbatim: + # This regex can't use `(?i)` because escape sequences are + # case-sensitive. `<\XMP>` works; `<\xmp>` doesn't. + state.append((r'\\?<(/|\\\\|(?!%s)\\)%s(?=[\s=>])' % + (_escape, verbatim), + Name.Tag, ('#pop', '%sqs' % prefix, tag_state_name))) + else: + state += [ + (r'\\?<\\%s]|<(?!<)|\\%s%s|%s|\\.)*>?' % + (char, char, escaped_quotes, _escape), Comment.Multiline), + (r'(?i)\\?]|\\>)', Name.Tag, + ('#pop', '%sqs/listing' % prefix, tag_state_name)), + (r'(?i)\\?]|\\>)', Name.Tag, + ('#pop', '%sqs/xmp' % prefix, tag_state_name)), + (r'\\?<([^\s=><\\%s]|<(?!<)|\\%s%s|%s|\\.)*' % + (char, char, escaped_quotes, _escape), Name.Tag, + tag_state_name), + include('s/entity') + ] + state += [ + include('s/escape'), + (r'\{([^}<\\%s]|<(?!<)|\\%s%s|%s|\\.)*\}' % + (char, char, escaped_quotes, _escape), String.Interpol), + (r'[\\&{}<]', token) + ] + return state + + def _make_tag_state(triple, double, _escape=_escape): + char = r'"' if double else r"'" + quantifier = r'{3,}' if triple else r'' + state_name = '%s%sqt' % ('t' if triple else '', 'd' if double else 's') + token = String.Double if double else String.Single + escaped_quotes = r'+|%s(?!%s{2})' % (char, char) if triple else r'' + return [ + (r'%s%s' % (char, quantifier), token, '#pop:2'), + (r'(\s|\\\n)+', Text), + (r'(=)(\\?")', bygroups(Punctuation, String.Double), + 'dqs/%s' % state_name), + (r"(=)(\\?')", bygroups(Punctuation, String.Single), + 'sqs/%s' % state_name), + (r'=', Punctuation, 'uqs/%s' % state_name), + (r'\\?>', Name.Tag, '#pop'), + (r'\{([^}<\\%s]|<(?!<)|\\%s%s|%s|\\.)*\}' % + (char, char, escaped_quotes, _escape), String.Interpol), + (r'([^\s=><\\%s]|<(?!<)|\\%s%s|%s|\\.)+' % + (char, char, escaped_quotes, _escape), Name.Attribute), + include('s/escape'), + include('s/verbatim'), + 
include('s/entity'), + (r'[\\{}&]', Name.Attribute) + ] + + def _make_attribute_value_state(terminator, host_triple, host_double, + _escape=_escape): + token = (String.Double if terminator == r'"' else + String.Single if terminator == r"'" else String.Other) + host_char = r'"' if host_double else r"'" + host_quantifier = r'{3,}' if host_triple else r'' + host_token = String.Double if host_double else String.Single + escaped_quotes = (r'+|%s(?!%s{2})' % (host_char, host_char) + if host_triple else r'') + return [ + (r'%s%s' % (host_char, host_quantifier), host_token, '#pop:3'), + (r'%s%s' % (r'' if token is String.Other else r'\\?', terminator), + token, '#pop'), + include('s/verbatim'), + include('s/entity'), + (r'\{([^}<\\%s]|<(?!<)|\\%s%s|%s|\\.)*\}' % + (host_char, host_char, escaped_quotes, _escape), String.Interpol), + (r'([^\s"\'<%s{}\\&])+' % (r'>' if token is String.Other else r''), + token), + include('s/escape'), + (r'["\'\s&{<}\\]', token) + ] + + tokens = { + 'root': [ + (u'\ufeff', Text), + (r'\{', Punctuation, 'object-body'), + (r';+', Punctuation), + (r'(?=(argcount|break|case|catch|continue|default|definingobj|' + r'delegated|do|else|for|foreach|finally|goto|if|inherited|' + r'invokee|local|nil|new|operator|replaced|return|self|switch|' + r'targetobj|targetprop|throw|true|try|while)\b)', Text, 'block'), + (r'(%s)(%s*)(\()' % (_name, _ws), + bygroups(Name.Function, using(this, state='whitespace'), + Punctuation), + ('block?/root', 'more/parameters', 'main/parameters')), + include('whitespace'), + (r'\++', Punctuation), + (r'[^\s!"%-(*->@-_a-z{-~]+', Error), # Averts an infinite loop + (r'(?!\Z)', Text, 'main/root') + ], + 'main/root': [ + include('main/basic'), + default(('#pop', 'object-body/no-braces', 'classes', 'class')) + ], + 'object-body/no-braces': [ + (r';', Punctuation, '#pop'), + (r'\{', Punctuation, ('#pop', 'object-body')), + include('object-body') + ], + 'object-body': [ + (r';', Punctuation), + (r'\{', Punctuation, '#push'), + (r'\}', 
Punctuation, '#pop'), + (r':', Punctuation, ('classes', 'class')), + (r'(%s?)(%s*)(\()' % (_name, _ws), + bygroups(Name.Function, using(this, state='whitespace'), + Punctuation), + ('block?', 'more/parameters', 'main/parameters')), + (r'(%s)(%s*)(\{)' % (_name, _ws), + bygroups(Name.Function, using(this, state='whitespace'), + Punctuation), 'block'), + (r'(%s)(%s*)(:)' % (_name, _ws), + bygroups(Name.Variable, using(this, state='whitespace'), + Punctuation), + ('object-body/no-braces', 'classes', 'class')), + include('whitespace'), + (r'->|%s' % _operator, Punctuation, 'main'), + default('main/object-body') + ], + 'main/object-body': [ + include('main/basic'), + (r'(%s)(%s*)(=?)' % (_name, _ws), + bygroups(Name.Variable, using(this, state='whitespace'), + Punctuation), ('#pop', 'more', 'main')), + default('#pop:2') + ], + 'block?/root': [ + (r'\{', Punctuation, ('#pop', 'block')), + include('whitespace'), + (r'(?=[[\'"<(:])', Text, # It might be a VerbRule macro. + ('#pop', 'object-body/no-braces', 'grammar', 'grammar-rules')), + # It might be a macro like DefineAction. 
+ default(('#pop', 'object-body/no-braces')) + ], + 'block?': [ + (r'\{', Punctuation, ('#pop', 'block')), + include('whitespace'), + default('#pop') + ], + 'block/basic': [ + (r'[;:]+', Punctuation), + (r'\{', Punctuation, '#push'), + (r'\}', Punctuation, '#pop'), + (r'default\b', Keyword.Reserved), + (r'(%s)(%s*)(:)' % (_name, _ws), + bygroups(Name.Label, using(this, state='whitespace'), + Punctuation)), + include('whitespace') + ], + 'block': [ + include('block/basic'), + (r'(?!\Z)', Text, ('more', 'main')) + ], + 'block/embed': [ + (r'>>', String.Interpol, '#pop'), + include('block/basic'), + (r'(?!\Z)', Text, ('more/embed', 'main')) + ], + 'main/basic': [ + include('whitespace'), + (r'\(', Punctuation, ('#pop', 'more', 'main')), + (r'\[', Punctuation, ('#pop', 'more/list', 'main')), + (r'\{', Punctuation, ('#pop', 'more/inner', 'main/inner', + 'more/parameters', 'main/parameters')), + (r'\*|\.{3}', Punctuation, '#pop'), + (r'(?i)0x[\da-f]+', Number.Hex, '#pop'), + (r'(\d+\.(?!\.)\d*|\.\d+)([eE][-+]?\d+)?|\d+[eE][-+]?\d+', + Number.Float, '#pop'), + (r'0[0-7]+', Number.Oct, '#pop'), + (r'\d+', Number.Integer, '#pop'), + (r'"""', String.Double, ('#pop', 'tdqs')), + (r"'''", String.Single, ('#pop', 'tsqs')), + (r'"', String.Double, ('#pop', 'dqs')), + (r"'", String.Single, ('#pop', 'sqs')), + (r'R"""', String.Regex, ('#pop', 'tdqr')), + (r"R'''", String.Regex, ('#pop', 'tsqr')), + (r'R"', String.Regex, ('#pop', 'dqr')), + (r"R'", String.Regex, ('#pop', 'sqr')), + # Two-token keywords + (r'(extern)(%s+)(object\b)' % _ws, + bygroups(Keyword.Reserved, using(this, state='whitespace'), + Keyword.Reserved)), + (r'(function|method)(%s*)(\()' % _ws, + bygroups(Keyword.Reserved, using(this, state='whitespace'), + Punctuation), + ('#pop', 'block?', 'more/parameters', 'main/parameters')), + (r'(modify)(%s+)(grammar\b)' % _ws, + bygroups(Keyword.Reserved, using(this, state='whitespace'), + Keyword.Reserved), + ('#pop', 'object-body/no-braces', ':', 'grammar')), + 
(r'(new)(%s+(?=(?:function|method)\b))' % _ws, + bygroups(Keyword.Reserved, using(this, state='whitespace'))), + (r'(object)(%s+)(template\b)' % _ws, + bygroups(Keyword.Reserved, using(this, state='whitespace'), + Keyword.Reserved), ('#pop', 'template')), + (r'(string)(%s+)(template\b)' % _ws, + bygroups(Keyword, using(this, state='whitespace'), + Keyword.Reserved), ('#pop', 'function-name')), + # Keywords + (r'(argcount|definingobj|invokee|replaced|targetobj|targetprop)\b', + Name.Builtin, '#pop'), + (r'(break|continue|goto)\b', Keyword.Reserved, ('#pop', 'label')), + (r'(case|extern|if|intrinsic|return|static|while)\b', + Keyword.Reserved), + (r'catch\b', Keyword.Reserved, ('#pop', 'catch')), + (r'class\b', Keyword.Reserved, + ('#pop', 'object-body/no-braces', 'class')), + (r'(default|do|else|finally|try)\b', Keyword.Reserved, '#pop'), + (r'(dictionary|property)\b', Keyword.Reserved, + ('#pop', 'constants')), + (r'enum\b', Keyword.Reserved, ('#pop', 'enum')), + (r'export\b', Keyword.Reserved, ('#pop', 'main')), + (r'(for|foreach)\b', Keyword.Reserved, + ('#pop', 'more/inner', 'main/inner')), + (r'(function|method)\b', Keyword.Reserved, + ('#pop', 'block?', 'function-name')), + (r'grammar\b', Keyword.Reserved, + ('#pop', 'object-body/no-braces', 'grammar')), + (r'inherited\b', Keyword.Reserved, ('#pop', 'inherited')), + (r'local\b', Keyword.Reserved, + ('#pop', 'more/local', 'main/local')), + (r'(modify|replace|switch|throw|transient)\b', Keyword.Reserved, + '#pop'), + (r'new\b', Keyword.Reserved, ('#pop', 'class')), + (r'(nil|true)\b', Keyword.Constant, '#pop'), + (r'object\b', Keyword.Reserved, ('#pop', 'object-body/no-braces')), + (r'operator\b', Keyword.Reserved, ('#pop', 'operator')), + (r'propertyset\b', Keyword.Reserved, + ('#pop', 'propertyset', 'main')), + (r'self\b', Name.Builtin.Pseudo, '#pop'), + (r'template\b', Keyword.Reserved, ('#pop', 'template')), + # Operators + (r'(__objref|defined)(%s*)(\()' % _ws, + bygroups(Operator.Word, using(this, 
state='whitespace'), + Operator), ('#pop', 'more/__objref', 'main')), + (r'delegated\b', Operator.Word), + # Compiler-defined macros and built-in properties + (r'(__DATE__|__DEBUG|__LINE__|__FILE__|' + r'__TADS_MACRO_FORMAT_VERSION|__TADS_SYS_\w*|__TADS_SYSTEM_NAME|' + r'__TADS_VERSION_MAJOR|__TADS_VERSION_MINOR|__TADS3|__TIME__|' + r'construct|finalize|grammarInfo|grammarTag|lexicalParent|' + r'miscVocab|sourceTextGroup|sourceTextGroupName|' + r'sourceTextGroupOrder|sourceTextOrder)\b', Name.Builtin, '#pop') + ], + 'main': [ + include('main/basic'), + (_name, Name, '#pop'), + default('#pop') + ], + 'more/basic': [ + (r'\(', Punctuation, ('more/list', 'main')), + (r'\[', Punctuation, ('more', 'main')), + (r'\.{3}', Punctuation), + (r'->|\.\.', Punctuation, 'main'), + (r'(?=;)|[:)\]]', Punctuation, '#pop'), + include('whitespace'), + (_operator, Operator, 'main'), + (r'\?', Operator, ('main', 'more/conditional', 'main')), + (r'(is|not)(%s+)(in\b)' % _ws, + bygroups(Operator.Word, using(this, state='whitespace'), + Operator.Word)), + (r'[^\s!"%-_a-z{-~]+', Error) # Averts an infinite loop + ], + 'more': [ + include('more/basic'), + default('#pop') + ], + # Then expression (conditional operator) + 'more/conditional': [ + (r':(?!:)', Operator, '#pop'), + include('more') + ], + # Embedded expressions + 'more/embed': [ + (r'>>', String.Interpol, '#pop:2'), + include('more') + ], + # For/foreach loop initializer or short-form anonymous function + 'main/inner': [ + (r'\(', Punctuation, ('#pop', 'more/inner', 'main/inner')), + (r'local\b', Keyword.Reserved, ('#pop', 'main/local')), + include('main') + ], + 'more/inner': [ + (r'\}', Punctuation, '#pop'), + (r',', Punctuation, 'main/inner'), + (r'(in|step)\b', Keyword, 'main/inner'), + include('more') + ], + # Local + 'main/local': [ + (_name, Name.Variable, '#pop'), + include('whitespace') + ], + 'more/local': [ + (r',', Punctuation, 'main/local'), + include('more') + ], + # List + 'more/list': [ + (r'[,:]', Punctuation, 
'main'), + include('more') + ], + # Parameter list + 'main/parameters': [ + (r'(%s)(%s*)(?=:)' % (_name, _ws), + bygroups(Name.Variable, using(this, state='whitespace')), '#pop'), + (r'(%s)(%s+)(%s)' % (_name, _ws, _name), + bygroups(Name.Class, using(this, state='whitespace'), + Name.Variable), '#pop'), + (r'\[+', Punctuation), + include('main/basic'), + (_name, Name.Variable, '#pop'), + default('#pop') + ], + 'more/parameters': [ + (r'(:)(%s*(?=[?=,:)]))' % _ws, + bygroups(Punctuation, using(this, state='whitespace'))), + (r'[?\]]+', Punctuation), + (r'[:)]', Punctuation, ('#pop', 'multimethod?')), + (r',', Punctuation, 'main/parameters'), + (r'=', Punctuation, ('more/parameter', 'main')), + include('more') + ], + 'more/parameter': [ + (r'(?=[,)])', Text, '#pop'), + include('more') + ], + 'multimethod?': [ + (r'multimethod\b', Keyword, '#pop'), + include('whitespace'), + default('#pop') + ], + + # Statements and expressions + 'more/__objref': [ + (r',', Punctuation, 'mode'), + (r'\)', Operator, '#pop'), + include('more') + ], + 'mode': [ + (r'(error|warn)\b', Keyword, '#pop'), + include('whitespace') + ], + 'catch': [ + (r'\(+', Punctuation), + (_name, Name.Exception, ('#pop', 'variables')), + include('whitespace') + ], + 'enum': [ + include('whitespace'), + (r'token\b', Keyword, ('#pop', 'constants')), + default(('#pop', 'constants')) + ], + 'grammar': [ + (r'\)+', Punctuation), + (r'\(', Punctuation, 'grammar-tag'), + (r':', Punctuation, 'grammar-rules'), + (_name, Name.Class), + include('whitespace') + ], + 'grammar-tag': [ + include('whitespace'), + (r'"""([^\\"<]|""?(?!")|\\"+|\\.|<(?!<))+("{3,}|<<)|' + r'R"""([^\\"]|""?(?!")|\\"+|\\.)+"{3,}|' + r"'''([^\\'<]|''?(?!')|\\'+|\\.|<(?!<))+('{3,}|<<)|" + r"R'''([^\\']|''?(?!')|\\'+|\\.)+'{3,}|" + r'"([^\\"<]|\\.|<(?!<))+("|<<)|R"([^\\"]|\\.)+"|' + r"'([^\\'<]|\\.|<(?!<))+('|<<)|R'([^\\']|\\.)+'|" + r"([^)\s\\/]|/(?![/*]))+|\)", String.Other, '#pop') + ], + 'grammar-rules': [ + include('string'), + 
include('whitespace'), + (r'(\[)(%s*)(badness)' % _ws, + bygroups(Punctuation, using(this, state='whitespace'), Keyword), + 'main'), + (r'->|%s|[()]' % _operator, Punctuation), + (_name, Name.Constant), + default('#pop:2') + ], + ':': [ + (r':', Punctuation, '#pop') + ], + 'function-name': [ + (r'(<<([^>]|>>>|>(?!>))*>>)+', String.Interpol), + (r'(?=%s?%s*[({])' % (_name, _ws), Text, '#pop'), + (_name, Name.Function, '#pop'), + include('whitespace') + ], + 'inherited': [ + (r'<', Punctuation, ('#pop', 'classes', 'class')), + include('whitespace'), + (_name, Name.Class, '#pop'), + default('#pop') + ], + 'operator': [ + (r'negate\b', Operator.Word, '#pop'), + include('whitespace'), + (_operator, Operator), + default('#pop') + ], + 'propertyset': [ + (r'\(', Punctuation, ('more/parameters', 'main/parameters')), + (r'\{', Punctuation, ('#pop', 'object-body')), + include('whitespace') + ], + 'template': [ + (r'(?=;)', Text, '#pop'), + include('string'), + (r'inherited\b', Keyword.Reserved), + include('whitespace'), + (r'->|\?|%s' % _operator, Punctuation), + (_name, Name.Variable) + ], + + # Identifiers + 'class': [ + (r'\*|\.{3}', Punctuation, '#pop'), + (r'object\b', Keyword.Reserved, '#pop'), + (r'transient\b', Keyword.Reserved), + (_name, Name.Class, '#pop'), + include('whitespace'), + default('#pop') + ], + 'classes': [ + (r'[:,]', Punctuation, 'class'), + include('whitespace'), + (r'>', Punctuation, '#pop'), + default('#pop') + ], + 'constants': [ + (r',+', Punctuation), + (r';', Punctuation, '#pop'), + (r'property\b', Keyword.Reserved), + (_name, Name.Constant), + include('whitespace') + ], + 'label': [ + (_name, Name.Label, '#pop'), + include('whitespace'), + default('#pop') + ], + 'variables': [ + (r',+', Punctuation), + (r'\)', Punctuation, '#pop'), + include('whitespace'), + (_name, Name.Variable) + ], + + # Whitespace and comments + 'whitespace': [ + (r'^%s*#(%s|[^\n]|(?<=\\)\n)*\n?' 
% (_ws_pp, _comment_multiline), + Comment.Preproc), + (_comment_single, Comment.Single), + (_comment_multiline, Comment.Multiline), + (r'\\+\n+%s*#?|\n+|([^\S\n]|\\)+' % _ws_pp, Text) + ], + + # Strings + 'string': [ + (r'"""', String.Double, 'tdqs'), + (r"'''", String.Single, 'tsqs'), + (r'"', String.Double, 'dqs'), + (r"'", String.Single, 'sqs') + ], + 's/escape': [ + (r'\{\{|\}\}|%s' % _escape, String.Escape) + ], + 's/verbatim': [ + (r'<<\s*(as\s+decreasingly\s+likely\s+outcomes|cycling|else|end|' + r'first\s+time|one\s+of|only|or|otherwise|' + r'(sticky|(then\s+)?(purely\s+)?at)\s+random|stopping|' + r'(then\s+)?(half\s+)?shuffled|\|\|)\s*>>', String.Interpol), + (r'<<(%%(_(%s|\\?.)|[\-+ ,#]|\[\d*\]?)*\d*\.?\d*(%s|\\?.)|' + r'\s*((else|otherwise)\s+)?(if|unless)\b)?' % (_escape, _escape), + String.Interpol, ('block/embed', 'more/embed', 'main')) + ], + 's/entity': [ + (r'(?i)&(#(x[\da-f]+|\d+)|[a-z][\da-z]*);?', Name.Entity) + ], + 'tdqs': _make_string_state(True, True), + 'tsqs': _make_string_state(True, False), + 'dqs': _make_string_state(False, True), + 'sqs': _make_string_state(False, False), + 'tdqs/listing': _make_string_state(True, True, 'listing'), + 'tsqs/listing': _make_string_state(True, False, 'listing'), + 'dqs/listing': _make_string_state(False, True, 'listing'), + 'sqs/listing': _make_string_state(False, False, 'listing'), + 'tdqs/xmp': _make_string_state(True, True, 'xmp'), + 'tsqs/xmp': _make_string_state(True, False, 'xmp'), + 'dqs/xmp': _make_string_state(False, True, 'xmp'), + 'sqs/xmp': _make_string_state(False, False, 'xmp'), + + # Tags + 'tdqt': _make_tag_state(True, True), + 'tsqt': _make_tag_state(True, False), + 'dqt': _make_tag_state(False, True), + 'sqt': _make_tag_state(False, False), + 'dqs/tdqt': _make_attribute_value_state(r'"', True, True), + 'dqs/tsqt': _make_attribute_value_state(r'"', True, False), + 'dqs/dqt': _make_attribute_value_state(r'"', False, True), + 'dqs/sqt': _make_attribute_value_state(r'"', False, False), + 
'sqs/tdqt': _make_attribute_value_state(r"'", True, True), + 'sqs/tsqt': _make_attribute_value_state(r"'", True, False), + 'sqs/dqt': _make_attribute_value_state(r"'", False, True), + 'sqs/sqt': _make_attribute_value_state(r"'", False, False), + 'uqs/tdqt': _make_attribute_value_state(_no_quote, True, True), + 'uqs/tsqt': _make_attribute_value_state(_no_quote, True, False), + 'uqs/dqt': _make_attribute_value_state(_no_quote, False, True), + 'uqs/sqt': _make_attribute_value_state(_no_quote, False, False), + + # Regular expressions + 'tdqr': [ + (r'[^\\"]+', String.Regex), + (r'\\"*', String.Regex), + (r'"{3,}', String.Regex, '#pop'), + (r'"', String.Regex) + ], + 'tsqr': [ + (r"[^\\']+", String.Regex), + (r"\\'*", String.Regex), + (r"'{3,}", String.Regex, '#pop'), + (r"'", String.Regex) + ], + 'dqr': [ + (r'[^\\"]+', String.Regex), + (r'\\"?', String.Regex), + (r'"', String.Regex, '#pop') + ], + 'sqr': [ + (r"[^\\']+", String.Regex), + (r"\\'?", String.Regex), + (r"'", String.Regex, '#pop') + ] + } + + def get_tokens_unprocessed(self, text, **kwargs): + pp = r'^%s*#%s*' % (self._ws_pp, self._ws_pp) + if_false_level = 0 + for index, token, value in ( + RegexLexer.get_tokens_unprocessed(self, text, **kwargs)): + if if_false_level == 0: # Not in a false #if + if (token is Comment.Preproc and + re.match(r'%sif%s+(0|nil)%s*$\n?' 
% + (pp, self._ws_pp, self._ws_pp), value)): + if_false_level = 1 + else: # In a false #if + if token is Comment.Preproc: + if (if_false_level == 1 and + re.match(r'%sel(if|se)\b' % pp, value)): + if_false_level = 0 + elif re.match(r'%sif' % pp, value): + if_false_level += 1 + elif re.match(r'%sendif\b' % pp, value): + if_false_level -= 1 + else: + token = Comment + yield index, token, value diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/iolang.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/iolang.py new file mode 100644 index 0000000..0bf86f5 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/iolang.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.iolang + ~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for the Io language. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number + +__all__ = ['IoLexer'] + + +class IoLexer(RegexLexer): + """ + For `Io `_ (a small, prototype-based + programming language) source. + + .. 
versionadded:: 0.10 + """ + name = 'Io' + filenames = ['*.io'] + aliases = ['io'] + mimetypes = ['text/x-iosrc'] + tokens = { + 'root': [ + (r'\n', Text), + (r'\s+', Text), + # Comments + (r'//(.*?)\n', Comment.Single), + (r'#(.*?)\n', Comment.Single), + (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), + (r'/\+', Comment.Multiline, 'nestedcomment'), + # DoubleQuotedString + (r'"(\\\\|\\"|[^"])*"', String), + # Operators + (r'::=|:=|=|\(|\)|;|,|\*|-|\+|>|<|@|!|/|\||\^|\.|%|&|\[|\]|\{|\}', + Operator), + # keywords + (r'(clone|do|doFile|doString|method|for|if|else|elseif|then)\b', + Keyword), + # constants + (r'(nil|false|true)\b', Name.Constant), + # names + (r'(Object|list|List|Map|args|Sequence|Coroutine|File)\b', + Name.Builtin), + ('[a-zA-Z_]\w*', Name), + # numbers + (r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float), + (r'\d+', Number.Integer) + ], + 'nestedcomment': [ + (r'[^+/]+', Comment.Multiline), + (r'/\+', Comment.Multiline, '#push'), + (r'\+/', Comment.Multiline, '#pop'), + (r'[+/]', Comment.Multiline), + ] + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/javascript.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/javascript.py new file mode 100644 index 0000000..404bcf9 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/javascript.py @@ -0,0 +1,1199 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.javascript + ~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for JavaScript and related languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import re + +from pygments.lexer import RegexLexer, include, bygroups, default, using, this +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Other +from pygments.util import get_bool_opt, iteritems +import pygments.unistring as uni + +__all__ = ['JavascriptLexer', 'KalLexer', 'LiveScriptLexer', 'DartLexer', + 'TypeScriptLexer', 'LassoLexer', 'ObjectiveJLexer', + 'CoffeeScriptLexer', 'MaskLexer'] + +JS_IDENT_START = ('(?:[$_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + + ']|\\\\u[a-fA-F0-9]{4})') +JS_IDENT_PART = ('(?:[$' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl', + 'Mn', 'Mc', 'Nd', 'Pc') + + u'\u200c\u200d]|\\\\u[a-fA-F0-9]{4})') +JS_IDENT = JS_IDENT_START + '(?:' + JS_IDENT_PART + ')*' + + +class JavascriptLexer(RegexLexer): + """ + For JavaScript source code. + """ + + name = 'JavaScript' + aliases = ['js', 'javascript'] + filenames = ['*.js', ] + mimetypes = ['application/javascript', 'application/x-javascript', + 'text/x-javascript', 'text/javascript', ] + + flags = re.DOTALL | re.UNICODE | re.MULTILINE + + tokens = { + 'commentsandwhitespace': [ + (r'\s+', Text), + (r')?', Other, 'delimiters'), + (r'\s+', Other), + default(('delimiters', 'lassofile')), + ], + 'delimiters': [ + (r'\[no_square_brackets\]', Comment.Preproc, 'nosquarebrackets'), + (r'\[noprocess\]', Comment.Preproc, 'noprocess'), + (r'\[', Comment.Preproc, 'squarebrackets'), + (r'<\?(LassoScript|lasso|=)', Comment.Preproc, 'anglebrackets'), + (r'<(!--.*?-->)?', Other), + (r'[^[<]+', Other), + ], + 'nosquarebrackets': [ + (r'<\?(LassoScript|lasso|=)', Comment.Preproc, 'anglebrackets'), + (r'<', Other), + (r'[^<]+', Other), + ], + 'noprocess': [ + (r'\[/noprocess\]', Comment.Preproc, '#pop'), + (r'\[', Other), + (r'[^[]', Other), + ], + 'squarebrackets': [ + (r'\]', Comment.Preproc, '#pop'), + include('lasso'), + ], + 'anglebrackets': [ + (r'\?>', Comment.Preproc, '#pop'), + include('lasso'), + ], + 'lassofile': [ + 
(r'\]|\?>', Comment.Preproc, '#pop'), + include('lasso'), + ], + 'whitespacecomments': [ + (r'\s+', Text), + (r'//.*?\n', Comment.Single), + (r'/\*\*!.*?\*/', String.Doc), + (r'/\*.*?\*/', Comment.Multiline), + ], + 'lasso': [ + # whitespace/comments + include('whitespacecomments'), + + # literals + (r'\d*\.\d+(e[+-]?\d+)?', Number.Float), + (r'0x[\da-f]+', Number.Hex), + (r'\d+', Number.Integer), + (r'([+-]?)(infinity|NaN)\b', bygroups(Operator, Number)), + (r"'", String.Single, 'singlestring'), + (r'"', String.Double, 'doublestring'), + (r'`[^`]*`', String.Backtick), + + # names + (r'\$[a-z_][\w.]*', Name.Variable), + (r'#([a-z_][\w.]*|\d+)', Name.Variable.Instance), + (r"(\.)('[a-z_][\w.]*')", + bygroups(Name.Builtin.Pseudo, Name.Variable.Class)), + (r"(self)(\s*->\s*)('[a-z_][\w.]*')", + bygroups(Name.Builtin.Pseudo, Operator, Name.Variable.Class)), + (r'(\.\.?)([a-z_][\w.]*(=(?!=))?)', + bygroups(Name.Builtin.Pseudo, Name.Other.Member)), + (r'(->\\?\s*|&\s*)([a-z_][\w.]*(=(?!=))?)', + bygroups(Operator, Name.Other.Member)), + (r'(self|inherited)\b', Name.Builtin.Pseudo), + (r'-[a-z_][\w.]*', Name.Attribute), + (r'::\s*[a-z_][\w.]*', Name.Label), + (r'(error_(code|msg)_\w+|Error_AddError|Error_ColumnRestriction|' + r'Error_DatabaseConnectionUnavailable|Error_DatabaseTimeout|' + r'Error_DeleteError|Error_FieldRestriction|Error_FileNotFound|' + r'Error_InvalidDatabase|Error_InvalidPassword|' + r'Error_InvalidUsername|Error_ModuleNotFound|' + r'Error_NoError|Error_NoPermission|Error_OutOfMemory|' + r'Error_ReqColumnMissing|Error_ReqFieldMissing|' + r'Error_RequiredColumnMissing|Error_RequiredFieldMissing|' + r'Error_UpdateError)\b', Name.Exception), + + # definitions + (r'(define)(\s+)([a-z_][\w.]*)(\s*=>\s*)(type|trait|thread)\b', + bygroups(Keyword.Declaration, Text, Name.Class, Operator, Keyword)), + (r'(define)(\s+)([a-z_][\w.]*)(\s*->\s*)([a-z_][\w.]*=?|[-+*/%])', + bygroups(Keyword.Declaration, Text, Name.Class, Operator, + Name.Function), 'signature'), + 
(r'(define)(\s+)([a-z_][\w.]*)', + bygroups(Keyword.Declaration, Text, Name.Function), 'signature'), + (r'(public|protected|private|provide)(\s+)(([a-z_][\w.]*=?|[-+*/%])' + r'(?=\s*\())', bygroups(Keyword, Text, Name.Function), + 'signature'), + (r'(public|protected|private|provide)(\s+)([a-z_][\w.]*)', + bygroups(Keyword, Text, Name.Function)), + + # keywords + (r'(true|false|none|minimal|full|all|void)\b', Keyword.Constant), + (r'(local|var|variable|global|data(?=\s))\b', Keyword.Declaration), + (r'(array|date|decimal|duration|integer|map|pair|string|tag|xml|' + r'null|bytes|list|queue|set|stack|staticarray|tie)\b', Keyword.Type), + (r'([a-z_][\w.]*)(\s+)(in)\b', bygroups(Name, Text, Keyword)), + (r'(let|into)(\s+)([a-z_][\w.]*)', bygroups(Keyword, Text, Name)), + (r'require\b', Keyword, 'requiresection'), + (r'(/?)(Namespace_Using)\b', bygroups(Punctuation, Keyword.Namespace)), + (r'(/?)(Cache|Database_Names|Database_SchemaNames|' + r'Database_TableNames|Define_Tag|Define_Type|Email_Batch|' + r'Encode_Set|HTML_Comment|Handle|Handle_Error|Header|If|Inline|' + r'Iterate|LJAX_Target|Link|Link_CurrentAction|Link_CurrentGroup|' + r'Link_CurrentRecord|Link_Detail|Link_FirstGroup|' + r'Link_FirstRecord|Link_LastGroup|Link_LastRecord|Link_NextGroup|' + r'Link_NextRecord|Link_PrevGroup|Link_PrevRecord|Log|Loop|' + r'NoProcess|Output_None|Portal|Private|Protect|Records|Referer|' + r'Referrer|Repeating|ResultSet|Rows|Search_Args|Search_Arguments|' + r'Select|Sort_Args|Sort_Arguments|Thread_Atomic|Value_List|While|' + r'Abort|Case|Else|If_Empty|If_False|If_Null|If_True|Loop_Abort|' + r'Loop_Continue|Loop_Count|Params|Params_Up|Return|Return_Value|' + r'Run_Children|SOAP_DefineTag|SOAP_LastRequest|SOAP_LastResponse|' + r'Tag_Name|ascending|average|by|define|descending|do|equals|' + r'frozen|group|handle_failure|import|in|into|join|let|match|max|' + r'min|on|order|parent|protected|provide|public|require|returnhome|' + 
r'skip|split_thread|sum|take|thread|to|trait|type|where|with|' + r'yield|yieldhome)\b', + bygroups(Punctuation, Keyword)), + + # other + (r',', Punctuation, 'commamember'), + (r'(and|or|not)\b', Operator.Word), + (r'([a-z_][\w.]*)(\s*::\s*[a-z_][\w.]*)?(\s*=(?!=))', + bygroups(Name, Name.Label, Operator)), + (r'(/?)([\w.]+)', bygroups(Punctuation, Name.Other)), + (r'(=)(n?bw|n?ew|n?cn|lte?|gte?|n?eq|n?rx|ft)\b', + bygroups(Operator, Operator.Word)), + (r':=|[-+*/%=<>&|!?\\]+', Operator), + (r'[{}():;,@^]', Punctuation), + ], + 'singlestring': [ + (r"'", String.Single, '#pop'), + (r"[^'\\]+", String.Single), + include('escape'), + (r"\\", String.Single), + ], + 'doublestring': [ + (r'"', String.Double, '#pop'), + (r'[^"\\]+', String.Double), + include('escape'), + (r'\\', String.Double), + ], + 'escape': [ + (r'\\(U[\da-f]{8}|u[\da-f]{4}|x[\da-f]{1,2}|[0-7]{1,3}|:[^:]+:|' + r'[abefnrtv?"\'\\]|$)', String.Escape), + ], + 'signature': [ + (r'=>', Operator, '#pop'), + (r'\)', Punctuation, '#pop'), + (r'[(,]', Punctuation, 'parameter'), + include('lasso'), + ], + 'parameter': [ + (r'\)', Punctuation, '#pop'), + (r'-?[a-z_][\w.]*', Name.Attribute, '#pop'), + (r'\.\.\.', Name.Builtin.Pseudo), + include('lasso'), + ], + 'requiresection': [ + (r'(([a-z_][\w.]*=?|[-+*/%])(?=\s*\())', Name, 'requiresignature'), + (r'(([a-z_][\w.]*=?|[-+*/%])(?=(\s*::\s*[\w.]+)?\s*,))', Name), + (r'[a-z_][\w.]*=?|[-+*/%]', Name, '#pop'), + (r'::\s*[a-z_][\w.]*', Name.Label), + (r',', Punctuation), + include('whitespacecomments'), + ], + 'requiresignature': [ + (r'(\)(?=(\s*::\s*[\w.]+)?\s*,))', Punctuation, '#pop'), + (r'\)', Punctuation, '#pop:2'), + (r'-?[a-z_][\w.]*', Name.Attribute), + (r'::\s*[a-z_][\w.]*', Name.Label), + (r'\.\.\.', Name.Builtin.Pseudo), + (r'[(,]', Punctuation), + include('whitespacecomments'), + ], + 'commamember': [ + (r'(([a-z_][\w.]*=?|[-+*/%])' + r'(?=\s*(\(([^()]*\([^()]*\))*[^)]*\)\s*)?(::[\w.\s]+)?=>))', + Name.Function, 'signature'), + 
include('whitespacecomments'), + default('#pop'), + ], + } + + def __init__(self, **options): + self.builtinshighlighting = get_bool_opt( + options, 'builtinshighlighting', True) + self.requiredelimiters = get_bool_opt( + options, 'requiredelimiters', False) + + self._builtins = set() + self._members = set() + if self.builtinshighlighting: + from pygments.lexers._lasso_builtins import BUILTINS, MEMBERS + for key, value in iteritems(BUILTINS): + self._builtins.update(value) + for key, value in iteritems(MEMBERS): + self._members.update(value) + RegexLexer.__init__(self, **options) + + def get_tokens_unprocessed(self, text): + stack = ['root'] + if self.requiredelimiters: + stack.append('delimiters') + for index, token, value in \ + RegexLexer.get_tokens_unprocessed(self, text, stack): + if (token is Name.Other and value.lower() in self._builtins or + token is Name.Other.Member and + value.lower().rstrip('=') in self._members): + yield index, Name.Builtin, value + continue + yield index, token, value + + def analyse_text(text): + rv = 0.0 + if 'bin/lasso9' in text: + rv += 0.8 + if re.search(r'<\?lasso', text, re.I): + rv += 0.4 + if re.search(r'local\(', text, re.I): + rv += 0.4 + return rv + + +class ObjectiveJLexer(RegexLexer): + """ + For Objective-J source code with preprocessor directives. + + .. 
versionadded:: 1.3 + """ + + name = 'Objective-J' + aliases = ['objective-j', 'objectivej', 'obj-j', 'objj'] + filenames = ['*.j'] + mimetypes = ['text/x-objective-j'] + + #: optional Comment or Whitespace + _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)*' + + flags = re.DOTALL | re.MULTILINE + + tokens = { + 'root': [ + include('whitespace'), + + # function definition + (r'^(' + _ws + r'[+-]' + _ws + r')([(a-zA-Z_].*?[^(])(' + _ws + r'\{)', + bygroups(using(this), using(this, state='function_signature'), + using(this))), + + # class definition + (r'(@interface|@implementation)(\s+)', bygroups(Keyword, Text), + 'classname'), + (r'(@class|@protocol)(\s*)', bygroups(Keyword, Text), + 'forward_classname'), + (r'(\s*)(@end)(\s*)', bygroups(Text, Keyword, Text)), + + include('statements'), + ('[{()}]', Punctuation), + (';', Punctuation), + ], + 'whitespace': [ + (r'(@import)(\s+)("(?:\\\\|\\"|[^"])*")', + bygroups(Comment.Preproc, Text, String.Double)), + (r'(@import)(\s+)(<(?:\\\\|\\>|[^>])*>)', + bygroups(Comment.Preproc, Text, String.Double)), + (r'(#(?:include|import))(\s+)("(?:\\\\|\\"|[^"])*")', + bygroups(Comment.Preproc, Text, String.Double)), + (r'(#(?:include|import))(\s+)(<(?:\\\\|\\>|[^>])*>)', + bygroups(Comment.Preproc, Text, String.Double)), + + (r'#if\s+0', Comment.Preproc, 'if0'), + (r'#', Comment.Preproc, 'macro'), + + (r'\n', Text), + (r'\s+', Text), + (r'\\\n', Text), # line continuation + (r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single), + (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), + (r'', Comment, '#pop'), + (r'[^\-]+|-', Comment), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/modeling.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/modeling.py new file mode 100644 index 0000000..1552fbf --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/modeling.py @@ -0,0 +1,356 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.modeling + 
~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for modeling languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include, bygroups, using, default +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +from pygments.lexers.html import HtmlLexer +from pygments.lexers import _stan_builtins + +__all__ = ['ModelicaLexer', 'BugsLexer', 'JagsLexer', 'StanLexer'] + + +class ModelicaLexer(RegexLexer): + """ + For `Modelica `_ source code. + + .. versionadded:: 1.1 + """ + name = 'Modelica' + aliases = ['modelica'] + filenames = ['*.mo'] + mimetypes = ['text/x-modelica'] + + flags = re.DOTALL | re.MULTILINE + + _name = r"(?:'(?:[^\\']|\\.)+'|[a-zA-Z_]\w*)" + + tokens = { + 'whitespace': [ + (u'[\\s\ufeff]+', Text), + (r'//[^\n]*\n?', Comment.Single), + (r'/\*.*?\*/', Comment.Multiline) + ], + 'root': [ + include('whitespace'), + (r'"', String.Double, 'string'), + (r'[()\[\]{},;]+', Punctuation), + (r'\.?[*^/+-]|\.|<>|[<>:=]=?', Operator), + (r'\d+(\.?\d*[eE][-+]?\d+|\.\d*)', Number.Float), + (r'\d+', Number.Integer), + (r'(abs|acos|actualStream|array|asin|assert|AssertionLevel|atan|' + r'atan2|backSample|Boolean|cardinality|cat|ceil|change|Clock|' + r'Connections|cos|cosh|cross|delay|diagonal|div|edge|exp|' + r'ExternalObject|fill|floor|getInstanceName|hold|homotopy|' + r'identity|inStream|integer|Integer|interval|inverse|isPresent|' + r'linspace|log|log10|matrix|max|min|mod|ndims|noClock|noEvent|' + r'ones|outerProduct|pre|previous|product|Real|reinit|rem|rooted|' + r'sample|scalar|semiLinear|shiftSample|sign|sin|sinh|size|skew|' + r'smooth|spatialDistribution|sqrt|StateSelect|String|subSample|' + r'sum|superSample|symmetric|tan|tanh|terminal|terminate|time|' + r'transpose|vector|zeros)\b', Name.Builtin), + (r'(algorithm|annotation|break|connect|constant|constrainedby|der|' + 
r'discrete|each|else|elseif|elsewhen|encapsulated|enumeration|' + r'equation|exit|expandable|extends|external|final|flow|for|if|' + r'import|impure|in|initial|inner|input|loop|nondiscrete|outer|' + r'output|parameter|partial|protected|public|pure|redeclare|' + r'replaceable|return|stream|then|when|while)\b', + Keyword.Reserved), + (r'(and|not|or)\b', Operator.Word), + (r'(block|class|connector|end|function|model|operator|package|' + r'record|type)\b', Keyword.Reserved, 'class'), + (r'(false|true)\b', Keyword.Constant), + (r'within\b', Keyword.Reserved, 'package-prefix'), + (_name, Name) + ], + 'class': [ + include('whitespace'), + (r'(function|record)\b', Keyword.Reserved), + (r'(if|for|when|while)\b', Keyword.Reserved, '#pop'), + (_name, Name.Class, '#pop'), + default('#pop') + ], + 'package-prefix': [ + include('whitespace'), + (_name, Name.Namespace, '#pop'), + default('#pop') + ], + 'string': [ + (r'"', String.Double, '#pop'), + (r'\\[\'"?\\abfnrtv]', String.Escape), + (r'(?i)<\s*html\s*>([^\\"]|\\.)+?(<\s*/\s*html\s*>|(?="))', + using(HtmlLexer)), + (r'<|\\?[^"\\<]+', String.Double) + ] + } + + +class BugsLexer(RegexLexer): + """ + Pygments Lexer for `OpenBugs `_ and WinBugs + models. + + .. 
versionadded:: 1.6 + """ + + name = 'BUGS' + aliases = ['bugs', 'winbugs', 'openbugs'] + filenames = ['*.bug'] + + _FUNCTIONS = ( + # Scalar functions + 'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh', + 'cloglog', 'cos', 'cosh', 'cumulative', 'cut', 'density', 'deviance', + 'equals', 'expr', 'gammap', 'ilogit', 'icloglog', 'integral', 'log', + 'logfact', 'loggam', 'logit', 'max', 'min', 'phi', 'post.p.value', + 'pow', 'prior.p.value', 'probit', 'replicate.post', 'replicate.prior', + 'round', 'sin', 'sinh', 'solution', 'sqrt', 'step', 'tan', 'tanh', + 'trunc', + # Vector functions + 'inprod', 'interp.lin', 'inverse', 'logdet', 'mean', 'eigen.vals', + 'ode', 'prod', 'p.valueM', 'rank', 'ranked', 'replicate.postM', + 'sd', 'sort', 'sum', + # Special + 'D', 'I', 'F', 'T', 'C') + """ OpenBUGS built-in functions + + From http://www.openbugs.info/Manuals/ModelSpecification.html#ContentsAII + + This also includes + + - T, C, I : Truncation and censoring. + ``T`` and ``C`` are in OpenBUGS. ``I`` in WinBUGS. 
+ - D : ODE + - F : Functional http://www.openbugs.info/Examples/Functionals.html + + """ + + _DISTRIBUTIONS = ('dbern', 'dbin', 'dcat', 'dnegbin', 'dpois', + 'dhyper', 'dbeta', 'dchisqr', 'ddexp', 'dexp', + 'dflat', 'dgamma', 'dgev', 'df', 'dggamma', 'dgpar', + 'dloglik', 'dlnorm', 'dlogis', 'dnorm', 'dpar', + 'dt', 'dunif', 'dweib', 'dmulti', 'ddirch', 'dmnorm', + 'dmt', 'dwish') + """ OpenBUGS built-in distributions + + Functions from + http://www.openbugs.info/Manuals/ModelSpecification.html#ContentsAI + """ + + tokens = { + 'whitespace': [ + (r"\s+", Text), + ], + 'comments': [ + # Comments + (r'#.*$', Comment.Single), + ], + 'root': [ + # Comments + include('comments'), + include('whitespace'), + # Block start + (r'(model)(\s+)(\{)', + bygroups(Keyword.Namespace, Text, Punctuation)), + # Reserved Words + (r'(for|in)(?![\w.])', Keyword.Reserved), + # Built-in Functions + (r'(%s)(?=\s*\()' + % r'|'.join(_FUNCTIONS + _DISTRIBUTIONS), + Name.Builtin), + # Regular variable names + (r'[A-Za-z][\w.]*', Name), + # Number Literals + (r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', Number), + # Punctuation + (r'\[|\]|\(|\)|:|,|;', Punctuation), + # Assignment operators + # SLexer makes these tokens Operators. + (r'<-|~', Operator), + # Infix and prefix operators + (r'\+|-|\*|/', Operator), + # Block + (r'[{}]', Punctuation), + ] + } + + def analyse_text(text): + if re.search(r"^\s*model\s*{", text, re.M): + return 0.7 + else: + return 0.0 + + +class JagsLexer(RegexLexer): + """ + Pygments Lexer for JAGS. + + .. 
versionadded:: 1.6 + """ + + name = 'JAGS' + aliases = ['jags'] + filenames = ['*.jag', '*.bug'] + + # JAGS + _FUNCTIONS = ( + 'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh', + 'cos', 'cosh', 'cloglog', + 'equals', 'exp', 'icloglog', 'ifelse', 'ilogit', 'log', 'logfact', + 'loggam', 'logit', 'phi', 'pow', 'probit', 'round', 'sin', 'sinh', + 'sqrt', 'step', 'tan', 'tanh', 'trunc', 'inprod', 'interp.lin', + 'logdet', 'max', 'mean', 'min', 'prod', 'sum', 'sd', 'inverse', + 'rank', 'sort', 't', 'acos', 'acosh', 'asin', 'asinh', 'atan', + # Truncation/Censoring (should I include) + 'T', 'I') + # Distributions with density, probability and quartile functions + _DISTRIBUTIONS = tuple('[dpq]%s' % x for x in + ('bern', 'beta', 'dchiqsqr', 'ddexp', 'dexp', + 'df', 'gamma', 'gen.gamma', 'logis', 'lnorm', + 'negbin', 'nchisqr', 'norm', 'par', 'pois', 'weib')) + # Other distributions without density and probability + _OTHER_DISTRIBUTIONS = ( + 'dt', 'dunif', 'dbetabin', 'dbern', 'dbin', 'dcat', 'dhyper', + 'ddirch', 'dmnorm', 'dwish', 'dmt', 'dmulti', 'dbinom', 'dchisq', + 'dnbinom', 'dweibull', 'ddirich') + + tokens = { + 'whitespace': [ + (r"\s+", Text), + ], + 'names': [ + # Regular variable names + (r'[a-zA-Z][\w.]*\b', Name), + ], + 'comments': [ + # do not use stateful comments + (r'(?s)/\*.*?\*/', Comment.Multiline), + # Comments + (r'#.*$', Comment.Single), + ], + 'root': [ + # Comments + include('comments'), + include('whitespace'), + # Block start + (r'(model|data)(\s+)(\{)', + bygroups(Keyword.Namespace, Text, Punctuation)), + (r'var(?![\w.])', Keyword.Declaration), + # Reserved Words + (r'(for|in)(?![\w.])', Keyword.Reserved), + # Builtins + # Need to use lookahead because . 
is a valid char + (r'(%s)(?=\s*\()' % r'|'.join(_FUNCTIONS + + _DISTRIBUTIONS + + _OTHER_DISTRIBUTIONS), + Name.Builtin), + # Names + include('names'), + # Number Literals + (r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', Number), + (r'\[|\]|\(|\)|:|,|;', Punctuation), + # Assignment operators + (r'<-|~', Operator), + # # JAGS includes many more than OpenBUGS + (r'\+|-|\*|\/|\|\|[&]{2}|[<>=]=?|\^|%.*?%', Operator), + (r'[{}]', Punctuation), + ] + } + + def analyse_text(text): + if re.search(r'^\s*model\s*\{', text, re.M): + if re.search(r'^\s*data\s*\{', text, re.M): + return 0.9 + elif re.search(r'^\s*var', text, re.M): + return 0.9 + else: + return 0.3 + else: + return 0 + + +class StanLexer(RegexLexer): + """Pygments Lexer for Stan models. + + The Stan modeling language is specified in the *Stan Modeling Language + User's Guide and Reference Manual, v2.4.0*, + `pdf `__. + + .. versionadded:: 1.6 + """ + + name = 'Stan' + aliases = ['stan'] + filenames = ['*.stan'] + + tokens = { + 'whitespace': [ + (r"\s+", Text), + ], + 'comments': [ + (r'(?s)/\*.*?\*/', Comment.Multiline), + # Comments + (r'(//|#).*$', Comment.Single), + ], + 'root': [ + # Stan is more restrictive on strings than this regex + (r'"[^"]*"', String), + # Comments + include('comments'), + # block start + include('whitespace'), + # Block start + (r'(%s)(\s*)(\{)' % + r'|'.join(('functions', 'data', r'transformed\s+?data', + 'parameters', r'transformed\s+parameters', + 'model', r'generated\s+quantities')), + bygroups(Keyword.Namespace, Text, Punctuation)), + # Reserved Words + (r'(%s)\b' % r'|'.join(_stan_builtins.KEYWORDS), Keyword), + # Truncation + (r'T(?=\s*\[)', Keyword), + # Data types + (r'(%s)\b' % r'|'.join(_stan_builtins.TYPES), Keyword.Type), + # Punctuation + (r"[;:,\[\]()]", Punctuation), + # Builtin + (r'(%s)(?=\s*\()' + % r'|'.join(_stan_builtins.FUNCTIONS + + _stan_builtins.DISTRIBUTIONS), + Name.Builtin), + # Special names ending in __, like lp__ + (r'[A-Za-z]\w*__\b', 
Name.Builtin.Pseudo), + (r'(%s)\b' % r'|'.join(_stan_builtins.RESERVED), Keyword.Reserved), + # Regular variable names + (r'[A-Za-z]\w*\b', Name), + # Real Literals + (r'-?[0-9]+(\.[0-9]+)?[eE]-?[0-9]+', Number.Float), + (r'-?[0-9]*\.[0-9]*', Number.Float), + # Integer Literals + (r'-?[0-9]+', Number.Integer), + # Assignment operators + # SLexer makes these tokens Operators. + (r'<-|~', Operator), + # Infix, prefix and postfix operators (and = ) + (r"\+|-|\.?\*|\.?/|\\|'|\^|==?|!=?|<=?|>=?|\|\||&&", Operator), + # Block delimiters + (r'[{}]', Punctuation), + ] + } + + def analyse_text(text): + if re.search(r'^\s*parameters\s*\{', text, re.M): + return 1.0 + else: + return 0.0 diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/nimrod.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/nimrod.py new file mode 100644 index 0000000..de2eafb --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/nimrod.py @@ -0,0 +1,159 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.nimrod + ~~~~~~~~~~~~~~~~~~~~~~ + + Lexer for the Nimrod language. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include, default +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Error + +__all__ = ['NimrodLexer'] + + +class NimrodLexer(RegexLexer): + """ + For `Nimrod `_ source code. + + .. 
versionadded:: 1.5 + """ + + name = 'Nimrod' + aliases = ['nimrod', 'nim'] + filenames = ['*.nim', '*.nimrod'] + mimetypes = ['text/x-nimrod'] + + flags = re.MULTILINE | re.IGNORECASE | re.UNICODE + + def underscorize(words): + newWords = [] + new = "" + for word in words: + for ch in word: + new += (ch + "_?") + newWords.append(new) + new = "" + return "|".join(newWords) + + keywords = [ + 'addr', 'and', 'as', 'asm', 'atomic', 'bind', 'block', 'break', + 'case', 'cast', 'const', 'continue', 'converter', 'discard', + 'distinct', 'div', 'elif', 'else', 'end', 'enum', 'except', 'finally', + 'for', 'generic', 'if', 'implies', 'in', 'yield', + 'is', 'isnot', 'iterator', 'lambda', 'let', 'macro', 'method', + 'mod', 'not', 'notin', 'object', 'of', 'or', 'out', 'proc', + 'ptr', 'raise', 'ref', 'return', 'shl', 'shr', 'template', 'try', + 'tuple', 'type', 'when', 'while', 'with', 'without', 'xor' + ] + + keywordsPseudo = [ + 'nil', 'true', 'false' + ] + + opWords = [ + 'and', 'or', 'not', 'xor', 'shl', 'shr', 'div', 'mod', 'in', + 'notin', 'is', 'isnot' + ] + + types = [ + 'int', 'int8', 'int16', 'int32', 'int64', 'float', 'float32', 'float64', + 'bool', 'char', 'range', 'array', 'seq', 'set', 'string' + ] + + tokens = { + 'root': [ + (r'##.*$', String.Doc), + (r'#.*$', Comment), + (r'[*=><+\-/@$~&%!?|\\\[\]]', Operator), + (r'\.\.|\.|,|\[\.|\.\]|\{\.|\.\}|\(\.|\.\)|\{|\}|\(|\)|:|\^|`|;', + Punctuation), + + # Strings + (r'(?:[\w]+)"', String, 'rdqs'), + (r'"""', String, 'tdqs'), + ('"', String, 'dqs'), + + # Char + ("'", String.Char, 'chars'), + + # Keywords + (r'(%s)\b' % underscorize(opWords), Operator.Word), + (r'(p_?r_?o_?c_?\s)(?![(\[\]])', Keyword, 'funcname'), + (r'(%s)\b' % underscorize(keywords), Keyword), + (r'(%s)\b' % underscorize(['from', 'import', 'include']), + Keyword.Namespace), + (r'(v_?a_?r)\b', Keyword.Declaration), + (r'(%s)\b' % underscorize(types), Keyword.Type), + (r'(%s)\b' % underscorize(keywordsPseudo), Keyword.Pseudo), + # Identifiers + 
(r'\b((?![_\d])\w)(((?!_)\w)|(_(?!_)\w))*', Name), + # Numbers + (r'[0-9][0-9_]*(?=([e.]|\'f(32|64)))', + Number.Float, ('float-suffix', 'float-number')), + (r'0x[a-f0-9][a-f0-9_]*', Number.Hex, 'int-suffix'), + (r'0b[01][01_]*', Number.Bin, 'int-suffix'), + (r'0o[0-7][0-7_]*', Number.Oct, 'int-suffix'), + (r'[0-9][0-9_]*', Number.Integer, 'int-suffix'), + # Whitespace + (r'\s+', Text), + (r'.+$', Error), + ], + 'chars': [ + (r'\\([\\abcefnrtvl"\']|x[a-f0-9]{2}|[0-9]{1,3})', String.Escape), + (r"'", String.Char, '#pop'), + (r".", String.Char) + ], + 'strings': [ + (r'(?`_ source. + + .. versionadded:: 2.0 + """ + + name = 'Nit' + aliases = ['nit'] + filenames = ['*.nit'] + tokens = { + 'root': [ + (r'#.*?$', Comment.Single), + (words(( + 'package', 'module', 'import', 'class', 'abstract', 'interface', + 'universal', 'enum', 'end', 'fun', 'type', 'init', 'redef', + 'isa', 'do', 'readable', 'writable', 'var', 'intern', 'extern', + 'public', 'protected', 'private', 'intrude', 'if', 'then', + 'else', 'while', 'loop', 'for', 'in', 'and', 'or', 'not', + 'implies', 'return', 'continue', 'break', 'abort', 'assert', + 'new', 'is', 'once', 'super', 'self', 'true', 'false', 'nullable', + 'null', 'as', 'isset', 'label', '__debug__'), suffix=r'(?=[\r\n\t( ])'), + Keyword), + (r'[A-Z]\w*', Name.Class), + (r'"""(([^\'\\]|\\.)|\\r|\\n)*((\{\{?)?(""?\{\{?)*""""*)', String), # Simple long string + (r'\'\'\'(((\\.|[^\'\\])|\\r|\\n)|\'((\\.|[^\'\\])|\\r|\\n)|' + r'\'\'((\\.|[^\'\\])|\\r|\\n))*\'\'\'', String), # Simple long string alt + (r'"""(([^\'\\]|\\.)|\\r|\\n)*((""?)?(\{\{?""?)*\{\{\{\{*)', String), # Start long string + (r'\}\}\}(((\\.|[^\'\\])|\\r|\\n))*(""?)?(\{\{?""?)*\{\{\{\{*', String), # Mid long string + (r'\}\}\}(((\\.|[^\'\\])|\\r|\\n))*(\{\{?)?(""?\{\{?)*""""*', String), # End long string + (r'"(\\.|([^"}{\\]))*"', String), # Simple String + (r'"(\\.|([^"}{\\]))*\{', String), # Start string + (r'\}(\\.|([^"}{\\]))*\{', String), # Mid String + (r'\}(\\.|([^"}{\\]))*"', 
String), # End String + (r'(\'[^\'\\]\')|(\'\\.\')', String.Char), + (r'[0-9]+', Number.Integer), + (r'[0-9]*.[0-9]+', Number.Float), + (r'0(x|X)[0-9A-Fa-f]+', Number.Hex), + (r'[a-z]\w*', Name), + (r'_\w+', Name.Variable.Instance), + (r'==|!=|<==>|>=|>>|>|<=|<<|<|\+|-|=|/|\*|%|\+=|-=|!|@', Operator), + (r'\(|\)|\[|\]|,|\.\.\.|\.\.|\.|::|:', Punctuation), + (r'`\{[^`]*`\}', Text), # Extern blocks won't be Lexed by Nit + (r'[\r\n\t ]+', Text), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/nix.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/nix.py new file mode 100644 index 0000000..1bf533d --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/nix.py @@ -0,0 +1,136 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.nix + ~~~~~~~~~~~~~~~~~~~ + + Lexers for the NixOS Nix language. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Literal + +__all__ = ['NixLexer'] + + +class NixLexer(RegexLexer): + """ + For the `Nix language `_. + + .. 
versionadded:: 2.0 + """ + + name = 'Nix' + aliases = ['nixos', 'nix'] + filenames = ['*.nix'] + mimetypes = ['text/x-nix'] + + flags = re.MULTILINE | re.UNICODE + + keywords = ['rec', 'with', 'let', 'in', 'inherit', 'assert', 'if', + 'else', 'then', '...'] + builtins = ['import', 'abort', 'baseNameOf', 'dirOf', 'isNull', 'builtins', + 'map', 'removeAttrs', 'throw', 'toString', 'derivation'] + operators = ['++', '+', '?', '.', '!', '//', '==', + '!=', '&&', '||', '->', '='] + + punctuations = ["(", ")", "[", "]", ";", "{", "}", ":", ",", "@"] + + tokens = { + 'root': [ + # comments starting with # + (r'#.*$', Comment.Single), + + # multiline comments + (r'/\*', Comment.Multiline, 'comment'), + + # whitespace + (r'\s+', Text), + + # keywords + ('(%s)' % '|'.join(re.escape(entry) + '\\b' for entry in keywords), Keyword), + + # highlight the builtins + ('(%s)' % '|'.join(re.escape(entry) + '\\b' for entry in builtins), + Name.Builtin), + + (r'\b(true|false|null)\b', Name.Constant), + + # operators + ('(%s)' % '|'.join(re.escape(entry) for entry in operators), + Operator), + + # word operators + (r'\b(or|and)\b', Operator.Word), + + # punctuations + ('(%s)' % '|'.join(re.escape(entry) for entry in punctuations), Punctuation), + + # integers + (r'[0-9]+', Number.Integer), + + # strings + (r'"', String.Double, 'doublequote'), + (r"''", String.Single, 'singlequote'), + + # paths + (r'[\w.+-]*(\/[\w.+-]+)+', Literal), + (r'\<[\w.+-]+(\/[\w.+-]+)*\>', Literal), + + # urls + (r'[a-zA-Z][a-zA-Z0-9\+\-\.]*\:[\w%/?:@&=+$,\\.!~*\'-]+', Literal), + + # names of variables + (r'[\w-]+\s*=', String.Symbol), + (r'[a-zA-Z_][\w\'-]*', Text), + + ], + 'comment': [ + (r'[^/*]+', Comment.Multiline), + (r'/\*', Comment.Multiline, '#push'), + (r'\*/', Comment.Multiline, '#pop'), + (r'[*/]', Comment.Multiline), + ], + 'singlequote': [ + (r"'''", String.Escape), + (r"''\$\{", String.Escape), + (r"''\n", String.Escape), + (r"''\r", String.Escape), + (r"''\t", String.Escape), + (r"''", 
String.Single, '#pop'), + (r'\$\{', String.Interpol, 'antiquote'), + (r"[^']", String.Single), + ], + 'doublequote': [ + (r'\\', String.Escape), + (r'\\"', String.Escape), + (r'\\$\{', String.Escape), + (r'"', String.Double, '#pop'), + (r'\$\{', String.Interpol, 'antiquote'), + (r'[^"]', String.Double), + ], + 'antiquote': [ + (r"\}", String.Interpol, '#pop'), + # TODO: we should probably escape also here ''${ \${ + (r"\$\{", String.Interpol, '#push'), + include('root'), + ], + } + + def analyse_text(text): + rv = 0.0 + # TODO: let/in + if re.search(r'import.+?<[^>]+>', text): + rv += 0.4 + if re.search(r'mkDerivation\s+(\(|\{|rec)', text): + rv += 0.4 + if re.search(r'=\s+mkIf\s+', text): + rv += 0.4 + if re.search(r'\{[a-zA-Z,\s]+\}:', text): + rv += 0.1 + return rv diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/objective.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/objective.py new file mode 100644 index 0000000..4e0ecf0 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/objective.py @@ -0,0 +1,487 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.objective + ~~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for Objective-C family languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include, bygroups, using, this, words, \ + inherit, default +from pygments.token import Text, Keyword, Name, String, Operator, \ + Number, Punctuation, Literal, Comment + +from pygments.lexers.c_cpp import CLexer, CppLexer + +__all__ = ['ObjectiveCLexer', 'ObjectiveCppLexer', 'LogosLexer', 'SwiftLexer'] + + +def objective(baselexer): + """ + Generate a subclass of baselexer that accepts the Objective-C syntax + extensions. 
+ """ + + # Have to be careful not to accidentally match JavaDoc/Doxygen syntax here, + # since that's quite common in ordinary C/C++ files. It's OK to match + # JavaDoc/Doxygen keywords that only apply to Objective-C, mind. + # + # The upshot of this is that we CANNOT match @class or @interface + _oc_keywords = re.compile(r'@(?:end|implementation|protocol)') + + # Matches [ ? identifier ( identifier ? ] | identifier? : ) + # (note the identifier is *optional* when there is a ':'!) + _oc_message = re.compile(r'\[\s*[a-zA-Z_]\w*\s+' + r'(?:[a-zA-Z_]\w*\s*\]|' + r'(?:[a-zA-Z_]\w*)?:)') + + class GeneratedObjectiveCVariant(baselexer): + """ + Implements Objective-C syntax on top of an existing C family lexer. + """ + + tokens = { + 'statements': [ + (r'@"', String, 'string'), + (r'@(YES|NO)', Number), + (r"@'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char), + (r'@(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float), + (r'@(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), + (r'@0x[0-9a-fA-F]+[Ll]?', Number.Hex), + (r'@0[0-7]+[Ll]?', Number.Oct), + (r'@\d+[Ll]?', Number.Integer), + (r'@\(', Literal, 'literal_number'), + (r'@\[', Literal, 'literal_array'), + (r'@\{', Literal, 'literal_dictionary'), + (words(( + '@selector', '@private', '@protected', '@public', '@encode', + '@synchronized', '@try', '@throw', '@catch', '@finally', + '@end', '@property', '@synthesize', '__bridge', '__bridge_transfer', + '__autoreleasing', '__block', '__weak', '__strong', 'weak', 'strong', + 'copy', 'retain', 'assign', 'unsafe_unretained', 'atomic', 'nonatomic', + 'readonly', 'readwrite', 'setter', 'getter', 'typeof', 'in', + 'out', 'inout', 'release', 'class', '@dynamic', '@optional', + '@required', '@autoreleasepool'), suffix=r'\b'), + Keyword), + (words(('id', 'instancetype', 'Class', 'IMP', 'SEL', 'BOOL', + 'IBOutlet', 'IBAction', 'unichar'), suffix=r'\b'), + Keyword.Type), + (r'@(true|false|YES|NO)\n', Name.Builtin), + (r'(YES|NO|nil|self|super)\b', Name.Builtin), + # 
Carbon types + (r'(Boolean|UInt8|SInt8|UInt16|SInt16|UInt32|SInt32)\b', Keyword.Type), + # Carbon built-ins + (r'(TRUE|FALSE)\b', Name.Builtin), + (r'(@interface|@implementation)(\s+)', bygroups(Keyword, Text), + ('#pop', 'oc_classname')), + (r'(@class|@protocol)(\s+)', bygroups(Keyword, Text), + ('#pop', 'oc_forward_classname')), + # @ can also prefix other expressions like @{...} or @(...) + (r'@', Punctuation), + inherit, + ], + 'oc_classname': [ + # interface definition that inherits + ('([a-zA-Z$_][\w$]*)(\s*:\s*)([a-zA-Z$_][\w$]*)?(\s*)(\{)', + bygroups(Name.Class, Text, Name.Class, Text, Punctuation), + ('#pop', 'oc_ivars')), + ('([a-zA-Z$_][\w$]*)(\s*:\s*)([a-zA-Z$_][\w$]*)?', + bygroups(Name.Class, Text, Name.Class), '#pop'), + # interface definition for a category + ('([a-zA-Z$_][\w$]*)(\s*)(\([a-zA-Z$_][\w$]*\))(\s*)(\{)', + bygroups(Name.Class, Text, Name.Label, Text, Punctuation), + ('#pop', 'oc_ivars')), + ('([a-zA-Z$_][\w$]*)(\s*)(\([a-zA-Z$_][\w$]*\))', + bygroups(Name.Class, Text, Name.Label), '#pop'), + # simple interface / implementation + ('([a-zA-Z$_][\w$]*)(\s*)(\{)', + bygroups(Name.Class, Text, Punctuation), ('#pop', 'oc_ivars')), + ('([a-zA-Z$_][\w$]*)', Name.Class, '#pop') + ], + 'oc_forward_classname': [ + ('([a-zA-Z$_][\w$]*)(\s*,\s*)', + bygroups(Name.Class, Text), 'oc_forward_classname'), + ('([a-zA-Z$_][\w$]*)(\s*;?)', + bygroups(Name.Class, Text), '#pop') + ], + 'oc_ivars': [ + include('whitespace'), + include('statements'), + (';', Punctuation), + (r'\{', Punctuation, '#push'), + (r'\}', Punctuation, '#pop'), + ], + 'root': [ + # methods + (r'^([-+])(\s*)' # method marker + r'(\(.*?\))?(\s*)' # return type + r'([a-zA-Z$_][\w$]*:?)', # begin of method name + bygroups(Punctuation, Text, using(this), + Text, Name.Function), + 'method'), + inherit, + ], + 'method': [ + include('whitespace'), + # TODO unsure if ellipses are allowed elsewhere, see + # discussion in Issue 789 + (r',', Punctuation), + (r'\.\.\.', Punctuation), + 
(r'(\(.*?\))(\s*)([a-zA-Z$_][\w$]*)', + bygroups(using(this), Text, Name.Variable)), + (r'[a-zA-Z$_][\w$]*:', Name.Function), + (';', Punctuation, '#pop'), + (r'\{', Punctuation, 'function'), + default('#pop'), + ], + 'literal_number': [ + (r'\(', Punctuation, 'literal_number_inner'), + (r'\)', Literal, '#pop'), + include('statement'), + ], + 'literal_number_inner': [ + (r'\(', Punctuation, '#push'), + (r'\)', Punctuation, '#pop'), + include('statement'), + ], + 'literal_array': [ + (r'\[', Punctuation, 'literal_array_inner'), + (r'\]', Literal, '#pop'), + include('statement'), + ], + 'literal_array_inner': [ + (r'\[', Punctuation, '#push'), + (r'\]', Punctuation, '#pop'), + include('statement'), + ], + 'literal_dictionary': [ + (r'\}', Literal, '#pop'), + include('statement'), + ], + } + + def analyse_text(text): + if _oc_keywords.search(text): + return 1.0 + elif '@"' in text: # strings + return 0.8 + elif re.search('@[0-9]+', text): + return 0.7 + elif _oc_message.search(text): + return 0.8 + return 0 + + def get_tokens_unprocessed(self, text): + from pygments.lexers._cocoa_builtins import COCOA_INTERFACES, \ + COCOA_PROTOCOLS, COCOA_PRIMITIVES + + for index, token, value in \ + baselexer.get_tokens_unprocessed(self, text): + if token is Name or token is Name.Class: + if value in COCOA_INTERFACES or value in COCOA_PROTOCOLS \ + or value in COCOA_PRIMITIVES: + token = Name.Builtin.Pseudo + + yield index, token, value + + return GeneratedObjectiveCVariant + + +class ObjectiveCLexer(objective(CLexer)): + """ + For Objective-C source code with preprocessor directives. + """ + + name = 'Objective-C' + aliases = ['objective-c', 'objectivec', 'obj-c', 'objc'] + filenames = ['*.m', '*.h'] + mimetypes = ['text/x-objective-c'] + priority = 0.05 # Lower than C + + +class ObjectiveCppLexer(objective(CppLexer)): + """ + For Objective-C++ source code with preprocessor directives. 
+ """ + + name = 'Objective-C++' + aliases = ['objective-c++', 'objectivec++', 'obj-c++', 'objc++'] + filenames = ['*.mm', '*.hh'] + mimetypes = ['text/x-objective-c++'] + priority = 0.05 # Lower than C++ + + +class LogosLexer(ObjectiveCppLexer): + """ + For Logos + Objective-C source code with preprocessor directives. + + .. versionadded:: 1.6 + """ + + name = 'Logos' + aliases = ['logos'] + filenames = ['*.x', '*.xi', '*.xm', '*.xmi'] + mimetypes = ['text/x-logos'] + priority = 0.25 + + tokens = { + 'statements': [ + (r'(%orig|%log)\b', Keyword), + (r'(%c)\b(\()(\s*)([a-zA-Z$_][\w$]*)(\s*)(\))', + bygroups(Keyword, Punctuation, Text, Name.Class, Text, Punctuation)), + (r'(%init)\b(\()', + bygroups(Keyword, Punctuation), 'logos_init_directive'), + (r'(%init)(?=\s*;)', bygroups(Keyword)), + (r'(%hook|%group)(\s+)([a-zA-Z$_][\w$]+)', + bygroups(Keyword, Text, Name.Class), '#pop'), + (r'(%subclass)(\s+)', bygroups(Keyword, Text), + ('#pop', 'logos_classname')), + inherit, + ], + 'logos_init_directive': [ + ('\s+', Text), + (',', Punctuation, ('logos_init_directive', '#pop')), + ('([a-zA-Z$_][\w$]*)(\s*)(=)(\s*)([^);]*)', + bygroups(Name.Class, Text, Punctuation, Text, Text)), + ('([a-zA-Z$_][\w$]*)', Name.Class), + ('\)', Punctuation, '#pop'), + ], + 'logos_classname': [ + ('([a-zA-Z$_][\w$]*)(\s*:\s*)([a-zA-Z$_][\w$]*)?', + bygroups(Name.Class, Text, Name.Class), '#pop'), + ('([a-zA-Z$_][\w$]*)', Name.Class, '#pop') + ], + 'root': [ + (r'(%subclass)(\s+)', bygroups(Keyword, Text), + 'logos_classname'), + (r'(%hook|%group)(\s+)([a-zA-Z$_][\w$]+)', + bygroups(Keyword, Text, Name.Class)), + (r'(%config)(\s*\(\s*)(\w+)(\s*=\s*)(.*?)(\s*\)\s*)', + bygroups(Keyword, Text, Name.Variable, Text, String, Text)), + (r'(%ctor)(\s*)(\{)', bygroups(Keyword, Text, Punctuation), + 'function'), + (r'(%new)(\s*)(\()(\s*.*?\s*)(\))', + bygroups(Keyword, Text, Keyword, String, Keyword)), + (r'(\s*)(%end)(\s*)', bygroups(Text, Keyword, Text)), + inherit, + ], + } + + _logos_keywords = 
re.compile(r'%(?:hook|ctor|init|c\()') + + def analyse_text(text): + if LogosLexer._logos_keywords.search(text): + return 1.0 + return 0 + +class SwiftLexer(RegexLexer): + """ + For `Swift `_ source. + + .. versionadded:: 2.0 + """ + name = 'Swift' + filenames = ['*.swift'] + aliases = ['swift'] + mimetypes = ['text/x-swift'] + + tokens = { + 'root': [ + # Whitespace and Comments + (r'\n', Text), + (r'\s+', Text), + (r'//', Comment.Single, 'comment-single'), + (r'/\*', Comment.Multiline, 'comment-multi'), + (r'#(if|elseif|else|endif)\b', Comment.Preproc, 'preproc'), + + # Keywords + include('keywords'), + + # Global Types + (r'(Array|AutoreleasingUnsafeMutablePointer|BidirectionalReverseView' + r'|Bit|Bool|CFunctionPointer|COpaquePointer|CVaListPointer' + r'|Character|ClosedInterval|CollectionOfOne|ContiguousArray' + r'|Dictionary|DictionaryGenerator|DictionaryIndex|Double' + r'|EmptyCollection|EmptyGenerator|EnumerateGenerator' + r'|EnumerateSequence|FilterCollectionView' + r'|FilterCollectionViewIndex|FilterGenerator|FilterSequenceView' + r'|Float|Float80|FloatingPointClassification|GeneratorOf' + r'|GeneratorOfOne|GeneratorSequence|HalfOpenInterval|HeapBuffer' + r'|HeapBufferStorage|ImplicitlyUnwrappedOptional|IndexingGenerator' + r'|Int|Int16|Int32|Int64|Int8|LazyBidirectionalCollection' + r'|LazyForwardCollection|LazyRandomAccessCollection' + r'|LazySequence|MapCollectionView|MapSequenceGenerator' + r'|MapSequenceView|MirrorDisposition|ObjectIdentifier|OnHeap' + r'|Optional|PermutationGenerator|QuickLookObject' + r'|RandomAccessReverseView|Range|RangeGenerator|RawByte|Repeat' + r'|ReverseBidirectionalIndex|ReverseRandomAccessIndex|SequenceOf' + r'|SinkOf|Slice|StaticString|StrideThrough|StrideThroughGenerator' + r'|StrideTo|StrideToGenerator|String|UInt|UInt16|UInt32|UInt64' + r'|UInt8|UTF16|UTF32|UTF8|UnicodeDecodingResult|UnicodeScalar' + r'|Unmanaged|UnsafeBufferPointer|UnsafeBufferPointerGenerator' + 
r'|UnsafeMutableBufferPointer|UnsafeMutablePointer|UnsafePointer' + r'|Zip2|ZipGenerator2' + # Protocols + r'|AbsoluteValuable|AnyObject|ArrayLiteralConvertible' + r'|BidirectionalIndexType|BitwiseOperationsType' + r'|BooleanLiteralConvertible|BooleanType|CVarArgType' + r'|CollectionType|Comparable|DebugPrintable' + r'|DictionaryLiteralConvertible|Equatable' + r'|ExtendedGraphemeClusterLiteralConvertible' + r'|ExtensibleCollectionType|FloatLiteralConvertible' + r'|FloatingPointType|ForwardIndexType|GeneratorType|Hashable' + r'|IntegerArithmeticType|IntegerLiteralConvertible|IntegerType' + r'|IntervalType|MirrorType|MutableCollectionType|MutableSliceable' + r'|NilLiteralConvertible|OutputStreamType|Printable' + r'|RandomAccessIndexType|RangeReplaceableCollectionType' + r'|RawOptionSetType|RawRepresentable|Reflectable|SequenceType' + r'|SignedIntegerType|SignedNumberType|SinkType|Sliceable' + r'|Streamable|Strideable|StringInterpolationConvertible' + r'|StringLiteralConvertible|UnicodeCodecType' + r'|UnicodeScalarLiteralConvertible|UnsignedIntegerType' + r'|_ArrayBufferType|_BidirectionalIndexType|_CocoaStringType' + r'|_CollectionType|_Comparable|_ExtensibleCollectionType' + r'|_ForwardIndexType|_Incrementable|_IntegerArithmeticType' + r'|_IntegerType|_ObjectiveCBridgeable|_RandomAccessIndexType' + r'|_RawOptionSetType|_SequenceType|_Sequence_Type' + r'|_SignedIntegerType|_SignedNumberType|_Sliceable|_Strideable' + r'|_SwiftNSArrayRequiredOverridesType|_SwiftNSArrayType' + r'|_SwiftNSCopyingType|_SwiftNSDictionaryRequiredOverridesType' + r'|_SwiftNSDictionaryType|_SwiftNSEnumeratorType' + r'|_SwiftNSFastEnumerationType|_SwiftNSStringRequiredOverridesType' + r'|_SwiftNSStringType|_UnsignedIntegerType' + # Variables + r'|C_ARGC|C_ARGV|Process' + # Typealiases + r'|Any|AnyClass|BooleanLiteralType|CBool|CChar|CChar16|CChar32' + r'|CDouble|CFloat|CInt|CLong|CLongLong|CShort|CSignedChar' + r'|CUnsignedInt|CUnsignedLong|CUnsignedShort|CWideChar' + 
r'|ExtendedGraphemeClusterType|Float32|Float64|FloatLiteralType' + r'|IntMax|IntegerLiteralType|StringLiteralType|UIntMax|UWord' + r'|UnicodeScalarType|Void|Word' + # Foundation/Cocoa + r'|NSErrorPointer|NSObjectProtocol|Selector)\b', Name.Builtin), + # Functions + (r'(abs|advance|alignof|alignofValue|assert|assertionFailure' + r'|contains|count|countElements|debugPrint|debugPrintln|distance' + r'|dropFirst|dropLast|dump|enumerate|equal|extend|fatalError' + r'|filter|find|first|getVaList|indices|insert|isEmpty|join|last' + r'|lazy|lexicographicalCompare|map|max|maxElement|min|minElement' + r'|numericCast|overlaps|partition|precondition|preconditionFailure' + r'|prefix|print|println|reduce|reflect|removeAll|removeAtIndex' + r'|removeLast|removeRange|reverse|sizeof|sizeofValue|sort|sorted' + r'|splice|split|startsWith|stride|strideof|strideofValue|suffix' + r'|swap|toDebugString|toString|transcode|underestimateCount' + r'|unsafeAddressOf|unsafeBitCast|unsafeDowncast' + r'|withExtendedLifetime|withUnsafeMutablePointer' + r'|withUnsafeMutablePointers|withUnsafePointer|withUnsafePointers' + r'|withVaList)\b', Name.Builtin.Pseudo), + + # Implicit Block Variables + (r'\$\d+', Name.Variable), + + # Binary Literal + (r'0b[01_]+', Number.Bin), + # Octal Literal + (r'0o[0-7_]+', Number.Oct), + # Hexadecimal Literal + (r'0x[0-9a-fA-F_]+', Number.Hex), + # Decimal Literal + (r'[0-9][0-9_]*(\.[0-9_]+[eE][+\-]?[0-9_]+|' + r'\.[0-9_]*|[eE][+\-]?[0-9_]+)', Number.Float), + (r'[0-9][0-9_]*', Number.Integer), + # String Literal + (r'"', String, 'string'), + + # Operators and Punctuation + (r'[(){}\[\].,:;=@#`?]|->|[<&?](?=\w)|(?<=\w)[>!?]', Punctuation), + (r'[/=\-+!*%<>&|^?~]+', Operator), + + # Identifier + (r'[a-zA-Z_]\w*', Name) + ], + 'keywords': [ + (r'(break|case|continue|default|do|else|fallthrough|for|if|in' + r'|return|switch|where|while)\b', Keyword), + (r'@availability\([^)]+\)', Keyword.Reserved), + (r'(associativity|convenience|dynamic|didSet|final|get|infix|inout' + 
r'|lazy|left|mutating|none|nonmutating|optional|override|postfix' + r'|precedence|prefix|Protocol|required|right|set|Type|unowned|weak' + r'|willSet|@(availability|autoclosure|noreturn' + r'|NSApplicationMain|NSCopying|NSManaged|objc' + r'|UIApplicationMain|IBAction|IBDesignable|IBInspectable' + r'|IBOutlet))\b',Keyword.Reserved), + (r'(as|dynamicType|false|is|nil|self|Self|super|true|__COLUMN__' + r'|__FILE__|__FUNCTION__|__LINE__|_)\b', Keyword.Constant), + (r'import\b', Keyword.Declaration, 'module'), + (r'(class|enum|extension|struct|protocol)(\s+)([a-zA-Z_]\w*)', + bygroups(Keyword.Declaration, Text, Name.Class)), + (r'(func)(\s+)([a-zA-Z_]\w*)', + bygroups(Keyword.Declaration, Text, Name.Function)), + (r'(var|let)(\s+)([a-zA-Z_]\w*)', bygroups(Keyword.Declaration, + Text, Name.Variable)), + (r'(class|deinit|enum|extension|func|import|init|internal|let' + r'|operator|private|protocol|public|static|struct|subscript' + r'|typealias|var)\b', Keyword.Declaration) + ], + 'comment': [ + (r':param: [a-zA-Z_]\w*|:returns?:|(FIXME|MARK|TODO):', + Comment.Special) + ], + + # Nested + 'comment-single': [ + (r'\n', Text, '#pop'), + include('comment'), + (r'[^\n]', Comment.Single) + ], + 'comment-multi': [ + include('comment'), + (r'[^*/]', Comment.Multiline), + (r'/\*', Comment.Multiline, '#push'), + (r'\*/', Comment.Multiline, '#pop'), + (r'[*/]', Comment.Multiline) + ], + 'module': [ + (r'\n', Text, '#pop'), + (r'[a-zA-Z_]\w*', Name.Class), + include('root') + ], + 'preproc': [ + (r'\n', Text, '#pop'), + include('keywords'), + (r'[A-Za-z]\w*', Comment.Preproc), + include('root') + ], + 'string': [ + (r'\\\(', String.Interpol, 'string-intp'), + (r'"', String, '#pop'), + (r"""\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}""" + r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}""", String.Escape), + (r'[^\\"]+', String), + (r'\\', String) + ], + 'string-intp': [ + (r'\(', String.Interpol, '#push'), + (r'\)', String.Interpol, '#pop'), + include('root') + ] + } + + def 
get_tokens_unprocessed(self, text): + from pygments.lexers._cocoa_builtins import COCOA_INTERFACES, \ + COCOA_PROTOCOLS, COCOA_PRIMITIVES + + for index, token, value in \ + RegexLexer.get_tokens_unprocessed(self, text): + if token is Name or token is Name.Class: + if value in COCOA_INTERFACES or value in COCOA_PROTOCOLS \ + or value in COCOA_PRIMITIVES: + token = Name.Builtin.Pseudo + + yield index, token, value diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/ooc.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/ooc.py new file mode 100644 index 0000000..b58d347 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/ooc.py @@ -0,0 +1,85 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.ooc + ~~~~~~~~~~~~~~~~~~~ + + Lexers for the Ooc language. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, bygroups, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['OocLexer'] + + +class OocLexer(RegexLexer): + """ + For `Ooc `_ source code + + .. 
versionadded:: 1.2 + """ + name = 'Ooc' + aliases = ['ooc'] + filenames = ['*.ooc'] + mimetypes = ['text/x-ooc'] + + tokens = { + 'root': [ + (words(( + 'class', 'interface', 'implement', 'abstract', 'extends', 'from', + 'this', 'super', 'new', 'const', 'final', 'static', 'import', + 'use', 'extern', 'inline', 'proto', 'break', 'continue', + 'fallthrough', 'operator', 'if', 'else', 'for', 'while', 'do', + 'switch', 'case', 'as', 'in', 'version', 'return', 'true', + 'false', 'null'), prefix=r'\b', suffix=r'\b'), + Keyword), + (r'include\b', Keyword, 'include'), + (r'(cover)([ \t]+)(from)([ \t]+)(\w+[*@]?)', + bygroups(Keyword, Text, Keyword, Text, Name.Class)), + (r'(func)((?:[ \t]|\\\n)+)(~[a-z_]\w*)', + bygroups(Keyword, Text, Name.Function)), + (r'\bfunc\b', Keyword), + # Note: %= and ^= not listed on http://ooc-lang.org/syntax + (r'//.*', Comment), + (r'(?s)/\*.*?\*/', Comment.Multiline), + (r'(==?|\+=?|-[=>]?|\*=?|/=?|:=|!=?|%=?|\?|>{1,3}=?|<{1,3}=?|\.\.|' + r'&&?|\|\|?|\^=?)', Operator), + (r'(\.)([ \t]*)([a-z]\w*)', bygroups(Operator, Text, + Name.Function)), + (r'[A-Z][A-Z0-9_]+', Name.Constant), + (r'[A-Z]\w*([@*]|\[[ \t]*\])?', Name.Class), + + (r'([a-z]\w*(?:~[a-z]\w*)?)((?:[ \t]|\\\n)*)(?=\()', + bygroups(Name.Function, Text)), + (r'[a-z]\w*', Name.Variable), + + # : introduces types + (r'[:(){}\[\];,]', Punctuation), + + (r'0x[0-9a-fA-F]+', Number.Hex), + (r'0c[0-9]+', Number.Oct), + (r'0b[01]+', Number.Bin), + (r'[0-9_]\.[0-9_]*(?!\.)', Number.Float), + (r'[0-9_]+', Number.Decimal), + + (r'"(?:\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\"])*"', + String.Double), + (r"'(?:\\.|\\[0-9]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", + String.Char), + (r'@', Punctuation), # pointer dereference + (r'\.', Punctuation), # imports or chain operator + + (r'\\[ \t\n]', Text), + (r'[ \t]+', Text), + ], + 'include': [ + (r'[\w/]+', Name), + (r',', Punctuation), + (r'[ \t]', Text), + (r'[;\n]', Text, '#pop'), + ], + } diff --git 
a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/other.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/other.py new file mode 100644 index 0000000..cde764a --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/other.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.other + ~~~~~~~~~~~~~~~~~~~~~ + + Just export lexer classes previously contained in this module. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexers.sql import SqlLexer, MySqlLexer, SqliteConsoleLexer +from pygments.lexers.shell import BashLexer, BashSessionLexer, BatchLexer, \ + TcshLexer +from pygments.lexers.robotframework import RobotFrameworkLexer +from pygments.lexers.testing import GherkinLexer +from pygments.lexers.esoteric import BrainfuckLexer, BefungeLexer, RedcodeLexer +from pygments.lexers.prolog import LogtalkLexer +from pygments.lexers.snobol import SnobolLexer +from pygments.lexers.rebol import RebolLexer +from pygments.lexers.configs import KconfigLexer, Cfengine3Lexer +from pygments.lexers.modeling import ModelicaLexer +from pygments.lexers.scripting import AppleScriptLexer, MOOCodeLexer, \ + HybrisLexer +from pygments.lexers.graphics import PostScriptLexer, GnuplotLexer, \ + AsymptoteLexer, PovrayLexer +from pygments.lexers.business import ABAPLexer, OpenEdgeLexer, \ + GoodDataCLLexer, MaqlLexer +from pygments.lexers.automation import AutoItLexer, AutohotkeyLexer +from pygments.lexers.dsls import ProtoBufLexer, BroLexer, PuppetLexer, \ + MscgenLexer, VGLLexer +from pygments.lexers.basic import CbmBasicV2Lexer +from pygments.lexers.pawn import SourcePawnLexer, PawnLexer +from pygments.lexers.ecl import ECLLexer +from pygments.lexers.urbi import UrbiscriptLexer +from pygments.lexers.smalltalk import SmalltalkLexer, NewspeakLexer +from pygments.lexers.installers import NSISLexer, RPMSpecLexer 
+from pygments.lexers.textedit import AwkLexer + +__all__ = [] diff --git a/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/parsers.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/parsers.py similarity index 68% rename from plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/parsers.py rename to plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/parsers.py index c1ad710..91add67 100644 --- a/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/parsers.py +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/parsers.py @@ -5,7 +5,7 @@ Lexers for parser generators. - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ @@ -14,22 +14,24 @@ import re from pygments.lexer import RegexLexer, DelegatingLexer, \ include, bygroups, using from pygments.token import Punctuation, Other, Text, Comment, Operator, \ - Keyword, Name, String, Number, Whitespace -from pygments.lexers.compiled import JavaLexer, CLexer, CppLexer, \ - ObjectiveCLexer, DLexer + Keyword, Name, String, Number, Whitespace +from pygments.lexers.jvm import JavaLexer +from pygments.lexers.c_cpp import CLexer, CppLexer +from pygments.lexers.objective import ObjectiveCLexer +from pygments.lexers.d import DLexer from pygments.lexers.dotnet import CSharpLexer -from pygments.lexers.agile import RubyLexer, PythonLexer, PerlLexer -from pygments.lexers.web import ActionScriptLexer - +from pygments.lexers.ruby import RubyLexer +from pygments.lexers.python import PythonLexer +from pygments.lexers.perl import PerlLexer __all__ = ['RagelLexer', 'RagelEmbeddedLexer', 'RagelCLexer', 'RagelDLexer', 'RagelCppLexer', 'RagelObjectiveCLexer', 'RagelRubyLexer', 'RagelJavaLexer', 'AntlrLexer', 'AntlrPythonLexer', 'AntlrPerlLexer', 'AntlrRubyLexer', 'AntlrCppLexer', - 
#'AntlrCLexer', + # 'AntlrCLexer', 'AntlrCSharpLexer', 'AntlrObjectiveCLexer', - 'AntlrJavaLexer', "AntlrActionScriptLexer", - 'TreetopLexer'] + 'AntlrJavaLexer', 'AntlrActionScriptLexer', + 'TreetopLexer', 'EbnfLexer'] class RagelLexer(RegexLexer): @@ -38,7 +40,7 @@ class RagelLexer(RegexLexer): fragments of Ragel. For ``.rl`` files, use RagelEmbeddedLexer instead (or one of the language-specific subclasses). - *New in Pygments 1.1.* + .. versionadded:: 1.1 """ name = 'Ragel' @@ -63,29 +65,29 @@ class RagelLexer(RegexLexer): (r'[+-]?[0-9]+', Number.Integer), ], 'literals': [ - (r'"(\\\\|\\"|[^"])*"', String), # double quote string - (r"'(\\\\|\\'|[^'])*'", String), # single quote string - (r'\[(\\\\|\\\]|[^\]])*\]', String), # square bracket literals - (r'/(?!\*)(\\\\|\\/|[^/])*/', String.Regex), # regular expressions + (r'"(\\\\|\\"|[^"])*"', String), # double quote string + (r"'(\\\\|\\'|[^'])*'", String), # single quote string + (r'\[(\\\\|\\\]|[^\]])*\]', String), # square bracket literals + (r'/(?!\*)(\\\\|\\/|[^/])*/', String.Regex), # regular expressions ], 'identifiers': [ - (r'[a-zA-Z_][a-zA-Z_0-9]*', Name.Variable), + (r'[a-zA-Z_]\w*', Name.Variable), ], 'operators': [ - (r',', Operator), # Join - (r'\||&|--?', Operator), # Union, Intersection and Subtraction - (r'\.|<:|:>>?', Operator), # Concatention - (r':', Operator), # Label - (r'->', Operator), # Epsilon Transition - (r'(>|\$|%|<|@|<>)(/|eof\b)', Operator), # EOF Actions - (r'(>|\$|%|<|@|<>)(!|err\b)', Operator), # Global Error Actions - (r'(>|\$|%|<|@|<>)(\^|lerr\b)', Operator), # Local Error Actions - (r'(>|\$|%|<|@|<>)(~|to\b)', Operator), # To-State Actions - (r'(>|\$|%|<|@|<>)(\*|from\b)', Operator), # From-State Actions - (r'>|@|\$|%', Operator), # Transition Actions and Priorities - (r'\*|\?|\+|{[0-9]*,[0-9]*}', Operator), # Repetition - (r'!|\^', Operator), # Negation - (r'\(|\)', Operator), # Grouping + (r',', Operator), # Join + (r'\||&|--?', Operator), # Union, Intersection and 
Subtraction + (r'\.|<:|:>>?', Operator), # Concatention + (r':', Operator), # Label + (r'->', Operator), # Epsilon Transition + (r'(>|\$|%|<|@|<>)(/|eof\b)', Operator), # EOF Actions + (r'(>|\$|%|<|@|<>)(!|err\b)', Operator), # Global Error Actions + (r'(>|\$|%|<|@|<>)(\^|lerr\b)', Operator), # Local Error Actions + (r'(>|\$|%|<|@|<>)(~|to\b)', Operator), # To-State Actions + (r'(>|\$|%|<|@|<>)(\*|from\b)', Operator), # From-State Actions + (r'>|@|\$|%', Operator), # Transition Actions and Priorities + (r'\*|\?|\+|\{[0-9]*,[0-9]*\}', Operator), # Repetition + (r'!|\^', Operator), # Negation + (r'\(|\)', Operator), # Grouping ], 'root': [ include('literals'), @@ -95,21 +97,21 @@ class RagelLexer(RegexLexer): include('numbers'), include('identifiers'), include('operators'), - (r'{', Punctuation, 'host'), + (r'\{', Punctuation, 'host'), (r'=', Operator), (r';', Punctuation), ], 'host': [ - (r'(' + r'|'.join(( # keep host code in largest possible chunks - r'[^{}\'"/#]+', # exclude unsafe characters - r'[^\\][\\][{}]', # allow escaped { or } + (r'(' + r'|'.join(( # keep host code in largest possible chunks + r'[^{}\'"/#]+', # exclude unsafe characters + r'[^\\]\\[{}]', # allow escaped { or } # strings and comments may safely contain unsafe characters - r'"(\\\\|\\"|[^"])*"', # double quote string - r"'(\\\\|\\'|[^'])*'", # single quote string - r'//.*$\n?', # single line comment - r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment - r'\#.*$\n?', # ruby comment + r'"(\\\\|\\"|[^"])*"', # double quote string + r"'(\\\\|\\'|[^'])*'", # single quote string + r'//.*$\n?', # single line comment + r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment + r'\#.*$\n?', # ruby comment # regular expression: There's no reason for it to start # with a * and this stops confusion with comments. 
@@ -119,8 +121,8 @@ class RagelLexer(RegexLexer): r'/', )) + r')+', Other), - (r'{', Punctuation, '#push'), - (r'}', Punctuation, '#pop'), + (r'\{', Punctuation, '#push'), + (r'\}', Punctuation, '#pop'), ], } @@ -132,7 +134,7 @@ class RagelEmbeddedLexer(RegexLexer): This will only highlight Ragel statements. If you want host language highlighting then call the language-specific Ragel lexer. - *New in Pygments 1.1.* + .. versionadded:: 1.1 """ name = 'Embedded Ragel' @@ -141,17 +143,17 @@ class RagelEmbeddedLexer(RegexLexer): tokens = { 'root': [ - (r'(' + r'|'.join(( # keep host code in largest possible chunks - r'[^%\'"/#]+', # exclude unsafe characters - r'%(?=[^%]|$)', # a single % sign is okay, just not 2 of them + (r'(' + r'|'.join(( # keep host code in largest possible chunks + r'[^%\'"/#]+', # exclude unsafe characters + r'%(?=[^%]|$)', # a single % sign is okay, just not 2 of them # strings and comments may safely contain unsafe characters - r'"(\\\\|\\"|[^"])*"', # double quote string - r"'(\\\\|\\'|[^'])*'", # single quote string - r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment - r'//.*$\n?', # single line comment - r'\#.*$\n?', # ruby/ragel comment - r'/(?!\*)(\\\\|\\/|[^/])*/', # regular expression + r'"(\\\\|\\"|[^"])*"', # double quote string + r"'(\\\\|\\'|[^'])*'", # single quote string + r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment + r'//.*$\n?', # single line comment + r'\#.*$\n?', # ruby/ragel comment + r'/(?!\*)(\\\\|\\/|[^/])*/', # regular expression # / is safe now that we've handled regex and javadoc comments r'/', @@ -165,15 +167,15 @@ class RagelEmbeddedLexer(RegexLexer): Punctuation, Text)), # Multi Line FSM. - (r'(%%%%|%%){', Punctuation, 'multi-line-fsm'), + (r'(%%%%|%%)\{', Punctuation, 'multi-line-fsm'), ], 'multi-line-fsm': [ - (r'(' + r'|'.join(( # keep ragel code in largest possible chunks. + (r'(' + r'|'.join(( # keep ragel code in largest possible chunks. 
r'(' + r'|'.join(( - r'[^}\'"\[/#]', # exclude unsafe characters - r'}(?=[^%]|$)', # } is okay as long as it's not followed by % - r'}%(?=[^%]|$)', # ...well, one %'s okay, just not two... - r'[^\\][\\][{}]', # ...and } is okay if it's escaped + r'[^}\'"\[/#]', # exclude unsafe characters + r'\}(?=[^%]|$)', # } is okay as long as it's not followed by % + r'\}%(?=[^%]|$)', # ...well, one %'s okay, just not two... + r'[^\\]\\[{}]', # ...and } is okay if it's escaped # allow / if it's preceded with one of these symbols # (ragel EOF actions) @@ -184,35 +186,35 @@ class RagelEmbeddedLexer(RegexLexer): r'/(?!\*)(\\\\|\\/|[^/])*/\*', # allow / as long as it's not followed by another / or by a * - r'/(?=[^/\*]|$)', + r'/(?=[^/*]|$)', # We want to match as many of these as we can in one block. # Not sure if we need the + sign here, # does it help performance? - )) + r')+', + )) + r')+', # strings and comments may safely contain unsafe characters - r'"(\\\\|\\"|[^"])*"', # double quote string - r"'(\\\\|\\'|[^'])*'", # single quote string - r"\[(\\\\|\\\]|[^\]])*\]", # square bracket literal - r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment - r'//.*$\n?', # single line comment - r'\#.*$\n?', # ruby/ragel comment + r'"(\\\\|\\"|[^"])*"', # double quote string + r"'(\\\\|\\'|[^'])*'", # single quote string + r"\[(\\\\|\\\]|[^\]])*\]", # square bracket literal + r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment + r'//.*$\n?', # single line comment + r'\#.*$\n?', # ruby/ragel comment )) + r')+', using(RagelLexer)), - (r'}%%', Punctuation, '#pop'), + (r'\}%%', Punctuation, '#pop'), ] } def analyse_text(text): - return '@LANG: indep' in text or 0.1 + return '@LANG: indep' in text class RagelRubyLexer(DelegatingLexer): """ A lexer for `Ragel`_ in a Ruby host file. - *New in Pygments 1.1.* + .. 
versionadded:: 1.1 """ name = 'Ragel in Ruby Host' @@ -221,7 +223,7 @@ class RagelRubyLexer(DelegatingLexer): def __init__(self, **options): super(RagelRubyLexer, self).__init__(RubyLexer, RagelEmbeddedLexer, - **options) + **options) def analyse_text(text): return '@LANG: ruby' in text @@ -231,7 +233,7 @@ class RagelCLexer(DelegatingLexer): """ A lexer for `Ragel`_ in a C host file. - *New in Pygments 1.1.* + .. versionadded:: 1.1 """ name = 'Ragel in C Host' @@ -250,7 +252,7 @@ class RagelDLexer(DelegatingLexer): """ A lexer for `Ragel`_ in a D host file. - *New in Pygments 1.1.* + .. versionadded:: 1.1 """ name = 'Ragel in D Host' @@ -268,7 +270,7 @@ class RagelCppLexer(DelegatingLexer): """ A lexer for `Ragel`_ in a CPP host file. - *New in Pygments 1.1.* + .. versionadded:: 1.1 """ name = 'Ragel in CPP Host' @@ -286,7 +288,7 @@ class RagelObjectiveCLexer(DelegatingLexer): """ A lexer for `Ragel`_ in an Objective C host file. - *New in Pygments 1.1.* + .. versionadded:: 1.1 """ name = 'Ragel in Objective C Host' @@ -306,7 +308,7 @@ class RagelJavaLexer(DelegatingLexer): """ A lexer for `Ragel`_ in a Java host file. - *New in Pygments 1.1.* + .. versionadded:: 1.1 """ name = 'Ragel in Java Host' @@ -327,7 +329,7 @@ class AntlrLexer(RegexLexer): Should not be called directly, instead use DelegatingLexer for your target language. - *New in Pygments 1.1.* + .. versionadded:: 1.1 .. 
_ANTLR: http://www.antlr.org/ """ @@ -336,9 +338,9 @@ class AntlrLexer(RegexLexer): aliases = ['antlr'] filenames = [] - _id = r'[A-Za-z][A-Za-z_0-9]*' - _TOKEN_REF = r'[A-Z][A-Za-z_0-9]*' - _RULE_REF = r'[a-z][A-Za-z_0-9]*' + _id = r'[A-Za-z]\w*' + _TOKEN_REF = r'[A-Z]\w*' + _RULE_REF = r'[a-z]\w*' _STRING_LITERAL = r'\'(?:\\\\|\\\'|[^\']*)\'' _INT = r'[0-9]+' @@ -362,17 +364,17 @@ class AntlrLexer(RegexLexer): # tokensSpec (r'tokens\b', Keyword, 'tokens'), # attrScope - (r'(scope)(\s*)(' + _id + ')(\s*)({)', + (r'(scope)(\s*)(' + _id + ')(\s*)(\{)', bygroups(Keyword, Whitespace, Name.Variable, Whitespace, Punctuation), 'action'), # exception (r'(catch|finally)\b', Keyword, 'exception'), # action - (r'(@' + _id + ')(\s*)(::)?(\s*)(' + _id + ')(\s*)({)', + (r'(@' + _id + ')(\s*)(::)?(\s*)(' + _id + ')(\s*)(\{)', bygroups(Name.Label, Whitespace, Punctuation, Whitespace, Name.Label, Whitespace, Punctuation), 'action'), # rule - (r'((?:protected|private|public|fragment)\b)?(\s*)(' + _id + ')(!)?', \ + (r'((?:protected|private|public|fragment)\b)?(\s*)(' + _id + ')(!)?', bygroups(Keyword, Whitespace, Name.Label, Punctuation), ('rule-alts', 'rule-prelims')), ], @@ -395,18 +397,18 @@ class AntlrLexer(RegexLexer): (r'(throws)(\s+)(' + _id + ')', bygroups(Keyword, Whitespace, Name.Label)), (r'(,)(\s*)(' + _id + ')', - bygroups(Punctuation, Whitespace, Name.Label)), # Additional throws + bygroups(Punctuation, Whitespace, Name.Label)), # Additional throws # optionsSpec (r'options\b', Keyword, 'options'), # ruleScopeSpec - scope followed by target language code or name of action # TODO finish implementing other possibilities for scope # L173 ANTLRv3.g from ANTLR book - (r'(scope)(\s+)({)', bygroups(Keyword, Whitespace, Punctuation), - 'action'), + (r'(scope)(\s+)(\{)', bygroups(Keyword, Whitespace, Punctuation), + 'action'), (r'(scope)(\s+)(' + _id + ')(\s*)(;)', bygroups(Keyword, Whitespace, Name.Label, Whitespace, Punctuation)), # ruleAction - (r'(@' + _id + ')(\s*)({)', + 
(r'(@' + _id + ')(\s*)(\{)', bygroups(Name.Label, Whitespace, Punctuation), 'action'), # finished prelims, go to rule alts! (r':', Punctuation, '#pop') @@ -425,9 +427,9 @@ class AntlrLexer(RegexLexer): (r'<<([^>]|>[^>])>>', String), # identifiers # Tokens start with capital letter. - (r'\$?[A-Z_][A-Za-z_0-9]*', Name.Constant), + (r'\$?[A-Z_]\w*', Name.Constant), # Rules start with small letter. - (r'\$?[a-z_][A-Za-z_0-9]*', Name.Variable), + (r'\$?[a-z_]\w*', Name.Variable), # operators (r'(\+|\||->|=>|=|\(|\)|\.\.|\.|\?|\*|\^|!|\#|~)', Operator), (r',', Punctuation), @@ -438,32 +440,32 @@ class AntlrLexer(RegexLexer): 'tokens': [ include('whitespace'), include('comments'), - (r'{', Punctuation), + (r'\{', Punctuation), (r'(' + _TOKEN_REF + r')(\s*)(=)?(\s*)(' + _STRING_LITERAL + ')?(\s*)(;)', bygroups(Name.Label, Whitespace, Punctuation, Whitespace, String, Whitespace, Punctuation)), - (r'}', Punctuation, '#pop'), + (r'\}', Punctuation, '#pop'), ], 'options': [ include('whitespace'), include('comments'), - (r'{', Punctuation), + (r'\{', Punctuation), (r'(' + _id + r')(\s*)(=)(\s*)(' + - '|'.join((_id, _STRING_LITERAL, _INT, '\*'))+ ')(\s*)(;)', + '|'.join((_id, _STRING_LITERAL, _INT, '\*')) + ')(\s*)(;)', bygroups(Name.Variable, Whitespace, Punctuation, Whitespace, Text, Whitespace, Punctuation)), - (r'}', Punctuation, '#pop'), + (r'\}', Punctuation, '#pop'), ], 'action': [ - (r'(' + r'|'.join(( # keep host code in largest possible chunks - r'[^\${}\'"/\\]+', # exclude unsafe characters + (r'(' + r'|'.join(( # keep host code in largest possible chunks + r'[^${}\'"/\\]+', # exclude unsafe characters # strings and comments may safely contain unsafe characters - r'"(\\\\|\\"|[^"])*"', # double quote string - r"'(\\\\|\\'|[^'])*'", # single quote string - r'//.*$\n?', # single line comment - r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment + r'"(\\\\|\\"|[^"])*"', # double quote string + r"'(\\\\|\\'|[^'])*'", # single quote string + r'//.*$\n?', # single line 
comment + r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment # regular expression: There's no reason for it to start # with a * and this stops confusion with comments. @@ -479,18 +481,18 @@ class AntlrLexer(RegexLexer): (r'(\\)(%)', bygroups(Punctuation, Other)), (r'(\$[a-zA-Z]+)(\.?)(text|value)?', bygroups(Name.Variable, Punctuation, Name.Property)), - (r'{', Punctuation, '#push'), - (r'}', Punctuation, '#pop'), + (r'\{', Punctuation, '#push'), + (r'\}', Punctuation, '#pop'), ], 'nested-arg-action': [ - (r'(' + r'|'.join(( # keep host code in largest possible chunks. - r'[^\$\[\]\'"/]+', # exclude unsafe characters + (r'(' + r'|'.join(( # keep host code in largest possible chunks. + r'[^$\[\]\'"/]+', # exclude unsafe characters # strings and comments may safely contain unsafe characters - r'"(\\\\|\\"|[^"])*"', # double quote string - r"'(\\\\|\\'|[^'])*'", # single quote string - r'//.*$\n?', # single line comment - r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment + r'"(\\\\|\\"|[^"])*"', # double quote string + r"'(\\\\|\\'|[^'])*'", # single quote string + r'//.*$\n?', # single line comment + r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment # regular expression: There's no reason for it to start # with a * and this stops confusion with comments. @@ -520,11 +522,11 @@ class AntlrLexer(RegexLexer): # so just assume they're C++. No idea how to make Objective C work in the # future. -#class AntlrCLexer(DelegatingLexer): +# class AntlrCLexer(DelegatingLexer): # """ # ANTLR with C Target # -# *New in Pygments 1.1* +# .. versionadded:: 1.1 # """ # # name = 'ANTLR With C Target' @@ -537,11 +539,12 @@ class AntlrLexer(RegexLexer): # def analyse_text(text): # return re.match(r'^\s*language\s*=\s*C\s*;', text) + class AntlrCppLexer(DelegatingLexer): """ `ANTLR`_ with CPP Target - *New in Pygments 1.1.* + .. 
versionadded:: 1.1 """ name = 'ANTLR With CPP Target' @@ -553,14 +556,14 @@ class AntlrCppLexer(DelegatingLexer): def analyse_text(text): return AntlrLexer.analyse_text(text) and \ - re.search(r'^\s*language\s*=\s*C\s*;', text, re.M) + re.search(r'^\s*language\s*=\s*C\s*;', text, re.M) class AntlrObjectiveCLexer(DelegatingLexer): """ `ANTLR`_ with Objective-C Target - *New in Pygments 1.1.* + .. versionadded:: 1.1 """ name = 'ANTLR With ObjectiveC Target' @@ -573,14 +576,14 @@ class AntlrObjectiveCLexer(DelegatingLexer): def analyse_text(text): return AntlrLexer.analyse_text(text) and \ - re.search(r'^\s*language\s*=\s*ObjC\s*;', text) + re.search(r'^\s*language\s*=\s*ObjC\s*;', text) class AntlrCSharpLexer(DelegatingLexer): """ `ANTLR`_ with C# Target - *New in Pygments 1.1.* + .. versionadded:: 1.1 """ name = 'ANTLR With C# Target' @@ -593,14 +596,14 @@ class AntlrCSharpLexer(DelegatingLexer): def analyse_text(text): return AntlrLexer.analyse_text(text) and \ - re.search(r'^\s*language\s*=\s*CSharp2\s*;', text, re.M) + re.search(r'^\s*language\s*=\s*CSharp2\s*;', text, re.M) class AntlrPythonLexer(DelegatingLexer): """ `ANTLR`_ with Python Target - *New in Pygments 1.1.* + .. versionadded:: 1.1 """ name = 'ANTLR With Python Target' @@ -613,14 +616,14 @@ class AntlrPythonLexer(DelegatingLexer): def analyse_text(text): return AntlrLexer.analyse_text(text) and \ - re.search(r'^\s*language\s*=\s*Python\s*;', text, re.M) + re.search(r'^\s*language\s*=\s*Python\s*;', text, re.M) class AntlrJavaLexer(DelegatingLexer): """ `ANTLR`_ with Java Target - *New in Pygments 1.1* + .. versionadded:: 1. """ name = 'ANTLR With Java Target' @@ -640,7 +643,7 @@ class AntlrRubyLexer(DelegatingLexer): """ `ANTLR`_ with Ruby Target - *New in Pygments 1.1.* + .. 
versionadded:: 1.1 """ name = 'ANTLR With Ruby Target' @@ -653,14 +656,14 @@ class AntlrRubyLexer(DelegatingLexer): def analyse_text(text): return AntlrLexer.analyse_text(text) and \ - re.search(r'^\s*language\s*=\s*Ruby\s*;', text, re.M) + re.search(r'^\s*language\s*=\s*Ruby\s*;', text, re.M) class AntlrPerlLexer(DelegatingLexer): """ `ANTLR`_ with Perl Target - *New in Pygments 1.1.* + .. versionadded:: 1.1 """ name = 'ANTLR With Perl Target' @@ -673,14 +676,14 @@ class AntlrPerlLexer(DelegatingLexer): def analyse_text(text): return AntlrLexer.analyse_text(text) and \ - re.search(r'^\s*language\s*=\s*Perl5\s*;', text, re.M) + re.search(r'^\s*language\s*=\s*Perl5\s*;', text, re.M) class AntlrActionScriptLexer(DelegatingLexer): """ `ANTLR`_ with ActionScript Target - *New in Pygments 1.1.* + .. versionadded:: 1.1 """ name = 'ANTLR With ActionScript Target' @@ -688,19 +691,21 @@ class AntlrActionScriptLexer(DelegatingLexer): filenames = ['*.G', '*.g'] def __init__(self, **options): + from pygments.lexers.actionscript import ActionScriptLexer super(AntlrActionScriptLexer, self).__init__(ActionScriptLexer, AntlrLexer, **options) def analyse_text(text): return AntlrLexer.analyse_text(text) and \ - re.search(r'^\s*language\s*=\s*ActionScript\s*;', text, re.M) + re.search(r'^\s*language\s*=\s*ActionScript\s*;', text, re.M) + class TreetopBaseLexer(RegexLexer): """ A base lexer for `Treetop `_ grammars. Not for direct use; use TreetopLexer instead. - *New in Pygments 1.6.* + .. 
versionadded:: 1.6 """ tokens = { @@ -715,43 +720,43 @@ class TreetopBaseLexer(RegexLexer): include('end'), (r'module\b', Keyword, '#push'), (r'grammar\b', Keyword, 'grammar'), - (r'[A-Z][A-Za-z_0-9]*(?:::[A-Z][A-Za-z_0-9]*)*', Name.Namespace), + (r'[A-Z]\w*(?:::[A-Z]\w*)*', Name.Namespace), ], 'grammar': [ include('space'), include('end'), (r'rule\b', Keyword, 'rule'), (r'include\b', Keyword, 'include'), - (r'[A-Z][A-Za-z_0-9]*', Name), + (r'[A-Z]\w*', Name), ], 'include': [ include('space'), - (r'[A-Z][A-Za-z_0-9]*(?:::[A-Z][A-Za-z_0-9]*)*', Name.Class, '#pop'), + (r'[A-Z]\w*(?:::[A-Z]\w*)*', Name.Class, '#pop'), ], 'rule': [ include('space'), include('end'), (r'"(\\\\|\\"|[^"])*"', String.Double), (r"'(\\\\|\\'|[^'])*'", String.Single), - (r'([A-Za-z_][A-Za-z_0-9]*)(:)', bygroups(Name.Label, Punctuation)), - (r'[A-Za-z_][A-Za-z_0-9]*', Name), + (r'([A-Za-z_]\w*)(:)', bygroups(Name.Label, Punctuation)), + (r'[A-Za-z_]\w*', Name), (r'[()]', Punctuation), (r'[?+*/&!~]', Operator), (r'\[(?:\\.|\[:\^?[a-z]+:\]|[^\\\]])+\]', String.Regex), (r'([0-9]*)(\.\.)([0-9]*)', bygroups(Number.Integer, Operator, Number.Integer)), (r'(<)([^>]+)(>)', bygroups(Punctuation, Name.Class, Punctuation)), - (r'{', Punctuation, 'inline_module'), + (r'\{', Punctuation, 'inline_module'), (r'\.', String.Regex), ], 'inline_module': [ - (r'{', Other, 'ruby'), - (r'}', Punctuation, '#pop'), + (r'\{', Other, 'ruby'), + (r'\}', Punctuation, '#pop'), (r'[^{}]+', Other), ], 'ruby': [ - (r'{', Other, '#push'), - (r'}', Other, '#pop'), + (r'\{', Other, '#push'), + (r'\}', Other, '#pop'), (r'[^{}]+', Other), ], 'space': [ @@ -763,11 +768,12 @@ class TreetopBaseLexer(RegexLexer): ], } + class TreetopLexer(DelegatingLexer): """ A lexer for `Treetop `_ grammars. - *New in Pygments 1.6.* + .. 
versionadded:: 1.6 """ name = 'Treetop' @@ -776,3 +782,54 @@ class TreetopLexer(DelegatingLexer): def __init__(self, **options): super(TreetopLexer, self).__init__(RubyLexer, TreetopBaseLexer, **options) + + +class EbnfLexer(RegexLexer): + """ + Lexer for `ISO/IEC 14977 EBNF + `_ + grammars. + + .. versionadded:: 2.0 + """ + + name = 'EBNF' + aliases = ['ebnf'] + filenames = ['*.ebnf'] + mimetypes = ['text/x-ebnf'] + + tokens = { + 'root': [ + include('whitespace'), + include('comment_start'), + include('identifier'), + (r'=', Operator, 'production'), + ], + 'production': [ + include('whitespace'), + include('comment_start'), + include('identifier'), + (r'"[^"]*"', String.Double), + (r"'[^']*'", String.Single), + (r'(\?[^?]*\?)', Name.Entity), + (r'[\[\]{}(),|]', Punctuation), + (r'-', Operator), + (r';', Punctuation, '#pop'), + (r'\.', Punctuation, '#pop'), + ], + 'whitespace': [ + (r'\s+', Text), + ], + 'comment_start': [ + (r'\(\*', Comment.Multiline, 'comment'), + ], + 'comment': [ + (r'[^*)]', Comment.Multiline), + include('comment_start'), + (r'\*\)', Comment.Multiline, '#pop'), + (r'[*)]', Comment.Multiline), + ], + 'identifier': [ + (r'([a-zA-Z][\w \-]*)', Keyword), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/pascal.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/pascal.py new file mode 100644 index 0000000..5487748 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/pascal.py @@ -0,0 +1,831 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.pascal + ~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for Pascal family languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import re + +from pygments.lexer import Lexer, RegexLexer, include, bygroups, words, \ + using, this, default +from pygments.util import get_bool_opt, get_list_opt +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Error +from pygments.scanner import Scanner + +__all__ = ['DelphiLexer', 'Modula2Lexer', 'AdaLexer'] + + +class DelphiLexer(Lexer): + """ + For `Delphi `_ (Borland Object Pascal), + Turbo Pascal and Free Pascal source code. + + Additional options accepted: + + `turbopascal` + Highlight Turbo Pascal specific keywords (default: ``True``). + `delphi` + Highlight Borland Delphi specific keywords (default: ``True``). + `freepascal` + Highlight Free Pascal specific keywords (default: ``True``). + `units` + A list of units that should be considered builtin, supported are + ``System``, ``SysUtils``, ``Classes`` and ``Math``. + Default is to consider all of them builtin. + """ + name = 'Delphi' + aliases = ['delphi', 'pas', 'pascal', 'objectpascal'] + filenames = ['*.pas'] + mimetypes = ['text/x-pascal'] + + TURBO_PASCAL_KEYWORDS = ( + 'absolute', 'and', 'array', 'asm', 'begin', 'break', 'case', + 'const', 'constructor', 'continue', 'destructor', 'div', 'do', + 'downto', 'else', 'end', 'file', 'for', 'function', 'goto', + 'if', 'implementation', 'in', 'inherited', 'inline', 'interface', + 'label', 'mod', 'nil', 'not', 'object', 'of', 'on', 'operator', + 'or', 'packed', 'procedure', 'program', 'record', 'reintroduce', + 'repeat', 'self', 'set', 'shl', 'shr', 'string', 'then', 'to', + 'type', 'unit', 'until', 'uses', 'var', 'while', 'with', 'xor' + ) + + DELPHI_KEYWORDS = ( + 'as', 'class', 'except', 'exports', 'finalization', 'finally', + 'initialization', 'is', 'library', 'on', 'property', 'raise', + 'threadvar', 'try' + ) + + FREE_PASCAL_KEYWORDS = ( + 'dispose', 'exit', 'false', 'new', 'true' + ) + + BLOCK_KEYWORDS = set(( + 'begin', 'class', 'const', 'constructor', 'destructor', 'end', + 'finalization', 
'function', 'implementation', 'initialization', + 'label', 'library', 'operator', 'procedure', 'program', 'property', + 'record', 'threadvar', 'type', 'unit', 'uses', 'var' + )) + + FUNCTION_MODIFIERS = set(( + 'alias', 'cdecl', 'export', 'inline', 'interrupt', 'nostackframe', + 'pascal', 'register', 'safecall', 'softfloat', 'stdcall', + 'varargs', 'name', 'dynamic', 'near', 'virtual', 'external', + 'override', 'assembler' + )) + + # XXX: those aren't global. but currently we know no way for defining + # them just for the type context. + DIRECTIVES = set(( + 'absolute', 'abstract', 'assembler', 'cppdecl', 'default', 'far', + 'far16', 'forward', 'index', 'oldfpccall', 'private', 'protected', + 'published', 'public' + )) + + BUILTIN_TYPES = set(( + 'ansichar', 'ansistring', 'bool', 'boolean', 'byte', 'bytebool', + 'cardinal', 'char', 'comp', 'currency', 'double', 'dword', + 'extended', 'int64', 'integer', 'iunknown', 'longbool', 'longint', + 'longword', 'pansichar', 'pansistring', 'pbool', 'pboolean', + 'pbyte', 'pbytearray', 'pcardinal', 'pchar', 'pcomp', 'pcurrency', + 'pdate', 'pdatetime', 'pdouble', 'pdword', 'pextended', 'phandle', + 'pint64', 'pinteger', 'plongint', 'plongword', 'pointer', + 'ppointer', 'pshortint', 'pshortstring', 'psingle', 'psmallint', + 'pstring', 'pvariant', 'pwidechar', 'pwidestring', 'pword', + 'pwordarray', 'pwordbool', 'real', 'real48', 'shortint', + 'shortstring', 'single', 'smallint', 'string', 'tclass', 'tdate', + 'tdatetime', 'textfile', 'thandle', 'tobject', 'ttime', 'variant', + 'widechar', 'widestring', 'word', 'wordbool' + )) + + BUILTIN_UNITS = { + 'System': ( + 'abs', 'acquireexceptionobject', 'addr', 'ansitoutf8', + 'append', 'arctan', 'assert', 'assigned', 'assignfile', + 'beginthread', 'blockread', 'blockwrite', 'break', 'chdir', + 'chr', 'close', 'closefile', 'comptocurrency', 'comptodouble', + 'concat', 'continue', 'copy', 'cos', 'dec', 'delete', + 'dispose', 'doubletocomp', 'endthread', 'enummodules', + 
'enumresourcemodules', 'eof', 'eoln', 'erase', 'exceptaddr', + 'exceptobject', 'exclude', 'exit', 'exp', 'filepos', 'filesize', + 'fillchar', 'finalize', 'findclasshinstance', 'findhinstance', + 'findresourcehinstance', 'flush', 'frac', 'freemem', + 'get8087cw', 'getdir', 'getlasterror', 'getmem', + 'getmemorymanager', 'getmodulefilename', 'getvariantmanager', + 'halt', 'hi', 'high', 'inc', 'include', 'initialize', 'insert', + 'int', 'ioresult', 'ismemorymanagerset', 'isvariantmanagerset', + 'length', 'ln', 'lo', 'low', 'mkdir', 'move', 'new', 'odd', + 'olestrtostring', 'olestrtostrvar', 'ord', 'paramcount', + 'paramstr', 'pi', 'pos', 'pred', 'ptr', 'pucs4chars', 'random', + 'randomize', 'read', 'readln', 'reallocmem', + 'releaseexceptionobject', 'rename', 'reset', 'rewrite', 'rmdir', + 'round', 'runerror', 'seek', 'seekeof', 'seekeoln', + 'set8087cw', 'setlength', 'setlinebreakstyle', + 'setmemorymanager', 'setstring', 'settextbuf', + 'setvariantmanager', 'sin', 'sizeof', 'slice', 'sqr', 'sqrt', + 'str', 'stringofchar', 'stringtoolestr', 'stringtowidechar', + 'succ', 'swap', 'trunc', 'truncate', 'typeinfo', + 'ucs4stringtowidestring', 'unicodetoutf8', 'uniquestring', + 'upcase', 'utf8decode', 'utf8encode', 'utf8toansi', + 'utf8tounicode', 'val', 'vararrayredim', 'varclear', + 'widecharlentostring', 'widecharlentostrvar', + 'widechartostring', 'widechartostrvar', + 'widestringtoucs4string', 'write', 'writeln' + ), + 'SysUtils': ( + 'abort', 'addexitproc', 'addterminateproc', 'adjustlinebreaks', + 'allocmem', 'ansicomparefilename', 'ansicomparestr', + 'ansicomparetext', 'ansidequotedstr', 'ansiextractquotedstr', + 'ansilastchar', 'ansilowercase', 'ansilowercasefilename', + 'ansipos', 'ansiquotedstr', 'ansisamestr', 'ansisametext', + 'ansistrcomp', 'ansistricomp', 'ansistrlastchar', 'ansistrlcomp', + 'ansistrlicomp', 'ansistrlower', 'ansistrpos', 'ansistrrscan', + 'ansistrscan', 'ansistrupper', 'ansiuppercase', + 'ansiuppercasefilename', 'appendstr', 'assignstr', 
'beep', + 'booltostr', 'bytetocharindex', 'bytetocharlen', 'bytetype', + 'callterminateprocs', 'changefileext', 'charlength', + 'chartobyteindex', 'chartobytelen', 'comparemem', 'comparestr', + 'comparetext', 'createdir', 'createguid', 'currentyear', + 'currtostr', 'currtostrf', 'date', 'datetimetofiledate', + 'datetimetostr', 'datetimetostring', 'datetimetosystemtime', + 'datetimetotimestamp', 'datetostr', 'dayofweek', 'decodedate', + 'decodedatefully', 'decodetime', 'deletefile', 'directoryexists', + 'diskfree', 'disksize', 'disposestr', 'encodedate', 'encodetime', + 'exceptionerrormessage', 'excludetrailingbackslash', + 'excludetrailingpathdelimiter', 'expandfilename', + 'expandfilenamecase', 'expanduncfilename', 'extractfiledir', + 'extractfiledrive', 'extractfileext', 'extractfilename', + 'extractfilepath', 'extractrelativepath', 'extractshortpathname', + 'fileage', 'fileclose', 'filecreate', 'filedatetodatetime', + 'fileexists', 'filegetattr', 'filegetdate', 'fileisreadonly', + 'fileopen', 'fileread', 'filesearch', 'fileseek', 'filesetattr', + 'filesetdate', 'filesetreadonly', 'filewrite', 'finalizepackage', + 'findclose', 'findcmdlineswitch', 'findfirst', 'findnext', + 'floattocurr', 'floattodatetime', 'floattodecimal', 'floattostr', + 'floattostrf', 'floattotext', 'floattotextfmt', 'fmtloadstr', + 'fmtstr', 'forcedirectories', 'format', 'formatbuf', 'formatcurr', + 'formatdatetime', 'formatfloat', 'freeandnil', 'getcurrentdir', + 'getenvironmentvariable', 'getfileversion', 'getformatsettings', + 'getlocaleformatsettings', 'getmodulename', 'getpackagedescription', + 'getpackageinfo', 'gettime', 'guidtostring', 'incamonth', + 'includetrailingbackslash', 'includetrailingpathdelimiter', + 'incmonth', 'initializepackage', 'interlockeddecrement', + 'interlockedexchange', 'interlockedexchangeadd', + 'interlockedincrement', 'inttohex', 'inttostr', 'isdelimiter', + 'isequalguid', 'isleapyear', 'ispathdelimiter', 'isvalidident', + 'languages', 'lastdelimiter', 
'loadpackage', 'loadstr', + 'lowercase', 'msecstotimestamp', 'newstr', 'nextcharindex', 'now', + 'outofmemoryerror', 'quotedstr', 'raiselastoserror', + 'raiselastwin32error', 'removedir', 'renamefile', 'replacedate', + 'replacetime', 'safeloadlibrary', 'samefilename', 'sametext', + 'setcurrentdir', 'showexception', 'sleep', 'stralloc', 'strbufsize', + 'strbytetype', 'strcat', 'strcharlength', 'strcomp', 'strcopy', + 'strdispose', 'strecopy', 'strend', 'strfmt', 'stricomp', + 'stringreplace', 'stringtoguid', 'strlcat', 'strlcomp', 'strlcopy', + 'strlen', 'strlfmt', 'strlicomp', 'strlower', 'strmove', 'strnew', + 'strnextchar', 'strpas', 'strpcopy', 'strplcopy', 'strpos', + 'strrscan', 'strscan', 'strtobool', 'strtobooldef', 'strtocurr', + 'strtocurrdef', 'strtodate', 'strtodatedef', 'strtodatetime', + 'strtodatetimedef', 'strtofloat', 'strtofloatdef', 'strtoint', + 'strtoint64', 'strtoint64def', 'strtointdef', 'strtotime', + 'strtotimedef', 'strupper', 'supports', 'syserrormessage', + 'systemtimetodatetime', 'texttofloat', 'time', 'timestamptodatetime', + 'timestamptomsecs', 'timetostr', 'trim', 'trimleft', 'trimright', + 'tryencodedate', 'tryencodetime', 'tryfloattocurr', 'tryfloattodatetime', + 'trystrtobool', 'trystrtocurr', 'trystrtodate', 'trystrtodatetime', + 'trystrtofloat', 'trystrtoint', 'trystrtoint64', 'trystrtotime', + 'unloadpackage', 'uppercase', 'widecomparestr', 'widecomparetext', + 'widefmtstr', 'wideformat', 'wideformatbuf', 'widelowercase', + 'widesamestr', 'widesametext', 'wideuppercase', 'win32check', + 'wraptext' + ), + 'Classes': ( + 'activateclassgroup', 'allocatehwnd', 'bintohex', 'checksynchronize', + 'collectionsequal', 'countgenerations', 'deallocatehwnd', 'equalrect', + 'extractstrings', 'findclass', 'findglobalcomponent', 'getclass', + 'groupdescendantswith', 'hextobin', 'identtoint', + 'initinheritedcomponent', 'inttoident', 'invalidpoint', + 'isuniqueglobalcomponentname', 'linestart', 'objectbinarytotext', + 'objectresourcetotext', 
'objecttexttobinary', 'objecttexttoresource', + 'pointsequal', 'readcomponentres', 'readcomponentresex', + 'readcomponentresfile', 'rect', 'registerclass', 'registerclassalias', + 'registerclasses', 'registercomponents', 'registerintegerconsts', + 'registernoicon', 'registernonactivex', 'smallpoint', 'startclassgroup', + 'teststreamformat', 'unregisterclass', 'unregisterclasses', + 'unregisterintegerconsts', 'unregistermoduleclasses', + 'writecomponentresfile' + ), + 'Math': ( + 'arccos', 'arccosh', 'arccot', 'arccoth', 'arccsc', 'arccsch', 'arcsec', + 'arcsech', 'arcsin', 'arcsinh', 'arctan2', 'arctanh', 'ceil', + 'comparevalue', 'cosecant', 'cosh', 'cot', 'cotan', 'coth', 'csc', + 'csch', 'cycletodeg', 'cycletograd', 'cycletorad', 'degtocycle', + 'degtograd', 'degtorad', 'divmod', 'doubledecliningbalance', + 'ensurerange', 'floor', 'frexp', 'futurevalue', 'getexceptionmask', + 'getprecisionmode', 'getroundmode', 'gradtocycle', 'gradtodeg', + 'gradtorad', 'hypot', 'inrange', 'interestpayment', 'interestrate', + 'internalrateofreturn', 'intpower', 'isinfinite', 'isnan', 'iszero', + 'ldexp', 'lnxp1', 'log10', 'log2', 'logn', 'max', 'maxintvalue', + 'maxvalue', 'mean', 'meanandstddev', 'min', 'minintvalue', 'minvalue', + 'momentskewkurtosis', 'netpresentvalue', 'norm', 'numberofperiods', + 'payment', 'periodpayment', 'poly', 'popnstddev', 'popnvariance', + 'power', 'presentvalue', 'radtocycle', 'radtodeg', 'radtograd', + 'randg', 'randomrange', 'roundto', 'samevalue', 'sec', 'secant', + 'sech', 'setexceptionmask', 'setprecisionmode', 'setroundmode', + 'sign', 'simpleroundto', 'sincos', 'sinh', 'slndepreciation', 'stddev', + 'sum', 'sumint', 'sumofsquares', 'sumsandsquares', 'syddepreciation', + 'tan', 'tanh', 'totalvariance', 'variance' + ) + } + + ASM_REGISTERS = set(( + 'ah', 'al', 'ax', 'bh', 'bl', 'bp', 'bx', 'ch', 'cl', 'cr0', + 'cr1', 'cr2', 'cr3', 'cr4', 'cs', 'cx', 'dh', 'di', 'dl', 'dr0', + 'dr1', 'dr2', 'dr3', 'dr4', 'dr5', 'dr6', 'dr7', 'ds', 'dx', + 
'eax', 'ebp', 'ebx', 'ecx', 'edi', 'edx', 'es', 'esi', 'esp', + 'fs', 'gs', 'mm0', 'mm1', 'mm2', 'mm3', 'mm4', 'mm5', 'mm6', + 'mm7', 'si', 'sp', 'ss', 'st0', 'st1', 'st2', 'st3', 'st4', 'st5', + 'st6', 'st7', 'xmm0', 'xmm1', 'xmm2', 'xmm3', 'xmm4', 'xmm5', + 'xmm6', 'xmm7' + )) + + ASM_INSTRUCTIONS = set(( + 'aaa', 'aad', 'aam', 'aas', 'adc', 'add', 'and', 'arpl', 'bound', + 'bsf', 'bsr', 'bswap', 'bt', 'btc', 'btr', 'bts', 'call', 'cbw', + 'cdq', 'clc', 'cld', 'cli', 'clts', 'cmc', 'cmova', 'cmovae', + 'cmovb', 'cmovbe', 'cmovc', 'cmovcxz', 'cmove', 'cmovg', + 'cmovge', 'cmovl', 'cmovle', 'cmovna', 'cmovnae', 'cmovnb', + 'cmovnbe', 'cmovnc', 'cmovne', 'cmovng', 'cmovnge', 'cmovnl', + 'cmovnle', 'cmovno', 'cmovnp', 'cmovns', 'cmovnz', 'cmovo', + 'cmovp', 'cmovpe', 'cmovpo', 'cmovs', 'cmovz', 'cmp', 'cmpsb', + 'cmpsd', 'cmpsw', 'cmpxchg', 'cmpxchg486', 'cmpxchg8b', 'cpuid', + 'cwd', 'cwde', 'daa', 'das', 'dec', 'div', 'emms', 'enter', 'hlt', + 'ibts', 'icebp', 'idiv', 'imul', 'in', 'inc', 'insb', 'insd', + 'insw', 'int', 'int01', 'int03', 'int1', 'int3', 'into', 'invd', + 'invlpg', 'iret', 'iretd', 'iretw', 'ja', 'jae', 'jb', 'jbe', + 'jc', 'jcxz', 'jcxz', 'je', 'jecxz', 'jg', 'jge', 'jl', 'jle', + 'jmp', 'jna', 'jnae', 'jnb', 'jnbe', 'jnc', 'jne', 'jng', 'jnge', + 'jnl', 'jnle', 'jno', 'jnp', 'jns', 'jnz', 'jo', 'jp', 'jpe', + 'jpo', 'js', 'jz', 'lahf', 'lar', 'lcall', 'lds', 'lea', 'leave', + 'les', 'lfs', 'lgdt', 'lgs', 'lidt', 'ljmp', 'lldt', 'lmsw', + 'loadall', 'loadall286', 'lock', 'lodsb', 'lodsd', 'lodsw', + 'loop', 'loope', 'loopne', 'loopnz', 'loopz', 'lsl', 'lss', 'ltr', + 'mov', 'movd', 'movq', 'movsb', 'movsd', 'movsw', 'movsx', + 'movzx', 'mul', 'neg', 'nop', 'not', 'or', 'out', 'outsb', 'outsd', + 'outsw', 'pop', 'popa', 'popad', 'popaw', 'popf', 'popfd', 'popfw', + 'push', 'pusha', 'pushad', 'pushaw', 'pushf', 'pushfd', 'pushfw', + 'rcl', 'rcr', 'rdmsr', 'rdpmc', 'rdshr', 'rdtsc', 'rep', 'repe', + 'repne', 'repnz', 'repz', 'ret', 'retf', 'retn', 
'rol', 'ror', + 'rsdc', 'rsldt', 'rsm', 'sahf', 'sal', 'salc', 'sar', 'sbb', + 'scasb', 'scasd', 'scasw', 'seta', 'setae', 'setb', 'setbe', + 'setc', 'setcxz', 'sete', 'setg', 'setge', 'setl', 'setle', + 'setna', 'setnae', 'setnb', 'setnbe', 'setnc', 'setne', 'setng', + 'setnge', 'setnl', 'setnle', 'setno', 'setnp', 'setns', 'setnz', + 'seto', 'setp', 'setpe', 'setpo', 'sets', 'setz', 'sgdt', 'shl', + 'shld', 'shr', 'shrd', 'sidt', 'sldt', 'smi', 'smint', 'smintold', + 'smsw', 'stc', 'std', 'sti', 'stosb', 'stosd', 'stosw', 'str', + 'sub', 'svdc', 'svldt', 'svts', 'syscall', 'sysenter', 'sysexit', + 'sysret', 'test', 'ud1', 'ud2', 'umov', 'verr', 'verw', 'wait', + 'wbinvd', 'wrmsr', 'wrshr', 'xadd', 'xbts', 'xchg', 'xlat', + 'xlatb', 'xor' + )) + + def __init__(self, **options): + Lexer.__init__(self, **options) + self.keywords = set() + if get_bool_opt(options, 'turbopascal', True): + self.keywords.update(self.TURBO_PASCAL_KEYWORDS) + if get_bool_opt(options, 'delphi', True): + self.keywords.update(self.DELPHI_KEYWORDS) + if get_bool_opt(options, 'freepascal', True): + self.keywords.update(self.FREE_PASCAL_KEYWORDS) + self.builtins = set() + for unit in get_list_opt(options, 'units', list(self.BUILTIN_UNITS)): + self.builtins.update(self.BUILTIN_UNITS[unit]) + + def get_tokens_unprocessed(self, text): + scanner = Scanner(text, re.DOTALL | re.MULTILINE | re.IGNORECASE) + stack = ['initial'] + in_function_block = False + in_property_block = False + was_dot = False + next_token_is_function = False + next_token_is_property = False + collect_labels = False + block_labels = set() + brace_balance = [0, 0] + + while not scanner.eos: + token = Error + + if stack[-1] == 'initial': + if scanner.scan(r'\s+'): + token = Text + elif scanner.scan(r'\{.*?\}|\(\*.*?\*\)'): + if scanner.match.startswith('$'): + token = Comment.Preproc + else: + token = Comment.Multiline + elif scanner.scan(r'//.*?$'): + token = Comment.Single + elif scanner.scan(r'[-+*\/=<>:;,.@\^]'): + token = 
Operator + # stop label highlighting on next ";" + if collect_labels and scanner.match == ';': + collect_labels = False + elif scanner.scan(r'[\(\)\[\]]+'): + token = Punctuation + # abort function naming ``foo = Function(...)`` + next_token_is_function = False + # if we are in a function block we count the open + # braces because ootherwise it's impossible to + # determine the end of the modifier context + if in_function_block or in_property_block: + if scanner.match == '(': + brace_balance[0] += 1 + elif scanner.match == ')': + brace_balance[0] -= 1 + elif scanner.match == '[': + brace_balance[1] += 1 + elif scanner.match == ']': + brace_balance[1] -= 1 + elif scanner.scan(r'[A-Za-z_][A-Za-z_0-9]*'): + lowercase_name = scanner.match.lower() + if lowercase_name == 'result': + token = Name.Builtin.Pseudo + elif lowercase_name in self.keywords: + token = Keyword + # if we are in a special block and a + # block ending keyword occours (and the parenthesis + # is balanced) we end the current block context + if (in_function_block or in_property_block) and \ + lowercase_name in self.BLOCK_KEYWORDS and \ + brace_balance[0] <= 0 and \ + brace_balance[1] <= 0: + in_function_block = False + in_property_block = False + brace_balance = [0, 0] + block_labels = set() + if lowercase_name in ('label', 'goto'): + collect_labels = True + elif lowercase_name == 'asm': + stack.append('asm') + elif lowercase_name == 'property': + in_property_block = True + next_token_is_property = True + elif lowercase_name in ('procedure', 'operator', + 'function', 'constructor', + 'destructor'): + in_function_block = True + next_token_is_function = True + # we are in a function block and the current name + # is in the set of registered modifiers. 
highlight + # it as pseudo keyword + elif in_function_block and \ + lowercase_name in self.FUNCTION_MODIFIERS: + token = Keyword.Pseudo + # if we are in a property highlight some more + # modifiers + elif in_property_block and \ + lowercase_name in ('read', 'write'): + token = Keyword.Pseudo + next_token_is_function = True + # if the last iteration set next_token_is_function + # to true we now want this name highlighted as + # function. so do that and reset the state + elif next_token_is_function: + # Look if the next token is a dot. If yes it's + # not a function, but a class name and the + # part after the dot a function name + if scanner.test(r'\s*\.\s*'): + token = Name.Class + # it's not a dot, our job is done + else: + token = Name.Function + next_token_is_function = False + # same for properties + elif next_token_is_property: + token = Name.Property + next_token_is_property = False + # Highlight this token as label and add it + # to the list of known labels + elif collect_labels: + token = Name.Label + block_labels.add(scanner.match.lower()) + # name is in list of known labels + elif lowercase_name in block_labels: + token = Name.Label + elif lowercase_name in self.BUILTIN_TYPES: + token = Keyword.Type + elif lowercase_name in self.DIRECTIVES: + token = Keyword.Pseudo + # builtins are just builtins if the token + # before isn't a dot + elif not was_dot and lowercase_name in self.builtins: + token = Name.Builtin + else: + token = Name + elif scanner.scan(r"'"): + token = String + stack.append('string') + elif scanner.scan(r'\#(\d+|\$[0-9A-Fa-f]+)'): + token = String.Char + elif scanner.scan(r'\$[0-9A-Fa-f]+'): + token = Number.Hex + elif scanner.scan(r'\d+(?![eE]|\.[^.])'): + token = Number.Integer + elif scanner.scan(r'\d+(\.\d+([eE][+-]?\d+)?|[eE][+-]?\d+)'): + token = Number.Float + else: + # if the stack depth is deeper than once, pop + if len(stack) > 1: + stack.pop() + scanner.get_char() + + elif stack[-1] == 'string': + if scanner.scan(r"''"): + token 
= String.Escape + elif scanner.scan(r"'"): + token = String + stack.pop() + elif scanner.scan(r"[^']*"): + token = String + else: + scanner.get_char() + stack.pop() + + elif stack[-1] == 'asm': + if scanner.scan(r'\s+'): + token = Text + elif scanner.scan(r'end'): + token = Keyword + stack.pop() + elif scanner.scan(r'\{.*?\}|\(\*.*?\*\)'): + if scanner.match.startswith('$'): + token = Comment.Preproc + else: + token = Comment.Multiline + elif scanner.scan(r'//.*?$'): + token = Comment.Single + elif scanner.scan(r"'"): + token = String + stack.append('string') + elif scanner.scan(r'@@[A-Za-z_][A-Za-z_0-9]*'): + token = Name.Label + elif scanner.scan(r'[A-Za-z_][A-Za-z_0-9]*'): + lowercase_name = scanner.match.lower() + if lowercase_name in self.ASM_INSTRUCTIONS: + token = Keyword + elif lowercase_name in self.ASM_REGISTERS: + token = Name.Builtin + else: + token = Name + elif scanner.scan(r'[-+*\/=<>:;,.@\^]+'): + token = Operator + elif scanner.scan(r'[\(\)\[\]]+'): + token = Punctuation + elif scanner.scan(r'\$[0-9A-Fa-f]+'): + token = Number.Hex + elif scanner.scan(r'\d+(?![eE]|\.[^.])'): + token = Number.Integer + elif scanner.scan(r'\d+(\.\d+([eE][+-]?\d+)?|[eE][+-]?\d+)'): + token = Number.Float + else: + scanner.get_char() + stack.pop() + + # save the dot!!!11 + if scanner.match.strip(): + was_dot = scanner.match == '.' + yield scanner.start_pos, token, scanner.match or '' + + +class Modula2Lexer(RegexLexer): + """ + For `Modula-2 `_ source code. + + Additional options that determine which keywords are highlighted: + + `pim` + Select PIM Modula-2 dialect (default: True). + `iso` + Select ISO Modula-2 dialect (default: False). + `objm2` + Select Objective Modula-2 dialect (default: False). + `gm2ext` + Also highlight GNU extensions (default: False). + + .. 
versionadded:: 1.3 + """ + name = 'Modula-2' + aliases = ['modula2', 'm2'] + filenames = ['*.def', '*.mod'] + mimetypes = ['text/x-modula2'] + + flags = re.MULTILINE | re.DOTALL + + tokens = { + 'whitespace': [ + (r'\n+', Text), # blank lines + (r'\s+', Text), # whitespace + ], + 'identifiers': [ + (r'([a-zA-Z_$][\w$]*)', Name), + ], + 'numliterals': [ + (r'[01]+B', Number.Bin), # binary number (ObjM2) + (r'[0-7]+B', Number.Oct), # octal number (PIM + ISO) + (r'[0-7]+C', Number.Oct), # char code (PIM + ISO) + (r'[0-9A-F]+C', Number.Hex), # char code (ObjM2) + (r'[0-9A-F]+H', Number.Hex), # hexadecimal number + (r'[0-9]+\.[0-9]+E[+-][0-9]+', Number.Float), # real number + (r'[0-9]+\.[0-9]+', Number.Float), # real number + (r'[0-9]+', Number.Integer), # decimal whole number + ], + 'strings': [ + (r"'(\\\\|\\'|[^'])*'", String), # single quoted string + (r'"(\\\\|\\"|[^"])*"', String), # double quoted string + ], + 'operators': [ + (r'[*/+=#~&<>\^-]', Operator), + (r':=', Operator), # assignment + (r'@', Operator), # pointer deref (ISO) + (r'\.\.', Operator), # ellipsis or range + (r'`', Operator), # Smalltalk message (ObjM2) + (r'::', Operator), # type conversion (ObjM2) + ], + 'punctuation': [ + (r'[()\[\]{},.:;|]', Punctuation), + ], + 'comments': [ + (r'//.*?\n', Comment.Single), # ObjM2 + (r'/\*(.*?)\*/', Comment.Multiline), # ObjM2 + (r'\(\*([^$].*?)\*\)', Comment.Multiline), + # TO DO: nesting of (* ... 
*) comments + ], + 'pragmas': [ + (r'\(\*\$(.*?)\*\)', Comment.Preproc), # PIM + (r'<\*(.*?)\*>', Comment.Preproc), # ISO + ObjM2 + ], + 'root': [ + include('whitespace'), + include('comments'), + include('pragmas'), + include('identifiers'), + include('numliterals'), + include('strings'), + include('operators'), + include('punctuation'), + ] + } + + pim_reserved_words = [ + # 40 reserved words + 'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', + 'DIV', 'DO', 'ELSE', 'ELSIF', 'END', 'EXIT', 'EXPORT', 'FOR', + 'FROM', 'IF', 'IMPLEMENTATION', 'IMPORT', 'IN', 'LOOP', 'MOD', + 'MODULE', 'NOT', 'OF', 'OR', 'POINTER', 'PROCEDURE', 'QUALIFIED', + 'RECORD', 'REPEAT', 'RETURN', 'SET', 'THEN', 'TO', 'TYPE', + 'UNTIL', 'VAR', 'WHILE', 'WITH', + ] + + pim_pervasives = [ + # 31 pervasives + 'ABS', 'BITSET', 'BOOLEAN', 'CAP', 'CARDINAL', 'CHAR', 'CHR', 'DEC', + 'DISPOSE', 'EXCL', 'FALSE', 'FLOAT', 'HALT', 'HIGH', 'INC', 'INCL', + 'INTEGER', 'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NEW', 'NIL', 'ODD', + 'ORD', 'PROC', 'REAL', 'SIZE', 'TRUE', 'TRUNC', 'VAL', + ] + + iso_reserved_words = [ + # 46 reserved words + 'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', 'DIV', + 'DO', 'ELSE', 'ELSIF', 'END', 'EXCEPT', 'EXIT', 'EXPORT', 'FINALLY', + 'FOR', 'FORWARD', 'FROM', 'IF', 'IMPLEMENTATION', 'IMPORT', 'IN', + 'LOOP', 'MOD', 'MODULE', 'NOT', 'OF', 'OR', 'PACKEDSET', 'POINTER', + 'PROCEDURE', 'QUALIFIED', 'RECORD', 'REPEAT', 'REM', 'RETRY', + 'RETURN', 'SET', 'THEN', 'TO', 'TYPE', 'UNTIL', 'VAR', 'WHILE', + 'WITH', + ] + + iso_pervasives = [ + # 42 pervasives + 'ABS', 'BITSET', 'BOOLEAN', 'CAP', 'CARDINAL', 'CHAR', 'CHR', 'CMPLX', + 'COMPLEX', 'DEC', 'DISPOSE', 'EXCL', 'FALSE', 'FLOAT', 'HALT', 'HIGH', + 'IM', 'INC', 'INCL', 'INT', 'INTEGER', 'INTERRUPTIBLE', 'LENGTH', + 'LFLOAT', 'LONGCOMPLEX', 'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NEW', + 'NIL', 'ODD', 'ORD', 'PROC', 'PROTECTION', 'RE', 'REAL', 'SIZE', + 'TRUE', 'TRUNC', 'UNINTERRUBTIBLE', 'VAL', + ] + + 
objm2_reserved_words = [ + # base language, 42 reserved words + 'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', 'DIV', + 'DO', 'ELSE', 'ELSIF', 'END', 'ENUM', 'EXIT', 'FOR', 'FROM', 'IF', + 'IMMUTABLE', 'IMPLEMENTATION', 'IMPORT', 'IN', 'IS', 'LOOP', 'MOD', + 'MODULE', 'NOT', 'OF', 'OPAQUE', 'OR', 'POINTER', 'PROCEDURE', + 'RECORD', 'REPEAT', 'RETURN', 'SET', 'THEN', 'TO', 'TYPE', + 'UNTIL', 'VAR', 'VARIADIC', 'WHILE', + # OO extensions, 16 reserved words + 'BYCOPY', 'BYREF', 'CLASS', 'CONTINUE', 'CRITICAL', 'INOUT', 'METHOD', + 'ON', 'OPTIONAL', 'OUT', 'PRIVATE', 'PROTECTED', 'PROTOCOL', 'PUBLIC', + 'SUPER', 'TRY', + ] + + objm2_pervasives = [ + # base language, 38 pervasives + 'ABS', 'BITSET', 'BOOLEAN', 'CARDINAL', 'CHAR', 'CHR', 'DISPOSE', + 'FALSE', 'HALT', 'HIGH', 'INTEGER', 'INRANGE', 'LENGTH', 'LONGCARD', + 'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NEG', 'NEW', 'NEXTV', 'NIL', + 'OCTET', 'ODD', 'ORD', 'PRED', 'PROC', 'READ', 'REAL', 'SUCC', 'TMAX', + 'TMIN', 'TRUE', 'TSIZE', 'UNICHAR', 'VAL', 'WRITE', 'WRITEF', + # OO extensions, 3 pervasives + 'OBJECT', 'NO', 'YES', + ] + + gnu_reserved_words = [ + # 10 additional reserved words + 'ASM', '__ATTRIBUTE__', '__BUILTIN__', '__COLUMN__', '__DATE__', + '__FILE__', '__FUNCTION__', '__LINE__', '__MODULE__', 'VOLATILE', + ] + + gnu_pervasives = [ + # 21 identifiers, actually from pseudo-module SYSTEM + # but we will highlight them as if they were pervasives + 'BITSET8', 'BITSET16', 'BITSET32', 'CARDINAL8', 'CARDINAL16', + 'CARDINAL32', 'CARDINAL64', 'COMPLEX32', 'COMPLEX64', 'COMPLEX96', + 'COMPLEX128', 'INTEGER8', 'INTEGER16', 'INTEGER32', 'INTEGER64', + 'REAL8', 'REAL16', 'REAL32', 'REAL96', 'REAL128', 'THROW', + ] + + def __init__(self, **options): + self.reserved_words = set() + self.pervasives = set() + # ISO Modula-2 + if get_bool_opt(options, 'iso', False): + self.reserved_words.update(self.iso_reserved_words) + self.pervasives.update(self.iso_pervasives) + # Objective Modula-2 + elif 
get_bool_opt(options, 'objm2', False): + self.reserved_words.update(self.objm2_reserved_words) + self.pervasives.update(self.objm2_pervasives) + # PIM Modula-2 (DEFAULT) + else: + self.reserved_words.update(self.pim_reserved_words) + self.pervasives.update(self.pim_pervasives) + # GNU extensions + if get_bool_opt(options, 'gm2ext', False): + self.reserved_words.update(self.gnu_reserved_words) + self.pervasives.update(self.gnu_pervasives) + # initialise + RegexLexer.__init__(self, **options) + + def get_tokens_unprocessed(self, text): + for index, token, value in RegexLexer.get_tokens_unprocessed(self, text): + # check for reserved words and pervasives + if token is Name: + if value in self.reserved_words: + token = Keyword.Reserved + elif value in self.pervasives: + token = Keyword.Pervasive + # return result + yield index, token, value + + +class AdaLexer(RegexLexer): + """ + For Ada source code. + + .. versionadded:: 1.3 + """ + + name = 'Ada' + aliases = ['ada', 'ada95', 'ada2005'] + filenames = ['*.adb', '*.ads', '*.ada'] + mimetypes = ['text/x-ada'] + + flags = re.MULTILINE | re.IGNORECASE + + tokens = { + 'root': [ + (r'[^\S\n]+', Text), + (r'--.*?\n', Comment.Single), + (r'[^\S\n]+', Text), + (r'function|procedure|entry', Keyword.Declaration, 'subprogram'), + (r'(subtype|type)(\s+)(\w+)', + bygroups(Keyword.Declaration, Text, Keyword.Type), 'type_def'), + (r'task|protected', Keyword.Declaration), + (r'(subtype)(\s+)', bygroups(Keyword.Declaration, Text)), + (r'(end)(\s+)', bygroups(Keyword.Reserved, Text), 'end'), + (r'(pragma)(\s+)(\w+)', bygroups(Keyword.Reserved, Text, + Comment.Preproc)), + (r'(true|false|null)\b', Keyword.Constant), + (words(( + 'Address', 'Byte', 'Boolean', 'Character', 'Controlled', 'Count', 'Cursor', + 'Duration', 'File_Mode', 'File_Type', 'Float', 'Generator', 'Integer', 'Long_Float', + 'Long_Integer', 'Long_Long_Float', 'Long_Long_Integer', 'Natural', 'Positive', + 'Reference_Type', 'Short_Float', 'Short_Integer', 
'Short_Short_Float', + 'Short_Short_Integer', 'String', 'Wide_Character', 'Wide_String'), suffix=r'\b'), + Keyword.Type), + (r'(and(\s+then)?|in|mod|not|or(\s+else)|rem)\b', Operator.Word), + (r'generic|private', Keyword.Declaration), + (r'package', Keyword.Declaration, 'package'), + (r'array\b', Keyword.Reserved, 'array_def'), + (r'(with|use)(\s+)', bygroups(Keyword.Namespace, Text), 'import'), + (r'(\w+)(\s*)(:)(\s*)(constant)', + bygroups(Name.Constant, Text, Punctuation, Text, + Keyword.Reserved)), + (r'<<\w+>>', Name.Label), + (r'(\w+)(\s*)(:)(\s*)(declare|begin|loop|for|while)', + bygroups(Name.Label, Text, Punctuation, Text, Keyword.Reserved)), + (words(( + 'abort', 'abs', 'abstract', 'accept', 'access', 'aliased', 'all', + 'array', 'at', 'begin', 'body', 'case', 'constant', 'declare', + 'delay', 'delta', 'digits', 'do', 'else', 'elsif', 'end', 'entry', + 'exception', 'exit', 'interface', 'for', 'goto', 'if', 'is', 'limited', + 'loop', 'new', 'null', 'of', 'or', 'others', 'out', 'overriding', + 'pragma', 'protected', 'raise', 'range', 'record', 'renames', 'requeue', + 'return', 'reverse', 'select', 'separate', 'subtype', 'synchronized', + 'task', 'tagged', 'terminate', 'then', 'type', 'until', 'when', + 'while', 'xor'), prefix=r'\b', suffix=r'\b'), + Keyword.Reserved), + (r'"[^"]*"', String), + include('attribute'), + include('numbers'), + (r"'[^']'", String.Character), + (r'(\w+)(\s*|[(,])', bygroups(Name, using(this))), + (r"(<>|=>|:=|[()|:;,.'])", Punctuation), + (r'[*<>+=/&-]', Operator), + (r'\n+', Text), + ], + 'numbers': [ + (r'[0-9_]+#[0-9a-f]+#', Number.Hex), + (r'[0-9_]+\.[0-9_]*', Number.Float), + (r'[0-9_]+', Number.Integer), + ], + 'attribute': [ + (r"(')(\w+)", bygroups(Punctuation, Name.Attribute)), + ], + 'subprogram': [ + (r'\(', Punctuation, ('#pop', 'formal_part')), + (r';', Punctuation, '#pop'), + (r'is\b', Keyword.Reserved, '#pop'), + (r'"[^"]+"|\w+', Name.Function), + include('root'), + ], + 'end': [ + ('(if|case|record|loop|select)', 
Keyword.Reserved), + ('"[^"]+"|[\w.]+', Name.Function), + ('\s+', Text), + (';', Punctuation, '#pop'), + ], + 'type_def': [ + (r';', Punctuation, '#pop'), + (r'\(', Punctuation, 'formal_part'), + (r'with|and|use', Keyword.Reserved), + (r'array\b', Keyword.Reserved, ('#pop', 'array_def')), + (r'record\b', Keyword.Reserved, ('record_def')), + (r'(null record)(;)', bygroups(Keyword.Reserved, Punctuation), '#pop'), + include('root'), + ], + 'array_def': [ + (r';', Punctuation, '#pop'), + (r'(\w+)(\s+)(range)', bygroups(Keyword.Type, Text, Keyword.Reserved)), + include('root'), + ], + 'record_def': [ + (r'end record', Keyword.Reserved, '#pop'), + include('root'), + ], + 'import': [ + (r'[\w.]+', Name.Namespace, '#pop'), + default('#pop'), + ], + 'formal_part': [ + (r'\)', Punctuation, '#pop'), + (r'\w+', Name.Variable), + (r',|:[^=]', Punctuation), + (r'(in|not|null|out|access)\b', Keyword.Reserved), + include('root'), + ], + 'package': [ + ('body', Keyword.Declaration), + ('is\s+new|renames', Keyword.Reserved), + ('is', Keyword.Reserved, '#pop'), + (';', Punctuation, '#pop'), + ('\(', Punctuation, 'package_instantiation'), + ('([\w.]+)', Name.Class), + include('root'), + ], + 'package_instantiation': [ + (r'("[^"]+"|\w+)(\s+)(=>)', bygroups(Name.Variable, Text, Punctuation)), + (r'[\w.\'"]', Text), + (r'\)', Punctuation, '#pop'), + include('root'), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/pawn.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/pawn.py new file mode 100644 index 0000000..d55e2cc --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/pawn.py @@ -0,0 +1,199 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.pawn + ~~~~~~~~~~~~~~~~~~~~ + + Lexers for the Pawn languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +from pygments.lexer import RegexLexer +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Error +from pygments.util import get_bool_opt + +__all__ = ['SourcePawnLexer', 'PawnLexer'] + + +class SourcePawnLexer(RegexLexer): + """ + For SourcePawn source code with preprocessor directives. + + .. versionadded:: 1.6 + """ + name = 'SourcePawn' + aliases = ['sp'] + filenames = ['*.sp'] + mimetypes = ['text/x-sourcepawn'] + + #: optional Comment or Whitespace + _ws = r'(?:\s|//.*?\n|/\*.*?\*/)+' + #: only one /* */ style comment + _ws1 = r'\s*(?:/[*].*?[*]/\s*)*' + + tokens = { + 'root': [ + # preprocessor directives: without whitespace + ('^#if\s+0', Comment.Preproc, 'if0'), + ('^#', Comment.Preproc, 'macro'), + # or with whitespace + ('^' + _ws1 + r'#if\s+0', Comment.Preproc, 'if0'), + ('^' + _ws1 + '#', Comment.Preproc, 'macro'), + (r'\n', Text), + (r'\s+', Text), + (r'\\\n', Text), # line continuation + (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single), + (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline), + (r'[{}]', Punctuation), + (r'L?"', String, 'string'), + (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char), + (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float), + (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), + (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex), + (r'0[0-7]+[LlUu]*', Number.Oct), + (r'\d+[LlUu]*', Number.Integer), + (r'\*/', Error), + (r'[~!%^&*+=|?:<>/-]', Operator), + (r'[()\[\],.;]', Punctuation), + (r'(case|const|continue|native|' + r'default|else|enum|for|if|new|operator|' + r'public|return|sizeof|static|decl|struct|switch)\b', Keyword), + (r'(bool|Float)\b', Keyword.Type), + (r'(true|false)\b', Keyword.Constant), + ('[a-zA-Z_]\w*', Name), + ], + 'string': [ + (r'"', String, '#pop'), + (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), + (r'[^\\"\n]+', String), # all other characters + (r'\\\n', String), # line continuation + (r'\\', String), # 
stray backslash + ], + 'macro': [ + (r'[^/\n]+', Comment.Preproc), + (r'/\*(.|\n)*?\*/', Comment.Multiline), + (r'//.*?\n', Comment.Single, '#pop'), + (r'/', Comment.Preproc), + (r'(?<=\\)\n', Comment.Preproc), + (r'\n', Comment.Preproc, '#pop'), + ], + 'if0': [ + (r'^\s*#if.*?(?/-]', Operator), + (r'[()\[\],.;]', Punctuation), + (r'(switch|case|default|const|new|static|char|continue|break|' + r'if|else|for|while|do|operator|enum|' + r'public|return|sizeof|tagof|state|goto)\b', Keyword), + (r'(bool|Float)\b', Keyword.Type), + (r'(true|false)\b', Keyword.Constant), + ('[a-zA-Z_]\w*', Name), + ], + 'string': [ + (r'"', String, '#pop'), + (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), + (r'[^\\"\n]+', String), # all other characters + (r'\\\n', String), # line continuation + (r'\\', String), # stray backslash + ], + 'macro': [ + (r'[^/\n]+', Comment.Preproc), + (r'/\*(.|\n)*?\*/', Comment.Multiline), + (r'//.*?\n', Comment.Single, '#pop'), + (r'/', Comment.Preproc), + (r'(?<=\\)\n', Comment.Preproc), + (r'\n', Comment.Preproc, '#pop'), + ], + 'if0': [ + (r'^\s*#if.*?(?`_ source code. + """ + + name = 'Perl' + aliases = ['perl', 'pl'] + filenames = ['*.pl', '*.pm', '*.t'] + mimetypes = ['text/x-perl', 'application/x-perl'] + + flags = re.DOTALL | re.MULTILINE + # TODO: give this to a perl guy who knows how to parse perl... 
+ tokens = { + 'balanced-regex': [ + (r'/(\\\\|\\[^\\]|[^\\/])*/[egimosx]*', String.Regex, '#pop'), + (r'!(\\\\|\\[^\\]|[^\\!])*![egimosx]*', String.Regex, '#pop'), + (r'\\(\\\\|[^\\])*\\[egimosx]*', String.Regex, '#pop'), + (r'\{(\\\\|\\[^\\]|[^\\}])*\}[egimosx]*', String.Regex, '#pop'), + (r'<(\\\\|\\[^\\]|[^\\>])*>[egimosx]*', String.Regex, '#pop'), + (r'\[(\\\\|\\[^\\]|[^\\\]])*\][egimosx]*', String.Regex, '#pop'), + (r'\((\\\\|\\[^\\]|[^\\)])*\)[egimosx]*', String.Regex, '#pop'), + (r'@(\\\\|\\[^\\]|[^\\@])*@[egimosx]*', String.Regex, '#pop'), + (r'%(\\\\|\\[^\\]|[^\\%])*%[egimosx]*', String.Regex, '#pop'), + (r'\$(\\\\|\\[^\\]|[^\\$])*\$[egimosx]*', String.Regex, '#pop'), + ], + 'root': [ + (r'\#.*?$', Comment.Single), + (r'^=[a-zA-Z0-9]+\s+.*?\n=cut', Comment.Multiline), + (words(( + 'case', 'continue', 'do', 'else', 'elsif', 'for', 'foreach', + 'if', 'last', 'my', 'next', 'our', 'redo', 'reset', 'then', + 'unless', 'until', 'while', 'use', 'print', 'new', 'BEGIN', + 'CHECK', 'INIT', 'END', 'return'), suffix=r'\b'), + Keyword), + (r'(format)(\s+)(\w+)(\s*)(=)(\s*\n)', + bygroups(Keyword, Text, Name, Text, Punctuation, Text), 'format'), + (r'(eq|lt|gt|le|ge|ne|not|and|or|cmp)\b', Operator.Word), + # common delimiters + (r's/(\\\\|\\[^\\]|[^\\/])*/(\\\\|\\[^\\]|[^\\/])*/[egimosx]*', + String.Regex), + (r's!(\\\\|\\!|[^!])*!(\\\\|\\!|[^!])*![egimosx]*', String.Regex), + (r's\\(\\\\|[^\\])*\\(\\\\|[^\\])*\\[egimosx]*', String.Regex), + (r's@(\\\\|\\[^\\]|[^\\@])*@(\\\\|\\[^\\]|[^\\@])*@[egimosx]*', + String.Regex), + (r's%(\\\\|\\[^\\]|[^\\%])*%(\\\\|\\[^\\]|[^\\%])*%[egimosx]*', + String.Regex), + # balanced delimiters + (r's\{(\\\\|\\[^\\]|[^\\}])*\}\s*', String.Regex, 'balanced-regex'), + (r's<(\\\\|\\[^\\]|[^\\>])*>\s*', String.Regex, 'balanced-regex'), + (r's\[(\\\\|\\[^\\]|[^\\\]])*\]\s*', String.Regex, + 'balanced-regex'), + (r's\((\\\\|\\[^\\]|[^\\)])*\)\s*', String.Regex, + 'balanced-regex'), + + (r'm?/(\\\\|\\[^\\]|[^\\/\n])*/[gcimosx]*', 
String.Regex), + (r'm(?=[/!\\{<\[(@%$])', String.Regex, 'balanced-regex'), + (r'((?<==~)|(?<=\())\s*/(\\\\|\\[^\\]|[^\\/])*/[gcimosx]*', + String.Regex), + (r'\s+', Text), + (words(( + 'abs', 'accept', 'alarm', 'atan2', 'bind', 'binmode', 'bless', 'caller', 'chdir', + 'chmod', 'chomp', 'chop', 'chown', 'chr', 'chroot', 'close', 'closedir', 'connect', + 'continue', 'cos', 'crypt', 'dbmclose', 'dbmopen', 'defined', 'delete', 'die', + 'dump', 'each', 'endgrent', 'endhostent', 'endnetent', 'endprotoent', + 'endpwent', 'endservent', 'eof', 'eval', 'exec', 'exists', 'exit', 'exp', 'fcntl', + 'fileno', 'flock', 'fork', 'format', 'formline', 'getc', 'getgrent', 'getgrgid', + 'getgrnam', 'gethostbyaddr', 'gethostbyname', 'gethostent', 'getlogin', + 'getnetbyaddr', 'getnetbyname', 'getnetent', 'getpeername', 'getpgrp', + 'getppid', 'getpriority', 'getprotobyname', 'getprotobynumber', + 'getprotoent', 'getpwent', 'getpwnam', 'getpwuid', 'getservbyname', + 'getservbyport', 'getservent', 'getsockname', 'getsockopt', 'glob', 'gmtime', + 'goto', 'grep', 'hex', 'import', 'index', 'int', 'ioctl', 'join', 'keys', 'kill', 'last', + 'lc', 'lcfirst', 'length', 'link', 'listen', 'local', 'localtime', 'log', 'lstat', + 'map', 'mkdir', 'msgctl', 'msgget', 'msgrcv', 'msgsnd', 'my', 'next', 'no', 'oct', 'open', + 'opendir', 'ord', 'our', 'pack', 'package', 'pipe', 'pop', 'pos', 'printf', + 'prototype', 'push', 'quotemeta', 'rand', 'read', 'readdir', + 'readline', 'readlink', 'readpipe', 'recv', 'redo', 'ref', 'rename', 'require', + 'reverse', 'rewinddir', 'rindex', 'rmdir', 'scalar', 'seek', 'seekdir', + 'select', 'semctl', 'semget', 'semop', 'send', 'setgrent', 'sethostent', 'setnetent', + 'setpgrp', 'setpriority', 'setprotoent', 'setpwent', 'setservent', + 'setsockopt', 'shift', 'shmctl', 'shmget', 'shmread', 'shmwrite', 'shutdown', + 'sin', 'sleep', 'socket', 'socketpair', 'sort', 'splice', 'split', 'sprintf', 'sqrt', + 'srand', 'stat', 'study', 'substr', 'symlink', 'syscall', 'sysopen', 
'sysread', + 'sysseek', 'system', 'syswrite', 'tell', 'telldir', 'tie', 'tied', 'time', 'times', 'tr', + 'truncate', 'uc', 'ucfirst', 'umask', 'undef', 'unlink', 'unpack', 'unshift', 'untie', + 'utime', 'values', 'vec', 'wait', 'waitpid', 'wantarray', 'warn', 'write'), suffix=r'\b'), + Name.Builtin), + (r'((__(DATA|DIE|WARN)__)|(STD(IN|OUT|ERR)))\b', Name.Builtin.Pseudo), + (r'<<([\'"]?)([a-zA-Z_]\w*)\1;?\n.*?\n\2\n', String), + (r'__END__', Comment.Preproc, 'end-part'), + (r'\$\^[ADEFHILMOPSTWX]', Name.Variable.Global), + (r"\$[\\\"\[\]'&`+*.,;=%~?@$!<>(^|/-](?!\w)", Name.Variable.Global), + (r'[$@%#]+', Name.Variable, 'varname'), + (r'0_?[0-7]+(_[0-7]+)*', Number.Oct), + (r'0x[0-9A-Fa-f]+(_[0-9A-Fa-f]+)*', Number.Hex), + (r'0b[01]+(_[01]+)*', Number.Bin), + (r'(?i)(\d*(_\d*)*\.\d+(_\d*)*|\d+(_\d*)*\.\d+(_\d*)*)(e[+-]?\d+)?', + Number.Float), + (r'(?i)\d+(_\d*)*e[+-]?\d+(_\d*)*', Number.Float), + (r'\d+(_\d+)*', Number.Integer), + (r"'(\\\\|\\[^\\]|[^'\\])*'", String), + (r'"(\\\\|\\[^\\]|[^"\\])*"', String), + (r'`(\\\\|\\[^\\]|[^`\\])*`', String.Backtick), + (r'<([^\s>]+)>', String.Regex), + (r'(q|qq|qw|qr|qx)\{', String.Other, 'cb-string'), + (r'(q|qq|qw|qr|qx)\(', String.Other, 'rb-string'), + (r'(q|qq|qw|qr|qx)\[', String.Other, 'sb-string'), + (r'(q|qq|qw|qr|qx)\<', String.Other, 'lt-string'), + (r'(q|qq|qw|qr|qx)([\W_])(.|\n)*?\2', String.Other), + (r'package\s+', Keyword, 'modulename'), + (r'sub\s+', Keyword, 'funcname'), + (r'(\[\]|\*\*|::|<<|>>|>=|<=>|<=|={3}|!=|=~|' + r'!~|&&?|\|\||\.{1,3})', Operator), + (r'[-+/*%=<>&^|!\\~]=?', Operator), + (r'[()\[\]:;,<>/?{}]', Punctuation), # yes, there's no shortage + # of punctuation in Perl! + (r'(?=\w)', Name, 'name'), + ], + 'format': [ + (r'\.\n', String.Interpol, '#pop'), + (r'[^\n]*\n', String.Interpol), + ], + 'varname': [ + (r'\s+', Text), + (r'\{', Punctuation, '#pop'), # hash syntax? 
+ (r'\)|,', Punctuation, '#pop'), # argument specifier + (r'\w+::', Name.Namespace), + (r'[\w:]+', Name.Variable, '#pop'), + ], + 'name': [ + (r'\w+::', Name.Namespace), + (r'[\w:]+', Name, '#pop'), + (r'[A-Z_]+(?=\W)', Name.Constant, '#pop'), + (r'(?=\W)', Text, '#pop'), + ], + 'modulename': [ + (r'[a-zA-Z_]\w*', Name.Namespace, '#pop') + ], + 'funcname': [ + (r'[a-zA-Z_]\w*[!?]?', Name.Function), + (r'\s+', Text), + # argument declaration + (r'(\([$@%]*\))(\s*)', bygroups(Punctuation, Text)), + (r';', Punctuation, '#pop'), + (r'.*?\{', Punctuation, '#pop'), + ], + 'cb-string': [ + (r'\\[{}\\]', String.Other), + (r'\\', String.Other), + (r'\{', String.Other, 'cb-string'), + (r'\}', String.Other, '#pop'), + (r'[^{}\\]+', String.Other) + ], + 'rb-string': [ + (r'\\[()\\]', String.Other), + (r'\\', String.Other), + (r'\(', String.Other, 'rb-string'), + (r'\)', String.Other, '#pop'), + (r'[^()]+', String.Other) + ], + 'sb-string': [ + (r'\\[\[\]\\]', String.Other), + (r'\\', String.Other), + (r'\[', String.Other, 'sb-string'), + (r'\]', String.Other, '#pop'), + (r'[^\[\]]+', String.Other) + ], + 'lt-string': [ + (r'\\[<>\\]', String.Other), + (r'\\', String.Other), + (r'\<', String.Other, 'lt-string'), + (r'\>', String.Other, '#pop'), + (r'[^<>]+', String.Other) + ], + 'end-part': [ + (r'.+', Comment.Preproc, '#pop') + ] + } + + def analyse_text(text): + if shebang_matches(text, r'perl'): + return True + if re.search('(?:my|our)\s+[$@%(]', text): + return 0.9 + + +class Perl6Lexer(ExtendedRegexLexer): + """ + For `Perl 6 `_ source code. + + .. 
versionadded:: 2.0 + """ + + name = 'Perl6' + aliases = ['perl6', 'pl6'] + filenames = ['*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', + '*.6pm', '*.p6m', '*.pm6', '*.t'] + mimetypes = ['text/x-perl6', 'application/x-perl6'] + flags = re.MULTILINE | re.DOTALL | re.UNICODE + + PERL6_IDENTIFIER_RANGE = "['\w:-]" + + PERL6_KEYWORDS = ( + 'BEGIN', 'CATCH', 'CHECK', 'CONTROL', 'END', 'ENTER', 'FIRST', 'INIT', + 'KEEP', 'LAST', 'LEAVE', 'NEXT', 'POST', 'PRE', 'START', 'TEMP', + 'UNDO', 'as', 'assoc', 'async', 'augment', 'binary', 'break', 'but', + 'cached', 'category', 'class', 'constant', 'contend', 'continue', + 'copy', 'deep', 'default', 'defequiv', 'defer', 'die', 'do', 'else', + 'elsif', 'enum', 'equiv', 'exit', 'export', 'fail', 'fatal', 'for', + 'gather', 'given', 'goto', 'grammar', 'handles', 'has', 'if', 'inline', + 'irs', 'is', 'last', 'leave', 'let', 'lift', 'loop', 'looser', 'macro', + 'make', 'maybe', 'method', 'module', 'multi', 'my', 'next', 'of', + 'ofs', 'only', 'oo', 'ors', 'our', 'package', 'parsed', 'prec', + 'proto', 'readonly', 'redo', 'ref', 'regex', 'reparsed', 'repeat', + 'require', 'required', 'return', 'returns', 'role', 'rule', 'rw', + 'self', 'slang', 'state', 'sub', 'submethod', 'subset', 'supersede', + 'take', 'temp', 'tighter', 'token', 'trusts', 'try', 'unary', + 'unless', 'until', 'use', 'warn', 'when', 'where', 'while', 'will', + ) + + PERL6_BUILTINS = ( + 'ACCEPTS', 'HOW', 'REJECTS', 'VAR', 'WHAT', 'WHENCE', 'WHERE', 'WHICH', + 'WHO', 'abs', 'acos', 'acosec', 'acosech', 'acosh', 'acotan', 'acotanh', + 'all', 'any', 'approx', 'arity', 'asec', 'asech', 'asin', 'asinh', + 'assuming', 'atan', 'atan2', 'atanh', 'attr', 'bless', 'body', 'by', + 'bytes', 'caller', 'callsame', 'callwith', 'can', 'capitalize', 'cat', + 'ceiling', 'chars', 'chmod', 'chomp', 'chop', 'chr', 'chroot', + 'circumfix', 'cis', 'classify', 'clone', 'close', 'cmp_ok', 'codes', + 'comb', 'connect', 'contains', 'context', 'cos', 'cosec', 'cosech', + 'cosh', 
'cotan', 'cotanh', 'count', 'defined', 'delete', 'diag', + 'dies_ok', 'does', 'e', 'each', 'eager', 'elems', 'end', 'eof', 'eval', + 'eval_dies_ok', 'eval_elsewhere', 'eval_lives_ok', 'evalfile', 'exists', + 'exp', 'first', 'flip', 'floor', 'flunk', 'flush', 'fmt', 'force_todo', + 'fork', 'from', 'getc', 'gethost', 'getlogin', 'getpeername', 'getpw', + 'gmtime', 'graphs', 'grep', 'hints', 'hyper', 'im', 'index', 'infix', + 'invert', 'is_approx', 'is_deeply', 'isa', 'isa_ok', 'isnt', 'iterator', + 'join', 'key', 'keys', 'kill', 'kv', 'lastcall', 'lazy', 'lc', 'lcfirst', + 'like', 'lines', 'link', 'lives_ok', 'localtime', 'log', 'log10', 'map', + 'max', 'min', 'minmax', 'name', 'new', 'nextsame', 'nextwith', 'nfc', + 'nfd', 'nfkc', 'nfkd', 'nok_error', 'nonce', 'none', 'normalize', 'not', + 'nothing', 'ok', 'once', 'one', 'open', 'opendir', 'operator', 'ord', + 'p5chomp', 'p5chop', 'pack', 'pair', 'pairs', 'pass', 'perl', 'pi', + 'pick', 'plan', 'plan_ok', 'polar', 'pop', 'pos', 'postcircumfix', + 'postfix', 'pred', 'prefix', 'print', 'printf', 'push', 'quasi', + 'quotemeta', 'rand', 're', 'read', 'readdir', 'readline', 'reduce', + 'reverse', 'rewind', 'rewinddir', 'rindex', 'roots', 'round', + 'roundrobin', 'run', 'runinstead', 'sameaccent', 'samecase', 'say', + 'sec', 'sech', 'sech', 'seek', 'shape', 'shift', 'sign', 'signature', + 'sin', 'sinh', 'skip', 'skip_rest', 'sleep', 'slurp', 'sort', 'splice', + 'split', 'sprintf', 'sqrt', 'srand', 'strand', 'subst', 'substr', 'succ', + 'sum', 'symlink', 'tan', 'tanh', 'throws_ok', 'time', 'times', 'to', + 'todo', 'trim', 'trim_end', 'trim_start', 'true', 'truncate', 'uc', + 'ucfirst', 'undef', 'undefine', 'uniq', 'unlike', 'unlink', 'unpack', + 'unpolar', 'unshift', 'unwrap', 'use_ok', 'value', 'values', 'vec', + 'version_lt', 'void', 'wait', 'want', 'wrap', 'write', 'zip', + ) + + PERL6_BUILTIN_CLASSES = ( + 'Abstraction', 'Any', 'AnyChar', 'Array', 'Associative', 'Bag', 'Bit', + 'Blob', 'Block', 'Bool', 'Buf', 'Byte', 
'Callable', 'Capture', 'Char', 'Class', + 'Code', 'Codepoint', 'Comparator', 'Complex', 'Decreasing', 'Exception', + 'Failure', 'False', 'Grammar', 'Grapheme', 'Hash', 'IO', 'Increasing', + 'Int', 'Junction', 'KeyBag', 'KeyExtractor', 'KeyHash', 'KeySet', + 'KitchenSink', 'List', 'Macro', 'Mapping', 'Match', 'Matcher', 'Method', + 'Module', 'Num', 'Object', 'Ordered', 'Ordering', 'OrderingPair', + 'Package', 'Pair', 'Positional', 'Proxy', 'Range', 'Rat', 'Regex', + 'Role', 'Routine', 'Scalar', 'Seq', 'Set', 'Signature', 'Str', 'StrLen', + 'StrPos', 'Sub', 'Submethod', 'True', 'UInt', 'Undef', 'Version', 'Void', + 'Whatever', 'bit', 'bool', 'buf', 'buf1', 'buf16', 'buf2', 'buf32', + 'buf4', 'buf64', 'buf8', 'complex', 'int', 'int1', 'int16', 'int2', + 'int32', 'int4', 'int64', 'int8', 'num', 'rat', 'rat1', 'rat16', 'rat2', + 'rat32', 'rat4', 'rat64', 'rat8', 'uint', 'uint1', 'uint16', 'uint2', + 'uint32', 'uint4', 'uint64', 'uint8', 'utf16', 'utf32', 'utf8', + ) + + PERL6_OPERATORS = ( + 'X', 'Z', 'after', 'also', 'and', 'andthen', 'before', 'cmp', 'div', + 'eq', 'eqv', 'extra', 'ff', 'fff', 'ge', 'gt', 'le', 'leg', 'lt', 'm', + 'mm', 'mod', 'ne', 'or', 'orelse', 'rx', 's', 'tr', 'x', 'xor', 'xx', + '++', '--', '**', '!', '+', '-', '~', '?', '|', '||', '+^', '~^', '?^', + '^', '*', '/', '%', '%%', '+&', '+<', '+>', '~&', '~<', '~>', '?&', + 'gcd', 'lcm', '+', '-', '+|', '+^', '~|', '~^', '?|', '?^', + '~', '&', '^', 'but', 'does', '<=>', '..', '..^', '^..', '^..^', + '!=', '==', '<', '<=', '>', '>=', '~~', '===', '!eqv', + '&&', '||', '^^', '//', 'min', 'max', '??', '!!', 'ff', 'fff', 'so', + 'not', '<==', '==>', '<<==', '==>>', + ) + + # Perl 6 has a *lot* of possible bracketing characters + # this list was lifted from STD.pm6 (https://github.com/perl6/std) + PERL6_BRACKETS = { + u'\u0028': u'\u0029', u'\u003c': u'\u003e', u'\u005b': u'\u005d', + u'\u007b': u'\u007d', u'\u00ab': u'\u00bb', u'\u0f3a': u'\u0f3b', + u'\u0f3c': u'\u0f3d', u'\u169b': u'\u169c', 
u'\u2018': u'\u2019', + u'\u201a': u'\u2019', u'\u201b': u'\u2019', u'\u201c': u'\u201d', + u'\u201e': u'\u201d', u'\u201f': u'\u201d', u'\u2039': u'\u203a', + u'\u2045': u'\u2046', u'\u207d': u'\u207e', u'\u208d': u'\u208e', + u'\u2208': u'\u220b', u'\u2209': u'\u220c', u'\u220a': u'\u220d', + u'\u2215': u'\u29f5', u'\u223c': u'\u223d', u'\u2243': u'\u22cd', + u'\u2252': u'\u2253', u'\u2254': u'\u2255', u'\u2264': u'\u2265', + u'\u2266': u'\u2267', u'\u2268': u'\u2269', u'\u226a': u'\u226b', + u'\u226e': u'\u226f', u'\u2270': u'\u2271', u'\u2272': u'\u2273', + u'\u2274': u'\u2275', u'\u2276': u'\u2277', u'\u2278': u'\u2279', + u'\u227a': u'\u227b', u'\u227c': u'\u227d', u'\u227e': u'\u227f', + u'\u2280': u'\u2281', u'\u2282': u'\u2283', u'\u2284': u'\u2285', + u'\u2286': u'\u2287', u'\u2288': u'\u2289', u'\u228a': u'\u228b', + u'\u228f': u'\u2290', u'\u2291': u'\u2292', u'\u2298': u'\u29b8', + u'\u22a2': u'\u22a3', u'\u22a6': u'\u2ade', u'\u22a8': u'\u2ae4', + u'\u22a9': u'\u2ae3', u'\u22ab': u'\u2ae5', u'\u22b0': u'\u22b1', + u'\u22b2': u'\u22b3', u'\u22b4': u'\u22b5', u'\u22b6': u'\u22b7', + u'\u22c9': u'\u22ca', u'\u22cb': u'\u22cc', u'\u22d0': u'\u22d1', + u'\u22d6': u'\u22d7', u'\u22d8': u'\u22d9', u'\u22da': u'\u22db', + u'\u22dc': u'\u22dd', u'\u22de': u'\u22df', u'\u22e0': u'\u22e1', + u'\u22e2': u'\u22e3', u'\u22e4': u'\u22e5', u'\u22e6': u'\u22e7', + u'\u22e8': u'\u22e9', u'\u22ea': u'\u22eb', u'\u22ec': u'\u22ed', + u'\u22f0': u'\u22f1', u'\u22f2': u'\u22fa', u'\u22f3': u'\u22fb', + u'\u22f4': u'\u22fc', u'\u22f6': u'\u22fd', u'\u22f7': u'\u22fe', + u'\u2308': u'\u2309', u'\u230a': u'\u230b', u'\u2329': u'\u232a', + u'\u23b4': u'\u23b5', u'\u2768': u'\u2769', u'\u276a': u'\u276b', + u'\u276c': u'\u276d', u'\u276e': u'\u276f', u'\u2770': u'\u2771', + u'\u2772': u'\u2773', u'\u2774': u'\u2775', u'\u27c3': u'\u27c4', + u'\u27c5': u'\u27c6', u'\u27d5': u'\u27d6', u'\u27dd': u'\u27de', + u'\u27e2': u'\u27e3', u'\u27e4': u'\u27e5', u'\u27e6': u'\u27e7', + 
u'\u27e8': u'\u27e9', u'\u27ea': u'\u27eb', u'\u2983': u'\u2984', + u'\u2985': u'\u2986', u'\u2987': u'\u2988', u'\u2989': u'\u298a', + u'\u298b': u'\u298c', u'\u298d': u'\u298e', u'\u298f': u'\u2990', + u'\u2991': u'\u2992', u'\u2993': u'\u2994', u'\u2995': u'\u2996', + u'\u2997': u'\u2998', u'\u29c0': u'\u29c1', u'\u29c4': u'\u29c5', + u'\u29cf': u'\u29d0', u'\u29d1': u'\u29d2', u'\u29d4': u'\u29d5', + u'\u29d8': u'\u29d9', u'\u29da': u'\u29db', u'\u29f8': u'\u29f9', + u'\u29fc': u'\u29fd', u'\u2a2b': u'\u2a2c', u'\u2a2d': u'\u2a2e', + u'\u2a34': u'\u2a35', u'\u2a3c': u'\u2a3d', u'\u2a64': u'\u2a65', + u'\u2a79': u'\u2a7a', u'\u2a7d': u'\u2a7e', u'\u2a7f': u'\u2a80', + u'\u2a81': u'\u2a82', u'\u2a83': u'\u2a84', u'\u2a8b': u'\u2a8c', + u'\u2a91': u'\u2a92', u'\u2a93': u'\u2a94', u'\u2a95': u'\u2a96', + u'\u2a97': u'\u2a98', u'\u2a99': u'\u2a9a', u'\u2a9b': u'\u2a9c', + u'\u2aa1': u'\u2aa2', u'\u2aa6': u'\u2aa7', u'\u2aa8': u'\u2aa9', + u'\u2aaa': u'\u2aab', u'\u2aac': u'\u2aad', u'\u2aaf': u'\u2ab0', + u'\u2ab3': u'\u2ab4', u'\u2abb': u'\u2abc', u'\u2abd': u'\u2abe', + u'\u2abf': u'\u2ac0', u'\u2ac1': u'\u2ac2', u'\u2ac3': u'\u2ac4', + u'\u2ac5': u'\u2ac6', u'\u2acd': u'\u2ace', u'\u2acf': u'\u2ad0', + u'\u2ad1': u'\u2ad2', u'\u2ad3': u'\u2ad4', u'\u2ad5': u'\u2ad6', + u'\u2aec': u'\u2aed', u'\u2af7': u'\u2af8', u'\u2af9': u'\u2afa', + u'\u2e02': u'\u2e03', u'\u2e04': u'\u2e05', u'\u2e09': u'\u2e0a', + u'\u2e0c': u'\u2e0d', u'\u2e1c': u'\u2e1d', u'\u2e20': u'\u2e21', + u'\u3008': u'\u3009', u'\u300a': u'\u300b', u'\u300c': u'\u300d', + u'\u300e': u'\u300f', u'\u3010': u'\u3011', u'\u3014': u'\u3015', + u'\u3016': u'\u3017', u'\u3018': u'\u3019', u'\u301a': u'\u301b', + u'\u301d': u'\u301e', u'\ufd3e': u'\ufd3f', u'\ufe17': u'\ufe18', + u'\ufe35': u'\ufe36', u'\ufe37': u'\ufe38', u'\ufe39': u'\ufe3a', + u'\ufe3b': u'\ufe3c', u'\ufe3d': u'\ufe3e', u'\ufe3f': u'\ufe40', + u'\ufe41': u'\ufe42', u'\ufe43': u'\ufe44', u'\ufe47': u'\ufe48', + u'\ufe59': u'\ufe5a', 
u'\ufe5b': u'\ufe5c', u'\ufe5d': u'\ufe5e', + u'\uff08': u'\uff09', u'\uff1c': u'\uff1e', u'\uff3b': u'\uff3d', + u'\uff5b': u'\uff5d', u'\uff5f': u'\uff60', u'\uff62': u'\uff63', + } + + def _build_word_match(words, boundary_regex_fragment=None, prefix='', suffix=''): + if boundary_regex_fragment is None: + return r'\b(' + prefix + r'|'.join(re.escape(x) for x in words) + \ + suffix + r')\b' + else: + return r'(? 0: + next_open_pos = text.find(opening_chars, search_pos + n_chars) + next_close_pos = text.find(closing_chars, search_pos + n_chars) + + if next_close_pos == -1: + next_close_pos = len(text) + nesting_level = 0 + elif next_open_pos != -1 and next_open_pos < next_close_pos: + nesting_level += 1 + search_pos = next_open_pos + else: # next_close_pos < next_open_pos + nesting_level -= 1 + search_pos = next_close_pos + + end_pos = next_close_pos + + if end_pos < 0: # if we didn't find a closer, just highlight the + # rest of the text in this class + end_pos = len(text) + + if adverbs is not None and re.search(r':to\b', adverbs): + heredoc_terminator = text[match.start('delimiter') + n_chars:end_pos] + end_heredoc = re.search(r'^\s*' + re.escape(heredoc_terminator) + + r'\s*$', text[end_pos:], re.MULTILINE) + + if end_heredoc: + end_pos += end_heredoc.end() + else: + end_pos = len(text) + + yield match.start(), token_class, text[match.start():end_pos + n_chars] + context.pos = end_pos + n_chars + + return callback + + def opening_brace_callback(lexer, match, context): + stack = context.stack + + yield match.start(), Text, context.text[match.start():match.end()] + context.pos = match.end() + + # if we encounter an opening brace and we're one level + # below a token state, it means we need to increment + # the nesting level for braces so we know later when + # we should return to the token rules. 
+ if len(stack) > 2 and stack[-2] == 'token': + context.perl6_token_nesting_level += 1 + + def closing_brace_callback(lexer, match, context): + stack = context.stack + + yield match.start(), Text, context.text[match.start():match.end()] + context.pos = match.end() + + # if we encounter a free closing brace and we're one level + # below a token state, it means we need to check the nesting + # level to see if we need to return to the token state. + if len(stack) > 2 and stack[-2] == 'token': + context.perl6_token_nesting_level -= 1 + if context.perl6_token_nesting_level == 0: + stack.pop() + + def embedded_perl6_callback(lexer, match, context): + context.perl6_token_nesting_level = 1 + yield match.start(), Text, context.text[match.start():match.end()] + context.pos = match.end() + context.stack.append('root') + + # If you're modifying these rules, be careful if you need to process '{' or '}' + # characters. We have special logic for processing these characters (due to the fact + # that you can nest Perl 6 code in regex blocks), so if you need to process one of + # them, make sure you also process the corresponding one! + tokens = { + 'common': [ + (r'#[`|=](?P(?P[' + ''.join(PERL6_BRACKETS) + r'])(?P=first_char)*)', + brackets_callback(Comment.Multiline)), + (r'#[^\n]*$', Comment.Singleline), + (r'^(\s*)=begin\s+(\w+)\b.*?^\1=end\s+\2', Comment.Multiline), + (r'^(\s*)=for.*?\n\s*?\n', Comment.Multiline), + (r'^=.*?\n\s*?\n', Comment.Multiline), + (r'(regex|token|rule)(\s*' + PERL6_IDENTIFIER_RANGE + '+:sym)', + bygroups(Keyword, Name), 'token-sym-brackets'), + (r'(regex|token|rule)(?!' + PERL6_IDENTIFIER_RANGE + ')(\s*' + PERL6_IDENTIFIER_RANGE + '+)?', + bygroups(Keyword, Name), 'pre-token'), + # deal with a special case in the Perl 6 grammar (role q { ... 
}) + (r'(role)(\s+)(q)(\s*)', bygroups(Keyword, Text, Name, Text)), + (_build_word_match(PERL6_KEYWORDS, PERL6_IDENTIFIER_RANGE), Keyword), + (_build_word_match(PERL6_BUILTIN_CLASSES, PERL6_IDENTIFIER_RANGE, suffix='(?::[UD])?'), + Name.Builtin), + (_build_word_match(PERL6_BUILTINS, PERL6_IDENTIFIER_RANGE), Name.Builtin), + # copied from PerlLexer + (r'[$@%&][.^:?=!~]?' + PERL6_IDENTIFIER_RANGE + u'+(?:<<.*?>>|<.*?>|«.*?»)*', + Name.Variable), + (r'\$[!/](?:<<.*?>>|<.*?>|«.*?»)*', Name.Variable.Global), + (r'::\?\w+', Name.Variable.Global), + (r'[$@%&]\*' + PERL6_IDENTIFIER_RANGE + u'+(?:<<.*?>>|<.*?>|«.*?»)*', + Name.Variable.Global), + (r'\$(?:<.*?>)+', Name.Variable), + (r'(?:q|qq|Q)[a-zA-Z]?\s*(?P:[\w\s:]+)?\s*(?P(?P[^0-9a-zA-Z:\s])' + r'(?P=first_char)*)', brackets_callback(String)), + # copied from PerlLexer + (r'0_?[0-7]+(_[0-7]+)*', Number.Oct), + (r'0x[0-9A-Fa-f]+(_[0-9A-Fa-f]+)*', Number.Hex), + (r'0b[01]+(_[01]+)*', Number.Bin), + (r'(?i)(\d*(_\d*)*\.\d+(_\d*)*|\d+(_\d*)*\.\d+(_\d*)*)(e[+-]?\d+)?', + Number.Float), + (r'(?i)\d+(_\d*)*e[+-]?\d+(_\d*)*', Number.Float), + (r'\d+(_\d+)*', Number.Integer), + (r'(?<=~~)\s*/(?:\\\\|\\/|.)*?/', String.Regex), + (r'(?<=[=(,])\s*/(?:\\\\|\\/|.)*?/', String.Regex), + (r'm\w+(?=\()', Name), + (r'(?:m|ms|rx)\s*(?P:[\w\s:]+)?\s*(?P(?P[^\w:\s])' + r'(?P=first_char)*)', brackets_callback(String.Regex)), + (r'(?:s|ss|tr)\s*(?::[\w\s:]+)?\s*/(?:\\\\|\\/|.)*?/(?:\\\\|\\/|.)*?/', + String.Regex), + (r'<[^\s=].*?\S>', String), + (_build_word_match(PERL6_OPERATORS), Operator), + (r'\w' + PERL6_IDENTIFIER_RANGE + '*', Name), + (r"'(\\\\|\\[^\\]|[^'\\])*'", String), + (r'"(\\\\|\\[^\\]|[^"\\])*"', String), + ], + 'root': [ + include('common'), + (r'\{', opening_brace_callback), + (r'\}', closing_brace_callback), + (r'.+?', Text), + ], + 'pre-token': [ + include('common'), + (r'\{', Text, ('#pop', 'token')), + (r'.+?', Text), + ], + 'token-sym-brackets': [ + (r'(?P(?P[' + ''.join(PERL6_BRACKETS) + '])(?P=first_char)*)', + 
brackets_callback(Name), ('#pop', 'pre-token')), + default(('#pop', 'pre-token')), + ], + 'token': [ + (r'\}', Text, '#pop'), + (r'(?<=:)(?:my|our|state|constant|temp|let).*?;', using(this)), + # make sure that quotes in character classes aren't treated as strings + (r'<(?:[-!?+.]\s*)?\[.*?\]>', String.Regex), + # make sure that '#' characters in quotes aren't treated as comments + (r"(?my|our)\s+)?(?:module|class|role|enum|grammar)', line) + if class_decl: + if saw_perl_decl or class_decl.group('scope') is not None: + return True + rating = 0.05 + continue + break + + return rating + + def __init__(self, **options): + super(Perl6Lexer, self).__init__(**options) + self.encoding = options.get('encoding', 'utf-8') diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/php.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/php.py new file mode 100644 index 0000000..08dfa6a --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/php.py @@ -0,0 +1,245 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.php + ~~~~~~~~~~~~~~~~~~~ + + Lexers for PHP and related languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include, bygroups, default, using, this +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Other +from pygments.util import get_bool_opt, get_list_opt, iteritems + +__all__ = ['ZephirLexer', 'PhpLexer'] + + +class ZephirLexer(RegexLexer): + """ + For `Zephir language `_ source code. + + Zephir is a compiled high level language aimed + to the creation of C-extensions for PHP. + + .. 
versionadded:: 2.0 + """ + + name = 'Zephir' + aliases = ['zephir'] + filenames = ['*.zep'] + + zephir_keywords = ['fetch', 'echo', 'isset', 'empty'] + zephir_type = ['bit', 'bits', 'string'] + + flags = re.DOTALL | re.MULTILINE + + tokens = { + 'commentsandwhitespace': [ + (r'\s+', Text), + (r'//.*?\n', Comment.Single), + (r'/\*.*?\*/', Comment.Multiline) + ], + 'slashstartsregex': [ + include('commentsandwhitespace'), + (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' + r'([gim]+\b|\B)', String.Regex, '#pop'), + default('#pop') + ], + 'badregex': [ + (r'\n', Text, '#pop') + ], + 'root': [ + (r'^(?=\s|/|', Punctuation), + (r'"(?:\\x[0-9a-fA-F]+\\|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|' + r'\\[0-7]+\\|\\["\nabcefnrstv]|[^\\"])*"', String.Double), + (r"'(?:''|[^'])*'", String.Atom), # quoted atom + # Needs to not be followed by an atom. + # (r'=(?=\s|[a-zA-Z\[])', Operator), + (r'is\b', Operator), + (r'(<|>|=<|>=|==|=:=|=|/|//|\*|\+|-)(?=\s|[a-zA-Z0-9\[])', + Operator), + (r'(mod|div|not)\b', Operator), + (r'_', Keyword), # The don't-care variable + (r'([a-z]+)(:)', bygroups(Name.Namespace, Punctuation)), + (u'([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]' + u'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)' + u'(\\s*)(:-|-->)', + bygroups(Name.Function, Text, Operator)), # function defn + (u'([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]' + u'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)' + u'(\\s*)(\\()', + bygroups(Name.Function, Text, Punctuation)), + (u'[a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]' + u'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*', + String.Atom), # atom, characters + # This one includes ! 
+ (u'[#&*+\\-./:<=>?@\\\\^~\u00a1-\u00bf\u2010-\u303f]+', + String.Atom), # atom, graphics + (r'[A-Z_]\w*', Name.Variable), + (u'\\s+|[\u2000-\u200f\ufff0-\ufffe\uffef]', Text), + ], + 'nested-comment': [ + (r'\*/', Comment.Multiline, '#pop'), + (r'/\*', Comment.Multiline, '#push'), + (r'[^*/]+', Comment.Multiline), + (r'[*/]', Comment.Multiline), + ], + } + + def analyse_text(text): + return ':-' in text + + +class LogtalkLexer(RegexLexer): + """ + For `Logtalk `_ source code. + + .. versionadded:: 0.10 + """ + + name = 'Logtalk' + aliases = ['logtalk'] + filenames = ['*.lgt', '*.logtalk'] + mimetypes = ['text/x-logtalk'] + + tokens = { + 'root': [ + # Directives + (r'^\s*:-\s', Punctuation, 'directive'), + # Comments + (r'%.*?\n', Comment), + (r'/\*(.|\n)*?\*/', Comment), + # Whitespace + (r'\n', Text), + (r'\s+', Text), + # Numbers + (r"0'.", Number), + (r'0b[01]+', Number.Bin), + (r'0o[0-7]+', Number.Oct), + (r'0x[0-9a-fA-F]+', Number.Hex), + (r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number), + # Variables + (r'([A-Z_]\w*)', Name.Variable), + # Event handlers + (r'(after|before)(?=[(])', Keyword), + # Message forwarding handler + (r'forward(?=[(])', Keyword), + # Execution-context methods + (r'(parameter|this|se(lf|nder))(?=[(])', Keyword), + # Reflection + (r'(current_predicate|predicate_property)(?=[(])', Keyword), + # DCGs and term expansion + (r'(expand_(goal|term)|(goal|term)_expansion|phrase)(?=[(])', Keyword), + # Entity + (r'(abolish|c(reate|urrent))_(object|protocol|category)(?=[(])', Keyword), + (r'(object|protocol|category)_property(?=[(])', Keyword), + # Entity relations + (r'co(mplements_object|nforms_to_protocol)(?=[(])', Keyword), + (r'extends_(object|protocol|category)(?=[(])', Keyword), + (r'imp(lements_protocol|orts_category)(?=[(])', Keyword), + (r'(instantiat|specializ)es_class(?=[(])', Keyword), + # Events + (r'(current_event|(abolish|define)_events)(?=[(])', Keyword), + # Flags + (r'(current|set)_logtalk_flag(?=[(])', Keyword), + # Compiling, 
loading, and library paths + (r'logtalk_(compile|l(ibrary_path|oad|oad_context)|make)(?=[(])', Keyword), + (r'\blogtalk_make\b', Keyword), + # Database + (r'(clause|retract(all)?)(?=[(])', Keyword), + (r'a(bolish|ssert(a|z))(?=[(])', Keyword), + # Control constructs + (r'(ca(ll|tch)|throw)(?=[(])', Keyword), + (r'(fa(il|lse)|true)\b', Keyword), + # All solutions + (r'((bag|set)of|f(ind|or)all)(?=[(])', Keyword), + # Multi-threading meta-predicates + (r'threaded(_(call|once|ignore|exit|peek|wait|notify))?(?=[(])', Keyword), + # Term unification + (r'(subsumes_term|unify_with_occurs_check)(?=[(])', Keyword), + # Term creation and decomposition + (r'(functor|arg|copy_term|numbervars|term_variables)(?=[(])', Keyword), + # Evaluable functors + (r'(rem|m(ax|in|od)|abs|sign)(?=[(])', Keyword), + (r'float(_(integer|fractional)_part)?(?=[(])', Keyword), + (r'(floor|truncate|round|ceiling)(?=[(])', Keyword), + # Other arithmetic functors + (r'(cos|a(cos|sin|tan)|exp|log|s(in|qrt))(?=[(])', Keyword), + # Term testing + (r'(var|atom(ic)?|integer|float|c(allable|ompound)|n(onvar|umber)|' + r'ground|acyclic_term)(?=[(])', Keyword), + # Term comparison + (r'compare(?=[(])', Keyword), + # Stream selection and control + (r'(curren|se)t_(in|out)put(?=[(])', Keyword), + (r'(open|close)(?=[(])', Keyword), + (r'flush_output(?=[(])', Keyword), + (r'(at_end_of_stream|flush_output)\b', Keyword), + (r'(stream_property|at_end_of_stream|set_stream_position)(?=[(])', Keyword), + # Character and byte input/output + (r'(nl|(get|peek|put)_(byte|c(har|ode)))(?=[(])', Keyword), + (r'\bnl\b', Keyword), + # Term input/output + (r'read(_term)?(?=[(])', Keyword), + (r'write(q|_(canonical|term))?(?=[(])', Keyword), + (r'(current_)?op(?=[(])', Keyword), + (r'(current_)?char_conversion(?=[(])', Keyword), + # Atomic term processing + (r'atom_(length|c(hars|o(ncat|des)))(?=[(])', Keyword), + (r'(char_code|sub_atom)(?=[(])', Keyword), + (r'number_c(har|ode)s(?=[(])', Keyword), + # Implementation defined 
hooks functions + (r'(se|curren)t_prolog_flag(?=[(])', Keyword), + (r'\bhalt\b', Keyword), + (r'halt(?=[(])', Keyword), + # Message sending operators + (r'(::|:|\^\^)', Operator), + # External call + (r'[{}]', Keyword), + # Logic and control + (r'(ignore|once)(?=[(])', Keyword), + (r'\brepeat\b', Keyword), + # Sorting + (r'(key)?sort(?=[(])', Keyword), + # Bitwise functors + (r'(>>|<<|/\\|\\\\|\\)', Operator), + # Predicate aliases + (r'\bas\b', Operator), + # Arithemtic evaluation + (r'\bis\b', Keyword), + # Arithemtic comparison + (r'(=:=|=\\=|<|=<|>=|>)', Operator), + # Term creation and decomposition + (r'=\.\.', Operator), + # Term unification + (r'(=|\\=)', Operator), + # Term comparison + (r'(==|\\==|@=<|@<|@>=|@>)', Operator), + # Evaluable functors + (r'(//|[-+*/])', Operator), + (r'\b(e|pi|mod|rem)\b', Operator), + # Other arithemtic functors + (r'\b\*\*\b', Operator), + # DCG rules + (r'-->', Operator), + # Control constructs + (r'([!;]|->)', Operator), + # Logic and control + (r'\\+', Operator), + # Mode operators + (r'[?@]', Operator), + # Existential quantifier + (r'\^', Operator), + # Strings + (r'"(\\\\|\\"|[^"])*"', String), + # Ponctuation + (r'[()\[\],.|]', Text), + # Atoms + (r"[a-z]\w*", Text), + (r"'", String, 'quoted_atom'), + ], + + 'quoted_atom': [ + (r"''", String), + (r"'", String, '#pop'), + (r'\\([\\abfnrtv"\']|(x[a-fA-F0-9]+|[0-7]+)\\)', String.Escape), + (r"[^\\'\n]+", String), + (r'\\', String), + ], + + 'directive': [ + # Conditional compilation directives + (r'(el)?if(?=[(])', Keyword, 'root'), + (r'(e(lse|ndif))[.]', Keyword, 'root'), + # Entity directives + (r'(category|object|protocol)(?=[(])', Keyword, 'entityrelations'), + (r'(end_(category|object|protocol))[.]', Keyword, 'root'), + # Predicate scope directives + (r'(public|protected|private)(?=[(])', Keyword, 'root'), + # Other directives + (r'e(n(coding|sure_loaded)|xport)(?=[(])', Keyword, 'root'), + (r'in(clude|itialization|fo)(?=[(])', Keyword, 'root'), + 
(r'(built_in|dynamic|synchronized|threaded)[.]', Keyword, 'root'), + (r'(alias|d(ynamic|iscontiguous)|m(eta_(non_terminal|predicate)|ode|ultifile)|' + r's(et_(logtalk|prolog)_flag|ynchronized))(?=[(])', Keyword, 'root'), + (r'op(?=[(])', Keyword, 'root'), + (r'(c(alls|oinductive)|module|reexport|use(s|_module))(?=[(])', Keyword, 'root'), + (r'[a-z]\w*(?=[(])', Text, 'root'), + (r'[a-z]\w*[.]', Text, 'root'), + ], + + 'entityrelations': [ + (r'(complements|extends|i(nstantiates|mp(lements|orts))|specializes)(?=[(])', Keyword), + # Numbers + (r"0'.", Number), + (r'0b[01]+', Number.Bin), + (r'0o[0-7]+', Number.Oct), + (r'0x[0-9a-fA-F]+', Number.Hex), + (r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number), + # Variables + (r'([A-Z_]\w*)', Name.Variable), + # Atoms + (r"[a-z]\w*", Text), + (r"'", String, 'quoted_atom'), + # Strings + (r'"(\\\\|\\"|[^"])*"', String), + # End of entity-opening directive + (r'([)]\.)', Text, 'root'), + # Scope operator + (r'(::)', Operator), + # Ponctuation + (r'[()\[\],.|]', Text), + # Comments + (r'%.*?\n', Comment), + (r'/\*(.|\n)*?\*/', Comment), + # Whitespace + (r'\n', Text), + (r'\s+', Text), + ] + } + + def analyse_text(text): + if ':- object(' in text: + return 1.0 + elif ':- protocol(' in text: + return 1.0 + elif ':- category(' in text: + return 1.0 + elif re.search('^:-\s[a-z]', text, re.M): + return 0.9 + else: + return 0.0 diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/python.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/python.py new file mode 100644 index 0000000..259d1a9 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/python.py @@ -0,0 +1,833 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.python + ~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for Python and related languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import re + +from pygments.lexer import Lexer, RegexLexer, include, bygroups, using, \ + default, words, combined, do_insertions +from pygments.util import get_bool_opt, shebang_matches +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Generic, Other, Error +from pygments import unistring as uni + +__all__ = ['PythonLexer', 'PythonConsoleLexer', 'PythonTracebackLexer', + 'Python3Lexer', 'Python3TracebackLexer', 'CythonLexer', + 'DgLexer', 'NumPyLexer'] + +line_re = re.compile('.*?\n') + + +class PythonLexer(RegexLexer): + """ + For `Python `_ source code. + """ + + name = 'Python' + aliases = ['python', 'py', 'sage'] + filenames = ['*.py', '*.pyw', '*.sc', 'SConstruct', 'SConscript', '*.tac', '*.sage'] + mimetypes = ['text/x-python', 'application/x-python'] + + tokens = { + 'root': [ + (r'\n', Text), + (r'^(\s*)([rRuU]{,2}"""(?:.|\n)*?""")', bygroups(Text, String.Doc)), + (r"^(\s*)([rRuU]{,2}'''(?:.|\n)*?''')", bygroups(Text, String.Doc)), + (r'[^\S\n]+', Text), + (r'#.*$', Comment), + (r'[]{}:(),;[]', Punctuation), + (r'\\\n', Text), + (r'\\', Text), + (r'(in|is|and|or|not)\b', Operator.Word), + (r'!=|==|<<|>>|[-~+/*%=<>&^|.]', Operator), + include('keywords'), + (r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'), + (r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'), + (r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), + 'fromimport'), + (r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), + 'import'), + include('builtins'), + include('backtick'), + ('(?:[rR]|[uU][rR]|[rR][uU])"""', String, 'tdqs'), + ("(?:[rR]|[uU][rR]|[rR][uU])'''", String, 'tsqs'), + ('(?:[rR]|[uU][rR]|[rR][uU])"', String, 'dqs'), + ("(?:[rR]|[uU][rR]|[rR][uU])'", String, 'sqs'), + ('[uU]?"""', String, combined('stringescape', 'tdqs')), + ("[uU]?'''", String, combined('stringescape', 'tsqs')), + ('[uU]?"', String, combined('stringescape', 'dqs')), + ("[uU]?'", String, combined('stringescape', 
'sqs')), + include('name'), + include('numbers'), + ], + 'keywords': [ + (words(( + 'assert', 'break', 'continue', 'del', 'elif', 'else', 'except', + 'exec', 'finally', 'for', 'global', 'if', 'lambda', 'pass', + 'print', 'raise', 'return', 'try', 'while', 'yield', + 'yield from', 'as', 'with'), suffix=r'\b'), + Keyword), + ], + 'builtins': [ + (words(( + '__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin', + 'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr', 'classmethod', + 'cmp', 'coerce', 'compile', 'complex', 'delattr', 'dict', 'dir', 'divmod', + 'enumerate', 'eval', 'execfile', 'exit', 'file', 'filter', 'float', + 'frozenset', 'getattr', 'globals', 'hasattr', 'hash', 'hex', 'id', + 'input', 'int', 'intern', 'isinstance', 'issubclass', 'iter', 'len', + 'list', 'locals', 'long', 'map', 'max', 'min', 'next', 'object', + 'oct', 'open', 'ord', 'pow', 'property', 'range', 'raw_input', 'reduce', + 'reload', 'repr', 'reversed', 'round', 'set', 'setattr', 'slice', + 'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', 'type', + 'unichr', 'unicode', 'vars', 'xrange', 'zip'), + prefix=r'(?`_ source code (version 3.0). + + .. 
versionadded:: 0.10 + """ + + name = 'Python 3' + aliases = ['python3', 'py3'] + filenames = [] # Nothing until Python 3 gets widespread + mimetypes = ['text/x-python3', 'application/x-python3'] + + flags = re.MULTILINE | re.UNICODE + + uni_name = "[%s][%s]*" % (uni.xid_start, uni.xid_continue) + + tokens = PythonLexer.tokens.copy() + tokens['keywords'] = [ + (words(( + 'assert', 'break', 'continue', 'del', 'elif', 'else', 'except', + 'finally', 'for', 'global', 'if', 'lambda', 'pass', 'raise', + 'nonlocal', 'return', 'try', 'while', 'yield', 'yield from', 'as', + 'with', 'True', 'False', 'None'), suffix=r'\b'), + Keyword), + ] + tokens['builtins'] = [ + (words(( + '__import__', 'abs', 'all', 'any', 'bin', 'bool', 'bytearray', 'bytes', + 'chr', 'classmethod', 'cmp', 'compile', 'complex', 'delattr', 'dict', + 'dir', 'divmod', 'enumerate', 'eval', 'filter', 'float', 'format', + 'frozenset', 'getattr', 'globals', 'hasattr', 'hash', 'hex', 'id', + 'input', 'int', 'isinstance', 'issubclass', 'iter', 'len', 'list', + 'locals', 'map', 'max', 'memoryview', 'min', 'next', 'object', 'oct', + 'open', 'ord', 'pow', 'print', 'property', 'range', 'repr', 'reversed', + 'round', 'set', 'setattr', 'slice', 'sorted', 'staticmethod', 'str', + 'sum', 'super', 'tuple', 'type', 'vars', 'zip'), prefix=r'(?>> a = 'foo' + >>> print a + foo + >>> 1 / 0 + Traceback (most recent call last): + File "", line 1, in + ZeroDivisionError: integer division or modulo by zero + + Additional options: + + `python3` + Use Python 3 lexer for code. Default is ``False``. + + .. 
versionadded:: 1.0 + """ + name = 'Python console session' + aliases = ['pycon'] + mimetypes = ['text/x-python-doctest'] + + def __init__(self, **options): + self.python3 = get_bool_opt(options, 'python3', False) + Lexer.__init__(self, **options) + + def get_tokens_unprocessed(self, text): + if self.python3: + pylexer = Python3Lexer(**self.options) + tblexer = Python3TracebackLexer(**self.options) + else: + pylexer = PythonLexer(**self.options) + tblexer = PythonTracebackLexer(**self.options) + + curcode = '' + insertions = [] + curtb = '' + tbindex = 0 + tb = 0 + for match in line_re.finditer(text): + line = match.group() + if line.startswith(u'>>> ') or line.startswith(u'... '): + tb = 0 + insertions.append((len(curcode), + [(0, Generic.Prompt, line[:4])])) + curcode += line[4:] + elif line.rstrip() == u'...' and not tb: + # only a new >>> prompt can end an exception block + # otherwise an ellipsis in place of the traceback frames + # will be mishandled + insertions.append((len(curcode), + [(0, Generic.Prompt, u'...')])) + curcode += line[3:] + else: + if curcode: + for item in do_insertions( + insertions, pylexer.get_tokens_unprocessed(curcode)): + yield item + curcode = '' + insertions = [] + if (line.startswith(u'Traceback (most recent call last):') or + re.match(u' File "[^"]+", line \\d+\\n$', line)): + tb = 1 + curtb = line + tbindex = match.start() + elif line == 'KeyboardInterrupt\n': + yield match.start(), Name.Class, line + elif tb: + curtb += line + if not (line.startswith(' ') or line.strip() == u'...'): + tb = 0 + for i, t, v in tblexer.get_tokens_unprocessed(curtb): + yield tbindex+i, t, v + else: + yield match.start(), Generic.Output, line + if curcode: + for item in do_insertions(insertions, + pylexer.get_tokens_unprocessed(curcode)): + yield item + if curtb: + for i, t, v in tblexer.get_tokens_unprocessed(curtb): + yield tbindex+i, t, v + + +class PythonTracebackLexer(RegexLexer): + """ + For Python tracebacks. + + .. 
versionadded:: 0.7 + """ + + name = 'Python Traceback' + aliases = ['pytb'] + filenames = ['*.pytb'] + mimetypes = ['text/x-python-traceback'] + + tokens = { + 'root': [ + (r'^Traceback \(most recent call last\):\n', + Generic.Traceback, 'intb'), + # SyntaxError starts with this. + (r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'), + (r'^.*\n', Other), + ], + 'intb': [ + (r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)', + bygroups(Text, Name.Builtin, Text, Number, Text, Name, Text)), + (r'^( File )("[^"]+")(, line )(\d+)(\n)', + bygroups(Text, Name.Builtin, Text, Number, Text)), + (r'^( )(.+)(\n)', + bygroups(Text, using(PythonLexer), Text)), + (r'^([ \t]*)(\.\.\.)(\n)', + bygroups(Text, Comment, Text)), # for doctests... + (r'^([^:]+)(: )(.+)(\n)', + bygroups(Generic.Error, Text, Name, Text), '#pop'), + (r'^([a-zA-Z_]\w*)(:?\n)', + bygroups(Generic.Error, Text), '#pop') + ], + } + + +class Python3TracebackLexer(RegexLexer): + """ + For Python 3.0 tracebacks, with support for chained exceptions. + + .. versionadded:: 1.0 + """ + + name = 'Python 3.0 Traceback' + aliases = ['py3tb'] + filenames = ['*.py3tb'] + mimetypes = ['text/x-python3-traceback'] + + tokens = { + 'root': [ + (r'\n', Text), + (r'^Traceback \(most recent call last\):\n', Generic.Traceback, 'intb'), + (r'^During handling of the above exception, another ' + r'exception occurred:\n\n', Generic.Traceback), + (r'^The above exception was the direct cause of the ' + r'following exception:\n\n', Generic.Traceback), + (r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'), + ], + 'intb': [ + (r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)', + bygroups(Text, Name.Builtin, Text, Number, Text, Name, Text)), + (r'^( File )("[^"]+")(, line )(\d+)(\n)', + bygroups(Text, Name.Builtin, Text, Number, Text)), + (r'^( )(.+)(\n)', + bygroups(Text, using(Python3Lexer), Text)), + (r'^([ \t]*)(\.\.\.)(\n)', + bygroups(Text, Comment, Text)), # for doctests... 
+ (r'^([^:]+)(: )(.+)(\n)', + bygroups(Generic.Error, Text, Name, Text), '#pop'), + (r'^([a-zA-Z_]\w*)(:?\n)', + bygroups(Generic.Error, Text), '#pop') + ], + } + + +class CythonLexer(RegexLexer): + """ + For Pyrex and `Cython `_ source code. + + .. versionadded:: 1.1 + """ + + name = 'Cython' + aliases = ['cython', 'pyx', 'pyrex'] + filenames = ['*.pyx', '*.pxd', '*.pxi'] + mimetypes = ['text/x-cython', 'application/x-cython'] + + tokens = { + 'root': [ + (r'\n', Text), + (r'^(\s*)("""(?:.|\n)*?""")', bygroups(Text, String.Doc)), + (r"^(\s*)('''(?:.|\n)*?''')", bygroups(Text, String.Doc)), + (r'[^\S\n]+', Text), + (r'#.*$', Comment), + (r'[]{}:(),;[]', Punctuation), + (r'\\\n', Text), + (r'\\', Text), + (r'(in|is|and|or|not)\b', Operator.Word), + (r'(<)([a-zA-Z0-9.?]+)(>)', + bygroups(Punctuation, Keyword.Type, Punctuation)), + (r'!=|==|<<|>>|[-~+/*%=<>&^|.?]', Operator), + (r'(from)(\d+)(<=)(\s+)(<)(\d+)(:)', + bygroups(Keyword, Number.Integer, Operator, Name, Operator, + Name, Punctuation)), + include('keywords'), + (r'(def|property)(\s+)', bygroups(Keyword, Text), 'funcname'), + (r'(cp?def)(\s+)', bygroups(Keyword, Text), 'cdef'), + (r'(class|struct)(\s+)', bygroups(Keyword, Text), 'classname'), + (r'(from)(\s+)', bygroups(Keyword, Text), 'fromimport'), + (r'(c?import)(\s+)', bygroups(Keyword, Text), 'import'), + include('builtins'), + include('backtick'), + ('(?:[rR]|[uU][rR]|[rR][uU])"""', String, 'tdqs'), + ("(?:[rR]|[uU][rR]|[rR][uU])'''", String, 'tsqs'), + ('(?:[rR]|[uU][rR]|[rR][uU])"', String, 'dqs'), + ("(?:[rR]|[uU][rR]|[rR][uU])'", String, 'sqs'), + ('[uU]?"""', String, combined('stringescape', 'tdqs')), + ("[uU]?'''", String, combined('stringescape', 'tsqs')), + ('[uU]?"', String, combined('stringescape', 'dqs')), + ("[uU]?'", String, combined('stringescape', 'sqs')), + include('name'), + include('numbers'), + ], + 'keywords': [ + (words(( + 'assert', 'break', 'by', 'continue', 'ctypedef', 'del', 'elif', + 'else', 'except', 'except?', 'exec', 
'finally', 'for', 'gil', + 'global', 'if', 'include', 'lambda', 'nogil', 'pass', 'print', + 'raise', 'return', 'try', 'while', 'yield', 'as', 'with'), suffix=r'\b'), + Keyword), + (r'(DEF|IF|ELIF|ELSE)\b', Comment.Preproc), + ], + 'builtins': [ + (words(( + '__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin', + 'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr', + 'classmethod', 'cmp', 'coerce', 'compile', 'complex', 'delattr', + 'dict', 'dir', 'divmod', 'enumerate', 'eval', 'execfile', 'exit', + 'file', 'filter', 'float', 'frozenset', 'getattr', 'globals', + 'hasattr', 'hash', 'hex', 'id', 'input', 'int', 'intern', 'isinstance', + 'issubclass', 'iter', 'len', 'list', 'locals', 'long', 'map', 'max', + 'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', 'property', + 'range', 'raw_input', 'reduce', 'reload', 'repr', 'reversed', + 'round', 'set', 'setattr', 'slice', 'sorted', 'staticmethod', + 'str', 'sum', 'super', 'tuple', 'type', 'unichr', 'unicode', + 'vars', 'xrange', 'zip'), prefix=r'(?`_, + a functional and object-oriented programming language + running on the CPython 3 VM. + + .. 
versionadded:: 1.6 + """ + name = 'dg' + aliases = ['dg'] + filenames = ['*.dg'] + mimetypes = ['text/x-dg'] + + tokens = { + 'root': [ + (r'\s+', Text), + (r'#.*?$', Comment.Single), + + (r'(?i)0b[01]+', Number.Bin), + (r'(?i)0o[0-7]+', Number.Oct), + (r'(?i)0x[0-9a-f]+', Number.Hex), + (r'(?i)[+-]?[0-9]+\.[0-9]+(e[+-]?[0-9]+)?j?', Number.Float), + (r'(?i)[+-]?[0-9]+e[+-]?\d+j?', Number.Float), + (r'(?i)[+-]?[0-9]+j?', Number.Integer), + + (r"(?i)(br|r?b?)'''", String, combined('stringescape', 'tsqs', 'string')), + (r'(?i)(br|r?b?)"""', String, combined('stringescape', 'tdqs', 'string')), + (r"(?i)(br|r?b?)'", String, combined('stringescape', 'sqs', 'string')), + (r'(?i)(br|r?b?)"', String, combined('stringescape', 'dqs', 'string')), + + (r"`\w+'*`", Operator), + (r'\b(and|in|is|or|where)\b', Operator.Word), + (r'[!$%&*+\-./:<-@\\^|~;,]+', Operator), + + (words(( + 'bool', 'bytearray', 'bytes', 'classmethod', 'complex', 'dict', 'dict\'', + 'float', 'frozenset', 'int', 'list', 'list\'', 'memoryview', 'object', + 'property', 'range', 'set', 'set\'', 'slice', 'staticmethod', 'str', 'super', + 'tuple', 'tuple\'', 'type'), prefix=r'(?') or line.startswith('+'): + # Colorize the prompt as such, + # then put rest of line into current_code_block + insertions.append((len(current_code_block), + [(0, Generic.Prompt, line[:2])])) + current_code_block += line[2:] + else: + # We have reached a non-prompt line! + # If we have stored prompt lines, need to process them first. + if current_code_block: + # Weave together the prompts and highlight code. + for item in do_insertions( + insertions, slexer.get_tokens_unprocessed(current_code_block)): + yield item + # Reset vars for next code block. + current_code_block = '' + insertions = [] + # Now process the actual line itself, this is output from R. + yield match.start(), Generic.Output, line + + # If we happen to end on a code block with nothing after it, need to + # process the last code block. 
This is neither elegant nor DRY so + # should be changed. + if current_code_block: + for item in do_insertions( + insertions, slexer.get_tokens_unprocessed(current_code_block)): + yield item + + +class SLexer(RegexLexer): + """ + For S, S-plus, and R source code. + + .. versionadded:: 0.10 + """ + + name = 'S' + aliases = ['splus', 's', 'r'] + filenames = ['*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron'] + mimetypes = ['text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', + 'text/x-R', 'text/x-r-history', 'text/x-r-profile'] + + builtins_base = ( + 'Arg', 'Conj', 'Cstack_info', 'Encoding', 'FALSE', + 'Filter', 'Find', 'I', 'ISOdate', 'ISOdatetime', 'Im', 'Inf', + 'La.svd', 'Map', 'Math.Date', 'Math.POSIXt', 'Math.data.frame', + 'Math.difftime', 'Math.factor', 'Mod', 'NA_character_', + 'NA_complex_', 'NA_real_', 'NCOL', 'NROW', 'NULLNA_integer_', 'NaN', + 'Negate', 'NextMethod', 'Ops.Date', 'Ops.POSIXt', 'Ops.data.frame', + 'Ops.difftime', 'Ops.factor', 'Ops.numeric_version', 'Ops.ordered', + 'Position', 'R.Version', 'R.home', 'R.version', 'R.version.string', + 'RNGkind', 'RNGversion', 'R_system_version', 'Re', 'Recall', + 'Reduce', 'Summary.Date', 'Summary.POSIXct', 'Summary.POSIXlt', + 'Summary.data.frame', 'Summary.difftime', 'Summary.factor', + 'Summary.numeric_version', 'Summary.ordered', 'Sys.Date', + 'Sys.chmod', 'Sys.getenv', 'Sys.getlocale', 'Sys.getpid', + 'Sys.glob', 'Sys.info', 'Sys.localeconv', 'Sys.readlink', + 'Sys.setFileTime', 'Sys.setenv', 'Sys.setlocale', 'Sys.sleep', + 'Sys.time', 'Sys.timezone', 'Sys.umask', 'Sys.unsetenv', + 'Sys.which', 'TRUE', 'UseMethod', 'Vectorize', 'abbreviate', 'abs', + 'acos', 'acosh', 'addNA', 'addTaskCallback', 'agrep', 'alist', + 'all', 'all.equal', 'all.equal.POSIXct', 'all.equal.character', + 'all.equal.default', 'all.equal.factor', 'all.equal.formula', + 'all.equal.language', 'all.equal.list', 'all.equal.numeric', + 'all.equal.raw', 'all.names', 'all.vars', 'any', 'anyDuplicated', + 'anyDuplicated.array', 
'anyDuplicated.data.frame', + 'anyDuplicated.default', 'anyDuplicated.matrix', 'aperm', + 'aperm.default', 'aperm.table', 'append', 'apply', 'args', + 'arrayInd', 'as.Date', 'as.Date.POSIXct', 'as.Date.POSIXlt', + 'as.Date.character', 'as.Date.date', 'as.Date.dates', + 'as.Date.default', 'as.Date.factor', 'as.Date.numeric', + 'as.POSIXct', 'as.POSIXct.Date', 'as.POSIXct.POSIXlt', + 'as.POSIXct.date', 'as.POSIXct.dates', 'as.POSIXct.default', + 'as.POSIXct.numeric', 'as.POSIXlt', 'as.POSIXlt.Date', + 'as.POSIXlt.POSIXct', 'as.POSIXlt.character', 'as.POSIXlt.date', + 'as.POSIXlt.dates', 'as.POSIXlt.default', 'as.POSIXlt.factor', + 'as.POSIXlt.numeric', 'as.array', 'as.array.default', 'as.call', + 'as.character', 'as.character.Date', 'as.character.POSIXt', + 'as.character.condition', 'as.character.default', + 'as.character.error', 'as.character.factor', 'as.character.hexmode', + 'as.character.numeric_version', 'as.character.octmode', + 'as.character.srcref', 'as.complex', 'as.data.frame', + 'as.data.frame.AsIs', 'as.data.frame.Date', 'as.data.frame.POSIXct', + 'as.data.frame.POSIXlt', 'as.data.frame.array', + 'as.data.frame.character', 'as.data.frame.complex', + 'as.data.frame.data.frame', 'as.data.frame.default', + 'as.data.frame.difftime', 'as.data.frame.factor', + 'as.data.frame.integer', 'as.data.frame.list', + 'as.data.frame.logical', 'as.data.frame.matrix', + 'as.data.frame.model.matrix', 'as.data.frame.numeric', + 'as.data.frame.numeric_version', 'as.data.frame.ordered', + 'as.data.frame.raw', 'as.data.frame.table', 'as.data.frame.ts', + 'as.data.frame.vector', 'as.difftime', 'as.double', + 'as.double.POSIXlt', 'as.double.difftime', 'as.environment', + 'as.expression', 'as.expression.default', 'as.factor', + 'as.function', 'as.function.default', 'as.hexmode', 'as.integer', + 'as.list', 'as.list.Date', 'as.list.POSIXct', 'as.list.data.frame', + 'as.list.default', 'as.list.environment', 'as.list.factor', + 'as.list.function', 'as.list.numeric_version', 
'as.logical', + 'as.logical.factor', 'as.matrix', 'as.matrix.POSIXlt', + 'as.matrix.data.frame', 'as.matrix.default', 'as.matrix.noquote', + 'as.name', 'as.null', 'as.null.default', 'as.numeric', + 'as.numeric_version', 'as.octmode', 'as.ordered', + 'as.package_version', 'as.pairlist', 'as.qr', 'as.raw', 'as.single', + 'as.single.default', 'as.symbol', 'as.table', 'as.table.default', + 'as.vector', 'as.vector.factor', 'asNamespace', 'asS3', 'asS4', + 'asin', 'asinh', 'assign', 'atan', 'atan2', 'atanh', + 'attachNamespace', 'attr', 'attr.all.equal', 'attributes', + 'autoload', 'autoloader', 'backsolve', 'baseenv', 'basename', + 'besselI', 'besselJ', 'besselK', 'besselY', 'beta', + 'bindingIsActive', 'bindingIsLocked', 'bindtextdomain', 'bitwAnd', + 'bitwNot', 'bitwOr', 'bitwShiftL', 'bitwShiftR', 'bitwXor', 'body', + 'bquote', 'browser', 'browserCondition', 'browserSetDebug', + 'browserText', 'builtins', 'by', 'by.data.frame', 'by.default', + 'bzfile', 'c.Date', 'c.POSIXct', 'c.POSIXlt', 'c.noquote', + 'c.numeric_version', 'call', 'callCC', 'capabilities', 'casefold', + 'cat', 'category', 'cbind', 'cbind.data.frame', 'ceiling', + 'char.expand', 'charToRaw', 'charmatch', 'chartr', 'check_tzones', + 'chol', 'chol.default', 'chol2inv', 'choose', 'class', + 'clearPushBack', 'close', 'close.connection', 'close.srcfile', + 'close.srcfilealias', 'closeAllConnections', 'col', 'colMeans', + 'colSums', 'colnames', 'commandArgs', 'comment', 'computeRestarts', + 'conditionCall', 'conditionCall.condition', 'conditionMessage', + 'conditionMessage.condition', 'conflicts', 'contributors', 'cos', + 'cosh', 'crossprod', 'cummax', 'cummin', 'cumprod', 'cumsum', 'cut', + 'cut.Date', 'cut.POSIXt', 'cut.default', 'dQuote', 'data.class', + 'data.matrix', 'date', 'debug', 'debugonce', + 'default.stringsAsFactors', 'delayedAssign', 'deparse', 'det', + 'determinant', 'determinant.matrix', 'dget', 'diag', 'diff', + 'diff.Date', 'diff.POSIXt', 'diff.default', 'difftime', 'digamma', + 'dim', 
'dim.data.frame', 'dimnames', 'dimnames.data.frame', 'dir', + 'dir.create', 'dirname', 'do.call', 'dput', 'drop', 'droplevels', + 'droplevels.data.frame', 'droplevels.factor', 'dump', 'duplicated', + 'duplicated.POSIXlt', 'duplicated.array', 'duplicated.data.frame', + 'duplicated.default', 'duplicated.matrix', + 'duplicated.numeric_version', 'dyn.load', 'dyn.unload', 'eapply', + 'eigen', 'else', 'emptyenv', 'enc2native', 'enc2utf8', + 'encodeString', 'enquote', 'env.profile', 'environment', + 'environmentIsLocked', 'environmentName', 'eval', 'eval.parent', + 'evalq', 'exists', 'exp', 'expand.grid', 'expm1', 'expression', + 'factor', 'factorial', 'fifo', 'file', 'file.access', 'file.append', + 'file.choose', 'file.copy', 'file.create', 'file.exists', + 'file.info', 'file.link', 'file.path', 'file.remove', 'file.rename', + 'file.show', 'file.symlink', 'find.package', 'findInterval', + 'findPackageEnv', 'findRestart', 'floor', 'flush', + 'flush.connection', 'force', 'formals', 'format', + 'format.AsIs', 'format.Date', 'format.POSIXct', 'format.POSIXlt', + 'format.data.frame', 'format.default', 'format.difftime', + 'format.factor', 'format.hexmode', 'format.info', + 'format.libraryIQR', 'format.numeric_version', 'format.octmode', + 'format.packageInfo', 'format.pval', 'format.summaryDefault', + 'formatC', 'formatDL', 'forwardsolve', 'gamma', 'gc', 'gc.time', + 'gcinfo', 'gctorture', 'gctorture2', 'get', 'getAllConnections', + 'getCallingDLL', 'getCallingDLLe', 'getConnection', + 'getDLLRegisteredRoutines', 'getDLLRegisteredRoutines.DLLInfo', + 'getDLLRegisteredRoutines.character', 'getElement', + 'getExportedValue', 'getHook', 'getLoadedDLLs', 'getNamespace', + 'getNamespaceExports', 'getNamespaceImports', 'getNamespaceInfo', + 'getNamespaceName', 'getNamespaceUsers', 'getNamespaceVersion', + 'getNativeSymbolInfo', 'getOption', 'getRversion', 'getSrcLines', + 'getTaskCallbackNames', 'geterrmessage', 'gettext', 'gettextf', + 'getwd', 'gl', 'globalenv', 'gregexpr', 
'grep', 'grepRaw', 'grepl', + 'gsub', 'gzcon', 'gzfile', 'head', 'iconv', 'iconvlist', + 'icuSetCollate', 'identical', 'identity', 'ifelse', 'importIntoEnv', + 'in', 'inherits', 'intToBits', 'intToUtf8', 'interaction', 'interactive', + 'intersect', 'inverse.rle', 'invisible', 'invokeRestart', + 'invokeRestartInteractively', 'is.R', 'is.array', 'is.atomic', + 'is.call', 'is.character', 'is.complex', 'is.data.frame', + 'is.double', 'is.element', 'is.environment', 'is.expression', + 'is.factor', 'is.finite', 'is.function', 'is.infinite', + 'is.integer', 'is.language', 'is.list', 'is.loaded', 'is.logical', + 'is.matrix', 'is.na', 'is.na.POSIXlt', 'is.na.data.frame', + 'is.na.numeric_version', 'is.name', 'is.nan', 'is.null', + 'is.numeric', 'is.numeric.Date', 'is.numeric.POSIXt', + 'is.numeric.difftime', 'is.numeric_version', 'is.object', + 'is.ordered', 'is.package_version', 'is.pairlist', 'is.primitive', + 'is.qr', 'is.raw', 'is.recursive', 'is.single', 'is.symbol', + 'is.table', 'is.unsorted', 'is.vector', 'isBaseNamespace', + 'isIncomplete', 'isNamespace', 'isOpen', 'isRestart', 'isS4', + 'isSeekable', 'isSymmetric', 'isSymmetric.matrix', 'isTRUE', + 'isatty', 'isdebugged', 'jitter', 'julian', 'julian.Date', + 'julian.POSIXt', 'kappa', 'kappa.default', 'kappa.lm', 'kappa.qr', + 'kronecker', 'l10n_info', 'labels', 'labels.default', 'lapply', + 'lazyLoad', 'lazyLoadDBexec', 'lazyLoadDBfetch', 'lbeta', 'lchoose', + 'length', 'length.POSIXlt', 'letters', 'levels', 'levels.default', + 'lfactorial', 'lgamma', 'library.dynam', 'library.dynam.unload', + 'licence', 'license', 'list.dirs', 'list.files', 'list2env', 'load', + 'loadNamespace', 'loadedNamespaces', 'loadingNamespaceInfo', + 'local', 'lockBinding', 'lockEnvironment', 'log', 'log10', 'log1p', + 'log2', 'logb', 'lower.tri', 'ls', 'make.names', 'make.unique', + 'makeActiveBinding', 'mapply', 'margin.table', 'mat.or.vec', + 'match', 'match.arg', 'match.call', 'match.fun', 'max', 'max.col', + 'mean', 'mean.Date', 
'mean.POSIXct', 'mean.POSIXlt', 'mean.default', + 'mean.difftime', 'mem.limits', 'memCompress', 'memDecompress', + 'memory.profile', 'merge', 'merge.data.frame', 'merge.default', + 'message', 'mget', 'min', 'missing', 'mode', 'month.abb', + 'month.name', 'months', 'months.Date', 'months.POSIXt', + 'months.abb', 'months.nameletters', 'names', 'names.POSIXlt', + 'namespaceExport', 'namespaceImport', 'namespaceImportClasses', + 'namespaceImportFrom', 'namespaceImportMethods', 'nargs', 'nchar', + 'ncol', 'new.env', 'ngettext', 'nlevels', 'noquote', 'norm', + 'normalizePath', 'nrow', 'numeric_version', 'nzchar', 'objects', + 'oldClass', 'on.exit', 'open', 'open.connection', 'open.srcfile', + 'open.srcfilealias', 'open.srcfilecopy', 'options', 'order', + 'ordered', 'outer', 'packBits', 'packageEvent', + 'packageHasNamespace', 'packageStartupMessage', 'package_version', + 'pairlist', 'parent.env', 'parent.frame', 'parse', + 'parseNamespaceFile', 'paste', 'paste0', 'path.expand', + 'path.package', 'pipe', 'pmatch', 'pmax', 'pmax.int', 'pmin', + 'pmin.int', 'polyroot', 'pos.to.env', 'pretty', 'pretty.default', + 'prettyNum', 'print', 'print.AsIs', 'print.DLLInfo', + 'print.DLLInfoList', 'print.DLLRegisteredRoutines', 'print.Date', + 'print.NativeRoutineList', 'print.POSIXct', 'print.POSIXlt', + 'print.by', 'print.condition', 'print.connection', + 'print.data.frame', 'print.default', 'print.difftime', + 'print.factor', 'print.function', 'print.hexmode', + 'print.libraryIQR', 'print.listof', 'print.noquote', + 'print.numeric_version', 'print.octmode', 'print.packageInfo', + 'print.proc_time', 'print.restart', 'print.rle', + 'print.simple.list', 'print.srcfile', 'print.srcref', + 'print.summary.table', 'print.summaryDefault', 'print.table', + 'print.warnings', 'prmatrix', 'proc.time', 'prod', 'prop.table', + 'provideDimnames', 'psigamma', 'pushBack', 'pushBackLength', 'q', + 'qr', 'qr.Q', 'qr.R', 'qr.X', 'qr.coef', 'qr.default', 'qr.fitted', + 'qr.qty', 'qr.qy', 'qr.resid', 
'qr.solve', 'quarters', + 'quarters.Date', 'quarters.POSIXt', 'quit', 'quote', 'range', + 'range.default', 'rank', 'rapply', 'raw', 'rawConnection', + 'rawConnectionValue', 'rawShift', 'rawToBits', 'rawToChar', 'rbind', + 'rbind.data.frame', 'rcond', 'read.dcf', 'readBin', 'readChar', + 'readLines', 'readRDS', 'readRenviron', 'readline', 'reg.finalizer', + 'regexec', 'regexpr', 'registerS3method', 'registerS3methods', + 'regmatches', 'remove', 'removeTaskCallback', 'rep', 'rep.Date', + 'rep.POSIXct', 'rep.POSIXlt', 'rep.factor', 'rep.int', + 'rep.numeric_version', 'rep_len', 'replace', 'replicate', + 'requireNamespace', 'restartDescription', 'restartFormals', + 'retracemem', 'rev', 'rev.default', 'rle', 'rm', 'round', + 'round.Date', 'round.POSIXt', 'row', 'row.names', + 'row.names.data.frame', 'row.names.default', 'rowMeans', 'rowSums', + 'rownames', 'rowsum', 'rowsum.data.frame', 'rowsum.default', + 'sQuote', 'sample', 'sample.int', 'sapply', 'save', 'save.image', + 'saveRDS', 'scale', 'scale.default', 'scan', 'search', + 'searchpaths', 'seek', 'seek.connection', 'seq', 'seq.Date', + 'seq.POSIXt', 'seq.default', 'seq.int', 'seq_along', 'seq_len', + 'sequence', 'serialize', 'set.seed', 'setHook', 'setNamespaceInfo', + 'setSessionTimeLimit', 'setTimeLimit', 'setdiff', 'setequal', + 'setwd', 'shQuote', 'showConnections', 'sign', 'signalCondition', + 'signif', 'simpleCondition', 'simpleError', 'simpleMessage', + 'simpleWarning', 'simplify2array', 'sin', 'single', + 'sinh', 'sink', 'sink.number', 'slice.index', 'socketConnection', + 'socketSelect', 'solve', 'solve.default', 'solve.qr', 'sort', + 'sort.POSIXlt', 'sort.default', 'sort.int', 'sort.list', 'split', + 'split.Date', 'split.POSIXct', 'split.data.frame', 'split.default', + 'sprintf', 'sqrt', 'srcfile', 'srcfilealias', 'srcfilecopy', + 'srcref', 'standardGeneric', 'stderr', 'stdin', 'stdout', 'stop', + 'stopifnot', 'storage.mode', 'strftime', 'strptime', 'strsplit', + 'strtoi', 'strtrim', 'structure', 
'strwrap', 'sub', 'subset', + 'subset.data.frame', 'subset.default', 'subset.matrix', + 'substitute', 'substr', 'substring', 'sum', 'summary', + 'summary.Date', 'summary.POSIXct', 'summary.POSIXlt', + 'summary.connection', 'summary.data.frame', 'summary.default', + 'summary.factor', 'summary.matrix', 'summary.proc_time', + 'summary.srcfile', 'summary.srcref', 'summary.table', + 'suppressMessages', 'suppressPackageStartupMessages', + 'suppressWarnings', 'svd', 'sweep', 'sys.call', 'sys.calls', + 'sys.frame', 'sys.frames', 'sys.function', 'sys.load.image', + 'sys.nframe', 'sys.on.exit', 'sys.parent', 'sys.parents', + 'sys.save.image', 'sys.source', 'sys.status', 'system', + 'system.file', 'system.time', 'system2', 't', 't.data.frame', + 't.default', 'table', 'tabulate', 'tail', 'tan', 'tanh', 'tapply', + 'taskCallbackManager', 'tcrossprod', 'tempdir', 'tempfile', + 'testPlatformEquivalence', 'textConnection', 'textConnectionValue', + 'toString', 'toString.default', 'tolower', 'topenv', 'toupper', + 'trace', 'traceback', 'tracemem', 'tracingState', 'transform', + 'transform.data.frame', 'transform.default', 'trigamma', 'trunc', + 'trunc.Date', 'trunc.POSIXt', 'truncate', 'truncate.connection', + 'try', 'tryCatch', 'typeof', 'unclass', 'undebug', 'union', + 'unique', 'unique.POSIXlt', 'unique.array', 'unique.data.frame', + 'unique.default', 'unique.matrix', 'unique.numeric_version', + 'units', 'units.difftime', 'unix.time', 'unlink', 'unlist', + 'unloadNamespace', 'unlockBinding', 'unname', 'unserialize', + 'unsplit', 'untrace', 'untracemem', 'unz', 'upper.tri', 'url', + 'utf8ToInt', 'vapply', 'version', 'warning', 'warnings', 'weekdays', + 'weekdays.Date', 'weekdays.POSIXt', 'which', 'which.max', + 'which.min', 'with', 'with.default', 'withCallingHandlers', + 'withRestarts', 'withVisible', 'within', 'within.data.frame', + 'within.list', 'write', 'write.dcf', 'writeBin', 'writeChar', + 'writeLines', 'xor', 'xor.hexmode', 'xor.octmode', + 'xpdrows.data.frame', 'xtfrm', 
'xtfrm.AsIs', 'xtfrm.Date', + 'xtfrm.POSIXct', 'xtfrm.POSIXlt', 'xtfrm.Surv', 'xtfrm.default', + 'xtfrm.difftime', 'xtfrm.factor', 'xtfrm.numeric_version', 'xzfile', + 'zapsmall' + ) + + tokens = { + 'comments': [ + (r'#.*$', Comment.Single), + ], + 'valid_name': [ + (r'[a-zA-Z][\w.]*', Text), + # can begin with ., but not if that is followed by a digit + (r'\.[a-zA-Z_][\w.]*', Text), + ], + 'punctuation': [ + (r'\[{1,2}|\]{1,2}|\(|\)|;|,', Punctuation), + ], + 'keywords': [ + (words(builtins_base, suffix=r'(?![\w. =])'), + Keyword.Pseudo), + (r'(if|else|for|while|repeat|in|next|break|return|switch|function)' + r'(?![\w.])', + Keyword.Reserved), + (r'(array|category|character|complex|double|function|integer|list|' + r'logical|matrix|numeric|vector|data.frame|c)' + r'(?![\w.])', + Keyword.Type), + (r'(library|require|attach|detach|source)' + r'(?![\w.])', + Keyword.Namespace) + ], + 'operators': [ + (r'<>?|-|==|<=|>=|<|>|&&?|!=|\|\|?|\?', Operator), + (r'\*|\+|\^|/|!|%[^%]*%|=|~|\$|@|:{1,3}', Operator) + ], + 'builtin_symbols': [ + (r'(NULL|NA(_(integer|real|complex|character)_)?|' + r'letters|LETTERS|Inf|TRUE|FALSE|NaN|pi|\.\.(\.|[0-9]+))' + r'(?![\w.])', + Keyword.Constant), + (r'(T|F)\b', Name.Builtin.Pseudo), + ], + 'numbers': [ + # hex number + (r'0[xX][a-fA-F0-9]+([pP][0-9]+)?[Li]?', Number.Hex), + # decimal number + (r'[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+|\.)([eE][+-]?[0-9]+)?[Li]?', + Number), + ], + 'statements': [ + include('comments'), + # whitespaces + (r'\s+', Text), + (r'`.*?`', String.Backtick), + (r'\'', String, 'string_squote'), + (r'\"', String, 'string_dquote'), + include('builtin_symbols'), + include('numbers'), + include('keywords'), + include('punctuation'), + include('operators'), + include('valid_name'), + ], + 'root': [ + include('statements'), + # blocks: + (r'\{|\}', Punctuation), + # (r'\{', Punctuation, 'block'), + (r'.', Text), + ], + # 'block': [ + # include('statements'), + # ('\{', Punctuation, '#push'), + # ('\}', Punctuation, '#pop') + 
# ], + 'string_squote': [ + (r'([^\'\\]|\\.)*\'', String, '#pop'), + ], + 'string_dquote': [ + (r'([^"\\]|\\.)*"', String, '#pop'), + ], + } + + def analyse_text(text): + if re.search(r'[a-z0-9_\])\s]<-(?!-)', text): + return 0.11 + + +class RdLexer(RegexLexer): + """ + Pygments Lexer for R documentation (Rd) files + + This is a very minimal implementation, highlighting little more + than the macros. A description of Rd syntax is found in `Writing R + Extensions `_ + and `Parsing Rd files `_. + + .. versionadded:: 1.6 + """ + name = 'Rd' + aliases = ['rd'] + filenames = ['*.Rd'] + mimetypes = ['text/x-r-doc'] + + # To account for verbatim / LaTeX-like / and R-like areas + # would require parsing. + tokens = { + 'root': [ + # catch escaped brackets and percent sign + (r'\\[\\{}%]', String.Escape), + # comments + (r'%.*$', Comment), + # special macros with no arguments + (r'\\(?:cr|l?dots|R|tab)\b', Keyword.Constant), + # macros + (r'\\[a-zA-Z]+\b', Keyword), + # special preprocessor macros + (r'^\s*#(?:ifn?def|endif).*\b', Comment.Preproc), + # non-escaped brackets + (r'[{}]', Name.Builtin), + # everything else + (r'[^\\%\n{}]+', Text), + (r'.', Text), + ] + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/rdf.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/rdf.py new file mode 100644 index 0000000..292b1ae --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/rdf.py @@ -0,0 +1,99 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.rdf + ~~~~~~~~~~~~~~~~~~~ + + Lexers for semantic web and RDF query languages and markup. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import re + +from pygments.lexer import RegexLexer, bygroups, default +from pygments.token import Keyword, Punctuation, String, Number, Operator, \ + Whitespace, Name, Literal, Comment, Text + +__all__ = ['SparqlLexer'] + + +class SparqlLexer(RegexLexer): + """ + Lexer for `SPARQL `_ query language. + + .. versionadded:: 2.0 + """ + name = 'SPARQL' + aliases = ['sparql'] + filenames = ['*.rq', '*.sparql'] + mimetypes = ['application/sparql-query'] + + flags = re.IGNORECASE + + tokens = { + 'root': [ + (r'\s+', Whitespace), + (r'(select|construct|describe|ask|where|filter|group\s+by|minus|' + r'distinct|reduced|from named|from|order\s+by|limit|' + r'offset|bindings|load|clear|drop|create|add|move|copy|' + r'insert\s+data|delete\s+data|delete\s+where|delete|insert|' + r'using named|using|graph|default|named|all|optional|service|' + r'silent|bind|union|not in|in|as|a)', Keyword), + (r'(prefix|base)(\s+)([a-z][\w-]*)(\s*)(\:)', + bygroups(Keyword, Whitespace, Name.Namespace, Whitespace, + Punctuation)), + (r'\?[a-z_]\w*', Name.Variable), + (r'<[^>]+>', Name.Label), + (r'([a-z][\w-]*)(\:)([a-z][\w-]*)', + bygroups(Name.Namespace, Punctuation, Name.Tag)), + (r'(str|lang|langmatches|datatype|bound|iri|uri|bnode|rand|abs|' + r'ceil|floor|round|concat|strlen|ucase|lcase|encode_for_uri|' + r'contains|strstarts|strends|strbefore|strafter|year|month|day|' + r'hours|minutes|seconds|timezone|tz|now|md5|sha1|sha256|sha384|' + r'sha512|coalesce|if|strlang|strdt|sameterm|isiri|isuri|isblank|' + r'isliteral|isnumeric|regex|substr|replace|exists|not exists|' + r'count|sum|min|max|avg|sample|group_concat|separator)\b', + Name.Function), + (r'(true|false)', Literal), + (r'[+\-]?\d*\.\d+', Number.Float), + (r'[+\-]?\d*(:?\.\d+)?E[+\-]?\d+', Number.Float), + (r'[+\-]?\d+', Number.Integer), + (r'(\|\||&&|=|\*|\-|\+|/)', Operator), + (r'[(){}.;,:^]', Punctuation), + (r'#[^\n]+', Comment), + (r'"""', String, 'triple-double-quoted-string'), + (r'"', String, 
'single-double-quoted-string'), + (r"'''", String, 'triple-single-quoted-string'), + (r"'", String, 'single-single-quoted-string'), + ], + 'triple-double-quoted-string': [ + (r'"""', String, 'end-of-string'), + (r'[^\\]+', String), + (r'\\', String, 'string-escape'), + ], + 'single-double-quoted-string': [ + (r'"', String, 'end-of-string'), + (r'[^"\\\n]+', String), + (r'\\', String, 'string-escape'), + ], + 'triple-single-quoted-string': [ + (r"'''", String, 'end-of-string'), + (r'[^\\]+', String), + (r'\\', String, 'string-escape'), + ], + 'single-single-quoted-string': [ + (r"'", String, 'end-of-string'), + (r"[^'\\\n]+", String), + (r'\\', String, 'string-escape'), + ], + 'string-escape': [ + (r'.', String, '#pop'), + ], + 'end-of-string': [ + (r'(@)([a-z]+(:?-[a-z0-9]+)*)', + bygroups(Operator, Name.Function), '#pop:2'), + (r'\^\^', Operator, '#pop:2'), + default('#pop:2'), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/rebol.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/rebol.py new file mode 100644 index 0000000..8ff97ee --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/rebol.py @@ -0,0 +1,431 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.rebol + ~~~~~~~~~~~~~~~~~~~~~ + + Lexers for the REBOL and related languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, bygroups +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Generic, Whitespace + +__all__ = ['RebolLexer', 'RedLexer'] + + +class RebolLexer(RegexLexer): + """ + A `REBOL `_ lexer. + + .. 
versionadded:: 1.1 + """ + name = 'REBOL' + aliases = ['rebol'] + filenames = ['*.r', '*.r3', '*.reb'] + mimetypes = ['text/x-rebol'] + + flags = re.IGNORECASE | re.MULTILINE + + escape_re = r'(?:\^\([0-9a-f]{1,4}\)*)' + + def word_callback(lexer, match): + word = match.group() + + if re.match(".*:$", word): + yield match.start(), Generic.Subheading, word + elif re.match( + r'(native|alias|all|any|as-string|as-binary|bind|bound\?|case|' + r'catch|checksum|comment|debase|dehex|exclude|difference|disarm|' + r'either|else|enbase|foreach|remove-each|form|free|get|get-env|if|' + r'in|intersect|loop|minimum-of|maximum-of|mold|new-line|' + r'new-line\?|not|now|prin|print|reduce|compose|construct|repeat|' + r'reverse|save|script\?|set|shift|switch|throw|to-hex|trace|try|' + r'type\?|union|unique|unless|unprotect|unset|until|use|value\?|' + r'while|compress|decompress|secure|open|close|read|read-io|' + r'write-io|write|update|query|wait|input\?|exp|log-10|log-2|' + r'log-e|square-root|cosine|sine|tangent|arccosine|arcsine|' + r'arctangent|protect|lowercase|uppercase|entab|detab|connected\?|' + r'browse|launch|stats|get-modes|set-modes|to-local-file|' + r'to-rebol-file|encloak|decloak|create-link|do-browser|bind\?|' + r'hide|draw|show|size-text|textinfo|offset-to-caret|' + r'caret-to-offset|local-request-file|rgb-to-hsv|hsv-to-rgb|' + r'crypt-strength\?|dh-make-key|dh-generate-key|dh-compute-key|' + r'dsa-make-key|dsa-generate-key|dsa-make-signature|' + r'dsa-verify-signature|rsa-make-key|rsa-generate-key|' + r'rsa-encrypt)$', word): + yield match.start(), Name.Builtin, word + elif re.match( + r'(add|subtract|multiply|divide|remainder|power|and~|or~|xor~|' + r'minimum|maximum|negate|complement|absolute|random|head|tail|' + r'next|back|skip|at|pick|first|second|third|fourth|fifth|sixth|' + r'seventh|eighth|ninth|tenth|last|path|find|select|make|to|copy\*|' + r'insert|remove|change|poke|clear|trim|sort|min|max|abs|cp|' + r'copy)$', word): + yield match.start(), Name.Function, 
word + elif re.match( + r'(error|source|input|license|help|install|echo|Usage|with|func|' + r'throw-on-error|function|does|has|context|probe|\?\?|as-pair|' + r'mod|modulo|round|repend|about|set-net|append|join|rejoin|reform|' + r'remold|charset|array|replace|move|extract|forskip|forall|alter|' + r'first+|also|take|for|forever|dispatch|attempt|what-dir|' + r'change-dir|clean-path|list-dir|dirize|rename|split-path|delete|' + r'make-dir|delete-dir|in-dir|confirm|dump-obj|upgrade|what|' + r'build-tag|process-source|build-markup|decode-cgi|read-cgi|' + r'write-user|save-user|set-user-name|protect-system|parse-xml|' + r'cvs-date|cvs-version|do-boot|get-net-info|desktop|layout|' + r'scroll-para|get-face|alert|set-face|uninstall|unfocus|' + r'request-dir|center-face|do-events|net-error|decode-url|' + r'parse-header|parse-header-date|parse-email-addrs|import-email|' + r'send|build-attach-body|resend|show-popup|hide-popup|open-events|' + r'find-key-face|do-face|viewtop|confine|find-window|' + r'insert-event-func|remove-event-func|inform|dump-pane|dump-face|' + r'flag-face|deflag-face|clear-fields|read-net|vbug|path-thru|' + r'read-thru|load-thru|do-thru|launch-thru|load-image|' + r'request-download|do-face-alt|set-font|set-para|get-style|' + r'set-style|make-face|stylize|choose|hilight-text|hilight-all|' + r'unlight-text|focus|scroll-drag|clear-face|reset-face|scroll-face|' + r'resize-face|load-stock|load-stock-block|notify|request|flash|' + r'request-color|request-pass|request-text|request-list|' + r'request-date|request-file|dbug|editor|link-relative-path|' + r'emailer|parse-error)$', word): + yield match.start(), Keyword.Namespace, word + elif re.match( + r'(halt|quit|do|load|q|recycle|call|run|ask|parse|view|unview|' + r'return|exit|break)$', word): + yield match.start(), Name.Exception, word + elif re.match('REBOL$', word): + yield match.start(), Generic.Heading, word + elif re.match("to-.*", word): + yield match.start(), Keyword, word + elif 
re.match('(\+|-|\*|/|//|\*\*|and|or|xor|=\?|=|==|<>|<|>|<=|>=)$', + word): + yield match.start(), Operator, word + elif re.match(".*\?$", word): + yield match.start(), Keyword, word + elif re.match(".*\!$", word): + yield match.start(), Keyword.Type, word + elif re.match("'.*", word): + yield match.start(), Name.Variable.Instance, word # lit-word + elif re.match("#.*", word): + yield match.start(), Name.Label, word # issue + elif re.match("%.*", word): + yield match.start(), Name.Decorator, word # file + else: + yield match.start(), Name.Variable, word + + tokens = { + 'root': [ + (r'[^R]+', Comment), + (r'REBOL\s+\[', Generic.Strong, 'script'), + (r'R', Comment) + ], + 'script': [ + (r'\s+', Text), + (r'#"', String.Char, 'char'), + (r'#\{[0-9a-f]*\}', Number.Hex), + (r'2#\{', Number.Hex, 'bin2'), + (r'64#\{[0-9a-z+/=\s]*\}', Number.Hex), + (r'"', String, 'string'), + (r'\{', String, 'string2'), + (r';#+.*\n', Comment.Special), + (r';\*+.*\n', Comment.Preproc), + (r';.*\n', Comment), + (r'%"', Name.Decorator, 'stringFile'), + (r'%[^(^{")\s\[\]]+', Name.Decorator), + (r'[+-]?([a-z]{1,3})?\$\d+(\.\d+)?', Number.Float), # money + (r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other), # time + (r'\d+[\-/][0-9a-z]+[\-/]\d+(\/\d+\:\d+((\:\d+)?' 
+ r'([.\d+]?([+-]?\d+:\d+)?)?)?)?', String.Other), # date + (r'\d+(\.\d+)+\.\d+', Keyword.Constant), # tuple + (r'\d+X\d+', Keyword.Constant), # pair + (r'[+-]?\d+(\'\d+)?([.,]\d*)?E[+-]?\d+', Number.Float), + (r'[+-]?\d+(\'\d+)?[.,]\d*', Number.Float), + (r'[+-]?\d+(\'\d+)?', Number), + (r'[\[\]()]', Generic.Strong), + (r'[a-z]+[^(^{"\s:)]*://[^(^{"\s)]*', Name.Decorator), # url + (r'mailto:[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # url + (r'[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # email + (r'comment\s"', Comment, 'commentString1'), + (r'comment\s\{', Comment, 'commentString2'), + (r'comment\s\[', Comment, 'commentBlock'), + (r'comment\s[^(\s{"\[]+', Comment), + (r'/[^(^{")\s/[\]]*', Name.Attribute), + (r'([^(^{")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback), + (r'<[\w:.-]*>', Name.Tag), + (r'<[^(<>\s")]+', Name.Tag, 'tag'), + (r'([^(^{")\s]+)', Text), + ], + 'string': [ + (r'[^(^")]+', String), + (escape_re, String.Escape), + (r'[(|)]+', String), + (r'\^.', String.Escape), + (r'"', String, '#pop'), + ], + 'string2': [ + (r'[^(^{})]+', String), + (escape_re, String.Escape), + (r'[(|)]+', String), + (r'\^.', String.Escape), + (r'\{', String, '#push'), + (r'\}', String, '#pop'), + ], + 'stringFile': [ + (r'[^(^")]+', Name.Decorator), + (escape_re, Name.Decorator), + (r'\^.', Name.Decorator), + (r'"', Name.Decorator, '#pop'), + ], + 'char': [ + (escape_re + '"', String.Char, '#pop'), + (r'\^."', String.Char, '#pop'), + (r'."', String.Char, '#pop'), + ], + 'tag': [ + (escape_re, Name.Tag), + (r'"', Name.Tag, 'tagString'), + (r'[^(<>\r\n")]+', Name.Tag), + (r'>', Name.Tag, '#pop'), + ], + 'tagString': [ + (r'[^(^")]+', Name.Tag), + (escape_re, Name.Tag), + (r'[(|)]+', Name.Tag), + (r'\^.', Name.Tag), + (r'"', Name.Tag, '#pop'), + ], + 'tuple': [ + (r'(\d+\.)+', Keyword.Constant), + (r'\d+', Keyword.Constant, '#pop'), + ], + 'bin2': [ + (r'\s+', Number.Hex), + (r'([01]\s*){8}', Number.Hex), + (r'\}', Number.Hex, '#pop'), + ], + 'commentString1': [ + 
(r'[^(^")]+', Comment), + (escape_re, Comment), + (r'[(|)]+', Comment), + (r'\^.', Comment), + (r'"', Comment, '#pop'), + ], + 'commentString2': [ + (r'[^(^{})]+', Comment), + (escape_re, Comment), + (r'[(|)]+', Comment), + (r'\^.', Comment), + (r'\{', Comment, '#push'), + (r'\}', Comment, '#pop'), + ], + 'commentBlock': [ + (r'\[', Comment, '#push'), + (r'\]', Comment, '#pop'), + (r'"', Comment, "commentString1"), + (r'\{', Comment, "commentString2"), + (r'[^(\[\]"{)]+', Comment), + ], + } + + def analyse_text(text): + """ + Check if code contains REBOL header and so it probably not R code + """ + if re.match(r'^\s*REBOL\s*\[', text, re.IGNORECASE): + # The code starts with REBOL header + return 1.0 + elif re.search(r'\s*REBOL\s*[', text, re.IGNORECASE): + # The code contains REBOL header but also some text before it + return 0.5 + + +class RedLexer(RegexLexer): + """ + A `Red-language `_ lexer. + + .. versionadded:: 2.0 + """ + name = 'Red' + aliases = ['red', 'red/system'] + filenames = ['*.red', '*.reds'] + mimetypes = ['text/x-red', 'text/x-red-system'] + + flags = re.IGNORECASE | re.MULTILINE + + escape_re = r'(?:\^\([0-9a-f]{1,4}\)*)' + + def word_callback(lexer, match): + word = match.group() + + if re.match(".*:$", word): + yield match.start(), Generic.Subheading, word + elif re.match(r'(if|unless|either|any|all|while|until|loop|repeat|' + r'foreach|forall|func|function|does|has|switch|' + r'case|reduce|compose|get|set|print|prin|equal\?|' + r'not-equal\?|strict-equal\?|lesser\?|greater\?|lesser-or-equal\?|' + r'greater-or-equal\?|same\?|not|type\?|stats|' + r'bind|union|replace|charset|routine)$', word): + yield match.start(), Name.Builtin, word + elif re.match(r'(make|random|reflect|to|form|mold|absolute|add|divide|multiply|negate|' + r'power|remainder|round|subtract|even\?|odd\?|and~|complement|or~|xor~|' + r'append|at|back|change|clear|copy|find|head|head\?|index\?|insert|' + r'length\?|next|pick|poke|remove|reverse|select|sort|skip|swap|tail|tail\?|' 
+ r'take|trim|create|close|delete|modify|open|open\?|query|read|rename|' + r'update|write)$', word): + yield match.start(), Name.Function, word + elif re.match(r'(yes|on|no|off|true|false|tab|cr|lf|newline|escape|slash|sp|space|null|' + r'none|crlf|dot|null-byte)$', word): + yield match.start(), Name.Builtin.Pseudo, word + elif re.match(r'(#system-global|#include|#enum|#define|#either|#if|#import|#export|' + r'#switch|#default|#get-definition)$', word): + yield match.start(), Keyword.Namespace, word + elif re.match(r'(system|halt|quit|quit-return|do|load|q|recycle|call|run|ask|parse|' + r'raise-error|return|exit|break|alias|push|pop|probe|\?\?|spec-of|body-of|' + r'quote|forever)$', word): + yield match.start(), Name.Exception, word + elif re.match(r'(action\?|block\?|char\?|datatype\?|file\?|function\?|get-path\?|zero\?|' + r'get-word\?|integer\?|issue\?|lit-path\?|lit-word\?|logic\?|native\?|' + r'op\?|paren\?|path\?|refinement\?|set-path\?|set-word\?|string\?|unset\?|' + r'any-struct\?|none\?|word\?|any-series\?)$', word): + yield match.start(), Keyword, word + elif re.match(r'(JNICALL|stdcall|cdecl|infix)$', word): + yield match.start(), Keyword.Namespace, word + elif re.match("to-.*", word): + yield match.start(), Keyword, word + elif re.match('(\+|-\*\*|-|\*\*|//|/|\*|and|or|xor|=\?|===|==|=|<>|<=|>=|' + '<<<|>>>|<<|>>|<|>%)$', word): + yield match.start(), Operator, word + elif re.match(".*\!$", word): + yield match.start(), Keyword.Type, word + elif re.match("'.*", word): + yield match.start(), Name.Variable.Instance, word # lit-word + elif re.match("#.*", word): + yield match.start(), Name.Label, word # issue + elif re.match("%.*", word): + yield match.start(), Name.Decorator, word # file + elif re.match(":.*", word): + yield match.start(), Generic.Subheading, word # get-word + else: + yield match.start(), Name.Variable, word + + tokens = { + 'root': [ + (r'[^R]+', Comment), + (r'Red/System\s+\[', Generic.Strong, 'script'), + (r'Red\s+\[', Generic.Strong, 
'script'), + (r'R', Comment) + ], + 'script': [ + (r'\s+', Text), + (r'#"', String.Char, 'char'), + (r'#\{[0-9a-f\s]*\}', Number.Hex), + (r'2#\{', Number.Hex, 'bin2'), + (r'64#\{[0-9a-z+/=\s]*\}', Number.Hex), + (r'([0-9a-f]+)(h)((\s)|(?=[\[\]{}"()]))', + bygroups(Number.Hex, Name.Variable, Whitespace)), + (r'"', String, 'string'), + (r'\{', String, 'string2'), + (r';#+.*\n', Comment.Special), + (r';\*+.*\n', Comment.Preproc), + (r';.*\n', Comment), + (r'%"', Name.Decorator, 'stringFile'), + (r'%[^(^{")\s\[\]]+', Name.Decorator), + (r'[+-]?([a-z]{1,3})?\$\d+(\.\d+)?', Number.Float), # money + (r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other), # time + (r'\d+[\-/][0-9a-z]+[\-/]\d+(/\d+:\d+((:\d+)?' + r'([\.\d+]?([+-]?\d+:\d+)?)?)?)?', String.Other), # date + (r'\d+(\.\d+)+\.\d+', Keyword.Constant), # tuple + (r'\d+X\d+', Keyword.Constant), # pair + (r'[+-]?\d+(\'\d+)?([.,]\d*)?E[+-]?\d+', Number.Float), + (r'[+-]?\d+(\'\d+)?[.,]\d*', Number.Float), + (r'[+-]?\d+(\'\d+)?', Number), + (r'[\[\]()]', Generic.Strong), + (r'[a-z]+[^(^{"\s:)]*://[^(^{"\s)]*', Name.Decorator), # url + (r'mailto:[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # url + (r'[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # email + (r'comment\s"', Comment, 'commentString1'), + (r'comment\s\{', Comment, 'commentString2'), + (r'comment\s\[', Comment, 'commentBlock'), + (r'comment\s[^(\s{"\[]+', Comment), + (r'/[^(^{^")\s/[\]]*', Name.Attribute), + (r'([^(^{^")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback), + (r'<[\w:.-]*>', Name.Tag), + (r'<[^(<>\s")]+', Name.Tag, 'tag'), + (r'([^(^{")\s]+)', Text), + ], + 'string': [ + (r'[^(^")]+', String), + (escape_re, String.Escape), + (r'[(|)]+', String), + (r'\^.', String.Escape), + (r'"', String, '#pop'), + ], + 'string2': [ + (r'[^(^{})]+', String), + (escape_re, String.Escape), + (r'[(|)]+', String), + (r'\^.', String.Escape), + (r'\{', String, '#push'), + (r'\}', String, '#pop'), + ], + 'stringFile': [ + (r'[^(^")]+', Name.Decorator), + (escape_re, 
Name.Decorator), + (r'\^.', Name.Decorator), + (r'"', Name.Decorator, '#pop'), + ], + 'char': [ + (escape_re + '"', String.Char, '#pop'), + (r'\^."', String.Char, '#pop'), + (r'."', String.Char, '#pop'), + ], + 'tag': [ + (escape_re, Name.Tag), + (r'"', Name.Tag, 'tagString'), + (r'[^(<>\r\n")]+', Name.Tag), + (r'>', Name.Tag, '#pop'), + ], + 'tagString': [ + (r'[^(^")]+', Name.Tag), + (escape_re, Name.Tag), + (r'[(|)]+', Name.Tag), + (r'\^.', Name.Tag), + (r'"', Name.Tag, '#pop'), + ], + 'tuple': [ + (r'(\d+\.)+', Keyword.Constant), + (r'\d+', Keyword.Constant, '#pop'), + ], + 'bin2': [ + (r'\s+', Number.Hex), + (r'([01]\s*){8}', Number.Hex), + (r'\}', Number.Hex, '#pop'), + ], + 'commentString1': [ + (r'[^(^")]+', Comment), + (escape_re, Comment), + (r'[(|)]+', Comment), + (r'\^.', Comment), + (r'"', Comment, '#pop'), + ], + 'commentString2': [ + (r'[^(^{})]+', Comment), + (escape_re, Comment), + (r'[(|)]+', Comment), + (r'\^.', Comment), + (r'\{', Comment, '#push'), + (r'\}', Comment, '#pop'), + ], + 'commentBlock': [ + (r'\[', Comment, '#push'), + (r'\]', Comment, '#pop'), + (r'"', Comment, "commentString1"), + (r'\{', Comment, "commentString2"), + (r'[^(\[\]"{)]+', Comment), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/resource.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/resource.py new file mode 100644 index 0000000..6a0da2f --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/resource.py @@ -0,0 +1,84 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.resource + ~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexer for resource definition files. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import re + +from pygments.lexer import RegexLexer, bygroups, words +from pygments.token import Comment, String, Number, Operator, Text, \ + Keyword, Name + +__all__ = ['ResourceLexer'] + + +class ResourceLexer(RegexLexer): + """Lexer for `ICU Resource bundles + `_. + + .. versionadded:: 2.0 + """ + name = 'ResourceBundle' + aliases = ['resource', 'resourcebundle'] + filenames = ['*.txt'] + + _types = (':table', ':array', ':string', ':bin', ':import', ':intvector', + ':int', ':alias') + + flags = re.MULTILINE | re.IGNORECASE + tokens = { + 'root': [ + (r'//.*?$', Comment), + (r'"', String, 'string'), + (r'-?\d+', Number.Integer), + (r'[,{}]', Operator), + (r'([^\s{:]+)(\s*)(%s?)' % '|'.join(_types), + bygroups(Name, Text, Keyword)), + (r'\s+', Text), + (words(_types), Keyword), + ], + 'string': [ + (r'(\\x[0-9a-f]{2}|\\u[0-9a-f]{4}|\\U00[0-9a-f]{6}|' + r'\\[0-7]{1,3}|\\c.|\\[abtnvfre\'"?\\]|\\\{|[^"{\\])+', String), + (r'\{', String.Escape, 'msgname'), + (r'"', String, '#pop') + ], + 'msgname': [ + (r'([^{},]+)(\s*)', bygroups(Name, String.Escape), ('#pop', 'message')) + ], + 'message': [ + (r'\{', String.Escape, 'msgname'), + (r'\}', String.Escape, '#pop'), + (r'(,)(\s*)([a-z]+)(\s*\})', + bygroups(Operator, String.Escape, Keyword, String.Escape), '#pop'), + (r'(,)(\s*)([a-z]+)(\s*)(,)(\s*)(offset)(\s*)(:)(\s*)(-?\d+)(\s*)', + bygroups(Operator, String.Escape, Keyword, String.Escape, Operator, + String.Escape, Operator.Word, String.Escape, Operator, + String.Escape, Number.Integer, String.Escape), 'choice'), + (r'(,)(\s*)([a-z]+)(\s*)(,)(\s*)', + bygroups(Operator, String.Escape, Keyword, String.Escape, Operator, + String.Escape), 'choice'), + (r'\s+', String.Escape) + ], + 'choice': [ + (r'(=|<|>|<=|>=|!=)(-?\d+)(\s*\{)', + bygroups(Operator, Number.Integer, String.Escape), 'message'), + (r'([a-z]+)(\s*\{)', bygroups(Keyword.Type, String.Escape), 'str'), + (r'\}', String.Escape, ('#pop', '#pop')), + (r'\s+', String.Escape) + ], + 'str': [ + (r'\}', 
String.Escape, '#pop'), + (r'\{', String.Escape, 'msgname'), + (r'[^{}]+', String) + ] + } + + def analyse_text(text): + return text.startswith('root:table') diff --git a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/_robotframeworklexer.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/robotframework.py similarity index 98% rename from plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/_robotframeworklexer.py rename to plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/robotframework.py index bc64e12..7b6f556 100644 --- a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/_robotframeworklexer.py +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/robotframework.py @@ -1,11 +1,11 @@ # -*- coding: utf-8 -*- """ - pygments.lexers._robotframeworklexer - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + pygments.lexers.robotframework + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Lexer for Robot Framework. - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ @@ -27,6 +27,9 @@ import re from pygments.lexer import Lexer from pygments.token import Token +from pygments.util import text_type + +__all__ = ['RobotFrameworkLexer'] HEADING = Token.Generic.Heading @@ -57,10 +60,10 @@ class RobotFrameworkLexer(Lexer): Supports both space and pipe separated plain text formats. - *New in Pygments 1.6.* + .. 
versionadded:: 1.6 """ name = 'RobotFramework' - aliases = ['RobotFramework', 'robotframework'] + aliases = ['robotframework'] filenames = ['*.txt', '*.robot'] mimetypes = ['text/x-robotframework'] @@ -77,7 +80,7 @@ class RobotFrameworkLexer(Lexer): for value, token in row_tokenizer.tokenize(row): for value, token in var_tokenizer.tokenize(value, token): if value: - yield index, token, unicode(value) + yield index, token, text_type(value) index += len(value) diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/ruby.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/ruby.py new file mode 100644 index 0000000..d346df9 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/ruby.py @@ -0,0 +1,518 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.ruby + ~~~~~~~~~~~~~~~~~~~~ + + Lexers for Ruby and related languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import Lexer, RegexLexer, ExtendedRegexLexer, include, \ + bygroups, default, LexerContext, do_insertions, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Error, Generic +from pygments.util import shebang_matches + +__all__ = ['RubyLexer', 'RubyConsoleLexer', 'FancyLexer'] + +line_re = re.compile('.*?\n') + + +RUBY_OPERATORS = ( + '*', '**', '-', '+', '-@', '+@', '/', '%', '&', '|', '^', '`', '~', + '[]', '[]=', '<<', '>>', '<', '<>', '<=>', '>', '>=', '==', '===' +) + + +class RubyLexer(ExtendedRegexLexer): + """ + For `Ruby `_ source code. 
+ """ + + name = 'Ruby' + aliases = ['rb', 'ruby', 'duby'] + filenames = ['*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', + '*.rbx', '*.duby'] + mimetypes = ['text/x-ruby', 'application/x-ruby'] + + flags = re.DOTALL | re.MULTILINE + + def heredoc_callback(self, match, ctx): + # okay, this is the hardest part of parsing Ruby... + # match: 1 = <<-?, 2 = quote? 3 = name 4 = quote? 5 = rest of line + + start = match.start(1) + yield start, Operator, match.group(1) # <<-? + yield match.start(2), String.Heredoc, match.group(2) # quote ", ', ` + yield match.start(3), Name.Constant, match.group(3) # heredoc name + yield match.start(4), String.Heredoc, match.group(4) # quote again + + heredocstack = ctx.__dict__.setdefault('heredocstack', []) + outermost = not bool(heredocstack) + heredocstack.append((match.group(1) == '<<-', match.group(3))) + + ctx.pos = match.start(5) + ctx.end = match.end(5) + # this may find other heredocs + for i, t, v in self.get_tokens_unprocessed(context=ctx): + yield i, t, v + ctx.pos = match.end() + + if outermost: + # this is the outer heredoc again, now we can process them all + for tolerant, hdname in heredocstack: + lines = [] + for match in line_re.finditer(ctx.text, ctx.pos): + if tolerant: + check = match.group().strip() + else: + check = match.group().rstrip() + if check == hdname: + for amatch in lines: + yield amatch.start(), String.Heredoc, amatch.group() + yield match.start(), Name.Constant, match.group() + ctx.pos = match.end() + break + else: + lines.append(match) + else: + # end of heredoc not found -- error! 
+ for amatch in lines: + yield amatch.start(), Error, amatch.group() + ctx.end = len(ctx.text) + del heredocstack[:] + + def gen_rubystrings_rules(): + def intp_regex_callback(self, match, ctx): + yield match.start(1), String.Regex, match.group(1) # begin + nctx = LexerContext(match.group(3), 0, ['interpolated-regex']) + for i, t, v in self.get_tokens_unprocessed(context=nctx): + yield match.start(3)+i, t, v + yield match.start(4), String.Regex, match.group(4) # end[mixounse]* + ctx.pos = match.end() + + def intp_string_callback(self, match, ctx): + yield match.start(1), String.Other, match.group(1) + nctx = LexerContext(match.group(3), 0, ['interpolated-string']) + for i, t, v in self.get_tokens_unprocessed(context=nctx): + yield match.start(3)+i, t, v + yield match.start(4), String.Other, match.group(4) # end + ctx.pos = match.end() + + states = {} + states['strings'] = [ + # easy ones + (r'\:@{0,2}[a-zA-Z_]\w*[!?]?', String.Symbol), + (words(RUBY_OPERATORS, prefix=r'\:@{0,2}'), String.Symbol), + (r":'(\\\\|\\'|[^'])*'", String.Symbol), + (r"'(\\\\|\\'|[^'])*'", String.Single), + (r':"', String.Symbol, 'simple-sym'), + (r'([a-zA-Z_]\w*)(:)(?!:)', + bygroups(String.Symbol, Punctuation)), # Since Ruby 1.9 + (r'"', String.Double, 'simple-string'), + (r'(?', '<>', 'ab'): + states[name+'-intp-string'] = [ + (r'\\[\\' + bracecc + ']', String.Other), + (lbrace, String.Other, '#push'), + (rbrace, String.Other, '#pop'), + include('string-intp-escaped'), + (r'[\\#' + bracecc + ']', String.Other), + (r'[^\\#' + bracecc + ']+', String.Other), + ] + states['strings'].append((r'%[QWx]?' 
+ lbrace, String.Other, + name+'-intp-string')) + states[name+'-string'] = [ + (r'\\[\\' + bracecc + ']', String.Other), + (lbrace, String.Other, '#push'), + (rbrace, String.Other, '#pop'), + (r'[\\#' + bracecc + ']', String.Other), + (r'[^\\#' + bracecc + ']+', String.Other), + ] + states['strings'].append((r'%[qsw]' + lbrace, String.Other, + name+'-string')) + states[name+'-regex'] = [ + (r'\\[\\' + bracecc + ']', String.Regex), + (lbrace, String.Regex, '#push'), + (rbrace + '[mixounse]*', String.Regex, '#pop'), + include('string-intp'), + (r'[\\#' + bracecc + ']', String.Regex), + (r'[^\\#' + bracecc + ']+', String.Regex), + ] + states['strings'].append((r'%r' + lbrace, String.Regex, + name+'-regex')) + + # these must come after %! + states['strings'] += [ + # %r regex + (r'(%r([\W_]))((?:\\\2|(?!\2).)*)(\2[mixounse]*)', + intp_regex_callback), + # regular fancy strings with qsw + (r'%[qsw]([\W_])((?:\\\1|(?!\1).)*)\1', String.Other), + (r'(%[QWx]([\W_]))((?:\\\2|(?!\2).)*)(\2)', + intp_string_callback), + # special forms of fancy strings after operators or + # in method calls with braces + (r'(?<=[-+/*%=<>&!^|~,(])(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)', + bygroups(Text, String.Other, None)), + # and because of fixed width lookbehinds the whole thing a + # second time for line startings... 
+ (r'^(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)', + bygroups(Text, String.Other, None)), + # all regular fancy strings without qsw + (r'(%([^a-zA-Z0-9\s]))((?:\\\2|(?!\2).)*)(\2)', + intp_string_callback), + ] + + return states + + tokens = { + 'root': [ + (r'#.*?$', Comment.Single), + (r'=begin\s.*?\n=end.*?$', Comment.Multiline), + # keywords + (words(( + 'BEGIN', 'END', 'alias', 'begin', 'break', 'case', 'defined?', + 'do', 'else', 'elsif', 'end', 'ensure', 'for', 'if', 'in', 'next', 'redo', + 'rescue', 'raise', 'retry', 'return', 'super', 'then', 'undef', + 'unless', 'until', 'when', 'while', 'yield'), suffix=r'\b'), + Keyword), + # start of function, class and module names + (r'(module)(\s+)([a-zA-Z_]\w*' + r'(?:::[a-zA-Z_]\w*)*)', + bygroups(Keyword, Text, Name.Namespace)), + (r'(def)(\s+)', bygroups(Keyword, Text), 'funcname'), + (r'def(?=[*%&^`~+-/\[<>=])', Keyword, 'funcname'), + (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'), + # special methods + (words(( + 'initialize', 'new', 'loop', 'include', 'extend', 'raise', 'attr_reader', + 'attr_writer', 'attr_accessor', 'attr', 'catch', 'throw', 'private', + 'module_function', 'public', 'protected', 'true', 'false', 'nil'), + suffix=r'\b'), + Keyword.Pseudo), + (r'(not|and|or)\b', Operator.Word), + (words(( + 'autoload', 'block_given', 'const_defined', 'eql', 'equal', 'frozen', 'include', + 'instance_of', 'is_a', 'iterator', 'kind_of', 'method_defined', 'nil', + 'private_method_defined', 'protected_method_defined', + 'public_method_defined', 'respond_to', 'tainted'), suffix=r'\?'), + Name.Builtin), + (r'(chomp|chop|exit|gsub|sub)!', Name.Builtin), + (words(( + 'Array', 'Float', 'Integer', 'String', '__id__', '__send__', 'abort', + 'ancestors', 'at_exit', 'autoload', 'binding', 'callcc', 'caller', + 'catch', 'chomp', 'chop', 'class_eval', 'class_variables', + 'clone', 'const_defined?', 'const_get', 'const_missing', 'const_set', + 'constants', 'display', 'dup', 'eval', 'exec', 'exit', 'extend', 'fail', 
'fork', + 'format', 'freeze', 'getc', 'gets', 'global_variables', 'gsub', + 'hash', 'id', 'included_modules', 'inspect', 'instance_eval', + 'instance_method', 'instance_methods', + 'instance_variable_get', 'instance_variable_set', 'instance_variables', + 'lambda', 'load', 'local_variables', 'loop', + 'method', 'method_missing', 'methods', 'module_eval', 'name', + 'object_id', 'open', 'p', 'print', 'printf', 'private_class_method', + 'private_instance_methods', + 'private_methods', 'proc', 'protected_instance_methods', + 'protected_methods', 'public_class_method', + 'public_instance_methods', 'public_methods', + 'putc', 'puts', 'raise', 'rand', 'readline', 'readlines', 'require', + 'scan', 'select', 'self', 'send', 'set_trace_func', 'singleton_methods', 'sleep', + 'split', 'sprintf', 'srand', 'sub', 'syscall', 'system', 'taint', + 'test', 'throw', 'to_a', 'to_s', 'trace_var', 'trap', 'untaint', + 'untrace_var', 'warn'), prefix=r'(?~!:])|' + r'(?<=(?:\s|;)when\s)|' + r'(?<=(?:\s|;)or\s)|' + r'(?<=(?:\s|;)and\s)|' + r'(?<=(?:\s|;|\.)index\s)|' + r'(?<=(?:\s|;|\.)scan\s)|' + r'(?<=(?:\s|;|\.)sub\s)|' + r'(?<=(?:\s|;|\.)sub!\s)|' + r'(?<=(?:\s|;|\.)gsub\s)|' + r'(?<=(?:\s|;|\.)gsub!\s)|' + r'(?<=(?:\s|;|\.)match\s)|' + r'(?<=(?:\s|;)if\s)|' + r'(?<=(?:\s|;)elsif\s)|' + r'(?<=^when\s)|' + r'(?<=^index\s)|' + r'(?<=^scan\s)|' + r'(?<=^sub\s)|' + r'(?<=^gsub\s)|' + r'(?<=^sub!\s)|' + r'(?<=^gsub!\s)|' + r'(?<=^match\s)|' + r'(?<=^if\s)|' + r'(?<=^elsif\s)' + r')(\s*)(/)', bygroups(Text, String.Regex), 'multiline-regex'), + # multiline regex (in method calls or subscripts) + (r'(?<=\(|,|\[)/', String.Regex, 'multiline-regex'), + # multiline regex (this time the funny no whitespace rule) + (r'(\s+)(/)(?![\s=])', bygroups(Text, String.Regex), + 'multiline-regex'), + # lex numbers and ignore following regular expressions which + # are division operators in fact (grrrr. i hate that. any + # better ideas?) + # since pygments 0.7 we also eat a "?" 
operator after numbers + # so that the char operator does not work. Chars are not allowed + # there so that you can use the ternary operator. + # stupid example: + # x>=0?n[x]:"" + (r'(0_?[0-7]+(?:_[0-7]+)*)(\s*)([/?])?', + bygroups(Number.Oct, Text, Operator)), + (r'(0x[0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*)(\s*)([/?])?', + bygroups(Number.Hex, Text, Operator)), + (r'(0b[01]+(?:_[01]+)*)(\s*)([/?])?', + bygroups(Number.Bin, Text, Operator)), + (r'([\d]+(?:_\d+)*)(\s*)([/?])?', + bygroups(Number.Integer, Text, Operator)), + # Names + (r'@@[a-zA-Z_]\w*', Name.Variable.Class), + (r'@[a-zA-Z_]\w*', Name.Variable.Instance), + (r'\$\w+', Name.Variable.Global), + (r'\$[!@&`\'+~=/\\,;.<>_*$?:"^-]', Name.Variable.Global), + (r'\$-[0adFiIlpvw]', Name.Variable.Global), + (r'::', Operator), + include('strings'), + # chars + (r'\?(\\[MC]-)*' # modifiers + r'(\\([\\abefnrstv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})|\S)' + r'(?!\w)', + String.Char), + (r'[A-Z]\w+', Name.Constant), + # this is needed because ruby attributes can look + # like keywords (class) or like this: ` ?!? + (words(RUBY_OPERATORS, prefix=r'(\.|::)'), + bygroups(Operator, Name.Operator)), + (r'(\.|::)([a-zA-Z_]\w*[!?]?|[*%&^`~+\-/\[<>=])', + bygroups(Operator, Name)), + (r'[a-zA-Z_]\w*[!?]?', Name), + (r'(\[|\]|\*\*|<>?|>=|<=|<=>|=~|={3}|' + r'!~|&&?|\|\||\.{1,3})', Operator), + (r'[-+/*%=<>&!^|~]=?', Operator), + (r'[(){};,/?:\\]', Punctuation), + (r'\s+', Text) + ], + 'funcname': [ + (r'\(', Punctuation, 'defexpr'), + (r'(?:([a-zA-Z_]\w*)(\.))?' 
+ r'([a-zA-Z_]\w*[!?]?|\*\*?|[-+]@?|' + r'[/%&|^`~]|\[\]=?|<<|>>|<=?>|>=?|===?)', + bygroups(Name.Class, Operator, Name.Function), '#pop'), + default('#pop') + ], + 'classname': [ + (r'\(', Punctuation, 'defexpr'), + (r'<<', Operator, '#pop'), + (r'[A-Z_]\w*', Name.Class, '#pop'), + default('#pop') + ], + 'defexpr': [ + (r'(\))(\.|::)?', bygroups(Punctuation, Operator), '#pop'), + (r'\(', Operator, '#push'), + include('root') + ], + 'in-intp': [ + (r'\{', String.Interpol, '#push'), + (r'\}', String.Interpol, '#pop'), + include('root'), + ], + 'string-intp': [ + (r'#\{', String.Interpol, 'in-intp'), + (r'#@@?[a-zA-Z_]\w*', String.Interpol), + (r'#\$[a-zA-Z_]\w*', String.Interpol) + ], + 'string-intp-escaped': [ + include('string-intp'), + (r'\\([\\abefnrstv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})', + String.Escape) + ], + 'interpolated-regex': [ + include('string-intp'), + (r'[\\#]', String.Regex), + (r'[^\\#]+', String.Regex), + ], + 'interpolated-string': [ + include('string-intp'), + (r'[\\#]', String.Other), + (r'[^\\#]+', String.Other), + ], + 'multiline-regex': [ + include('string-intp'), + (r'\\\\', String.Regex), + (r'\\/', String.Regex), + (r'[\\#]', String.Regex), + (r'[^\\/#]+', String.Regex), + (r'/[mixounse]*', String.Regex, '#pop'), + ], + 'end-part': [ + (r'.+', Comment.Preproc, '#pop') + ] + } + tokens.update(gen_rubystrings_rules()) + + def analyse_text(text): + return shebang_matches(text, r'ruby(1\.\d)?') + + +class RubyConsoleLexer(Lexer): + """ + For Ruby interactive console (**irb**) output like: + + .. 
sourcecode:: rbcon + + irb(main):001:0> a = 1 + => 1 + irb(main):002:0> puts a + 1 + => nil + """ + name = 'Ruby irb session' + aliases = ['rbcon', 'irb'] + mimetypes = ['text/x-ruby-shellsession'] + + _prompt_re = re.compile('irb\([a-zA-Z_]\w*\):\d{3}:\d+[>*"\'] ' + '|>> |\?> ') + + def get_tokens_unprocessed(self, text): + rblexer = RubyLexer(**self.options) + + curcode = '' + insertions = [] + for match in line_re.finditer(text): + line = match.group() + m = self._prompt_re.match(line) + if m is not None: + end = m.end() + insertions.append((len(curcode), + [(0, Generic.Prompt, line[:end])])) + curcode += line[end:] + else: + if curcode: + for item in do_insertions( + insertions, rblexer.get_tokens_unprocessed(curcode)): + yield item + curcode = '' + insertions = [] + yield match.start(), Generic.Output, line + if curcode: + for item in do_insertions( + insertions, rblexer.get_tokens_unprocessed(curcode)): + yield item + + +class FancyLexer(RegexLexer): + """ + Pygments Lexer For `Fancy `_. + + Fancy is a self-hosted, pure object-oriented, dynamic, + class-based, concurrent general-purpose programming language + running on Rubinius, the Ruby VM. + + .. 
versionadded:: 1.5 + """ + name = 'Fancy' + filenames = ['*.fy', '*.fancypack'] + aliases = ['fancy', 'fy'] + mimetypes = ['text/x-fancysrc'] + + tokens = { + # copied from PerlLexer: + 'balanced-regex': [ + (r'/(\\\\|\\/|[^/])*/[egimosx]*', String.Regex, '#pop'), + (r'!(\\\\|\\!|[^!])*![egimosx]*', String.Regex, '#pop'), + (r'\\(\\\\|[^\\])*\\[egimosx]*', String.Regex, '#pop'), + (r'\{(\\\\|\\\}|[^}])*\}[egimosx]*', String.Regex, '#pop'), + (r'<(\\\\|\\>|[^>])*>[egimosx]*', String.Regex, '#pop'), + (r'\[(\\\\|\\\]|[^\]])*\][egimosx]*', String.Regex, '#pop'), + (r'\((\\\\|\\\)|[^)])*\)[egimosx]*', String.Regex, '#pop'), + (r'@(\\\\|\\@|[^@])*@[egimosx]*', String.Regex, '#pop'), + (r'%(\\\\|\\%|[^%])*%[egimosx]*', String.Regex, '#pop'), + (r'\$(\\\\|\\\$|[^$])*\$[egimosx]*', String.Regex, '#pop'), + ], + 'root': [ + (r'\s+', Text), + + # balanced delimiters (copied from PerlLexer): + (r's\{(\\\\|\\\}|[^}])*\}\s*', String.Regex, 'balanced-regex'), + (r's<(\\\\|\\>|[^>])*>\s*', String.Regex, 'balanced-regex'), + (r's\[(\\\\|\\\]|[^\]])*\]\s*', String.Regex, 'balanced-regex'), + (r's\((\\\\|\\\)|[^)])*\)\s*', String.Regex, 'balanced-regex'), + (r'm?/(\\\\|\\/|[^/\n])*/[gcimosx]*', String.Regex), + (r'm(?=[/!\\{<\[(@%$])', String.Regex, 'balanced-regex'), + + # Comments + (r'#(.*?)\n', Comment.Single), + # Symbols + (r'\'([^\'\s\[\](){}]+|\[\])', String.Symbol), + # Multi-line DoubleQuotedString + (r'"""(\\\\|\\"|[^"])*"""', String), + # DoubleQuotedString + (r'"(\\\\|\\"|[^"])*"', String), + # keywords + (r'(def|class|try|catch|finally|retry|return|return_local|match|' + r'case|->|=>)\b', Keyword), + # constants + (r'(self|super|nil|false|true)\b', Name.Constant), + (r'[(){};,/?|:\\]', Punctuation), + # names + (words(( + 'Object', 'Array', 'Hash', 'Directory', 'File', 'Class', 'String', + 'Number', 'Enumerable', 'FancyEnumerable', 'Block', 'TrueClass', + 'NilClass', 'FalseClass', 'Tuple', 'Symbol', 'Stack', 'Set', + 'FancySpec', 'Method', 'Package', 'Range'), 
suffix=r'\b'), + Name.Builtin), + # functions + (r'[a-zA-Z](\w|[-+?!=*/^><%])*:', Name.Function), + # operators, must be below functions + (r'[-+*/~,<>=&!?%^\[\].$]+', Operator), + ('[A-Z]\w*', Name.Constant), + ('@[a-zA-Z_]\w*', Name.Variable.Instance), + ('@@[a-zA-Z_]\w*', Name.Variable.Class), + ('@@?', Operator), + ('[a-zA-Z_]\w*', Name), + # numbers - / checks are necessary to avoid mismarking regexes, + # see comment in RubyLexer + (r'(0[oO]?[0-7]+(?:_[0-7]+)*)(\s*)([/?])?', + bygroups(Number.Oct, Text, Operator)), + (r'(0[xX][0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*)(\s*)([/?])?', + bygroups(Number.Hex, Text, Operator)), + (r'(0[bB][01]+(?:_[01]+)*)(\s*)([/?])?', + bygroups(Number.Bin, Text, Operator)), + (r'([\d]+(?:_\d+)*)(\s*)([/?])?', + bygroups(Number.Integer, Text, Operator)), + (r'\d+([eE][+-]?[0-9]+)|\d+\.\d+([eE][+-]?[0-9]+)?', Number.Float), + (r'\d+', Number.Integer) + ] + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/rust.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/rust.py new file mode 100644 index 0000000..4447e1d --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/rust.py @@ -0,0 +1,167 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.rust + ~~~~~~~~~~~~~~~~~~~~ + + Lexers for the Rust language. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, include, bygroups, words, default +from pygments.token import Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Whitespace + +__all__ = ['RustLexer'] + + +class RustLexer(RegexLexer): + """ + Lexer for the Rust programming language (version 0.9). + + .. 
versionadded:: 1.6 + """ + name = 'Rust' + filenames = ['*.rs'] + aliases = ['rust'] + mimetypes = ['text/x-rustsrc'] + + tokens = { + 'root': [ + # Whitespace and Comments + (r'\n', Whitespace), + (r'\s+', Whitespace), + (r'//[/!](.*?)\n', Comment.Doc), + (r'//(.*?)\n', Comment.Single), + (r'/\*', Comment.Multiline, 'comment'), + + # Lifetime + (r"""'[a-zA-Z_]\w*""", Name.Label), + # Macro parameters + (r"""\$([a-zA-Z_]\w*|\(,?|\),?|,?)""", Comment.Preproc), + # Keywords + (words(( + 'as', 'box', 'break', 'continue', 'do', 'else', 'enum', 'extern', + 'fn', 'for', 'if', 'impl', 'in', 'loop', 'match', 'mut', 'priv', + 'proc', 'pub', 'ref', 'return', 'static', '\'static', 'struct', + 'trait', 'true', 'type', 'unsafe', 'while'), suffix=r'\b'), + Keyword), + (words(('alignof', 'be', 'const', 'offsetof', 'pure', 'sizeof', + 'typeof', 'once', 'unsized', 'yield'), suffix=r'\b'), + Keyword.Reserved), + (r'(mod|use)\b', Keyword.Namespace), + (r'(true|false)\b', Keyword.Constant), + (r'let\b', Keyword.Declaration), + (words(('u8', 'u16', 'u32', 'u64', 'i8', 'i16', 'i32', 'i64', 'uint', + 'int', 'f32', 'f64', 'str', 'bool'), suffix=r'\b'), + Keyword.Type), + (r'self\b', Name.Builtin.Pseudo), + # Prelude + (words(( + 'Freeze', 'Pod', 'Send', 'Sized', 'Add', 'Sub', 'Mul', 'Div', 'Rem', 'Neg', 'Not', 'BitAnd', + 'BitOr', 'BitXor', 'Drop', 'Shl', 'Shr', 'Index', 'Option', 'Some', 'None', 'Result', + 'Ok', 'Err', 'from_str', 'range', 'print', 'println', 'Any', 'AnyOwnExt', 'AnyRefExt', + 'AnyMutRefExt', 'Ascii', 'AsciiCast', 'OnwedAsciiCast', 'AsciiStr', + 'IntoBytes', 'Bool', 'ToCStr', 'Char', 'Clone', 'DeepClone', 'Eq', 'ApproxEq', + 'Ord', 'TotalEq', 'Ordering', 'Less', 'Equal', 'Greater', 'Equiv', 'Container', + 'Mutable', 'Map', 'MutableMap', 'Set', 'MutableSet', 'Default', 'FromStr', + 'Hash', 'FromIterator', 'Extendable', 'Iterator', 'DoubleEndedIterator', + 'RandomAccessIterator', 'CloneableIterator', 'OrdIterator', + 'MutableDoubleEndedIterator', 'ExactSize', 'Times', 
'Algebraic', + 'Trigonometric', 'Exponential', 'Hyperbolic', 'Bitwise', 'BitCount', + 'Bounded', 'Integer', 'Fractional', 'Real', 'RealExt', 'Num', 'NumCast', + 'CheckedAdd', 'CheckedSub', 'CheckedMul', 'Orderable', 'Signed', + 'Unsigned', 'Round', 'Primitive', 'Int', 'Float', 'ToStrRadix', + 'ToPrimitive', 'FromPrimitive', 'GenericPath', 'Path', 'PosixPath', + 'WindowsPath', 'RawPtr', 'Buffer', 'Writer', 'Reader', 'Seek', + 'SendStr', 'SendStrOwned', 'SendStrStatic', 'IntoSendStr', 'Str', + 'StrVector', 'StrSlice', 'OwnedStr', 'IterBytes', 'ToStr', 'IntoStr', + 'CopyableTuple', 'ImmutableTuple', 'ImmutableEqVector', 'ImmutableTotalOrdVector', + 'ImmutableCopyableVector', 'OwnedVector', 'OwnedCopyableVector', + 'OwnedEqVector', 'MutableVector', 'MutableTotalOrdVector', + 'Vector', 'VectorVector', 'CopyableVector', 'ImmutableVector', + 'Port', 'Chan', 'SharedChan', 'spawn', 'drop'), suffix=r'\b'), + Name.Builtin), + (r'(ImmutableTuple\d+|Tuple\d+)\b', Name.Builtin), + # Borrowed pointer + (r'(&)(\'[A-Za-z_]\w*)?', bygroups(Operator, Name)), + # Labels + (r'\'[A-Za-z_]\w*:', Name.Label), + # Character Literal + (r"""'(\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}""" + r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|.)'""", + String.Char), + # Binary Literal + (r'0b[01_]+', Number.Bin, 'number_lit'), + # Octal Literal + (r'0o[0-7_]+', Number.Oct, 'number_lit'), + # Hexadecimal Literal + (r'0[xX][0-9a-fA-F_]+', Number.Hex, 'number_lit'), + # Decimal Literal + (r'[0-9][0-9_]*(\.[0-9_]+[eE][+\-]?[0-9_]+|' + r'\.[0-9_]*|[eE][+\-]?[0-9_]+)', Number.Float, 'number_lit'), + (r'[0-9][0-9_]*', Number.Integer, 'number_lit'), + # String Literal + (r'"', String, 'string'), + (r'r(#*)".*?"\1', String.Raw), + + # Operators and Punctuation + (r'[{}()\[\],.;]', Punctuation), + (r'[+\-*/%&|<>^!~@=:?]', Operator), + + # Identifier + (r'[a-zA-Z_]\w*', Name), + + # Attributes + (r'#!?\[', Comment.Preproc, 'attribute['), + # Macros + (r'([A-Za-z_]\w*)(!)(\s*)([A-Za-z_]\w*)?(\s*)(\{)', + 
bygroups(Comment.Preproc, Punctuation, Whitespace, Name, + Whitespace, Punctuation), 'macro{'), + (r'([A-Za-z_]\w*)(!)(\s*)([A-Za-z_]\w*)?(\()', + bygroups(Comment.Preproc, Punctuation, Whitespace, Name, + Punctuation), 'macro('), + ], + 'comment': [ + (r'[^*/]+', Comment.Multiline), + (r'/\*', Comment.Multiline, '#push'), + (r'\*/', Comment.Multiline, '#pop'), + (r'[*/]', Comment.Multiline), + ], + 'number_lit': [ + (r'[ui](8|16|32|64)', Keyword, '#pop'), + (r'f(32|64)', Keyword, '#pop'), + default('#pop'), + ], + 'string': [ + (r'"', String, '#pop'), + (r"""\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}""" + r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}""", String.Escape), + (r'[^\\"]+', String), + (r'\\', String), + ], + 'macro{': [ + (r'\{', Operator, '#push'), + (r'\}', Operator, '#pop'), + ], + 'macro(': [ + (r'\(', Operator, '#push'), + (r'\)', Operator, '#pop'), + ], + 'attribute_common': [ + (r'"', String, 'string'), + (r'\[', Comment.Preproc, 'attribute['), + (r'\(', Comment.Preproc, 'attribute('), + ], + 'attribute[': [ + include('attribute_common'), + (r'\];?', Comment.Preproc, '#pop'), + (r'[^"\]]+', Comment.Preproc), + ], + 'attribute(': [ + include('attribute_common'), + (r'\);?', Comment.Preproc, '#pop'), + (r'[^")]+', Comment.Preproc), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/scripting.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/scripting.py new file mode 100644 index 0000000..678cab2 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/scripting.py @@ -0,0 +1,923 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.scripting + ~~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexer for scripting and embedded languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import re + +from pygments.lexer import RegexLexer, include, bygroups, default, combined, \ + words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Error, Whitespace +from pygments.util import get_bool_opt, get_list_opt, iteritems + +__all__ = ['LuaLexer', 'MoonScriptLexer', 'ChaiscriptLexer', 'LSLLexer', + 'AppleScriptLexer', 'RexxLexer', 'MOOCodeLexer', 'HybrisLexer'] + + +class LuaLexer(RegexLexer): + """ + For `Lua `_ source code. + + Additional options accepted: + + `func_name_highlighting` + If given and ``True``, highlight builtin function names + (default: ``True``). + `disabled_modules` + If given, must be a list of module names whose function names + should not be highlighted. By default all modules are highlighted. + + To get a list of allowed modules have a look into the + `_lua_builtins` module: + + .. sourcecode:: pycon + + >>> from pygments.lexers._lua_builtins import MODULES + >>> MODULES.keys() + ['string', 'coroutine', 'modules', 'io', 'basic', ...] 
+ """ + + name = 'Lua' + aliases = ['lua'] + filenames = ['*.lua', '*.wlua'] + mimetypes = ['text/x-lua', 'application/x-lua'] + + tokens = { + 'root': [ + # lua allows a file to start with a shebang + (r'#!(.*?)$', Comment.Preproc), + default('base'), + ], + 'base': [ + (r'(?s)--\[(=*)\[.*?\]\1\]', Comment.Multiline), + ('--.*$', Comment.Single), + + (r'(?i)(\d*\.\d+|\d+\.\d*)(e[+-]?\d+)?', Number.Float), + (r'(?i)\d+e[+-]?\d+', Number.Float), + ('(?i)0x[0-9a-f]*', Number.Hex), + (r'\d+', Number.Integer), + + (r'\n', Text), + (r'[^\S\n]', Text), + # multiline strings + (r'(?s)\[(=*)\[.*?\]\1\]', String), + + (r'(==|~=|<=|>=|\.\.\.|\.\.|[=+\-*/%^<>#])', Operator), + (r'[\[\]{}().,:;]', Punctuation), + (r'(and|or|not)\b', Operator.Word), + + ('(break|do|else|elseif|end|for|if|in|repeat|return|then|until|' + r'while)\b', Keyword), + (r'(local)\b', Keyword.Declaration), + (r'(true|false|nil)\b', Keyword.Constant), + + (r'(function)\b', Keyword, 'funcname'), + + (r'[A-Za-z_]\w*(\.[A-Za-z_]\w*)?', Name), + + ("'", String.Single, combined('stringescape', 'sqs')), + ('"', String.Double, combined('stringescape', 'dqs')) + ], + + 'funcname': [ + (r'\s+', Text), + ('(?:([A-Za-z_]\w*)(\.))?([A-Za-z_]\w*)', + bygroups(Name.Class, Punctuation, Name.Function), '#pop'), + # inline function + ('\(', Punctuation, '#pop'), + ], + + # if I understand correctly, every character is valid in a lua string, + # so this state is only for later corrections + 'string': [ + ('.', String) + ], + + 'stringescape': [ + (r'''\\([abfnrtv\\"']|\d{1,3})''', String.Escape) + ], + + 'sqs': [ + ("'", String, '#pop'), + include('string') + ], + + 'dqs': [ + ('"', String, '#pop'), + include('string') + ] + } + + def __init__(self, **options): + self.func_name_highlighting = get_bool_opt( + options, 'func_name_highlighting', True) + self.disabled_modules = get_list_opt(options, 'disabled_modules', []) + + self._functions = set() + if self.func_name_highlighting: + from pygments.lexers._lua_builtins import 
MODULES + for mod, func in iteritems(MODULES): + if mod not in self.disabled_modules: + self._functions.update(func) + RegexLexer.__init__(self, **options) + + def get_tokens_unprocessed(self, text): + for index, token, value in \ + RegexLexer.get_tokens_unprocessed(self, text): + if token is Name: + if value in self._functions: + yield index, Name.Builtin, value + continue + elif '.' in value: + a, b = value.split('.') + yield index, Name, a + yield index + len(a), Punctuation, u'.' + yield index + len(a) + 1, Name, b + continue + yield index, token, value + + +class MoonScriptLexer(LuaLexer): + """ + For `MoonScript `_ source code. + + .. versionadded:: 1.5 + """ + + name = "MoonScript" + aliases = ["moon", "moonscript"] + filenames = ["*.moon"] + mimetypes = ['text/x-moonscript', 'application/x-moonscript'] + + tokens = { + 'root': [ + (r'#!(.*?)$', Comment.Preproc), + default('base'), + ], + 'base': [ + ('--.*$', Comment.Single), + (r'(?i)(\d*\.\d+|\d+\.\d*)(e[+-]?\d+)?', Number.Float), + (r'(?i)\d+e[+-]?\d+', Number.Float), + (r'(?i)0x[0-9a-f]*', Number.Hex), + (r'\d+', Number.Integer), + (r'\n', Text), + (r'[^\S\n]+', Text), + (r'(?s)\[(=*)\[.*?\]\1\]', String), + (r'(->|=>)', Name.Function), + (r':[a-zA-Z_]\w*', Name.Variable), + (r'(==|!=|~=|<=|>=|\.\.\.|\.\.|[=+\-*/%^<>#!.\\:])', Operator), + (r'[;,]', Punctuation), + (r'[\[\]{}()]', Keyword.Type), + (r'[a-zA-Z_]\w*:', Name.Variable), + (words(( + 'class', 'extends', 'if', 'then', 'super', 'do', 'with', + 'import', 'export', 'while', 'elseif', 'return', 'for', 'in', + 'from', 'when', 'using', 'else', 'and', 'or', 'not', 'switch', + 'break'), suffix=r'\b'), + Keyword), + (r'(true|false|nil)\b', Keyword.Constant), + (r'(and|or|not)\b', Operator.Word), + (r'(self)\b', Name.Builtin.Pseudo), + (r'@@?([a-zA-Z_]\w*)?', Name.Variable.Class), + (r'[A-Z]\w*', Name.Class), # proper name + (r'[A-Za-z_]\w*(\.[A-Za-z_]\w*)?', Name), + ("'", String.Single, combined('stringescape', 'sqs')), + ('"', String.Double, 
combined('stringescape', 'dqs')) + ], + 'stringescape': [ + (r'''\\([abfnrtv\\"']|\d{1,3})''', String.Escape) + ], + 'sqs': [ + ("'", String.Single, '#pop'), + (".", String) + ], + 'dqs': [ + ('"', String.Double, '#pop'), + (".", String) + ] + } + + def get_tokens_unprocessed(self, text): + # set . as Operator instead of Punctuation + for index, token, value in LuaLexer.get_tokens_unprocessed(self, text): + if token == Punctuation and value == ".": + token = Operator + yield index, token, value + + +class ChaiscriptLexer(RegexLexer): + """ + For `ChaiScript `_ source code. + + .. versionadded:: 2.0 + """ + + name = 'ChaiScript' + aliases = ['chai', 'chaiscript'] + filenames = ['*.chai'] + mimetypes = ['text/x-chaiscript', 'application/x-chaiscript'] + + flags = re.DOTALL | re.MULTILINE + + tokens = { + 'commentsandwhitespace': [ + (r'\s+', Text), + (r'//.*?\n', Comment.Single), + (r'/\*.*?\*/', Comment.Multiline), + (r'^\#.*?\n', Comment.Single) + ], + 'slashstartsregex': [ + include('commentsandwhitespace'), + (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' + r'([gim]+\b|\B)', String.Regex, '#pop'), + (r'(?=/)', Text, ('#pop', 'badregex')), + default('#pop') + ], + 'badregex': [ + (r'\n', Text, '#pop') + ], + 'root': [ + include('commentsandwhitespace'), + (r'\n', Text), + (r'[^\S\n]+', Text), + (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|\.\.' 
+ r'(<<|>>>?|==?|!=?|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'), + (r'[{(\[;,]', Punctuation, 'slashstartsregex'), + (r'[})\].]', Punctuation), + (r'[=+\-*/]', Operator), + (r'(for|in|while|do|break|return|continue|if|else|' + r'throw|try|catch' + r')\b', Keyword, 'slashstartsregex'), + (r'(var)\b', Keyword.Declaration, 'slashstartsregex'), + (r'(attr|def|fun)\b', Keyword.Reserved), + (r'(true|false)\b', Keyword.Constant), + (r'(eval|throw)\b', Name.Builtin), + (r'`\S+`', Name.Builtin), + (r'[$a-zA-Z_]\w*', Name.Other), + (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), + (r'0x[0-9a-fA-F]+', Number.Hex), + (r'[0-9]+', Number.Integer), + (r'"', String.Double, 'dqstring'), + (r"'(\\\\|\\'|[^'])*'", String.Single), + ], + 'dqstring': [ + (r'\$\{[^"}]+?\}', String.Interpol), + (r'\$', String.Double), + (r'\\\\', String.Double), + (r'\\"', String.Double), + (r'[^\\"$]+', String.Double), + (r'"', String.Double, '#pop'), + ], + } + + +class LSLLexer(RegexLexer): + """ + For Second Life's Linden Scripting Language source code. + + .. 
versionadded:: 2.0 + """ + + name = 'LSL' + aliases = ['lsl'] + filenames = ['*.lsl'] + mimetypes = ['text/x-lsl'] + + flags = re.MULTILINE + + lsl_keywords = r'\b(?:do|else|for|if|jump|return|while)\b' + lsl_types = r'\b(?:float|integer|key|list|quaternion|rotation|string|vector)\b' + lsl_states = r'\b(?:(?:state)\s+\w+|default)\b' + lsl_events = r'\b(?:state_(?:entry|exit)|touch(?:_(?:start|end))?|(?:land_)?collision(?:_(?:start|end))?|timer|listen|(?:no_)?sensor|control|(?:not_)?at_(?:rot_)?target|money|email|run_time_permissions|changed|attach|dataserver|moving_(?:start|end)|link_message|(?:on|object)_rez|remote_data|http_re(?:sponse|quest)|path_update|transaction_result)\b' + lsl_functions_builtin = r'\b(?:ll(?:ReturnObjectsBy(?:ID|Owner)|Json(?:2List|[GS]etValue|ValueType)|Sin|Cos|Tan|Atan2|Sqrt|Pow|Abs|Fabs|Frand|Floor|Ceil|Round|Vec(?:Mag|Norm|Dist)|Rot(?:Between|2(?:Euler|Fwd|Left|Up))|(?:Euler|Axes)2Rot|Whisper|(?:Region|Owner)?Say|Shout|Listen(?:Control|Remove)?|Sensor(?:Repeat|Remove)?|Detected(?:Name|Key|Owner|Type|Pos|Vel|Grab|Rot|Group|LinkNumber)|Die|Ground|Wind|(?:[GS]et)(?:AnimationOverride|MemoryLimit|PrimMediaParams|ParcelMusicURL|Object(?:Desc|Name)|PhysicsMaterial|Status|Scale|Color|Alpha|Texture|Pos|Rot|Force|Torque)|ResetAnimationOverride|(?:Scale|Offset|Rotate)Texture|(?:Rot)?Target(?:Remove)?|(?:Stop)?MoveToTarget|Apply(?:Rotational)?Impulse|Set(?:KeyframedMotion|ContentType|RegionPos|(?:Angular)?Velocity|Buoyancy|HoverHeight|ForceAndTorque|TimerEvent|ScriptState|Damage|TextureAnim|Sound(?:Queueing|Radius)|Vehicle(?:Type|(?:Float|Vector|Rotation)Param)|(?:Touch|Sit)?Text|Camera(?:Eye|At)Offset|PrimitiveParams|ClickAction|Link(?:Alpha|Color|PrimitiveParams(?:Fast)?|Texture(?:Anim)?|Camera|Media)|RemoteScriptAccessPin|PayPrice|LocalRot)|ScaleByFactor|Get(?:(?:Max|Min)ScaleFactor|ClosestNavPoint|StaticPath|SimStats|Env|PrimitiveParams|Link(?:PrimitiveParams|Number(?:OfSides)?|Key|Name|Media)|HTTPHeader|FreeURLs|Object(?:Details|PermMask|PrimCo
unt)|Parcel(?:MaxPrims|Details|Prim(?:Count|Owners))|Attached|(?:SPMax|Free|Used)Memory|Region(?:Name|TimeDilation|FPS|Corner|AgentCount)|Root(?:Position|Rotation)|UnixTime|(?:Parcel|Region)Flags|(?:Wall|GMT)clock|SimulatorHostname|BoundingBox|GeometricCenter|Creator|NumberOf(?:Prims|NotecardLines|Sides)|Animation(?:List)?|(?:Camera|Local)(?:Pos|Rot)|Vel|Accel|Omega|Time(?:stamp|OfDay)|(?:Object|CenterOf)?Mass|MassMKS|Energy|Owner|(?:Owner)?Key|SunDirection|Texture(?:Offset|Scale|Rot)|Inventory(?:Number|Name|Key|Type|Creator|PermMask)|Permissions(?:Key)?|StartParameter|List(?:Length|EntryType)|Date|Agent(?:Size|Info|Language|List)|LandOwnerAt|NotecardLine|Script(?:Name|State))|(?:Get|Reset|GetAndReset)Time|PlaySound(?:Slave)?|LoopSound(?:Master|Slave)?|(?:Trigger|Stop|Preload)Sound|(?:(?:Get|Delete)Sub|Insert)String|To(?:Upper|Lower)|Give(?:InventoryList|Money)|RezObject|(?:Stop)?LookAt|Sleep|CollisionFilter|(?:Take|Release)Controls|DetachFromAvatar|AttachToAvatar(?:Temp)?|InstantMessage|(?:GetNext)?Email|StopHover|MinEventDelay|RotLookAt|String(?:Length|Trim)|(?:Start|Stop)Animation|TargetOmega|RequestPermissions|(?:Create|Break)Link|BreakAllLinks|(?:Give|Remove)Inventory|Water|PassTouches|Request(?:Agent|Inventory)Data|TeleportAgent(?:Home|GlobalCoords)?|ModifyLand|CollisionSound|ResetScript|MessageLinked|PushObject|PassCollisions|AxisAngle2Rot|Rot2(?:Axis|Angle)|A(?:cos|sin)|AngleBetween|AllowInventoryDrop|SubStringIndex|List2(?:CSV|Integer|Json|Float|String|Key|Vector|Rot|List(?:Strided)?)|DeleteSubList|List(?:Statistics|Sort|Randomize|(?:Insert|Find|Replace)List)|EdgeOfWorld|AdjustSoundVolume|Key2Name|TriggerSoundLimited|EjectFromLand|(?:CSV|ParseString)2List|OverMyLand|SameGroup|UnSit|Ground(?:Slope|Normal|Contour)|GroundRepel|(?:Set|Remove)VehicleFlags|(?:AvatarOn)?(?:Link)?SitTarget|Script(?:Danger|Profiler)|Dialog|VolumeDetect|ResetOtherScript|RemoteLoadScriptPin|(?:Open|Close)RemoteDataChannel|SendRemoteData|RemoteDataReply|(?:Integer|String)ToBase64|XorBa
se64|Log(?:10)?|Base64To(?:String|Integer)|ParseStringKeepNulls|RezAtRoot|RequestSimulatorData|ForceMouselook|(?:Load|Release|(?:E|Une)scape)URL|ParcelMedia(?:CommandList|Query)|ModPow|MapDestination|(?:RemoveFrom|AddTo|Reset)Land(?:Pass|Ban)List|(?:Set|Clear)CameraParams|HTTP(?:Request|Response)|TextBox|DetectedTouch(?:UV|Face|Pos|(?:N|Bin)ormal|ST)|(?:MD5|SHA1|DumpList2)String|Request(?:Secure)?URL|Clear(?:Prim|Link)Media|(?:Link)?ParticleSystem|(?:Get|Request)(?:Username|DisplayName)|RegionSayTo|CastRay|GenerateKey|TransferLindenDollars|ManageEstateAccess|(?:Create|Delete)Character|ExecCharacterCmd|Evade|FleeFrom|NavigateTo|PatrolPoints|Pursue|UpdateCharacter|WanderWithin))\b' + lsl_constants_float = r'\b(?:DEG_TO_RAD|PI(?:_BY_TWO)?|RAD_TO_DEG|SQRT2|TWO_PI)\b' + lsl_constants_integer = r'\b(?:JSON_APPEND|STATUS_(?:PHYSICS|ROTATE_[XYZ]|PHANTOM|SANDBOX|BLOCK_GRAB(?:_OBJECT)?|(?:DIE|RETURN)_AT_EDGE|CAST_SHADOWS|OK|MALFORMED_PARAMS|TYPE_MISMATCH|BOUNDS_ERROR|NOT_(?:FOUND|SUPPORTED)|INTERNAL_ERROR|WHITELIST_FAILED)|AGENT(?:_(?:BY_(?:LEGACY_|USER)NAME|FLYING|ATTACHMENTS|SCRIPTED|MOUSELOOK|SITTING|ON_OBJECT|AWAY|WALKING|IN_AIR|TYPING|CROUCHING|BUSY|ALWAYS_RUN|AUTOPILOT|LIST_(?:PARCEL(?:_OWNER)?|REGION)))?|CAMERA_(?:PITCH|DISTANCE|BEHINDNESS_(?:ANGLE|LAG)|(?:FOCUS|POSITION)(?:_(?:THRESHOLD|LOCKED|LAG))?|FOCUS_OFFSET|ACTIVE)|ANIM_ON|LOOP|REVERSE|PING_PONG|SMOOTH|ROTATE|SCALE|ALL_SIDES|LINK_(?:ROOT|SET|ALL_(?:OTHERS|CHILDREN)|THIS)|ACTIVE|PASSIVE|SCRIPTED|CONTROL_(?:FWD|BACK|(?:ROT_)?(?:LEFT|RIGHT)|UP|DOWN|(?:ML_)?LBUTTON)|PERMISSION_(?:RETURN_OBJECTS|DEBIT|OVERRIDE_ANIMATIONS|SILENT_ESTATE_MANAGEMENT|TAKE_CONTROLS|TRIGGER_ANIMATION|ATTACH|CHANGE_LINKS|(?:CONTROL|TRACK)_CAMERA|TELEPORT)|INVENTORY_(?:TEXTURE|SOUND|OBJECT|SCRIPT|LANDMARK|CLOTHING|NOTECARD|BODYPART|ANIMATION|GESTURE|ALL|NONE)|CHANGED_(?:INVENTORY|COLOR|SHAPE|SCALE|TEXTURE|LINK|ALLOWED_DROP|OWNER|REGION(?:_START)?|TELEPORT|MEDIA)|OBJECT_(?:(?:PHYSICS|SERVER|STREAMING)_COST|UNKNOWN_DETAIL|CHARACTER_TIME|PHANTOM
|PHYSICS|TEMP_ON_REZ|NAME|DESC|POS|PRIM_EQUIVALENCE|RETURN_(?:PARCEL(?:_OWNER)?|REGION)|ROO?T|VELOCITY|OWNER|GROUP|CREATOR|ATTACHED_POINT|RENDER_WEIGHT|PATHFINDING_TYPE|(?:RUNNING|TOTAL)_SCRIPT_COUNT|SCRIPT_(?:MEMORY|TIME))|TYPE_(?:INTEGER|FLOAT|STRING|KEY|VECTOR|ROTATION|INVALID)|(?:DEBUG|PUBLIC)_CHANNEL|ATTACH_(?:AVATAR_CENTER|CHEST|HEAD|BACK|PELVIS|MOUTH|CHIN|NECK|NOSE|BELLY|[LR](?:SHOULDER|HAND|FOOT|EAR|EYE|[UL](?:ARM|LEG)|HIP)|(?:LEFT|RIGHT)_PEC|HUD_(?:CENTER_[12]|TOP_(?:RIGHT|CENTER|LEFT)|BOTTOM(?:_(?:RIGHT|LEFT))?))|LAND_(?:LEVEL|RAISE|LOWER|SMOOTH|NOISE|REVERT)|DATA_(?:ONLINE|NAME|BORN|SIM_(?:POS|STATUS|RATING)|PAYINFO)|PAYMENT_INFO_(?:ON_FILE|USED)|REMOTE_DATA_(?:CHANNEL|REQUEST|REPLY)|PSYS_(?:PART_(?:BF_(?:ZERO|ONE(?:_MINUS_(?:DEST_COLOR|SOURCE_(ALPHA|COLOR)))?|DEST_COLOR|SOURCE_(ALPHA|COLOR))|BLEND_FUNC_(DEST|SOURCE)|FLAGS|(?:START|END)_(?:COLOR|ALPHA|SCALE|GLOW)|MAX_AGE|(?:RIBBON|WIND|INTERP_(?:COLOR|SCALE)|BOUNCE|FOLLOW_(?:SRC|VELOCITY)|TARGET_(?:POS|LINEAR)|EMISSIVE)_MASK)|SRC_(?:MAX_AGE|PATTERN|ANGLE_(?:BEGIN|END)|BURST_(?:RATE|PART_COUNT|RADIUS|SPEED_(?:MIN|MAX))|ACCEL|TEXTURE|TARGET_KEY|OMEGA|PATTERN_(?:DROP|EXPLODE|ANGLE(?:_CONE(?:_EMPTY)?)?)))|VEHICLE_(?:REFERENCE_FRAME|TYPE_(?:NONE|SLED|CAR|BOAT|AIRPLANE|BALLOON)|(?:LINEAR|ANGULAR)_(?:FRICTION_TIMESCALE|MOTOR_DIRECTION)|LINEAR_MOTOR_OFFSET|HOVER_(?:HEIGHT|EFFICIENCY|TIMESCALE)|BUOYANCY|(?:LINEAR|ANGULAR)_(?:DEFLECTION_(?:EFFICIENCY|TIMESCALE)|MOTOR_(?:DECAY_)?TIMESCALE)|VERTICAL_ATTRACTION_(?:EFFICIENCY|TIMESCALE)|BANKING_(?:EFFICIENCY|MIX|TIMESCALE)|FLAG_(?:NO_DEFLECTION_UP|LIMIT_(?:ROLL_ONLY|MOTOR_UP)|HOVER_(?:(?:WATER|TERRAIN|UP)_ONLY|GLOBAL_HEIGHT)|MOUSELOOK_(?:STEER|BANK)|CAMERA_DECOUPLED))|PRIM_(?:TYPE(?:_(?:BOX|CYLINDER|PRISM|SPHERE|TORUS|TUBE|RING|SCULPT))?|HOLE_(?:DEFAULT|CIRCLE|SQUARE|TRIANGLE)|MATERIAL(?:_(?:STONE|METAL|GLASS|WOOD|FLESH|PLASTIC|RUBBER))?|SHINY_(?:NONE|LOW|MEDIUM|HIGH)|BUMP_(?:NONE|BRIGHT|DARK|WOOD|BARK|BRICKS|CHECKER|CONCRETE|TILE|STONE|DISKS|GRAVEL|BLOBS|SIDING|LARGET
ILE|STUCCO|SUCTION|WEAVE)|TEXGEN_(?:DEFAULT|PLANAR)|SCULPT_(?:TYPE_(?:SPHERE|TORUS|PLANE|CYLINDER|MASK)|FLAG_(?:MIRROR|INVERT))|PHYSICS(?:_(?:SHAPE_(?:CONVEX|NONE|PRIM|TYPE)))?|(?:POS|ROT)_LOCAL|SLICE|TEXT|FLEXIBLE|POINT_LIGHT|TEMP_ON_REZ|PHANTOM|POSITION|SIZE|ROTATION|TEXTURE|NAME|OMEGA|DESC|LINK_TARGET|COLOR|BUMP_SHINY|FULLBRIGHT|TEXGEN|GLOW|MEDIA_(?:ALT_IMAGE_ENABLE|CONTROLS|(?:CURRENT|HOME)_URL|AUTO_(?:LOOP|PLAY|SCALE|ZOOM)|FIRST_CLICK_INTERACT|(?:WIDTH|HEIGHT)_PIXELS|WHITELIST(?:_ENABLE)?|PERMS_(?:INTERACT|CONTROL)|PARAM_MAX|CONTROLS_(?:STANDARD|MINI)|PERM_(?:NONE|OWNER|GROUP|ANYONE)|MAX_(?:URL_LENGTH|WHITELIST_(?:SIZE|COUNT)|(?:WIDTH|HEIGHT)_PIXELS)))|MASK_(?:BASE|OWNER|GROUP|EVERYONE|NEXT)|PERM_(?:TRANSFER|MODIFY|COPY|MOVE|ALL)|PARCEL_(?:MEDIA_COMMAND_(?:STOP|PAUSE|PLAY|LOOP|TEXTURE|URL|TIME|AGENT|UNLOAD|AUTO_ALIGN|TYPE|SIZE|DESC|LOOP_SET)|FLAG_(?:ALLOW_(?:FLY|(?:GROUP_)?SCRIPTS|LANDMARK|TERRAFORM|DAMAGE|CREATE_(?:GROUP_)?OBJECTS)|USE_(?:ACCESS_(?:GROUP|LIST)|BAN_LIST|LAND_PASS_LIST)|LOCAL_SOUND_ONLY|RESTRICT_PUSHOBJECT|ALLOW_(?:GROUP|ALL)_OBJECT_ENTRY)|COUNT_(?:TOTAL|OWNER|GROUP|OTHER|SELECTED|TEMP)|DETAILS_(?:NAME|DESC|OWNER|GROUP|AREA|ID|SEE_AVATARS))|LIST_STAT_(?:MAX|MIN|MEAN|MEDIAN|STD_DEV|SUM(?:_SQUARES)?|NUM_COUNT|GEOMETRIC_MEAN|RANGE)|PAY_(?:HIDE|DEFAULT)|REGION_FLAG_(?:ALLOW_DAMAGE|FIXED_SUN|BLOCK_TERRAFORM|SANDBOX|DISABLE_(?:COLLISIONS|PHYSICS)|BLOCK_FLY|ALLOW_DIRECT_TELEPORT|RESTRICT_PUSHOBJECT)|HTTP_(?:METHOD|MIMETYPE|BODY_(?:MAXLENGTH|TRUNCATED)|CUSTOM_HEADER|PRAGMA_NO_CACHE|VERBOSE_THROTTLE|VERIFY_CERT)|STRING_(?:TRIM(?:_(?:HEAD|TAIL))?)|CLICK_ACTION_(?:NONE|TOUCH|SIT|BUY|PAY|OPEN(?:_MEDIA)?|PLAY|ZOOM)|TOUCH_INVALID_FACE|PROFILE_(?:NONE|SCRIPT_MEMORY)|RC_(?:DATA_FLAGS|DETECT_PHANTOM|GET_(?:LINK_NUM|NORMAL|ROOT_KEY)|MAX_HITS|REJECT_(?:TYPES|AGENTS|(?:NON)?PHYSICAL|LAND))|RCERR_(?:CAST_TIME_EXCEEDED|SIM_PERF_LOW|UNKNOWN)|ESTATE_ACCESS_(?:ALLOWED_(?:AGENT|GROUP)_(?:ADD|REMOVE)|BANNED_AGENT_(?:ADD|REMOVE))|DENSITY|FRICTION|RESTITUTION|GRAVITY_MULTIP
LIER|KFM_(?:COMMAND|CMD_(?:PLAY|STOP|PAUSE|SET_MODE)|MODE|FORWARD|LOOP|PING_PONG|REVERSE|DATA|ROTATION|TRANSLATION)|ERR_(?:GENERIC|PARCEL_PERMISSIONS|MALFORMED_PARAMS|RUNTIME_PERMISSIONS|THROTTLED)|CHARACTER_(?:CMD_(?:(?:SMOOTH_)?STOP|JUMP)|DESIRED_(?:TURN_)?SPEED|RADIUS|STAY_WITHIN_PARCEL|LENGTH|ORIENTATION|ACCOUNT_FOR_SKIPPED_FRAMES|AVOIDANCE_MODE|TYPE(?:_(?:[A-D]|NONE))?|MAX_(?:DECEL|TURN_RADIUS|(?:ACCEL|SPEED)))|PURSUIT_(?:OFFSET|FUZZ_FACTOR|GOAL_TOLERANCE|INTERCEPT)|REQUIRE_LINE_OF_SIGHT|FORCE_DIRECT_PATH|VERTICAL|HORIZONTAL|AVOID_(?:CHARACTERS|DYNAMIC_OBSTACLES|NONE)|PU_(?:EVADE_(?:HIDDEN|SPOTTED)|FAILURE_(?:DYNAMIC_PATHFINDING_DISABLED|INVALID_(?:GOAL|START)|NO_(?:NAVMESH|VALID_DESTINATION)|OTHER|TARGET_GONE|(?:PARCEL_)?UNREACHABLE)|(?:GOAL|SLOWDOWN_DISTANCE)_REACHED)|TRAVERSAL_TYPE(?:_(?:FAST|NONE|SLOW))?|CONTENT_TYPE_(?:ATOM|FORM|HTML|JSON|LLSD|RSS|TEXT|XHTML|XML)|GCNP_(?:RADIUS|STATIC)|(?:PATROL|WANDER)_PAUSE_AT_WAYPOINTS|OPT_(?:AVATAR|CHARACTER|EXCLUSION_VOLUME|LEGACY_LINKSET|MATERIAL_VOLUME|OTHER|STATIC_OBSTACLE|WALKABLE)|SIM_STAT_PCT_CHARS_STEPPED)\b' + lsl_constants_integer_boolean = r'\b(?:FALSE|TRUE)\b' + lsl_constants_rotation = r'\b(?:ZERO_ROTATION)\b' + lsl_constants_string = r'\b(?:EOF|JSON_(?:ARRAY|DELETE|FALSE|INVALID|NULL|NUMBER|OBJECT|STRING|TRUE)|NULL_KEY|TEXTURE_(?:BLANK|DEFAULT|MEDIA|PLYWOOD|TRANSPARENT)|URL_REQUEST_(?:GRANTED|DENIED))\b' + lsl_constants_vector = r'\b(?:TOUCH_INVALID_(?:TEXCOORD|VECTOR)|ZERO_VECTOR)\b' + lsl_invalid_broken = r'\b(?:LAND_(?:LARGE|MEDIUM|SMALL)_BRUSH)\b' + lsl_invalid_deprecated = r'\b(?:ATTACH_[LR]PEC|DATA_RATING|OBJECT_ATTACHMENT_(?:GEOMETRY_BYTES|SURFACE_AREA)|PRIM_(?:CAST_SHADOWS|MATERIAL_LIGHT|TYPE_LEGACY)|PSYS_SRC_(?:INNER|OUTER)ANGLE|VEHICLE_FLAG_NO_FLY_UP|ll(?:Cloud|Make(?:Explosion|Fountain|Smoke|Fire)|RemoteDataSetRegion|Sound(?:Preload)?|XorBase64Strings(?:Correct)?))\b' + lsl_invalid_illegal = r'\b(?:event)\b' + lsl_invalid_unimplemented = 
r'\b(?:CHARACTER_(?:MAX_ANGULAR_(?:ACCEL|SPEED)|TURN_SPEED_MULTIPLIER)|PERMISSION_(?:CHANGE_(?:JOINTS|PERMISSIONS)|RELEASE_OWNERSHIP|REMAP_CONTROLS)|PRIM_PHYSICS_MATERIAL|PSYS_SRC_OBJ_REL_MASK|ll(?:CollisionSprite|(?:Stop)?PointAt|(?:(?:Refresh|Set)Prim)URL|(?:Take|Release)Camera|RemoteLoadScript))\b' + lsl_reserved_godmode = r'\b(?:ll(?:GodLikeRezObject|Set(?:Inventory|Object)PermMask))\b' + lsl_reserved_log = r'\b(?:print)\b' + lsl_operators = r'\+\+|\-\-|<<|>>|&&?|\|\|?|\^|~|[!%<>=*+\-/]=?' + + tokens = { + 'root': + [ + (r'//.*?\n', Comment.Single), + (r'/\*', Comment.Multiline, 'comment'), + (r'"', String.Double, 'string'), + (lsl_keywords, Keyword), + (lsl_types, Keyword.Type), + (lsl_states, Name.Class), + (lsl_events, Name.Builtin), + (lsl_functions_builtin, Name.Function), + (lsl_constants_float, Keyword.Constant), + (lsl_constants_integer, Keyword.Constant), + (lsl_constants_integer_boolean, Keyword.Constant), + (lsl_constants_rotation, Keyword.Constant), + (lsl_constants_string, Keyword.Constant), + (lsl_constants_vector, Keyword.Constant), + (lsl_invalid_broken, Error), + (lsl_invalid_deprecated, Error), + (lsl_invalid_illegal, Error), + (lsl_invalid_unimplemented, Error), + (lsl_reserved_godmode, Keyword.Reserved), + (lsl_reserved_log, Keyword.Reserved), + (r'\b([a-zA-Z_]\w*)\b', Name.Variable), + (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d*', Number.Float), + (r'(\d+\.\d*|\.\d+)', Number.Float), + (r'0[xX][0-9a-fA-F]+', Number.Hex), + (r'\d+', Number.Integer), + (lsl_operators, Operator), + (r':=?', Error), + (r'[,;{}()\[\]]', Punctuation), + (r'\n+', Whitespace), + (r'\s+', Whitespace) + ], + 'comment': + [ + (r'[^*/]+', Comment.Multiline), + (r'/\*', Comment.Multiline, '#push'), + (r'\*/', Comment.Multiline, '#pop'), + (r'[*/]', Comment.Multiline) + ], + 'string': + [ + (r'\\([nt"\\])', String.Escape), + (r'"', String.Double, '#pop'), + (r'\\.', Error), + (r'[^"\\]+', String.Double), + ] + } + + +class AppleScriptLexer(RegexLexer): + """ + For `AppleScript 
source code + `_, + including `AppleScript Studio + `_. + Contributed by Andreas Amann . + + .. versionadded:: 1.0 + """ + + name = 'AppleScript' + aliases = ['applescript'] + filenames = ['*.applescript'] + + flags = re.MULTILINE | re.DOTALL + + Identifiers = r'[a-zA-Z]\w*' + + # XXX: use words() for all of these + Literals = ('AppleScript', 'current application', 'false', 'linefeed', + 'missing value', 'pi', 'quote', 'result', 'return', 'space', + 'tab', 'text item delimiters', 'true', 'version') + Classes = ('alias ', 'application ', 'boolean ', 'class ', 'constant ', + 'date ', 'file ', 'integer ', 'list ', 'number ', 'POSIX file ', + 'real ', 'record ', 'reference ', 'RGB color ', 'script ', + 'text ', 'unit types', '(?:Unicode )?text', 'string') + BuiltIn = ('attachment', 'attribute run', 'character', 'day', 'month', + 'paragraph', 'word', 'year') + HandlerParams = ('about', 'above', 'against', 'apart from', 'around', + 'aside from', 'at', 'below', 'beneath', 'beside', + 'between', 'for', 'given', 'instead of', 'on', 'onto', + 'out of', 'over', 'since') + Commands = ('ASCII (character|number)', 'activate', 'beep', 'choose URL', + 'choose application', 'choose color', 'choose file( name)?', + 'choose folder', 'choose from list', + 'choose remote application', 'clipboard info', + 'close( access)?', 'copy', 'count', 'current date', 'delay', + 'delete', 'display (alert|dialog)', 'do shell script', + 'duplicate', 'exists', 'get eof', 'get volume settings', + 'info for', 'launch', 'list (disks|folder)', 'load script', + 'log', 'make', 'mount volume', 'new', 'offset', + 'open( (for access|location))?', 'path to', 'print', 'quit', + 'random number', 'read', 'round', 'run( script)?', + 'say', 'scripting components', + 'set (eof|the clipboard to|volume)', 'store script', + 'summarize', 'system attribute', 'system info', + 'the clipboard', 'time to GMT', 'write', 'quoted form') + References = ('(in )?back of', '(in )?front of', '[0-9]+(st|nd|rd|th)', + 'first', 
'second', 'third', 'fourth', 'fifth', 'sixth', + 'seventh', 'eighth', 'ninth', 'tenth', 'after', 'back', + 'before', 'behind', 'every', 'front', 'index', 'last', + 'middle', 'some', 'that', 'through', 'thru', 'where', 'whose') + Operators = ("and", "or", "is equal", "equals", "(is )?equal to", "is not", + "isn't", "isn't equal( to)?", "is not equal( to)?", + "doesn't equal", "does not equal", "(is )?greater than", + "comes after", "is not less than or equal( to)?", + "isn't less than or equal( to)?", "(is )?less than", + "comes before", "is not greater than or equal( to)?", + "isn't greater than or equal( to)?", + "(is )?greater than or equal( to)?", "is not less than", + "isn't less than", "does not come before", + "doesn't come before", "(is )?less than or equal( to)?", + "is not greater than", "isn't greater than", + "does not come after", "doesn't come after", "starts? with", + "begins? with", "ends? with", "contains?", "does not contain", + "doesn't contain", "is in", "is contained by", "is not in", + "is not contained by", "isn't contained by", "div", "mod", + "not", "(a )?(ref( to)?|reference to)", "is", "does") + Control = ('considering', 'else', 'error', 'exit', 'from', 'if', + 'ignoring', 'in', 'repeat', 'tell', 'then', 'times', 'to', + 'try', 'until', 'using terms from', 'while', 'whith', + 'with timeout( of)?', 'with transaction', 'by', 'continue', + 'end', 'its?', 'me', 'my', 'return', 'of', 'as') + Declarations = ('global', 'local', 'prop(erty)?', 'set', 'get') + Reserved = ('but', 'put', 'returning', 'the') + StudioClasses = ('action cell', 'alert reply', 'application', 'box', + 'browser( cell)?', 'bundle', 'button( cell)?', 'cell', + 'clip view', 'color well', 'color-panel', + 'combo box( item)?', 'control', + 'data( (cell|column|item|row|source))?', 'default entry', + 'dialog reply', 'document', 'drag info', 'drawer', + 'event', 'font(-panel)?', 'formatter', + 'image( (cell|view))?', 'matrix', 'menu( item)?', 'item', + 'movie( view)?', 
'open-panel', 'outline view', 'panel', + 'pasteboard', 'plugin', 'popup button', + 'progress indicator', 'responder', 'save-panel', + 'scroll view', 'secure text field( cell)?', 'slider', + 'sound', 'split view', 'stepper', 'tab view( item)?', + 'table( (column|header cell|header view|view))', + 'text( (field( cell)?|view))?', 'toolbar( item)?', + 'user-defaults', 'view', 'window') + StudioEvents = ('accept outline drop', 'accept table drop', 'action', + 'activated', 'alert ended', 'awake from nib', 'became key', + 'became main', 'begin editing', 'bounds changed', + 'cell value', 'cell value changed', 'change cell value', + 'change item value', 'changed', 'child of item', + 'choose menu item', 'clicked', 'clicked toolbar item', + 'closed', 'column clicked', 'column moved', + 'column resized', 'conclude drop', 'data representation', + 'deminiaturized', 'dialog ended', 'document nib name', + 'double clicked', 'drag( (entered|exited|updated))?', + 'drop', 'end editing', 'exposed', 'idle', 'item expandable', + 'item value', 'item value changed', 'items changed', + 'keyboard down', 'keyboard up', 'launched', + 'load data representation', 'miniaturized', 'mouse down', + 'mouse dragged', 'mouse entered', 'mouse exited', + 'mouse moved', 'mouse up', 'moved', + 'number of browser rows', 'number of items', + 'number of rows', 'open untitled', 'opened', 'panel ended', + 'parameters updated', 'plugin loaded', 'prepare drop', + 'prepare outline drag', 'prepare outline drop', + 'prepare table drag', 'prepare table drop', + 'read from file', 'resigned active', 'resigned key', + 'resigned main', 'resized( sub views)?', + 'right mouse down', 'right mouse dragged', + 'right mouse up', 'rows changed', 'scroll wheel', + 'selected tab view item', 'selection changed', + 'selection changing', 'should begin editing', + 'should close', 'should collapse item', + 'should end editing', 'should expand item', + 'should open( untitled)?', + 'should quit( after last window closed)?', + 'should 
select column', 'should select item', + 'should select row', 'should select tab view item', + 'should selection change', 'should zoom', 'shown', + 'update menu item', 'update parameters', + 'update toolbar item', 'was hidden', 'was miniaturized', + 'will become active', 'will close', 'will dismiss', + 'will display browser cell', 'will display cell', + 'will display item cell', 'will display outline cell', + 'will finish launching', 'will hide', 'will miniaturize', + 'will move', 'will open', 'will pop up', 'will quit', + 'will resign active', 'will resize( sub views)?', + 'will select tab view item', 'will show', 'will zoom', + 'write to file', 'zoomed') + StudioCommands = ('animate', 'append', 'call method', 'center', + 'close drawer', 'close panel', 'display', + 'display alert', 'display dialog', 'display panel', 'go', + 'hide', 'highlight', 'increment', 'item for', + 'load image', 'load movie', 'load nib', 'load panel', + 'load sound', 'localized string', 'lock focus', 'log', + 'open drawer', 'path for', 'pause', 'perform action', + 'play', 'register', 'resume', 'scroll', 'select( all)?', + 'show', 'size to fit', 'start', 'step back', + 'step forward', 'stop', 'synchronize', 'unlock focus', + 'update') + StudioProperties = ('accepts arrow key', 'action method', 'active', + 'alignment', 'allowed identifiers', + 'allows branch selection', 'allows column reordering', + 'allows column resizing', 'allows column selection', + 'allows customization', + 'allows editing text attributes', + 'allows empty selection', 'allows mixed state', + 'allows multiple selection', 'allows reordering', + 'allows undo', 'alpha( value)?', 'alternate image', + 'alternate increment value', 'alternate title', + 'animation delay', 'associated file name', + 'associated object', 'auto completes', 'auto display', + 'auto enables items', 'auto repeat', + 'auto resizes( outline column)?', + 'auto save expanded items', 'auto save name', + 'auto save table columns', 'auto saves configuration', + 
'auto scroll', 'auto sizes all columns to fit', + 'auto sizes cells', 'background color', 'bezel state', + 'bezel style', 'bezeled', 'border rect', 'border type', + 'bordered', 'bounds( rotation)?', 'box type', + 'button returned', 'button type', + 'can choose directories', 'can choose files', + 'can draw', 'can hide', + 'cell( (background color|size|type))?', 'characters', + 'class', 'click count', 'clicked( data)? column', + 'clicked data item', 'clicked( data)? row', + 'closeable', 'collating', 'color( (mode|panel))', + 'command key down', 'configuration', + 'content(s| (size|view( margins)?))?', 'context', + 'continuous', 'control key down', 'control size', + 'control tint', 'control view', + 'controller visible', 'coordinate system', + 'copies( on scroll)?', 'corner view', 'current cell', + 'current column', 'current( field)? editor', + 'current( menu)? item', 'current row', + 'current tab view item', 'data source', + 'default identifiers', 'delta (x|y|z)', + 'destination window', 'directory', 'display mode', + 'displayed cell', 'document( (edited|rect|view))?', + 'double value', 'dragged column', 'dragged distance', + 'dragged items', 'draws( cell)? background', + 'draws grid', 'dynamically scrolls', 'echos bullets', + 'edge', 'editable', 'edited( data)? column', + 'edited data item', 'edited( data)? 
row', 'enabled', + 'enclosing scroll view', 'ending page', + 'error handling', 'event number', 'event type', + 'excluded from windows menu', 'executable path', + 'expanded', 'fax number', 'field editor', 'file kind', + 'file name', 'file type', 'first responder', + 'first visible column', 'flipped', 'floating', + 'font( panel)?', 'formatter', 'frameworks path', + 'frontmost', 'gave up', 'grid color', 'has data items', + 'has horizontal ruler', 'has horizontal scroller', + 'has parent data item', 'has resize indicator', + 'has shadow', 'has sub menu', 'has vertical ruler', + 'has vertical scroller', 'header cell', 'header view', + 'hidden', 'hides when deactivated', 'highlights by', + 'horizontal line scroll', 'horizontal page scroll', + 'horizontal ruler view', 'horizontally resizable', + 'icon image', 'id', 'identifier', + 'ignores multiple clicks', + 'image( (alignment|dims when disabled|frame style|scaling))?', + 'imports graphics', 'increment value', + 'indentation per level', 'indeterminate', 'index', + 'integer value', 'intercell spacing', 'item height', + 'key( (code|equivalent( modifier)?|window))?', + 'knob thickness', 'label', 'last( visible)? column', + 'leading offset', 'leaf', 'level', 'line scroll', + 'loaded', 'localized sort', 'location', 'loop mode', + 'main( (bunde|menu|window))?', 'marker follows cell', + 'matrix mode', 'maximum( content)? size', + 'maximum visible columns', + 'menu( form representation)?', 'miniaturizable', + 'miniaturized', 'minimized image', 'minimized title', + 'minimum column width', 'minimum( content)? 
size', + 'modal', 'modified', 'mouse down state', + 'movie( (controller|file|rect))?', 'muted', 'name', + 'needs display', 'next state', 'next text', + 'number of tick marks', 'only tick mark values', + 'opaque', 'open panel', 'option key down', + 'outline table column', 'page scroll', 'pages across', + 'pages down', 'palette label', 'pane splitter', + 'parent data item', 'parent window', 'pasteboard', + 'path( (names|separator))?', 'playing', + 'plays every frame', 'plays selection only', 'position', + 'preferred edge', 'preferred type', 'pressure', + 'previous text', 'prompt', 'properties', + 'prototype cell', 'pulls down', 'rate', + 'released when closed', 'repeated', + 'requested print time', 'required file type', + 'resizable', 'resized column', 'resource path', + 'returns records', 'reuses columns', 'rich text', + 'roll over', 'row height', 'rulers visible', + 'save panel', 'scripts path', 'scrollable', + 'selectable( identifiers)?', 'selected cell', + 'selected( data)? columns?', 'selected data items?', + 'selected( data)? 
rows?', 'selected item identifier', + 'selection by rect', 'send action on arrow key', + 'sends action when done editing', 'separates columns', + 'separator item', 'sequence number', 'services menu', + 'shared frameworks path', 'shared support path', + 'sheet', 'shift key down', 'shows alpha', + 'shows state by', 'size( mode)?', + 'smart insert delete enabled', 'sort case sensitivity', + 'sort column', 'sort order', 'sort type', + 'sorted( data rows)?', 'sound', 'source( mask)?', + 'spell checking enabled', 'starting page', 'state', + 'string value', 'sub menu', 'super menu', 'super view', + 'tab key traverses cells', 'tab state', 'tab type', + 'tab view', 'table view', 'tag', 'target( printer)?', + 'text color', 'text container insert', + 'text container origin', 'text returned', + 'tick mark position', 'time stamp', + 'title(d| (cell|font|height|position|rect))?', + 'tool tip', 'toolbar', 'trailing offset', 'transparent', + 'treat packages as directories', 'truncated labels', + 'types', 'unmodified characters', 'update views', + 'use sort indicator', 'user defaults', + 'uses data source', 'uses ruler', + 'uses threaded animation', + 'uses title from previous column', 'value wraps', + 'version', + 'vertical( (line scroll|page scroll|ruler view))?', + 'vertically resizable', 'view', + 'visible( document rect)?', 'volume', 'width', 'window', + 'windows menu', 'wraps', 'zoomable', 'zoomed') + + tokens = { + 'root': [ + (r'\s+', Text), + (u'¬\\n', String.Escape), + (r"'s\s+", Text), # This is a possessive, consider moving + (r'(--|#).*?$', Comment), + (r'\(\*', Comment.Multiline, 'comment'), + (r'[(){}!,.:]', Punctuation), + (u'(«)([^»]+)(»)', + bygroups(Text, Name.Builtin, Text)), + (r'\b((?:considering|ignoring)\s*)' + r'(application responses|case|diacriticals|hyphens|' + r'numeric strings|punctuation|white space)', + bygroups(Keyword, Name.Builtin)), + (u'(-|\\*|\\+|&|≠|>=?|<=?|=|≥|≤|/|÷|\\^)', Operator), + (r"\b(%s)\b" % '|'.join(Operators), Operator.Word), + 
(r'^(\s*(?:on|end)\s+)' + r'(%s)' % '|'.join(StudioEvents[::-1]), + bygroups(Keyword, Name.Function)), + (r'^(\s*)(in|on|script|to)(\s+)', bygroups(Text, Keyword, Text)), + (r'\b(as )(%s)\b' % '|'.join(Classes), + bygroups(Keyword, Name.Class)), + (r'\b(%s)\b' % '|'.join(Literals), Name.Constant), + (r'\b(%s)\b' % '|'.join(Commands), Name.Builtin), + (r'\b(%s)\b' % '|'.join(Control), Keyword), + (r'\b(%s)\b' % '|'.join(Declarations), Keyword), + (r'\b(%s)\b' % '|'.join(Reserved), Name.Builtin), + (r'\b(%s)s?\b' % '|'.join(BuiltIn), Name.Builtin), + (r'\b(%s)\b' % '|'.join(HandlerParams), Name.Builtin), + (r'\b(%s)\b' % '|'.join(StudioProperties), Name.Attribute), + (r'\b(%s)s?\b' % '|'.join(StudioClasses), Name.Builtin), + (r'\b(%s)\b' % '|'.join(StudioCommands), Name.Builtin), + (r'\b(%s)\b' % '|'.join(References), Name.Builtin), + (r'"(\\\\|\\"|[^"])*"', String.Double), + (r'\b(%s)\b' % Identifiers, Name.Variable), + (r'[-+]?(\d+\.\d*|\d*\.\d+)(E[-+][0-9]+)?', Number.Float), + (r'[-+]?\d+', Number.Integer), + ], + 'comment': [ + ('\(\*', Comment.Multiline, '#push'), + ('\*\)', Comment.Multiline, '#pop'), + ('[^*(]+', Comment.Multiline), + ('[*(]', Comment.Multiline), + ], + } + + +class RexxLexer(RegexLexer): + """ + `Rexx `_ is a scripting language available for + a wide range of different platforms with its roots found on mainframe + systems. It is popular for I/O- and data based tasks and can act as glue + language to bind different applications together. + + .. 
versionadded:: 2.0 + """ + name = 'Rexx' + aliases = ['rexx', 'arexx'] + filenames = ['*.rexx', '*.rex', '*.rx', '*.arexx'] + mimetypes = ['text/x-rexx'] + flags = re.IGNORECASE + + tokens = { + 'root': [ + (r'\s', Whitespace), + (r'/\*', Comment.Multiline, 'comment'), + (r'"', String, 'string_double'), + (r"'", String, 'string_single'), + (r'[0-9]+(\.[0-9]+)?(e[+-]?[0-9])?', Number), + (r'([a-z_]\w*)(\s*)(:)(\s*)(procedure)\b', + bygroups(Name.Function, Whitespace, Operator, Whitespace, + Keyword.Declaration)), + (r'([a-z_]\w*)(\s*)(:)', + bygroups(Name.Label, Whitespace, Operator)), + include('function'), + include('keyword'), + include('operator'), + (r'[a-z_]\w*', Text), + ], + 'function': [ + (words(( + 'abbrev', 'abs', 'address', 'arg', 'b2x', 'bitand', 'bitor', 'bitxor', + 'c2d', 'c2x', 'center', 'charin', 'charout', 'chars', 'compare', + 'condition', 'copies', 'd2c', 'd2x', 'datatype', 'date', 'delstr', + 'delword', 'digits', 'errortext', 'form', 'format', 'fuzz', 'insert', + 'lastpos', 'left', 'length', 'linein', 'lineout', 'lines', 'max', + 'min', 'overlay', 'pos', 'queued', 'random', 'reverse', 'right', 'sign', + 'sourceline', 'space', 'stream', 'strip', 'substr', 'subword', 'symbol', + 'time', 'trace', 'translate', 'trunc', 'value', 'verify', 'word', + 'wordindex', 'wordlength', 'wordpos', 'words', 'x2b', 'x2c', 'x2d', + 'xrange'), suffix=r'(\s*)(\()'), + bygroups(Name.Builtin, Whitespace, Operator)), + ], + 'keyword': [ + (r'(address|arg|by|call|do|drop|else|end|exit|for|forever|if|' + r'interpret|iterate|leave|nop|numeric|off|on|options|parse|' + r'pull|push|queue|return|say|select|signal|to|then|trace|until|' + r'while)\b', Keyword.Reserved), + ], + 'operator': [ + (r'(-|//|/|\(|\)|\*\*|\*|\\<<|\\<|\\==|\\=|\\>>|\\>|\\|\|\||\||' + r'&&|&|%|\+|<<=|<<|<=|<>|<|==|=|><|>=|>>=|>>|>|¬<<|¬<|¬==|¬=|' + r'¬>>|¬>|¬|\.|,)', Operator), + ], + 'string_double': [ + (r'[^"\n]+', String), + (r'""', String), + (r'"', String, '#pop'), + (r'\n', Text, '#pop'), # Stray 
linefeed also terminates strings. + ], + 'string_single': [ + (r'[^\'\n]', String), + (r'\'\'', String), + (r'\'', String, '#pop'), + (r'\n', Text, '#pop'), # Stray linefeed also terminates strings. + ], + 'comment': [ + (r'[^*]+', Comment.Multiline), + (r'\*/', Comment.Multiline, '#pop'), + (r'\*', Comment.Multiline), + ] + } + + _c = lambda s: re.compile(s, re.MULTILINE) + _ADDRESS_COMMAND_PATTERN = _c(r'^\s*address\s+command\b') + _ADDRESS_PATTERN = _c(r'^\s*address\s+') + _DO_WHILE_PATTERN = _c(r'^\s*do\s+while\b') + _IF_THEN_DO_PATTERN = _c(r'^\s*if\b.+\bthen\s+do\s*$') + _PROCEDURE_PATTERN = _c(r'^\s*([a-z_]\w*)(\s*)(:)(\s*)(procedure)\b') + _ELSE_DO_PATTERN = _c(r'\belse\s+do\s*$') + _PARSE_ARG_PATTERN = _c(r'^\s*parse\s+(upper\s+)?(arg|value)\b') + PATTERNS_AND_WEIGHTS = ( + (_ADDRESS_COMMAND_PATTERN, 0.2), + (_ADDRESS_PATTERN, 0.05), + (_DO_WHILE_PATTERN, 0.1), + (_ELSE_DO_PATTERN, 0.1), + (_IF_THEN_DO_PATTERN, 0.1), + (_PROCEDURE_PATTERN, 0.5), + (_PARSE_ARG_PATTERN, 0.2), + ) + + def analyse_text(text): + """ + Check for inital comment and patterns that distinguish Rexx from other + C-like languages. + """ + if re.search(r'/\*\**\s*rexx', text, re.IGNORECASE): + # Header matches MVS Rexx requirements, this is certainly a Rexx + # script. + return 1.0 + elif text.startswith('/*'): + # Header matches general Rexx requirements; the source code might + # still be any language using C comments such as C++, C# or Java. + lowerText = text.lower() + result = sum(weight + for (pattern, weight) in RexxLexer.PATTERNS_AND_WEIGHTS + if pattern.search(lowerText)) + 0.01 + return min(result, 1.0) + + +class MOOCodeLexer(RegexLexer): + """ + For `MOOCode `_ (the MOO scripting + language). + + .. 
versionadded:: 0.9 + """ + name = 'MOOCode' + filenames = ['*.moo'] + aliases = ['moocode', 'moo'] + mimetypes = ['text/x-moocode'] + + tokens = { + 'root': [ + # Numbers + (r'(0|[1-9][0-9_]*)', Number.Integer), + # Strings + (r'"(\\\\|\\"|[^"])*"', String), + # exceptions + (r'(E_PERM|E_DIV)', Name.Exception), + # db-refs + (r'((#[-0-9]+)|(\$\w+))', Name.Entity), + # Keywords + (r'\b(if|else|elseif|endif|for|endfor|fork|endfork|while' + r'|endwhile|break|continue|return|try' + r'|except|endtry|finally|in)\b', Keyword), + # builtins + (r'(random|length)', Name.Builtin), + # special variables + (r'(player|caller|this|args)', Name.Variable.Instance), + # skip whitespace + (r'\s+', Text), + (r'\n', Text), + # other operators + (r'([!;=,{}&|:.\[\]@()<>?]+)', Operator), + # function call + (r'(\w+)(\()', bygroups(Name.Function, Operator)), + # variables + (r'(\w+)', Text), + ] + } + + +class HybrisLexer(RegexLexer): + """ + For `Hybris `_ source code. + + .. versionadded:: 1.4 + """ + + name = 'Hybris' + aliases = ['hybris', 'hy'] + filenames = ['*.hy', '*.hyb'] + mimetypes = ['text/x-hybris', 'application/x-hybris'] + + flags = re.MULTILINE | re.DOTALL + + tokens = { + 'root': [ + # method names + (r'^(\s*(?:function|method|operator\s+)+?)' + r'([a-zA-Z_]\w*)' + r'(\s*)(\()', bygroups(Keyword, Name.Function, Text, Operator)), + (r'[^\S\n]+', Text), + (r'//.*?\n', Comment.Single), + (r'/\*.*?\*/', Comment.Multiline), + (r'@[a-zA-Z_][\w.]*', Name.Decorator), + (r'(break|case|catch|next|default|do|else|finally|for|foreach|of|' + r'unless|if|new|return|switch|me|throw|try|while)\b', Keyword), + (r'(extends|private|protected|public|static|throws|function|method|' + r'operator)\b', Keyword.Declaration), + (r'(true|false|null|__FILE__|__LINE__|__VERSION__|__LIB_PATH__|' + r'__INC_PATH__)\b', Keyword.Constant), + (r'(class|struct)(\s+)', + bygroups(Keyword.Declaration, Text), 'class'), + (r'(import|include)(\s+)', + bygroups(Keyword.Namespace, Text), 'import'), + (words(( + 
'gc_collect', 'gc_mm_items', 'gc_mm_usage', 'gc_collect_threshold', + 'urlencode', 'urldecode', 'base64encode', 'base64decode', 'sha1', 'crc32', 'sha2', + 'md5', 'md5_file', 'acos', 'asin', 'atan', 'atan2', 'ceil', 'cos', 'cosh', 'exp', + 'fabs', 'floor', 'fmod', 'log', 'log10', 'pow', 'sin', 'sinh', 'sqrt', 'tan', 'tanh', + 'isint', 'isfloat', 'ischar', 'isstring', 'isarray', 'ismap', 'isalias', 'typeof', + 'sizeof', 'toint', 'tostring', 'fromxml', 'toxml', 'binary', 'pack', 'load', 'eval', + 'var_names', 'var_values', 'user_functions', 'dyn_functions', 'methods', 'call', + 'call_method', 'mknod', 'mkfifo', 'mount', 'umount2', 'umount', 'ticks', 'usleep', + 'sleep', 'time', 'strtime', 'strdate', 'dllopen', 'dlllink', 'dllcall', 'dllcall_argv', + 'dllclose', 'env', 'exec', 'fork', 'getpid', 'wait', 'popen', 'pclose', 'exit', 'kill', + 'pthread_create', 'pthread_create_argv', 'pthread_exit', 'pthread_join', 'pthread_kill', + 'smtp_send', 'http_get', 'http_post', 'http_download', 'socket', 'bind', 'listen', + 'accept', 'getsockname', 'getpeername', 'settimeout', 'connect', 'server', 'recv', + 'send', 'close', 'print', 'println', 'printf', 'input', 'readline', 'serial_open', + 'serial_fcntl', 'serial_get_attr', 'serial_get_ispeed', 'serial_get_ospeed', + 'serial_set_attr', 'serial_set_ispeed', 'serial_set_ospeed', 'serial_write', + 'serial_read', 'serial_close', 'xml_load', 'xml_parse', 'fopen', 'fseek', 'ftell', + 'fsize', 'fread', 'fwrite', 'fgets', 'fclose', 'file', 'readdir', 'pcre_replace', 'size', + 'pop', 'unmap', 'has', 'keys', 'values', 'length', 'find', 'substr', 'replace', 'split', + 'trim', 'remove', 'contains', 'join'), suffix=r'\b'), + Name.Builtin), + (words(( + 'MethodReference', 'Runner', 'Dll', 'Thread', 'Pipe', 'Process', + 'Runnable', 'CGI', 'ClientSocket', 'Socket', 'ServerSocket', + 'File', 'Console', 'Directory', 'Exception'), suffix=r'\b'), + Keyword.Type), + (r'"(\\\\|\\"|[^"])*"', String), + (r"'\\.'|'[^\\]'|'\\u[0-9a-f]{4}'", String.Char), + 
(r'(\.)([a-zA-Z_]\w*)', + bygroups(Operator, Name.Attribute)), + (r'[a-zA-Z_]\w*:', Name.Label), + (r'[a-zA-Z_$]\w*', Name), + (r'[~^*!%&\[\](){}<>|+=:;,./?\-@]+', Operator), + (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), + (r'0x[0-9a-f]+', Number.Hex), + (r'[0-9]+L?', Number.Integer), + (r'\n', Text), + ], + 'class': [ + (r'[a-zA-Z_]\w*', Name.Class, '#pop') + ], + 'import': [ + (r'[\w.]+\*?', Name.Namespace, '#pop') + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/shell.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/shell.py similarity index 87% rename from plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/shell.py rename to plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/shell.py index 5ec9dea..1bbfd7a 100644 --- a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/shell.py +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/shell.py @@ -5,7 +5,7 @@ Lexers for various shells. - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ @@ -27,28 +27,32 @@ class BashLexer(RegexLexer): """ Lexer for (ba|k|)sh shell scripts. - *New in Pygments 0.6.* + .. 
versionadded:: 0.6 """ name = 'Bash' - aliases = ['bash', 'sh', 'ksh'] + aliases = ['bash', 'sh', 'ksh', 'shell'] filenames = ['*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', - '.bashrc', 'bashrc', '.bash_*', 'bash_*'] + '.bashrc', 'bashrc', '.bash_*', 'bash_*', 'PKGBUILD'] mimetypes = ['application/x-sh', 'application/x-shellscript'] tokens = { 'root': [ include('basic'), - (r'\$\(\(', Keyword, 'math'), - (r'\$\(', Keyword, 'paren'), - (r'\${#?', Keyword, 'curly'), (r'`', String.Backtick, 'backticks'), include('data'), + include('interp'), + ], + 'interp': [ + (r'\$\(\(', Keyword, 'math'), + (r'\$\(', Keyword, 'paren'), + (r'\$\{#?', String.Interpol, 'curly'), + (r'\$#?(\w+|.)', Name.Variable), ], 'basic': [ (r'\b(if|fi|else|while|do|done|for|then|return|function|case|' - r'select|continue|until|esac|elif)\s*\b', - Keyword), + r'select|continue|until|esac|elif)(\s*)\b', + bygroups(Keyword, Text)), (r'\b(alias|bg|bind|break|builtin|caller|cd|command|compgen|' r'complete|declare|dirs|disown|echo|enable|eval|exec|exit|' r'export|false|fc|fg|getopts|hash|help|history|jobs|kill|let|' @@ -65,22 +69,28 @@ class BashLexer(RegexLexer): (r'&&|\|\|', Operator), ], 'data': [ - (r'(?s)\$?"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double), - (r"(?s)\$?'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single), + (r'(?s)\$?"(\\\\|\\[0-7]+|\\.|[^"\\$])*"', String.Double), + (r'"', String.Double, 'string'), + (r"(?s)\$'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single), + (r"(?s)'.*?'", String.Single), (r';', Punctuation), (r'&', Punctuation), (r'\|', Punctuation), (r'\s+', Text), - (r'[^=\s\[\]{}()$"\'`\\<&|;]+', Text), (r'\d+(?= |\Z)', Number), - (r'\$#?(\w+|.)', Name.Variable), + (r'[^=\s\[\]{}()$"\'`\\<&|;]+', Text), (r'<', Text), ], + 'string': [ + (r'"', String.Double, '#pop'), + (r'(?s)(\\\\|\\[0-7]+|\\.|[^"\\$])+', String.Double), + include('interp'), + ], 'curly': [ - (r'}', Keyword, '#pop'), + (r'\}', String.Interpol, '#pop'), (r':-', Keyword), - (r'[a-zA-Z0-9_]+', Name.Variable), - 
(r'[^}:"\'`$]+', Punctuation), + (r'\w+', Name.Variable), + (r'[^}:"\'`$\\]+', Punctuation), (r':', Punctuation), include('root'), ], @@ -91,6 +101,8 @@ class BashLexer(RegexLexer): 'math': [ (r'\)\)', Keyword, '#pop'), (r'[-+*/%^|&]|\*\*|\|\|', Operator), + (r'\d+#\d+', Number), + (r'\d+#(?! )', Number), (r'\d+', Number), include('root'), ], @@ -111,7 +123,7 @@ class BashSessionLexer(Lexer): """ Lexer for simplistic shell sessions. - *New in Pygments 1.1.* + .. versionadded:: 1.1 """ name = 'Bash Session' @@ -162,7 +174,7 @@ class ShellSessionLexer(Lexer): """ Lexer for shell sessions that works with different command prompts - *New in Pygments 1.6.* + .. versionadded:: 1.6 """ name = 'Shell Session' @@ -179,7 +191,7 @@ class ShellSessionLexer(Lexer): for match in line_re.finditer(text): line = match.group() - m = re.match(r'^((?:\[?\S+@[^$#%]+)[$#%])(.*\n?)', line) + m = re.match(r'^((?:\[?\S+@[^$#%]+\]?\s*)[$#%])(.*\n?)', line) if m: # To support output lexers (say diff output), the output # needs to be broken by prompts whenever the output lexer @@ -208,10 +220,10 @@ class BatchLexer(RegexLexer): """ Lexer for the DOS/Windows Batch file format. - *New in Pygments 0.7.* + .. 
versionadded:: 0.7 """ name = 'Batchfile' - aliases = ['bat', 'dosbatch', 'winbatch'] + aliases = ['bat', 'batch', 'dosbatch', 'winbatch'] filenames = ['*.bat', '*.cmd'] mimetypes = ['application/x-dos-batch'] @@ -228,9 +240,9 @@ class BatchLexer(RegexLexer): # like %~$VAR:zlt (r'%%?[~$:\w]+%?', Name.Variable), (r'::.*', Comment), # Technically :: only works at BOL - (r'(set)(\s+)(\w+)', bygroups(Keyword, Text, Name.Variable)), - (r'(call)(\s+)(:\w+)', bygroups(Keyword, Text, Name.Label)), - (r'(goto)(\s+)(\w+)', bygroups(Keyword, Text, Name.Label)), + (r'\b(set)(\s+)(\w+)', bygroups(Keyword, Text, Name.Variable)), + (r'\b(call)(\s+)(:\w+)', bygroups(Keyword, Text, Name.Label)), + (r'\b(goto)(\s+)(\w+)', bygroups(Keyword, Text, Name.Label)), (r'\b(set|call|echo|on|off|endlocal|for|do|goto|if|pause|' r'setlocal|shift|errorlevel|exist|defined|cmdextversion|' r'errorlevel|else|cd|md|del|deltree|cls|choice)\b', Keyword), @@ -264,7 +276,7 @@ class TcshLexer(RegexLexer): """ Lexer for tcsh scripts. - *New in Pygments 0.10.* + .. 
versionadded:: 0.10 """ name = 'Tcsh' @@ -276,7 +288,7 @@ class TcshLexer(RegexLexer): 'root': [ include('basic'), (r'\$\(', Keyword, 'paren'), - (r'\${#?', Keyword, 'curly'), + (r'\$\{#?', Keyword, 'curly'), (r'`', String.Backtick, 'backticks'), include('data'), ], @@ -294,24 +306,25 @@ class TcshLexer(RegexLexer): r'umask|unalias|uncomplete|unhash|universe|unlimit|unset|unsetenv|' r'ver|wait|warp|watchlog|where|which)\s*\b', Name.Builtin), - (r'#.*\n', Comment), + (r'#.*', Comment), (r'\\[\w\W]', String.Escape), (r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)), (r'[\[\]{}()=]+', Operator), (r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String), + (r';', Punctuation), ], 'data': [ (r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double), (r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single), (r'\s+', Text), - (r'[^=\s\[\]{}()$"\'`\\]+', Text), + (r'[^=\s\[\]{}()$"\'`\\;#]+', Text), (r'\d+(?= |\Z)', Number), (r'\$#?(\w+|.)', Name.Variable), ], 'curly': [ - (r'}', Keyword, '#pop'), + (r'\}', Keyword, '#pop'), (r':-', Keyword), - (r'[a-zA-Z0-9_]+', Name.Variable), + (r'\w+', Name.Variable), (r'[^}:"\'`$]+', Punctuation), (r':', Punctuation), include('root'), @@ -331,7 +344,7 @@ class PowerShellLexer(RegexLexer): """ For Windows PowerShell code. - *New in Pygments 1.5.* + .. versionadded:: 1.5 """ name = 'PowerShell' aliases = ['powershell', 'posh', 'ps1', 'psm1'] @@ -387,13 +400,13 @@ class PowerShellLexer(RegexLexer): (r'`[\'"$@-]', Punctuation), (r'"', String.Double, 'string'), (r"'([^']|'')*'", String.Single), - (r'(\$|@@|@)((global|script|private|env):)?[a-z0-9_]+', + (r'(\$|@@|@)((global|script|private|env):)?\w+', Name.Variable), (r'(%s)\b' % '|'.join(keywords), Keyword), (r'-(%s)\b' % '|'.join(operators), Operator), - (r'(%s)-[a-z_][a-z0-9_]*\b' % '|'.join(verbs), Name.Builtin), - (r'\[[a-z_\[][a-z0-9_. `,\[\]]*\]', Name.Constant), # .net [type]s - (r'-[a-z_][a-z0-9_]*', Name), + (r'(%s)-[a-z_]\w*\b' % '|'.join(verbs), Name.Builtin), + (r'\[[a-z_\[][\w. 
`,\[\]]*\]', Name.Constant), # .net [type]s + (r'-[a-z_]\w*', Name), (r'\w+', Name), (r'[.,;@{}\[\]$()=+*/\\&%!~?^`|<>-]|::', Punctuation), ], @@ -408,7 +421,7 @@ class PowerShellLexer(RegexLexer): (r'[#&.]', Comment.Multiline), ], 'string': [ - (r"`[0abfnrtv'\"\$]", String.Escape), + (r"`[0abfnrtv'\"$`]", String.Escape), (r'[^$`"]+', String.Double), (r'\$\(', Punctuation, 'child'), (r'""', String.Double), diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/smalltalk.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/smalltalk.py new file mode 100644 index 0000000..4e78ac0 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/smalltalk.py @@ -0,0 +1,195 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.smalltalk + ~~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for Smalltalk and related languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, include, bygroups, default +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['SmalltalkLexer', 'NewspeakLexer'] + + +class SmalltalkLexer(RegexLexer): + """ + For `Smalltalk `_ syntax. + Contributed by Stefan Matthias Aust. + Rewritten by Nils Winter. + + .. 
versionadded:: 0.10 + """ + name = 'Smalltalk' + filenames = ['*.st'] + aliases = ['smalltalk', 'squeak', 'st'] + mimetypes = ['text/x-smalltalk'] + + tokens = { + 'root': [ + (r'(<)(\w+:)(.*?)(>)', bygroups(Text, Keyword, Text, Text)), + include('squeak fileout'), + include('whitespaces'), + include('method definition'), + (r'(\|)([\w\s]*)(\|)', bygroups(Operator, Name.Variable, Operator)), + include('objects'), + (r'\^|\:=|\_', Operator), + # temporaries + (r'[\]({}.;!]', Text), + ], + 'method definition': [ + # Not perfect can't allow whitespaces at the beginning and the + # without breaking everything + (r'([a-zA-Z]+\w*:)(\s*)(\w+)', + bygroups(Name.Function, Text, Name.Variable)), + (r'^(\b[a-zA-Z]+\w*\b)(\s*)$', bygroups(Name.Function, Text)), + (r'^([-+*/\\~<>=|&!?,@%]+)(\s*)(\w+)(\s*)$', + bygroups(Name.Function, Text, Name.Variable, Text)), + ], + 'blockvariables': [ + include('whitespaces'), + (r'(:)(\s*)(\w+)', + bygroups(Operator, Text, Name.Variable)), + (r'\|', Operator, '#pop'), + default('#pop'), # else pop + ], + 'literals': [ + (r"'(''|[^'])*'", String, 'afterobject'), + (r'\$.', String.Char, 'afterobject'), + (r'#\(', String.Symbol, 'parenth'), + (r'\)', Text, 'afterobject'), + (r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number, 'afterobject'), + ], + '_parenth_helper': [ + include('whitespaces'), + (r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number), + (r'[-+*/\\~<>=|&#!?,@%\w:]+', String.Symbol), + # literals + (r"'(''|[^'])*'", String), + (r'\$.', String.Char), + (r'#*\(', String.Symbol, 'inner_parenth'), + ], + 'parenth': [ + # This state is a bit tricky since + # we can't just pop this state + (r'\)', String.Symbol, ('root', 'afterobject')), + include('_parenth_helper'), + ], + 'inner_parenth': [ + (r'\)', String.Symbol, '#pop'), + include('_parenth_helper'), + ], + 'whitespaces': [ + # skip whitespace and comments + (r'\s+', Text), + (r'"(""|[^"])*"', Comment), + ], + 'objects': [ + (r'\[', Text, 'blockvariables'), + (r'\]', Text, 'afterobject'), + 
(r'\b(self|super|true|false|nil|thisContext)\b', + Name.Builtin.Pseudo, 'afterobject'), + (r'\b[A-Z]\w*(?!:)\b', Name.Class, 'afterobject'), + (r'\b[a-z]\w*(?!:)\b', Name.Variable, 'afterobject'), + (r'#("(""|[^"])*"|[-+*/\\~<>=|&!?,@%]+|[\w:]+)', + String.Symbol, 'afterobject'), + include('literals'), + ], + 'afterobject': [ + (r'! !$', Keyword, '#pop'), # squeak chunk delimiter + include('whitespaces'), + (r'\b(ifTrue:|ifFalse:|whileTrue:|whileFalse:|timesRepeat:)', + Name.Builtin, '#pop'), + (r'\b(new\b(?!:))', Name.Builtin), + (r'\:=|\_', Operator, '#pop'), + (r'\b[a-zA-Z]+\w*:', Name.Function, '#pop'), + (r'\b[a-zA-Z]+\w*', Name.Function), + (r'\w+:?|[-+*/\\~<>=|&!?,@%]+', Name.Function, '#pop'), + (r'\.', Punctuation, '#pop'), + (r';', Punctuation), + (r'[\])}]', Text), + (r'[\[({]', Text, '#pop'), + ], + 'squeak fileout': [ + # Squeak fileout format (optional) + (r'^"(""|[^"])*"!', Keyword), + (r"^'(''|[^'])*'!", Keyword), + (r'^(!)(\w+)( commentStamp: )(.*?)( prior: .*?!\n)(.*?)(!)', + bygroups(Keyword, Name.Class, Keyword, String, Keyword, Text, Keyword)), + (r"^(!)(\w+(?: class)?)( methodsFor: )('(?:''|[^'])*')(.*?!)", + bygroups(Keyword, Name.Class, Keyword, String, Keyword)), + (r'^(\w+)( subclass: )(#\w+)' + r'(\s+instanceVariableNames: )(.*?)' + r'(\s+classVariableNames: )(.*?)' + r'(\s+poolDictionaries: )(.*?)' + r'(\s+category: )(.*?)(!)', + bygroups(Name.Class, Keyword, String.Symbol, Keyword, String, Keyword, + String, Keyword, String, Keyword, String, Keyword)), + (r'^(\w+(?: class)?)(\s+instanceVariableNames: )(.*?)(!)', + bygroups(Name.Class, Keyword, String, Keyword)), + (r'(!\n)(\].*)(! !)$', bygroups(Keyword, Text, Keyword)), + (r'! !$', Keyword), + ], + } + + +class NewspeakLexer(RegexLexer): + """ + For `Newspeak ` syntax. + + .. 
versionadded:: 1.1 + """ + name = 'Newspeak' + filenames = ['*.ns2'] + aliases = ['newspeak', ] + mimetypes = ['text/x-newspeak'] + + tokens = { + 'root': [ + (r'\b(Newsqueak2)\b', Keyword.Declaration), + (r"'[^']*'", String), + (r'\b(class)(\s+)(\w+)(\s*)', + bygroups(Keyword.Declaration, Text, Name.Class, Text)), + (r'\b(mixin|self|super|private|public|protected|nil|true|false)\b', + Keyword), + (r'(\w+\:)(\s*)([a-zA-Z_]\w+)', + bygroups(Name.Function, Text, Name.Variable)), + (r'(\w+)(\s*)(=)', + bygroups(Name.Attribute, Text, Operator)), + (r'<\w+>', Comment.Special), + include('expressionstat'), + include('whitespace') + ], + + 'expressionstat': [ + (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), + (r'\d+', Number.Integer), + (r':\w+', Name.Variable), + (r'(\w+)(::)', bygroups(Name.Variable, Operator)), + (r'\w+:', Name.Function), + (r'\w+', Name.Variable), + (r'\(|\)', Punctuation), + (r'\[|\]', Punctuation), + (r'\{|\}', Punctuation), + + (r'(\^|\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-|:)', Operator), + (r'\.|;', Punctuation), + include('whitespace'), + include('literals'), + ], + 'literals': [ + (r'\$.', String), + (r"'[^']*'", String), + (r"#'[^']*'", String.Symbol), + (r"#\w+:?", String.Symbol), + (r"#(\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-)+", String.Symbol) + ], + 'whitespace': [ + (r'\s+', Text), + (r'"[^"]*"', Comment) + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/snobol.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/snobol.py new file mode 100644 index 0000000..88455f9 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/snobol.py @@ -0,0 +1,83 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.snobol + ~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for the SNOBOL language. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +from pygments.lexer import RegexLexer, bygroups +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['SnobolLexer'] + + +class SnobolLexer(RegexLexer): + """ + Lexer for the SNOBOL4 programming language. + + Recognizes the common ASCII equivalents of the original SNOBOL4 operators. + Does not require spaces around binary operators. + + .. versionadded:: 1.5 + """ + + name = "Snobol" + aliases = ["snobol"] + filenames = ['*.snobol'] + mimetypes = ['text/x-snobol'] + + tokens = { + # root state, start of line + # comments, continuation lines, and directives start in column 1 + # as do labels + 'root': [ + (r'\*.*\n', Comment), + (r'[+.] ', Punctuation, 'statement'), + (r'-.*\n', Comment), + (r'END\s*\n', Name.Label, 'heredoc'), + (r'[A-Za-z$][\w$]*', Name.Label, 'statement'), + (r'\s+', Text, 'statement'), + ], + # statement state, line after continuation or label + 'statement': [ + (r'\s*\n', Text, '#pop'), + (r'\s+', Text), + (r'(?<=[^\w.])(LT|LE|EQ|NE|GE|GT|INTEGER|IDENT|DIFFER|LGT|SIZE|' + r'REPLACE|TRIM|DUPL|REMDR|DATE|TIME|EVAL|APPLY|OPSYN|LOAD|UNLOAD|' + r'LEN|SPAN|BREAK|ANY|NOTANY|TAB|RTAB|REM|POS|RPOS|FAIL|FENCE|' + r'ABORT|ARB|ARBNO|BAL|SUCCEED|INPUT|OUTPUT|TERMINAL)(?=[^\w.])', + Name.Builtin), + (r'[A-Za-z][\w.]*', Name), + # ASCII equivalents of original operators + # | for the EBCDIC equivalent, ! 
likewise + # \ for EBCDIC negation + (r'\*\*|[?$.!%*/#+\-@|&\\=]', Operator), + (r'"[^"]*"', String), + (r"'[^']*'", String), + # Accept SPITBOL syntax for real numbers + # as well as Macro SNOBOL4 + (r'[0-9]+(?=[^.EeDd])', Number.Integer), + (r'[0-9]+(\.[0-9]*)?([EDed][-+]?[0-9]+)?', Number.Float), + # Goto + (r':', Punctuation, 'goto'), + (r'[()<>,;]', Punctuation), + ], + # Goto block + 'goto': [ + (r'\s*\n', Text, "#pop:2"), + (r'\s+', Text), + (r'F|S', Keyword), + (r'(\()([A-Za-z][\w.]*)(\))', + bygroups(Punctuation, Name.Label, Punctuation)) + ], + # everything after the END statement is basically one + # big heredoc. + 'heredoc': [ + (r'.*\n', String.Heredoc) + ] + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/special.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/special.py similarity index 86% rename from plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/special.py rename to plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/special.py index 75fdae5..bd86904 100644 --- a/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/special.py +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/special.py @@ -5,16 +5,15 @@ Special lexers. - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. 
""" import re -import io from pygments.lexer import Lexer from pygments.token import Token, Error, Text -from pygments.util import get_choice_opt, b +from pygments.util import get_choice_opt, text_type, BytesIO __all__ = ['TextLexer', 'RawTokenLexer'] @@ -35,7 +34,8 @@ class TextLexer(Lexer): _ttype_cache = {} -line_re = re.compile(b('.*?\n')) +line_re = re.compile(b'.*?\n') + class RawTokenLexer(Lexer): """ @@ -60,12 +60,12 @@ class RawTokenLexer(Lexer): Lexer.__init__(self, **options) def get_tokens(self, text): - if isinstance(text, str): + if isinstance(text, text_type): # raw token stream never has any non-ASCII characters text = text.encode('ascii') if self.compress == 'gz': import gzip - gzipfile = gzip.GzipFile('', 'rb', 9, io.StringIO(text)) + gzipfile = gzip.GzipFile('', 'rb', 9, BytesIO(text)) text = gzipfile.read() elif self.compress == 'bz2': import bz2 @@ -73,7 +73,7 @@ class RawTokenLexer(Lexer): # do not call Lexer.get_tokens() because we do not want Unicode # decoding to occur, and stripping is not optional. 
- text = text.strip(b('\n')) + b('\n') + text = text.strip(b'\n') + b'\n' for i, t, v in self.get_tokens_unprocessed(text): yield t, v @@ -81,9 +81,9 @@ class RawTokenLexer(Lexer): length = 0 for match in line_re.finditer(text): try: - ttypestr, val = match.group().split(b('\t'), 1) + ttypestr, val = match.group().split(b'\t', 1) except ValueError: - val = match.group().decode(self.encoding) + val = match.group().decode('ascii', 'replace') ttype = Error else: ttype = _ttype_cache.get(ttypestr) diff --git a/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/sql.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/sql.py similarity index 59% rename from plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/sql.py rename to plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/sql.py index 06b44b8..98425cd 100644 --- a/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/sql.py +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/sql.py @@ -34,28 +34,30 @@ The ``tests/examplefiles`` contains a few test files with data to be parsed by these lexers. - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. 
""" import re -from pygments.lexer import Lexer, RegexLexer, do_insertions, bygroups +from pygments.lexer import Lexer, RegexLexer, do_insertions, bygroups, words from pygments.token import Punctuation, \ - Text, Comment, Operator, Keyword, Name, String, Number, Generic + Text, Comment, Operator, Keyword, Name, String, Number, Generic from pygments.lexers import get_lexer_by_name, ClassNotFound +from pygments.util import iteritems from pygments.lexers._postgres_builtins import KEYWORDS, DATATYPES, \ - PSEUDO_TYPES, PLPGSQL_KEYWORDS + PSEUDO_TYPES, PLPGSQL_KEYWORDS __all__ = ['PostgresLexer', 'PlPgsqlLexer', 'PostgresConsoleLexer', - 'SqlLexer', 'MySqlLexer', 'SqliteConsoleLexer'] + 'SqlLexer', 'MySqlLexer', 'SqliteConsoleLexer', 'RqlLexer'] line_re = re.compile('.*?\n') language_re = re.compile(r"\s+LANGUAGE\s+'?(\w+)'?", re.IGNORECASE) + def language_callback(lexer, match): """Parse the content of a $-string using a lexer @@ -101,7 +103,7 @@ class PostgresBase(object): if lang.lower() == 'sql': return get_lexer_by_name('postgresql', **self.options) - tries = [ lang ] + tries = [lang] if lang.startswith('pl'): tries.append(lang[2:]) if lang.endswith('u'): @@ -124,7 +126,7 @@ class PostgresLexer(PostgresBase, RegexLexer): """ Lexer for the PostgreSQL dialect of SQL. - *New in Pygments 1.5.* + .. 
versionadded:: 1.5 """ name = 'PostgreSQL SQL dialect' @@ -137,29 +139,29 @@ class PostgresLexer(PostgresBase, RegexLexer): (r'\s+', Text), (r'--.*?\n', Comment.Single), (r'/\*', Comment.Multiline, 'multiline-comments'), - (r'(' + '|'.join([s.replace(" ", "\s+") - for s in DATATYPES + PSEUDO_TYPES]) - + r')\b', Name.Builtin), - (r'(' + '|'.join(KEYWORDS) + r')\b', Keyword), + (r'(' + '|'.join(s.replace(" ", "\s+") + for s in DATATYPES + PSEUDO_TYPES) + + r')\b', Name.Builtin), + (words(KEYWORDS, suffix=r'\b'), Keyword), (r'[+*/<>=~!@#%^&|`?-]+', Operator), (r'::', Operator), # cast (r'\$\d+', Name.Variable), (r'([0-9]*\.[0-9]*|[0-9]+)(e[+-]?[0-9]+)?', Number.Float), (r'[0-9]+', Number.Integer), (r"(E|U&)?'(''|[^'])*'", String.Single), - (r'(U&)?"(""|[^"])*"', String.Name), # quoted identifier - (r'(?s)(\$[^\$]*\$)(.*?)(\1)', language_callback), - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name), + (r'(U&)?"(""|[^"])*"', String.Name), # quoted identifier + (r'(?s)(\$[^$]*\$)(.*?)(\1)', language_callback), + (r'[a-z_]\w*', Name), # psql variable in SQL - (r""":(['"]?)[a-z][a-z0-9_]*\b\1""", Name.Variable), + (r""":(['"]?)[a-z]\w*\b\1""", Name.Variable), - (r'[;:()\[\]\{\},\.]', Punctuation), + (r'[;:()\[\]{},.]', Punctuation), ], 'multiline-comments': [ (r'/\*', Comment.Multiline, 'multiline-comments'), (r'\*/', Comment.Multiline, '#pop'), - (r'[^/\*]+', Comment.Multiline), + (r'[^/*]+', Comment.Multiline), (r'[/*]', Comment.Multiline) ], } @@ -169,20 +171,20 @@ class PlPgsqlLexer(PostgresBase, RegexLexer): """ Handle the extra syntax in Pl/pgSQL language. - *New in Pygments 1.5.* + .. 
versionadded:: 1.5 """ name = 'PL/pgSQL' aliases = ['plpgsql'] mimetypes = ['text/x-plpgsql'] flags = re.IGNORECASE - tokens = dict((k, l[:]) for (k, l) in PostgresLexer.tokens.items()) + tokens = dict((k, l[:]) for (k, l) in iteritems(PostgresLexer.tokens)) # extend the keywords list for i, pattern in enumerate(tokens['root']): if pattern[1] == Keyword: tokens['root'][i] = ( - r'(' + '|'.join(KEYWORDS + PLPGSQL_KEYWORDS) + r')\b', + words(KEYWORDS + PLPGSQL_KEYWORDS, suffix=r'\b'), Keyword) del i break @@ -191,10 +193,10 @@ class PlPgsqlLexer(PostgresBase, RegexLexer): # Add specific PL/pgSQL rules (before the SQL ones) tokens['root'][:0] = [ - (r'\%[a-z][a-z0-9_]*\b', Name.Builtin), # actually, a datatype + (r'\%[a-z]\w*\b', Name.Builtin), # actually, a datatype (r':=', Operator), - (r'\<\<[a-z][a-z0-9_]*\>\>', Name.Label), - (r'\#[a-z][a-z0-9_]*\b', Keyword.Pseudo), # #variable_conflict + (r'\<\<[a-z]\w*\>\>', Name.Label), + (r'\#[a-z]\w*\b', Keyword.Pseudo), # #variable_conflict ] @@ -210,7 +212,7 @@ class PsqlRegexLexer(PostgresBase, RegexLexer): aliases = [] # not public flags = re.IGNORECASE - tokens = dict((k, l[:]) for (k, l) in PostgresLexer.tokens.items()) + tokens = dict((k, l[:]) for (k, l) in iteritems(PostgresLexer.tokens)) tokens['root'].append( (r'\\[^\s]+', Keyword.Pseudo, 'psql-command')) @@ -218,7 +220,7 @@ class PsqlRegexLexer(PostgresBase, RegexLexer): (r'\n', Text, 'root'), (r'\s+', Text), (r'\\[^\s]+', Keyword.Pseudo), - (r""":(['"]?)[a-z][a-z0-9_]*\b\1""", Name.Variable), + (r""":(['"]?)[a-z]\w*\b\1""", Name.Variable), (r"'(''|[^'])*'", String.Single), (r"`([^`])*`", String.Backtick), (r"[^\s]+", String.Symbol), @@ -239,24 +241,28 @@ class lookahead(object): def __init__(self, x): self.iter = iter(x) self._nextitem = None + def __iter__(self): return self + def send(self, i): self._nextitem = i return i + def __next__(self): if self._nextitem is not None: ni = self._nextitem self._nextitem = None return ni return next(self.iter) + next = 
__next__ class PostgresConsoleLexer(Lexer): """ Lexer for psql sessions. - *New in Pygments 1.5.* + .. versionadded:: 1.5 """ name = 'PostgreSQL console (psql)' @@ -303,12 +309,12 @@ class PostgresConsoleLexer(Lexer): # TODO: better handle multiline comments at the end with # a lexer with an external state? if re_psql_command.match(curcode) \ - or re_end_command.search(curcode): + or re_end_command.search(curcode): break # Emit the combined stream of command and prompt(s) for item in do_insertions(insertions, - sql.get_tokens_unprocessed(curcode)): + sql.get_tokens_unprocessed(curcode)): yield item # Emit the output lines @@ -324,7 +330,7 @@ class PostgresConsoleLexer(Lexer): mmsg = re_message.match(line) if mmsg is not None: if mmsg.group(1).startswith("ERROR") \ - or mmsg.group(1).startswith("FATAL"): + or mmsg.group(1).startswith("FATAL"): out_token = Generic.Error yield (mmsg.start(1), Generic.Strong, mmsg.group(1)) yield (mmsg.start(2), out_token, mmsg.group(2)) @@ -349,97 +355,100 @@ class SqlLexer(RegexLexer): (r'\s+', Text), (r'--.*?\n', Comment.Single), (r'/\*', Comment.Multiline, 'multiline-comments'), - (r'(ABORT|ABS|ABSOLUTE|ACCESS|ADA|ADD|ADMIN|AFTER|AGGREGATE|' - r'ALIAS|ALL|ALLOCATE|ALTER|ANALYSE|ANALYZE|AND|ANY|ARE|AS|' - r'ASC|ASENSITIVE|ASSERTION|ASSIGNMENT|ASYMMETRIC|AT|ATOMIC|' - r'AUTHORIZATION|AVG|BACKWARD|BEFORE|BEGIN|BETWEEN|BITVAR|' - r'BIT_LENGTH|BOTH|BREADTH|BY|C|CACHE|CALL|CALLED|CARDINALITY|' - r'CASCADE|CASCADED|CASE|CAST|CATALOG|CATALOG_NAME|CHAIN|' - r'CHARACTERISTICS|CHARACTER_LENGTH|CHARACTER_SET_CATALOG|' - r'CHARACTER_SET_NAME|CHARACTER_SET_SCHEMA|CHAR_LENGTH|CHECK|' - r'CHECKED|CHECKPOINT|CLASS|CLASS_ORIGIN|CLOB|CLOSE|CLUSTER|' - r'COALSECE|COBOL|COLLATE|COLLATION|COLLATION_CATALOG|' - r'COLLATION_NAME|COLLATION_SCHEMA|COLUMN|COLUMN_NAME|' - r'COMMAND_FUNCTION|COMMAND_FUNCTION_CODE|COMMENT|COMMIT|' - r'COMMITTED|COMPLETION|CONDITION_NUMBER|CONNECT|CONNECTION|' - r'CONNECTION_NAME|CONSTRAINT|CONSTRAINTS|CONSTRAINT_CATALOG|' - 
r'CONSTRAINT_NAME|CONSTRAINT_SCHEMA|CONSTRUCTOR|CONTAINS|' - r'CONTINUE|CONVERSION|CONVERT|COPY|CORRESPONTING|COUNT|' - r'CREATE|CREATEDB|CREATEUSER|CROSS|CUBE|CURRENT|CURRENT_DATE|' - r'CURRENT_PATH|CURRENT_ROLE|CURRENT_TIME|CURRENT_TIMESTAMP|' - r'CURRENT_USER|CURSOR|CURSOR_NAME|CYCLE|DATA|DATABASE|' - r'DATETIME_INTERVAL_CODE|DATETIME_INTERVAL_PRECISION|DAY|' - r'DEALLOCATE|DECLARE|DEFAULT|DEFAULTS|DEFERRABLE|DEFERRED|' - r'DEFINED|DEFINER|DELETE|DELIMITER|DELIMITERS|DEREF|DESC|' - r'DESCRIBE|DESCRIPTOR|DESTROY|DESTRUCTOR|DETERMINISTIC|' - r'DIAGNOSTICS|DICTIONARY|DISCONNECT|DISPATCH|DISTINCT|DO|' - r'DOMAIN|DROP|DYNAMIC|DYNAMIC_FUNCTION|DYNAMIC_FUNCTION_CODE|' - r'EACH|ELSE|ENCODING|ENCRYPTED|END|END-EXEC|EQUALS|ESCAPE|EVERY|' - r'EXCEPTION|EXCEPT|EXCLUDING|EXCLUSIVE|EXEC|EXECUTE|EXISTING|' - r'EXISTS|EXPLAIN|EXTERNAL|EXTRACT|FALSE|FETCH|FINAL|FIRST|FOR|' - r'FORCE|FOREIGN|FORTRAN|FORWARD|FOUND|FREE|FREEZE|FROM|FULL|' - r'FUNCTION|G|GENERAL|GENERATED|GET|GLOBAL|GO|GOTO|GRANT|GRANTED|' - r'GROUP|GROUPING|HANDLER|HAVING|HIERARCHY|HOLD|HOST|IDENTITY|' - r'IGNORE|ILIKE|IMMEDIATE|IMMUTABLE|IMPLEMENTATION|IMPLICIT|IN|' - r'INCLUDING|INCREMENT|INDEX|INDITCATOR|INFIX|INHERITS|INITIALIZE|' - r'INITIALLY|INNER|INOUT|INPUT|INSENSITIVE|INSERT|INSTANTIABLE|' - r'INSTEAD|INTERSECT|INTO|INVOKER|IS|ISNULL|ISOLATION|ITERATE|JOIN|' - r'KEY|KEY_MEMBER|KEY_TYPE|LANCOMPILER|LANGUAGE|LARGE|LAST|' - r'LATERAL|LEADING|LEFT|LENGTH|LESS|LEVEL|LIKE|LIMIT|LISTEN|LOAD|' - r'LOCAL|LOCALTIME|LOCALTIMESTAMP|LOCATION|LOCATOR|LOCK|LOWER|' - r'MAP|MATCH|MAX|MAXVALUE|MESSAGE_LENGTH|MESSAGE_OCTET_LENGTH|' - r'MESSAGE_TEXT|METHOD|MIN|MINUTE|MINVALUE|MOD|MODE|MODIFIES|' - r'MODIFY|MONTH|MORE|MOVE|MUMPS|NAMES|NATIONAL|NATURAL|NCHAR|' - r'NCLOB|NEW|NEXT|NO|NOCREATEDB|NOCREATEUSER|NONE|NOT|NOTHING|' - r'NOTIFY|NOTNULL|NULL|NULLABLE|NULLIF|OBJECT|OCTET_LENGTH|OF|OFF|' - r'OFFSET|OIDS|OLD|ON|ONLY|OPEN|OPERATION|OPERATOR|OPTION|OPTIONS|' - 
r'OR|ORDER|ORDINALITY|OUT|OUTER|OUTPUT|OVERLAPS|OVERLAY|OVERRIDING|' - r'OWNER|PAD|PARAMETER|PARAMETERS|PARAMETER_MODE|PARAMATER_NAME|' - r'PARAMATER_ORDINAL_POSITION|PARAMETER_SPECIFIC_CATALOG|' - r'PARAMETER_SPECIFIC_NAME|PARAMATER_SPECIFIC_SCHEMA|PARTIAL|' - r'PASCAL|PENDANT|PLACING|PLI|POSITION|POSTFIX|PRECISION|PREFIX|' - r'PREORDER|PREPARE|PRESERVE|PRIMARY|PRIOR|PRIVILEGES|PROCEDURAL|' - r'PROCEDURE|PUBLIC|READ|READS|RECHECK|RECURSIVE|REF|REFERENCES|' - r'REFERENCING|REINDEX|RELATIVE|RENAME|REPEATABLE|REPLACE|RESET|' - r'RESTART|RESTRICT|RESULT|RETURN|RETURNED_LENGTH|' - r'RETURNED_OCTET_LENGTH|RETURNED_SQLSTATE|RETURNS|REVOKE|RIGHT|' - r'ROLE|ROLLBACK|ROLLUP|ROUTINE|ROUTINE_CATALOG|ROUTINE_NAME|' - r'ROUTINE_SCHEMA|ROW|ROWS|ROW_COUNT|RULE|SAVE_POINT|SCALE|SCHEMA|' - r'SCHEMA_NAME|SCOPE|SCROLL|SEARCH|SECOND|SECURITY|SELECT|SELF|' - r'SENSITIVE|SERIALIZABLE|SERVER_NAME|SESSION|SESSION_USER|SET|' - r'SETOF|SETS|SHARE|SHOW|SIMILAR|SIMPLE|SIZE|SOME|SOURCE|SPACE|' - r'SPECIFIC|SPECIFICTYPE|SPECIFIC_NAME|SQL|SQLCODE|SQLERROR|' - r'SQLEXCEPTION|SQLSTATE|SQLWARNINIG|STABLE|START|STATE|STATEMENT|' - r'STATIC|STATISTICS|STDIN|STDOUT|STORAGE|STRICT|STRUCTURE|STYPE|' - r'SUBCLASS_ORIGIN|SUBLIST|SUBSTRING|SUM|SYMMETRIC|SYSID|SYSTEM|' - r'SYSTEM_USER|TABLE|TABLE_NAME| TEMP|TEMPLATE|TEMPORARY|TERMINATE|' - r'THAN|THEN|TIMESTAMP|TIMEZONE_HOUR|TIMEZONE_MINUTE|TO|TOAST|' - r'TRAILING|TRANSATION|TRANSACTIONS_COMMITTED|' - r'TRANSACTIONS_ROLLED_BACK|TRANSATION_ACTIVE|TRANSFORM|' - r'TRANSFORMS|TRANSLATE|TRANSLATION|TREAT|TRIGGER|TRIGGER_CATALOG|' - r'TRIGGER_NAME|TRIGGER_SCHEMA|TRIM|TRUE|TRUNCATE|TRUSTED|TYPE|' - r'UNCOMMITTED|UNDER|UNENCRYPTED|UNION|UNIQUE|UNKNOWN|UNLISTEN|' - r'UNNAMED|UNNEST|UNTIL|UPDATE|UPPER|USAGE|USER|' - r'USER_DEFINED_TYPE_CATALOG|USER_DEFINED_TYPE_NAME|' - r'USER_DEFINED_TYPE_SCHEMA|USING|VACUUM|VALID|VALIDATOR|VALUES|' - r'VARIABLE|VERBOSE|VERSION|VIEW|VOLATILE|WHEN|WHENEVER|WHERE|' - r'WITH|WITHOUT|WORK|WRITE|YEAR|ZONE)\b', Keyword), - 
(r'(ARRAY|BIGINT|BINARY|BIT|BLOB|BOOLEAN|CHAR|CHARACTER|DATE|' - r'DEC|DECIMAL|FLOAT|INT|INTEGER|INTERVAL|NUMBER|NUMERIC|REAL|' - r'SERIAL|SMALLINT|VARCHAR|VARYING|INT8|SERIAL8|TEXT)\b', + (words(( + 'ABORT', 'ABS', 'ABSOLUTE', 'ACCESS', 'ADA', 'ADD', 'ADMIN', 'AFTER', 'AGGREGATE', + 'ALIAS', 'ALL', 'ALLOCATE', 'ALTER', 'ANALYSE', 'ANALYZE', 'AND', 'ANY', 'ARE', 'AS', + 'ASC', 'ASENSITIVE', 'ASSERTION', 'ASSIGNMENT', 'ASYMMETRIC', 'AT', 'ATOMIC', + 'AUTHORIZATION', 'AVG', 'BACKWARD', 'BEFORE', 'BEGIN', 'BETWEEN', 'BITVAR', + 'BIT_LENGTH', 'BOTH', 'BREADTH', 'BY', 'C', 'CACHE', 'CALL', 'CALLED', 'CARDINALITY', + 'CASCADE', 'CASCADED', 'CASE', 'CAST', 'CATALOG', 'CATALOG_NAME', 'CHAIN', + 'CHARACTERISTICS', 'CHARACTER_LENGTH', 'CHARACTER_SET_CATALOG', + 'CHARACTER_SET_NAME', 'CHARACTER_SET_SCHEMA', 'CHAR_LENGTH', 'CHECK', + 'CHECKED', 'CHECKPOINT', 'CLASS', 'CLASS_ORIGIN', 'CLOB', 'CLOSE', 'CLUSTER', + 'COALSECE', 'COBOL', 'COLLATE', 'COLLATION', 'COLLATION_CATALOG', + 'COLLATION_NAME', 'COLLATION_SCHEMA', 'COLUMN', 'COLUMN_NAME', + 'COMMAND_FUNCTION', 'COMMAND_FUNCTION_CODE', 'COMMENT', 'COMMIT', + 'COMMITTED', 'COMPLETION', 'CONDITION_NUMBER', 'CONNECT', 'CONNECTION', + 'CONNECTION_NAME', 'CONSTRAINT', 'CONSTRAINTS', 'CONSTRAINT_CATALOG', + 'CONSTRAINT_NAME', 'CONSTRAINT_SCHEMA', 'CONSTRUCTOR', 'CONTAINS', + 'CONTINUE', 'CONVERSION', 'CONVERT', 'COPY', 'CORRESPONTING', 'COUNT', + 'CREATE', 'CREATEDB', 'CREATEUSER', 'CROSS', 'CUBE', 'CURRENT', 'CURRENT_DATE', + 'CURRENT_PATH', 'CURRENT_ROLE', 'CURRENT_TIME', 'CURRENT_TIMESTAMP', + 'CURRENT_USER', 'CURSOR', 'CURSOR_NAME', 'CYCLE', 'DATA', 'DATABASE', + 'DATETIME_INTERVAL_CODE', 'DATETIME_INTERVAL_PRECISION', 'DAY', + 'DEALLOCATE', 'DECLARE', 'DEFAULT', 'DEFAULTS', 'DEFERRABLE', 'DEFERRED', + 'DEFINED', 'DEFINER', 'DELETE', 'DELIMITER', 'DELIMITERS', 'DEREF', 'DESC', + 'DESCRIBE', 'DESCRIPTOR', 'DESTROY', 'DESTRUCTOR', 'DETERMINISTIC', + 'DIAGNOSTICS', 'DICTIONARY', 'DISCONNECT', 'DISPATCH', 'DISTINCT', 'DO', + 
'DOMAIN', 'DROP', 'DYNAMIC', 'DYNAMIC_FUNCTION', 'DYNAMIC_FUNCTION_CODE', + 'EACH', 'ELSE', 'ENCODING', 'ENCRYPTED', 'END', 'END-EXEC', 'EQUALS', 'ESCAPE', 'EVERY', + 'EXCEPTION', 'EXCEPT', 'EXCLUDING', 'EXCLUSIVE', 'EXEC', 'EXECUTE', 'EXISTING', + 'EXISTS', 'EXPLAIN', 'EXTERNAL', 'EXTRACT', 'FALSE', 'FETCH', 'FINAL', 'FIRST', 'FOR', + 'FORCE', 'FOREIGN', 'FORTRAN', 'FORWARD', 'FOUND', 'FREE', 'FREEZE', 'FROM', 'FULL', + 'FUNCTION', 'G', 'GENERAL', 'GENERATED', 'GET', 'GLOBAL', 'GO', 'GOTO', 'GRANT', 'GRANTED', + 'GROUP', 'GROUPING', 'HANDLER', 'HAVING', 'HIERARCHY', 'HOLD', 'HOST', 'IDENTITY', + 'IGNORE', 'ILIKE', 'IMMEDIATE', 'IMMUTABLE', 'IMPLEMENTATION', 'IMPLICIT', 'IN', + 'INCLUDING', 'INCREMENT', 'INDEX', 'INDITCATOR', 'INFIX', 'INHERITS', 'INITIALIZE', + 'INITIALLY', 'INNER', 'INOUT', 'INPUT', 'INSENSITIVE', 'INSERT', 'INSTANTIABLE', + 'INSTEAD', 'INTERSECT', 'INTO', 'INVOKER', 'IS', 'ISNULL', 'ISOLATION', 'ITERATE', 'JOIN', + 'KEY', 'KEY_MEMBER', 'KEY_TYPE', 'LANCOMPILER', 'LANGUAGE', 'LARGE', 'LAST', + 'LATERAL', 'LEADING', 'LEFT', 'LENGTH', 'LESS', 'LEVEL', 'LIKE', 'LIMIT', 'LISTEN', 'LOAD', + 'LOCAL', 'LOCALTIME', 'LOCALTIMESTAMP', 'LOCATION', 'LOCATOR', 'LOCK', 'LOWER', + 'MAP', 'MATCH', 'MAX', 'MAXVALUE', 'MESSAGE_LENGTH', 'MESSAGE_OCTET_LENGTH', + 'MESSAGE_TEXT', 'METHOD', 'MIN', 'MINUTE', 'MINVALUE', 'MOD', 'MODE', 'MODIFIES', + 'MODIFY', 'MONTH', 'MORE', 'MOVE', 'MUMPS', 'NAMES', 'NATIONAL', 'NATURAL', 'NCHAR', + 'NCLOB', 'NEW', 'NEXT', 'NO', 'NOCREATEDB', 'NOCREATEUSER', 'NONE', 'NOT', 'NOTHING', + 'NOTIFY', 'NOTNULL', 'NULL', 'NULLABLE', 'NULLIF', 'OBJECT', 'OCTET_LENGTH', 'OF', 'OFF', + 'OFFSET', 'OIDS', 'OLD', 'ON', 'ONLY', 'OPEN', 'OPERATION', 'OPERATOR', 'OPTION', 'OPTIONS', + 'OR', 'ORDER', 'ORDINALITY', 'OUT', 'OUTER', 'OUTPUT', 'OVERLAPS', 'OVERLAY', 'OVERRIDING', + 'OWNER', 'PAD', 'PARAMETER', 'PARAMETERS', 'PARAMETER_MODE', 'PARAMATER_NAME', + 'PARAMATER_ORDINAL_POSITION', 'PARAMETER_SPECIFIC_CATALOG', + 'PARAMETER_SPECIFIC_NAME', 
'PARAMATER_SPECIFIC_SCHEMA', 'PARTIAL', + 'PASCAL', 'PENDANT', 'PLACING', 'PLI', 'POSITION', 'POSTFIX', 'PRECISION', 'PREFIX', + 'PREORDER', 'PREPARE', 'PRESERVE', 'PRIMARY', 'PRIOR', 'PRIVILEGES', 'PROCEDURAL', + 'PROCEDURE', 'PUBLIC', 'READ', 'READS', 'RECHECK', 'RECURSIVE', 'REF', 'REFERENCES', + 'REFERENCING', 'REINDEX', 'RELATIVE', 'RENAME', 'REPEATABLE', 'REPLACE', 'RESET', + 'RESTART', 'RESTRICT', 'RESULT', 'RETURN', 'RETURNED_LENGTH', + 'RETURNED_OCTET_LENGTH', 'RETURNED_SQLSTATE', 'RETURNS', 'REVOKE', 'RIGHT', + 'ROLE', 'ROLLBACK', 'ROLLUP', 'ROUTINE', 'ROUTINE_CATALOG', 'ROUTINE_NAME', + 'ROUTINE_SCHEMA', 'ROW', 'ROWS', 'ROW_COUNT', 'RULE', 'SAVE_POINT', 'SCALE', 'SCHEMA', + 'SCHEMA_NAME', 'SCOPE', 'SCROLL', 'SEARCH', 'SECOND', 'SECURITY', 'SELECT', 'SELF', + 'SENSITIVE', 'SERIALIZABLE', 'SERVER_NAME', 'SESSION', 'SESSION_USER', 'SET', + 'SETOF', 'SETS', 'SHARE', 'SHOW', 'SIMILAR', 'SIMPLE', 'SIZE', 'SOME', 'SOURCE', 'SPACE', + 'SPECIFIC', 'SPECIFICTYPE', 'SPECIFIC_NAME', 'SQL', 'SQLCODE', 'SQLERROR', + 'SQLEXCEPTION', 'SQLSTATE', 'SQLWARNINIG', 'STABLE', 'START', 'STATE', 'STATEMENT', + 'STATIC', 'STATISTICS', 'STDIN', 'STDOUT', 'STORAGE', 'STRICT', 'STRUCTURE', 'STYPE', + 'SUBCLASS_ORIGIN', 'SUBLIST', 'SUBSTRING', 'SUM', 'SYMMETRIC', 'SYSID', 'SYSTEM', + 'SYSTEM_USER', 'TABLE', 'TABLE_NAME', ' TEMP', 'TEMPLATE', 'TEMPORARY', 'TERMINATE', + 'THAN', 'THEN', 'TIMESTAMP', 'TIMEZONE_HOUR', 'TIMEZONE_MINUTE', 'TO', 'TOAST', + 'TRAILING', 'TRANSATION', 'TRANSACTIONS_COMMITTED', + 'TRANSACTIONS_ROLLED_BACK', 'TRANSATION_ACTIVE', 'TRANSFORM', + 'TRANSFORMS', 'TRANSLATE', 'TRANSLATION', 'TREAT', 'TRIGGER', 'TRIGGER_CATALOG', + 'TRIGGER_NAME', 'TRIGGER_SCHEMA', 'TRIM', 'TRUE', 'TRUNCATE', 'TRUSTED', 'TYPE', + 'UNCOMMITTED', 'UNDER', 'UNENCRYPTED', 'UNION', 'UNIQUE', 'UNKNOWN', 'UNLISTEN', + 'UNNAMED', 'UNNEST', 'UNTIL', 'UPDATE', 'UPPER', 'USAGE', 'USER', + 'USER_DEFINED_TYPE_CATALOG', 'USER_DEFINED_TYPE_NAME', + 'USER_DEFINED_TYPE_SCHEMA', 'USING', 'VACUUM', 
'VALID', 'VALIDATOR', 'VALUES', + 'VARIABLE', 'VERBOSE', 'VERSION', 'VIEW', 'VOLATILE', 'WHEN', 'WHENEVER', 'WHERE', + 'WITH', 'WITHOUT', 'WORK', 'WRITE', 'YEAR', 'ZONE'), suffix=r'\b'), + Keyword), + (words(( + 'ARRAY', 'BIGINT', 'BINARY', 'BIT', 'BLOB', 'BOOLEAN', 'CHAR', 'CHARACTER', 'DATE', + 'DEC', 'DECIMAL', 'FLOAT', 'INT', 'INTEGER', 'INTERVAL', 'NUMBER', 'NUMERIC', 'REAL', + 'SERIAL', 'SMALLINT', 'VARCHAR', 'VARYING', 'INT8', 'SERIAL8', 'TEXT'), suffix=r'\b'), Name.Builtin), (r'[+*/<>=~!@#%^&|`?-]', Operator), (r'[0-9]+', Number.Integer), # TODO: Backslash escapes? (r"'(''|[^'])*'", String.Single), - (r'"(""|[^"])*"', String.Symbol), # not a real string literal in ANSI SQL - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name), - (r'[;:()\[\],\.]', Punctuation) + (r'"(""|[^"])*"', String.Symbol), # not a real string literal in ANSI SQL + (r'[a-z_][\w$]*', Name), # allow $s in strings for Oracle + (r'[;:()\[\],.]', Punctuation) ], 'multiline-comments': [ (r'/\*', Comment.Multiline, 'multiline-comments'), (r'\*/', Comment.Multiline, '#pop'), - (r'[^/\*]+', Comment.Multiline), + (r'[^/*]+', Comment.Multiline), (r'[/*]', Comment.Multiline) ] } @@ -462,10 +471,9 @@ class MySqlLexer(RegexLexer): (r'/\*', Comment.Multiline, 'multiline-comments'), (r'[0-9]+', Number.Integer), (r'[0-9]*\.[0-9]+(e[+-][0-9]+)', Number.Float), - # TODO: add backslash escapes - (r"'(''|[^'])*'", String.Single), - (r'"(""|[^"])*"', String.Double), - (r"`(``|[^`])*`", String.Symbol), + (r"'(\\\\|\\'|''|[^'])*'", String.Single), + (r'"(\\\\|\\"|""|[^"])*"', String.Double), + (r"`(\\\\|\\`|``|[^`])*`", String.Symbol), (r'[+*/<>=~!@#%^&|`?-]', Operator), (r'\b(tinyint|smallint|mediumint|int|integer|bigint|date|' r'datetime|time|bit|bool|tinytext|mediumtext|longtext|text|' @@ -504,16 +512,16 @@ class MySqlLexer(RegexLexer): # TODO: this list is not complete (r'\b(auto_increment|engine|charset|tables)\b', Keyword.Pseudo), (r'(true|false|null)', Name.Constant), - (r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*)(\()', + 
(r'([a-z_]\w*)(\s*)(\()', bygroups(Name.Function, Text, Punctuation)), - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name), - (r'@[A-Za-z0-9]*[._]*[A-Za-z0-9]*', Name.Variable), - (r'[;:()\[\],\.]', Punctuation) + (r'[a-z_]\w*', Name), + (r'@[a-z0-9]*[._]*[a-z0-9]*', Name.Variable), + (r'[;:()\[\],.]', Punctuation) ], 'multiline-comments': [ (r'/\*', Comment.Multiline, 'multiline-comments'), (r'\*/', Comment.Multiline, '#pop'), - (r'[^/\*]+', Comment.Multiline), + (r'[^/*]+', Comment.Multiline), (r'[/*]', Comment.Multiline) ] } @@ -523,7 +531,7 @@ class SqliteConsoleLexer(Lexer): """ Lexer for example sessions using sqlite3. - *New in Pygments 0.11.* + .. versionadded:: 0.11 """ name = 'sqlite3con' @@ -557,3 +565,34 @@ class SqliteConsoleLexer(Lexer): for item in do_insertions(insertions, sql.get_tokens_unprocessed(curcode)): yield item + + +class RqlLexer(RegexLexer): + """ + Lexer for Relation Query Language. + + `RQL `_ + + .. versionadded:: 2.0 + """ + name = 'RQL' + aliases = ['rql'] + filenames = ['*.rql'] + mimetypes = ['text/x-rql'] + + flags = re.IGNORECASE + tokens = { + 'root': [ + (r'\s+', Text), + (r'(DELETE|SET|INSERT|UNION|DISTINCT|WITH|WHERE|BEING|OR' + r'|AND|NOT|GROUPBY|HAVING|ORDERBY|ASC|DESC|LIMIT|OFFSET' + r'|TODAY|NOW|TRUE|FALSE|NULL|EXISTS)\b', Keyword), + (r'[+*/<>=%-]', Operator), + (r'(Any|is|instance_of|CWEType|CWRelation)\b', Name.Builtin), + (r'[0-9]+', Number.Integer), + (r'[A-Z_]\w*\??', Name), + (r"'(''|[^'])*'", String.Single), + (r'"(""|[^"])*"', String.Single), + (r'[;:()\[\],.]', Punctuation) + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/tcl.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/tcl.py new file mode 100644 index 0000000..966dc24 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/tcl.py @@ -0,0 +1,145 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.tcl + ~~~~~~~~~~~~~~~~~~~ + + Lexers for Tcl and related languages. 
+ + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, include, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number +from pygments.util import shebang_matches + +__all__ = ['TclLexer'] + + +class TclLexer(RegexLexer): + """ + For Tcl source code. + + .. versionadded:: 0.10 + """ + + keyword_cmds_re = words(( + 'after', 'apply', 'array', 'break', 'catch', 'continue', 'elseif', 'else', 'error', + 'eval', 'expr', 'for', 'foreach', 'global', 'if', 'namespace', 'proc', 'rename', 'return', + 'set', 'switch', 'then', 'trace', 'unset', 'update', 'uplevel', 'upvar', 'variable', + 'vwait', 'while'), prefix=r'\b', suffix=r'\b') + + builtin_cmds_re = words(( + 'append', 'bgerror', 'binary', 'cd', 'chan', 'clock', 'close', 'concat', 'dde', 'dict', + 'encoding', 'eof', 'exec', 'exit', 'fblocked', 'fconfigure', 'fcopy', 'file', + 'fileevent', 'flush', 'format', 'gets', 'glob', 'history', 'http', 'incr', 'info', 'interp', + 'join', 'lappend', 'lassign', 'lindex', 'linsert', 'list', 'llength', 'load', 'loadTk', + 'lrange', 'lrepeat', 'lreplace', 'lreverse', 'lsearch', 'lset', 'lsort', 'mathfunc', + 'mathop', 'memory', 'msgcat', 'open', 'package', 'pid', 'pkg::create', 'pkg_mkIndex', + 'platform', 'platform::shell', 'puts', 'pwd', 're_syntax', 'read', 'refchan', + 'regexp', 'registry', 'regsub', 'scan', 'seek', 'socket', 'source', 'split', 'string', + 'subst', 'tell', 'time', 'tm', 'unknown', 'unload'), prefix=r'\b', suffix=r'\b') + + name = 'Tcl' + aliases = ['tcl'] + filenames = ['*.tcl', '*.rvt'] + mimetypes = ['text/x-tcl', 'text/x-script.tcl', 'application/x-tcl'] + + def _gen_command_rules(keyword_cmds_re, builtin_cmds_re, context=""): + return [ + (keyword_cmds_re, Keyword, 'params' + context), + (builtin_cmds_re, Name.Builtin, 'params' + context), + (r'([\w.-]+)', Name.Variable, 'params' + context), + (r'#', Comment, 'comment'), + 
] + + tokens = { + 'root': [ + include('command'), + include('basic'), + include('data'), + (r'\}', Keyword), # HACK: somehow we miscounted our braces + ], + 'command': _gen_command_rules(keyword_cmds_re, builtin_cmds_re), + 'command-in-brace': _gen_command_rules(keyword_cmds_re, + builtin_cmds_re, + "-in-brace"), + 'command-in-bracket': _gen_command_rules(keyword_cmds_re, + builtin_cmds_re, + "-in-bracket"), + 'command-in-paren': _gen_command_rules(keyword_cmds_re, + builtin_cmds_re, + "-in-paren"), + 'basic': [ + (r'\(', Keyword, 'paren'), + (r'\[', Keyword, 'bracket'), + (r'\{', Keyword, 'brace'), + (r'"', String.Double, 'string'), + (r'(eq|ne|in|ni)\b', Operator.Word), + (r'!=|==|<<|>>|<=|>=|&&|\|\||\*\*|[-+~!*/%<>&^|?:]', Operator), + ], + 'data': [ + (r'\s+', Text), + (r'0x[a-fA-F0-9]+', Number.Hex), + (r'0[0-7]+', Number.Oct), + (r'\d+\.\d+', Number.Float), + (r'\d+', Number.Integer), + (r'\$([\w.:-]+)', Name.Variable), + (r'([\w.:-]+)', Text), + ], + 'params': [ + (r';', Keyword, '#pop'), + (r'\n', Text, '#pop'), + (r'(else|elseif|then)\b', Keyword), + include('basic'), + include('data'), + ], + 'params-in-brace': [ + (r'\}', Keyword, ('#pop', '#pop')), + include('params') + ], + 'params-in-paren': [ + (r'\)', Keyword, ('#pop', '#pop')), + include('params') + ], + 'params-in-bracket': [ + (r'\]', Keyword, ('#pop', '#pop')), + include('params') + ], + 'string': [ + (r'\[', String.Double, 'string-square'), + (r'(?s)(\\\\|\\[0-7]+|\\.|[^"\\])', String.Double), + (r'"', String.Double, '#pop') + ], + 'string-square': [ + (r'\[', String.Double, 'string-square'), + (r'(?s)(\\\\|\\[0-7]+|\\.|\\\n|[^\]\\])', String.Double), + (r'\]', String.Double, '#pop') + ], + 'brace': [ + (r'\}', Keyword, '#pop'), + include('command-in-brace'), + include('basic'), + include('data'), + ], + 'paren': [ + (r'\)', Keyword, '#pop'), + include('command-in-paren'), + include('basic'), + include('data'), + ], + 'bracket': [ + (r'\]', Keyword, '#pop'), + include('command-in-bracket'), + 
include('basic'), + include('data'), + ], + 'comment': [ + (r'.*[^\\]\n', Comment, '#pop'), + (r'.*\\\n', Comment), + ], + } + + def analyse_text(text): + return shebang_matches(text, r'(tcl)') diff --git a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/templates.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/templates.py similarity index 73% rename from plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/templates.py rename to plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/templates.py index 63fc5f3..c153f51 100644 --- a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/templates.py +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/templates.py @@ -5,21 +5,24 @@ Lexers for various template engines' markup. - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. 
""" import re -from pygments.lexers.web import \ - PhpLexer, HtmlLexer, XmlLexer, JavascriptLexer, CssLexer, LassoLexer -from pygments.lexers.agile import PythonLexer, PerlLexer -from pygments.lexers.compiled import JavaLexer -from pygments.lexers.jvm import TeaLangLexer +from pygments.lexers.html import HtmlLexer, XmlLexer +from pygments.lexers.javascript import JavascriptLexer, LassoLexer +from pygments.lexers.css import CssLexer +from pygments.lexers.php import PhpLexer +from pygments.lexers.python import PythonLexer +from pygments.lexers.perl import PerlLexer +from pygments.lexers.jvm import JavaLexer, TeaLangLexer +from pygments.lexers.data import YamlLexer from pygments.lexer import Lexer, DelegatingLexer, RegexLexer, bygroups, \ - include, using, this -from pygments.token import Error, Punctuation, \ - Text, Comment, Operator, Keyword, Name, String, Number, Other, Token + include, using, this, default, combined +from pygments.token import Error, Punctuation, Whitespace, \ + Text, Comment, Operator, Keyword, Name, String, Number, Other, Token from pygments.util import html_doctype_matches, looks_like_xml __all__ = ['HtmlPhpLexer', 'XmlPhpLexer', 'CssPhpLexer', @@ -36,9 +39,12 @@ __all__ = ['HtmlPhpLexer', 'XmlPhpLexer', 'CssPhpLexer', 'MakoCssLexer', 'JspLexer', 'CheetahLexer', 'CheetahHtmlLexer', 'CheetahXmlLexer', 'CheetahJavascriptLexer', 'EvoqueLexer', 'EvoqueHtmlLexer', 'EvoqueXmlLexer', 'ColdfusionLexer', - 'ColdfusionHtmlLexer', 'VelocityLexer', 'VelocityHtmlLexer', - 'VelocityXmlLexer', 'SspLexer', 'TeaTemplateLexer', 'LassoHtmlLexer', - 'LassoXmlLexer', 'LassoCssLexer', 'LassoJavascriptLexer'] + 'ColdfusionHtmlLexer', 'ColdfusionCFCLexer', 'VelocityLexer', + 'VelocityHtmlLexer', 'VelocityXmlLexer', 'SspLexer', + 'TeaTemplateLexer', 'LassoHtmlLexer', 'LassoXmlLexer', + 'LassoCssLexer', 'LassoJavascriptLexer', 'HandlebarsLexer', + 'HandlebarsHtmlLexer', 'YamlJinjaLexer', 'LiquidLexer', + 'TwigLexer', 'TwigHtmlLexer'] class ErbLexer(Lexer): @@ -59,7 
+65,7 @@ class ErbLexer(Lexer): _block_re = re.compile(r'(<%%|%%>|<%=|<%#|<%-|<%|-%>|%>|^%[^%].*?$)', re.M) def __init__(self, **options): - from pygments.lexers.agile import RubyLexer + from pygments.lexers.ruby import RubyLexer self.ruby_lexer = RubyLexer(**options) Lexer.__init__(self, **options) @@ -102,7 +108,7 @@ class ErbLexer(Lexer): data = tokens.pop() r_idx = 0 for r_idx, r_token, r_value in \ - self.ruby_lexer.get_tokens_unprocessed(data): + self.ruby_lexer.get_tokens_unprocessed(data): yield r_idx + idx, r_token, r_value idx += len(data) state = 2 @@ -115,7 +121,7 @@ class ErbLexer(Lexer): yield idx, Comment.Preproc, tag[0] r_idx = 0 for r_idx, r_token, r_value in \ - self.ruby_lexer.get_tokens_unprocessed(tag[1:]): + self.ruby_lexer.get_tokens_unprocessed(tag[1:]): yield idx + 1 + r_idx, r_token, r_value idx += len(tag) state = 0 @@ -159,22 +165,23 @@ class SmartyLexer(RegexLexer): (r'(\{php\})(.*?)(\{/php\})', bygroups(Comment.Preproc, using(PhpLexer, startinline=True), Comment.Preproc)), - (r'(\{)(/?[a-zA-Z_][a-zA-Z0-9_]*)(\s*)', + (r'(\{)(/?[a-zA-Z_]\w*)(\s*)', bygroups(Comment.Preproc, Name.Function, Text), 'smarty'), (r'\{', Comment.Preproc, 'smarty') ], 'smarty': [ (r'\s+', Text), + (r'\{', Comment.Preproc, '#push'), (r'\}', Comment.Preproc, '#pop'), - (r'#[a-zA-Z_][a-zA-Z0-9_]*#', Name.Variable), - (r'\$[a-zA-Z_][a-zA-Z0-9_]*(\.[a-zA-Z0-9_]+)*', Name.Variable), - (r'[~!%^&*()+=|\[\]:;,.<>/?{}@-]', Operator), + (r'#[a-zA-Z_]\w*#', Name.Variable), + (r'\$[a-zA-Z_]\w*(\.\w+)*', Name.Variable), + (r'[~!%^&*()+=|\[\]:;,.<>/?@-]', Operator), (r'(true|false|null)\b', Keyword.Constant), (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|" r"0[xX][0-9a-fA-F]+[Ll]?", Number), (r'"(\\\\|\\"|[^"])*"', String.Double), (r"'(\\\\|\\'|[^'])*'", String.Single), - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Attribute) + (r'[a-zA-Z_]\w*', Name.Attribute) ] } @@ -201,11 +208,11 @@ class VelocityLexer(RegexLexer): name = 'Velocity' aliases = ['velocity'] - filenames = 
['*.vm','*.fhtml'] + filenames = ['*.vm', '*.fhtml'] flags = re.MULTILINE | re.DOTALL - identifier = r'[a-zA-Z_][a-zA-Z0-9_]*' + identifier = r'[a-zA-Z_]\w*' tokens = { 'root': [ @@ -227,10 +234,10 @@ class VelocityLexer(RegexLexer): (r'(\.)(' + identifier + r')', bygroups(Punctuation, Name.Variable), '#push'), (r'\}', Punctuation, '#pop'), - (r'', Other, '#pop') + default('#pop') ], 'directiveparams': [ - (r'(&&|\|\||==?|!=?|[-<>+*%&\|\^/])|\b(eq|ne|gt|lt|ge|le|not|in)\b', + (r'(&&|\|\||==?|!=?|[-<>+*%&|^/])|\b(eq|ne|gt|lt|ge|le|not|in)\b', Operator), (r'\[', Operator, 'rangeoperator'), (r'\b' + identifier + r'\b', Name.Function), @@ -251,7 +258,9 @@ class VelocityLexer(RegexLexer): (r"\b[0-9]+\b", Number), (r'(true|false|null)\b', Keyword.Constant), (r'\(', Punctuation, '#push'), - (r'\)', Punctuation, '#pop') + (r'\)', Punctuation, '#pop'), + (r'\[', Punctuation, '#push'), + (r'\]', Punctuation, '#pop'), ] } @@ -263,39 +272,39 @@ class VelocityLexer(RegexLexer): rv += 0.15 if re.search(r'#\{?foreach\}?\(.+?\).*?#\{?end\}?', text): rv += 0.15 - if re.search(r'\$\{?[a-zA-Z_][a-zA-Z0-9_]*(\([^)]*\))?' - r'(\.[a-zA-Z0-9_]+(\([^)]*\))?)*\}?', text): + if re.search(r'\$\{?[a-zA-Z_]\w*(\([^)]*\))?' + r'(\.\w+(\([^)]*\))?)*\}?', text): rv += 0.01 return rv class VelocityHtmlLexer(DelegatingLexer): """ - Subclass of the `VelocityLexer` that highlights unlexer data + Subclass of the `VelocityLexer` that highlights unlexed data with the `HtmlLexer`. """ name = 'HTML+Velocity' aliases = ['html+velocity'] - alias_filenames = ['*.html','*.fhtml'] + alias_filenames = ['*.html', '*.fhtml'] mimetypes = ['text/html+velocity'] def __init__(self, **options): super(VelocityHtmlLexer, self).__init__(HtmlLexer, VelocityLexer, - **options) + **options) class VelocityXmlLexer(DelegatingLexer): """ - Subclass of the `VelocityLexer` that highlights unlexer data + Subclass of the `VelocityLexer` that highlights unlexed data with the `XmlLexer`. 
""" name = 'XML+Velocity' aliases = ['xml+velocity'] - alias_filenames = ['*.xml','*.vm'] + alias_filenames = ['*.xml', '*.vm'] mimetypes = ['application/xml+velocity'] def __init__(self, **options): @@ -305,7 +314,7 @@ class VelocityXmlLexer(DelegatingLexer): def analyse_text(text): rv = VelocityLexer.analyse_text(text) - 0.01 if looks_like_xml(text): - rv += 0.5 + rv += 0.4 return rv @@ -343,25 +352,25 @@ class DjangoLexer(RegexLexer): Text, Comment.Preproc, Text, Keyword, Text, Comment.Preproc)), # filter blocks - (r'(\{%)(-?\s*)(filter)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)', + (r'(\{%)(-?\s*)(filter)(\s+)([a-zA-Z_]\w*)', bygroups(Comment.Preproc, Text, Keyword, Text, Name.Function), 'block'), - (r'(\{%)(-?\s*)([a-zA-Z_][a-zA-Z0-9_]*)', + (r'(\{%)(-?\s*)([a-zA-Z_]\w*)', bygroups(Comment.Preproc, Text, Keyword), 'block'), (r'\{', Other) ], 'varnames': [ - (r'(\|)(\s*)([a-zA-Z_][a-zA-Z0-9_]*)', + (r'(\|)(\s*)([a-zA-Z_]\w*)', bygroups(Operator, Text, Name.Function)), - (r'(is)(\s+)(not)?(\s+)?([a-zA-Z_][a-zA-Z0-9_]*)', + (r'(is)(\s+)(not)?(\s+)?([a-zA-Z_]\w*)', bygroups(Keyword, Text, Keyword, Text, Name.Function)), (r'(_|true|false|none|True|False|None)\b', Keyword.Pseudo), (r'(in|as|reversed|recursive|not|and|or|is|if|else|import|' r'with(?:(?:out)?\s*context)?|scoped|ignore\s+missing)\b', Keyword), (r'(loop|block|super|forloop)\b', Name.Builtin), - (r'[a-zA-Z][a-zA-Z0-9_-]*', Name.Variable), - (r'\.[a-zA-Z0-9_]+', Name.Variable), + (r'[a-zA-Z][\w-]*', Name.Variable), + (r'\.\w+', Name.Variable), (r':?"(\\\\|\\"|[^"])*"', String.Double), (r":?'(\\\\|\\'|[^'])*'", String.Single), (r'([{}()\[\]+\-*/,:~]|[><=]=?)', Operator), @@ -397,7 +406,7 @@ class MyghtyLexer(RegexLexer): Generic `myghty templates`_ lexer. Code that isn't Myghty markup is yielded as `Token.Other`. - *New in Pygments 0.6.* + .. versionadded:: 0.6 .. 
_myghty templates: http://www.myghty.org/ """ @@ -442,10 +451,10 @@ class MyghtyLexer(RegexLexer): class MyghtyHtmlLexer(DelegatingLexer): """ - Subclass of the `MyghtyLexer` that highlights unlexer data + Subclass of the `MyghtyLexer` that highlights unlexed data with the `HtmlLexer`. - *New in Pygments 0.6.* + .. versionadded:: 0.6 """ name = 'HTML+Myghty' @@ -459,10 +468,10 @@ class MyghtyHtmlLexer(DelegatingLexer): class MyghtyXmlLexer(DelegatingLexer): """ - Subclass of the `MyghtyLexer` that highlights unlexer data + Subclass of the `MyghtyLexer` that highlights unlexed data with the `XmlLexer`. - *New in Pygments 0.6.* + .. versionadded:: 0.6 """ name = 'XML+Myghty' @@ -476,10 +485,10 @@ class MyghtyXmlLexer(DelegatingLexer): class MyghtyJavascriptLexer(DelegatingLexer): """ - Subclass of the `MyghtyLexer` that highlights unlexer data + Subclass of the `MyghtyLexer` that highlights unlexed data with the `JavascriptLexer`. - *New in Pygments 0.6.* + .. versionadded:: 0.6 """ name = 'JavaScript+Myghty' @@ -495,10 +504,10 @@ class MyghtyJavascriptLexer(DelegatingLexer): class MyghtyCssLexer(DelegatingLexer): """ - Subclass of the `MyghtyLexer` that highlights unlexer data + Subclass of the `MyghtyLexer` that highlights unlexed data with the `CssLexer`. - *New in Pygments 0.6.* + .. versionadded:: 0.6 """ name = 'CSS+Myghty' @@ -517,7 +526,7 @@ class MasonLexer(RegexLexer): .. _mason templates: http://www.masonhq.com/ - *New in Pygments 1.4.* + .. versionadded:: 1.4 """ name = 'Mason' aliases = ['mason'] @@ -570,7 +579,7 @@ class MakoLexer(RegexLexer): Generic `mako templates`_ lexer. Code that isn't Mako markup is yielded as `Token.Other`. - *New in Pygments 0.7.* + .. versionadded:: 0.7 .. 
_mako templates: http://www.makotemplates.org/ """ @@ -589,11 +598,11 @@ class MakoLexer(RegexLexer): (r'(\s*)(##[^\n]*)(\n|\Z)', bygroups(Text, Comment.Preproc, Other)), (r'(?s)<%doc>.*?', Comment.Preproc), - (r'(<%)([\w\.\:]+)', + (r'(<%)([\w.:]+)', bygroups(Comment.Preproc, Name.Builtin), 'tag'), - (r'()', + (r'()', bygroups(Comment.Preproc, Name.Builtin, Comment.Preproc)), - (r'<%(?=([\w\.\:]+))', Comment.Preproc, 'ondeftags'), + (r'<%(?=([\w.:]+))', Comment.Preproc, 'ondeftags'), (r'(<%(?:!?))(.*?)(%>)(?s)', bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)), (r'(\$\{)(.*?)(\})', @@ -638,7 +647,7 @@ class MakoHtmlLexer(DelegatingLexer): Subclass of the `MakoLexer` that highlights unlexed data with the `HtmlLexer`. - *New in Pygments 0.7.* + .. versionadded:: 0.7 """ name = 'HTML+Mako' @@ -647,14 +656,15 @@ class MakoHtmlLexer(DelegatingLexer): def __init__(self, **options): super(MakoHtmlLexer, self).__init__(HtmlLexer, MakoLexer, - **options) + **options) + class MakoXmlLexer(DelegatingLexer): """ - Subclass of the `MakoLexer` that highlights unlexer data + Subclass of the `MakoLexer` that highlights unlexed data with the `XmlLexer`. - *New in Pygments 0.7.* + .. versionadded:: 0.7 """ name = 'XML+Mako' @@ -663,14 +673,15 @@ class MakoXmlLexer(DelegatingLexer): def __init__(self, **options): super(MakoXmlLexer, self).__init__(XmlLexer, MakoLexer, - **options) + **options) + class MakoJavascriptLexer(DelegatingLexer): """ - Subclass of the `MakoLexer` that highlights unlexer data + Subclass of the `MakoLexer` that highlights unlexed data with the `JavascriptLexer`. - *New in Pygments 0.7.* + .. 
versionadded:: 0.7 """ name = 'JavaScript+Mako' @@ -681,14 +692,15 @@ class MakoJavascriptLexer(DelegatingLexer): def __init__(self, **options): super(MakoJavascriptLexer, self).__init__(JavascriptLexer, - MakoLexer, **options) + MakoLexer, **options) + class MakoCssLexer(DelegatingLexer): """ - Subclass of the `MakoLexer` that highlights unlexer data + Subclass of the `MakoLexer` that highlights unlexed data with the `CssLexer`. - *New in Pygments 0.7.* + .. versionadded:: 0.7 """ name = 'CSS+Mako' @@ -697,7 +709,7 @@ class MakoCssLexer(DelegatingLexer): def __init__(self, **options): super(MakoCssLexer, self).__init__(CssLexer, MakoLexer, - **options) + **options) # Genshi and Cheetah lexers courtesy of Matt Good. @@ -741,7 +753,7 @@ class CheetahLexer(RegexLexer): (bygroups(Comment.Preproc, using(CheetahPythonLexer), Comment.Preproc))), # TODO support other Python syntax like $foo['bar'] - (r'(\$)([a-zA-Z_][a-zA-Z0-9_\.]*[a-zA-Z0-9_])', + (r'(\$)([a-zA-Z_][\w.]*\w)', bygroups(Comment.Preproc, using(CheetahPythonLexer))), (r'(\$\{!?)(.*?)(\})(?s)', bygroups(Comment.Preproc, using(CheetahPythonLexer), @@ -749,7 +761,7 @@ class CheetahLexer(RegexLexer): (r'''(?sx) (.+?) # anything, followed by: (?: - (?=[#][#a-zA-Z]*) | # an eval comment + (?=\#[#a-zA-Z]*) | # an eval comment (?=\$[a-zA-Z_{]) | # a substitution \Z # end of string ) @@ -761,7 +773,7 @@ class CheetahLexer(RegexLexer): class CheetahHtmlLexer(DelegatingLexer): """ - Subclass of the `CheetahLexer` that highlights unlexer data + Subclass of the `CheetahLexer` that highlights unlexed data with the `HtmlLexer`. """ @@ -776,7 +788,7 @@ class CheetahHtmlLexer(DelegatingLexer): class CheetahXmlLexer(DelegatingLexer): """ - Subclass of the `CheetahLexer` that highlights unlexer data + Subclass of the `CheetahLexer` that highlights unlexed data with the `XmlLexer`. 
""" @@ -791,7 +803,7 @@ class CheetahXmlLexer(DelegatingLexer): class CheetahJavascriptLexer(DelegatingLexer): """ - Subclass of the `CheetahLexer` that highlights unlexer data + Subclass of the `CheetahLexer` that highlights unlexed data with the `JavascriptLexer`. """ @@ -822,11 +834,11 @@ class GenshiTextLexer(RegexLexer): tokens = { 'root': [ - (r'[^#\$\s]+', Other), + (r'[^#$\s]+', Other), (r'^(\s*)(##.*)$', bygroups(Text, Comment)), (r'^(\s*)(#)', bygroups(Text, Comment.Preproc), 'directive'), include('variable'), - (r'[#\$\s]', Other), + (r'[#$\s]', Other), ], 'directive': [ (r'\n', Text, '#pop'), @@ -839,7 +851,7 @@ class GenshiTextLexer(RegexLexer): 'variable': [ (r'(?)', bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)), # yield style and script blocks as Other @@ -863,11 +875,11 @@ class GenshiMarkupLexer(RegexLexer): (r'<\s*py:[a-zA-Z0-9]+', Name.Tag, 'pytag'), (r'<\s*[a-zA-Z0-9:]+', Name.Tag, 'tag'), include('variable'), - (r'[<\$]', Other), + (r'[<$]', Other), ], 'pytag': [ (r'\s+', Text), - (r'[a-zA-Z0-9_:-]+\s*=', Name.Attribute, 'pyattr'), + (r'[\w:-]+\s*=', Name.Attribute, 'pyattr'), (r'/?\s*>', Name.Tag, '#pop'), ], 'pyattr': [ @@ -877,8 +889,8 @@ class GenshiMarkupLexer(RegexLexer): ], 'tag': [ (r'\s+', Text), - (r'py:[a-zA-Z0-9_-]+\s*=', Name.Attribute, 'pyattr'), - (r'[a-zA-Z0-9_:-]+\s*=', Name.Attribute, 'attr'), + (r'py:[\w-]+\s*=', Name.Attribute, 'pyattr'), + (r'[\w:-]+\s*=', Name.Attribute, 'attr'), (r'/?\s*>', Name.Tag, '#pop'), ], 'attr': [ @@ -903,7 +915,7 @@ class GenshiMarkupLexer(RegexLexer): 'variable': [ (r'(?=|<|>', Operator), + (r'<=|>=|<|>|==', Operator), (r'mod\b', Operator), (r'(eq|lt|gt|lte|gte|not|is|and|or)\b', Operator), (r'\|\||&&', Operator), + (r'\?', Operator), (r'"', String.Double, 'string'), # There is a special rule for allowing html in single quoted # strings, evidently. 
(r"'.*?'", String.Single), (r'\d+', Number), - (r'(if|else|len|var|case|default|break|switch)\b', Keyword), - (r'([A-Za-z_$][A-Za-z0-9_.]*)(\s*)(\()', + (r'(if|else|len|var|xml|default|break|switch|component|property|function|do|' + r'try|catch|in|continue|for|return|while|required|any|array|binary|boolean|' + r'component|date|guid|numeric|query|string|struct|uuid|case)\b', Keyword), + (r'(true|false|null)\b', Keyword.Constant), + (r'(application|session|client|cookie|super|this|variables|arguments)\b', + Name.Constant), + (r'([a-z_$][\w.]*)(\s*)(\()', bygroups(Name.Function, Text, Punctuation)), - (r'[A-Za-z_$][A-Za-z0-9_.]*', Name.Variable), + (r'[a-z_$][\w.]*', Name.Variable), (r'[()\[\]{};:,.\\]', Punctuation), (r'\s+', Text), ], @@ -1525,7 +1547,7 @@ class ColdfusionMarkupLexer(RegexLexer): (r'<[^<>]*', Other), ], 'tags': [ - (r'(?s)', Comment.Multiline), + (r'', Comment), (r'', Name.Builtin, 'cfoutput'), (r'(?s)()(.+?)()', @@ -1541,12 +1563,17 @@ class ColdfusionMarkupLexer(RegexLexer): (r'[^#<]+', Other), (r'(#)(.*?)(#)', bygroups(Punctuation, using(ColdfusionLexer), Punctuation)), - #(r'', Name.Builtin, '#push'), + # (r'', Name.Builtin, '#push'), (r'', Name.Builtin, '#pop'), include('tags'), (r'(?s)<[^<>]*', Other), (r'#', Other), ], + 'cfcomment': [ + (r'', Comment.Multiline, '#pop'), + (r'([^<-]|<(?!!---)|-(?!-->))+', Comment.Multiline), + ], } @@ -1556,7 +1583,7 @@ class ColdfusionHtmlLexer(DelegatingLexer): """ name = 'Coldfusion HTML' aliases = ['cfm'] - filenames = ['*.cfm', '*.cfml', '*.cfc'] + filenames = ['*.cfm', '*.cfml'] mimetypes = ['application/x-coldfusion'] def __init__(self, **options): @@ -1564,11 +1591,27 @@ class ColdfusionHtmlLexer(DelegatingLexer): **options) +class ColdfusionCFCLexer(DelegatingLexer): + """ + Coldfusion markup/script components + + .. 
versionadded:: 2.0 + """ + name = 'Coldfusion CFC' + aliases = ['cfc'] + filenames = ['*.cfc'] + mimetypes = [] + + def __init__(self, **options): + super(ColdfusionCFCLexer, self).__init__(ColdfusionHtmlLexer, ColdfusionLexer, + **options) + + class SspLexer(DelegatingLexer): """ Lexer for Scalate Server Pages. - *New in Pygments 1.4.* + .. versionadded:: 1.4 """ name = 'Scalate Server Page' aliases = ['ssp'] @@ -1594,7 +1637,7 @@ class TeaTemplateRootLexer(RegexLexer): Base for the `TeaTemplateLexer`. Yields `Token.Other` for area outside of code blocks. - *New in Pygments 1.5.* + .. versionadded:: 1.5 """ tokens = { @@ -1602,20 +1645,20 @@ class TeaTemplateRootLexer(RegexLexer): (r'<%\S?', Keyword, 'sec'), (r'[^<]+', Other), (r'<', Other), - ], + ], 'sec': [ (r'%>', Keyword, '#pop'), # note: '\w\W' != '.' without DOTALL. (r'[\w\W]+?(?=%>|\Z)', using(TeaLangLexer)), - ], - } + ], + } class TeaTemplateLexer(DelegatingLexer): """ Lexer for `Tea Templates `_. - *New in Pygments 1.5.* + .. versionadded:: 1.5 """ name = 'Tea' aliases = ['tea'] @@ -1642,7 +1685,7 @@ class LassoHtmlLexer(DelegatingLexer): Nested JavaScript and CSS is also highlighted. - *New in Pygments 1.6.* + .. versionadded:: 1.6 """ name = 'HTML+Lasso' @@ -1658,9 +1701,7 @@ class LassoHtmlLexer(DelegatingLexer): def analyse_text(text): rv = LassoLexer.analyse_text(text) - 0.01 - if re.search(r'<\w+>', text, re.I): - rv += 0.2 - if html_doctype_matches(text): + if html_doctype_matches(text): # same as HTML lexer rv += 0.5 return rv @@ -1670,7 +1711,7 @@ class LassoXmlLexer(DelegatingLexer): Subclass of the `LassoLexer` which highlights unhandled data with the `XmlLexer`. - *New in Pygments 1.6.* + .. versionadded:: 1.6 """ name = 'XML+Lasso' @@ -1694,7 +1735,7 @@ class LassoCssLexer(DelegatingLexer): Subclass of the `LassoLexer` which highlights unhandled data with the `CssLexer`. - *New in Pygments 1.6.* + .. 
versionadded:: 1.6 """ name = 'CSS+Lasso' @@ -1720,7 +1761,7 @@ class LassoJavascriptLexer(DelegatingLexer): Subclass of the `LassoLexer` which highlights unhandled data with the `JavascriptLexer`. - *New in Pygments 1.6.* + .. versionadded:: 1.6 """ name = 'JavaScript+Lasso' @@ -1740,3 +1781,394 @@ class LassoJavascriptLexer(DelegatingLexer): if 'function' in text: rv += 0.2 return rv + + +class HandlebarsLexer(RegexLexer): + """ + Generic `handlebars ` template lexer. + + Highlights only the Handlebars template tags (stuff between `{{` and `}}`). + Everything else is left for a delegating lexer. + + .. versionadded:: 2.0 + """ + + name = "Handlebars" + aliases = ['handlebars'] + + tokens = { + 'root': [ + (r'[^{]+', Other), + + (r'\{\{!.*\}\}', Comment), + + (r'(\{\{\{)(\s*)', bygroups(Comment.Special, Text), 'tag'), + (r'(\{\{)(\s*)', bygroups(Comment.Preproc, Text), 'tag'), + ], + + 'tag': [ + (r'\s+', Text), + (r'\}\}\}', Comment.Special, '#pop'), + (r'\}\}', Comment.Preproc, '#pop'), + + # Handlebars + (r'([#/]*)(each|if|unless|else|with|log|in)', bygroups(Keyword, + Keyword)), + + # General {{#block}} + (r'([#/])([\w-]+)', bygroups(Name.Function, Name.Function)), + + # {{opt=something}} + (r'([\w-]+)(=)', bygroups(Name.Attribute, Operator)), + + # borrowed from DjangoLexer + (r':?"(\\\\|\\"|[^"])*"', String.Double), + (r":?'(\\\\|\\'|[^'])*'", String.Single), + (r'[a-zA-Z][\w-]*', Name.Variable), + (r'\.[\w-]+', Name.Variable), + (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|" + r"0[xX][0-9a-fA-F]+[Ll]?", Number), + ] + } + + +class HandlebarsHtmlLexer(DelegatingLexer): + """ + Subclass of the `HandlebarsLexer` that highlights unlexed data with the + `HtmlLexer`. + + .. 
versionadded:: 2.0 + """ + + name = "HTML+Handlebars" + aliases = ["html+handlebars"] + filenames = ['*.handlebars', '*.hbs'] + mimetypes = ['text/html+handlebars', 'text/x-handlebars-template'] + + def __init__(self, **options): + super(HandlebarsHtmlLexer, self).__init__(HtmlLexer, HandlebarsLexer, **options) + + +class YamlJinjaLexer(DelegatingLexer): + """ + Subclass of the `DjangoLexer` that highighlights unlexed data with the + `YamlLexer`. + + Commonly used in Saltstack salt states. + + .. versionadded:: 2.0 + """ + + name = 'YAML+Jinja' + aliases = ['yaml+jinja', 'salt', 'sls'] + filenames = ['*.sls'] + mimetypes = ['text/x-yaml+jinja', 'text/x-sls'] + + def __init__(self, **options): + super(YamlJinjaLexer, self).__init__(YamlLexer, DjangoLexer, **options) + + +class LiquidLexer(RegexLexer): + """ + Lexer for `Liquid templates + `_. + + .. versionadded:: 2.0 + """ + name = 'liquid' + aliases = ['liquid'] + filenames = ['*.liquid'] + + tokens = { + 'root': [ + (r'[^{]+', Text), + # tags and block tags + (r'(\{%)(\s*)', bygroups(Punctuation, Whitespace), 'tag-or-block'), + # output tags + (r'(\{\{)(\s*)([^\s}]+)', + bygroups(Punctuation, Whitespace, using(this, state = 'generic')), + 'output'), + (r'\{', Text) + ], + + 'tag-or-block': [ + # builtin logic blocks + (r'(if|unless|elsif|case)(?=\s+)', Keyword.Reserved, 'condition'), + (r'(when)(\s+)', bygroups(Keyword.Reserved, Whitespace), + combined('end-of-block', 'whitespace', 'generic')), + (r'(else)(\s*)(%\})', + bygroups(Keyword.Reserved, Whitespace, Punctuation), '#pop'), + + # other builtin blocks + (r'(capture)(\s+)([^\s%]+)(\s*)(%\})', + bygroups(Name.Tag, Whitespace, using(this, state = 'variable'), + Whitespace, Punctuation), '#pop'), + (r'(comment)(\s*)(%\})', + bygroups(Name.Tag, Whitespace, Punctuation), 'comment'), + (r'(raw)(\s*)(%\})', + bygroups(Name.Tag, Whitespace, Punctuation), 'raw'), + + # end of block + (r'(end(case|unless|if))(\s*)(%\})', + bygroups(Keyword.Reserved, None, Whitespace, 
Punctuation), '#pop'), + (r'(end([^\s%]+))(\s*)(%\})', + bygroups(Name.Tag, None, Whitespace, Punctuation), '#pop'), + + # builtin tags (assign and include are handled together with usual tags) + (r'(cycle)(\s+)(?:([^\s:]*)(:))?(\s*)', + bygroups(Name.Tag, Whitespace, + using(this, state='generic'), Punctuation, Whitespace), + 'variable-tag-markup'), + + # other tags or blocks + (r'([^\s%]+)(\s*)', bygroups(Name.Tag, Whitespace), 'tag-markup') + ], + + 'output': [ + include('whitespace'), + ('\}\}', Punctuation, '#pop'), # end of output + + (r'\|', Punctuation, 'filters') + ], + + 'filters': [ + include('whitespace'), + (r'\}\}', Punctuation, ('#pop', '#pop')), # end of filters and output + + (r'([^\s|:]+)(:?)(\s*)', + bygroups(Name.Function, Punctuation, Whitespace), 'filter-markup') + ], + + 'filter-markup': [ + (r'\|', Punctuation, '#pop'), + include('end-of-tag'), + include('default-param-markup') + ], + + 'condition': [ + include('end-of-block'), + include('whitespace'), + + (r'([^\s=!><]+)(\s*)([=!><]=?)(\s*)(\S+)(\s*)(%\})', + bygroups(using(this, state = 'generic'), Whitespace, Operator, + Whitespace, using(this, state = 'generic'), Whitespace, + Punctuation)), + (r'\b!', Operator), + (r'\bnot\b', Operator.Word), + (r'([\w.\'"]+)(\s+)(contains)(\s+)([\w.\'"]+)', + bygroups(using(this, state = 'generic'), Whitespace, Operator.Word, + Whitespace, using(this, state = 'generic'))), + + include('generic'), + include('whitespace') + ], + + 'generic-value': [ + include('generic'), + include('end-at-whitespace') + ], + + 'operator': [ + (r'(\s*)((=|!|>|<)=?)(\s*)', + bygroups(Whitespace, Operator, None, Whitespace), '#pop'), + (r'(\s*)(\bcontains\b)(\s*)', + bygroups(Whitespace, Operator.Word, Whitespace), '#pop'), + ], + + 'end-of-tag': [ + (r'\}\}', Punctuation, '#pop') + ], + + 'end-of-block': [ + (r'%\}', Punctuation, ('#pop', '#pop')) + ], + + 'end-at-whitespace': [ + (r'\s+', Whitespace, '#pop') + ], + + # states for unknown markup + 'param-markup': [ + 
include('whitespace'), + # params with colons or equals + (r'([^\s=:]+)(\s*)(=|:)', + bygroups(Name.Attribute, Whitespace, Operator)), + # explicit variables + (r'(\{\{)(\s*)([^\s}])(\s*)(\}\})', + bygroups(Punctuation, Whitespace, using(this, state = 'variable'), + Whitespace, Punctuation)), + + include('string'), + include('number'), + include('keyword'), + (r',', Punctuation) + ], + + 'default-param-markup': [ + include('param-markup'), + (r'.', Text) # fallback for switches / variables / un-quoted strings / ... + ], + + 'variable-param-markup': [ + include('param-markup'), + include('variable'), + (r'.', Text) # fallback + ], + + 'tag-markup': [ + (r'%\}', Punctuation, ('#pop', '#pop')), # end of tag + include('default-param-markup') + ], + + 'variable-tag-markup': [ + (r'%\}', Punctuation, ('#pop', '#pop')), # end of tag + include('variable-param-markup') + ], + + # states for different values types + 'keyword': [ + (r'\b(false|true)\b', Keyword.Constant) + ], + + 'variable': [ + (r'[a-zA-Z_]\w*', Name.Variable), + (r'(?<=\w)\.(?=\w)', Punctuation) + ], + + 'string': [ + (r"'[^']*'", String.Single), + (r'"[^"]*"', String.Double) + ], + + 'number': [ + (r'\d+\.\d+', Number.Float), + (r'\d+', Number.Integer) + ], + + 'generic': [ # decides for variable, string, keyword or number + include('keyword'), + include('string'), + include('number'), + include('variable') + ], + + 'whitespace': [ + (r'[ \t]+', Whitespace) + ], + + # states for builtin blocks + 'comment': [ + (r'(\{%)(\s*)(endcomment)(\s*)(%\})', + bygroups(Punctuation, Whitespace, Name.Tag, Whitespace, + Punctuation), ('#pop', '#pop')), + (r'.', Comment) + ], + + 'raw': [ + (r'[^{]+', Text), + (r'(\{%)(\s*)(endraw)(\s*)(%\})', + bygroups(Punctuation, Whitespace, Name.Tag, Whitespace, + Punctuation), '#pop'), + (r'\{', Text) + ], + } + + +class TwigLexer(RegexLexer): + """ + `Twig `_ template lexer. 
+ + It just highlights Twig code between the preprocessor directives, + other data is left untouched by the lexer. + + .. versionadded:: 2.0 + """ + + name = 'Twig' + aliases = ['twig'] + mimetypes = ['application/x-twig'] + + flags = re.M | re.S + + # Note that a backslash is included in the following two patterns + # PHP uses a backslash as a namespace separator + _ident_char = r'[\\\w-]|[^\x00-\x7f]' + _ident_begin = r'(?:[\\_a-z]|[^\x00-\x7f])' + _ident_end = r'(?:' + _ident_char + ')*' + _ident_inner = _ident_begin + _ident_end + + tokens = { + 'root': [ + (r'[^{]+', Other), + (r'\{\{', Comment.Preproc, 'var'), + # twig comments + (r'\{\#.*?\#\}', Comment), + # raw twig blocks + (r'(\{%)(-?\s*)(raw)(\s*-?)(%\})(.*?)' + r'(\{%)(-?\s*)(endraw)(\s*-?)(%\})', + bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc, + Other, Comment.Preproc, Text, Keyword, Text, + Comment.Preproc)), + (r'(\{%)(-?\s*)(verbatim)(\s*-?)(%\})(.*?)' + r'(\{%)(-?\s*)(endverbatim)(\s*-?)(%\})', + bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc, + Other, Comment.Preproc, Text, Keyword, Text, + Comment.Preproc)), + # filter blocks + (r'(\{%%)(-?\s*)(filter)(\s+)(%s)' % _ident_inner, + bygroups(Comment.Preproc, Text, Keyword, Text, Name.Function), + 'tag'), + (r'(\{%)(-?\s*)([a-zA-Z_]\w*)', + bygroups(Comment.Preproc, Text, Keyword), 'tag'), + (r'\{', Other), + ], + 'varnames': [ + (r'(\|)(\s*)(%s)' % _ident_inner, + bygroups(Operator, Text, Name.Function)), + (r'(is)(\s+)(not)?(\s*)(%s)' % _ident_inner, + bygroups(Keyword, Text, Keyword, Text, Name.Function)), + (r'(?i)(true|false|none|null)\b', Keyword.Pseudo), + (r'(in|not|and|b-and|or|b-or|b-xor|is' + r'if|elseif|else|import' + r'constant|defined|divisibleby|empty|even|iterable|odd|sameas' + r'matches|starts\s+with|ends\s+with)\b', + Keyword), + (r'(loop|block|parent)\b', Name.Builtin), + (_ident_inner, Name.Variable), + (r'\.' 
+ _ident_inner, Name.Variable), + (r'\.[0-9]+', Number), + (r':?"(\\\\|\\"|[^"])*"', String.Double), + (r":?'(\\\\|\\'|[^'])*'", String.Single), + (r'([{}()\[\]+\-*/,:~%]|\.\.|\?|:|\*\*|\/\/|!=|[><=]=?)', Operator), + (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|" + r"0[xX][0-9a-fA-F]+[Ll]?", Number), + ], + 'var': [ + (r'\s+', Text), + (r'(-?)(\}\})', bygroups(Text, Comment.Preproc), '#pop'), + include('varnames') + ], + 'tag': [ + (r'\s+', Text), + (r'(-?)(%\})', bygroups(Text, Comment.Preproc), '#pop'), + include('varnames'), + (r'.', Punctuation), + ], + } + + +class TwigHtmlLexer(DelegatingLexer): + """ + Subclass of the `TwigLexer` that highlights unlexed data with the + `HtmlLexer`. + + .. versionadded:: 2.0 + """ + + name = "HTML+Twig" + aliases = ["html+twig"] + filenames = ['*.twig'] + mimetypes = ['text/html+twig'] + + def __init__(self, **options): + super(TwigHtmlLexer, self).__init__(HtmlLexer, TwigLexer, **options) diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/testing.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/testing.py new file mode 100644 index 0000000..07e3cb3 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/testing.py @@ -0,0 +1,131 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.testing + ~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for testing languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, include, bygroups +from pygments.token import Comment, Keyword, Name, String + +__all__ = ['GherkinLexer'] + + +class GherkinLexer(RegexLexer): + """ + For `Gherkin ` syntax. + + .. 
versionadded:: 1.2 + """ + name = 'Gherkin' + aliases = ['cucumber', 'gherkin'] + filenames = ['*.feature'] + mimetypes = ['text/x-gherkin'] + + feature_keywords = u'^(기능|機能|功能|フィーチャ|خاصية|תכונה|Функціонал|Функционалност|Функционал|Фича|Особина|Могућност|Özellik|Właściwość|Tính năng|Trajto|Savybė|Požiadavka|Požadavek|Osobina|Ominaisuus|Omadus|OH HAI|Mogućnost|Mogucnost|Jellemző|Fīča|Funzionalità|Funktionalität|Funkcionalnost|Funkcionalitāte|Funcționalitate|Functionaliteit|Functionalitate|Funcionalitat|Funcionalidade|Fonctionnalité|Fitur|Feature|Egenskap|Egenskab|Crikey|Característica|Arwedd)(:)(.*)$' + feature_element_keywords = u'^(\\s*)(시나리오 개요|시나리오|배경|背景|場景大綱|場景|场景大纲|场景|劇本大綱|劇本|テンプレ|シナリオテンプレート|シナリオテンプレ|シナリオアウトライン|シナリオ|سيناريو مخطط|سيناريو|الخلفية|תרחיש|תבנית תרחיש|רקע|Тарих|Сценарій|Сценарио|Сценарий структураси|Сценарий|Структура сценарію|Структура сценарија|Структура сценария|Скица|Рамка на сценарий|Пример|Предыстория|Предистория|Позадина|Передумова|Основа|Концепт|Контекст|Założenia|Wharrimean is|Tình huống|The thing of it is|Tausta|Taust|Tapausaihio|Tapaus|Szenariogrundriss|Szenario|Szablon scenariusza|Stsenaarium|Struktura scenarija|Skica|Skenario konsep|Skenario|Situācija|Senaryo taslağı|Senaryo|Scénář|Scénario|Schema dello scenario|Scenārijs pēc parauga|Scenārijs|Scenár|Scenaro|Scenariusz|Scenariul de şablon|Scenariul de sablon|Scenariu|Scenario Outline|Scenario Amlinellol|Scenario|Scenarijus|Scenarijaus šablonas|Scenarij|Scenarie|Rerefons|Raamstsenaarium|Primer|Pozadí|Pozadina|Pozadie|Plan du scénario|Plan du Scénario|Osnova scénáře|Osnova|Náčrt Scénáře|Náčrt Scenáru|Mate|MISHUN SRSLY|MISHUN|Kịch bản|Konturo de la scenaro|Kontext|Konteksts|Kontekstas|Kontekst|Koncept|Khung tình huống|Khung kịch bản|Háttér|Grundlage|Geçmiş|Forgatókönyv vázlat|Forgatókönyv|Fono|Esquema do Cenário|Esquema do Cenario|Esquema del escenario|Esquema de l\'escenari|Escenario|Escenari|Dis is what went 
down|Dasar|Contexto|Contexte|Contesto|Condiţii|Conditii|Cenário|Cenario|Cefndir|Bối cảnh|Blokes|Bakgrunn|Bakgrund|Baggrund|Background|B4|Antecedents|Antecedentes|All y\'all|Achtergrond|Abstrakt Scenario|Abstract Scenario)(:)(.*)$' + examples_keywords = u'^(\\s*)(예|例子|例|サンプル|امثلة|דוגמאות|Сценарији|Примери|Приклади|Мисоллар|Значения|Örnekler|Voorbeelden|Variantai|Tapaukset|Scenarios|Scenariji|Scenarijai|Příklady|Példák|Príklady|Przykłady|Primjeri|Primeri|Piemēri|Pavyzdžiai|Paraugs|Juhtumid|Exemplos|Exemples|Exemplele|Exempel|Examples|Esempi|Enghreifftiau|Ekzemploj|Eksempler|Ejemplos|EXAMPLZ|Dữ liệu|Contoh|Cobber|Beispiele)(:)(.*)$' + step_keywords = u'^(\\s*)(하지만|조건|먼저|만일|만약|단|그리고|그러면|那麼|那么|而且|當|当|前提|假設|假如|但是|但し|並且|もし|ならば|ただし|しかし|かつ|و |متى |لكن |عندما |ثم |بفرض |اذاً |כאשר |וגם |בהינתן |אזי |אז |אבל |Якщо |Унда |То |Припустимо, що |Припустимо |Онда |Но |Нехай |Лекин |Когато |Када |Кад |К тому же |И |Задато |Задати |Задате |Если |Допустим |Дадено |Ва |Бирок |Аммо |Али |Але |Агар |А |І |Și |És |Zatati |Zakładając |Zadato |Zadate |Zadano |Zadani |Zadan |Youse know when youse got |Youse know like when |Yna |Ya know how |Ya gotta |Y |Wun |Wtedy |When y\'all |When |Wenn |WEN |Và |Ve |Und |Un |Thì |Then y\'all |Then |Tapi |Tak |Tada |Tad |Så |Stel |Soit |Siis |Si |Sed |Se |Quando |Quand |Quan |Pryd |Pokud |Pokiaľ |Però |Pero |Pak |Oraz |Onda |Ond |Oletetaan |Og |Och |O zaman |Når |När |Niin |Nhưng |N |Mutta |Men |Mas |Maka |Majd |Mais |Maar |Ma |Lorsque |Lorsqu\'|Kun |Kuid |Kui |Khi |Keď |Ketika |Když |Kaj |Kai |Kada |Kad |Jeżeli |Ja |Ir |I CAN HAZ |I |Ha |Givun |Givet |Given y\'all |Given |Gitt |Gegeven |Gegeben sei |Fakat |Eğer ki |Etant donné |Et |Então |Entonces |Entao |En |Eeldades |E |Duota |Dun |Donitaĵo |Donat |Donada |Do |Diyelim ki |Dengan |Den youse gotta |De |Dato |Dar |Dann |Dan |Dado |Dacă |Daca |DEN |Când |Cuando |Cho |Cept |Cand |Cal |But y\'all |But |Buh |Biết |Bet |BUT |Atès |Atunci |Atesa |Anrhegedig a |Angenommen |And y\'all |And |An |Ama |Als |Alors 
|Allora |Ali |Aleshores |Ale |Akkor |Aber |AN |A také |A |\* )' + + tokens = { + 'comments': [ + (r'^\s*#.*$', Comment), + ], + 'feature_elements': [ + (step_keywords, Keyword, "step_content_stack"), + include('comments'), + (r"(\s|.)", Name.Function), + ], + 'feature_elements_on_stack': [ + (step_keywords, Keyword, "#pop:2"), + include('comments'), + (r"(\s|.)", Name.Function), + ], + 'examples_table': [ + (r"\s+\|", Keyword, 'examples_table_header'), + include('comments'), + (r"(\s|.)", Name.Function), + ], + 'examples_table_header': [ + (r"\s+\|\s*$", Keyword, "#pop:2"), + include('comments'), + (r"\\\|", Name.Variable), + (r"\s*\|", Keyword), + (r"[^|]", Name.Variable), + ], + 'scenario_sections_on_stack': [ + (feature_element_keywords, + bygroups(Name.Function, Keyword, Keyword, Name.Function), + "feature_elements_on_stack"), + ], + 'narrative': [ + include('scenario_sections_on_stack'), + include('comments'), + (r"(\s|.)", Name.Function), + ], + 'table_vars': [ + (r'(<[^>]+>)', Name.Variable), + ], + 'numbers': [ + (r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', String), + ], + 'string': [ + include('table_vars'), + (r'(\s|.)', String), + ], + 'py_string': [ + (r'"""', Keyword, "#pop"), + include('string'), + ], + 'step_content_root': [ + (r"$", Keyword, "#pop"), + include('step_content'), + ], + 'step_content_stack': [ + (r"$", Keyword, "#pop:2"), + include('step_content'), + ], + 'step_content': [ + (r'"', Name.Function, "double_string"), + include('table_vars'), + include('numbers'), + include('comments'), + (r'(\s|.)', Name.Function), + ], + 'table_content': [ + (r"\s+\|\s*$", Keyword, "#pop"), + include('comments'), + (r"\\\|", String), + (r"\s*\|", Keyword), + include('string'), + ], + 'double_string': [ + (r'"', Name.Function, "#pop"), + include('string'), + ], + 'root': [ + (r'\n', Name.Function), + include('comments'), + (r'"""', Keyword, "py_string"), + (r'\s+\|', Keyword, 'table_content'), + (r'"', Name.Function, "double_string"), + 
include('table_vars'), + include('numbers'), + (r'(\s*)(@[^@\r\n\t ]+)', bygroups(Name.Function, Name.Tag)), + (step_keywords, bygroups(Name.Function, Keyword), + 'step_content_root'), + (feature_keywords, bygroups(Keyword, Keyword, Name.Function), + 'narrative'), + (feature_element_keywords, + bygroups(Name.Function, Keyword, Keyword, Name.Function), + 'feature_elements'), + (examples_keywords, + bygroups(Name.Function, Keyword, Keyword, Name.Function), + 'examples_table'), + (r'(\s|.)', Name.Function), + ] + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/text.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/text.py new file mode 100644 index 0000000..3e543af --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/text.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.text + ~~~~~~~~~~~~~~~~~~~~ + + Lexers for non-source code file types. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +from pygments.lexers.configs import ApacheConfLexer, NginxConfLexer, \ + SquidConfLexer, LighttpdConfLexer, IniLexer, RegeditLexer, PropertiesLexer +from pygments.lexers.console import PyPyLogLexer +from pygments.lexers.textedit import VimLexer +from pygments.lexers.markup import BBCodeLexer, MoinWikiLexer, RstLexer, \ + TexLexer, GroffLexer +from pygments.lexers.installers import DebianControlLexer, SourcesListLexer +from pygments.lexers.make import MakefileLexer, BaseMakefileLexer, CMakeLexer +from pygments.lexers.haxe import HxmlLexer +from pygments.lexers.diff import DiffLexer, DarcsPatchLexer +from pygments.lexers.data import YamlLexer +from pygments.lexers.textfmts import IrcLogsLexer, GettextLexer, HttpLexer + +__all__ = [] diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/textedit.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/textedit.py new file mode 100644 index 0000000..af6f02b --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/textedit.py @@ -0,0 +1,169 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.textedit + ~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for languages related to text processing. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re +from bisect import bisect + +from pygments.lexer import RegexLexer, include, default, bygroups, using, this +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +from pygments.lexers.python import PythonLexer + +__all__ = ['AwkLexer', 'VimLexer'] + + +class AwkLexer(RegexLexer): + """ + For Awk scripts. + + .. 
versionadded:: 1.5 + """ + + name = 'Awk' + aliases = ['awk', 'gawk', 'mawk', 'nawk'] + filenames = ['*.awk'] + mimetypes = ['application/x-awk'] + + tokens = { + 'commentsandwhitespace': [ + (r'\s+', Text), + (r'#.*$', Comment.Single) + ], + 'slashstartsregex': [ + include('commentsandwhitespace'), + (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' + r'\B', String.Regex, '#pop'), + (r'(?=/)', Text, ('#pop', 'badregex')), + default('#pop') + ], + 'badregex': [ + (r'\n', Text, '#pop') + ], + 'root': [ + (r'^(?=\s|/)', Text, 'slashstartsregex'), + include('commentsandwhitespace'), + (r'\+\+|--|\|\||&&|in\b|\$|!?~|' + r'(\*\*|[-<>+*%\^/!=|])=?', Operator, 'slashstartsregex'), + (r'[{(\[;,]', Punctuation, 'slashstartsregex'), + (r'[})\].]', Punctuation), + (r'(break|continue|do|while|exit|for|if|else|' + r'return)\b', Keyword, 'slashstartsregex'), + (r'function\b', Keyword.Declaration, 'slashstartsregex'), + (r'(atan2|cos|exp|int|log|rand|sin|sqrt|srand|gensub|gsub|index|' + r'length|match|split|sprintf|sub|substr|tolower|toupper|close|' + r'fflush|getline|next|nextfile|print|printf|strftime|systime|' + r'delete|system)\b', Keyword.Reserved), + (r'(ARGC|ARGIND|ARGV|BEGIN|CONVFMT|ENVIRON|END|ERRNO|FIELDWIDTHS|' + r'FILENAME|FNR|FS|IGNORECASE|NF|NR|OFMT|OFS|ORFS|RLENGTH|RS|' + r'RSTART|RT|SUBSEP)\b', Name.Builtin), + (r'[$a-zA-Z_]\w*', Name.Other), + (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), + (r'0x[0-9a-fA-F]+', Number.Hex), + (r'[0-9]+', Number.Integer), + (r'"(\\\\|\\"|[^"])*"', String.Double), + (r"'(\\\\|\\'|[^'])*'", String.Single), + ] + } + + +class VimLexer(RegexLexer): + """ + Lexer for VimL script files. + + .. versionadded:: 0.8 + """ + name = 'VimL' + aliases = ['vim'] + filenames = ['*.vim', '.vimrc', '.exrc', '.gvimrc', + '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'] + mimetypes = ['text/x-vim'] + flags = re.MULTILINE + + _python = r'py(?:t(?:h(?:o(?:n)?)?)?)?' 
+ + tokens = { + 'root': [ + (r'^([ \t:]*)(' + _python + r')([ \t]*)(<<)([ \t]*)(.*)((?:\n|.)*)(\6)', + bygroups(using(this), Keyword, Text, Operator, Text, Text, + using(PythonLexer), Text)), + (r'^([ \t:]*)(' + _python + r')([ \t])(.*)', + bygroups(using(this), Keyword, Text, using(PythonLexer))), + + (r'^\s*".*', Comment), + + (r'[ \t]+', Text), + # TODO: regexes can have other delims + (r'/(\\\\|\\/|[^\n/])*/', String.Regex), + (r'"(\\\\|\\"|[^\n"])*"', String.Double), + (r"'(''|[^\n'])*'", String.Single), + + # Who decided that doublequote was a good comment character?? + (r'(?<=\s)"[^\-:.%#=*].*', Comment), + (r'-?\d+', Number), + (r'#[0-9a-f]{6}', Number.Hex), + (r'^:', Punctuation), + (r'[()<>+=!|,~-]', Punctuation), # Inexact list. Looks decent. + (r'\b(let|if|else|endif|elseif|fun|function|endfunction)\b', + Keyword), + (r'\b(NONE|bold|italic|underline|dark|light)\b', Name.Builtin), + (r'\b\w+\b', Name.Other), # These are postprocessed below + (r'.', Text), + ], + } + + def __init__(self, **options): + from pygments.lexers._vim_builtins import command, option, auto + self._cmd = command + self._opt = option + self._aut = auto + + RegexLexer.__init__(self, **options) + + def is_in(self, w, mapping): + r""" + It's kind of difficult to decide if something might be a keyword + in VimL because it allows you to abbreviate them. In fact, + 'ab[breviate]' is a good example. :ab, :abbre, or :abbreviate are + valid ways to call it so rather than making really awful regexps + like:: + + \bab(?:b(?:r(?:e(?:v(?:i(?:a(?:t(?:e)?)?)?)?)?)?)?)?\b + + we match `\b\w+\b` and then call is_in() on those tokens. See + `scripts/get_vimkw.py` for how the lists are extracted. 
+ """ + p = bisect(mapping, (w,)) + if p > 0: + if mapping[p-1][0] == w[:len(mapping[p-1][0])] and \ + mapping[p-1][1][:len(w)] == w: + return True + if p < len(mapping): + return mapping[p][0] == w[:len(mapping[p][0])] and \ + mapping[p][1][:len(w)] == w + return False + + def get_tokens_unprocessed(self, text): + # TODO: builtins are only subsequent tokens on lines + # and 'keywords' only happen at the beginning except + # for :au ones + for index, token, value in \ + RegexLexer.get_tokens_unprocessed(self, text): + if token is Name.Other: + if self.is_in(value, self._cmd): + yield index, Keyword, value + elif self.is_in(value, self._opt) or \ + self.is_in(value, self._aut): + yield index, Name.Builtin, value + else: + yield index, Text, value + else: + yield index, token, value diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/textfmts.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/textfmts.py new file mode 100644 index 0000000..189d334 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/textfmts.py @@ -0,0 +1,292 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.textfmts + ~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for various text formats. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, bygroups +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Generic, Literal +from pygments.util import ClassNotFound + +__all__ = ['IrcLogsLexer', 'TodotxtLexer', 'HttpLexer', 'GettextLexer'] + + +class IrcLogsLexer(RegexLexer): + """ + Lexer for IRC logs in *irssi*, *xchat* or *weechat* style. + """ + + name = 'IRC logs' + aliases = ['irc'] + filenames = ['*.weechatlog'] + mimetypes = ['text/x-irclog'] + + flags = re.VERBOSE | re.MULTILINE + timestamp = r""" + ( + # irssi / xchat and others + (?: \[|\()? 
# Opening bracket or paren for the timestamp + (?: # Timestamp + (?: (?:\d{1,4} [-/])* # Date as - or /-separated groups of digits + (?:\d{1,4}) + [T ])? # Date/time separator: T or space + (?: \d?\d [:.])* # Time as :/.-separated groups of 1 or 2 digits + (?: \d?\d [:.]) + ) + (?: \]|\))?\s+ # Closing bracket or paren for the timestamp + | + # weechat + \d{4}\s\w{3}\s\d{2}\s # Date + \d{2}:\d{2}:\d{2}\s+ # Time + Whitespace + | + # xchat + \w{3}\s\d{2}\s # Date + \d{2}:\d{2}:\d{2}\s+ # Time + Whitespace + )? + """ + tokens = { + 'root': [ + # log start/end + (r'^\*\*\*\*(.*)\*\*\*\*$', Comment), + # hack + ("^" + timestamp + r'(\s*<[^>]*>\s*)$', bygroups(Comment.Preproc, Name.Tag)), + # normal msgs + ("^" + timestamp + r""" + (\s*<.*?>\s*) # Nick """, + bygroups(Comment.Preproc, Name.Tag), 'msg'), + # /me msgs + ("^" + timestamp + r""" + (\s*[*]\s+) # Star + (\S+\s+.*?\n) # Nick + rest of message """, + bygroups(Comment.Preproc, Keyword, Generic.Inserted)), + # join/part msgs + ("^" + timestamp + r""" + (\s*(?:\*{3}|?)\s*) # Star(s) or symbols + (\S+\s+) # Nick + Space + (.*?\n) # Rest of message """, + bygroups(Comment.Preproc, Keyword, String, Comment)), + (r"^.*?\n", Text), + ], + 'msg': [ + (r"\S+:(?!//)", Name.Attribute), # Prefix + (r".*\n", Text, '#pop'), + ], + } + + +class GettextLexer(RegexLexer): + """ + Lexer for Gettext catalog files. + + .. 
versionadded:: 0.9 + """ + name = 'Gettext Catalog' + aliases = ['pot', 'po'] + filenames = ['*.pot', '*.po'] + mimetypes = ['application/x-gettext', 'text/x-gettext', 'text/gettext'] + + tokens = { + 'root': [ + (r'^#,\s.*?$', Keyword.Type), + (r'^#:\s.*?$', Keyword.Declaration), + # (r'^#$', Comment), + (r'^(#|#\.\s|#\|\s|#~\s|#\s).*$', Comment.Single), + (r'^(")([A-Za-z-]+:)(.*")$', + bygroups(String, Name.Property, String)), + (r'^".*"$', String), + (r'^(msgid|msgid_plural|msgstr)(\s+)(".*")$', + bygroups(Name.Variable, Text, String)), + (r'^(msgstr\[)(\d)(\])(\s+)(".*")$', + bygroups(Name.Variable, Number.Integer, Name.Variable, Text, String)), + ] + } + + +class HttpLexer(RegexLexer): + """ + Lexer for HTTP sessions. + + .. versionadded:: 1.5 + """ + + name = 'HTTP' + aliases = ['http'] + + flags = re.DOTALL + + def header_callback(self, match): + if match.group(1).lower() == 'content-type': + content_type = match.group(5).strip() + if ';' in content_type: + content_type = content_type[:content_type.find(';')].strip() + self.content_type = content_type + yield match.start(1), Name.Attribute, match.group(1) + yield match.start(2), Text, match.group(2) + yield match.start(3), Operator, match.group(3) + yield match.start(4), Text, match.group(4) + yield match.start(5), Literal, match.group(5) + yield match.start(6), Text, match.group(6) + + def continuous_header_callback(self, match): + yield match.start(1), Text, match.group(1) + yield match.start(2), Literal, match.group(2) + yield match.start(3), Text, match.group(3) + + def content_callback(self, match): + content_type = getattr(self, 'content_type', None) + content = match.group() + offset = match.start() + if content_type: + from pygments.lexers import get_lexer_for_mimetype + possible_lexer_mimetypes = [content_type] + if '+' in content_type: + # application/calendar+xml can be treated as application/xml + # if there's not a better match. 
+ general_type = re.sub(r'^(.*)/.*\+(.*)$', r'\1/\2', + content_type) + possible_lexer_mimetypes.append(general_type) + + for i in possible_lexer_mimetypes: + try: + lexer = get_lexer_for_mimetype(i) + except ClassNotFound: + pass + else: + for idx, token, value in lexer.get_tokens_unprocessed(content): + yield offset + idx, token, value + return + yield offset, Text, content + + tokens = { + 'root': [ + (r'(GET|POST|PUT|DELETE|HEAD|OPTIONS|TRACE|PATCH)( +)([^ ]+)( +)' + r'(HTTP)(/)(1\.[01])(\r?\n|\Z)', + bygroups(Name.Function, Text, Name.Namespace, Text, + Keyword.Reserved, Operator, Number, Text), + 'headers'), + (r'(HTTP)(/)(1\.[01])( +)(\d{3})( +)([^\r\n]+)(\r?\n|\Z)', + bygroups(Keyword.Reserved, Operator, Number, Text, Number, + Text, Name.Exception, Text), + 'headers'), + ], + 'headers': [ + (r'([^\s:]+)( *)(:)( *)([^\r\n]+)(\r?\n|\Z)', header_callback), + (r'([\t ]+)([^\r\n]+)(\r?\n|\Z)', continuous_header_callback), + (r'\r?\n', Text, 'content') + ], + 'content': [ + (r'.+', content_callback) + ] + } + + def analyse_text(text): + return text.startswith(('GET /', 'POST /', 'PUT /', 'DELETE /', 'HEAD /', + 'OPTIONS /', 'TRACE /', 'PATCH /')) + + +class TodotxtLexer(RegexLexer): + """ + Lexer for `Todo.txt `_ todo list format. + + .. versionadded:: 2.0 + """ + + name = 'Todotxt' + aliases = ['todotxt'] + # *.todotxt is not a standard extension for Todo.txt files; including it + # makes testing easier, and also makes autodetecting file type easier. 
+ filenames = ['todo.txt', '*.todotxt'] + mimetypes = ['text/x-todo'] + + # Aliases mapping standard token types of Todo.txt format concepts + CompleteTaskText = Operator # Chosen to de-emphasize complete tasks + IncompleteTaskText = Text # Incomplete tasks should look like plain text + + # Priority should have most emphasis to indicate importance of tasks + Priority = Generic.Heading + # Dates should have next most emphasis because time is important + Date = Generic.Subheading + + # Project and context should have equal weight, and be in different colors + Project = Generic.Error + Context = String + + # If tag functionality is added, it should have the same weight as Project + # and Context, and a different color. Generic.Traceback would work well. + + # Regex patterns for building up rules; dates, priorities, projects, and + # contexts are all atomic + # TODO: Make date regex more ISO 8601 compliant + date_regex = r'\d{4,}-\d{2}-\d{2}' + priority_regex = r'\([A-Z]\)' + project_regex = r'\+\S+' + context_regex = r'@\S+' + + # Compound regex expressions + complete_one_date_regex = r'(x )(' + date_regex + r')' + complete_two_date_regex = (complete_one_date_regex + r'( )(' + + date_regex + r')') + priority_date_regex = r'(' + priority_regex + r')( )(' + date_regex + r')' + + tokens = { + # Should parse starting at beginning of line; each line is a task + 'root': [ + # Complete task entry points: two total: + # 1. Complete task with two dates + (complete_two_date_regex, bygroups(CompleteTaskText, Date, + CompleteTaskText, Date), + 'complete'), + # 2. Complete task with one date + (complete_one_date_regex, bygroups(CompleteTaskText, Date), + 'complete'), + + # Incomplete task entry points: six total: + # 1. Priority plus date + (priority_date_regex, bygroups(Priority, IncompleteTaskText, Date), + 'incomplete'), + # 2. Priority only + (priority_regex, Priority, 'incomplete'), + # 3. Leading date + (date_regex, Date, 'incomplete'), + # 4. 
Leading context + (context_regex, Context, 'incomplete'), + # 5. Leading project + (project_regex, Project, 'incomplete'), + # 6. Non-whitespace catch-all + ('\S+', IncompleteTaskText, 'incomplete'), + ], + + # Parse a complete task + 'complete': [ + # Newline indicates end of task, should return to root + (r'\s*\n', CompleteTaskText, '#pop'), + # Tokenize contexts and projects + (context_regex, Context), + (project_regex, Project), + # Tokenize non-whitespace text + ('\S+', CompleteTaskText), + # Tokenize whitespace not containing a newline + ('\s+', CompleteTaskText), + ], + + # Parse an incomplete task + 'incomplete': [ + # Newline indicates end of task, should return to root + (r'\s*\n', IncompleteTaskText, '#pop'), + # Tokenize contexts and projects + (context_regex, Context), + (project_regex, Project), + # Tokenize non-whitespace text + ('\S+', IncompleteTaskText), + # Tokenize whitespace not containing a newline + ('\s+', IncompleteTaskText), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/theorem.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/theorem.py new file mode 100644 index 0000000..585c6df --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/theorem.py @@ -0,0 +1,466 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.theorem + ~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for theorem-proving languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, default, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Generic + +__all__ = ['CoqLexer', 'IsabelleLexer', 'LeanLexer'] + + +class CoqLexer(RegexLexer): + """ + For the `Coq `_ theorem prover. + + .. 
versionadded:: 1.5 + """ + + name = 'Coq' + aliases = ['coq'] + filenames = ['*.v'] + mimetypes = ['text/x-coq'] + + keywords1 = ( + # Vernacular commands + 'Section', 'Module', 'End', 'Require', 'Import', 'Export', 'Variable', + 'Variables', 'Parameter', 'Parameters', 'Axiom', 'Hypothesis', + 'Hypotheses', 'Notation', 'Local', 'Tactic', 'Reserved', 'Scope', + 'Open', 'Close', 'Bind', 'Delimit', 'Definition', 'Let', 'Ltac', + 'Fixpoint', 'CoFixpoint', 'Morphism', 'Relation', 'Implicit', + 'Arguments', 'Set', 'Unset', 'Contextual', 'Strict', 'Prenex', + 'Implicits', 'Inductive', 'CoInductive', 'Record', 'Structure', + 'Canonical', 'Coercion', 'Theorem', 'Lemma', 'Corollary', + 'Proposition', 'Fact', 'Remark', 'Example', 'Proof', 'Goal', 'Save', + 'Qed', 'Defined', 'Hint', 'Resolve', 'Rewrite', 'View', 'Search', + 'Show', 'Print', 'Printing', 'All', 'Graph', 'Projections', 'inside', + 'outside', 'Check', + ) + keywords2 = ( + # Gallina + 'forall', 'exists', 'exists2', 'fun', 'fix', 'cofix', 'struct', + 'match', 'end', 'in', 'return', 'let', 'if', 'is', 'then', 'else', + 'for', 'of', 'nosimpl', 'with', 'as', + ) + keywords3 = ( + # Sorts + 'Type', 'Prop', + ) + keywords4 = ( + # Tactics + 'pose', 'set', 'move', 'case', 'elim', 'apply', 'clear', 'hnf', 'intro', + 'intros', 'generalize', 'rename', 'pattern', 'after', 'destruct', + 'induction', 'using', 'refine', 'inversion', 'injection', 'rewrite', + 'congr', 'unlock', 'compute', 'ring', 'field', 'replace', 'fold', + 'unfold', 'change', 'cutrewrite', 'simpl', 'have', 'suff', 'wlog', + 'suffices', 'without', 'loss', 'nat_norm', 'assert', 'cut', 'trivial', + 'revert', 'bool_congr', 'nat_congr', 'symmetry', 'transitivity', 'auto', + 'split', 'left', 'right', 'autorewrite', 'tauto', + ) + keywords5 = ( + # Terminators + 'by', 'done', 'exact', 'reflexivity', 'tauto', 'romega', 'omega', + 'assumption', 'solve', 'contradiction', 'discriminate', + ) + keywords6 = ( + # Control + 'do', 'last', 'first', 'try', 'idtac', 'repeat', 
+ ) + # 'as', 'assert', 'begin', 'class', 'constraint', 'do', 'done', + # 'downto', 'else', 'end', 'exception', 'external', 'false', + # 'for', 'fun', 'function', 'functor', 'if', 'in', 'include', + # 'inherit', 'initializer', 'lazy', 'let', 'match', 'method', + # 'module', 'mutable', 'new', 'object', 'of', 'open', 'private', + # 'raise', 'rec', 'sig', 'struct', 'then', 'to', 'true', 'try', + # 'type', 'val', 'virtual', 'when', 'while', 'with' + keyopts = ( + '!=', '#', '&', '&&', r'\(', r'\)', r'\*', r'\+', ',', '-', r'-\.', + '->', r'\.', r'\.\.', ':', '::', ':=', ':>', ';', ';;', '<', '<-', + '<->', '=', '>', '>]', r'>\}', r'\?', r'\?\?', r'\[', r'\[<', r'\[>', + r'\[\|', ']', '_', '`', r'\{', r'\{<', r'\|', r'\|]', r'\}', '~', '=>', + r'/\\', r'\\/', + u'Π', u'λ', + ) + operators = r'[!$%&*+\./:<=>?@^|~-]' + word_operators = ('and', 'asr', 'land', 'lor', 'lsl', 'lxor', 'mod', 'or') + prefix_syms = r'[!?~]' + infix_syms = r'[=<>@^|&+\*/$%-]' + primitives = ('unit', 'int', 'float', 'bool', 'string', 'char', 'list', + 'array') + + tokens = { + 'root': [ + (r'\s+', Text), + (r'false|true|\(\)|\[\]', Name.Builtin.Pseudo), + (r'\(\*', Comment, 'comment'), + (words(keywords1, prefix=r'\b', suffix=r'\b'), Keyword.Namespace), + (words(keywords2, prefix=r'\b', suffix=r'\b'), Keyword), + (words(keywords3, prefix=r'\b', suffix=r'\b'), Keyword.Type), + (words(keywords4, prefix=r'\b', suffix=r'\b'), Keyword), + (words(keywords5, prefix=r'\b', suffix=r'\b'), Keyword.Pseudo), + (words(keywords6, prefix=r'\b', suffix=r'\b'), Keyword.Reserved), + (r'\b([A-Z][\w\']*)(?=\s*\.)', Name.Namespace, 'dotted'), + (r'\b([A-Z][\w\']*)', Name.Class), + (r'(%s)' % '|'.join(keyopts[::-1]), Operator), + (r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator), + (r'\b(%s)\b' % '|'.join(word_operators), Operator.Word), + (r'\b(%s)\b' % '|'.join(primitives), Keyword.Type), + + (r"[^\W\d][\w']*", Name), + + (r'\d[\d_]*', Number.Integer), + (r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex), + 
(r'0[oO][0-7][0-7_]*', Number.Oct), + (r'0[bB][01][01_]*', Number.Bin), + (r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float), + + (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'", + String.Char), + (r"'.'", String.Char), + (r"'", Keyword), # a stray quote is another syntax element + + (r'"', String.Double, 'string'), + + (r'[~?][a-z][\w\']*:', Name.Variable), + ], + 'comment': [ + (r'[^(*)]+', Comment), + (r'\(\*', Comment, '#push'), + (r'\*\)', Comment, '#pop'), + (r'[(*)]', Comment), + ], + 'string': [ + (r'[^"]+', String.Double), + (r'""', String.Double), + (r'"', String.Double, '#pop'), + ], + 'dotted': [ + (r'\s+', Text), + (r'\.', Punctuation), + (r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace), + (r'[A-Z][\w\']*', Name.Class, '#pop'), + (r'[a-z][a-z0-9_\']*', Name, '#pop'), + default('#pop') + ], + } + + def analyse_text(text): + if text.startswith('(*'): + return True + + +class IsabelleLexer(RegexLexer): + """ + For the `Isabelle `_ proof assistant. + + .. versionadded:: 2.0 + """ + + name = 'Isabelle' + aliases = ['isabelle'] + filenames = ['*.thy'] + mimetypes = ['text/x-isabelle'] + + keyword_minor = ( + 'and', 'assumes', 'attach', 'avoids', 'binder', 'checking', + 'class_instance', 'class_relation', 'code_module', 'congs', + 'constant', 'constrains', 'datatypes', 'defines', 'file', 'fixes', + 'for', 'functions', 'hints', 'identifier', 'if', 'imports', 'in', + 'includes', 'infix', 'infixl', 'infixr', 'is', 'keywords', 'lazy', + 'module_name', 'monos', 'morphisms', 'no_discs_sels', 'notes', + 'obtains', 'open', 'output', 'overloaded', 'parametric', 'permissive', + 'pervasive', 'rep_compat', 'shows', 'structure', 'type_class', + 'type_constructor', 'unchecked', 'unsafe', 'where', + ) + + keyword_diag = ( + 'ML_command', 'ML_val', 'class_deps', 'code_deps', 'code_thms', + 'display_drafts', 'find_consts', 'find_theorems', 'find_unused_assms', + 'full_prf', 'help', 'locale_deps', 'nitpick', 'pr', 'prf', + 'print_abbrevs', 
'print_antiquotations', 'print_attributes', + 'print_binds', 'print_bnfs', 'print_bundles', + 'print_case_translations', 'print_cases', 'print_claset', + 'print_classes', 'print_codeproc', 'print_codesetup', + 'print_coercions', 'print_commands', 'print_context', + 'print_defn_rules', 'print_dependencies', 'print_facts', + 'print_induct_rules', 'print_inductives', 'print_interps', + 'print_locale', 'print_locales', 'print_methods', 'print_options', + 'print_orders', 'print_quot_maps', 'print_quotconsts', + 'print_quotients', 'print_quotientsQ3', 'print_quotmapsQ3', + 'print_rules', 'print_simpset', 'print_state', 'print_statement', + 'print_syntax', 'print_theorems', 'print_theory', 'print_trans_rules', + 'prop', 'pwd', 'quickcheck', 'refute', 'sledgehammer', 'smt_status', + 'solve_direct', 'spark_status', 'term', 'thm', 'thm_deps', 'thy_deps', + 'try', 'try0', 'typ', 'unused_thms', 'value', 'values', 'welcome', + 'print_ML_antiquotations', 'print_term_bindings', 'values_prolog', + ) + + keyword_thy = ('theory', 'begin', 'end') + + keyword_section = ('header', 'chapter') + + keyword_subsection = ( + 'section', 'subsection', 'subsubsection', 'sect', 'subsect', + 'subsubsect', + ) + + keyword_theory_decl = ( + 'ML', 'ML_file', 'abbreviation', 'adhoc_overloading', 'arities', + 'atom_decl', 'attribute_setup', 'axiomatization', 'bundle', + 'case_of_simps', 'class', 'classes', 'classrel', 'codatatype', + 'code_abort', 'code_class', 'code_const', 'code_datatype', + 'code_identifier', 'code_include', 'code_instance', 'code_modulename', + 'code_monad', 'code_printing', 'code_reflect', 'code_reserved', + 'code_type', 'coinductive', 'coinductive_set', 'consts', 'context', + 'datatype', 'datatype_new', 'datatype_new_compat', 'declaration', + 'declare', 'default_sort', 'defer_recdef', 'definition', 'defs', + 'domain', 'domain_isomorphism', 'domaindef', 'equivariance', + 'export_code', 'extract', 'extract_type', 'fixrec', 'fun', + 'fun_cases', 'hide_class', 'hide_const', 
'hide_fact', 'hide_type', + 'import_const_map', 'import_file', 'import_tptp', 'import_type_map', + 'inductive', 'inductive_set', 'instantiation', 'judgment', 'lemmas', + 'lifting_forget', 'lifting_update', 'local_setup', 'locale', + 'method_setup', 'nitpick_params', 'no_adhoc_overloading', + 'no_notation', 'no_syntax', 'no_translations', 'no_type_notation', + 'nominal_datatype', 'nonterminal', 'notation', 'notepad', 'oracle', + 'overloading', 'parse_ast_translation', 'parse_translation', + 'partial_function', 'primcorec', 'primrec', 'primrec_new', + 'print_ast_translation', 'print_translation', 'quickcheck_generator', + 'quickcheck_params', 'realizability', 'realizers', 'recdef', 'record', + 'refute_params', 'setup', 'setup_lifting', 'simproc_setup', + 'simps_of_case', 'sledgehammer_params', 'spark_end', 'spark_open', + 'spark_open_siv', 'spark_open_vcg', 'spark_proof_functions', + 'spark_types', 'statespace', 'syntax', 'syntax_declaration', 'text', + 'text_raw', 'theorems', 'translations', 'type_notation', + 'type_synonym', 'typed_print_translation', 'typedecl', 'hoarestate', + 'install_C_file', 'install_C_types', 'wpc_setup', 'c_defs', 'c_types', + 'memsafe', 'SML_export', 'SML_file', 'SML_import', 'approximate', + 'bnf_axiomatization', 'cartouche', 'datatype_compat', + 'free_constructors', 'functor', 'nominal_function', + 'nominal_termination', 'permanent_interpretation', + 'binds', 'defining', 'smt2_status', 'term_cartouche', + 'boogie_file', 'text_cartouche', + ) + + keyword_theory_script = ('inductive_cases', 'inductive_simps') + + keyword_theory_goal = ( + 'ax_specification', 'bnf', 'code_pred', 'corollary', 'cpodef', + 'crunch', 'crunch_ignore', + 'enriched_type', 'function', 'instance', 'interpretation', 'lemma', + 'lift_definition', 'nominal_inductive', 'nominal_inductive2', + 'nominal_primrec', 'pcpodef', 'primcorecursive', + 'quotient_definition', 'quotient_type', 'recdef_tc', 'rep_datatype', + 'schematic_corollary', 'schematic_lemma', 
'schematic_theorem', + 'spark_vc', 'specification', 'subclass', 'sublocale', 'termination', + 'theorem', 'typedef', 'wrap_free_constructors', + ) + + keyword_qed = ('by', 'done', 'qed') + keyword_abandon_proof = ('sorry', 'oops') + + keyword_proof_goal = ('have', 'hence', 'interpret') + + keyword_proof_block = ('next', 'proof') + + keyword_proof_chain = ( + 'finally', 'from', 'then', 'ultimately', 'with', + ) + + keyword_proof_decl = ( + 'ML_prf', 'also', 'include', 'including', 'let', 'moreover', 'note', + 'txt', 'txt_raw', 'unfolding', 'using', 'write', + ) + + keyword_proof_asm = ('assume', 'case', 'def', 'fix', 'presume') + + keyword_proof_asm_goal = ('guess', 'obtain', 'show', 'thus') + + keyword_proof_script = ( + 'apply', 'apply_end', 'apply_trace', 'back', 'defer', 'prefer', + ) + + operators = ( + '::', ':', '(', ')', '[', ']', '_', '=', ',', '|', + '+', '-', '!', '?', + ) + + proof_operators = ('{', '}', '.', '..') + + tokens = { + 'root': [ + (r'\s+', Text), + (r'\(\*', Comment, 'comment'), + (r'\{\*', Comment, 'text'), + + (words(operators), Operator), + (words(proof_operators), Operator.Word), + + (words(keyword_minor, prefix=r'\b', suffix=r'\b'), Keyword.Pseudo), + + (words(keyword_diag, prefix=r'\b', suffix=r'\b'), Keyword.Type), + + (words(keyword_thy, prefix=r'\b', suffix=r'\b'), Keyword), + (words(keyword_theory_decl, prefix=r'\b', suffix=r'\b'), Keyword), + + (words(keyword_section, prefix=r'\b', suffix=r'\b'), Generic.Heading), + (words(keyword_subsection, prefix=r'\b', suffix=r'\b'), Generic.Subheading), + + (words(keyword_theory_goal, prefix=r'\b', suffix=r'\b'), Keyword.Namespace), + (words(keyword_theory_script, prefix=r'\b', suffix=r'\b'), Keyword.Namespace), + + (words(keyword_abandon_proof, prefix=r'\b', suffix=r'\b'), Generic.Error), + + (words(keyword_qed, prefix=r'\b', suffix=r'\b'), Keyword), + (words(keyword_proof_goal, prefix=r'\b', suffix=r'\b'), Keyword), + (words(keyword_proof_block, prefix=r'\b', suffix=r'\b'), Keyword), + 
(words(keyword_proof_decl, prefix=r'\b', suffix=r'\b'), Keyword), + + (words(keyword_proof_chain, prefix=r'\b', suffix=r'\b'), Keyword), + (words(keyword_proof_asm, prefix=r'\b', suffix=r'\b'), Keyword), + (words(keyword_proof_asm_goal, prefix=r'\b', suffix=r'\b'), Keyword), + + (words(keyword_proof_script, prefix=r'\b', suffix=r'\b'), Keyword.Pseudo), + + (r'\\<\w*>', Text.Symbol), + + (r"[^\W\d][.\w']*", Name), + (r"\?[^\W\d][.\w']*", Name), + (r"'[^\W\d][.\w']*", Name.Type), + + (r'\d[\d_]*', Name), # display numbers as name + (r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex), + (r'0[oO][0-7][0-7_]*', Number.Oct), + (r'0[bB][01][01_]*', Number.Bin), + + (r'"', String, 'string'), + (r'`', String.Other, 'fact'), + ], + 'comment': [ + (r'[^(*)]+', Comment), + (r'\(\*', Comment, '#push'), + (r'\*\)', Comment, '#pop'), + (r'[(*)]', Comment), + ], + 'text': [ + (r'[^*}]+', Comment), + (r'\*\}', Comment, '#pop'), + (r'\*', Comment), + (r'\}', Comment), + ], + 'string': [ + (r'[^"\\]+', String), + (r'\\<\w*>', String.Symbol), + (r'\\"', String), + (r'\\', String), + (r'"', String, '#pop'), + ], + 'fact': [ + (r'[^`\\]+', String.Other), + (r'\\<\w*>', String.Symbol), + (r'\\`', String.Other), + (r'\\', String.Other), + (r'`', String.Other, '#pop'), + ], + } + + +class LeanLexer(RegexLexer): + """ + For the `Lean `_ + theorem prover. + + .. 
versionadded:: 2.0 + """ + name = 'Lean' + aliases = ['lean'] + filenames = ['*.lean'] + mimetypes = ['text/x-lean'] + + flags = re.MULTILINE | re.UNICODE + + keywords1 = ('import', 'abbreviation', 'opaque_hint', 'tactic_hint', 'definition', 'renaming', + 'inline', 'hiding', 'exposing', 'parameter', 'parameters', 'conjecture', + 'hypothesis', 'lemma', 'corollary', 'variable', 'variables', 'print', 'theorem', + 'axiom', 'inductive', 'structure', 'universe', 'alias', 'help', + 'options', 'precedence', 'postfix', 'prefix', 'calc_trans', 'calc_subst', 'calc_refl', + 'infix', 'infixl', 'infixr', 'notation', 'eval', 'check', 'exit', 'coercion', 'end', + 'private', 'using', 'namespace', 'including', 'instance', 'section', 'context', + 'protected', 'expose', 'export', 'set_option', 'add_rewrite', 'extends') + + keywords2 = ( + 'forall', 'exists', 'fun', 'Pi', 'obtain', 'from', 'have', 'show', 'assume', 'take', + 'let', 'if', 'else', 'then', 'by', 'in', 'with', 'begin', 'proof', 'qed', 'calc' + ) + + keywords3 = ( + # Sorts + 'Type', 'Prop', + ) + + keywords4 = ( + # Tactics + 'apply', 'and_then', 'or_else', 'append', 'interleave', 'par', 'fixpoint', 'repeat', + 'at_most', 'discard', 'focus_at', 'rotate', 'try_for', 'now', 'assumption', 'eassumption', + 'state', 'intro', 'generalize', 'exact', 'unfold', 'beta', 'trace', 'focus', 'repeat1', + 'determ', 'destruct', 'try', 'auto', 'intros' + ) + + operators = ( + '!=', '#', '&', '&&', '*', '+', '-', '/', '@', + '-.', '->', '.', '..', '...', '::', ':>', ';', ';;', '<', + '<-', '=', '==', '>', '_', '`', '|', '||', '~', '=>', '<=', '>=', + '/\\', '\\/', u'∀', u'Π', u'λ', u'↔', u'∧', u'∨', u'≠', u'≤', u'≥', + u'¬', u'⁻¹', u'⬝', u'▸', u'→', u'∃', u'ℕ', u'ℤ', u'≈' + ) + + word_operators = ('and', 'or', 'not', 'iff', 'eq') + + punctuation = ('(', ')', ':', '{', '}', '[', ']', u'⦃', u'⦄', ':=', ',') + + primitives = ('unit', 'int', 'bool', 'string', 'char', 'list', + 'array', 'prod', 'sum', 'pair', 'real', 'nat', 'num', 'path') + + 
tokens = { + 'root': [ + (r'\s+', Text), + (r'\b(false|true)\b|\(\)|\[\]', Name.Builtin.Pseudo), + (r'/-', Comment, 'comment'), + (r'--.*?$', Comment.Single), + (words(keywords1, prefix=r'\b', suffix=r'\b'), Keyword.Namespace), + (words(keywords2, prefix=r'\b', suffix=r'\b'), Keyword), + (words(keywords3, prefix=r'\b', suffix=r'\b'), Keyword.Type), + (words(keywords4, prefix=r'\b', suffix=r'\b'), Keyword), + (words(operators), Name.Builtin.Pseudo), + (words(word_operators, prefix=r'\b', suffix=r'\b'), Name.Builtin.Pseudo), + (words(punctuation), Operator), + (words(primitives, prefix=r'\b', suffix=r'\b'), Keyword.Type), + (u"[A-Za-z_\u03b1-\u03ba\u03bc-\u03fb\u1f00-\u1ffe\u2100-\u214f]" + u"[A-Za-z_'\u03b1-\u03ba\u03bc-\u03fb\u1f00-\u1ffe\u2070-\u2079" + u"\u207f-\u2089\u2090-\u209c\u2100-\u214f]*", Name), + (r'\d+', Number.Integer), + (r'"', String.Double, 'string'), + (r'[~?][a-z][\w\']*:', Name.Variable) + ], + 'comment': [ + # Multiline Comments + (r'[^/-]', Comment.Multiline), + (r'/-', Comment.Multiline, '#push'), + (r'-/', Comment.Multiline, '#pop'), + (r'[/-]', Comment.Multiline) + ], + 'string': [ + (r'[^\\"]+', String.Double), + (r'\\[n"\\]', String.Escape), + ('"', String.Double, '#pop'), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/urbi.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/urbi.py new file mode 100644 index 0000000..b8ac151 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/urbi.py @@ -0,0 +1,133 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.urbi + ~~~~~~~~~~~~~~~~~~~~ + + Lexers for UrbiScript language. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import re + +from pygments.lexer import ExtendedRegexLexer, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['UrbiscriptLexer'] + + +class UrbiscriptLexer(ExtendedRegexLexer): + """ + For UrbiScript source code. + + .. versionadded:: 1.5 + """ + + name = 'UrbiScript' + aliases = ['urbiscript'] + filenames = ['*.u'] + mimetypes = ['application/x-urbiscript'] + + flags = re.DOTALL + + # TODO + # - handle Experimental and deprecated tags with specific tokens + # - handle Angles and Durations with specific tokens + + def blob_callback(lexer, match, ctx): + text_before_blob = match.group(1) + blob_start = match.group(2) + blob_size_str = match.group(3) + blob_size = int(blob_size_str) + yield match.start(), String, text_before_blob + ctx.pos += len(text_before_blob) + + # if blob size doesn't match blob format (example : "\B(2)(aaa)") + # yield blob as a string + if ctx.text[match.end() + blob_size] != ")": + result = "\\B(" + blob_size_str + ")(" + yield match.start(), String, result + ctx.pos += len(result) + return + + # if blob is well formated, yield as Escape + blob_text = blob_start + ctx.text[match.end():match.end()+blob_size] + ")" + yield match.start(), String.Escape, blob_text + ctx.pos = match.end() + blob_size + 1 # +1 is the ending ")" + + tokens = { + 'root': [ + (r'\s+', Text), + # comments + (r'//.*?\n', Comment), + (r'/\*', Comment.Multiline, 'comment'), + (r'(every|for|loop|while)(?:;|&|\||,)', Keyword), + (words(( + 'assert', 'at', 'break', 'case', 'catch', 'closure', 'compl', + 'continue', 'default', 'else', 'enum', 'every', 'external', + 'finally', 'for', 'freezeif', 'if', 'new', 'onleave', 'return', + 'stopif', 'switch', 'this', 'throw', 'timeout', 'try', + 'waituntil', 'whenever', 'while'), suffix=r'\b'), + Keyword), + (words(( + 'asm', 'auto', 'bool', 'char', 'const_cast', 'delete', 'double', + 'dynamic_cast', 'explicit', 'export', 'extern', 'float', 'friend', + 
'goto', 'inline', 'int', 'long', 'mutable', 'namespace', 'register', + 'reinterpret_cast', 'short', 'signed', 'sizeof', 'static_cast', + 'struct', 'template', 'typedef', 'typeid', 'typename', 'union', + 'unsigned', 'using', 'virtual', 'volatile', 'wchar_t'), suffix=r'\b'), + Keyword.Reserved), + # deprecated keywords, use a meaningfull token when available + (r'(emit|foreach|internal|loopn|static)\b', Keyword), + # ignored keywords, use a meaningfull token when available + (r'(private|protected|public)\b', Keyword), + (r'(var|do|const|function|class)\b', Keyword.Declaration), + (r'(true|false|nil|void)\b', Keyword.Constant), + (words(( + 'Barrier', 'Binary', 'Boolean', 'CallMessage', 'Channel', 'Code', + 'Comparable', 'Container', 'Control', 'Date', 'Dictionary', 'Directory', + 'Duration', 'Enumeration', 'Event', 'Exception', 'Executable', 'File', + 'Finalizable', 'Float', 'FormatInfo', 'Formatter', 'Global', 'Group', + 'Hash', 'InputStream', 'IoService', 'Job', 'Kernel', 'Lazy', 'List', + 'Loadable', 'Lobby', 'Location', 'Logger', 'Math', 'Mutex', 'nil', + 'Object', 'Orderable', 'OutputStream', 'Pair', 'Path', 'Pattern', + 'Position', 'Primitive', 'Process', 'Profile', 'PseudoLazy', 'PubSub', + 'RangeIterable', 'Regexp', 'Semaphore', 'Server', 'Singleton', 'Socket', + 'StackFrame', 'Stream', 'String', 'System', 'Tag', 'Timeout', + 'Traceable', 'TrajectoryGenerator', 'Triplet', 'Tuple', 'UObject', + 'UValue', 'UVar'), suffix=r'\b'), + Name.Builtin), + (r'(?:this)\b', Name.Builtin.Pseudo), + # don't match single | and & + (r'(?:[-=+*%/<>~^:]+|\.&?|\|\||&&)', Operator), + (r'(?:and_eq|and|bitand|bitor|in|not|not_eq|or_eq|or|xor_eq|xor)\b', + Operator.Word), + (r'[{}\[\]()]+', Punctuation), + (r'(?:;|\||,|&|\?|!)+', Punctuation), + (r'[$a-zA-Z_]\w*', Name.Other), + (r'0x[0-9a-fA-F]+', Number.Hex), + # Float, Integer, Angle and Duration + (r'(?:[0-9]+(?:(?:\.[0-9]+)?(?:[eE][+-]?[0-9]+)?)?' 
+ r'((?:rad|deg|grad)|(?:ms|s|min|h|d))?)\b', Number.Float), + # handle binary blob in strings + (r'"', String.Double, "string.double"), + (r"'", String.Single, "string.single"), + ], + 'string.double': [ + (r'((?:\\\\|\\"|[^"])*?)(\\B\((\d+)\)\()', blob_callback), + (r'(\\\\|\\"|[^"])*?"', String.Double, '#pop'), + ], + 'string.single': [ + (r"((?:\\\\|\\'|[^'])*?)(\\B\((\d+)\)\()", blob_callback), + (r"(\\\\|\\'|[^'])*?'", String.Single, '#pop'), + ], + # from http://pygments.org/docs/lexerdevelopment/#changing-states + 'comment': [ + (r'[^*/]', Comment.Multiline), + (r'/\*', Comment.Multiline, '#push'), + (r'\*/', Comment.Multiline, '#pop'), + (r'[*/]', Comment.Multiline), + ] + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/web.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/web.py new file mode 100644 index 0000000..8db0da5 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/web.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.web + ~~~~~~~~~~~~~~~~~~~ + + Just export previously exported lexers. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +from pygments.lexers.html import HtmlLexer, DtdLexer, XmlLexer, XsltLexer, \ + HamlLexer, ScamlLexer, JadeLexer +from pygments.lexers.css import CssLexer, SassLexer, ScssLexer +from pygments.lexers.javascript import JavascriptLexer, LiveScriptLexer, \ + DartLexer, TypeScriptLexer, LassoLexer, ObjectiveJLexer, CoffeeScriptLexer +from pygments.lexers.actionscript import ActionScriptLexer, \ + ActionScript3Lexer, MxmlLexer +from pygments.lexers.php import PhpLexer +from pygments.lexers.webmisc import DuelLexer, XQueryLexer, SlimLexer, QmlLexer +from pygments.lexers.data import JsonLexer +JSONLexer = JsonLexer # for backwards compatibility with Pygments 1.5 + +__all__ = [] diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/webmisc.py b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/webmisc.py new file mode 100644 index 0000000..331d78d --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py2/pygments/lexers/webmisc.py @@ -0,0 +1,920 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.webmisc + ~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for misc. web stuff. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, bygroups, \ + default, using +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Literal +from pygments.util import unirange + +from pygments.lexers.css import _indentation, _starts_block +from pygments.lexers.html import HtmlLexer +from pygments.lexers.javascript import JavascriptLexer +from pygments.lexers.ruby import RubyLexer + +__all__ = ['DuelLexer', 'SlimLexer', 'XQueryLexer', 'QmlLexer', 'CirruLexer'] + + +class DuelLexer(RegexLexer): + """ + Lexer for Duel Views Engine (formerly JBST) markup with JavaScript code blocks. + See http://duelengine.org/. + See http://jsonml.org/jbst/. + + .. 
versionadded:: 1.4 + """ + + name = 'Duel' + aliases = ['duel', 'jbst', 'jsonml+bst'] + filenames = ['*.duel', '*.jbst'] + mimetypes = ['text/x-duel', 'text/x-jbst'] + + flags = re.DOTALL + + tokens = { + 'root': [ + (r'(<%[@=#!:]?)(.*?)(%>)', + bygroups(Name.Tag, using(JavascriptLexer), Name.Tag)), + (r'(<%\$)(.*?)(:)(.*?)(%>)', + bygroups(Name.Tag, Name.Function, Punctuation, String, Name.Tag)), + (r'(<%--)(.*?)(--%>)', + bygroups(Name.Tag, Comment.Multiline, Name.Tag)), + (r'()(.*?)()', + bygroups(using(HtmlLexer), + using(JavascriptLexer), using(HtmlLexer))), + (r'(.+?)(?=<)', using(HtmlLexer)), + (r'.+', using(HtmlLexer)), + ], + } + + +class XQueryLexer(ExtendedRegexLexer): + """ + An XQuery lexer, parsing a stream and outputting the tokens needed to + highlight xquery code. + + .. versionadded:: 1.4 + """ + name = 'XQuery' + aliases = ['xquery', 'xqy', 'xq', 'xql', 'xqm'] + filenames = ['*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'] + mimetypes = ['text/xquery', 'application/xquery'] + + xquery_parse_state = [] + + # FIX UNICODE LATER + # ncnamestartchar = ( + # ur"[A-Z]|_|[a-z]|[\u00C0-\u00D6]|[\u00D8-\u00F6]|[\u00F8-\u02FF]|" + # ur"[\u0370-\u037D]|[\u037F-\u1FFF]|[\u200C-\u200D]|[\u2070-\u218F]|" + # ur"[\u2C00-\u2FEF]|[\u3001-\uD7FF]|[\uF900-\uFDCF]|[\uFDF0-\uFFFD]|" + # ur"[\u10000-\uEFFFF]" + # ) + ncnamestartchar = r"(?:[A-Z]|_|[a-z])" + # FIX UNICODE LATER + # ncnamechar = ncnamestartchar + (ur"|-|\.|[0-9]|\u00B7|[\u0300-\u036F]|" + # ur"[\u203F-\u2040]") + ncnamechar = r"(?:" + ncnamestartchar + r"|-|\.|[0-9])" + ncname = "(?:%s+%s*)" % (ncnamestartchar, ncnamechar) + pitarget_namestartchar = r"(?:[A-KN-WYZ]|_|:|[a-kn-wyz])" + pitarget_namechar = r"(?:" + pitarget_namestartchar + r"|-|\.|[0-9])" + pitarget = "%s+%s*" % (pitarget_namestartchar, pitarget_namechar) + prefixedname = "%s:%s" % (ncname, ncname) + unprefixedname = ncname + qname = "(?:%s|%s)" % (prefixedname, unprefixedname) + + entityref = r'(?:&(?:lt|gt|amp|quot|apos|nbsp);)' + charref = 
r'(?:&#[0-9]+;|&#x[0-9a-fA-F]+;)' + + stringdouble = r'(?:"(?:' + entityref + r'|' + charref + r'|""|[^&"])*")' + stringsingle = r"(?:'(?:" + entityref + r"|" + charref + r"|''|[^&'])*')" + + # FIX UNICODE LATER + # elementcontentchar = (ur'\t|\r|\n|[\u0020-\u0025]|[\u0028-\u003b]|' + # ur'[\u003d-\u007a]|\u007c|[\u007e-\u007F]') + elementcontentchar = r'[A-Za-z]|\s|\d|[!"#$%()*+,\-./:;=?@\[\\\]^_\'`|~]' + # quotattrcontentchar = (ur'\t|\r|\n|[\u0020-\u0021]|[\u0023-\u0025]|' + # ur'[\u0027-\u003b]|[\u003d-\u007a]|\u007c|[\u007e-\u007F]') + quotattrcontentchar = r'[A-Za-z]|\s|\d|[!#$%()*+,\-./:;=?@\[\\\]^_\'`|~]' + # aposattrcontentchar = (ur'\t|\r|\n|[\u0020-\u0025]|[\u0028-\u003b]|' + # ur'[\u003d-\u007a]|\u007c|[\u007e-\u007F]') + aposattrcontentchar = r'[A-Za-z]|\s|\d|[!"#$%()*+,\-./:;=?@\[\\\]^_`|~]' + + # CHAR elements - fix the above elementcontentchar, quotattrcontentchar, + # aposattrcontentchar + # x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF] + + flags = re.DOTALL | re.MULTILINE | re.UNICODE + + def punctuation_root_callback(lexer, match, ctx): + yield match.start(), Punctuation, match.group(1) + # transition to root always - don't pop off stack + ctx.stack = ['root'] + ctx.pos = match.end() + + def operator_root_callback(lexer, match, ctx): + yield match.start(), Operator, match.group(1) + # transition to root always - don't pop off stack + ctx.stack = ['root'] + ctx.pos = match.end() + + def popstate_tag_callback(lexer, match, ctx): + yield match.start(), Name.Tag, match.group(1) + ctx.stack.append(lexer.xquery_parse_state.pop()) + ctx.pos = match.end() + + def popstate_xmlcomment_callback(lexer, match, ctx): + yield match.start(), String.Doc, match.group(1) + ctx.stack.append(lexer.xquery_parse_state.pop()) + ctx.pos = match.end() + + def popstate_kindtest_callback(lexer, match, ctx): + yield match.start(), Punctuation, match.group(1) + next_state = lexer.xquery_parse_state.pop() + if next_state == 'occurrenceindicator': + if 
re.match("[?*+]+", match.group(2)): + yield match.start(), Punctuation, match.group(2) + ctx.stack.append('operator') + ctx.pos = match.end() + else: + ctx.stack.append('operator') + ctx.pos = match.end(1) + else: + ctx.stack.append(next_state) + ctx.pos = match.end(1) + + def popstate_callback(lexer, match, ctx): + yield match.start(), Punctuation, match.group(1) + # if we have run out of our state stack, pop whatever is on the pygments + # state stack + if len(lexer.xquery_parse_state) == 0: + ctx.stack.pop() + elif len(ctx.stack) > 1: + ctx.stack.append(lexer.xquery_parse_state.pop()) + else: + # i don't know if i'll need this, but in case, default back to root + ctx.stack = ['root'] + ctx.pos = match.end() + + def pushstate_element_content_starttag_callback(lexer, match, ctx): + yield match.start(), Name.Tag, match.group(1) + lexer.xquery_parse_state.append('element_content') + ctx.stack.append('start_tag') + ctx.pos = match.end() + + def pushstate_cdata_section_callback(lexer, match, ctx): + yield match.start(), String.Doc, match.group(1) + ctx.stack.append('cdata_section') + lexer.xquery_parse_state.append(ctx.state.pop) + ctx.pos = match.end() + + def pushstate_starttag_callback(lexer, match, ctx): + yield match.start(), Name.Tag, match.group(1) + lexer.xquery_parse_state.append(ctx.state.pop) + ctx.stack.append('start_tag') + ctx.pos = match.end() + + def pushstate_operator_order_callback(lexer, match, ctx): + yield match.start(), Keyword, match.group(1) + yield match.start(), Text, match.group(2) + yield match.start(), Punctuation, match.group(3) + ctx.stack = ['root'] + lexer.xquery_parse_state.append('operator') + ctx.pos = match.end() + + def pushstate_operator_root_validate(lexer, match, ctx): + yield match.start(), Keyword, match.group(1) + yield match.start(), Text, match.group(2) + yield match.start(), Punctuation, match.group(3) + ctx.stack = ['root'] + lexer.xquery_parse_state.append('operator') + ctx.pos = match.end() + + def 
pushstate_operator_root_validate_withmode(lexer, match, ctx): + yield match.start(), Keyword, match.group(1) + yield match.start(), Text, match.group(2) + yield match.start(), Keyword, match.group(3) + ctx.stack = ['root'] + lexer.xquery_parse_state.append('operator') + ctx.pos = match.end() + + def pushstate_operator_processing_instruction_callback(lexer, match, ctx): + yield match.start(), String.Doc, match.group(1) + ctx.stack.append('processing_instruction') + lexer.xquery_parse_state.append('operator') + ctx.pos = match.end() + + def pushstate_element_content_processing_instruction_callback(lexer, match, ctx): + yield match.start(), String.Doc, match.group(1) + ctx.stack.append('processing_instruction') + lexer.xquery_parse_state.append('element_content') + ctx.pos = match.end() + + def pushstate_element_content_cdata_section_callback(lexer, match, ctx): + yield match.start(), String.Doc, match.group(1) + ctx.stack.append('cdata_section') + lexer.xquery_parse_state.append('element_content') + ctx.pos = match.end() + + def pushstate_operator_cdata_section_callback(lexer, match, ctx): + yield match.start(), String.Doc, match.group(1) + ctx.stack.append('cdata_section') + lexer.xquery_parse_state.append('operator') + ctx.pos = match.end() + + def pushstate_element_content_xmlcomment_callback(lexer, match, ctx): + yield match.start(), String.Doc, match.group(1) + ctx.stack.append('xml_comment') + lexer.xquery_parse_state.append('element_content') + ctx.pos = match.end() + + def pushstate_operator_xmlcomment_callback(lexer, match, ctx): + yield match.start(), String.Doc, match.group(1) + ctx.stack.append('xml_comment') + lexer.xquery_parse_state.append('operator') + ctx.pos = match.end() + + def pushstate_kindtest_callback(lexer, match, ctx): + yield match.start(), Keyword, match.group(1) + yield match.start(), Text, match.group(2) + yield match.start(), Punctuation, match.group(3) + lexer.xquery_parse_state.append('kindtest') + ctx.stack.append('kindtest') + 
ctx.pos = match.end() + + def pushstate_operator_kindtestforpi_callback(lexer, match, ctx): + yield match.start(), Keyword, match.group(1) + yield match.start(), Text, match.group(2) + yield match.start(), Punctuation, match.group(3) + lexer.xquery_parse_state.append('operator') + ctx.stack.append('kindtestforpi') + ctx.pos = match.end() + + def pushstate_operator_kindtest_callback(lexer, match, ctx): + yield match.start(), Keyword, match.group(1) + yield match.start(), Text, match.group(2) + yield match.start(), Punctuation, match.group(3) + lexer.xquery_parse_state.append('operator') + ctx.stack.append('kindtest') + ctx.pos = match.end() + + def pushstate_occurrenceindicator_kindtest_callback(lexer, match, ctx): + yield match.start(), Name.Tag, match.group(1) + yield match.start(), Text, match.group(2) + yield match.start(), Punctuation, match.group(3) + lexer.xquery_parse_state.append('occurrenceindicator') + ctx.stack.append('kindtest') + ctx.pos = match.end() + + def pushstate_operator_starttag_callback(lexer, match, ctx): + yield match.start(), Name.Tag, match.group(1) + lexer.xquery_parse_state.append('operator') + ctx.stack.append('start_tag') + ctx.pos = match.end() + + def pushstate_operator_root_callback(lexer, match, ctx): + yield match.start(), Punctuation, match.group(1) + lexer.xquery_parse_state.append('operator') + ctx.stack = ['root'] + ctx.pos = match.end() + + def pushstate_operator_root_construct_callback(lexer, match, ctx): + yield match.start(), Keyword, match.group(1) + yield match.start(), Text, match.group(2) + yield match.start(), Punctuation, match.group(3) + lexer.xquery_parse_state.append('operator') + ctx.stack = ['root'] + ctx.pos = match.end() + + def pushstate_root_callback(lexer, match, ctx): + yield match.start(), Punctuation, match.group(1) + cur_state = ctx.stack.pop() + lexer.xquery_parse_state.append(cur_state) + ctx.stack = ['root'] + ctx.pos = match.end() + + def pushstate_operator_attribute_callback(lexer, match, ctx): + 
yield match.start(), Name.Attribute, match.group(1) + ctx.stack.append('operator') + ctx.pos = match.end() + + def pushstate_operator_callback(lexer, match, ctx): + yield match.start(), Keyword, match.group(1) + yield match.start(), Text, match.group(2) + yield match.start(), Punctuation, match.group(3) + lexer.xquery_parse_state.append('operator') + ctx.pos = match.end() + + tokens = { + 'comment': [ + # xquery comments + (r'(:\))', Comment, '#pop'), + (r'(\(:)', Comment, '#push'), + (r'[^:)]', Comment), + (r'([^:)]|:|\))', Comment), + ], + 'whitespace': [ + (r'\s+', Text), + ], + 'operator': [ + include('whitespace'), + (r'(\})', popstate_callback), + (r'\(:', Comment, 'comment'), + + (r'(\{)', pushstate_root_callback), + (r'then|else|external|at|div|except', Keyword, 'root'), + (r'order by', Keyword, 'root'), + (r'is|mod|order\s+by|stable\s+order\s+by', Keyword, 'root'), + (r'and|or', Operator.Word, 'root'), + (r'(eq|ge|gt|le|lt|ne|idiv|intersect|in)(?=\b)', + Operator.Word, 'root'), + (r'return|satisfies|to|union|where|preserve\s+strip', + Keyword, 'root'), + (r'(>=|>>|>|<=|<<|<|-|\*|!=|\+|\|\||\||:=|=)', + operator_root_callback), + (r'(::|;|\[|//|/|,)', + punctuation_root_callback), + (r'(castable|cast)(\s+)(as)\b', + bygroups(Keyword, Text, Keyword), 'singletype'), + (r'(instance)(\s+)(of)\b', + bygroups(Keyword, Text, Keyword), 'itemtype'), + (r'(treat)(\s+)(as)\b', + bygroups(Keyword, Text, Keyword), 'itemtype'), + (r'(case|as)\b', Keyword, 'itemtype'), + (r'(\))(\s*)(as)', + bygroups(Punctuation, Text, Keyword), 'itemtype'), + (r'\$', Name.Variable, 'varname'), + (r'(for|let)(\s+)(\$)', + bygroups(Keyword, Text, Name.Variable), 'varname'), + # (r'\)|\?|\]', Punctuation, '#push'), + (r'\)|\?|\]', Punctuation), + (r'(empty)(\s+)(greatest|least)', bygroups(Keyword, Text, Keyword)), + (r'ascending|descending|default', Keyword, '#push'), + (r'external', Keyword), + (r'collation', Keyword, 'uritooperator'), + # finally catch all string literals and stay in 
operator state + (stringdouble, String.Double), + (stringsingle, String.Single), + + (r'(catch)(\s*)', bygroups(Keyword, Text), 'root'), + ], + 'uritooperator': [ + (stringdouble, String.Double, '#pop'), + (stringsingle, String.Single, '#pop'), + ], + 'namespacedecl': [ + include('whitespace'), + (r'\(:', Comment, 'comment'), + (r'(at)(\s+)('+stringdouble+')', bygroups(Keyword, Text, String.Double)), + (r"(at)(\s+)("+stringsingle+')', bygroups(Keyword, Text, String.Single)), + (stringdouble, String.Double), + (stringsingle, String.Single), + (r',', Punctuation), + (r'=', Operator), + (r';', Punctuation, 'root'), + (ncname, Name.Namespace), + ], + 'namespacekeyword': [ + include('whitespace'), + (r'\(:', Comment, 'comment'), + (stringdouble, String.Double, 'namespacedecl'), + (stringsingle, String.Single, 'namespacedecl'), + (r'inherit|no-inherit', Keyword, 'root'), + (r'namespace', Keyword, 'namespacedecl'), + (r'(default)(\s+)(element)', bygroups(Keyword, Text, Keyword)), + (r'preserve|no-preserve', Keyword), + (r',', Punctuation), + ], + 'varname': [ + (r'\(:', Comment, 'comment'), + (qname, Name.Variable, 'operator'), + ], + 'singletype': [ + (r'\(:', Comment, 'comment'), + (ncname + r'(:\*)', Name.Variable, 'operator'), + (qname, Name.Variable, 'operator'), + ], + 'itemtype': [ + include('whitespace'), + (r'\(:', Comment, 'comment'), + (r'\$', Punctuation, 'varname'), + (r'(void)(\s*)(\()(\s*)(\))', + bygroups(Keyword, Text, Punctuation, Text, Punctuation), 'operator'), + (r'(element|attribute|schema-element|schema-attribute|comment|text|' + r'node|binary|document-node|empty-sequence)(\s*)(\()', + pushstate_occurrenceindicator_kindtest_callback), + # Marklogic specific type? 
+ (r'(processing-instruction)(\s*)(\()', + bygroups(Keyword, Text, Punctuation), + ('occurrenceindicator', 'kindtestforpi')), + (r'(item)(\s*)(\()(\s*)(\))(?=[*+?])', + bygroups(Keyword, Text, Punctuation, Text, Punctuation), + 'occurrenceindicator'), + (r'\(\#', Punctuation, 'pragma'), + (r';', Punctuation, '#pop'), + (r'then|else', Keyword, '#pop'), + (r'(at)(\s+)(' + stringdouble + ')', + bygroups(Keyword, Text, String.Double), 'namespacedecl'), + (r'(at)(\s+)(' + stringsingle + ')', + bygroups(Keyword, Text, String.Single), 'namespacedecl'), + (r'except|intersect|in|is|return|satisfies|to|union|where', + Keyword, 'root'), + (r'and|div|eq|ge|gt|le|lt|ne|idiv|mod|or', Operator.Word, 'root'), + (r':=|=|,|>=|>>|>|\[|\(|<=|<<|<|-|!=|\|\||\|', Operator, 'root'), + (r'external|at', Keyword, 'root'), + (r'(stable)(\s+)(order)(\s+)(by)', + bygroups(Keyword, Text, Keyword, Text, Keyword), 'root'), + (r'(castable|cast)(\s+)(as)', + bygroups(Keyword, Text, Keyword), 'singletype'), + (r'(treat)(\s+)(as)', bygroups(Keyword, Text, Keyword)), + (r'(instance)(\s+)(of)', bygroups(Keyword, Text, Keyword)), + (r'case|as', Keyword, 'itemtype'), + (r'(\))(\s*)(as)', bygroups(Operator, Text, Keyword), 'itemtype'), + (ncname + r':\*', Keyword.Type, 'operator'), + (qname, Keyword.Type, 'occurrenceindicator'), + ], + 'kindtest': [ + (r'\(:', Comment, 'comment'), + (r'\{', Punctuation, 'root'), + (r'(\))([*+?]?)', popstate_kindtest_callback), + (r'\*', Name, 'closekindtest'), + (qname, Name, 'closekindtest'), + (r'(element|schema-element)(\s*)(\()', pushstate_kindtest_callback), + ], + 'kindtestforpi': [ + (r'\(:', Comment, 'comment'), + (r'\)', Punctuation, '#pop'), + (ncname, Name.Variable), + (stringdouble, String.Double), + (stringsingle, String.Single), + ], + 'closekindtest': [ + (r'\(:', Comment, 'comment'), + (r'(\))', popstate_callback), + (r',', Punctuation), + (r'(\{)', pushstate_operator_root_callback), + (r'\?', Punctuation), + ], + 'xml_comment': [ + (r'(-->)', 
popstate_xmlcomment_callback), + (r'[^-]{1,2}', Literal), + (u'\\t|\\r|\\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' + + unirange(0x10000, 0x10ffff), Literal), + ], + 'processing_instruction': [ + (r'\s+', Text, 'processing_instruction_content'), + (r'\?>', String.Doc, '#pop'), + (pitarget, Name), + ], + 'processing_instruction_content': [ + (r'\?>', String.Doc, '#pop'), + (u'\\t|\\r|\\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' + + unirange(0x10000, 0x10ffff), Literal), + ], + 'cdata_section': [ + (r']]>', String.Doc, '#pop'), + (u'\\t|\\r|\\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' + + unirange(0x10000, 0x10ffff), Literal), + ], + 'start_tag': [ + include('whitespace'), + (r'(/>)', popstate_tag_callback), + (r'>', Name.Tag, 'element_content'), + (r'"', Punctuation, 'quot_attribute_content'), + (r"'", Punctuation, 'apos_attribute_content'), + (r'=', Operator), + (qname, Name.Tag), + ], + 'quot_attribute_content': [ + (r'"', Punctuation, 'start_tag'), + (r'(\{)', pushstate_root_callback), + (r'""', Name.Attribute), + (quotattrcontentchar, Name.Attribute), + (entityref, Name.Attribute), + (charref, Name.Attribute), + (r'\{\{|\}\}', Name.Attribute), + ], + 'apos_attribute_content': [ + (r"'", Punctuation, 'start_tag'), + (r'\{', Punctuation, 'root'), + (r"''", Name.Attribute), + (aposattrcontentchar, Name.Attribute), + (entityref, Name.Attribute), + (charref, Name.Attribute), + (r'\{\{|\}\}', Name.Attribute), + ], + 'element_content': [ + (r')', popstate_tag_callback), + (qname, Name.Tag), + ], + 'xmlspace_decl': [ + (r'\(:', Comment, 'comment'), + (r'preserve|strip', Keyword, '#pop'), + ], + 'declareordering': [ + (r'\(:', Comment, 'comment'), + include('whitespace'), + (r'ordered|unordered', Keyword, '#pop'), + ], + 'xqueryversion': [ + include('whitespace'), + (r'\(:', Comment, 'comment'), + (stringdouble, String.Double), + (stringsingle, String.Single), + (r'encoding', Keyword), + (r';', Punctuation, '#pop'), + ], + 'pragma': [ + (qname, Name.Variable, 'pragmacontents'), + ], + 
'pragmacontents': [ + (r'#\)', Punctuation, 'operator'), + (u'\\t|\\r|\\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' + + unirange(0x10000, 0x10ffff), Literal), + (r'(\s+)', Text), + ], + 'occurrenceindicator': [ + include('whitespace'), + (r'\(:', Comment, 'comment'), + (r'\*|\?|\+', Operator, 'operator'), + (r':=', Operator, 'root'), + default('operator'), + ], + 'option': [ + include('whitespace'), + (qname, Name.Variable, '#pop'), + ], + 'qname_braren': [ + include('whitespace'), + (r'(\{)', pushstate_operator_root_callback), + (r'(\()', Punctuation, 'root'), + ], + 'element_qname': [ + (qname, Name.Variable, 'root'), + ], + 'attribute_qname': [ + (qname, Name.Variable, 'root'), + ], + 'root': [ + include('whitespace'), + (r'\(:', Comment, 'comment'), + + # handle operator state + # order on numbers matters - handle most complex first + (r'\d+(\.\d*)?[eE][+-]?\d+', Number.Float, 'operator'), + (r'(\.\d+)[eE][+-]?\d+', Number.Float, 'operator'), + (r'(\.\d+|\d+\.\d*)', Number.Float, 'operator'), + (r'(\d+)', Number.Integer, 'operator'), + (r'(\.\.|\.|\))', Punctuation, 'operator'), + (r'(declare)(\s+)(construction)', + bygroups(Keyword, Text, Keyword), 'operator'), + (r'(declare)(\s+)(default)(\s+)(order)', + bygroups(Keyword, Text, Keyword, Text, Keyword), 'operator'), + (ncname + ':\*', Name, 'operator'), + ('\*:'+ncname, Name.Tag, 'operator'), + ('\*', Name.Tag, 'operator'), + (stringdouble, String.Double, 'operator'), + (stringsingle, String.Single, 'operator'), + + (r'(\})', popstate_callback), + + # NAMESPACE DECL + (r'(declare)(\s+)(default)(\s+)(collation)', + bygroups(Keyword, Text, Keyword, Text, Keyword)), + (r'(module|declare)(\s+)(namespace)', + bygroups(Keyword, Text, Keyword), 'namespacedecl'), + (r'(declare)(\s+)(base-uri)', + bygroups(Keyword, Text, Keyword), 'namespacedecl'), + + # NAMESPACE KEYWORD + (r'(declare)(\s+)(default)(\s+)(element|function)', + bygroups(Keyword, Text, Keyword, Text, Keyword), 'namespacekeyword'), + 
(r'(import)(\s+)(schema|module)', + bygroups(Keyword.Pseudo, Text, Keyword.Pseudo), 'namespacekeyword'), + (r'(declare)(\s+)(copy-namespaces)', + bygroups(Keyword, Text, Keyword), 'namespacekeyword'), + + # VARNAMEs + (r'(for|let|some|every)(\s+)(\$)', + bygroups(Keyword, Text, Name.Variable), 'varname'), + (r'\$', Name.Variable, 'varname'), + (r'(declare)(\s+)(variable)(\s+)(\$)', + bygroups(Keyword, Text, Keyword, Text, Name.Variable), 'varname'), + + # ITEMTYPE + (r'(\))(\s+)(as)', bygroups(Operator, Text, Keyword), 'itemtype'), + + (r'(element|attribute|schema-element|schema-attribute|comment|' + r'text|node|document-node|empty-sequence)(\s+)(\()', + pushstate_operator_kindtest_callback), + + (r'(processing-instruction)(\s+)(\()', + pushstate_operator_kindtestforpi_callback), + + (r'(', Comment, '#pop'), + ('-', Comment), + ], + 'tag': [ + (r'\s+', Text), + (r'[\w.:-]+\s*=', Name.Attribute, 'attr'), + (r'/?\s*>', Name.Tag, '#pop'), + ], + 'attr': [ + ('\s+', Text), + ('".*?"', String, '#pop'), + ("'.*?'", String, '#pop'), + (r'[^\s>]+', String, '#pop'), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/agile.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/agile.py new file mode 100644 index 0000000..7ad60c8 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/agile.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.agile + ~~~~~~~~~~~~~~~~~~~~~ + + Just export lexer classes previously contained in this module. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +from pygments.lexers.lisp import SchemeLexer +from pygments.lexers.jvm import IokeLexer, ClojureLexer +from pygments.lexers.python import PythonLexer, PythonConsoleLexer, \ + PythonTracebackLexer, Python3Lexer, Python3TracebackLexer, DgLexer +from pygments.lexers.ruby import RubyLexer, RubyConsoleLexer, FancyLexer +from pygments.lexers.perl import PerlLexer, Perl6Lexer +from pygments.lexers.d import CrocLexer, MiniDLexer +from pygments.lexers.iolang import IoLexer +from pygments.lexers.tcl import TclLexer +from pygments.lexers.factor import FactorLexer +from pygments.lexers.scripting import LuaLexer, MoonScriptLexer + +__all__ = [] diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/algebra.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/algebra.py new file mode 100644 index 0000000..6bb1b08 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/algebra.py @@ -0,0 +1,187 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.algebra + ~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for computer algebra systems. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, bygroups, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['GAPLexer', 'MathematicaLexer', 'MuPADLexer'] + + +class GAPLexer(RegexLexer): + """ + For `GAP `_ source code. + + .. 
versionadded:: 2.0 + """ + name = 'GAP' + aliases = ['gap'] + filenames = ['*.g', '*.gd', '*.gi', '*.gap'] + + tokens = { + 'root': [ + (r'#.*$', Comment.Single), + (r'"(?:[^"\\]|\\.)*"', String), + (r'\(|\)|\[|\]|\{|\}', Punctuation), + (r'''(?x)\b(?: + if|then|elif|else|fi| + for|while|do|od| + repeat|until| + break|continue| + function|local|return|end| + rec| + quit|QUIT| + IsBound|Unbind| + TryNextMethod| + Info|Assert + )\b''', Keyword), + (r'''(?x)\b(?: + true|false|fail|infinity + )\b''', + Name.Constant), + (r'''(?x)\b(?: + (Declare|Install)([A-Z][A-Za-z]+)| + BindGlobal|BIND_GLOBAL + )\b''', + Name.Builtin), + (r'\.|,|:=|;|=|\+|-|\*|/|\^|>|<', Operator), + (r'''(?x)\b(?: + and|or|not|mod|in + )\b''', + Operator.Word), + (r'''(?x) + (?:\w+|`[^`]*`) + (?:::\w+|`[^`]*`)*''', Name.Variable), + (r'[0-9]+(?:\.[0-9]*)?(?:e[0-9]+)?', Number), + (r'\.[0-9]+(?:e[0-9]+)?', Number), + (r'.', Text) + ] + } + + +class MathematicaLexer(RegexLexer): + """ + Lexer for `Mathematica `_ source code. + + .. 
versionadded:: 2.0 + """ + name = 'Mathematica' + aliases = ['mathematica', 'mma', 'nb'] + filenames = ['*.nb', '*.cdf', '*.nbp', '*.ma'] + mimetypes = ['application/mathematica', + 'application/vnd.wolfram.mathematica', + 'application/vnd.wolfram.mathematica.package', + 'application/vnd.wolfram.cdf'] + + # http://reference.wolfram.com/mathematica/guide/Syntax.html + operators = ( + ";;", "=", "=.", "!=" "==", ":=", "->", ":>", "/.", "+", "-", "*", "/", + "^", "&&", "||", "!", "<>", "|", "/;", "?", "@", "//", "/@", "@@", + "@@@", "~~", "===", "&", "<", ">", "<=", ">=", + ) + + punctuation = (",", ";", "(", ")", "[", "]", "{", "}") + + def _multi_escape(entries): + return '(%s)' % ('|'.join(re.escape(entry) for entry in entries)) + + tokens = { + 'root': [ + (r'(?s)\(\*.*?\*\)', Comment), + + (r'([a-zA-Z]+[A-Za-z0-9]*`)', Name.Namespace), + (r'([A-Za-z0-9]*_+[A-Za-z0-9]*)', Name.Variable), + (r'#\d*', Name.Variable), + (r'([a-zA-Z]+[a-zA-Z0-9]*)', Name), + + (r'-?[0-9]+\.[0-9]*', Number.Float), + (r'-?[0-9]*\.[0-9]+', Number.Float), + (r'-?[0-9]+', Number.Integer), + + (words(operators), Operator), + (words(punctuation), Punctuation), + (r'".*?"', String), + (r'\s+', Text.Whitespace), + ], + } + + +class MuPADLexer(RegexLexer): + """ + A `MuPAD `_ lexer. + Contributed by Christopher Creutzig . + + .. 
versionadded:: 0.8 + """ + name = 'MuPAD' + aliases = ['mupad'] + filenames = ['*.mu'] + + tokens = { + 'root': [ + (r'//.*?$', Comment.Single), + (r'/\*', Comment.Multiline, 'comment'), + (r'"(?:[^"\\]|\\.)*"', String), + (r'\(|\)|\[|\]|\{|\}', Punctuation), + (r'''(?x)\b(?: + next|break|end| + axiom|end_axiom|category|end_category|domain|end_domain|inherits| + if|%if|then|elif|else|end_if| + case|of|do|otherwise|end_case| + while|end_while| + repeat|until|end_repeat| + for|from|to|downto|step|end_for| + proc|local|option|save|begin|end_proc| + delete|frame + )\b''', Keyword), + (r'''(?x)\b(?: + DOM_ARRAY|DOM_BOOL|DOM_COMPLEX|DOM_DOMAIN|DOM_EXEC|DOM_EXPR| + DOM_FAIL|DOM_FLOAT|DOM_FRAME|DOM_FUNC_ENV|DOM_HFARRAY|DOM_IDENT| + DOM_INT|DOM_INTERVAL|DOM_LIST|DOM_NIL|DOM_NULL|DOM_POLY|DOM_PROC| + DOM_PROC_ENV|DOM_RAT|DOM_SET|DOM_STRING|DOM_TABLE|DOM_VAR + )\b''', Name.Class), + (r'''(?x)\b(?: + PI|EULER|E|CATALAN| + NIL|FAIL|undefined|infinity| + TRUE|FALSE|UNKNOWN + )\b''', + Name.Constant), + (r'\b(?:dom|procname)\b', Name.Builtin.Pseudo), + (r'\.|,|:|;|=|\+|-|\*|/|\^|@|>|<|\$|\||!|\'|%|~=', Operator), + (r'''(?x)\b(?: + and|or|not|xor| + assuming| + div|mod| + union|minus|intersect|in|subset + )\b''', + Operator.Word), + (r'\b(?:I|RDN_INF|RD_NINF|RD_NAN)\b', Number), + # (r'\b(?:adt|linalg|newDomain|hold)\b', Name.Builtin), + (r'''(?x) + ((?:[a-zA-Z_#][\w#]*|`[^`]*`) + (?:::[a-zA-Z_#][\w#]*|`[^`]*`)*)(\s*)([(])''', + bygroups(Name.Function, Text, Punctuation)), + (r'''(?x) + (?:[a-zA-Z_#][\w#]*|`[^`]*`) + (?:::[a-zA-Z_#][\w#]*|`[^`]*`)*''', Name.Variable), + (r'[0-9]+(?:\.[0-9]*)?(?:e[0-9]+)?', Number), + (r'\.[0-9]+(?:e[0-9]+)?', Number), + (r'.', Text) + ], + 'comment': [ + (r'[^*/]', Comment.Multiline), + (r'/\*', Comment.Multiline, '#push'), + (r'\*/', Comment.Multiline, '#pop'), + (r'[*/]', Comment.Multiline) + ] + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/ambient.py 
b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/ambient.py new file mode 100644 index 0000000..ff8a1f6 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/ambient.py @@ -0,0 +1,76 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.ambient + ~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for AmbientTalk language. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['AmbientTalkLexer'] + + +class AmbientTalkLexer(RegexLexer): + """ + Lexer for `AmbientTalk `_ source code. + + .. versionadded:: 2.0 + """ + name = 'AmbientTalk' + filenames = ['*.at'] + aliases = ['at', 'ambienttalk', 'ambienttalk/2'] + mimetypes = ['text/x-ambienttalk'] + + flags = re.MULTILINE | re.DOTALL + + builtin = words(('if:', 'then:', 'else:', 'when:', 'whenever:', 'discovered:', + 'disconnected:', 'reconnected:', 'takenOffline:', 'becomes:', + 'export:', 'as:', 'object:', 'actor:', 'mirror:', 'taggedAs:', + 'mirroredBy:', 'is:')) + tokens = { + 'root': [ + (r'\s+', Text), + (r'//.*?\n', Comment.Single), + (r'/\*.*?\*/', Comment.Multiline), + (r'(def|deftype|import|alias|exclude)\b', Keyword), + (builtin, Name.Builtin), + (r'(true|false|nil)\b', Keyword.Constant), + (r'(~|lobby|jlobby|/)\.', Keyword.Constant, 'namespace'), + (r'"(\\\\|\\"|[^"])*"', String), + (r'\|', Punctuation, 'arglist'), + (r'<:|[*^!%&<>+=,./?-]|:=', Operator), + (r"`[a-zA-Z_]\w*", String.Symbol), + (r"[a-zA-Z_]\w*:", Name.Function), + (r"[{}()\[\];`]", Punctuation), + (r'(self|super)\b', Name.Variable.Instance), + (r"[a-zA-Z_]\w*", Name.Variable), + (r"@[a-zA-Z_]\w*", Name.Class), + (r"@\[", Name.Class, 'annotations'), + include('numbers'), + ], + 'numbers': [ + (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float), + 
(r'\d+', Number.Integer) + ], + 'namespace': [ + (r'[a-zA-Z_]\w*\.', Name.Namespace), + (r'[a-zA-Z_]\w*:', Name.Function, '#pop'), + (r'[a-zA-Z_]\w*(?!\.)', Name.Function, '#pop') + ], + 'annotations': [ + (r"(.*?)\]", Name.Class, '#pop') + ], + 'arglist': [ + (r'\|', Punctuation, '#pop'), + (r'\s*(,)\s*', Punctuation), + (r'[a-zA-Z_]\w*', Name.Variable), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/apl.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/apl.py new file mode 100644 index 0000000..d29133e --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/apl.py @@ -0,0 +1,101 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.apl + ~~~~~~~~~~~~~~~~~~~ + + Lexers for APL. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['APLLexer'] + + +class APLLexer(RegexLexer): + """ + A simple APL lexer. + + .. 
versionadded:: 2.0 + """ + name = 'APL' + aliases = ['apl'] + filenames = ['*.apl'] + + tokens = { + 'root': [ + # Whitespace + # ========== + (r'\s+', Text), + # + # Comment + # ======= + # '⍝' is traditional; '#' is supported by GNU APL and NGN (but not Dyalog) + (u'[⍝#].*$', Comment.Single), + # + # Strings + # ======= + (r'\'((\'\')|[^\'])*\'', String.Single), + (r'"(("")|[^"])*"', String.Double), # supported by NGN APL + # + # Punctuation + # =========== + # This token type is used for diamond and parenthesis + # but not for bracket and ; (see below) + (u'[⋄◇()]', Punctuation), + # + # Array indexing + # ============== + # Since this token type is very important in APL, it is not included in + # the punctuation token type but rather in the following one + (r'[\[\];]', String.Regex), + # + # Distinguished names + # =================== + # following IBM APL2 standard + (u'⎕[A-Za-zΔ∆⍙][A-Za-zΔ∆⍙_¯0-9]*', Name.Function), + # + # Labels + # ====== + # following IBM APL2 standard + # (u'[A-Za-zΔ∆⍙][A-Za-zΔ∆⍙_¯0-9]*:', Name.Label), + # + # Variables + # ========= + # following IBM APL2 standard + (u'[A-Za-zΔ∆⍙][A-Za-zΔ∆⍙_¯0-9]*', Name.Variable), + # + # Numbers + # ======= + (u'¯?(0[Xx][0-9A-Fa-f]+|[0-9]*\.?[0-9]+([Ee][+¯]?[0-9]+)?|¯|∞)' + u'([Jj]¯?(0[Xx][0-9A-Fa-f]+|[0-9]*\.?[0-9]+([Ee][+¯]?[0-9]+)?|¯|∞))?', + Number), + # + # Operators + # ========== + (u'[\.\\\/⌿⍀¨⍣⍨⍠⍤∘]', Name.Attribute), # closest token type + (u'[+\-×÷⌈⌊∣|⍳?*⍟○!⌹<≤=>≥≠≡≢∊⍷∪∩~∨∧⍱⍲⍴,⍪⌽⊖⍉↑↓⊂⊃⌷⍋⍒⊤⊥⍕⍎⊣⊢⍁⍂≈⌸⍯↗]', + Operator), + # + # Constant + # ======== + (u'⍬', Name.Constant), + # + # Quad symbol + # =========== + (u'[⎕⍞]', Name.Variable.Global), + # + # Arrows left/right + # ================= + (u'[←→]', Keyword.Declaration), + # + # D-Fn + # ==== + (u'[⍺⍵⍶⍹∇:]', Name.Builtin.Pseudo), + (r'[{}]', Keyword.Type), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/asm.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/asm.py 
similarity index 79% rename from plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/asm.py rename to plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/asm.py index 3f67862..7745f9c 100644 --- a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/asm.py +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/asm.py @@ -5,19 +5,21 @@ Lexers for assembly languages. - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, include, bygroups, using, DelegatingLexer -from pygments.lexers.compiled import DLexer, CppLexer, CLexer +from pygments.lexers.c_cpp import CppLexer, CLexer +from pygments.lexers.d import DLexer from pygments.token import Text, Name, Number, String, Comment, Punctuation, \ - Other, Keyword, Operator + Other, Keyword, Operator -__all__ = ['GasLexer', 'ObjdumpLexer','DObjdumpLexer', 'CppObjdumpLexer', - 'CObjdumpLexer', 'LlvmLexer', 'NasmLexer', 'Ca65Lexer'] +__all__ = ['GasLexer', 'ObjdumpLexer', 'DObjdumpLexer', 'CppObjdumpLexer', + 'CObjdumpLexer', 'LlvmLexer', 'NasmLexer', 'NasmObjdumpLexer', + 'Ca65Lexer'] class GasLexer(RegexLexer): @@ -31,7 +33,7 @@ class GasLexer(RegexLexer): #: optional Comment or Whitespace string = r'"(\\"|[^"])*"' - char = r'[a-zA-Z$._0-9@-]' + char = r'[\w$.@-]' identifier = r'(?:[a-zA-Z$_]' + char + '*|\.' + char + '+)' number = r'(?:0[xX][a-zA-Z0-9]+|\d+)' @@ -96,18 +98,12 @@ class GasLexer(RegexLexer): return 0.1 -class ObjdumpLexer(RegexLexer): +def _objdump_lexer_tokens(asm_lexer): """ - For the output of 'objdump -dr' + Common objdump lexer tokens to wrap an ASM lexer. 
""" - name = 'objdump' - aliases = ['objdump'] - filenames = ['*.objdump'] - mimetypes = ['text/x-objdump'] - - hex = r'[0-9A-Za-z]' - - tokens = { + hex_re = r'[0-9A-Za-z]' + return { 'root': [ # File name & format: ('(.*?)(:)( +file format )(.*?)$', @@ -117,33 +113,33 @@ class ObjdumpLexer(RegexLexer): bygroups(Text, Name.Label, Punctuation)), # Function labels # (With offset) - ('('+hex+'+)( )(<)(.*?)([-+])(0[xX][A-Za-z0-9]+)(>:)$', + ('('+hex_re+'+)( )(<)(.*?)([-+])(0[xX][A-Za-z0-9]+)(>:)$', bygroups(Number.Hex, Text, Punctuation, Name.Function, Punctuation, Number.Hex, Punctuation)), # (Without offset) - ('('+hex+'+)( )(<)(.*?)(>:)$', + ('('+hex_re+'+)( )(<)(.*?)(>:)$', bygroups(Number.Hex, Text, Punctuation, Name.Function, Punctuation)), # Code line with disassembled instructions - ('( *)('+hex+r'+:)(\t)((?:'+hex+hex+' )+)( *\t)([a-zA-Z].*?)$', + ('( *)('+hex_re+r'+:)(\t)((?:'+hex_re+hex_re+' )+)( *\t)([a-zA-Z].*?)$', bygroups(Text, Name.Label, Text, Number.Hex, Text, - using(GasLexer))), + using(asm_lexer))), # Code line with ascii - ('( *)('+hex+r'+:)(\t)((?:'+hex+hex+' )+)( *)(.*?)$', + ('( *)('+hex_re+r'+:)(\t)((?:'+hex_re+hex_re+' )+)( *)(.*?)$', bygroups(Text, Name.Label, Text, Number.Hex, Text, String)), # Continued code line, only raw opcodes without disassembled # instruction - ('( *)('+hex+r'+:)(\t)((?:'+hex+hex+' )+)$', + ('( *)('+hex_re+r'+:)(\t)((?:'+hex_re+hex_re+' )+)$', bygroups(Text, Name.Label, Text, Number.Hex)), # Skipped a few bytes (r'\t\.\.\.$', Text), # Relocation line # (With offset) - (r'(\t\t\t)('+hex+r'+:)( )([^\t]+)(\t)(.*?)([-+])(0x' + hex + '+)$', + (r'(\t\t\t)('+hex_re+r'+:)( )([^\t]+)(\t)(.*?)([-+])(0x'+hex_re+'+)$', bygroups(Text, Name.Label, Text, Name.Property, Text, Name.Constant, Punctuation, Number.Hex)), # (Without offset) - (r'(\t\t\t)('+hex+r'+:)( )([^\t]+)(\t)(.*?)$', + (r'(\t\t\t)('+hex_re+r'+:)( )([^\t]+)(\t)(.*?)$', bygroups(Text, Name.Label, Text, Name.Property, Text, Name.Constant)), (r'[^\n]+\n', Other) @@ 
-151,6 +147,18 @@ class ObjdumpLexer(RegexLexer): } +class ObjdumpLexer(RegexLexer): + """ + For the output of 'objdump -dr' + """ + name = 'objdump' + aliases = ['objdump'] + filenames = ['*.objdump'] + mimetypes = ['text/x-objdump'] + + tokens = _objdump_lexer_tokens(GasLexer) + + class DObjdumpLexer(DelegatingLexer): """ For the output of 'objdump -Sr on compiled D files' @@ -201,7 +209,7 @@ class LlvmLexer(RegexLexer): #: optional Comment or Whitespace string = r'"[^"]*?"' - identifier = r'([-a-zA-Z$._][-a-zA-Z$._0-9]*|' + string + ')' + identifier = r'([-a-zA-Z$._][\w\-$.]*|' + string + ')' tokens = { 'root': [ @@ -212,10 +220,11 @@ class LlvmLexer(RegexLexer): include('keyword'), - (r'%' + identifier, Name.Variable),#Name.Identifier.Local), - (r'@' + identifier, Name.Variable.Global),#Name.Identifier.Global), - (r'%\d+', Name.Variable.Anonymous),#Name.Identifier.Anonymous), - (r'@\d+', Name.Variable.Global),#Name.Identifier.Anonymous), + (r'%' + identifier, Name.Variable), + (r'@' + identifier, Name.Variable.Global), + (r'%\d+', Name.Variable.Anonymous), + (r'@\d+', Name.Variable.Global), + (r'#\d+', Name.Variable.Global), (r'!' + identifier, Name.Variable), (r'!\d+', Name.Variable.Anonymous), (r'c?' 
+ string, String), @@ -242,17 +251,24 @@ class LlvmLexer(RegexLexer): r'|thread_local|zeroinitializer|undef|null|to|tail|target|triple' r'|datalayout|volatile|nuw|nsw|nnan|ninf|nsz|arcp|fast|exact|inbounds' r'|align|addrspace|section|alias|module|asm|sideeffect|gc|dbg' + r'|linker_private_weak' + r'|attributes|blockaddress|initialexec|localdynamic|localexec' + r'|prefix|unnamed_addr' r'|ccc|fastcc|coldcc|x86_stdcallcc|x86_fastcallcc|arm_apcscc' r'|arm_aapcscc|arm_aapcs_vfpcc|ptx_device|ptx_kernel' + r'|intel_ocl_bicc|msp430_intrcc|spir_func|spir_kernel' + r'|x86_64_sysvcc|x86_64_win64cc|x86_thiscallcc' r'|cc|c' r'|signext|zeroext|inreg|sret|nounwind|noreturn|noalias|nocapture' r'|byval|nest|readnone|readonly' - r'|inlinehint|noinline|alwaysinline|optsize|ssp|sspreq|noredzone' r'|noimplicitfloat|naked' + r'|builtin|cold|nobuiltin|noduplicate|nonlazybind|optnone' + r'|returns_twice|sanitize_address|sanitize_memory|sanitize_thread' + r'|sspstrong|uwtable|returned' r'|type|opaque' @@ -261,24 +277,30 @@ class LlvmLexer(RegexLexer): r'|oeq|one|olt|ogt|ole' r'|oge|ord|uno|ueq|une' r'|x' + r'|acq_rel|acquire|alignstack|atomic|catch|cleanup|filter' + r'|inteldialect|max|min|monotonic|nand|personality|release' + r'|seq_cst|singlethread|umax|umin|unordered|xchg' # instructions r'|add|fadd|sub|fsub|mul|fmul|udiv|sdiv|fdiv|urem|srem|frem|shl' r'|lshr|ashr|and|or|xor|icmp|fcmp' r'|phi|call|trunc|zext|sext|fptrunc|fpext|uitofp|sitofp|fptoui' - r'fptosi|inttoptr|ptrtoint|bitcast|select|va_arg|ret|br|switch' + r'|fptosi|inttoptr|ptrtoint|bitcast|select|va_arg|ret|br|switch' r'|invoke|unwind|unreachable' + r'|indirectbr|landingpad|resume' r'|malloc|alloca|free|load|store|getelementptr' r'|extractelement|insertelement|shufflevector|getresult' r'|extractvalue|insertvalue' + r'|atomicrmw|cmpxchg|fence' + r')\b', Keyword), # Types - (r'void|float|double|x86_fp80|fp128|ppc_fp128|label|metadata', + (r'void|half|float|double|x86_fp80|fp128|ppc_fp128|label|metadata', Keyword.Type), # 
Integer types @@ -296,8 +318,8 @@ class NasmLexer(RegexLexer): filenames = ['*.asm', '*.ASM'] mimetypes = ['text/x-nasm'] - identifier = r'[a-zA-Z$._?][a-zA-Z0-9$._?#@~]*' - hexn = r'(?:0[xX][0-9a-fA-F]+|$0[0-9a-fA-F]*|[0-9]+[0-9a-fA-F]*h)' + identifier = r'[a-z$._?][\w$.?#@~]*' + hexn = r'(?:0x[0-9a-f]+|$0[0-9a-f]*|[0-9]+[0-9a-f]*h)' octn = r'[0-7]+q' binn = r'[01]+b' decn = r'[0-9]+' @@ -316,8 +338,8 @@ class NasmLexer(RegexLexer): flags = re.IGNORECASE | re.MULTILINE tokens = { 'root': [ - include('whitespace'), (r'^\s*%', Comment.Preproc, 'preproc'), + include('whitespace'), (identifier + ':', Name.Label), (r'(%s)(\s+)(equ)' % identifier, bygroups(Name.Constant, Keyword.Declaration, Keyword.Declaration), @@ -331,7 +353,7 @@ class NasmLexer(RegexLexer): (string, String), (hexn, Number.Hex), (octn, Number.Oct), - (binn, Number), + (binn, Number.Bin), (floatn, Number.Float), (decn, Number.Integer), include('punctuation'), @@ -360,13 +382,27 @@ class NasmLexer(RegexLexer): } +class NasmObjdumpLexer(ObjdumpLexer): + """ + For the output of 'objdump -d -M intel'. + + .. versionadded:: 2.0 + """ + name = 'objdump-nasm' + aliases = ['objdump-nasm'] + filenames = ['*.objdump-intel'] + mimetypes = ['text/x-nasm-objdump'] + + tokens = _objdump_lexer_tokens(NasmLexer) + + class Ca65Lexer(RegexLexer): """ For ca65 assembler sources. - *New in Pygments 1.6.* + .. 
versionadded:: 1.6 """ - name = 'ca65' + name = 'ca65 assembler' aliases = ['ca65'] filenames = ['*.s'] @@ -381,13 +417,14 @@ class Ca65Lexer(RegexLexer): r'|cl[cvdi]|se[cdi]|jmp|jsr|bne|beq|bpl|bmi|bvc|bvs|bcc|bcs' r'|p[lh][ap]|rt[is]|brk|nop|ta[xy]|t[xy]a|txs|tsx|and|ora|eor' r'|bit)\b', Keyword), - (r'\.[a-z0-9_]+', Keyword.Pseudo), + (r'\.\w+', Keyword.Pseudo), (r'[-+~*/^&|!<>=]', Operator), (r'"[^"\n]*.', String), (r"'[^'\n]*.", String.Char), (r'\$[0-9a-f]+|[0-9a-f]+h\b', Number.Hex), - (r'\d+|%[01]+', Number.Integer), - (r'[#,.:()=]', Punctuation), + (r'\d+', Number.Integer), + (r'%[01]+', Number.Bin), + (r'[#,.:()=\[\]]', Punctuation), (r'[a-z_.@$][\w.@$]*', Name), ] } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/automation.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/automation.py new file mode 100644 index 0000000..a66ceff --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/automation.py @@ -0,0 +1,373 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.automation + ~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for automation scripting languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, include, bygroups, combined +from pygments.token import Text, Comment, Operator, Name, String, \ + Number, Punctuation, Generic + +__all__ = ['AutohotkeyLexer', 'AutoItLexer'] + + +class AutohotkeyLexer(RegexLexer): + """ + For `autohotkey `_ source code. + + .. 
versionadded:: 1.4 + """ + name = 'autohotkey' + aliases = ['ahk', 'autohotkey'] + filenames = ['*.ahk', '*.ahkl'] + mimetypes = ['text/x-autohotkey'] + + tokens = { + 'root': [ + (r'^(\s*)(/\*)', bygroups(Text, Comment.Multiline), 'incomment'), + (r'^(\s*)(\()', bygroups(Text, Generic), 'incontinuation'), + (r'\s+;.*?$', Comment.Singleline), + (r'^;.*?$', Comment.Singleline), + (r'[]{}(),;[]', Punctuation), + (r'(in|is|and|or|not)\b', Operator.Word), + (r'\%[a-zA-Z_#@$][\w#@$]*\%', Name.Variable), + (r'!=|==|:=|\.=|<<|>>|[-~+/*%=<>&^|?:!.]', Operator), + include('commands'), + include('labels'), + include('builtInFunctions'), + include('builtInVariables'), + (r'"', String, combined('stringescape', 'dqs')), + include('numbers'), + (r'[a-zA-Z_#@$][\w#@$]*', Name), + (r'\\|\'', Text), + (r'\`([,%`abfnrtv\-+;])', String.Escape), + include('garbage'), + ], + 'incomment': [ + (r'^\s*\*/', Comment.Multiline, '#pop'), + (r'[^*/]', Comment.Multiline), + (r'[*/]', Comment.Multiline) + ], + 'incontinuation': [ + (r'^\s*\)', Generic, '#pop'), + (r'[^)]', Generic), + (r'[)]', Generic), + ], + 'commands': [ + (r'(?i)^(\s*)(global|local|static|' + r'#AllowSameLineComments|#ClipboardTimeout|#CommentFlag|' + r'#ErrorStdOut|#EscapeChar|#HotkeyInterval|#HotkeyModifierTimeout|' + r'#Hotstring|#IfWinActive|#IfWinExist|#IfWinNotActive|' + r'#IfWinNotExist|#IncludeAgain|#Include|#InstallKeybdHook|' + r'#InstallMouseHook|#KeyHistory|#LTrim|#MaxHotkeysPerInterval|' + r'#MaxMem|#MaxThreads|#MaxThreadsBuffer|#MaxThreadsPerHotkey|' + r'#NoEnv|#NoTrayIcon|#Persistent|#SingleInstance|#UseHook|' + r'#WinActivateForce|AutoTrim|BlockInput|Break|Click|ClipWait|' + r'Continue|Control|ControlClick|ControlFocus|ControlGetFocus|' + r'ControlGetPos|ControlGetText|ControlGet|ControlMove|ControlSend|' + r'ControlSendRaw|ControlSetText|CoordMode|Critical|' + r'DetectHiddenText|DetectHiddenWindows|Drive|DriveGet|' + r'DriveSpaceFree|Edit|Else|EnvAdd|EnvDiv|EnvGet|EnvMult|EnvSet|' + 
r'EnvSub|EnvUpdate|Exit|ExitApp|FileAppend|' + r'FileCopy|FileCopyDir|FileCreateDir|FileCreateShortcut|' + r'FileDelete|FileGetAttrib|FileGetShortcut|FileGetSize|' + r'FileGetTime|FileGetVersion|FileInstall|FileMove|FileMoveDir|' + r'FileRead|FileReadLine|FileRecycle|FileRecycleEmpty|' + r'FileRemoveDir|FileSelectFile|FileSelectFolder|FileSetAttrib|' + r'FileSetTime|FormatTime|GetKeyState|Gosub|Goto|GroupActivate|' + r'GroupAdd|GroupClose|GroupDeactivate|Gui|GuiControl|' + r'GuiControlGet|Hotkey|IfEqual|IfExist|IfGreaterOrEqual|IfGreater|' + r'IfInString|IfLess|IfLessOrEqual|IfMsgBox|IfNotEqual|IfNotExist|' + r'IfNotInString|IfWinActive|IfWinExist|IfWinNotActive|' + r'IfWinNotExist|If |ImageSearch|IniDelete|IniRead|IniWrite|' + r'InputBox|Input|KeyHistory|KeyWait|ListHotkeys|ListLines|' + r'ListVars|Loop|Menu|MouseClickDrag|MouseClick|MouseGetPos|' + r'MouseMove|MsgBox|OnExit|OutputDebug|Pause|PixelGetColor|' + r'PixelSearch|PostMessage|Process|Progress|Random|RegDelete|' + r'RegRead|RegWrite|Reload|Repeat|Return|RunAs|RunWait|Run|' + r'SendEvent|SendInput|SendMessage|SendMode|SendPlay|SendRaw|Send|' + r'SetBatchLines|SetCapslockState|SetControlDelay|' + r'SetDefaultMouseSpeed|SetEnv|SetFormat|SetKeyDelay|' + r'SetMouseDelay|SetNumlockState|SetScrollLockState|' + r'SetStoreCapslockMode|SetTimer|SetTitleMatchMode|' + r'SetWinDelay|SetWorkingDir|Shutdown|Sleep|Sort|SoundBeep|' + r'SoundGet|SoundGetWaveVolume|SoundPlay|SoundSet|' + r'SoundSetWaveVolume|SplashImage|SplashTextOff|SplashTextOn|' + r'SplitPath|StatusBarGetText|StatusBarWait|StringCaseSense|' + r'StringGetPos|StringLeft|StringLen|StringLower|StringMid|' + r'StringReplace|StringRight|StringSplit|StringTrimLeft|' + r'StringTrimRight|StringUpper|Suspend|SysGet|Thread|ToolTip|' + r'Transform|TrayTip|URLDownloadToFile|While|WinActivate|' + r'WinActivateBottom|WinClose|WinGetActiveStats|WinGetActiveTitle|' + r'WinGetClass|WinGetPos|WinGetText|WinGetTitle|WinGet|WinHide|' + 
r'WinKill|WinMaximize|WinMenuSelectItem|WinMinimizeAllUndo|' + r'WinMinimizeAll|WinMinimize|WinMove|WinRestore|WinSetTitle|' + r'WinSet|WinShow|WinWaitActive|WinWaitClose|WinWaitNotActive|' + r'WinWait)\b', bygroups(Text, Name.Builtin)), + ], + 'builtInFunctions': [ + (r'(?i)(Abs|ACos|Asc|ASin|ATan|Ceil|Chr|Cos|DllCall|Exp|FileExist|' + r'Floor|GetKeyState|IL_Add|IL_Create|IL_Destroy|InStr|IsFunc|' + r'IsLabel|Ln|Log|LV_Add|LV_Delete|LV_DeleteCol|LV_GetCount|' + r'LV_GetNext|LV_GetText|LV_Insert|LV_InsertCol|LV_Modify|' + r'LV_ModifyCol|LV_SetImageList|Mod|NumGet|NumPut|OnMessage|' + r'RegExMatch|RegExReplace|RegisterCallback|Round|SB_SetIcon|' + r'SB_SetParts|SB_SetText|Sin|Sqrt|StrLen|SubStr|Tan|TV_Add|' + r'TV_Delete|TV_GetChild|TV_GetCount|TV_GetNext|TV_Get|' + r'TV_GetParent|TV_GetPrev|TV_GetSelection|TV_GetText|TV_Modify|' + r'VarSetCapacity|WinActive|WinExist|Object|ComObjActive|' + r'ComObjArray|ComObjEnwrap|ComObjUnwrap|ComObjParameter|' + r'ComObjType|ComObjConnect|ComObjCreate|ComObjGet|ComObjError|' + r'ComObjValue|Insert|MinIndex|MaxIndex|Remove|SetCapacity|' + r'GetCapacity|GetAddress|_NewEnum|FileOpen|Read|Write|ReadLine|' + r'WriteLine|ReadNumType|WriteNumType|RawRead|RawWrite|Seek|Tell|' + r'Close|Next|IsObject|StrPut|StrGet|Trim|LTrim|RTrim)\b', + Name.Function), + ], + 'builtInVariables': [ + (r'(?i)(A_AhkPath|A_AhkVersion|A_AppData|A_AppDataCommon|' + r'A_AutoTrim|A_BatchLines|A_CaretX|A_CaretY|A_ComputerName|' + r'A_ControlDelay|A_Cursor|A_DDDD|A_DDD|A_DD|A_DefaultMouseSpeed|' + r'A_Desktop|A_DesktopCommon|A_DetectHiddenText|' + r'A_DetectHiddenWindows|A_EndChar|A_EventInfo|A_ExitReason|' + r'A_FormatFloat|A_FormatInteger|A_Gui|A_GuiEvent|A_GuiControl|' + r'A_GuiControlEvent|A_GuiHeight|A_GuiWidth|A_GuiX|A_GuiY|A_Hour|' + r'A_IconFile|A_IconHidden|A_IconNumber|A_IconTip|A_Index|' + r'A_IPAddress1|A_IPAddress2|A_IPAddress3|A_IPAddress4|A_ISAdmin|' + r'A_IsCompiled|A_IsCritical|A_IsPaused|A_IsSuspended|A_KeyDelay|' + 
r'A_Language|A_LastError|A_LineFile|A_LineNumber|A_LoopField|' + r'A_LoopFileAttrib|A_LoopFileDir|A_LoopFileExt|A_LoopFileFullPath|' + r'A_LoopFileLongPath|A_LoopFileName|A_LoopFileShortName|' + r'A_LoopFileShortPath|A_LoopFileSize|A_LoopFileSizeKB|' + r'A_LoopFileSizeMB|A_LoopFileTimeAccessed|A_LoopFileTimeCreated|' + r'A_LoopFileTimeModified|A_LoopReadLine|A_LoopRegKey|' + r'A_LoopRegName|A_LoopRegSubkey|A_LoopRegTimeModified|' + r'A_LoopRegType|A_MDAY|A_Min|A_MM|A_MMM|A_MMMM|A_Mon|A_MouseDelay|' + r'A_MSec|A_MyDocuments|A_Now|A_NowUTC|A_NumBatchLines|A_OSType|' + r'A_OSVersion|A_PriorHotkey|A_ProgramFiles|A_Programs|' + r'A_ProgramsCommon|A_ScreenHeight|A_ScreenWidth|A_ScriptDir|' + r'A_ScriptFullPath|A_ScriptName|A_Sec|A_Space|A_StartMenu|' + r'A_StartMenuCommon|A_Startup|A_StartupCommon|A_StringCaseSense|' + r'A_Tab|A_Temp|A_ThisFunc|A_ThisHotkey|A_ThisLabel|A_ThisMenu|' + r'A_ThisMenuItem|A_ThisMenuItemPos|A_TickCount|A_TimeIdle|' + r'A_TimeIdlePhysical|A_TimeSincePriorHotkey|A_TimeSinceThisHotkey|' + r'A_TitleMatchMode|A_TitleMatchModeSpeed|A_UserName|A_WDay|' + r'A_WinDelay|A_WinDir|A_WorkingDir|A_YDay|A_YEAR|A_YWeek|A_YYYY|' + r'Clipboard|ClipboardAll|ComSpec|ErrorLevel|ProgramFiles|True|' + r'False|A_IsUnicode|A_FileEncoding|A_OSVersion|A_PtrSize)\b', + Name.Variable), + ], + 'labels': [ + # hotkeys and labels + # technically, hotkey names are limited to named keys and buttons + (r'(^\s*)([^:\s("]+?:{1,2})', bygroups(Text, Name.Label)), + (r'(^\s*)(::[^:\s]+?::)', bygroups(Text, Name.Label)), + ], + 'numbers': [ + (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float), + (r'\d+[eE][+-]?[0-9]+', Number.Float), + (r'0\d+', Number.Oct), + (r'0[xX][a-fA-F0-9]+', Number.Hex), + (r'\d+L', Number.Integer.Long), + (r'\d+', Number.Integer) + ], + 'stringescape': [ + (r'\"\"|\`([,%`abfnrtv])', String.Escape), + ], + 'strings': [ + (r'[^"\n]+', String), + ], + 'dqs': [ + (r'"', String, '#pop'), + include('strings') + ], + 'garbage': [ + (r'[^\S\n]', Text), + # 
(r'.', Text), # no cheating + ], + } + + +class AutoItLexer(RegexLexer): + """ + For `AutoIt `_ files. + + AutoIt is a freeware BASIC-like scripting language + designed for automating the Windows GUI and general scripting + + .. versionadded:: 1.6 + """ + name = 'AutoIt' + aliases = ['autoit'] + filenames = ['*.au3'] + mimetypes = ['text/x-autoit'] + + # Keywords, functions, macros from au3.keywords.properties + # which can be found in AutoIt installed directory, e.g. + # c:\Program Files (x86)\AutoIt3\SciTE\au3.keywords.properties + + keywords = """\ + #include-once #include #endregion #forcedef #forceref #region + and byref case continueloop dim do else elseif endfunc endif + endselect exit exitloop for func global + if local next not or return select step + then to until wend while exit""".split() + + functions = """\ + abs acos adlibregister adlibunregister asc ascw asin assign atan + autoitsetoption autoitwingettitle autoitwinsettitle beep binary binarylen + binarymid binarytostring bitand bitnot bitor bitrotate bitshift bitxor + blockinput break call cdtray ceiling chr chrw clipget clipput consoleread + consolewrite consolewriteerror controlclick controlcommand controldisable + controlenable controlfocus controlgetfocus controlgethandle controlgetpos + controlgettext controlhide controllistview controlmove controlsend + controlsettext controlshow controltreeview cos dec dircopy dircreate + dirgetsize dirmove dirremove dllcall dllcalladdress dllcallbackfree + dllcallbackgetptr dllcallbackregister dllclose dllopen dllstructcreate + dllstructgetdata dllstructgetptr dllstructgetsize dllstructsetdata + drivegetdrive drivegetfilesystem drivegetlabel drivegetserial drivegettype + drivemapadd drivemapdel drivemapget drivesetlabel drivespacefree + drivespacetotal drivestatus envget envset envupdate eval execute exp + filechangedir fileclose filecopy filecreatentfslink filecreateshortcut + filedelete fileexists filefindfirstfile filefindnextfile fileflush + 
filegetattrib filegetencoding filegetlongname filegetpos filegetshortcut + filegetshortname filegetsize filegettime filegetversion fileinstall filemove + fileopen fileopendialog fileread filereadline filerecycle filerecycleempty + filesavedialog fileselectfolder filesetattrib filesetpos filesettime + filewrite filewriteline floor ftpsetproxy guicreate guictrlcreateavi + guictrlcreatebutton guictrlcreatecheckbox guictrlcreatecombo + guictrlcreatecontextmenu guictrlcreatedate guictrlcreatedummy + guictrlcreateedit guictrlcreategraphic guictrlcreategroup guictrlcreateicon + guictrlcreateinput guictrlcreatelabel guictrlcreatelist + guictrlcreatelistview guictrlcreatelistviewitem guictrlcreatemenu + guictrlcreatemenuitem guictrlcreatemonthcal guictrlcreateobj + guictrlcreatepic guictrlcreateprogress guictrlcreateradio + guictrlcreateslider guictrlcreatetab guictrlcreatetabitem + guictrlcreatetreeview guictrlcreatetreeviewitem guictrlcreateupdown + guictrldelete guictrlgethandle guictrlgetstate guictrlread guictrlrecvmsg + guictrlregisterlistviewsort guictrlsendmsg guictrlsendtodummy + guictrlsetbkcolor guictrlsetcolor guictrlsetcursor guictrlsetdata + guictrlsetdefbkcolor guictrlsetdefcolor guictrlsetfont guictrlsetgraphic + guictrlsetimage guictrlsetlimit guictrlsetonevent guictrlsetpos + guictrlsetresizing guictrlsetstate guictrlsetstyle guictrlsettip guidelete + guigetcursorinfo guigetmsg guigetstyle guiregistermsg guisetaccelerators + guisetbkcolor guisetcoord guisetcursor guisetfont guisethelp guiseticon + guisetonevent guisetstate guisetstyle guistartgroup guiswitch hex hotkeyset + httpsetproxy httpsetuseragent hwnd inetclose inetget inetgetinfo inetgetsize + inetread inidelete iniread inireadsection inireadsectionnames + inirenamesection iniwrite iniwritesection inputbox int isadmin isarray + isbinary isbool isdeclared isdllstruct isfloat ishwnd isint iskeyword + isnumber isobj isptr isstring log memgetstats mod mouseclick mouseclickdrag + mousedown 
mousegetcursor mousegetpos mousemove mouseup mousewheel msgbox + number objcreate objcreateinterface objevent objevent objget objname + onautoitexitregister onautoitexitunregister opt ping pixelchecksum + pixelgetcolor pixelsearch pluginclose pluginopen processclose processexists + processgetstats processlist processsetpriority processwait processwaitclose + progressoff progresson progressset ptr random regdelete regenumkey + regenumval regread regwrite round run runas runaswait runwait send + sendkeepactive seterror setextended shellexecute shellexecutewait shutdown + sin sleep soundplay soundsetwavevolume splashimageon splashoff splashtexton + sqrt srandom statusbargettext stderrread stdinwrite stdioclose stdoutread + string stringaddcr stringcompare stringformat stringfromasciiarray + stringinstr stringisalnum stringisalpha stringisascii stringisdigit + stringisfloat stringisint stringislower stringisspace stringisupper + stringisxdigit stringleft stringlen stringlower stringmid stringregexp + stringregexpreplace stringreplace stringright stringsplit stringstripcr + stringstripws stringtoasciiarray stringtobinary stringtrimleft + stringtrimright stringupper tan tcpaccept tcpclosesocket tcpconnect + tcplisten tcpnametoip tcprecv tcpsend tcpshutdown tcpstartup timerdiff + timerinit tooltip traycreateitem traycreatemenu traygetmsg trayitemdelete + trayitemgethandle trayitemgetstate trayitemgettext trayitemsetonevent + trayitemsetstate trayitemsettext traysetclick trayseticon traysetonevent + traysetpauseicon traysetstate traysettooltip traytip ubound udpbind + udpclosesocket udpopen udprecv udpsend udpshutdown udpstartup vargettype + winactivate winactive winclose winexists winflash wingetcaretpos + wingetclasslist wingetclientsize wingethandle wingetpos wingetprocess + wingetstate wingettext wingettitle winkill winlist winmenuselectitem + winminimizeall winminimizeallundo winmove winsetontop winsetstate + winsettitle winsettrans winwait winwaitactive winwaitclose 
+ winwaitnotactive""".split() + + macros = """\ + @appdatacommondir @appdatadir @autoitexe @autoitpid @autoitversion + @autoitx64 @com_eventobj @commonfilesdir @compiled @computername @comspec + @cpuarch @cr @crlf @desktopcommondir @desktopdepth @desktopdir + @desktopheight @desktoprefresh @desktopwidth @documentscommondir @error + @exitcode @exitmethod @extended @favoritescommondir @favoritesdir + @gui_ctrlhandle @gui_ctrlid @gui_dragfile @gui_dragid @gui_dropid + @gui_winhandle @homedrive @homepath @homeshare @hotkeypressed @hour + @ipaddress1 @ipaddress2 @ipaddress3 @ipaddress4 @kblayout @lf + @logondnsdomain @logondomain @logonserver @mday @min @mon @msec @muilang + @mydocumentsdir @numparams @osarch @osbuild @oslang @osservicepack @ostype + @osversion @programfilesdir @programscommondir @programsdir @scriptdir + @scriptfullpath @scriptlinenumber @scriptname @sec @startmenucommondir + @startmenudir @startupcommondir @startupdir @sw_disable @sw_enable @sw_hide + @sw_lock @sw_maximize @sw_minimize @sw_restore @sw_show @sw_showdefault + @sw_showmaximized @sw_showminimized @sw_showminnoactive @sw_showna + @sw_shownoactivate @sw_shownormal @sw_unlock @systemdir @tab @tempdir + @tray_id @trayiconflashing @trayiconvisible @username @userprofiledir @wday + @windowsdir @workingdir @yday @year""".split() + + tokens = { + 'root': [ + (r';.*\n', Comment.Single), + (r'(#comments-start|#cs).*?(#comments-end|#ce)', Comment.Multiline), + (r'[\[\]{}(),;]', Punctuation), + (r'(and|or|not)\b', Operator.Word), + (r'[$|@][a-zA-Z_]\w*', Name.Variable), + (r'!=|==|:=|\.=|<<|>>|[-~+/*%=<>&^|?:!.]', Operator), + include('commands'), + include('labels'), + include('builtInFunctions'), + include('builtInMarcros'), + (r'"', String, combined('stringescape', 'dqs')), + include('numbers'), + (r'[a-zA-Z_#@$][\w#@$]*', Name), + (r'\\|\'', Text), + (r'\`([,%`abfnrtv\-+;])', String.Escape), + (r'_\n', Text), # Line continuation + include('garbage'), + ], + 'commands': [ + (r'(?i)(\s*)(%s)\b' % 
'|'.join(keywords), + bygroups(Text, Name.Builtin)), + ], + 'builtInFunctions': [ + (r'(?i)(%s)\b' % '|'.join(functions), + Name.Function), + ], + 'builtInMarcros': [ + (r'(?i)(%s)\b' % '|'.join(macros), + Name.Variable.Global), + ], + 'labels': [ + # sendkeys + (r'(^\s*)(\{\S+?\})', bygroups(Text, Name.Label)), + ], + 'numbers': [ + (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float), + (r'\d+[eE][+-]?[0-9]+', Number.Float), + (r'0\d+', Number.Oct), + (r'0[xX][a-fA-F0-9]+', Number.Hex), + (r'\d+L', Number.Integer.Long), + (r'\d+', Number.Integer) + ], + 'stringescape': [ + (r'\"\"|\`([,%`abfnrtv])', String.Escape), + ], + 'strings': [ + (r'[^"\n]+', String), + ], + 'dqs': [ + (r'"', String, '#pop'), + include('strings') + ], + 'garbage': [ + (r'[^\S\n]', Text), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/basic.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/basic.py new file mode 100644 index 0000000..0ec459e --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/basic.py @@ -0,0 +1,500 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.basic + ~~~~~~~~~~~~~~~~~~~~~ + + Lexers for BASIC like languages (other than VB.net). + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, bygroups, default, words, include +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['BlitzBasicLexer', 'BlitzMaxLexer', 'MonkeyLexer', 'CbmBasicV2Lexer', + 'QBasicLexer'] + + +class BlitzMaxLexer(RegexLexer): + """ + For `BlitzMax `_ source code. + + .. 
versionadded:: 1.4 + """ + + name = 'BlitzMax' + aliases = ['blitzmax', 'bmax'] + filenames = ['*.bmx'] + mimetypes = ['text/x-bmx'] + + bmax_vopwords = r'\b(Shl|Shr|Sar|Mod)\b' + bmax_sktypes = r'@{1,2}|[!#$%]' + bmax_lktypes = r'\b(Int|Byte|Short|Float|Double|Long)\b' + bmax_name = r'[a-z_]\w*' + bmax_var = (r'(%s)(?:(?:([ \t]*)(%s)|([ \t]*:[ \t]*\b(?:Shl|Shr|Sar|Mod)\b)' + r'|([ \t]*)(:)([ \t]*)(?:%s|(%s)))(?:([ \t]*)(Ptr))?)') % \ + (bmax_name, bmax_sktypes, bmax_lktypes, bmax_name) + bmax_func = bmax_var + r'?((?:[ \t]|\.\.\n)*)([(])' + + flags = re.MULTILINE | re.IGNORECASE + tokens = { + 'root': [ + # Text + (r'[ \t]+', Text), + (r'\.\.\n', Text), # Line continuation + # Comments + (r"'.*?\n", Comment.Single), + (r'([ \t]*)\bRem\n(\n|.)*?\s*\bEnd([ \t]*)Rem', Comment.Multiline), + # Data types + ('"', String.Double, 'string'), + # Numbers + (r'[0-9]+\.[0-9]*(?!\.)', Number.Float), + (r'\.[0-9]*(?!\.)', Number.Float), + (r'[0-9]+', Number.Integer), + (r'\$[0-9a-f]+', Number.Hex), + (r'\%[10]+', Number.Bin), + # Other + (r'(?:(?:(:)?([ \t]*)(:?%s|([+\-*/&|~]))|Or|And|Not|[=<>^]))' % + (bmax_vopwords), Operator), + (r'[(),.:\[\]]', Punctuation), + (r'(?:#[\w \t]*)', Name.Label), + (r'(?:\?[\w \t]*)', Comment.Preproc), + # Identifiers + (r'\b(New)\b([ \t]?)([(]?)(%s)' % (bmax_name), + bygroups(Keyword.Reserved, Text, Punctuation, Name.Class)), + (r'\b(Import|Framework|Module)([ \t]+)(%s\.%s)' % + (bmax_name, bmax_name), + bygroups(Keyword.Reserved, Text, Keyword.Namespace)), + (bmax_func, bygroups(Name.Function, Text, Keyword.Type, + Operator, Text, Punctuation, Text, + Keyword.Type, Name.Class, Text, + Keyword.Type, Text, Punctuation)), + (bmax_var, bygroups(Name.Variable, Text, Keyword.Type, Operator, + Text, Punctuation, Text, Keyword.Type, + Name.Class, Text, Keyword.Type)), + (r'\b(Type|Extends)([ \t]+)(%s)' % (bmax_name), + bygroups(Keyword.Reserved, Text, Name.Class)), + # Keywords + (r'\b(Ptr)\b', Keyword.Type), + (r'\b(Pi|True|False|Null|Self|Super)\b', 
Keyword.Constant), + (r'\b(Local|Global|Const|Field)\b', Keyword.Declaration), + (words(( + 'TNullMethodException', 'TNullFunctionException', + 'TNullObjectException', 'TArrayBoundsException', + 'TRuntimeException'), prefix=r'\b', suffix=r'\b'), Name.Exception), + (words(( + 'Strict', 'SuperStrict', 'Module', 'ModuleInfo', + 'End', 'Return', 'Continue', 'Exit', 'Public', 'Private', + 'Var', 'VarPtr', 'Chr', 'Len', 'Asc', 'SizeOf', 'Sgn', 'Abs', 'Min', 'Max', + 'New', 'Release', 'Delete', 'Incbin', 'IncbinPtr', 'IncbinLen', + 'Framework', 'Include', 'Import', 'Extern', 'EndExtern', + 'Function', 'EndFunction', 'Type', 'EndType', 'Extends', 'Method', 'EndMethod', + 'Abstract', 'Final', 'If', 'Then', 'Else', 'ElseIf', 'EndIf', + 'For', 'To', 'Next', 'Step', 'EachIn', 'While', 'Wend', 'EndWhile', + 'Repeat', 'Until', 'Forever', 'Select', 'Case', 'Default', 'EndSelect', + 'Try', 'Catch', 'EndTry', 'Throw', 'Assert', 'Goto', 'DefData', 'ReadData', + 'RestoreData'), prefix=r'\b', suffix=r'\b'), + Keyword.Reserved), + # Final resolve (for variable names and such) + (r'(%s)' % (bmax_name), Name.Variable), + ], + 'string': [ + (r'""', String.Double), + (r'"C?', String.Double, '#pop'), + (r'[^"]+', String.Double), + ], + } + + +class BlitzBasicLexer(RegexLexer): + """ + For `BlitzBasic `_ source code. + + .. 
versionadded:: 2.0 + """ + + name = 'BlitzBasic' + aliases = ['blitzbasic', 'b3d', 'bplus'] + filenames = ['*.bb', '*.decls'] + mimetypes = ['text/x-bb'] + + bb_sktypes = r'@{1,2}|[#$%]' + bb_name = r'[a-z]\w*' + bb_var = (r'(%s)(?:([ \t]*)(%s)|([ \t]*)([.])([ \t]*)(?:(%s)))?') % \ + (bb_name, bb_sktypes, bb_name) + + flags = re.MULTILINE | re.IGNORECASE + tokens = { + 'root': [ + # Text + (r'[ \t]+', Text), + # Comments + (r";.*?\n", Comment.Single), + # Data types + ('"', String.Double, 'string'), + # Numbers + (r'[0-9]+\.[0-9]*(?!\.)', Number.Float), + (r'\.[0-9]+(?!\.)', Number.Float), + (r'[0-9]+', Number.Integer), + (r'\$[0-9a-f]+', Number.Hex), + (r'\%[10]+', Number.Bin), + # Other + (words(('Shl', 'Shr', 'Sar', 'Mod', 'Or', 'And', 'Not', + 'Abs', 'Sgn', 'Handle', 'Int', 'Float', 'Str', + 'First', 'Last', 'Before', 'After'), + prefix=r'\b', suffix=r'\b'), + Operator), + (r'([+\-*/~=<>^])', Operator), + (r'[(),:\[\]\\]', Punctuation), + (r'\.([ \t]*)(%s)' % bb_name, Name.Label), + # Identifiers + (r'\b(New)\b([ \t]+)(%s)' % (bb_name), + bygroups(Keyword.Reserved, Text, Name.Class)), + (r'\b(Gosub|Goto)\b([ \t]+)(%s)' % (bb_name), + bygroups(Keyword.Reserved, Text, Name.Label)), + (r'\b(Object)\b([ \t]*)([.])([ \t]*)(%s)\b' % (bb_name), + bygroups(Operator, Text, Punctuation, Text, Name.Class)), + (r'\b%s\b([ \t]*)(\()' % bb_var, + bygroups(Name.Function, Text, Keyword.Type, Text, Punctuation, + Text, Name.Class, Text, Punctuation)), + (r'\b(Function)\b([ \t]+)%s' % bb_var, + bygroups(Keyword.Reserved, Text, Name.Function, Text, Keyword.Type, + Text, Punctuation, Text, Name.Class)), + (r'\b(Type)([ \t]+)(%s)' % (bb_name), + bygroups(Keyword.Reserved, Text, Name.Class)), + # Keywords + (r'\b(Pi|True|False|Null)\b', Keyword.Constant), + (r'\b(Local|Global|Const|Field|Dim)\b', Keyword.Declaration), + (words(( + 'End', 'Return', 'Exit', 'Chr', 'Len', 'Asc', 'New', 'Delete', 'Insert', + 'Include', 'Function', 'Type', 'If', 'Then', 'Else', 'ElseIf', 'EndIf', + 
'For', 'To', 'Next', 'Step', 'Each', 'While', 'Wend', + 'Repeat', 'Until', 'Forever', 'Select', 'Case', 'Default', + 'Goto', 'Gosub', 'Data', 'Read', 'Restore'), prefix=r'\b', suffix=r'\b'), + Keyword.Reserved), + # Final resolve (for variable names and such) + # (r'(%s)' % (bb_name), Name.Variable), + (bb_var, bygroups(Name.Variable, Text, Keyword.Type, + Text, Punctuation, Text, Name.Class)), + ], + 'string': [ + (r'""', String.Double), + (r'"C?', String.Double, '#pop'), + (r'[^"]+', String.Double), + ], + } + + +class MonkeyLexer(RegexLexer): + """ + For + `Monkey `_ + source code. + + .. versionadded:: 1.6 + """ + + name = 'Monkey' + aliases = ['monkey'] + filenames = ['*.monkey'] + mimetypes = ['text/x-monkey'] + + name_variable = r'[a-z_]\w*' + name_function = r'[A-Z]\w*' + name_constant = r'[A-Z_][A-Z0-9_]*' + name_class = r'[A-Z]\w*' + name_module = r'[a-z0-9_]*' + + keyword_type = r'(?:Int|Float|String|Bool|Object|Array|Void)' + # ? == Bool // % == Int // # == Float // $ == String + keyword_type_special = r'[?%#$]' + + flags = re.MULTILINE + + tokens = { + 'root': [ + # Text + (r'\s+', Text), + # Comments + (r"'.*", Comment), + (r'(?i)^#rem\b', Comment.Multiline, 'comment'), + # preprocessor directives + (r'(?i)^(?:#If|#ElseIf|#Else|#EndIf|#End|#Print|#Error)\b', Comment.Preproc), + # preprocessor variable (any line starting with '#' that is not a directive) + (r'^#', Comment.Preproc, 'variables'), + # String + ('"', String.Double, 'string'), + # Numbers + (r'[0-9]+\.[0-9]*(?!\.)', Number.Float), + (r'\.[0-9]+(?!\.)', Number.Float), + (r'[0-9]+', Number.Integer), + (r'\$[0-9a-fA-Z]+', Number.Hex), + (r'\%[10]+', Number.Bin), + # Native data types + (r'\b%s\b' % keyword_type, Keyword.Type), + # Exception handling + (r'(?i)\b(?:Try|Catch|Throw)\b', Keyword.Reserved), + (r'Throwable', Name.Exception), + # Builtins + (r'(?i)\b(?:Null|True|False)\b', Name.Builtin), + (r'(?i)\b(?:Self|Super)\b', Name.Builtin.Pseudo), + (r'\b(?:HOST|LANG|TARGET|CONFIG)\b', 
Name.Constant), + # Keywords + (r'(?i)^(Import)(\s+)(.*)(\n)', + bygroups(Keyword.Namespace, Text, Name.Namespace, Text)), + (r'(?i)^Strict\b.*\n', Keyword.Reserved), + (r'(?i)(Const|Local|Global|Field)(\s+)', + bygroups(Keyword.Declaration, Text), 'variables'), + (r'(?i)(New|Class|Interface|Extends|Implements)(\s+)', + bygroups(Keyword.Reserved, Text), 'classname'), + (r'(?i)(Function|Method)(\s+)', + bygroups(Keyword.Reserved, Text), 'funcname'), + (r'(?i)(?:End|Return|Public|Private|Extern|Property|' + r'Final|Abstract)\b', Keyword.Reserved), + # Flow Control stuff + (r'(?i)(?:If|Then|Else|ElseIf|EndIf|' + r'Select|Case|Default|' + r'While|Wend|' + r'Repeat|Until|Forever|' + r'For|To|Until|Step|EachIn|Next|' + r'Exit|Continue)\s+', Keyword.Reserved), + # not used yet + (r'(?i)\b(?:Module|Inline)\b', Keyword.Reserved), + # Array + (r'[\[\]]', Punctuation), + # Other + (r'<=|>=|<>|\*=|/=|\+=|-=|&=|~=|\|=|[-&*/^+=<>|~]', Operator), + (r'(?i)(?:Not|Mod|Shl|Shr|And|Or)', Operator.Word), + (r'[(){}!#,.:]', Punctuation), + # catch the rest + (r'%s\b' % name_constant, Name.Constant), + (r'%s\b' % name_function, Name.Function), + (r'%s\b' % name_variable, Name.Variable), + ], + 'funcname': [ + (r'(?i)%s\b' % name_function, Name.Function), + (r':', Punctuation, 'classname'), + (r'\s+', Text), + (r'\(', Punctuation, 'variables'), + (r'\)', Punctuation, '#pop') + ], + 'classname': [ + (r'%s\.' 
% name_module, Name.Namespace), + (r'%s\b' % keyword_type, Keyword.Type), + (r'%s\b' % name_class, Name.Class), + # array (of given size) + (r'(\[)(\s*)(\d*)(\s*)(\])', + bygroups(Punctuation, Text, Number.Integer, Text, Punctuation)), + # generics + (r'\s+(?!<)', Text, '#pop'), + (r'<', Punctuation, '#push'), + (r'>', Punctuation, '#pop'), + (r'\n', Text, '#pop'), + default('#pop') + ], + 'variables': [ + (r'%s\b' % name_constant, Name.Constant), + (r'%s\b' % name_variable, Name.Variable), + (r'%s' % keyword_type_special, Keyword.Type), + (r'\s+', Text), + (r':', Punctuation, 'classname'), + (r',', Punctuation, '#push'), + default('#pop') + ], + 'string': [ + (r'[^"~]+', String.Double), + (r'~q|~n|~r|~t|~z|~~', String.Escape), + (r'"', String.Double, '#pop'), + ], + 'comment': [ + (r'(?i)^#rem.*?', Comment.Multiline, "#push"), + (r'(?i)^#end.*?', Comment.Multiline, "#pop"), + (r'\n', Comment.Multiline), + (r'.+', Comment.Multiline), + ], + } + + +class CbmBasicV2Lexer(RegexLexer): + """ + For CBM BASIC V2 sources. + + .. versionadded:: 1.6 + """ + name = 'CBM BASIC V2' + aliases = ['cbmbas'] + filenames = ['*.bas'] + + flags = re.IGNORECASE + + tokens = { + 'root': [ + (r'rem.*\n', Comment.Single), + (r'\s+', Text), + (r'new|run|end|for|to|next|step|go(to|sub)?|on|return|stop|cont' + r'|if|then|input#?|read|wait|load|save|verify|poke|sys|print#?' 
+ r'|list|clr|cmd|open|close|get#?', Keyword.Reserved), + (r'data|restore|dim|let|def|fn', Keyword.Declaration), + (r'tab|spc|sgn|int|abs|usr|fre|pos|sqr|rnd|log|exp|cos|sin|tan|atn' + r'|peek|len|val|asc|(str|chr|left|right|mid)\$', Name.Builtin), + (r'[-+*/^<>=]', Operator), + (r'not|and|or', Operator.Word), + (r'"[^"\n]*.', String), + (r'\d+|[-+]?\d*\.\d*(e[-+]?\d+)?', Number.Float), + (r'[(),:;]', Punctuation), + (r'\w+[$%]?', Name), + ] + } + + def analyse_text(self, text): + # if it starts with a line number, it shouldn't be a "modern" Basic + # like VB.net + if re.match(r'\d+', text): + return 0.2 + + +class QBasicLexer(RegexLexer): + """ + For + `QBasic `_ + source code. + + .. versionadded:: 2.0 + """ + + name = 'QBasic' + aliases = ['qbasic', 'basic'] + filenames = ['*.BAS', '*.bas'] + mimetypes = ['text/basic'] + + declarations = ('DATA', 'LET') + + functions = ( + 'ABS', 'ASC', 'ATN', 'CDBL', 'CHR$', 'CINT', 'CLNG', + 'COMMAND$', 'COS', 'CSNG', 'CSRLIN', 'CVD', 'CVDMBF', 'CVI', + 'CVL', 'CVS', 'CVSMBF', 'DATE$', 'ENVIRON$', 'EOF', 'ERDEV', + 'ERDEV$', 'ERL', 'ERR', 'EXP', 'FILEATTR', 'FIX', 'FRE', + 'FREEFILE', 'HEX$', 'INKEY$', 'INP', 'INPUT$', 'INSTR', 'INT', + 'IOCTL$', 'LBOUND', 'LCASE$', 'LEFT$', 'LEN', 'LOC', 'LOF', + 'LOG', 'LPOS', 'LTRIM$', 'MID$', 'MKD$', 'MKDMBF$', 'MKI$', + 'MKL$', 'MKS$', 'MKSMBF$', 'OCT$', 'PEEK', 'PEN', 'PLAY', + 'PMAP', 'POINT', 'POS', 'RIGHT$', 'RND', 'RTRIM$', 'SADD', + 'SCREEN', 'SEEK', 'SETMEM', 'SGN', 'SIN', 'SPACE$', 'SPC', + 'SQR', 'STICK', 'STR$', 'STRIG', 'STRING$', 'TAB', 'TAN', + 'TIME$', 'TIMER', 'UBOUND', 'UCASE$', 'VAL', 'VARPTR', + 'VARPTR$', 'VARSEG' + ) + + metacommands = ('$DYNAMIC', '$INCLUDE', '$STATIC') + + operators = ('AND', 'EQV', 'IMP', 'NOT', 'OR', 'XOR') + + statements = ( + 'BEEP', 'BLOAD', 'BSAVE', 'CALL', 'CALL ABSOLUTE', + 'CALL INTERRUPT', 'CALLS', 'CHAIN', 'CHDIR', 'CIRCLE', 'CLEAR', + 'CLOSE', 'CLS', 'COLOR', 'COM', 'COMMON', 'CONST', 'DATA', + 'DATE$', 'DECLARE', 'DEF FN', 'DEF SEG', 
'DEFDBL', 'DEFINT', + 'DEFLNG', 'DEFSNG', 'DEFSTR', 'DEF', 'DIM', 'DO', 'LOOP', + 'DRAW', 'END', 'ENVIRON', 'ERASE', 'ERROR', 'EXIT', 'FIELD', + 'FILES', 'FOR', 'NEXT', 'FUNCTION', 'GET', 'GOSUB', 'GOTO', + 'IF', 'THEN', 'INPUT', 'INPUT #', 'IOCTL', 'KEY', 'KEY', + 'KILL', 'LET', 'LINE', 'LINE INPUT', 'LINE INPUT #', 'LOCATE', + 'LOCK', 'UNLOCK', 'LPRINT', 'LSET', 'MID$', 'MKDIR', 'NAME', + 'ON COM', 'ON ERROR', 'ON KEY', 'ON PEN', 'ON PLAY', + 'ON STRIG', 'ON TIMER', 'ON UEVENT', 'ON', 'OPEN', 'OPEN COM', + 'OPTION BASE', 'OUT', 'PAINT', 'PALETTE', 'PCOPY', 'PEN', + 'PLAY', 'POKE', 'PRESET', 'PRINT', 'PRINT #', 'PRINT USING', + 'PSET', 'PUT', 'PUT', 'RANDOMIZE', 'READ', 'REDIM', 'REM', + 'RESET', 'RESTORE', 'RESUME', 'RETURN', 'RMDIR', 'RSET', 'RUN', + 'SCREEN', 'SEEK', 'SELECT CASE', 'SHARED', 'SHELL', 'SLEEP', + 'SOUND', 'STATIC', 'STOP', 'STRIG', 'SUB', 'SWAP', 'SYSTEM', + 'TIME$', 'TIMER', 'TROFF', 'TRON', 'TYPE', 'UEVENT', 'UNLOCK', + 'VIEW', 'WAIT', 'WHILE', 'WEND', 'WIDTH', 'WINDOW', 'WRITE' + ) + + keywords = ( + 'ACCESS', 'ALIAS', 'ANY', 'APPEND', 'AS', 'BASE', 'BINARY', + 'BYVAL', 'CASE', 'CDECL', 'DOUBLE', 'ELSE', 'ELSEIF', 'ENDIF', + 'INTEGER', 'IS', 'LIST', 'LOCAL', 'LONG', 'LOOP', 'MOD', + 'NEXT', 'OFF', 'ON', 'OUTPUT', 'RANDOM', 'SIGNAL', 'SINGLE', + 'STEP', 'STRING', 'THEN', 'TO', 'UNTIL', 'USING', 'WEND' + ) + + tokens = { + 'root': [ + (r'\n+', Text), + (r'\s+', Text.Whitespace), + (r'^(\s*)(\d*)(\s*)(REM .*)$', + bygroups(Text.Whitespace, Name.Label, Text.Whitespace, + Comment.Single)), + (r'^(\s*)(\d+)(\s*)', + bygroups(Text.Whitespace, Name.Label, Text.Whitespace)), + (r'(?=[\s]*)(\w+)(?=[\s]*=)', Name.Variable.Global), + (r'(?=[^"]*)\'.*$', Comment.Single), + (r'"[^\n"]*"', String.Double), + (r'(END)(\s+)(FUNCTION|IF|SELECT|SUB)', + bygroups(Keyword.Reserved, Text.Whitespace, Keyword.Reserved)), + (r'(DECLARE)(\s+)([A-Z]+)(\s+)(\S+)', + bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable, + Text.Whitespace, Name)), + 
(r'(DIM)(\s+)(SHARED)(\s+)([^\s(]+)', + bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable, + Text.Whitespace, Name.Variable.Global)), + (r'(DIM)(\s+)([^\s(]+)', + bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable.Global)), + (r'^(\s*)([a-zA-Z_]+)(\s*)(\=)', + bygroups(Text.Whitespace, Name.Variable.Global, Text.Whitespace, + Operator)), + (r'(GOTO|GOSUB)(\s+)(\w+\:?)', + bygroups(Keyword.Reserved, Text.Whitespace, Name.Label)), + (r'(SUB)(\s+)(\w+\:?)', + bygroups(Keyword.Reserved, Text.Whitespace, Name.Label)), + include('declarations'), + include('functions'), + include('metacommands'), + include('operators'), + include('statements'), + include('keywords'), + (r'[a-zA-Z_]\w*[$@#&!]', Name.Variable.Global), + (r'[a-zA-Z_]\w*\:', Name.Label), + (r'\-?\d*\.\d+[@|#]?', Number.Float), + (r'\-?\d+[@|#]', Number.Float), + (r'\-?\d+#?', Number.Integer.Long), + (r'\-?\d+#?', Number.Integer), + (r'!=|==|:=|\.=|<<|>>|[-~+/\\*%=<>&^|?:!.]', Operator), + (r'[\[\]{}(),;]', Punctuation), + (r'[\w]+', Name.Variable.Global), + ], + # can't use regular \b because of X$() + # XXX: use words() here + 'declarations': [ + (r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, declarations)), + Keyword.Declaration), + ], + 'functions': [ + (r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, functions)), + Keyword.Reserved), + ], + 'metacommands': [ + (r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, metacommands)), + Keyword.Constant), + ], + 'operators': [ + (r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, operators)), Operator.Word), + ], + 'statements': [ + (r'\b(%s)\b' % '|'.join(map(re.escape, statements)), + Keyword.Reserved), + ], + 'keywords': [ + (r'\b(%s)\b' % '|'.join(keywords), Keyword), + ], + } + + def analyse_text(text): + if '$DYNAMIC' in text or '$STATIC' in text: + return 0.9 diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/business.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/business.py new file 
mode 100644 index 0000000..11f0093 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/business.py @@ -0,0 +1,592 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.business + ~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for "business-oriented" languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include, words, bygroups +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Error + +from pygments.lexers._openedge_builtins import OPENEDGEKEYWORDS + +__all__ = ['CobolLexer', 'CobolFreeformatLexer', 'ABAPLexer', 'OpenEdgeLexer', + 'GoodDataCLLexer', 'MaqlLexer'] + + +class CobolLexer(RegexLexer): + """ + Lexer for OpenCOBOL code. + + .. versionadded:: 1.6 + """ + name = 'COBOL' + aliases = ['cobol'] + filenames = ['*.cob', '*.COB', '*.cpy', '*.CPY'] + mimetypes = ['text/x-cobol'] + flags = re.IGNORECASE | re.MULTILINE + + # Data Types: by PICTURE and USAGE + # Operators: **, *, +, -, /, <, >, <=, >=, =, <> + # Logical (?): NOT, AND, OR + + # Reserved words: + # http://opencobol.add1tocobol.com/#reserved-words + # Intrinsics: + # http://opencobol.add1tocobol.com/#does-opencobol-implement-any-intrinsic-functions + + tokens = { + 'root': [ + include('comment'), + include('strings'), + include('core'), + include('nums'), + (r'[a-z0-9]([\w\-]*[a-z0-9]+)?', Name.Variable), + # (r'[\s]+', Text), + (r'[ \t]+', Text), + ], + 'comment': [ + (r'(^.{6}[*/].*\n|^.{6}|\*>.*\n)', Comment), + ], + 'core': [ + # Figurative constants + (r'(^|(?<=[^0-9a-z_\-]))(ALL\s+)?' 
+ r'((ZEROES)|(HIGH-VALUE|LOW-VALUE|QUOTE|SPACE|ZERO)(S)?)' + r'\s*($|(?=[^0-9a-z_\-]))', + Name.Constant), + + # Reserved words STATEMENTS and other bolds + (words(( + 'ACCEPT', 'ADD', 'ALLOCATE', 'CALL', 'CANCEL', 'CLOSE', 'COMPUTE', + 'CONFIGURATION', 'CONTINUE', 'DATA', 'DELETE', 'DISPLAY', 'DIVIDE', + 'DIVISION', 'ELSE', 'END', 'END-ACCEPT', + 'END-ADD', 'END-CALL', 'END-COMPUTE', 'END-DELETE', 'END-DISPLAY', + 'END-DIVIDE', 'END-EVALUATE', 'END-IF', 'END-MULTIPLY', 'END-OF-PAGE', + 'END-PERFORM', 'END-READ', 'END-RETURN', 'END-REWRITE', 'END-SEARCH', + 'END-START', 'END-STRING', 'END-SUBTRACT', 'END-UNSTRING', 'END-WRITE', + 'ENVIRONMENT', 'EVALUATE', 'EXIT', 'FD', 'FILE', 'FILE-CONTROL', 'FOREVER', + 'FREE', 'GENERATE', 'GO', 'GOBACK', 'IDENTIFICATION', 'IF', 'INITIALIZE', + 'INITIATE', 'INPUT-OUTPUT', 'INSPECT', 'INVOKE', 'I-O-CONTROL', 'LINKAGE', + 'LOCAL-STORAGE', 'MERGE', 'MOVE', 'MULTIPLY', 'OPEN', 'PERFORM', + 'PROCEDURE', 'PROGRAM-ID', 'RAISE', 'READ', 'RELEASE', 'RESUME', + 'RETURN', 'REWRITE', 'SCREEN', 'SD', 'SEARCH', 'SECTION', 'SET', + 'SORT', 'START', 'STOP', 'STRING', 'SUBTRACT', 'SUPPRESS', + 'TERMINATE', 'THEN', 'UNLOCK', 'UNSTRING', 'USE', 'VALIDATE', + 'WORKING-STORAGE', 'WRITE'), prefix=r'(^|(?<=[^0-9a-z_\-]))', + suffix=r'\s*($|(?=[^0-9a-z_\-]))'), + Keyword.Reserved), + + # Reserved words + (words(( + 'ACCESS', 'ADDRESS', 'ADVANCING', 'AFTER', 'ALL', + 'ALPHABET', 'ALPHABETIC', 'ALPHABETIC-LOWER', 'ALPHABETIC-UPPER', + 'ALPHANUMERIC', 'ALPHANUMERIC-EDITED', 'ALSO', 'ALTER', 'ALTERNATE' + 'ANY', 'ARE', 'AREA', 'AREAS', 'ARGUMENT-NUMBER', 'ARGUMENT-VALUE', 'AS', + 'ASCENDING', 'ASSIGN', 'AT', 'AUTO', 'AUTO-SKIP', 'AUTOMATIC', 'AUTOTERMINATE', + 'BACKGROUND-COLOR', 'BASED', 'BEEP', 'BEFORE', 'BELL', + 'BLANK', 'BLINK', 'BLOCK', 'BOTTOM', 'BY', 'BYTE-LENGTH', 'CHAINING', + 'CHARACTER', 'CHARACTERS', 'CLASS', 'CODE', 'CODE-SET', 'COL', 'COLLATING', + 'COLS', 'COLUMN', 'COLUMNS', 'COMMA', 'COMMAND-LINE', 'COMMIT', 'COMMON', + 'CONSTANT', 
'CONTAINS', 'CONTENT', 'CONTROL', + 'CONTROLS', 'CONVERTING', 'COPY', 'CORR', 'CORRESPONDING', 'COUNT', 'CRT', + 'CURRENCY', 'CURSOR', 'CYCLE', 'DATE', 'DAY', 'DAY-OF-WEEK', 'DE', 'DEBUGGING', + 'DECIMAL-POINT', 'DECLARATIVES', 'DEFAULT', 'DELIMITED', + 'DELIMITER', 'DEPENDING', 'DESCENDING', 'DETAIL', 'DISK', + 'DOWN', 'DUPLICATES', 'DYNAMIC', 'EBCDIC', + 'ENTRY', 'ENVIRONMENT-NAME', 'ENVIRONMENT-VALUE', 'EOL', 'EOP', + 'EOS', 'ERASE', 'ERROR', 'ESCAPE', 'EXCEPTION', + 'EXCLUSIVE', 'EXTEND', 'EXTERNAL', + 'FILE-ID', 'FILLER', 'FINAL', 'FIRST', 'FIXED', 'FLOAT-LONG', 'FLOAT-SHORT', + 'FOOTING', 'FOR', 'FOREGROUND-COLOR', 'FORMAT', 'FROM', 'FULL', 'FUNCTION', + 'FUNCTION-ID', 'GIVING', 'GLOBAL', 'GROUP', + 'HEADING', 'HIGHLIGHT', 'I-O', 'ID', + 'IGNORE', 'IGNORING', 'IN', 'INDEX', 'INDEXED', 'INDICATE', + 'INITIAL', 'INITIALIZED', 'INPUT', + 'INTO', 'INTRINSIC', 'INVALID', 'IS', 'JUST', 'JUSTIFIED', 'KEY', 'LABEL', + 'LAST', 'LEADING', 'LEFT', 'LENGTH', 'LIMIT', 'LIMITS', 'LINAGE', + 'LINAGE-COUNTER', 'LINE', 'LINES', 'LOCALE', 'LOCK', + 'LOWLIGHT', 'MANUAL', 'MEMORY', 'MINUS', 'MODE', + 'MULTIPLE', 'NATIONAL', 'NATIONAL-EDITED', 'NATIVE', + 'NEGATIVE', 'NEXT', 'NO', 'NULL', 'NULLS', 'NUMBER', 'NUMBERS', 'NUMERIC', + 'NUMERIC-EDITED', 'OBJECT-COMPUTER', 'OCCURS', 'OF', 'OFF', 'OMITTED', 'ON', 'ONLY', + 'OPTIONAL', 'ORDER', 'ORGANIZATION', 'OTHER', 'OUTPUT', 'OVERFLOW', + 'OVERLINE', 'PACKED-DECIMAL', 'PADDING', 'PAGE', 'PARAGRAPH', + 'PLUS', 'POINTER', 'POSITION', 'POSITIVE', 'PRESENT', 'PREVIOUS', + 'PRINTER', 'PRINTING', 'PROCEDURE-POINTER', 'PROCEDURES', + 'PROCEED', 'PROGRAM', 'PROGRAM-POINTER', 'PROMPT', 'QUOTE', + 'QUOTES', 'RANDOM', 'RD', 'RECORD', 'RECORDING', 'RECORDS', 'RECURSIVE', + 'REDEFINES', 'REEL', 'REFERENCE', 'RELATIVE', 'REMAINDER', 'REMOVAL', + 'RENAMES', 'REPLACING', 'REPORT', 'REPORTING', 'REPORTS', 'REPOSITORY', + 'REQUIRED', 'RESERVE', 'RETURNING', 'REVERSE-VIDEO', 'REWIND', + 'RIGHT', 'ROLLBACK', 'ROUNDED', 'RUN', 'SAME', 'SCROLL', + 
'SECURE', 'SEGMENT-LIMIT', 'SELECT', 'SENTENCE', 'SEPARATE', + 'SEQUENCE', 'SEQUENTIAL', 'SHARING', 'SIGN', 'SIGNED', 'SIGNED-INT', + 'SIGNED-LONG', 'SIGNED-SHORT', 'SIZE', 'SORT-MERGE', 'SOURCE', + 'SOURCE-COMPUTER', 'SPECIAL-NAMES', 'STANDARD', + 'STANDARD-1', 'STANDARD-2', 'STATUS', 'SUM', + 'SYMBOLIC', 'SYNC', 'SYNCHRONIZED', 'TALLYING', 'TAPE', + 'TEST', 'THROUGH', 'THRU', 'TIME', 'TIMES', 'TO', 'TOP', 'TRAILING', + 'TRANSFORM', 'TYPE', 'UNDERLINE', 'UNIT', 'UNSIGNED', + 'UNSIGNED-INT', 'UNSIGNED-LONG', 'UNSIGNED-SHORT', 'UNTIL', 'UP', + 'UPDATE', 'UPON', 'USAGE', 'USING', 'VALUE', 'VALUES', 'VARYING', + 'WAIT', 'WHEN', 'WITH', 'WORDS', 'YYYYDDD', 'YYYYMMDD'), + prefix=r'(^|(?<=[^0-9a-z_\-]))', suffix=r'\s*($|(?=[^0-9a-z_\-]))'), + Keyword.Pseudo), + + # inactive reserved words + (words(( + 'ACTIVE-CLASS', 'ALIGNED', 'ANYCASE', 'ARITHMETIC', 'ATTRIBUTE', 'B-AND', + 'B-NOT', 'B-OR', 'B-XOR', 'BIT', 'BOOLEAN', 'CD', 'CENTER', 'CF', 'CH', 'CHAIN', 'CLASS-ID', + 'CLASSIFICATION', 'COMMUNICATION', 'CONDITION', 'DATA-POINTER', + 'DESTINATION', 'DISABLE', 'EC', 'EGI', 'EMI', 'ENABLE', 'END-RECEIVE', + 'ENTRY-CONVENTION', 'EO', 'ESI', 'EXCEPTION-OBJECT', 'EXPANDS', 'FACTORY', + 'FLOAT-BINARY-16', 'FLOAT-BINARY-34', 'FLOAT-BINARY-7', + 'FLOAT-DECIMAL-16', 'FLOAT-DECIMAL-34', 'FLOAT-EXTENDED', 'FORMAT', + 'FUNCTION-POINTER', 'GET', 'GROUP-USAGE', 'IMPLEMENTS', 'INFINITY', + 'INHERITS', 'INTERFACE', 'INTERFACE-ID', 'INVOKE', 'LC_ALL', 'LC_COLLATE', + 'LC_CTYPE', 'LC_MESSAGES', 'LC_MONETARY', 'LC_NUMERIC', 'LC_TIME', + 'LINE-COUNTER', 'MESSAGE', 'METHOD', 'METHOD-ID', 'NESTED', 'NONE', 'NORMAL', + 'OBJECT', 'OBJECT-REFERENCE', 'OPTIONS', 'OVERRIDE', 'PAGE-COUNTER', 'PF', 'PH', + 'PROPERTY', 'PROTOTYPE', 'PURGE', 'QUEUE', 'RAISE', 'RAISING', 'RECEIVE', + 'RELATION', 'REPLACE', 'REPRESENTS-NOT-A-NUMBER', 'RESET', 'RESUME', 'RETRY', + 'RF', 'RH', 'SECONDS', 'SEGMENT', 'SELF', 'SEND', 'SOURCES', 'STATEMENT', 'STEP', + 'STRONG', 'SUB-QUEUE-1', 'SUB-QUEUE-2', 'SUB-QUEUE-3', 
'SUPER', 'SYMBOL', + 'SYSTEM-DEFAULT', 'TABLE', 'TERMINAL', 'TEXT', 'TYPEDEF', 'UCS-4', 'UNIVERSAL', + 'USER-DEFAULT', 'UTF-16', 'UTF-8', 'VAL-STATUS', 'VALID', 'VALIDATE', + 'VALIDATE-STATUS'), + prefix=r'(^|(?<=[^0-9a-z_\-]))', suffix=r'\s*($|(?=[^0-9a-z_\-]))'), + Error), + + # Data Types + (r'(^|(?<=[^0-9a-z_\-]))' + r'(PIC\s+.+?(?=(\s|\.\s))|PICTURE\s+.+?(?=(\s|\.\s))|' + r'(COMPUTATIONAL)(-[1-5X])?|(COMP)(-[1-5X])?|' + r'BINARY-C-LONG|' + r'BINARY-CHAR|BINARY-DOUBLE|BINARY-LONG|BINARY-SHORT|' + r'BINARY)\s*($|(?=[^0-9a-z_\-]))', Keyword.Type), + + # Operators + (r'(\*\*|\*|\+|-|/|<=|>=|<|>|==|/=|=)', Operator), + + # (r'(::)', Keyword.Declaration), + + (r'([(),;:&%.])', Punctuation), + + # Intrinsics + (r'(^|(?<=[^0-9a-z_\-]))(ABS|ACOS|ANNUITY|ASIN|ATAN|BYTE-LENGTH|' + r'CHAR|COMBINED-DATETIME|CONCATENATE|COS|CURRENT-DATE|' + r'DATE-OF-INTEGER|DATE-TO-YYYYMMDD|DAY-OF-INTEGER|DAY-TO-YYYYDDD|' + r'EXCEPTION-(?:FILE|LOCATION|STATEMENT|STATUS)|EXP10|EXP|E|' + r'FACTORIAL|FRACTION-PART|INTEGER-OF-(?:DATE|DAY|PART)|INTEGER|' + r'LENGTH|LOCALE-(?:DATE|TIME(?:-FROM-SECONDS)?)|LOG(?:10)?|' + r'LOWER-CASE|MAX|MEAN|MEDIAN|MIDRANGE|MIN|MOD|NUMVAL(?:-C)?|' + r'ORD(?:-MAX|-MIN)?|PI|PRESENT-VALUE|RANDOM|RANGE|REM|REVERSE|' + r'SECONDS-FROM-FORMATTED-TIME|SECONDS-PAST-MIDNIGHT|SIGN|SIN|SQRT|' + r'STANDARD-DEVIATION|STORED-CHAR-LENGTH|SUBSTITUTE(?:-CASE)?|' + r'SUM|TAN|TEST-DATE-YYYYMMDD|TEST-DAY-YYYYDDD|TRIM|' + r'UPPER-CASE|VARIANCE|WHEN-COMPILED|YEAR-TO-YYYY)\s*' + r'($|(?=[^0-9a-z_\-]))', Name.Function), + + # Booleans + (r'(^|(?<=[^0-9a-z_\-]))(true|false)\s*($|(?=[^0-9a-z_\-]))', Name.Builtin), + # Comparing Operators + (r'(^|(?<=[^0-9a-z_\-]))(equal|equals|ne|lt|le|gt|ge|' + r'greater|less|than|not|and|or)\s*($|(?=[^0-9a-z_\-]))', Operator.Word), + ], + + # \"[^\"\n]*\"|\'[^\'\n]*\' + 'strings': [ + # apparently strings can be delimited by EOL if they are continued + # in the next line + (r'"[^"\n]*("|\n)', String.Double), + (r"'[^'\n]*('|\n)", String.Single), + ], + + 
'nums': [ + (r'\d+(\s*|\.$|$)', Number.Integer), + (r'[+-]?\d*\.\d+(E[-+]?\d+)?', Number.Float), + (r'[+-]?\d+\.\d*(E[-+]?\d+)?', Number.Float), + ], + } + + +class CobolFreeformatLexer(CobolLexer): + """ + Lexer for Free format OpenCOBOL code. + + .. versionadded:: 1.6 + """ + name = 'COBOLFree' + aliases = ['cobolfree'] + filenames = ['*.cbl', '*.CBL'] + mimetypes = [] + flags = re.IGNORECASE | re.MULTILINE + + tokens = { + 'comment': [ + (r'(\*>.*\n|^\w*\*.*$)', Comment), + ], + } + + +class ABAPLexer(RegexLexer): + """ + Lexer for ABAP, SAP's integrated language. + + .. versionadded:: 1.1 + """ + name = 'ABAP' + aliases = ['abap'] + filenames = ['*.abap'] + mimetypes = ['text/x-abap'] + + flags = re.IGNORECASE | re.MULTILINE + + tokens = { + 'common': [ + (r'\s+', Text), + (r'^\*.*$', Comment.Single), + (r'\".*?\n', Comment.Single), + ], + 'variable-names': [ + (r'<\S+>', Name.Variable), + (r'\w[\w~]*(?:(\[\])|->\*)?', Name.Variable), + ], + 'root': [ + include('common'), + # function calls + (r'(CALL\s+(?:BADI|CUSTOMER-FUNCTION|FUNCTION))(\s+)(\'?\S+\'?)', + bygroups(Keyword, Text, Name.Function)), + (r'(CALL\s+(?:DIALOG|SCREEN|SUBSCREEN|SELECTION-SCREEN|' + r'TRANSACTION|TRANSFORMATION))\b', + Keyword), + (r'(FORM|PERFORM)(\s+)(\w+)', + bygroups(Keyword, Text, Name.Function)), + (r'(PERFORM)(\s+)(\()(\w+)(\))', + bygroups(Keyword, Text, Punctuation, Name.Variable, Punctuation)), + (r'(MODULE)(\s+)(\S+)(\s+)(INPUT|OUTPUT)', + bygroups(Keyword, Text, Name.Function, Text, Keyword)), + + # method implementation + (r'(METHOD)(\s+)([\w~]+)', + bygroups(Keyword, Text, Name.Function)), + # method calls + (r'(\s+)([\w\-]+)([=\-]>)([\w\-~]+)', + bygroups(Text, Name.Variable, Operator, Name.Function)), + # call methodnames returning style + (r'(?<=(=|-)>)([\w\-~]+)(?=\()', Name.Function), + + # keywords with dashes in them. 
+ # these need to be first, because for instance the -ID part + # of MESSAGE-ID wouldn't get highlighted if MESSAGE was + # first in the list of keywords. + (r'(ADD-CORRESPONDING|AUTHORITY-CHECK|' + r'CLASS-DATA|CLASS-EVENTS|CLASS-METHODS|CLASS-POOL|' + r'DELETE-ADJACENT|DIVIDE-CORRESPONDING|' + r'EDITOR-CALL|ENHANCEMENT-POINT|ENHANCEMENT-SECTION|EXIT-COMMAND|' + r'FIELD-GROUPS|FIELD-SYMBOLS|FUNCTION-POOL|' + r'INTERFACE-POOL|INVERTED-DATE|' + r'LOAD-OF-PROGRAM|LOG-POINT|' + r'MESSAGE-ID|MOVE-CORRESPONDING|MULTIPLY-CORRESPONDING|' + r'NEW-LINE|NEW-PAGE|NEW-SECTION|NO-EXTENSION|' + r'OUTPUT-LENGTH|PRINT-CONTROL|' + r'SELECT-OPTIONS|START-OF-SELECTION|SUBTRACT-CORRESPONDING|' + r'SYNTAX-CHECK|SYSTEM-EXCEPTIONS|' + r'TYPE-POOL|TYPE-POOLS' + r')\b', Keyword), + + # keyword kombinations + (r'CREATE\s+(PUBLIC|PRIVATE|DATA|OBJECT)|' + r'((PUBLIC|PRIVATE|PROTECTED)\s+SECTION|' + r'(TYPE|LIKE)(\s+(LINE\s+OF|REF\s+TO|' + r'(SORTED|STANDARD|HASHED)\s+TABLE\s+OF))?|' + r'FROM\s+(DATABASE|MEMORY)|CALL\s+METHOD|' + r'(GROUP|ORDER) BY|HAVING|SEPARATED BY|' + r'GET\s+(BADI|BIT|CURSOR|DATASET|LOCALE|PARAMETER|' + r'PF-STATUS|(PROPERTY|REFERENCE)\s+OF|' + r'RUN\s+TIME|TIME\s+(STAMP)?)?|' + r'SET\s+(BIT|BLANK\s+LINES|COUNTRY|CURSOR|DATASET|EXTENDED\s+CHECK|' + r'HANDLER|HOLD\s+DATA|LANGUAGE|LEFT\s+SCROLL-BOUNDARY|' + r'LOCALE|MARGIN|PARAMETER|PF-STATUS|PROPERTY\s+OF|' + r'RUN\s+TIME\s+(ANALYZER|CLOCK\s+RESOLUTION)|SCREEN|' + r'TITLEBAR|UPADTE\s+TASK\s+LOCAL|USER-COMMAND)|' + r'CONVERT\s+((INVERTED-)?DATE|TIME|TIME\s+STAMP|TEXT)|' + r'(CLOSE|OPEN)\s+(DATASET|CURSOR)|' + r'(TO|FROM)\s+(DATA BUFFER|INTERNAL TABLE|MEMORY ID|' + r'DATABASE|SHARED\s+(MEMORY|BUFFER))|' + r'DESCRIBE\s+(DISTANCE\s+BETWEEN|FIELD|LIST|TABLE)|' + r'FREE\s(MEMORY|OBJECT)?|' + r'PROCESS\s+(BEFORE\s+OUTPUT|AFTER\s+INPUT|' + r'ON\s+(VALUE-REQUEST|HELP-REQUEST))|' + r'AT\s+(LINE-SELECTION|USER-COMMAND|END\s+OF|NEW)|' + r'AT\s+SELECTION-SCREEN(\s+(ON(\s+(BLOCK|(HELP|VALUE)-REQUEST\s+FOR|' + 
r'END\s+OF|RADIOBUTTON\s+GROUP))?|OUTPUT))?|' + r'SELECTION-SCREEN:?\s+((BEGIN|END)\s+OF\s+((TABBED\s+)?BLOCK|LINE|' + r'SCREEN)|COMMENT|FUNCTION\s+KEY|' + r'INCLUDE\s+BLOCKS|POSITION|PUSHBUTTON|' + r'SKIP|ULINE)|' + r'LEAVE\s+(LIST-PROCESSING|PROGRAM|SCREEN|' + r'TO LIST-PROCESSING|TO TRANSACTION)' + r'(ENDING|STARTING)\s+AT|' + r'FORMAT\s+(COLOR|INTENSIFIED|INVERSE|HOTSPOT|INPUT|FRAMES|RESET)|' + r'AS\s+(CHECKBOX|SUBSCREEN|WINDOW)|' + r'WITH\s+(((NON-)?UNIQUE)?\s+KEY|FRAME)|' + r'(BEGIN|END)\s+OF|' + r'DELETE(\s+ADJACENT\s+DUPLICATES\sFROM)?|' + r'COMPARING(\s+ALL\s+FIELDS)?|' + r'INSERT(\s+INITIAL\s+LINE\s+INTO|\s+LINES\s+OF)?|' + r'IN\s+((BYTE|CHARACTER)\s+MODE|PROGRAM)|' + r'END-OF-(DEFINITION|PAGE|SELECTION)|' + r'WITH\s+FRAME(\s+TITLE)|' + + # simple kombinations + r'AND\s+(MARK|RETURN)|CLIENT\s+SPECIFIED|CORRESPONDING\s+FIELDS\s+OF|' + r'IF\s+FOUND|FOR\s+EVENT|INHERITING\s+FROM|LEAVE\s+TO\s+SCREEN|' + r'LOOP\s+AT\s+(SCREEN)?|LOWER\s+CASE|MATCHCODE\s+OBJECT|MODIF\s+ID|' + r'MODIFY\s+SCREEN|NESTING\s+LEVEL|NO\s+INTERVALS|OF\s+STRUCTURE|' + r'RADIOBUTTON\s+GROUP|RANGE\s+OF|REF\s+TO|SUPPRESS DIALOG|' + r'TABLE\s+OF|UPPER\s+CASE|TRANSPORTING\s+NO\s+FIELDS|' + r'VALUE\s+CHECK|VISIBLE\s+LENGTH|HEADER\s+LINE)\b', Keyword), + + # single word keywords. 
+ (r'(^|(?<=(\s|\.)))(ABBREVIATED|ADD|ALIASES|APPEND|ASSERT|' + r'ASSIGN(ING)?|AT(\s+FIRST)?|' + r'BACK|BLOCK|BREAK-POINT|' + r'CASE|CATCH|CHANGING|CHECK|CLASS|CLEAR|COLLECT|COLOR|COMMIT|' + r'CREATE|COMMUNICATION|COMPONENTS?|COMPUTE|CONCATENATE|CONDENSE|' + r'CONSTANTS|CONTEXTS|CONTINUE|CONTROLS|' + r'DATA|DECIMALS|DEFAULT|DEFINE|DEFINITION|DEFERRED|DEMAND|' + r'DETAIL|DIRECTORY|DIVIDE|DO|' + r'ELSE(IF)?|ENDAT|ENDCASE|ENDCLASS|ENDDO|ENDFORM|ENDFUNCTION|' + r'ENDIF|ENDLOOP|ENDMETHOD|ENDMODULE|ENDSELECT|ENDTRY|' + r'ENHANCEMENT|EVENTS|EXCEPTIONS|EXIT|EXPORT|EXPORTING|EXTRACT|' + r'FETCH|FIELDS?|FIND|FOR|FORM|FORMAT|FREE|FROM|' + r'HIDE|' + r'ID|IF|IMPORT|IMPLEMENTATION|IMPORTING|IN|INCLUDE|INCLUDING|' + r'INDEX|INFOTYPES|INITIALIZATION|INTERFACE|INTERFACES|INTO|' + r'LENGTH|LINES|LOAD|LOCAL|' + r'JOIN|' + r'KEY|' + r'MAXIMUM|MESSAGE|METHOD[S]?|MINIMUM|MODULE|MODIFY|MOVE|MULTIPLY|' + r'NODES|' + r'OBLIGATORY|OF|OFF|ON|OVERLAY|' + r'PACK|PARAMETERS|PERCENTAGE|POSITION|PROGRAM|PROVIDE|PUBLIC|PUT|' + r'RAISE|RAISING|RANGES|READ|RECEIVE|REFRESH|REJECT|REPORT|RESERVE|' + r'RESUME|RETRY|RETURN|RETURNING|RIGHT|ROLLBACK|' + r'SCROLL|SEARCH|SELECT|SHIFT|SINGLE|SKIP|SORT|SPLIT|STATICS|STOP|' + r'SUBMIT|SUBTRACT|SUM|SUMMARY|SUMMING|SUPPLY|' + r'TABLE|TABLES|TIMES|TITLE|TO|TOP-OF-PAGE|TRANSFER|TRANSLATE|TRY|TYPES|' + r'ULINE|UNDER|UNPACK|UPDATE|USING|' + r'VALUE|VALUES|VIA|' + r'WAIT|WHEN|WHERE|WHILE|WITH|WINDOW|WRITE)\b', Keyword), + + # builtins + (r'(abs|acos|asin|atan|' + r'boolc|boolx|bit_set|' + r'char_off|charlen|ceil|cmax|cmin|condense|contains|' + r'contains_any_of|contains_any_not_of|concat_lines_of|cos|cosh|' + r'count|count_any_of|count_any_not_of|' + r'dbmaxlen|distance|' + r'escape|exp|' + r'find|find_end|find_any_of|find_any_not_of|floor|frac|from_mixed|' + r'insert|' + r'lines|log|log10|' + r'match|matches|' + r'nmax|nmin|numofchar|' + r'repeat|replace|rescale|reverse|round|' + r'segment|shift_left|shift_right|sign|sin|sinh|sqrt|strlen|' + 
r'substring|substring_after|substring_from|substring_before|substring_to|' + r'tan|tanh|to_upper|to_lower|to_mixed|translate|trunc|' + r'xstrlen)(\()\b', bygroups(Name.Builtin, Punctuation)), + + (r'&[0-9]', Name), + (r'[0-9]+', Number.Integer), + + # operators which look like variable names before + # parsing variable names. + (r'(?<=(\s|.))(AND|EQ|NE|GT|LT|GE|LE|CO|CN|CA|NA|CS|NOT|NS|CP|NP|' + r'BYTE-CO|BYTE-CN|BYTE-CA|BYTE-NA|BYTE-CS|BYTE-NS|' + r'IS\s+(NOT\s+)?(INITIAL|ASSIGNED|REQUESTED|BOUND))\b', Operator), + + include('variable-names'), + + # standard oparators after variable names, + # because < and > are part of field symbols. + (r'[?*<>=\-+]', Operator), + (r"'(''|[^'])*'", String.Single), + (r"`([^`])*`", String.Single), + (r'[/;:()\[\],.]', Punctuation) + ], + } + + +class OpenEdgeLexer(RegexLexer): + """ + Lexer for `OpenEdge ABL (formerly Progress) + `_ source code. + + .. versionadded:: 1.5 + """ + name = 'OpenEdge ABL' + aliases = ['openedge', 'abl', 'progress'] + filenames = ['*.p', '*.cls'] + mimetypes = ['text/x-openedge', 'application/x-openedge'] + + types = (r'(?i)(^|(?<=[^0-9a-z_\-]))(CHARACTER|CHAR|CHARA|CHARAC|CHARACT|CHARACTE|' + r'COM-HANDLE|DATE|DATETIME|DATETIME-TZ|' + r'DECIMAL|DEC|DECI|DECIM|DECIMA|HANDLE|' + r'INT64|INTEGER|INT|INTE|INTEG|INTEGE|' + r'LOGICAL|LONGCHAR|MEMPTR|RAW|RECID|ROWID)\s*($|(?=[^0-9a-z_\-]))') + + keywords = words(OPENEDGEKEYWORDS, + prefix=r'(?i)(^|(?<=[^0-9a-z_\-]))', + suffix=r'\s*($|(?=[^0-9a-z_\-]))') + + tokens = { + 'root': [ + (r'/\*', Comment.Multiline, 'comment'), + (r'\{', Comment.Preproc, 'preprocessor'), + (r'\s*&.*', Comment.Preproc), + (r'0[xX][0-9a-fA-F]+[LlUu]*', Number.Hex), + (r'(?i)(DEFINE|DEF|DEFI|DEFIN)\b', Keyword.Declaration), + (types, Keyword.Type), + (keywords, Name.Builtin), + (r'"(\\\\|\\"|[^"])*"', String.Double), + (r"'(\\\\|\\'|[^'])*'", String.Single), + (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), + (r'[0-9]+', Number.Integer), + (r'\s+', Text), + (r'[+*/=-]', 
Operator), + (r'[.:()]', Punctuation), + (r'.', Name.Variable), # Lazy catch-all + ], + 'comment': [ + (r'[^*/]', Comment.Multiline), + (r'/\*', Comment.Multiline, '#push'), + (r'\*/', Comment.Multiline, '#pop'), + (r'[*/]', Comment.Multiline) + ], + 'preprocessor': [ + (r'[^{}]', Comment.Preproc), + (r'\{', Comment.Preproc, '#push'), + (r'\}', Comment.Preproc, '#pop'), + ], + } + + +class GoodDataCLLexer(RegexLexer): + """ + Lexer for `GoodData-CL + `_ + script files. + + .. versionadded:: 1.4 + """ + + name = 'GoodData-CL' + aliases = ['gooddata-cl'] + filenames = ['*.gdc'] + mimetypes = ['text/x-gooddata-cl'] + + flags = re.IGNORECASE + tokens = { + 'root': [ + # Comments + (r'#.*', Comment.Single), + # Function call + (r'[a-z]\w*', Name.Function), + # Argument list + (r'\(', Punctuation, 'args-list'), + # Punctuation + (r';', Punctuation), + # Space is not significant + (r'\s+', Text) + ], + 'args-list': [ + (r'\)', Punctuation, '#pop'), + (r',', Punctuation), + (r'[a-z]\w*', Name.Variable), + (r'=', Operator), + (r'"', String, 'string-literal'), + (r'[0-9]+(?:\.[0-9]+)?(?:e[+-]?[0-9]{1,3})?', Number), + # Space is not significant + (r'\s', Text) + ], + 'string-literal': [ + (r'\\[tnrfbae"\\]', String.Escape), + (r'"', String, '#pop'), + (r'[^\\"]+', String) + ] + } + + +class MaqlLexer(RegexLexer): + """ + Lexer for `GoodData MAQL + `_ + scripts. + + .. 
versionadded:: 1.4 + """ + + name = 'MAQL' + aliases = ['maql'] + filenames = ['*.maql'] + mimetypes = ['text/x-gooddata-maql', 'application/x-gooddata-maql'] + + flags = re.IGNORECASE + tokens = { + 'root': [ + # IDENTITY + (r'IDENTIFIER\b', Name.Builtin), + # IDENTIFIER + (r'\{[^}]+\}', Name.Variable), + # NUMBER + (r'[0-9]+(?:\.[0-9]+)?(?:e[+-]?[0-9]{1,3})?', Number), + # STRING + (r'"', String, 'string-literal'), + # RELATION + (r'\<\>|\!\=', Operator), + (r'\=|\>\=|\>|\<\=|\<', Operator), + # := + (r'\:\=', Operator), + # OBJECT + (r'\[[^]]+\]', Name.Variable.Class), + # keywords + (words(( + 'DIMENSION', 'DIMENSIONS', 'BOTTOM', 'METRIC', 'COUNT', 'OTHER', + 'FACT', 'WITH', 'TOP', 'OR', 'ATTRIBUTE', 'CREATE', 'PARENT', + 'FALSE', 'ROW', 'ROWS', 'FROM', 'ALL', 'AS', 'PF', 'COLUMN', + 'COLUMNS', 'DEFINE', 'REPORT', 'LIMIT', 'TABLE', 'LIKE', 'AND', + 'BY', 'BETWEEN', 'EXCEPT', 'SELECT', 'MATCH', 'WHERE', 'TRUE', + 'FOR', 'IN', 'WITHOUT', 'FILTER', 'ALIAS', 'WHEN', 'NOT', 'ON', + 'KEYS', 'KEY', 'FULLSET', 'PRIMARY', 'LABELS', 'LABEL', + 'VISUAL', 'TITLE', 'DESCRIPTION', 'FOLDER', 'ALTER', 'DROP', + 'ADD', 'DATASET', 'DATATYPE', 'INT', 'BIGINT', 'DOUBLE', 'DATE', + 'VARCHAR', 'DECIMAL', 'SYNCHRONIZE', 'TYPE', 'DEFAULT', 'ORDER', + 'ASC', 'DESC', 'HYPERLINK', 'INCLUDE', 'TEMPLATE', 'MODIFY'), + suffix=r'\b'), + Keyword), + # FUNCNAME + (r'[a-z]\w*\b', Name.Function), + # Comments + (r'#.*', Comment.Single), + # Punctuation + (r'[,;()]', Punctuation), + # Space is not significant + (r'\s+', Text) + ], + 'string-literal': [ + (r'\\[tnrfbae"\\]', String.Escape), + (r'"', String, '#pop'), + (r'[^\\"]+', String) + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/c_cpp.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/c_cpp.py new file mode 100644 index 0000000..128cc6c --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/c_cpp.py @@ -0,0 +1,233 @@ +# -*- coding: 
utf-8 -*- +""" + pygments.lexers.c_cpp + ~~~~~~~~~~~~~~~~~~~~~ + + Lexers for C/C++ languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include, bygroups, using, \ + this, inherit, default, words +from pygments.util import get_bool_opt +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Error + +__all__ = ['CLexer', 'CppLexer'] + + +class CFamilyLexer(RegexLexer): + """ + For C family source code. This is used as a base class to avoid repetitious + definitions. + """ + + #: optional Comment or Whitespace + _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+' + #: only one /* */ style comment + _ws1 = r'\s*(?:/[*].*?[*]/\s*)*' + + tokens = { + 'whitespace': [ + # preprocessor directives: without whitespace + ('^#if\s+0', Comment.Preproc, 'if0'), + ('^#', Comment.Preproc, 'macro'), + # or with whitespace + ('^(' + _ws1 + r')(#if\s+0)', + bygroups(using(this), Comment.Preproc), 'if0'), + ('^(' + _ws1 + ')(#)', + bygroups(using(this), Comment.Preproc), 'macro'), + (r'\n', Text), + (r'\s+', Text), + (r'\\\n', Text), # line continuation + (r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single), + (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), + ], + 'statements': [ + (r'L?"', String, 'string'), + (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char), + (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float), + (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), + (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex), + (r'0[0-7]+[LlUu]*', Number.Oct), + (r'\d+[LlUu]*', Number.Integer), + (r'\*/', Error), + (r'[~!%^&*+=|?:<>/-]', Operator), + (r'[()\[\],.]', Punctuation), + (words(('auto', 'break', 'case', 'const', 'continue', 'default', 'do', + 'else', 'enum', 'extern', 'for', 'goto', 'if', 'register', + 'restricted', 'return', 'sizeof', 'static', 'struct', + 'switch', 'typedef', 'union', 'volatile', 
'while'), + suffix=r'\b'), Keyword), + (r'(bool|int|long|float|short|double|char|unsigned|signed|void|' + r'[a-z_][a-z0-9_]*_t)\b', + Keyword.Type), + (words(('inline', '_inline', '__inline', 'naked', 'restrict', + 'thread', 'typename'), suffix=r'\b'), Keyword.Reserved), + # Vector intrinsics + (r'(__m(128i|128d|128|64))\b', Keyword.Reserved), + # Microsoft-isms + (words(( + 'asm', 'int8', 'based', 'except', 'int16', 'stdcall', 'cdecl', + 'fastcall', 'int32', 'declspec', 'finally', 'int64', 'try', + 'leave', 'wchar_t', 'w64', 'unaligned', 'raise', 'noop', + 'identifier', 'forceinline', 'assume'), + prefix=r'__', suffix=r'\b'), Keyword.Reserved), + (r'(true|false|NULL)\b', Name.Builtin), + (r'([a-zA-Z_]\w*)(\s*)(:)(?!:)', bygroups(Name.Label, Text, Punctuation)), + ('[a-zA-Z_]\w*', Name), + ], + 'root': [ + include('whitespace'), + # functions + (r'((?:[\w*\s])+?(?:\s|[*]))' # return arguments + r'([a-zA-Z_]\w*)' # method name + r'(\s*\([^;]*?\))' # signature + r'(' + _ws + r')?(\{)', + bygroups(using(this), Name.Function, using(this), using(this), + Punctuation), + 'function'), + # function declarations + (r'((?:[\w*\s])+?(?:\s|[*]))' # return arguments + r'([a-zA-Z_]\w*)' # method name + r'(\s*\([^;]*?\))' # signature + r'(' + _ws + r')?(;)', + bygroups(using(this), Name.Function, using(this), using(this), + Punctuation)), + default('statement'), + ], + 'statement': [ + include('whitespace'), + include('statements'), + ('[{}]', Punctuation), + (';', Punctuation, '#pop'), + ], + 'function': [ + include('whitespace'), + include('statements'), + (';', Punctuation), + (r'\{', Punctuation, '#push'), + (r'\}', Punctuation, '#pop'), + ], + 'string': [ + (r'"', String, '#pop'), + (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|' + r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape), + (r'[^\\"\n]+', String), # all other characters + (r'\\\n', String), # line continuation + (r'\\', String), # stray backslash + ], + 'macro': [ + (r'[^/\n]+', Comment.Preproc), + 
(r'/[*](.|\n)*?[*]/', Comment.Multiline), + (r'//.*?\n', Comment.Single, '#pop'), + (r'/', Comment.Preproc), + (r'(?<=\\)\n', Comment.Preproc), + (r'\n', Comment.Preproc, '#pop'), + ], + 'if0': [ + (r'^\s*#if.*?(?)', Text, '#pop'), + ], + } + + def analyse_text(text): + if re.search('#include <[a-z]+>', text): + return 0.2 + if re.search('using namespace ', text): + return 0.4 diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/c_like.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/c_like.py new file mode 100644 index 0000000..9617975 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/c_like.py @@ -0,0 +1,413 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.c_like + ~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for other C-like languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include, bygroups, inherit, words, \ + default +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +from pygments.lexers.c_cpp import CLexer, CppLexer +from pygments.lexers import _mql_builtins + +__all__ = ['PikeLexer', 'NesCLexer', 'ClayLexer', 'ECLexer', 'ValaLexer', + 'CudaLexer', 'SwigLexer', 'MqlLexer'] + + +class PikeLexer(CppLexer): + """ + For `Pike `_ source code. + + .. 
versionadded:: 2.0 + """ + name = 'Pike' + aliases = ['pike'] + filenames = ['*.pike', '*.pmod'] + mimetypes = ['text/x-pike'] + + tokens = { + 'statements': [ + (words(( + 'catch', 'new', 'private', 'protected', 'public', 'gauge', + 'throw', 'throws', 'class', 'interface', 'implement', 'abstract', 'extends', 'from', + 'this', 'super', 'constant', 'final', 'static', 'import', 'use', 'extern', + 'inline', 'proto', 'break', 'continue', 'if', 'else', 'for', + 'while', 'do', 'switch', 'case', 'as', 'in', 'version', 'return', 'true', 'false', 'null', + '__VERSION__', '__MAJOR__', '__MINOR__', '__BUILD__', '__REAL_VERSION__', + '__REAL_MAJOR__', '__REAL_MINOR__', '__REAL_BUILD__', '__DATE__', '__TIME__', + '__FILE__', '__DIR__', '__LINE__', '__AUTO_BIGNUM__', '__NT__', '__PIKE__', + '__amigaos__', '_Pragma', 'static_assert', 'defined', 'sscanf'), suffix=r'\b'), + Keyword), + (r'(bool|int|long|float|short|double|char|string|object|void|mapping|' + r'array|multiset|program|function|lambda|mixed|' + r'[a-z_][a-z0-9_]*_t)\b', + Keyword.Type), + (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'), + (r'[~!%^&*+=|?:<>/@-]', Operator), + inherit, + ], + 'classname': [ + (r'[a-zA-Z_]\w*', Name.Class, '#pop'), + # template specification + (r'\s*(?=>)', Text, '#pop'), + ], + } + + +class NesCLexer(CLexer): + """ + For `nesC `_ source code with preprocessor + directives. + + .. 
versionadded:: 2.0 + """ + name = 'nesC' + aliases = ['nesc'] + filenames = ['*.nc'] + mimetypes = ['text/x-nescsrc'] + + tokens = { + 'statements': [ + (words(( + 'abstract', 'as', 'async', 'atomic', 'call', 'command', 'component', + 'components', 'configuration', 'event', 'extends', 'generic', + 'implementation', 'includes', 'interface', 'module', 'new', 'norace', + 'post', 'provides', 'signal', 'task', 'uses'), suffix=r'\b'), + Keyword), + (words(('nx_struct', 'nx_union', 'nx_int8_t', 'nx_int16_t', 'nx_int32_t', + 'nx_int64_t', 'nx_uint8_t', 'nx_uint16_t', 'nx_uint32_t', + 'nx_uint64_t'), suffix=r'\b'), + Keyword.Type), + inherit, + ], + } + + +class ClayLexer(RegexLexer): + """ + For `Clay `_ source. + + .. versionadded:: 2.0 + """ + name = 'Clay' + filenames = ['*.clay'] + aliases = ['clay'] + mimetypes = ['text/x-clay'] + tokens = { + 'root': [ + (r'\s', Text), + (r'//.*?$', Comment.Singleline), + (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), + (r'\b(public|private|import|as|record|variant|instance' + r'|define|overload|default|external|alias' + r'|rvalue|ref|forward|inline|noinline|forceinline' + r'|enum|var|and|or|not|if|else|goto|return|while' + r'|switch|case|break|continue|for|in|true|false|try|catch|throw' + r'|finally|onerror|staticassert|eval|when|newtype' + r'|__FILE__|__LINE__|__COLUMN__|__ARG__' + r')\b', Keyword), + (r'[~!%^&*+=|:<>/-]', Operator), + (r'[#(){}\[\],;.]', Punctuation), + (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex), + (r'\d+[LlUu]*', Number.Integer), + (r'\b(true|false)\b', Name.Builtin), + (r'(?i)[a-z_?][\w?]*', Name), + (r'"""', String, 'tdqs'), + (r'"', String, 'dqs'), + ], + 'strings': [ + (r'(?i)\\(x[0-9a-f]{2}|.)', String.Escape), + (r'.', String), + ], + 'nl': [ + (r'\n', String), + ], + 'dqs': [ + (r'"', String, '#pop'), + include('strings'), + ], + 'tdqs': [ + (r'"""', String, '#pop'), + include('strings'), + include('nl'), + ], + } + + +class ECLexer(CLexer): + """ + For eC source code with preprocessor directives. 
+ + .. versionadded:: 1.5 + """ + name = 'eC' + aliases = ['ec'] + filenames = ['*.ec', '*.eh'] + mimetypes = ['text/x-echdr', 'text/x-ecsrc'] + + tokens = { + 'statements': [ + (words(( + 'virtual', 'class', 'private', 'public', 'property', 'import', + 'delete', 'new', 'new0', 'renew', 'renew0', 'define', 'get', + 'set', 'remote', 'dllexport', 'dllimport', 'stdcall', 'subclass', + '__on_register_module', 'namespace', 'using', 'typed_object', + 'any_object', 'incref', 'register', 'watch', 'stopwatching', 'firewatchers', + 'watchable', 'class_designer', 'class_fixed', 'class_no_expansion', 'isset', + 'class_default_property', 'property_category', 'class_data', + 'class_property', 'thisclass', 'dbtable', 'dbindex', + 'database_open', 'dbfield'), suffix=r'\b'), Keyword), + (words(('uint', 'uint16', 'uint32', 'uint64', 'bool', 'byte', + 'unichar', 'int64'), suffix=r'\b'), + Keyword.Type), + (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'), + (r'(null|value|this)\b', Name.Builtin), + inherit, + ], + 'classname': [ + (r'[a-zA-Z_]\w*', Name.Class, '#pop'), + # template specification + (r'\s*(?=>)', Text, '#pop'), + ], + } + + +class ValaLexer(RegexLexer): + """ + For Vala source code with preprocessor directives. + + .. 
versionadded:: 1.1 + """ + name = 'Vala' + aliases = ['vala', 'vapi'] + filenames = ['*.vala', '*.vapi'] + mimetypes = ['text/x-vala'] + + tokens = { + 'whitespace': [ + (r'^\s*#if\s+0', Comment.Preproc, 'if0'), + (r'\n', Text), + (r'\s+', Text), + (r'\\\n', Text), # line continuation + (r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single), + (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), + ], + 'statements': [ + (r'[L@]?"', String, 'string'), + (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", + String.Char), + (r'(?s)""".*?"""', String), # verbatim strings + (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float), + (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), + (r'0x[0-9a-fA-F]+[Ll]?', Number.Hex), + (r'0[0-7]+[Ll]?', Number.Oct), + (r'\d+[Ll]?', Number.Integer), + (r'[~!%^&*+=|?:<>/-]', Operator), + (r'(\[)(Compact|Immutable|(?:Boolean|Simple)Type)(\])', + bygroups(Punctuation, Name.Decorator, Punctuation)), + # TODO: "correctly" parse complex code attributes + (r'(\[)(CCode|(?:Integer|Floating)Type)', + bygroups(Punctuation, Name.Decorator)), + (r'[()\[\],.]', Punctuation), + (words(( + 'as', 'base', 'break', 'case', 'catch', 'construct', 'continue', + 'default', 'delete', 'do', 'else', 'enum', 'finally', 'for', + 'foreach', 'get', 'if', 'in', 'is', 'lock', 'new', 'out', 'params', + 'return', 'set', 'sizeof', 'switch', 'this', 'throw', 'try', + 'typeof', 'while', 'yield'), suffix=r'\b'), + Keyword), + (words(( + 'abstract', 'const', 'delegate', 'dynamic', 'ensures', 'extern', + 'inline', 'internal', 'override', 'owned', 'private', 'protected', + 'public', 'ref', 'requires', 'signal', 'static', 'throws', 'unowned', + 'var', 'virtual', 'volatile', 'weak', 'yields'), suffix=r'\b'), + Keyword.Declaration), + (r'(namespace|using)(\s+)', bygroups(Keyword.Namespace, Text), + 'namespace'), + (r'(class|errordomain|interface|struct)(\s+)', + bygroups(Keyword.Declaration, Text), 'class'), + (r'(\.)([a-zA-Z_]\w*)', + bygroups(Operator, Name.Attribute)), + # 
void is an actual keyword, others are in glib-2.0.vapi + (words(( + 'void', 'bool', 'char', 'double', 'float', 'int', 'int8', 'int16', + 'int32', 'int64', 'long', 'short', 'size_t', 'ssize_t', 'string', + 'time_t', 'uchar', 'uint', 'uint8', 'uint16', 'uint32', 'uint64', + 'ulong', 'unichar', 'ushort'), suffix=r'\b'), + Keyword.Type), + (r'(true|false|null)\b', Name.Builtin), + ('[a-zA-Z_]\w*', Name), + ], + 'root': [ + include('whitespace'), + default('statement'), + ], + 'statement': [ + include('whitespace'), + include('statements'), + ('[{}]', Punctuation), + (';', Punctuation, '#pop'), + ], + 'string': [ + (r'"', String, '#pop'), + (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), + (r'[^\\"\n]+', String), # all other characters + (r'\\\n', String), # line continuation + (r'\\', String), # stray backslash + ], + 'if0': [ + (r'^\s*#if.*?(?`_ + source. + + .. versionadded:: 1.6 + """ + name = 'CUDA' + filenames = ['*.cu', '*.cuh'] + aliases = ['cuda', 'cu'] + mimetypes = ['text/x-cuda'] + + function_qualifiers = set(('__device__', '__global__', '__host__', + '__noinline__', '__forceinline__')) + variable_qualifiers = set(('__device__', '__constant__', '__shared__', + '__restrict__')) + vector_types = set(('char1', 'uchar1', 'char2', 'uchar2', 'char3', 'uchar3', + 'char4', 'uchar4', 'short1', 'ushort1', 'short2', 'ushort2', + 'short3', 'ushort3', 'short4', 'ushort4', 'int1', 'uint1', + 'int2', 'uint2', 'int3', 'uint3', 'int4', 'uint4', 'long1', + 'ulong1', 'long2', 'ulong2', 'long3', 'ulong3', 'long4', + 'ulong4', 'longlong1', 'ulonglong1', 'longlong2', + 'ulonglong2', 'float1', 'float2', 'float3', 'float4', + 'double1', 'double2', 'dim3')) + variables = set(('gridDim', 'blockIdx', 'blockDim', 'threadIdx', 'warpSize')) + functions = set(('__threadfence_block', '__threadfence', '__threadfence_system', + '__syncthreads', '__syncthreads_count', '__syncthreads_and', + '__syncthreads_or')) + execution_confs = set(('<<<', '>>>')) + + def 
get_tokens_unprocessed(self, text): + for index, token, value in CLexer.get_tokens_unprocessed(self, text): + if token is Name: + if value in self.variable_qualifiers: + token = Keyword.Type + elif value in self.vector_types: + token = Keyword.Type + elif value in self.variables: + token = Name.Builtin + elif value in self.execution_confs: + token = Keyword.Pseudo + elif value in self.function_qualifiers: + token = Keyword.Reserved + elif value in self.functions: + token = Name.Function + yield index, token, value + + +class SwigLexer(CppLexer): + """ + For `SWIG `_ source code. + + .. versionadded:: 2.0 + """ + name = 'SWIG' + aliases = ['swig'] + filenames = ['*.swg', '*.i'] + mimetypes = ['text/swig'] + priority = 0.04 # Lower than C/C++ and Objective C/C++ + + tokens = { + 'statements': [ + # SWIG directives + (r'(%[a-z_][a-z0-9_]*)', Name.Function), + # Special variables + ('\$\**\&?\w+', Name), + # Stringification / additional preprocessor directives + (r'##*[a-zA-Z_]\w*', Comment.Preproc), + inherit, + ], + } + + # This is a far from complete set of SWIG directives + swig_directives = set(( + # Most common directives + '%apply', '%define', '%director', '%enddef', '%exception', '%extend', + '%feature', '%fragment', '%ignore', '%immutable', '%import', '%include', + '%inline', '%insert', '%module', '%newobject', '%nspace', '%pragma', + '%rename', '%shared_ptr', '%template', '%typecheck', '%typemap', + # Less common directives + '%arg', '%attribute', '%bang', '%begin', '%callback', '%catches', '%clear', + '%constant', '%copyctor', '%csconst', '%csconstvalue', '%csenum', + '%csmethodmodifiers', '%csnothrowexception', '%default', '%defaultctor', + '%defaultdtor', '%defined', '%delete', '%delobject', '%descriptor', + '%exceptionclass', '%exceptionvar', '%extend_smart_pointer', '%fragments', + '%header', '%ifcplusplus', '%ignorewarn', '%implicit', '%implicitconv', + '%init', '%javaconst', '%javaconstvalue', '%javaenum', '%javaexception', + '%javamethodmodifiers', 
'%kwargs', '%luacode', '%mutable', '%naturalvar', + '%nestedworkaround', '%perlcode', '%pythonabc', '%pythonappend', + '%pythoncallback', '%pythoncode', '%pythondynamic', '%pythonmaybecall', + '%pythonnondynamic', '%pythonprepend', '%refobject', '%shadow', '%sizeof', + '%trackobjects', '%types', '%unrefobject', '%varargs', '%warn', + '%warnfilter')) + + def analyse_text(text): + rv = 0 + # Search for SWIG directives, which are conventionally at the beginning of + # a line. The probability of them being within a line is low, so let another + # lexer win in this case. + matches = re.findall(r'^\s*(%[a-z_][a-z0-9_]*)', text, re.M) + for m in matches: + if m in SwigLexer.swig_directives: + rv = 0.98 + break + else: + rv = 0.91 # Fraction higher than MatlabLexer + return rv + + +class MqlLexer(CppLexer): + """ + For `MQL4 `_ and + `MQL5 `_ source code. + + .. versionadded:: 2.0 + """ + name = 'MQL' + aliases = ['mql', 'mq4', 'mq5', 'mql4', 'mql5'] + filenames = ['*.mq4', '*.mq5', '*.mqh'] + mimetypes = ['text/x-mql'] + + tokens = { + 'statements': [ + (words(_mql_builtins.keywords, suffix=r'\b'), Keyword), + (words(_mql_builtins.c_types, suffix=r'\b'), Keyword.Type), + (words(_mql_builtins.types, suffix=r'\b'), Name.Function), + (words(_mql_builtins.constants, suffix=r'\b'), Name.Constant), + (words(_mql_builtins.colors, prefix='(clr)?', suffix=r'\b'), + Name.Constant), + inherit, + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/chapel.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/chapel.py new file mode 100644 index 0000000..417301d --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/chapel.py @@ -0,0 +1,98 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.chapel + ~~~~~~~~~~~~~~~~~~~~~~ + + Lexer for the Chapel language. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +from pygments.lexer import RegexLexer, bygroups, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['ChapelLexer'] + + +class ChapelLexer(RegexLexer): + """ + For `Chapel `_ source. + + .. versionadded:: 2.0 + """ + name = 'Chapel' + filenames = ['*.chpl'] + aliases = ['chapel', 'chpl'] + # mimetypes = ['text/x-chapel'] + + tokens = { + 'root': [ + (r'\n', Text), + (r'\s+', Text), + (r'\\\n', Text), + + (r'//(.*?)\n', Comment.Single), + (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), + + (r'(config|const|in|inout|out|param|ref|type|var)\b', + Keyword.Declaration), + (r'(false|nil|true)\b', Keyword.Constant), + (r'(bool|complex|imag|int|opaque|range|real|string|uint)\b', + Keyword.Type), + (words(( + 'align', 'atomic', 'begin', 'break', 'by', 'cobegin', 'coforall', + 'continue', 'delete', 'dmapped', 'do', 'domain', 'else', 'enum', + 'export', 'extern', 'for', 'forall', 'if', 'index', 'inline', + 'iter', 'label', 'lambda', 'let', 'local', 'new', 'noinit', 'on', + 'otherwise', 'pragma', 'reduce', 'return', 'scan', 'select', + 'serial', 'single', 'sparse', 'subdomain', 'sync', 'then', 'use', + 'when', 'where', 'while', 'with', 'yield', 'zip'), suffix=r'\b'), + Keyword), + (r'(proc)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'procname'), + (r'(class|module|record|union)(\s+)', bygroups(Keyword, Text), + 'classname'), + + # imaginary integers + (r'\d+i', Number), + (r'\d+\.\d*([Ee][-+]\d+)?i', Number), + (r'\.\d+([Ee][-+]\d+)?i', Number), + (r'\d+[Ee][-+]\d+i', Number), + + # reals cannot end with a period due to lexical ambiguity with + # .. operator. See reference for rationale. 
+ (r'(\d*\.\d+)([eE][+-]?[0-9]+)?i?', Number.Float), + (r'\d+[eE][+-]?[0-9]+i?', Number.Float), + + # integer literals + # -- binary + (r'0[bB][01]+', Number.Bin), + # -- hex + (r'0[xX][0-9a-fA-F]+', Number.Hex), + # -- octal + (r'0[oO][0-7]+', Number.Oct), + # -- decimal + (r'[0-9]+', Number.Integer), + + # strings + (r'["\'](\\\\|\\"|[^"\'])*["\']', String), + + # tokens + (r'(=|\+=|-=|\*=|/=|\*\*=|%=|&=|\|=|\^=|&&=|\|\|=|<<=|>>=|' + r'<=>|<~>|\.\.|by|#|\.\.\.|' + r'&&|\|\||!|&|\||\^|~|<<|>>|' + r'==|!=|<=|>=|<|>|' + r'[+\-*/%]|\*\*)', Operator), + (r'[:;,.?()\[\]{}]', Punctuation), + + # identifiers + (r'[a-zA-Z_][\w$]*', Name.Other), + ], + 'classname': [ + (r'[a-zA-Z_][\w$]*', Name.Class, '#pop'), + ], + 'procname': [ + (r'[a-zA-Z_][\w$]*', Name.Function, '#pop'), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/compiled.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/compiled.py new file mode 100644 index 0000000..74326c3 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/compiled.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.compiled + ~~~~~~~~~~~~~~~~~~~~~~~~ + + Just export lexer classes previously contained in this module. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +from pygments.lexers.jvm import JavaLexer, ScalaLexer +from pygments.lexers.c_cpp import CLexer, CppLexer +from pygments.lexers.d import DLexer +from pygments.lexers.objective import ObjectiveCLexer, \ + ObjectiveCppLexer, LogosLexer +from pygments.lexers.go import GoLexer +from pygments.lexers.rust import RustLexer +from pygments.lexers.c_like import ECLexer, ValaLexer, CudaLexer +from pygments.lexers.pascal import DelphiLexer, Modula2Lexer, AdaLexer +from pygments.lexers.business import CobolLexer, CobolFreeformatLexer +from pygments.lexers.fortran import FortranLexer +from pygments.lexers.prolog import PrologLexer +from pygments.lexers.python import CythonLexer +from pygments.lexers.graphics import GLShaderLexer +from pygments.lexers.ml import OcamlLexer +from pygments.lexers.basic import BlitzBasicLexer, BlitzMaxLexer, MonkeyLexer +from pygments.lexers.dylan import DylanLexer, DylanLidLexer, DylanConsoleLexer +from pygments.lexers.ooc import OocLexer +from pygments.lexers.felix import FelixLexer +from pygments.lexers.nimrod import NimrodLexer + +__all__ = [] diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/configs.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/configs.py new file mode 100644 index 0000000..e621c84 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/configs.py @@ -0,0 +1,546 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.configs + ~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for configuration file formats. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import re + +from pygments.lexer import RegexLexer, default, words, bygroups, include, using +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Whitespace +from pygments.lexers.shell import BashLexer + +__all__ = ['IniLexer', 'RegeditLexer', 'PropertiesLexer', 'KconfigLexer', + 'Cfengine3Lexer', 'ApacheConfLexer', 'SquidConfLexer', + 'NginxConfLexer', 'LighttpdConfLexer', 'DockerLexer'] + + +class IniLexer(RegexLexer): + """ + Lexer for configuration files in INI style. + """ + + name = 'INI' + aliases = ['ini', 'cfg', 'dosini'] + filenames = ['*.ini', '*.cfg'] + mimetypes = ['text/x-ini'] + + tokens = { + 'root': [ + (r'\s+', Text), + (r'[;#].*', Comment.Single), + (r'\[.*?\]$', Keyword), + (r'(.*?)([ \t]*)(=)([ \t]*)(.*(?:\n[ \t].+)*)', + bygroups(Name.Attribute, Text, Operator, Text, String)) + ] + } + + def analyse_text(text): + npos = text.find('\n') + if npos < 3: + return False + return text[0] == '[' and text[npos-1] == ']' + + +class RegeditLexer(RegexLexer): + """ + Lexer for `Windows Registry + `_ files produced + by regedit. + + .. versionadded:: 1.6 + """ + + name = 'reg' + aliases = ['registry'] + filenames = ['*.reg'] + mimetypes = ['text/x-windows-registry'] + + tokens = { + 'root': [ + (r'Windows Registry Editor.*', Text), + (r'\s+', Text), + (r'[;#].*', Comment.Single), + (r'(\[)(-?)(HKEY_[A-Z_]+)(.*?\])$', + bygroups(Keyword, Operator, Name.Builtin, Keyword)), + # String keys, which obey somewhat normal escaping + (r'("(?:\\"|\\\\|[^"])+")([ \t]*)(=)([ \t]*)', + bygroups(Name.Attribute, Text, Operator, Text), + 'value'), + # Bare keys (includes @) + (r'(.*?)([ \t]*)(=)([ \t]*)', + bygroups(Name.Attribute, Text, Operator, Text), + 'value'), + ], + 'value': [ + (r'-', Operator, '#pop'), # delete value + (r'(dword|hex(?:\([0-9a-fA-F]\))?)(:)([0-9a-fA-F,]+)', + bygroups(Name.Variable, Punctuation, Number), '#pop'), + # As far as I know, .reg files do not support line continuation. 
+ (r'.+', String, '#pop'), + default('#pop'), + ] + } + + def analyse_text(text): + return text.startswith('Windows Registry Editor') + + +class PropertiesLexer(RegexLexer): + """ + Lexer for configuration files in Java's properties format. + + .. versionadded:: 1.4 + """ + + name = 'Properties' + aliases = ['properties', 'jproperties'] + filenames = ['*.properties'] + mimetypes = ['text/x-java-properties'] + + tokens = { + 'root': [ + (r'\s+', Text), + (r'(?:[;#]|//).*$', Comment), + (r'(.*?)([ \t]*)([=:])([ \t]*)(.*(?:(?<=\\)\n.*)*)', + bygroups(Name.Attribute, Text, Operator, Text, String)), + ], + } + + +def _rx_indent(level): + # Kconfig *always* interprets a tab as 8 spaces, so this is the default. + # Edit this if you are in an environment where KconfigLexer gets expanded + # input (tabs expanded to spaces) and the expansion tab width is != 8, + # e.g. in connection with Trac (trac.ini, [mimeviewer], tab_width). + # Value range here is 2 <= {tab_width} <= 8. + tab_width = 8 + # Regex matching a given indentation {level}, assuming that indentation is + # a multiple of {tab_width}. In other cases there might be problems. + if tab_width == 2: + space_repeat = '+' + else: + space_repeat = '{1,%d}' % (tab_width - 1) + if level == 1: + level_repeat = '' + else: + level_repeat = '{%s}' % level + return r'(?:\t| %s\t| {%s})%s.*\n' % (space_repeat, tab_width, level_repeat) + + +class KconfigLexer(RegexLexer): + """ + For Linux-style Kconfig files. + + .. 
versionadded:: 1.6 + """ + + name = 'Kconfig' + aliases = ['kconfig', 'menuconfig', 'linux-config', 'kernel-config'] + # Adjust this if new kconfig file names appear in your environment + filenames = ['Kconfig', '*Config.in*', 'external.in*', + 'standard-modules.in'] + mimetypes = ['text/x-kconfig'] + # No re.MULTILINE, indentation-aware help text needs line-by-line handling + flags = 0 + + def call_indent(level): + # If indentation >= {level} is detected, enter state 'indent{level}' + return (_rx_indent(level), String.Doc, 'indent%s' % level) + + def do_indent(level): + # Print paragraphs of indentation level >= {level} as String.Doc, + # ignoring blank lines. Then return to 'root' state. + return [ + (_rx_indent(level), String.Doc), + (r'\s*\n', Text), + default('#pop:2') + ] + + tokens = { + 'root': [ + (r'\s+', Text), + (r'#.*?\n', Comment.Single), + (words(( + 'mainmenu', 'config', 'menuconfig', 'choice', 'endchoice', + 'comment', 'menu', 'endmenu', 'visible if', 'if', 'endif', + 'source', 'prompt', 'select', 'depends on', 'default', + 'range', 'option'), suffix=r'\b'), + Keyword), + (r'(---help---|help)[\t ]*\n', Keyword, 'help'), + (r'(bool|tristate|string|hex|int|defconfig_list|modules|env)\b', + Name.Builtin), + (r'[!=&|]', Operator), + (r'[()]', Punctuation), + (r'[0-9]+', Number.Integer), + (r"'(''|[^'])*'", String.Single), + (r'"(""|[^"])*"', String.Double), + (r'\S+', Text), + ], + # Help text is indented, multi-line and ends when a lower indentation + # level is detected. + 'help': [ + # Skip blank lines after help token, if any + (r'\s*\n', Text), + # Determine the first help line's indentation level heuristically(!). + # Attention: this is not perfect, but works for 99% of "normal" + # indentation schemes up to a max. indentation level of 7. 
+ call_indent(7), + call_indent(6), + call_indent(5), + call_indent(4), + call_indent(3), + call_indent(2), + call_indent(1), + default('#pop'), # for incomplete help sections without text + ], + # Handle text for indentation levels 7 to 1 + 'indent7': do_indent(7), + 'indent6': do_indent(6), + 'indent5': do_indent(5), + 'indent4': do_indent(4), + 'indent3': do_indent(3), + 'indent2': do_indent(2), + 'indent1': do_indent(1), + } + + +class Cfengine3Lexer(RegexLexer): + """ + Lexer for `CFEngine3 `_ policy files. + + .. versionadded:: 1.5 + """ + + name = 'CFEngine3' + aliases = ['cfengine3', 'cf3'] + filenames = ['*.cf'] + mimetypes = [] + + tokens = { + 'root': [ + (r'#.*?\n', Comment), + (r'(body)(\s+)(\S+)(\s+)(control)', + bygroups(Keyword, Text, Keyword, Text, Keyword)), + (r'(body|bundle)(\s+)(\S+)(\s+)(\w+)(\()', + bygroups(Keyword, Text, Keyword, Text, Name.Function, Punctuation), + 'arglist'), + (r'(body|bundle)(\s+)(\S+)(\s+)(\w+)', + bygroups(Keyword, Text, Keyword, Text, Name.Function)), + (r'(")([^"]+)(")(\s+)(string|slist|int|real)(\s*)(=>)(\s*)', + bygroups(Punctuation, Name.Variable, Punctuation, + Text, Keyword.Type, Text, Operator, Text)), + (r'(\S+)(\s*)(=>)(\s*)', + bygroups(Keyword.Reserved, Text, Operator, Text)), + (r'"', String, 'string'), + (r'(\w+)(\()', bygroups(Name.Function, Punctuation)), + (r'([\w.!&|()]+)(::)', bygroups(Name.Class, Punctuation)), + (r'(\w+)(:)', bygroups(Keyword.Declaration, Punctuation)), + (r'@[{(][^)}]+[})]', Name.Variable), + (r'[(){},;]', Punctuation), + (r'=>', Operator), + (r'->', Operator), + (r'\d+\.\d+', Number.Float), + (r'\d+', Number.Integer), + (r'\w+', Name.Function), + (r'\s+', Text), + ], + 'string': [ + (r'\$[{(]', String.Interpol, 'interpol'), + (r'\\.', String.Escape), + (r'"', String, '#pop'), + (r'\n', String), + (r'.', String), + ], + 'interpol': [ + (r'\$[{(]', String.Interpol, '#push'), + (r'[})]', String.Interpol, '#pop'), + (r'[^${()}]+', String.Interpol), + ], + 'arglist': [ + (r'\)', 
Punctuation, '#pop'), + (r',', Punctuation), + (r'\w+', Name.Variable), + (r'\s+', Text), + ], + } + + +class ApacheConfLexer(RegexLexer): + """ + Lexer for configuration files following the Apache config file + format. + + .. versionadded:: 0.6 + """ + + name = 'ApacheConf' + aliases = ['apacheconf', 'aconf', 'apache'] + filenames = ['.htaccess', 'apache.conf', 'apache2.conf'] + mimetypes = ['text/x-apacheconf'] + flags = re.MULTILINE | re.IGNORECASE + + tokens = { + 'root': [ + (r'\s+', Text), + (r'(#.*?)$', Comment), + (r'(<[^\s>]+)(?:(\s+)(.*?))?(>)', + bygroups(Name.Tag, Text, String, Name.Tag)), + (r'([a-z]\w*)(\s+)', + bygroups(Name.Builtin, Text), 'value'), + (r'\.+', Text), + ], + 'value': [ + (r'\\\n', Text), + (r'$', Text, '#pop'), + (r'\\', Text), + (r'[^\S\n]+', Text), + (r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number), + (r'\d+', Number), + (r'/([a-z0-9][\w./-]+)', String.Other), + (r'(on|off|none|any|all|double|email|dns|min|minimal|' + r'os|productonly|full|emerg|alert|crit|error|warn|' + r'notice|info|debug|registry|script|inetd|standalone|' + r'user|group)\b', Keyword), + (r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double), + (r'[^\s"\\]+', Text) + ], + } + + +class SquidConfLexer(RegexLexer): + """ + Lexer for `squid `_ configuration files. + + .. 
versionadded:: 0.9 + """ + + name = 'SquidConf' + aliases = ['squidconf', 'squid.conf', 'squid'] + filenames = ['squid.conf'] + mimetypes = ['text/x-squidconf'] + flags = re.IGNORECASE + + keywords = ( + "access_log", "acl", "always_direct", "announce_host", + "announce_period", "announce_port", "announce_to", "anonymize_headers", + "append_domain", "as_whois_server", "auth_param_basic", + "authenticate_children", "authenticate_program", "authenticate_ttl", + "broken_posts", "buffered_logs", "cache_access_log", "cache_announce", + "cache_dir", "cache_dns_program", "cache_effective_group", + "cache_effective_user", "cache_host", "cache_host_acl", + "cache_host_domain", "cache_log", "cache_mem", "cache_mem_high", + "cache_mem_low", "cache_mgr", "cachemgr_passwd", "cache_peer", + "cache_peer_access", "cahce_replacement_policy", "cache_stoplist", + "cache_stoplist_pattern", "cache_store_log", "cache_swap", + "cache_swap_high", "cache_swap_log", "cache_swap_low", "client_db", + "client_lifetime", "client_netmask", "connect_timeout", "coredump_dir", + "dead_peer_timeout", "debug_options", "delay_access", "delay_class", + "delay_initial_bucket_level", "delay_parameters", "delay_pools", + "deny_info", "dns_children", "dns_defnames", "dns_nameservers", + "dns_testnames", "emulate_httpd_log", "err_html_text", + "fake_user_agent", "firewall_ip", "forwarded_for", "forward_snmpd_port", + "fqdncache_size", "ftpget_options", "ftpget_program", "ftp_list_width", + "ftp_passive", "ftp_user", "half_closed_clients", "header_access", + "header_replace", "hierarchy_stoplist", "high_response_time_warning", + "high_page_fault_warning", "hosts_file", "htcp_port", "http_access", + "http_anonymizer", "httpd_accel", "httpd_accel_host", + "httpd_accel_port", "httpd_accel_uses_host_header", + "httpd_accel_with_proxy", "http_port", "http_reply_access", + "icp_access", "icp_hit_stale", "icp_port", "icp_query_timeout", + "ident_lookup", "ident_lookup_access", "ident_timeout", + 
"incoming_http_average", "incoming_icp_average", "inside_firewall", + "ipcache_high", "ipcache_low", "ipcache_size", "local_domain", + "local_ip", "logfile_rotate", "log_fqdn", "log_icp_queries", + "log_mime_hdrs", "maximum_object_size", "maximum_single_addr_tries", + "mcast_groups", "mcast_icp_query_timeout", "mcast_miss_addr", + "mcast_miss_encode_key", "mcast_miss_port", "memory_pools", + "memory_pools_limit", "memory_replacement_policy", "mime_table", + "min_http_poll_cnt", "min_icp_poll_cnt", "minimum_direct_hops", + "minimum_object_size", "minimum_retry_timeout", "miss_access", + "negative_dns_ttl", "negative_ttl", "neighbor_timeout", + "neighbor_type_domain", "netdb_high", "netdb_low", "netdb_ping_period", + "netdb_ping_rate", "never_direct", "no_cache", "passthrough_proxy", + "pconn_timeout", "pid_filename", "pinger_program", "positive_dns_ttl", + "prefer_direct", "proxy_auth", "proxy_auth_realm", "query_icmp", + "quick_abort", "quick_abort_max", "quick_abort_min", + "quick_abort_pct", "range_offset_limit", "read_timeout", + "redirect_children", "redirect_program", + "redirect_rewrites_host_header", "reference_age", + "refresh_pattern", "reload_into_ims", "request_body_max_size", + "request_size", "request_timeout", "shutdown_lifetime", + "single_parent_bypass", "siteselect_timeout", "snmp_access", + "snmp_incoming_address", "snmp_port", "source_ping", "ssl_proxy", + "store_avg_object_size", "store_objects_per_bucket", + "strip_query_terms", "swap_level1_dirs", "swap_level2_dirs", + "tcp_incoming_address", "tcp_outgoing_address", "tcp_recv_bufsize", + "test_reachability", "udp_hit_obj", "udp_hit_obj_size", + "udp_incoming_address", "udp_outgoing_address", "unique_hostname", + "unlinkd_program", "uri_whitespace", "useragent_log", + "visible_hostname", "wais_relay", "wais_relay_host", "wais_relay_port", + ) + + opts = ( + "proxy-only", "weight", "ttl", "no-query", "default", "round-robin", + "multicast-responder", "on", "off", "all", "deny", "allow", "via", + 
"parent", "no-digest", "heap", "lru", "realm", "children", "q1", "q2", + "credentialsttl", "none", "disable", "offline_toggle", "diskd", + ) + + actions = ( + "shutdown", "info", "parameter", "server_list", "client_list", + r'squid.conf', + ) + + actions_stats = ( + "objects", "vm_objects", "utilization", "ipcache", "fqdncache", "dns", + "redirector", "io", "reply_headers", "filedescriptors", "netdb", + ) + + actions_log = ("status", "enable", "disable", "clear") + + acls = ( + "url_regex", "urlpath_regex", "referer_regex", "port", "proto", + "req_mime_type", "rep_mime_type", "method", "browser", "user", "src", + "dst", "time", "dstdomain", "ident", "snmp_community", + ) + + ip_re = ( + r'(?:(?:(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|0x0*[0-9a-f]{1,2}|' + r'0+[1-3]?[0-7]{0,2})(?:\.(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|' + r'0x0*[0-9a-f]{1,2}|0+[1-3]?[0-7]{0,2})){3})|(?!.*::.*::)(?:(?!:)|' + r':(?=:))(?:[0-9a-f]{0,4}(?:(?<=::)|(?`_ configuration files. + + .. versionadded:: 0.11 + """ + name = 'Nginx configuration file' + aliases = ['nginx'] + filenames = [] + mimetypes = ['text/x-nginx-conf'] + + tokens = { + 'root': [ + (r'(include)(\s+)([^\s;]+)', bygroups(Keyword, Text, Name)), + (r'[^\s;#]+', Keyword, 'stmt'), + include('base'), + ], + 'block': [ + (r'\}', Punctuation, '#pop:2'), + (r'[^\s;#]+', Keyword.Namespace, 'stmt'), + include('base'), + ], + 'stmt': [ + (r'\{', Punctuation, 'block'), + (r';', Punctuation, '#pop'), + include('base'), + ], + 'base': [ + (r'#.*\n', Comment.Single), + (r'on|off', Name.Constant), + (r'\$[^\s;#()]+', Name.Variable), + (r'([a-z0-9.-]+)(:)([0-9]+)', + bygroups(Name, Punctuation, Number.Integer)), + (r'[a-z-]+/[a-z-+]+', String), # mimetype + # (r'[a-zA-Z._-]+', Keyword), + (r'[0-9]+[km]?\b', Number.Integer), + (r'(~)(\s*)([^\s{]+)', bygroups(Punctuation, Text, String.Regex)), + (r'[:=~]', Punctuation), + (r'[^\s;#{}$]+', String), # catch all + (r'/[^\s;#]*', Name), # pathname + (r'\s+', Text), + (r'[$;]', Text), # 
leftover characters + ], + } + + +class LighttpdConfLexer(RegexLexer): + """ + Lexer for `Lighttpd `_ configuration files. + + .. versionadded:: 0.11 + """ + name = 'Lighttpd configuration file' + aliases = ['lighty', 'lighttpd'] + filenames = [] + mimetypes = ['text/x-lighttpd-conf'] + + tokens = { + 'root': [ + (r'#.*\n', Comment.Single), + (r'/\S*', Name), # pathname + (r'[a-zA-Z._-]+', Keyword), + (r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number), + (r'[0-9]+', Number), + (r'=>|=~|\+=|==|=|\+', Operator), + (r'\$[A-Z]+', Name.Builtin), + (r'[(){}\[\],]', Punctuation), + (r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double), + (r'\s+', Text), + ], + + } + + +class DockerLexer(RegexLexer): + """ + Lexer for `Docker `_ configuration files. + + .. versionadded:: 2.0 + """ + name = 'Docker' + aliases = ['docker', 'dockerfile'] + filenames = ['Dockerfile', '*.docker'] + mimetypes = ['text/x-dockerfile-config'] + + _keywords = (r'(?:FROM|MAINTAINER|CMD|EXPOSE|ENV|ADD|ENTRYPOINT|' + r'VOLUME|WORKDIR)') + + flags = re.IGNORECASE | re.MULTILINE + + tokens = { + 'root': [ + (r'^(ONBUILD)(\s+)(%s)\b' % (_keywords,), + bygroups(Name.Keyword, Whitespace, Keyword)), + (r'^(%s)\b(.*)' % (_keywords,), bygroups(Keyword, String)), + (r'#.*', Comment), + (r'RUN', Keyword), # Rest of line falls through + (r'(.*\\\n)*.+', using(BashLexer)), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/console.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/console.py new file mode 100644 index 0000000..5612058 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/console.py @@ -0,0 +1,114 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.console + ~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for misc console output. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +from pygments.lexer import RegexLexer, include, bygroups +from pygments.token import Generic, Comment, String, Text, Keyword, Name, \ + Punctuation, Number + +__all__ = ['VCTreeStatusLexer', 'PyPyLogLexer'] + + +class VCTreeStatusLexer(RegexLexer): + """ + For colorizing output of version control status commans, like "hg + status" or "svn status". + + .. versionadded:: 2.0 + """ + name = 'VCTreeStatus' + aliases = ['vctreestatus'] + filenames = [] + mimetypes = [] + + tokens = { + 'root': [ + (r'^A \+ C\s+', Generic.Error), + (r'^A\s+\+?\s+', String), + (r'^M\s+', Generic.Inserted), + (r'^C\s+', Generic.Error), + (r'^D\s+', Generic.Deleted), + (r'^[?!]\s+', Comment.Preproc), + (r' >\s+.*\n', Comment.Preproc), + (r'.*\n', Text) + ] + } + + +class PyPyLogLexer(RegexLexer): + """ + Lexer for PyPy log files. + + .. versionadded:: 1.5 + """ + name = "PyPy Log" + aliases = ["pypylog", "pypy"] + filenames = ["*.pypylog"] + mimetypes = ['application/x-pypylog'] + + tokens = { + "root": [ + (r"\[\w+\] \{jit-log-.*?$", Keyword, "jit-log"), + (r"\[\w+\] \{jit-backend-counts$", Keyword, "jit-backend-counts"), + include("extra-stuff"), + ], + "jit-log": [ + (r"\[\w+\] jit-log-.*?}$", Keyword, "#pop"), + (r"^\+\d+: ", Comment), + (r"--end of the loop--", Comment), + (r"[ifp]\d+", Name), + (r"ptr\d+", Name), + (r"(\()(\w+(?:\.\w+)?)(\))", + bygroups(Punctuation, Name.Builtin, Punctuation)), + (r"[\[\]=,()]", Punctuation), + (r"(\d+\.\d+|inf|-inf)", Number.Float), + (r"-?\d+", Number.Integer), + (r"'.*'", String), + (r"(None|descr|ConstClass|ConstPtr|TargetToken)", Name), + (r"<.*?>+", Name.Builtin), + (r"(label|debug_merge_point|jump|finish)", Name.Class), + (r"(int_add_ovf|int_add|int_sub_ovf|int_sub|int_mul_ovf|int_mul|" + r"int_floordiv|int_mod|int_lshift|int_rshift|int_and|int_or|" + r"int_xor|int_eq|int_ne|int_ge|int_gt|int_le|int_lt|int_is_zero|" + r"int_is_true|" + r"uint_floordiv|uint_ge|uint_lt|" + r"float_add|float_sub|float_mul|float_truediv|float_neg|" + 
r"float_eq|float_ne|float_ge|float_gt|float_le|float_lt|float_abs|" + r"ptr_eq|ptr_ne|instance_ptr_eq|instance_ptr_ne|" + r"cast_int_to_float|cast_float_to_int|" + r"force_token|quasiimmut_field|same_as|virtual_ref_finish|" + r"virtual_ref|mark_opaque_ptr|" + r"call_may_force|call_assembler|call_loopinvariant|" + r"call_release_gil|call_pure|call|" + r"new_with_vtable|new_array|newstr|newunicode|new|" + r"arraylen_gc|" + r"getarrayitem_gc_pure|getarrayitem_gc|setarrayitem_gc|" + r"getarrayitem_raw|setarrayitem_raw|getfield_gc_pure|" + r"getfield_gc|getinteriorfield_gc|setinteriorfield_gc|" + r"getfield_raw|setfield_gc|setfield_raw|" + r"strgetitem|strsetitem|strlen|copystrcontent|" + r"unicodegetitem|unicodesetitem|unicodelen|" + r"guard_true|guard_false|guard_value|guard_isnull|" + r"guard_nonnull_class|guard_nonnull|guard_class|guard_no_overflow|" + r"guard_not_forced|guard_no_exception|guard_not_invalidated)", + Name.Builtin), + include("extra-stuff"), + ], + "jit-backend-counts": [ + (r"\[\w+\] jit-backend-counts}$", Keyword, "#pop"), + (r":", Punctuation), + (r"\d+", Number), + include("extra-stuff"), + ], + "extra-stuff": [ + (r"\s+", Text), + (r"#.*?$", Comment), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/css.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/css.py new file mode 100644 index 0000000..eeb0a4a --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/css.py @@ -0,0 +1,498 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.css + ~~~~~~~~~~~~~~~~~~~ + + Lexers for CSS and related stylesheet formats. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import re +import copy + +from pygments.lexer import ExtendedRegexLexer, RegexLexer, include, bygroups, \ + default, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation +from pygments.util import iteritems + +__all__ = ['CssLexer', 'SassLexer', 'ScssLexer'] + + +class CssLexer(RegexLexer): + """ + For CSS (Cascading Style Sheets). + """ + + name = 'CSS' + aliases = ['css'] + filenames = ['*.css'] + mimetypes = ['text/css'] + + tokens = { + 'root': [ + include('basics'), + ], + 'basics': [ + (r'\s+', Text), + (r'/\*(?:.|\n)*?\*/', Comment), + (r'\{', Punctuation, 'content'), + (r'\:[\w-]+', Name.Decorator), + (r'\.[\w-]+', Name.Class), + (r'\#[\w-]+', Name.Function), + (r'@[\w-]+', Keyword, 'atrule'), + (r'[\w-]+', Name.Tag), + (r'[~^*!%&$\[\]()<>|+=@:;,./?-]', Operator), + (r'"(\\\\|\\"|[^"])*"', String.Double), + (r"'(\\\\|\\'|[^'])*'", String.Single) + ], + 'atrule': [ + (r'\{', Punctuation, 'atcontent'), + (r';', Punctuation, '#pop'), + include('basics'), + ], + 'atcontent': [ + include('basics'), + (r'\}', Punctuation, '#pop:2'), + ], + 'content': [ + (r'\s+', Text), + (r'\}', Punctuation, '#pop'), + (r'url\(.*?\)', String.Other), + (r'^@.*?$', Comment.Preproc), + (words(( + 'azimuth', 'background-attachment', 'background-color', + 'background-image', 'background-position', 'background-repeat', + 'background', 'border-bottom-color', 'border-bottom-style', + 'border-bottom-width', 'border-left-color', 'border-left-style', + 'border-left-width', 'border-right', 'border-right-color', + 'border-right-style', 'border-right-width', 'border-top-color', + 'border-top-style', 'border-top-width', 'border-bottom', + 'border-collapse', 'border-left', 'border-width', 'border-color', + 'border-spacing', 'border-style', 'border-top', 'border', 'caption-side', + 'clear', 'clip', 'color', 'content', 'counter-increment', 'counter-reset', + 'cue-after', 'cue-before', 'cue', 'cursor', 'direction', 'display', + 
'elevation', 'empty-cells', 'float', 'font-family', 'font-size', + 'font-size-adjust', 'font-stretch', 'font-style', 'font-variant', + 'font-weight', 'font', 'height', 'letter-spacing', 'line-height', + 'list-style-type', 'list-style-image', 'list-style-position', + 'list-style', 'margin-bottom', 'margin-left', 'margin-right', + 'margin-top', 'margin', 'marker-offset', 'marks', 'max-height', 'max-width', + 'min-height', 'min-width', 'opacity', 'orphans', 'outline-color', + 'outline-style', 'outline-width', 'outline', 'overflow', 'overflow-x', + 'overflow-y', 'padding-bottom', 'padding-left', 'padding-right', 'padding-top', + 'padding', 'page', 'page-break-after', 'page-break-before', 'page-break-inside', + 'pause-after', 'pause-before', 'pause', 'pitch-range', 'pitch', + 'play-during', 'position', 'quotes', 'richness', 'right', 'size', + 'speak-header', 'speak-numeral', 'speak-punctuation', 'speak', + 'speech-rate', 'stress', 'table-layout', 'text-align', 'text-decoration', + 'text-indent', 'text-shadow', 'text-transform', 'top', 'unicode-bidi', + 'vertical-align', 'visibility', 'voice-family', 'volume', 'white-space', + 'widows', 'width', 'word-spacing', 'z-index', 'bottom', + 'above', 'absolute', 'always', 'armenian', 'aural', 'auto', 'avoid', 'baseline', + 'behind', 'below', 'bidi-override', 'blink', 'block', 'bolder', 'bold', 'both', + 'capitalize', 'center-left', 'center-right', 'center', 'circle', + 'cjk-ideographic', 'close-quote', 'collapse', 'condensed', 'continuous', + 'crop', 'crosshair', 'cross', 'cursive', 'dashed', 'decimal-leading-zero', + 'decimal', 'default', 'digits', 'disc', 'dotted', 'double', 'e-resize', 'embed', + 'extra-condensed', 'extra-expanded', 'expanded', 'fantasy', 'far-left', + 'far-right', 'faster', 'fast', 'fixed', 'georgian', 'groove', 'hebrew', 'help', + 'hidden', 'hide', 'higher', 'high', 'hiragana-iroha', 'hiragana', 'icon', + 'inherit', 'inline-table', 'inline', 'inset', 'inside', 'invert', 'italic', + 'justify', 
'katakana-iroha', 'katakana', 'landscape', 'larger', 'large', + 'left-side', 'leftwards', 'left', 'level', 'lighter', 'line-through', 'list-item', + 'loud', 'lower-alpha', 'lower-greek', 'lower-roman', 'lowercase', 'ltr', + 'lower', 'low', 'medium', 'message-box', 'middle', 'mix', 'monospace', + 'n-resize', 'narrower', 'ne-resize', 'no-close-quote', 'no-open-quote', + 'no-repeat', 'none', 'normal', 'nowrap', 'nw-resize', 'oblique', 'once', + 'open-quote', 'outset', 'outside', 'overline', 'pointer', 'portrait', 'px', + 'relative', 'repeat-x', 'repeat-y', 'repeat', 'rgb', 'ridge', 'right-side', + 'rightwards', 's-resize', 'sans-serif', 'scroll', 'se-resize', + 'semi-condensed', 'semi-expanded', 'separate', 'serif', 'show', 'silent', + 'slower', 'slow', 'small-caps', 'small-caption', 'smaller', 'soft', 'solid', + 'spell-out', 'square', 'static', 'status-bar', 'super', 'sw-resize', + 'table-caption', 'table-cell', 'table-column', 'table-column-group', + 'table-footer-group', 'table-header-group', 'table-row', + 'table-row-group', 'text-bottom', 'text-top', 'text', 'thick', 'thin', + 'transparent', 'ultra-condensed', 'ultra-expanded', 'underline', + 'upper-alpha', 'upper-latin', 'upper-roman', 'uppercase', 'url', + 'visible', 'w-resize', 'wait', 'wider', 'x-fast', 'x-high', 'x-large', 'x-loud', + 'x-low', 'x-small', 'x-soft', 'xx-large', 'xx-small', 'yes'), suffix=r'\b'), + Keyword), + (words(( + 'indigo', 'gold', 'firebrick', 'indianred', 'yellow', 'darkolivegreen', + 'darkseagreen', 'mediumvioletred', 'mediumorchid', 'chartreuse', + 'mediumslateblue', 'black', 'springgreen', 'crimson', 'lightsalmon', 'brown', + 'turquoise', 'olivedrab', 'cyan', 'silver', 'skyblue', 'gray', 'darkturquoise', + 'goldenrod', 'darkgreen', 'darkviolet', 'darkgray', 'lightpink', 'teal', + 'darkmagenta', 'lightgoldenrodyellow', 'lavender', 'yellowgreen', 'thistle', + 'violet', 'navy', 'orchid', 'blue', 'ghostwhite', 'honeydew', 'cornflowerblue', + 'darkblue', 'darkkhaki', 'mediumpurple', 
'cornsilk', 'red', 'bisque', 'slategray', + 'darkcyan', 'khaki', 'wheat', 'deepskyblue', 'darkred', 'steelblue', 'aliceblue', + 'gainsboro', 'mediumturquoise', 'floralwhite', 'coral', 'purple', 'lightgrey', + 'lightcyan', 'darksalmon', 'beige', 'azure', 'lightsteelblue', 'oldlace', + 'greenyellow', 'royalblue', 'lightseagreen', 'mistyrose', 'sienna', + 'lightcoral', 'orangered', 'navajowhite', 'lime', 'palegreen', 'burlywood', + 'seashell', 'mediumspringgreen', 'fuchsia', 'papayawhip', 'blanchedalmond', + 'peru', 'aquamarine', 'white', 'darkslategray', 'ivory', 'dodgerblue', + 'lemonchiffon', 'chocolate', 'orange', 'forestgreen', 'slateblue', 'olive', + 'mintcream', 'antiquewhite', 'darkorange', 'cadetblue', 'moccasin', + 'limegreen', 'saddlebrown', 'darkslateblue', 'lightskyblue', 'deeppink', + 'plum', 'aqua', 'darkgoldenrod', 'maroon', 'sandybrown', 'magenta', 'tan', + 'rosybrown', 'pink', 'lightblue', 'palevioletred', 'mediumseagreen', + 'dimgray', 'powderblue', 'seagreen', 'snow', 'mediumblue', 'midnightblue', + 'paleturquoise', 'palegoldenrod', 'whitesmoke', 'darkorchid', 'salmon', + 'lightslategray', 'lawngreen', 'lightgreen', 'tomato', 'hotpink', + 'lightyellow', 'lavenderblush', 'linen', 'mediumaquamarine', 'green', + 'blueviolet', 'peachpuff'), suffix=r'\b'), + Name.Builtin), + (r'\!important', Comment.Preproc), + (r'/\*(?:.|\n)*?\*/', Comment), + (r'\#[a-zA-Z0-9]{1,6}', Number), + (r'[.-]?[0-9]*[.]?[0-9]+(em|px|pt|pc|in|mm|cm|ex|s)\b', Number), + # Separate regex for percentages, as can't do word boundaries with % + (r'[.-]?[0-9]*[.]?[0-9]+%', Number), + (r'-?[0-9]+', Number), + (r'[~^*!%&<>|+=@:,./?-]+', Operator), + (r'[\[\]();]+', Punctuation), + (r'"(\\\\|\\"|[^"])*"', String.Double), + (r"'(\\\\|\\'|[^'])*'", String.Single), + (r'[a-zA-Z_]\w*', Name) + ] + } + + +common_sass_tokens = { + 'value': [ + (r'[ \t]+', Text), + (r'[!$][\w-]+', Name.Variable), + (r'url\(', String.Other, 'string-url'), + (r'[a-z_-][\w-]*(?=\()', Name.Function), + (words(( + 
'azimuth', 'background-attachment', 'background-color', + 'background-image', 'background-position', 'background-repeat', + 'background', 'border-bottom-color', 'border-bottom-style', + 'border-bottom-width', 'border-left-color', 'border-left-style', + 'border-left-width', 'border-right', 'border-right-color', + 'border-right-style', 'border-right-width', 'border-top-color', + 'border-top-style', 'border-top-width', 'border-bottom', + 'border-collapse', 'border-left', 'border-width', 'border-color', + 'border-spacing', 'border-style', 'border-top', 'border', 'caption-side', + 'clear', 'clip', 'color', 'content', 'counter-increment', 'counter-reset', + 'cue-after', 'cue-before', 'cue', 'cursor', 'direction', 'display', + 'elevation', 'empty-cells', 'float', 'font-family', 'font-size', + 'font-size-adjust', 'font-stretch', 'font-style', 'font-variant', + 'font-weight', 'font', 'height', 'letter-spacing', 'line-height', + 'list-style-type', 'list-style-image', 'list-style-position', + 'list-style', 'margin-bottom', 'margin-left', 'margin-right', + 'margin-top', 'margin', 'marker-offset', 'marks', 'max-height', 'max-width', + 'min-height', 'min-width', 'opacity', 'orphans', 'outline', 'outline-color', + 'outline-style', 'outline-width', 'overflow', 'padding-bottom', + 'padding-left', 'padding-right', 'padding-top', 'padding', 'page', + 'page-break-after', 'page-break-before', 'page-break-inside', + 'pause-after', 'pause-before', 'pause', 'pitch', 'pitch-range', + 'play-during', 'position', 'quotes', 'richness', 'right', 'size', + 'speak-header', 'speak-numeral', 'speak-punctuation', 'speak', + 'speech-rate', 'stress', 'table-layout', 'text-align', 'text-decoration', + 'text-indent', 'text-shadow', 'text-transform', 'top', 'unicode-bidi', + 'vertical-align', 'visibility', 'voice-family', 'volume', 'white-space', + 'widows', 'width', 'word-spacing', 'z-index', 'bottom', 'left', + 'above', 'absolute', 'always', 'armenian', 'aural', 'auto', 'avoid', 'baseline', + 'behind', 
'below', 'bidi-override', 'blink', 'block', 'bold', 'bolder', 'both', + 'capitalize', 'center-left', 'center-right', 'center', 'circle', + 'cjk-ideographic', 'close-quote', 'collapse', 'condensed', 'continuous', + 'crop', 'crosshair', 'cross', 'cursive', 'dashed', 'decimal-leading-zero', + 'decimal', 'default', 'digits', 'disc', 'dotted', 'double', 'e-resize', 'embed', + 'extra-condensed', 'extra-expanded', 'expanded', 'fantasy', 'far-left', + 'far-right', 'faster', 'fast', 'fixed', 'georgian', 'groove', 'hebrew', 'help', + 'hidden', 'hide', 'higher', 'high', 'hiragana-iroha', 'hiragana', 'icon', + 'inherit', 'inline-table', 'inline', 'inset', 'inside', 'invert', 'italic', + 'justify', 'katakana-iroha', 'katakana', 'landscape', 'larger', 'large', + 'left-side', 'leftwards', 'level', 'lighter', 'line-through', 'list-item', + 'loud', 'lower-alpha', 'lower-greek', 'lower-roman', 'lowercase', 'ltr', + 'lower', 'low', 'medium', 'message-box', 'middle', 'mix', 'monospace', + 'n-resize', 'narrower', 'ne-resize', 'no-close-quote', 'no-open-quote', + 'no-repeat', 'none', 'normal', 'nowrap', 'nw-resize', 'oblique', 'once', + 'open-quote', 'outset', 'outside', 'overline', 'pointer', 'portrait', 'px', + 'relative', 'repeat-x', 'repeat-y', 'repeat', 'rgb', 'ridge', 'right-side', + 'rightwards', 's-resize', 'sans-serif', 'scroll', 'se-resize', + 'semi-condensed', 'semi-expanded', 'separate', 'serif', 'show', 'silent', + 'slow', 'slower', 'small-caps', 'small-caption', 'smaller', 'soft', 'solid', + 'spell-out', 'square', 'static', 'status-bar', 'super', 'sw-resize', + 'table-caption', 'table-cell', 'table-column', 'table-column-group', + 'table-footer-group', 'table-header-group', 'table-row', + 'table-row-group', 'text', 'text-bottom', 'text-top', 'thick', 'thin', + 'transparent', 'ultra-condensed', 'ultra-expanded', 'underline', + 'upper-alpha', 'upper-latin', 'upper-roman', 'uppercase', 'url', + 'visible', 'w-resize', 'wait', 'wider', 'x-fast', 'x-high', 'x-large', 'x-loud', + 
'x-low', 'x-small', 'x-soft', 'xx-large', 'xx-small', 'yes'), suffix=r'\b'), + Name.Constant), + (words(( + 'indigo', 'gold', 'firebrick', 'indianred', 'darkolivegreen', + 'darkseagreen', 'mediumvioletred', 'mediumorchid', 'chartreuse', + 'mediumslateblue', 'springgreen', 'crimson', 'lightsalmon', 'brown', + 'turquoise', 'olivedrab', 'cyan', 'skyblue', 'darkturquoise', + 'goldenrod', 'darkgreen', 'darkviolet', 'darkgray', 'lightpink', + 'darkmagenta', 'lightgoldenrodyellow', 'lavender', 'yellowgreen', 'thistle', + 'violet', 'orchid', 'ghostwhite', 'honeydew', 'cornflowerblue', + 'darkblue', 'darkkhaki', 'mediumpurple', 'cornsilk', 'bisque', 'slategray', + 'darkcyan', 'khaki', 'wheat', 'deepskyblue', 'darkred', 'steelblue', 'aliceblue', + 'gainsboro', 'mediumturquoise', 'floralwhite', 'coral', 'lightgrey', + 'lightcyan', 'darksalmon', 'beige', 'azure', 'lightsteelblue', 'oldlace', + 'greenyellow', 'royalblue', 'lightseagreen', 'mistyrose', 'sienna', + 'lightcoral', 'orangered', 'navajowhite', 'palegreen', 'burlywood', + 'seashell', 'mediumspringgreen', 'papayawhip', 'blanchedalmond', + 'peru', 'aquamarine', 'darkslategray', 'ivory', 'dodgerblue', + 'lemonchiffon', 'chocolate', 'orange', 'forestgreen', 'slateblue', + 'mintcream', 'antiquewhite', 'darkorange', 'cadetblue', 'moccasin', + 'limegreen', 'saddlebrown', 'darkslateblue', 'lightskyblue', 'deeppink', + 'plum', 'darkgoldenrod', 'sandybrown', 'magenta', 'tan', + 'rosybrown', 'pink', 'lightblue', 'palevioletred', 'mediumseagreen', + 'dimgray', 'powderblue', 'seagreen', 'snow', 'mediumblue', 'midnightblue', + 'paleturquoise', 'palegoldenrod', 'whitesmoke', 'darkorchid', 'salmon', + 'lightslategray', 'lawngreen', 'lightgreen', 'tomato', 'hotpink', + 'lightyellow', 'lavenderblush', 'linen', 'mediumaquamarine', + 'blueviolet', 'peachpuff'), suffix=r'\b'), + Name.Entity), + (words(( + 'black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', + 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 
'aqua'), suffix=r'\b'), + Name.Builtin), + (r'\!(important|default)', Name.Exception), + (r'(true|false)', Name.Pseudo), + (r'(and|or|not)', Operator.Word), + (r'/\*', Comment.Multiline, 'inline-comment'), + (r'//[^\n]*', Comment.Single), + (r'\#[a-z0-9]{1,6}', Number.Hex), + (r'(-?\d+)(\%|[a-z]+)?', bygroups(Number.Integer, Keyword.Type)), + (r'(-?\d*\.\d+)(\%|[a-z]+)?', bygroups(Number.Float, Keyword.Type)), + (r'#\{', String.Interpol, 'interpolation'), + (r'[~^*!&%<>|+=@:,./?-]+', Operator), + (r'[\[\]()]+', Punctuation), + (r'"', String.Double, 'string-double'), + (r"'", String.Single, 'string-single'), + (r'[a-z_-][\w-]*', Name), + ], + + 'interpolation': [ + (r'\}', String.Interpol, '#pop'), + include('value'), + ], + + 'selector': [ + (r'[ \t]+', Text), + (r'\:', Name.Decorator, 'pseudo-class'), + (r'\.', Name.Class, 'class'), + (r'\#', Name.Namespace, 'id'), + (r'[\w-]+', Name.Tag), + (r'#\{', String.Interpol, 'interpolation'), + (r'&', Keyword), + (r'[~^*!&\[\]()<>|+=@:;,./?-]', Operator), + (r'"', String.Double, 'string-double'), + (r"'", String.Single, 'string-single'), + ], + + 'string-double': [ + (r'(\\.|#(?=[^\n{])|[^\n"#])+', String.Double), + (r'#\{', String.Interpol, 'interpolation'), + (r'"', String.Double, '#pop'), + ], + + 'string-single': [ + (r"(\\.|#(?=[^\n{])|[^\n'#])+", String.Double), + (r'#\{', String.Interpol, 'interpolation'), + (r"'", String.Double, '#pop'), + ], + + 'string-url': [ + (r'(\\#|#(?=[^\n{])|[^\n#)])+', String.Other), + (r'#\{', String.Interpol, 'interpolation'), + (r'\)', String.Other, '#pop'), + ], + + 'pseudo-class': [ + (r'[\w-]+', Name.Decorator), + (r'#\{', String.Interpol, 'interpolation'), + default('#pop'), + ], + + 'class': [ + (r'[\w-]+', Name.Class), + (r'#\{', String.Interpol, 'interpolation'), + default('#pop'), + ], + + 'id': [ + (r'[\w-]+', Name.Namespace), + (r'#\{', String.Interpol, 'interpolation'), + default('#pop'), + ], + + 'for': [ + (r'(from|to|through)', Operator.Word), + include('value'), + ], +} 
+ + +def _indentation(lexer, match, ctx): + indentation = match.group(0) + yield match.start(), Text, indentation + ctx.last_indentation = indentation + ctx.pos = match.end() + + if hasattr(ctx, 'block_state') and ctx.block_state and \ + indentation.startswith(ctx.block_indentation) and \ + indentation != ctx.block_indentation: + ctx.stack.append(ctx.block_state) + else: + ctx.block_state = None + ctx.block_indentation = None + ctx.stack.append('content') + + +def _starts_block(token, state): + def callback(lexer, match, ctx): + yield match.start(), token, match.group(0) + + if hasattr(ctx, 'last_indentation'): + ctx.block_indentation = ctx.last_indentation + else: + ctx.block_indentation = '' + + ctx.block_state = state + ctx.pos = match.end() + + return callback + + +class SassLexer(ExtendedRegexLexer): + """ + For Sass stylesheets. + + .. versionadded:: 1.3 + """ + + name = 'Sass' + aliases = ['sass'] + filenames = ['*.sass'] + mimetypes = ['text/x-sass'] + + flags = re.IGNORECASE | re.MULTILINE + + tokens = { + 'root': [ + (r'[ \t]*\n', Text), + (r'[ \t]*', _indentation), + ], + + 'content': [ + (r'//[^\n]*', _starts_block(Comment.Single, 'single-comment'), + 'root'), + (r'/\*[^\n]*', _starts_block(Comment.Multiline, 'multi-comment'), + 'root'), + (r'@import', Keyword, 'import'), + (r'@for', Keyword, 'for'), + (r'@(debug|warn|if|while)', Keyword, 'value'), + (r'(@mixin)( [\w-]+)', bygroups(Keyword, Name.Function), 'value'), + (r'(@include)( [\w-]+)', bygroups(Keyword, Name.Decorator), 'value'), + (r'@extend', Keyword, 'selector'), + (r'@[\w-]+', Keyword, 'selector'), + (r'=[\w-]+', Name.Function, 'value'), + (r'\+[\w-]+', Name.Decorator, 'value'), + (r'([!$][\w-]\w*)([ \t]*(?:(?:\|\|)?=|:))', + bygroups(Name.Variable, Operator), 'value'), + (r':', Name.Attribute, 'old-style-attr'), + (r'(?=.+?[=:]([^a-z]|$))', Name.Attribute, 'new-style-attr'), + default('selector'), + ], + + 'single-comment': [ + (r'.+', Comment.Single), + (r'\n', Text, 'root'), + ], + + 
'multi-comment': [ + (r'.+', Comment.Multiline), + (r'\n', Text, 'root'), + ], + + 'import': [ + (r'[ \t]+', Text), + (r'\S+', String), + (r'\n', Text, 'root'), + ], + + 'old-style-attr': [ + (r'[^\s:="\[]+', Name.Attribute), + (r'#\{', String.Interpol, 'interpolation'), + (r'[ \t]*=', Operator, 'value'), + default('value'), + ], + + 'new-style-attr': [ + (r'[^\s:="\[]+', Name.Attribute), + (r'#\{', String.Interpol, 'interpolation'), + (r'[ \t]*[=:]', Operator, 'value'), + ], + + 'inline-comment': [ + (r"(\\#|#(?=[^\n{])|\*(?=[^\n/])|[^\n#*])+", Comment.Multiline), + (r'#\{', String.Interpol, 'interpolation'), + (r"\*/", Comment, '#pop'), + ], + } + for group, common in iteritems(common_sass_tokens): + tokens[group] = copy.copy(common) + tokens['value'].append((r'\n', Text, 'root')) + tokens['selector'].append((r'\n', Text, 'root')) + + +class ScssLexer(RegexLexer): + """ + For SCSS stylesheets. + """ + + name = 'SCSS' + aliases = ['scss'] + filenames = ['*.scss'] + mimetypes = ['text/x-scss'] + + flags = re.IGNORECASE | re.DOTALL + tokens = { + 'root': [ + (r'\s+', Text), + (r'//.*?\n', Comment.Single), + (r'/\*.*?\*/', Comment.Multiline), + (r'@import', Keyword, 'value'), + (r'@for', Keyword, 'for'), + (r'@(debug|warn|if|while)', Keyword, 'value'), + (r'(@mixin)( [\w-]+)', bygroups(Keyword, Name.Function), 'value'), + (r'(@include)( [\w-]+)', bygroups(Keyword, Name.Decorator), 'value'), + (r'@extend', Keyword, 'selector'), + (r'(@media)(\s+)', bygroups(Keyword, Text), 'value'), + (r'@[\w-]+', Keyword, 'selector'), + (r'(\$[\w-]*\w)([ \t]*:)', bygroups(Name.Variable, Operator), 'value'), + (r'(?=[^;{}][;}])', Name.Attribute, 'attr'), + (r'(?=[^;{}:]+:[^a-z])', Name.Attribute, 'attr'), + default('selector'), + ], + + 'attr': [ + (r'[^\s:="\[]+', Name.Attribute), + (r'#\{', String.Interpol, 'interpolation'), + (r'[ \t]*:', Operator, 'value'), + ], + + 'inline-comment': [ + (r"(\\#|#(?=[^{])|\*(?=[^/])|[^#*])+", Comment.Multiline), + (r'#\{', String.Interpol, 
'interpolation'), + (r"\*/", Comment, '#pop'), + ], + } + for group, common in iteritems(common_sass_tokens): + tokens[group] = copy.copy(common) + tokens['value'].extend([(r'\n', Text), (r'[;{}]', Punctuation, '#pop')]) + tokens['selector'].extend([(r'\n', Text), (r'[;{}]', Punctuation, '#pop')]) diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/d.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/d.py new file mode 100644 index 0000000..efd97be --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/d.py @@ -0,0 +1,251 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.d + ~~~~~~~~~~~~~~~~~ + + Lexers for D languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, include, words +from pygments.token import Text, Comment, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['DLexer', 'CrocLexer', 'MiniDLexer'] + + +class DLexer(RegexLexer): + """ + For D source. + + .. 
versionadded:: 1.2 + """ + name = 'D' + filenames = ['*.d', '*.di'] + aliases = ['d'] + mimetypes = ['text/x-dsrc'] + + tokens = { + 'root': [ + (r'\n', Text), + (r'\s+', Text), + # (r'\\\n', Text), # line continuations + # Comments + (r'//(.*?)\n', Comment.Single), + (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), + (r'/\+', Comment.Multiline, 'nested_comment'), + # Keywords + (words(( + 'abstract', 'alias', 'align', 'asm', 'assert', 'auto', 'body', + 'break', 'case', 'cast', 'catch', 'class', 'const', 'continue', + 'debug', 'default', 'delegate', 'delete', 'deprecated', 'do', 'else', + 'enum', 'export', 'extern', 'finally', 'final', 'foreach_reverse', + 'foreach', 'for', 'function', 'goto', 'if', 'immutable', 'import', + 'interface', 'invariant', 'inout', 'in', 'is', 'lazy', 'mixin', + 'module', 'new', 'nothrow', 'out', 'override', 'package', 'pragma', + 'private', 'protected', 'public', 'pure', 'ref', 'return', 'scope', + 'shared', 'static', 'struct', 'super', 'switch', 'synchronized', + 'template', 'this', 'throw', 'try', 'typedef', 'typeid', 'typeof', + 'union', 'unittest', 'version', 'volatile', 'while', 'with', + '__gshared', '__traits', '__vector', '__parameters'), + suffix=r'\b'), + Keyword), + (words(( + 'bool', 'byte', 'cdouble', 'cent', 'cfloat', 'char', 'creal', + 'dchar', 'double', 'float', 'idouble', 'ifloat', 'int', 'ireal', + 'long', 'real', 'short', 'ubyte', 'ucent', 'uint', 'ulong', + 'ushort', 'void', 'wchar'), suffix=r'\b'), + Keyword.Type), + (r'(false|true|null)\b', Keyword.Constant), + (words(( + '__FILE__', '__MODULE__', '__LINE__', '__FUNCTION__', '__PRETTY_FUNCTION__' + '', '__DATE__', '__EOF__', '__TIME__', '__TIMESTAMP__', '__VENDOR__', + '__VERSION__'), suffix=r'\b'), + Keyword.Pseudo), + (r'macro\b', Keyword.Reserved), + (r'(string|wstring|dstring|size_t|ptrdiff_t)\b', Name.Builtin), + # FloatLiteral + # -- HexFloat + (r'0[xX]([0-9a-fA-F_]*\.[0-9a-fA-F_]+|[0-9a-fA-F_]+)' + r'[pP][+\-]?[0-9_]+[fFL]?[i]?', Number.Float), + # -- 
DecimalFloat + (r'[0-9_]+(\.[0-9_]+[eE][+\-]?[0-9_]+|' + r'\.[0-9_]*|[eE][+\-]?[0-9_]+)[fFL]?[i]?', Number.Float), + (r'\.(0|[1-9][0-9_]*)([eE][+\-]?[0-9_]+)?[fFL]?[i]?', Number.Float), + # IntegerLiteral + # -- Binary + (r'0[Bb][01_]+', Number.Bin), + # -- Octal + (r'0[0-7_]+', Number.Oct), + # -- Hexadecimal + (r'0[xX][0-9a-fA-F_]+', Number.Hex), + # -- Decimal + (r'(0|[1-9][0-9_]*)([LUu]|Lu|LU|uL|UL)?', Number.Integer), + # CharacterLiteral + (r"""'(\\['"?\\abfnrtv]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}""" + r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\&\w+;|.)'""", + String.Char), + # StringLiteral + # -- WysiwygString + (r'r"[^"]*"[cwd]?', String), + # -- AlternateWysiwygString + (r'`[^`]*`[cwd]?', String), + # -- DoubleQuotedString + (r'"(\\\\|\\"|[^"])*"[cwd]?', String), + # -- EscapeSequence + (r"\\(['\"?\\abfnrtv]|x[0-9a-fA-F]{2}|[0-7]{1,3}" + r"|u[0-9a-fA-F]{4}|U[0-9a-fA-F]{8}|&\w+;)", + String), + # -- HexString + (r'x"[0-9a-fA-F_\s]*"[cwd]?', String), + # -- DelimitedString + (r'q"\[', String, 'delimited_bracket'), + (r'q"\(', String, 'delimited_parenthesis'), + (r'q"<', String, 'delimited_angle'), + (r'q"\{', String, 'delimited_curly'), + (r'q"([a-zA-Z_]\w*)\n.*?\n\1"', String), + (r'q"(.).*?\1"', String), + # -- TokenString + (r'q\{', String, 'token_string'), + # Attributes + (r'@([a-zA-Z_]\w*)?', Name.Decorator), + # Tokens + (r'(~=|\^=|%=|\*=|==|!>=|!<=|!<>=|!<>|!<|!>|!=|>>>=|>>>|>>=|>>|>=' + r'|<>=|<>|<<=|<<|<=|\+\+|\+=|--|-=|\|\||\|=|&&|&=|\.\.\.|\.\.|/=)' + r'|[/.&|\-+<>!()\[\]{}?,;:$=*%^~]', Punctuation), + # Identifier + (r'[a-zA-Z_]\w*', Name), + # Line + (r'#line\s.*\n', Comment.Special), + ], + 'nested_comment': [ + (r'[^+/]+', Comment.Multiline), + (r'/\+', Comment.Multiline, '#push'), + (r'\+/', Comment.Multiline, '#pop'), + (r'[+/]', Comment.Multiline), + ], + 'token_string': [ + (r'\{', Punctuation, 'token_string_nest'), + (r'\}', String, '#pop'), + include('root'), + ], + 'token_string_nest': [ + (r'\{', Punctuation, '#push'), + (r'\}', 
Punctuation, '#pop'), + include('root'), + ], + 'delimited_bracket': [ + (r'[^\[\]]+', String), + (r'\[', String, 'delimited_inside_bracket'), + (r'\]"', String, '#pop'), + ], + 'delimited_inside_bracket': [ + (r'[^\[\]]+', String), + (r'\[', String, '#push'), + (r'\]', String, '#pop'), + ], + 'delimited_parenthesis': [ + (r'[^()]+', String), + (r'\(', String, 'delimited_inside_parenthesis'), + (r'\)"', String, '#pop'), + ], + 'delimited_inside_parenthesis': [ + (r'[^()]+', String), + (r'\(', String, '#push'), + (r'\)', String, '#pop'), + ], + 'delimited_angle': [ + (r'[^<>]+', String), + (r'<', String, 'delimited_inside_angle'), + (r'>"', String, '#pop'), + ], + 'delimited_inside_angle': [ + (r'[^<>]+', String), + (r'<', String, '#push'), + (r'>', String, '#pop'), + ], + 'delimited_curly': [ + (r'[^{}]+', String), + (r'\{', String, 'delimited_inside_curly'), + (r'\}"', String, '#pop'), + ], + 'delimited_inside_curly': [ + (r'[^{}]+', String), + (r'\{', String, '#push'), + (r'\}', String, '#pop'), + ], + } + + +class CrocLexer(RegexLexer): + """ + For `Croc `_ source. 
+ """ + name = 'Croc' + filenames = ['*.croc'] + aliases = ['croc'] + mimetypes = ['text/x-crocsrc'] + + tokens = { + 'root': [ + (r'\n', Text), + (r'\s+', Text), + # Comments + (r'//(.*?)\n', Comment.Single), + (r'/\*', Comment.Multiline, 'nestedcomment'), + # Keywords + (words(( + 'as', 'assert', 'break', 'case', 'catch', 'class', 'continue', + 'default', 'do', 'else', 'finally', 'for', 'foreach', 'function', + 'global', 'namespace', 'if', 'import', 'in', 'is', 'local', + 'module', 'return', 'scope', 'super', 'switch', 'this', 'throw', + 'try', 'vararg', 'while', 'with', 'yield'), suffix=r'\b'), + Keyword), + (r'(false|true|null)\b', Keyword.Constant), + # FloatLiteral + (r'([0-9][0-9_]*)(?=[.eE])(\.[0-9][0-9_]*)?([eE][+\-]?[0-9_]+)?', + Number.Float), + # IntegerLiteral + # -- Binary + (r'0[bB][01][01_]*', Number.Bin), + # -- Hexadecimal + (r'0[xX][0-9a-fA-F][0-9a-fA-F_]*', Number.Hex), + # -- Decimal + (r'([0-9][0-9_]*)(?![.eE])', Number.Integer), + # CharacterLiteral + (r"""'(\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\[0-9]{1,3}""" + r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|.)'""", + String.Char), + # StringLiteral + # -- WysiwygString + (r'@"(""|[^"])*"', String), + (r'@`(``|[^`])*`', String), + (r"@'(''|[^'])*'", String), + # -- DoubleQuotedString + (r'"(\\\\|\\"|[^"])*"', String), + # Tokens + (r'(~=|\^=|%=|\*=|==|!=|>>>=|>>>|>>=|>>|>=|<=>|\?=|-\>' + r'|<<=|<<|<=|\+\+|\+=|--|-=|\|\||\|=|&&|&=|\.\.|/=)' + r'|[-/.&$@|\+<>!()\[\]{}?,;:=*%^~#\\]', Punctuation), + # Identifier + (r'[a-zA-Z_]\w*', Name), + ], + 'nestedcomment': [ + (r'[^*/]+', Comment.Multiline), + (r'/\*', Comment.Multiline, '#push'), + (r'\*/', Comment.Multiline, '#pop'), + (r'[*/]', Comment.Multiline), + ], + } + + +class MiniDLexer(CrocLexer): + """ + For MiniD source. MiniD is now known as Croc. 
+ """ + name = 'MiniD' + filenames = [] # don't lex .md as MiniD, reserve for Markdown + aliases = ['minid'] + mimetypes = ['text/x-minidsrc'] diff --git a/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/dalvik.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/dalvik.py similarity index 65% rename from plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/dalvik.py rename to plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/dalvik.py index de9b11f..5f5c6c7 100644 --- a/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/dalvik.py +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/dalvik.py @@ -5,13 +5,15 @@ Pygments lexers for Dalvik VM-related languages. - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ +import re + from pygments.lexer import RegexLexer, include, bygroups from pygments.token import Keyword, Text, Comment, Name, String, Number, \ - Punctuation + Punctuation __all__ = ['SmaliLexer'] @@ -21,7 +23,7 @@ class SmaliLexer(RegexLexer): For `Smali `_ (Android/Dalvik) assembly code. - *New in Pygments 1.6.* + .. 
versionadded:: 1.6 """ name = 'Smali' aliases = ['smali'] @@ -63,8 +65,8 @@ class SmaliLexer(RegexLexer): (r'\s+', Text), ], 'instruction': [ - (r'\b[vp]\d+\b', Name.Builtin), # registers - (r'\b[a-z][A-Za-z0-9/-]+\s+', Text), # instructions + (r'\b[vp]\d+\b', Name.Builtin), # registers + (r'\b[a-z][A-Za-z0-9/-]+\s+', Text), # instructions ], 'literal': [ (r'".*"', String), @@ -73,27 +75,27 @@ class SmaliLexer(RegexLexer): (r'[0-9]+L?', Number.Integer), ], 'field': [ - (r'(\$?\b)([A-Za-z0-9_$]*)(:)', + (r'(\$?\b)([\w$]*)(:)', bygroups(Punctuation, Name.Variable, Punctuation)), ], 'method': [ - (r'<(?:cl)?init>', Name.Function), # constructor - (r'(\$?\b)([A-Za-z0-9_$]*)(\()', + (r'<(?:cl)?init>', Name.Function), # constructor + (r'(\$?\b)([\w$]*)(\()', bygroups(Punctuation, Name.Function, Punctuation)), ], 'label': [ - (r':[A-Za-z0-9_]+', Name.Label), + (r':\w+', Name.Label), ], 'class': [ # class names in the form Lcom/namespace/ClassName; # I only want to color the ClassName part, so the namespace part is # treated as 'Text' - (r'(L)((?:[A-Za-z0-9_$]+/)*)([A-Za-z0-9_$]+)(;)', + (r'(L)((?:[\w$]+/)*)([\w$]+)(;)', bygroups(Keyword.Type, Text, Name.Class, Text)), ], 'punctuation': [ (r'->', Punctuation), - (r'[{},\(\):=\.-]', Punctuation), + (r'[{},():=.-]', Punctuation), ], 'type': [ (r'[ZBSCIJFDV\[]+', Keyword.Type), @@ -102,3 +104,22 @@ class SmaliLexer(RegexLexer): (r'#.*?\n', Comment), ], } + + def analyse_text(text): + score = 0 + if re.search(r'^\s*\.class\s', text, re.MULTILINE): + score += 0.5 + if re.search(r'\b((check-cast|instance-of|throw-verification-error' + r')\b|(-to|add|[ais]get|[ais]put|and|cmpl|const|div|' + r'if|invoke|move|mul|neg|not|or|rem|return|rsub|shl|' + r'shr|sub|ushr)[-/])|{|}', text, re.MULTILINE): + score += 0.3 + if re.search(r'(\.(catchall|epilogue|restart local|prologue)|' + r'\b(array-data|class-change-error|declared-synchronized|' + r'(field|inline|vtable)@0x[0-9a-fA-F]|generic-error|' + 
r'illegal-class-access|illegal-field-access|' + r'illegal-method-access|instantiation-error|no-error|' + r'no-such-class|no-such-field|no-such-method|' + r'packed-switch|sparse-switch))\b', text, re.MULTILINE): + score += 0.6 + return score diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/data.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/data.py new file mode 100644 index 0000000..f41a51e --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/data.py @@ -0,0 +1,530 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.data + ~~~~~~~~~~~~~~~~~~~~ + + Lexers for data file format. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, ExtendedRegexLexer, LexerContext, \ + include, bygroups, inherit +from pygments.token import Text, Comment, Keyword, Name, String, Number, \ + Punctuation, Literal + +__all__ = ['YamlLexer', 'JsonLexer', 'JsonLdLexer'] + + +class YamlLexerContext(LexerContext): + """Indentation context for the YAML lexer.""" + + def __init__(self, *args, **kwds): + super(YamlLexerContext, self).__init__(*args, **kwds) + self.indent_stack = [] + self.indent = -1 + self.next_indent = 0 + self.block_scalar_indent = None + + +class YamlLexer(ExtendedRegexLexer): + """ + Lexer for `YAML `_, a human-friendly data serialization + language. + + .. 
versionadded:: 0.11 + """ + + name = 'YAML' + aliases = ['yaml'] + filenames = ['*.yaml', '*.yml'] + mimetypes = ['text/x-yaml'] + + def something(token_class): + """Do not produce empty tokens.""" + def callback(lexer, match, context): + text = match.group() + if not text: + return + yield match.start(), token_class, text + context.pos = match.end() + return callback + + def reset_indent(token_class): + """Reset the indentation levels.""" + def callback(lexer, match, context): + text = match.group() + context.indent_stack = [] + context.indent = -1 + context.next_indent = 0 + context.block_scalar_indent = None + yield match.start(), token_class, text + context.pos = match.end() + return callback + + def save_indent(token_class, start=False): + """Save a possible indentation level.""" + def callback(lexer, match, context): + text = match.group() + extra = '' + if start: + context.next_indent = len(text) + if context.next_indent < context.indent: + while context.next_indent < context.indent: + context.indent = context.indent_stack.pop() + if context.next_indent > context.indent: + extra = text[context.indent:] + text = text[:context.indent] + else: + context.next_indent += len(text) + if text: + yield match.start(), token_class, text + if extra: + yield match.start()+len(text), token_class.Error, extra + context.pos = match.end() + return callback + + def set_indent(token_class, implicit=False): + """Set the previously saved indentation level.""" + def callback(lexer, match, context): + text = match.group() + if context.indent < context.next_indent: + context.indent_stack.append(context.indent) + context.indent = context.next_indent + if not implicit: + context.next_indent += len(text) + yield match.start(), token_class, text + context.pos = match.end() + return callback + + def set_block_scalar_indent(token_class): + """Set an explicit indentation level for a block scalar.""" + def callback(lexer, match, context): + text = match.group() + 
context.block_scalar_indent = None + if not text: + return + increment = match.group(1) + if increment: + current_indent = max(context.indent, 0) + increment = int(increment) + context.block_scalar_indent = current_indent + increment + if text: + yield match.start(), token_class, text + context.pos = match.end() + return callback + + def parse_block_scalar_empty_line(indent_token_class, content_token_class): + """Process an empty line in a block scalar.""" + def callback(lexer, match, context): + text = match.group() + if (context.block_scalar_indent is None or + len(text) <= context.block_scalar_indent): + if text: + yield match.start(), indent_token_class, text + else: + indentation = text[:context.block_scalar_indent] + content = text[context.block_scalar_indent:] + yield match.start(), indent_token_class, indentation + yield (match.start()+context.block_scalar_indent, + content_token_class, content) + context.pos = match.end() + return callback + + def parse_block_scalar_indent(token_class): + """Process indentation spaces in a block scalar.""" + def callback(lexer, match, context): + text = match.group() + if context.block_scalar_indent is None: + if len(text) <= max(context.indent, 0): + context.stack.pop() + context.stack.pop() + return + context.block_scalar_indent = len(text) + else: + if len(text) < context.block_scalar_indent: + context.stack.pop() + context.stack.pop() + return + if text: + yield match.start(), token_class, text + context.pos = match.end() + return callback + + def parse_plain_scalar_indent(token_class): + """Process indentation spaces in a plain scalar.""" + def callback(lexer, match, context): + text = match.group() + if len(text) <= context.indent: + context.stack.pop() + context.stack.pop() + return + if text: + yield match.start(), token_class, text + context.pos = match.end() + return callback + + tokens = { + # the root rules + 'root': [ + # ignored whitespaces + (r'[ ]+(?=#|$)', Text), + # line breaks + (r'\n+', Text), + # a 
comment + (r'#[^\n]*', Comment.Single), + # the '%YAML' directive + (r'^%YAML(?=[ ]|$)', reset_indent(Name.Tag), 'yaml-directive'), + # the %TAG directive + (r'^%TAG(?=[ ]|$)', reset_indent(Name.Tag), 'tag-directive'), + # document start and document end indicators + (r'^(?:---|\.\.\.)(?=[ ]|$)', reset_indent(Name.Namespace), + 'block-line'), + # indentation spaces + (r'[ ]*(?!\s|$)', save_indent(Text, start=True), + ('block-line', 'indentation')), + ], + + # trailing whitespaces after directives or a block scalar indicator + 'ignored-line': [ + # ignored whitespaces + (r'[ ]+(?=#|$)', Text), + # a comment + (r'#[^\n]*', Comment.Single), + # line break + (r'\n', Text, '#pop:2'), + ], + + # the %YAML directive + 'yaml-directive': [ + # the version number + (r'([ ]+)([0-9]+\.[0-9]+)', + bygroups(Text, Number), 'ignored-line'), + ], + + # the %YAG directive + 'tag-directive': [ + # a tag handle and the corresponding prefix + (r'([ ]+)(!|![\w-]*!)' + r'([ ]+)(!|!?[\w;/?:@&=+$,.!~*\'()\[\]%-]+)', + bygroups(Text, Keyword.Type, Text, Keyword.Type), + 'ignored-line'), + ], + + # block scalar indicators and indentation spaces + 'indentation': [ + # trailing whitespaces are ignored + (r'[ ]*$', something(Text), '#pop:2'), + # whitespaces preceeding block collection indicators + (r'[ ]+(?=[?:-](?:[ ]|$))', save_indent(Text)), + # block collection indicators + (r'[?:-](?=[ ]|$)', set_indent(Punctuation.Indicator)), + # the beginning a block line + (r'[ ]*', save_indent(Text), '#pop'), + ], + + # an indented line in the block context + 'block-line': [ + # the line end + (r'[ ]*(?=#|$)', something(Text), '#pop'), + # whitespaces separating tokens + (r'[ ]+', Text), + # tags, anchors and aliases, + include('descriptors'), + # block collections and scalars + include('block-nodes'), + # flow collections and quoted scalars + include('flow-nodes'), + # a plain scalar + (r'(?=[^\s?:,\[\]{}#&*!|>\'"%@`-]|[?:-]\S)', + something(Name.Variable), + 'plain-scalar-in-block-context'), + ], + 
+ # tags, anchors, aliases + 'descriptors': [ + # a full-form tag + (r'!<[\w;/?:@&=+$,.!~*\'()\[\]%-]+>', Keyword.Type), + # a tag in the form '!', '!suffix' or '!handle!suffix' + (r'!(?:[\w-]+)?' + r'(?:![\w;/?:@&=+$,.!~*\'()\[\]%-]+)?', Keyword.Type), + # an anchor + (r'&[\w-]+', Name.Label), + # an alias + (r'\*[\w-]+', Name.Variable), + ], + + # block collections and scalars + 'block-nodes': [ + # implicit key + (r':(?=[ ]|$)', set_indent(Punctuation.Indicator, implicit=True)), + # literal and folded scalars + (r'[|>]', Punctuation.Indicator, + ('block-scalar-content', 'block-scalar-header')), + ], + + # flow collections and quoted scalars + 'flow-nodes': [ + # a flow sequence + (r'\[', Punctuation.Indicator, 'flow-sequence'), + # a flow mapping + (r'\{', Punctuation.Indicator, 'flow-mapping'), + # a single-quoted scalar + (r'\'', String, 'single-quoted-scalar'), + # a double-quoted scalar + (r'\"', String, 'double-quoted-scalar'), + ], + + # the content of a flow collection + 'flow-collection': [ + # whitespaces + (r'[ ]+', Text), + # line breaks + (r'\n+', Text), + # a comment + (r'#[^\n]*', Comment.Single), + # simple indicators + (r'[?:,]', Punctuation.Indicator), + # tags, anchors and aliases + include('descriptors'), + # nested collections and quoted scalars + include('flow-nodes'), + # a plain scalar + (r'(?=[^\s?:,\[\]{}#&*!|>\'"%@`])', + something(Name.Variable), + 'plain-scalar-in-flow-context'), + ], + + # a flow sequence indicated by '[' and ']' + 'flow-sequence': [ + # include flow collection rules + include('flow-collection'), + # the closing indicator + (r'\]', Punctuation.Indicator, '#pop'), + ], + + # a flow mapping indicated by '{' and '}' + 'flow-mapping': [ + # include flow collection rules + include('flow-collection'), + # the closing indicator + (r'\}', Punctuation.Indicator, '#pop'), + ], + + # block scalar lines + 'block-scalar-content': [ + # line break + (r'\n', Text), + # empty line + (r'^[ ]+$', + parse_block_scalar_empty_line(Text, 
Name.Constant)), + # indentation spaces (we may leave the state here) + (r'^[ ]*', parse_block_scalar_indent(Text)), + # line content + (r'[\S\t ]+', Name.Constant), + ], + + # the content of a literal or folded scalar + 'block-scalar-header': [ + # indentation indicator followed by chomping flag + (r'([1-9])?[+-]?(?=[ ]|$)', + set_block_scalar_indent(Punctuation.Indicator), + 'ignored-line'), + # chomping flag followed by indentation indicator + (r'[+-]?([1-9])?(?=[ ]|$)', + set_block_scalar_indent(Punctuation.Indicator), + 'ignored-line'), + ], + + # ignored and regular whitespaces in quoted scalars + 'quoted-scalar-whitespaces': [ + # leading and trailing whitespaces are ignored + (r'^[ ]+', Text), + (r'[ ]+$', Text), + # line breaks are ignored + (r'\n+', Text), + # other whitespaces are a part of the value + (r'[ ]+', Name.Variable), + ], + + # single-quoted scalars + 'single-quoted-scalar': [ + # include whitespace and line break rules + include('quoted-scalar-whitespaces'), + # escaping of the quote character + (r'\'\'', String.Escape), + # regular non-whitespace characters + (r'[^\s\']+', String), + # the closing quote + (r'\'', String, '#pop'), + ], + + # double-quoted scalars + 'double-quoted-scalar': [ + # include whitespace and line break rules + include('quoted-scalar-whitespaces'), + # escaping of special characters + (r'\\[0abt\tn\nvfre "\\N_LP]', String), + # escape codes + (r'\\(?:x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|U[0-9A-Fa-f]{8})', + String.Escape), + # regular non-whitespace characters + (r'[^\s"\\]+', String), + # the closing quote + (r'"', String, '#pop'), + ], + + # the beginning of a new line while scanning a plain scalar + 'plain-scalar-in-block-context-new-line': [ + # empty lines + (r'^[ ]+$', Text), + # line breaks + (r'\n+', Text), + # document start and document end indicators + (r'^(?=---|\.\.\.)', something(Name.Namespace), '#pop:3'), + # indentation spaces (we may leave the block line state here) + (r'^[ ]*', 
parse_plain_scalar_indent(Text), '#pop'), + ], + + # a plain scalar in the block context + 'plain-scalar-in-block-context': [ + # the scalar ends with the ':' indicator + (r'[ ]*(?=:[ ]|:$)', something(Text), '#pop'), + # the scalar ends with whitespaces followed by a comment + (r'[ ]+(?=#)', Text, '#pop'), + # trailing whitespaces are ignored + (r'[ ]+$', Text), + # line breaks are ignored + (r'\n+', Text, 'plain-scalar-in-block-context-new-line'), + # other whitespaces are a part of the value + (r'[ ]+', Literal.Scalar.Plain), + # regular non-whitespace characters + (r'(?::(?!\s)|[^\s:])+', Literal.Scalar.Plain), + ], + + # a plain scalar is the flow context + 'plain-scalar-in-flow-context': [ + # the scalar ends with an indicator character + (r'[ ]*(?=[,:?\[\]{}])', something(Text), '#pop'), + # the scalar ends with a comment + (r'[ ]+(?=#)', Text, '#pop'), + # leading and trailing whitespaces are ignored + (r'^[ ]+', Text), + (r'[ ]+$', Text), + # line breaks are ignored + (r'\n+', Text), + # other whitespaces are a part of the value + (r'[ ]+', Name.Variable), + # regular non-whitespace characters + (r'[^\s,:?\[\]{}]+', Name.Variable), + ], + + } + + def get_tokens_unprocessed(self, text=None, context=None): + if context is None: + context = YamlLexerContext(text, 0) + return super(YamlLexer, self).get_tokens_unprocessed(text, context) + + +class JsonLexer(RegexLexer): + """ + For JSON data structures. + + .. 
versionadded:: 1.5 + """ + + name = 'JSON' + aliases = ['json'] + filenames = ['*.json'] + mimetypes = ['application/json'] + + flags = re.DOTALL + + # integer part of a number + int_part = r'-?(0|[1-9]\d*)' + + # fractional part of a number + frac_part = r'\.\d+' + + # exponential part of a number + exp_part = r'[eE](\+|-)?\d+' + + tokens = { + 'whitespace': [ + (r'\s+', Text), + ], + + # represents a simple terminal value + 'simplevalue': [ + (r'(true|false|null)\b', Keyword.Constant), + (('%(int_part)s(%(frac_part)s%(exp_part)s|' + '%(exp_part)s|%(frac_part)s)') % vars(), + Number.Float), + (int_part, Number.Integer), + (r'"(\\\\|\\"|[^"])*"', String.Double), + ], + + + # the right hand side of an object, after the attribute name + 'objectattribute': [ + include('value'), + (r':', Punctuation), + # comma terminates the attribute but expects more + (r',', Punctuation, '#pop'), + # a closing bracket terminates the entire object, so pop twice + (r'\}', Punctuation, ('#pop', '#pop')), + ], + + # a json object - { attr, attr, ... } + 'objectvalue': [ + include('whitespace'), + (r'"(\\\\|\\"|[^"])*"', Name.Tag, 'objectattribute'), + (r'\}', Punctuation, '#pop'), + ], + + # json array - [ value, value, ... } + 'arrayvalue': [ + include('whitespace'), + include('value'), + (r',', Punctuation), + (r'\]', Punctuation, '#pop'), + ], + + # a json value - either a simple value or a complex value (object or array) + 'value': [ + include('whitespace'), + include('simplevalue'), + (r'\{', Punctuation, 'objectvalue'), + (r'\[', Punctuation, 'arrayvalue'), + ], + + # the root of a json document whould be a value + 'root': [ + include('value'), + ], + } + +class JsonLdLexer(JsonLexer): + """ + For `JSON-LD `_ linked data. + + .. 
versionadded:: 2.0 + """ + + name = 'JSON-LD' + aliases = ['jsonld', 'json-ld'] + filenames = ['*.jsonld'] + mimetypes = ['application/ld+json'] + + tokens = { + 'objectvalue': [ + (r'"@(context|id|value|language|type|container|list|set|' + r'reverse|index|base|vocab|graph)"', Name.Decorator, + 'objectattribute'), + inherit, + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/diff.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/diff.py new file mode 100644 index 0000000..a910a97 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/diff.py @@ -0,0 +1,106 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.diff + ~~~~~~~~~~~~~~~~~~~~ + + Lexers for diff/patch formats. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, include, bygroups +from pygments.token import Text, Comment, Operator, Keyword, Name, Generic, \ + Literal + +__all__ = ['DiffLexer', 'DarcsPatchLexer'] + + +class DiffLexer(RegexLexer): + """ + Lexer for unified or context-style diffs or patches. + """ + + name = 'Diff' + aliases = ['diff', 'udiff'] + filenames = ['*.diff', '*.patch'] + mimetypes = ['text/x-diff', 'text/x-patch'] + + tokens = { + 'root': [ + (r' .*\n', Text), + (r'\+.*\n', Generic.Inserted), + (r'-.*\n', Generic.Deleted), + (r'!.*\n', Generic.Strong), + (r'@.*\n', Generic.Subheading), + (r'([Ii]ndex|diff).*\n', Generic.Heading), + (r'=.*\n', Generic.Heading), + (r'.*\n', Text), + ] + } + + def analyse_text(text): + if text[:7] == 'Index: ': + return True + if text[:5] == 'diff ': + return True + if text[:4] == '--- ': + return 0.9 + + +class DarcsPatchLexer(RegexLexer): + """ + DarcsPatchLexer is a lexer for the various versions of the darcs patch + format. Examples of this format are derived by commands such as + ``darcs annotate --patch`` and ``darcs send``. + + .. 
versionadded:: 0.10 + """ + + name = 'Darcs Patch' + aliases = ['dpatch'] + filenames = ['*.dpatch', '*.darcspatch'] + + DPATCH_KEYWORDS = ('hunk', 'addfile', 'adddir', 'rmfile', 'rmdir', 'move', + 'replace') + + tokens = { + 'root': [ + (r'<', Operator), + (r'>', Operator), + (r'\{', Operator), + (r'\}', Operator), + (r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)(\])', + bygroups(Operator, Keyword, Name, Text, Name, Operator, + Literal.Date, Text, Operator)), + (r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)', + bygroups(Operator, Keyword, Name, Text, Name, Operator, + Literal.Date, Text), 'comment'), + (r'New patches:', Generic.Heading), + (r'Context:', Generic.Heading), + (r'Patch bundle hash:', Generic.Heading), + (r'(\s*)(%s)(.*\n)' % '|'.join(DPATCH_KEYWORDS), + bygroups(Text, Keyword, Text)), + (r'\+', Generic.Inserted, "insert"), + (r'-', Generic.Deleted, "delete"), + (r'.*\n', Text), + ], + 'comment': [ + (r'[^\]].*\n', Comment), + (r'\]', Operator, "#pop"), + ], + 'specialText': [ # darcs add [_CODE_] special operators for clarity + (r'\n', Text, "#pop"), # line-based + (r'\[_[^_]*_]', Operator), + ], + 'insert': [ + include('specialText'), + (r'\[', Generic.Inserted), + (r'[^\n\[]+', Generic.Inserted), + ], + 'delete': [ + include('specialText'), + (r'\[', Generic.Deleted), + (r'[^\n\[]+', Generic.Deleted), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/dotnet.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/dotnet.py similarity index 80% rename from plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/dotnet.py rename to plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/dotnet.py index a603086..39c03d4 100644 --- a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/dotnet.py +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/dotnet.py @@ -5,19 +5,19 @@ Lexers for .net languages. 
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, DelegatingLexer, bygroups, include, \ - using, this + using, this, default from pygments.token import Punctuation, \ - Text, Comment, Operator, Keyword, Name, String, Number, Literal, Other -from pygments.util import get_choice_opt + Text, Comment, Operator, Keyword, Name, String, Number, Literal, Other +from pygments.util import get_choice_opt, iteritems from pygments import unistring as uni -from pygments.lexers.web import XmlLexer +from pygments.lexers.html import XmlLexer __all__ = ['CSharpLexer', 'NemerleLexer', 'BooLexer', 'VbNetLexer', 'CSharpAspxLexer', 'VbNetAspxLexer', 'FSharpLexer'] @@ -44,24 +44,24 @@ class CSharpLexer(RegexLexer): The default value is ``basic``. - *New in Pygments 0.8.* + .. versionadded:: 0.8 """ name = 'C#' aliases = ['csharp', 'c#'] filenames = ['*.cs'] - mimetypes = ['text/x-csharp'] # inferred + mimetypes = ['text/x-csharp'] # inferred flags = re.MULTILINE | re.DOTALL | re.UNICODE - # for the range of allowed unicode characters in identifiers, - # see http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf + # for the range of allowed unicode characters in identifiers, see + # http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf levels = { - 'none': '@?[_a-zA-Z][a-zA-Z0-9_]*', - 'basic': ('@?[_' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl + ']' + - '[' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl + - uni.Nd + uni.Pc + uni.Cf + uni.Mn + uni.Mc + ']*'), + 'none': '@?[_a-zA-Z]\w*', + 'basic': ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' + + '[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc', + 'Cf', 'Mn', 'Mc') + ']*'), 'full': ('@?(?:_|[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])' + '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 
'Lo', 'Nl', @@ -71,17 +71,17 @@ class CSharpLexer(RegexLexer): tokens = {} token_variants = True - for levelname, cs_ident in levels.items(): + for levelname, cs_ident in iteritems(levels): tokens[levelname] = { 'root': [ # method names - (r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)' # return type - r'(' + cs_ident + ')' # method name + (r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)' # return type + r'(' + cs_ident + ')' # method name r'(\s*)(\()', # signature start bygroups(using(this), Name.Function, Text, Punctuation)), (r'^\s*\[.*?\]', Name.Attribute), (r'[^\S\n]+', Text), - (r'\\\n', Text), # line continuation + (r'\\\n', Text), # line continuation (r'//.*?\n', Comment.Single), (r'/[*].*?[*]/', Comment.Multiline), (r'\n', Text), @@ -117,16 +117,17 @@ class CSharpLexer(RegexLexer): (cs_ident, Name), ], 'class': [ - (cs_ident, Name.Class, '#pop') + (cs_ident, Name.Class, '#pop'), + default('#pop'), ], 'namespace': [ - (r'(?=\()', Text, '#pop'), # using (resource) - ('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop') + (r'(?=\()', Text, '#pop'), # using (resource) + ('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop'), ] } def __init__(self, **options): - level = get_choice_opt(options, 'unicodelevel', self.tokens.keys(), 'basic') + level = get_choice_opt(options, 'unicodelevel', list(self.tokens), 'basic') if level not in self._all_tokens: # compile the regexes now self._tokens = self.__class__.process_tokendef(level) @@ -156,44 +157,44 @@ class NemerleLexer(RegexLexer): The default value is ``basic``. - *New in Pygments 1.5.* + .. 
versionadded:: 1.5 """ name = 'Nemerle' aliases = ['nemerle'] filenames = ['*.n'] - mimetypes = ['text/x-nemerle'] # inferred + mimetypes = ['text/x-nemerle'] # inferred flags = re.MULTILINE | re.DOTALL | re.UNICODE # for the range of allowed unicode characters in identifiers, see # http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf - levels = dict( - none = '@?[_a-zA-Z][a-zA-Z0-9_]*', - basic = ('@?[_' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl + ']' + - '[' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl + - uni.Nd + uni.Pc + uni.Cf + uni.Mn + uni.Mc + ']*'), - full = ('@?(?:_|[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', - 'Nl') + '])' - + '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl', - 'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'), - ) + levels = { + 'none': '@?[_a-zA-Z]\w*', + 'basic': ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' + + '[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc', + 'Cf', 'Mn', 'Mc') + ']*'), + 'full': ('@?(?:_|[^' + + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])' + + '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl', + 'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'), + } tokens = {} token_variants = True - for levelname, cs_ident in levels.items(): + for levelname, cs_ident in iteritems(levels): tokens[levelname] = { 'root': [ # method names - (r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)' # return type - r'(' + cs_ident + ')' # method name + (r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)' # return type + r'(' + cs_ident + ')' # method name r'(\s*)(\()', # signature start bygroups(using(this), Name.Function, Text, Punctuation)), (r'^\s*\[.*?\]', Name.Attribute), (r'[^\S\n]+', Text), - (r'\\\n', Text), # line continuation + (r'\\\n', Text), # line continuation (r'//.*?\n', Comment.Single), (r'/[*].*?[*]/', Comment.Multiline), (r'\n', Text), @@ -249,7 +250,7 @@ class NemerleLexer(RegexLexer): (cs_ident, Name.Class, '#pop') ], 'namespace': [ - (r'(?=\()', Text, '#pop'), # using 
(resource) + (r'(?=\()', Text, '#pop'), # using (resource) ('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop') ], 'splice-string': [ @@ -284,7 +285,7 @@ class NemerleLexer(RegexLexer): } def __init__(self, **options): - level = get_choice_opt(options, 'unicodelevel', self.tokens.keys(), + level = get_choice_opt(options, 'unicodelevel', list(self.tokens), 'basic') if level not in self._all_tokens: # compile the regexes now @@ -336,9 +337,9 @@ class BooLexer(RegexLexer): (r'"""(\\\\|\\"|.*?)"""', String.Double), (r'"(\\\\|\\"|[^"]*?)"', String.Double), (r"'(\\\\|\\'|[^']*?)'", String.Single), - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name), + (r'[a-zA-Z_]\w*', Name), (r'(\d+\.\d*|\d*\.\d+)([fF][+-]?[0-9]+)?', Number.Float), - (r'[0-9][0-9\.]*(ms?|d|h|s)', Number), + (r'[0-9][0-9.]*(ms?|d|h|s)', Number), (r'0\d+', Number.Oct), (r'0x[a-fA-F0-9]+', Number.Hex), (r'\d+L', Number.Integer.Long), @@ -351,13 +352,13 @@ class BooLexer(RegexLexer): ('[*/]', Comment.Multiline) ], 'funcname': [ - ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Function, '#pop') + ('[a-zA-Z_]\w*', Name.Function, '#pop') ], 'classname': [ - ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop') + ('[a-zA-Z_]\w*', Name.Class, '#pop') ], 'namespace': [ - ('[a-zA-Z_][a-zA-Z0-9_.]*', Name.Namespace, '#pop') + ('[a-zA-Z_][\w.]*', Name.Namespace, '#pop') ] } @@ -372,7 +373,11 @@ class VbNetLexer(RegexLexer): name = 'VB.net' aliases = ['vb.net', 'vbnet'] filenames = ['*.vb', '*.bas'] - mimetypes = ['text/x-vbnet', 'text/x-vba'] # (?) + mimetypes = ['text/x-vbnet', 'text/x-vba'] # (?) 
+ + uni_name = '[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' + \ + '[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc', + 'Cf', 'Mn', 'Mc') + ']*' flags = re.MULTILINE | re.IGNORECASE tokens = { @@ -382,11 +387,11 @@ class VbNetLexer(RegexLexer): (r'\n', Text), (r'rem\b.*?\n', Comment), (r"'.*?\n", Comment), - (r'#If\s.*?\sThen|#ElseIf\s.*?\sThen|#End\s+If|#Const|' + (r'#If\s.*?\sThen|#ElseIf\s.*?\sThen|#Else|#End\s+If|#Const|' r'#ExternalSource.*?\n|#End\s+ExternalSource|' r'#Region.*?\n|#End\s+Region|#ExternalChecksum', Comment.Preproc), - (r'[\(\){}!#,.:]', Punctuation), + (r'[(){}!#,.:]', Punctuation), (r'Option\s+(Strict|Explicit|Compare)\s+' r'(On|Off|Binary|Text)', Keyword.Declaration), (r'(?>=|<<|>>|:=|' - r'<=|>=|<>|[-&*/\\^+=<>]', + r'<=|>=|<>|[-&*/\\^+=<>\[\]]', Operator), ('"', String, 'string'), - ('[a-zA-Z_][a-zA-Z0-9_]*[%&@!#$]?', Name), + (r'_\n', Text), # Line continuation (must be before Name) + (uni_name + '[%&@!#$]?', Name), ('#.*?#', Literal.Date), - (r'(\d+\.\d*|\d*\.\d+)([fF][+-]?[0-9]+)?', Number.Float), + (r'(\d+\.\d*|\d*\.\d+)(F[+-]?[0-9]+)?', Number.Float), (r'\d+([SILDFR]|US|UI|UL)?', Number.Integer), (r'&H[0-9a-f]+([SILDFR]|US|UI|UL)?', Number.Integer), (r'&O[0-7]+([SILDFR]|US|UI|UL)?', Number.Integer), - (r'_\n', Text), # Line continuation ], 'string': [ (r'""', String), @@ -439,26 +444,32 @@ class VbNetLexer(RegexLexer): (r'[^"]+', String), ], 'dim': [ - (r'[a-z_][a-z0-9_]*', Name.Variable, '#pop'), - (r'', Text, '#pop'), # any other syntax + (uni_name, Name.Variable, '#pop'), + default('#pop'), # any other syntax ], 'funcname': [ - (r'[a-z_][a-z0-9_]*', Name.Function, '#pop'), + (uni_name, Name.Function, '#pop'), ], 'classname': [ - (r'[a-z_][a-z0-9_]*', Name.Class, '#pop'), + (uni_name, Name.Class, '#pop'), ], 'namespace': [ - (r'[a-z_][a-z0-9_.]*', Name.Namespace, '#pop'), + (uni_name, Name.Namespace), + (r'\.', Name.Namespace), + default('#pop'), ], 'end': [ (r'\s+', Text), 
(r'(Function|Sub|Property|Class|Structure|Enum|Module|Namespace)\b', Keyword, '#pop'), - (r'', Text, '#pop'), + default('#pop'), ] } + def analyse_text(text): + if re.search(r'^\s*(#If|Module|Namespace)', text, re.MULTILINE): + return 0.5 + class GenericAspxLexer(RegexLexer): """ @@ -483,7 +494,7 @@ class GenericAspxLexer(RegexLexer): } -#TODO support multiple languages within the same source file +# TODO support multiple languages within the same source file class CSharpAspxLexer(DelegatingLexer): """ Lexer for highligting C# within ASP.NET pages. @@ -495,7 +506,7 @@ class CSharpAspxLexer(DelegatingLexer): mimetypes = [] def __init__(self, **options): - super(CSharpAspxLexer, self).__init__(CSharpLexer,GenericAspxLexer, + super(CSharpAspxLexer, self).__init__(CSharpLexer, GenericAspxLexer, **options) def analyse_text(text): @@ -516,8 +527,8 @@ class VbNetAspxLexer(DelegatingLexer): mimetypes = [] def __init__(self, **options): - super(VbNetAspxLexer, self).__init__(VbNetLexer,GenericAspxLexer, - **options) + super(VbNetAspxLexer, self).__init__(VbNetLexer, GenericAspxLexer, + **options) def analyse_text(text): if re.search(r'Page\s*Language="Vb"', text, re.I) is not None: @@ -531,7 +542,10 @@ class FSharpLexer(RegexLexer): """ For the F# language (version 3.0). - *New in Pygments 1.5.* + AAAAACK Strings + http://research.microsoft.com/en-us/um/cambridge/projects/fsharp/manual/spec.html#_Toc335818775 + + .. 
versionadded:: 1.5 """ name = 'FSharp' @@ -540,15 +554,15 @@ class FSharpLexer(RegexLexer): mimetypes = ['text/x-fsharp'] keywords = [ - 'abstract', 'as', 'assert', 'base', 'begin', 'class', 'default', - 'delegate', 'do!', 'do', 'done', 'downcast', 'downto', 'elif', 'else', - 'end', 'exception', 'extern', 'false', 'finally', 'for', 'function', - 'fun', 'global', 'if', 'inherit', 'inline', 'interface', 'internal', - 'in', 'lazy', 'let!', 'let', 'match', 'member', 'module', 'mutable', - 'namespace', 'new', 'null', 'of', 'open', 'override', 'private', 'public', - 'rec', 'return!', 'return', 'select', 'static', 'struct', 'then', 'to', - 'true', 'try', 'type', 'upcast', 'use!', 'use', 'val', 'void', 'when', - 'while', 'with', 'yield!', 'yield', + 'abstract', 'as', 'assert', 'base', 'begin', 'class', 'default', + 'delegate', 'do!', 'do', 'done', 'downcast', 'downto', 'elif', 'else', + 'end', 'exception', 'extern', 'false', 'finally', 'for', 'function', + 'fun', 'global', 'if', 'inherit', 'inline', 'interface', 'internal', + 'in', 'lazy', 'let!', 'let', 'match', 'member', 'module', 'mutable', + 'namespace', 'new', 'null', 'of', 'open', 'override', 'private', 'public', + 'rec', 'return!', 'return', 'select', 'static', 'struct', 'then', 'to', + 'true', 'try', 'type', 'upcast', 'use!', 'use', 'val', 'void', 'when', + 'while', 'with', 'yield!', 'yield', ] # Reserved words; cannot hurt to color them as keywords too. 
keywords += [ @@ -559,10 +573,10 @@ class FSharpLexer(RegexLexer): 'virtual', 'volatile', ] keyopts = [ - '!=', '#', '&&', '&', '\(', '\)', '\*', '\+', ',', '-\.', - '->', '-', '\.\.', '\.', '::', ':=', ':>', ':', ';;', ';', '<-', - '<\]', '<', '>\]', '>', '\?\?', '\?', '\[<', '\[\|', '\[', '\]', - '_', '`', '{', '\|\]', '\|', '}', '~', '<@@', '<@', '=', '@>', '@@>', + '!=', '#', '&&', '&', '\(', '\)', '\*', '\+', ',', '-\.', + '->', '-', '\.\.', '\.', '::', ':=', ':>', ':', ';;', ';', '<-', + '<\]', '<', '>\]', '>', '\?\?', '\?', '\[<', '\[\|', '\[', '\]', + '_', '`', '\{', '\|\]', '\|', '\}', '~', '<@@', '<@', '=', '@>', '@@>', ] operators = r'[!$%&*+\./:<=>?@^|~-]' @@ -581,7 +595,7 @@ class FSharpLexer(RegexLexer): tokens = { 'escape-sequence': [ - (r'\\[\\\"\'ntbrafv]', String.Escape), + (r'\\[\\"\'ntbrafv]', String.Escape), (r'\\[0-9]{3}', String.Escape), (r'\\u[0-9a-fA-F]{4}', String.Escape), (r'\\U[0-9a-fA-F]{8}', String.Escape), @@ -589,9 +603,9 @@ class FSharpLexer(RegexLexer): 'root': [ (r'\s+', Text), (r'\(\)|\[\]', Name.Builtin.Pseudo), - (r'\b(?`_ + definition files. + + .. 
versionadded:: 1.4 + """ + + name = 'Protocol Buffer' + aliases = ['protobuf', 'proto'] + filenames = ['*.proto'] + + tokens = { + 'root': [ + (r'[ \t]+', Text), + (r'[,;{}\[\]()]', Punctuation), + (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single), + (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline), + (words(( + 'import', 'option', 'optional', 'required', 'repeated', 'default', + 'packed', 'ctype', 'extensions', 'to', 'max', 'rpc', 'returns', + 'oneof'), prefix=r'\b', suffix=r'\b'), + Keyword), + (words(( + 'int32', 'int64', 'uint32', 'uint64', 'sint32', 'sint64', + 'fixed32', 'fixed64', 'sfixed32', 'sfixed64', + 'float', 'double', 'bool', 'string', 'bytes'), suffix=r'\b'), + Keyword.Type), + (r'(true|false)\b', Keyword.Constant), + (r'(package)(\s+)', bygroups(Keyword.Namespace, Text), 'package'), + (r'(message|extend)(\s+)', + bygroups(Keyword.Declaration, Text), 'message'), + (r'(enum|group|service)(\s+)', + bygroups(Keyword.Declaration, Text), 'type'), + (r'\".*?\"', String), + (r'\'.*?\'', String), + (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float), + (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), + (r'(\-?(inf|nan))\b', Number.Float), + (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex), + (r'0[0-7]+[LlUu]*', Number.Oct), + (r'\d+[LlUu]*', Number.Integer), + (r'[+-=]', Operator), + (r'([a-zA-Z_][\w.]*)([ \t]*)(=)', + bygroups(Name.Attribute, Text, Operator)), + ('[a-zA-Z_][\w.]*', Name), + ], + 'package': [ + (r'[a-zA-Z_]\w*', Name.Namespace, '#pop'), + default('#pop'), + ], + 'message': [ + (r'[a-zA-Z_]\w*', Name.Class, '#pop'), + default('#pop'), + ], + 'type': [ + (r'[a-zA-Z_]\w*', Name, '#pop'), + default('#pop'), + ], + } + + +class BroLexer(RegexLexer): + """ + For `Bro `_ scripts. + + .. versionadded:: 1.5 + """ + name = 'Bro' + aliases = ['bro'] + filenames = ['*.bro'] + + _hex = r'[0-9a-fA-F_]' + _float = r'((\d*\.?\d+)|(\d+\.?\d*))([eE][-+]?\d+)?' 
+ _h = r'[A-Za-z0-9][-A-Za-z0-9]*' + + tokens = { + 'root': [ + # Whitespace + (r'^@.*?\n', Comment.Preproc), + (r'#.*?\n', Comment.Single), + (r'\n', Text), + (r'\s+', Text), + (r'\\\n', Text), + # Keywords + (r'(add|alarm|break|case|const|continue|delete|do|else|enum|event' + r'|export|for|function|if|global|hook|local|module|next' + r'|of|print|redef|return|schedule|switch|type|when|while)\b', Keyword), + (r'(addr|any|bool|count|counter|double|file|int|interval|net' + r'|pattern|port|record|set|string|subnet|table|time|timer' + r'|vector)\b', Keyword.Type), + (r'(T|F)\b', Keyword.Constant), + (r'(&)((?:add|delete|expire)_func|attr|(?:create|read|write)_expire' + r'|default|disable_print_hook|raw_output|encrypt|group|log' + r'|mergeable|optional|persistent|priority|redef' + r'|rotate_(?:interval|size)|synchronized)\b', + bygroups(Punctuation, Keyword)), + (r'\s+module\b', Keyword.Namespace), + # Addresses, ports and networks + (r'\d+/(tcp|udp|icmp|unknown)\b', Number), + (r'(\d+\.){3}\d+', Number), + (r'(' + _hex + r'){7}' + _hex, Number), + (r'0x' + _hex + r'(' + _hex + r'|:)*::(' + _hex + r'|:)*', Number), + (r'((\d+|:)(' + _hex + r'|:)*)?::(' + _hex + r'|:)*', Number), + (r'(\d+\.\d+\.|(\d+\.){2}\d+)', Number), + # Hostnames + (_h + r'(\.' 
+ _h + r')+', String), + # Numeric + (_float + r'\s+(day|hr|min|sec|msec|usec)s?\b', Literal.Date), + (r'0[xX]' + _hex, Number.Hex), + (_float, Number.Float), + (r'\d+', Number.Integer), + (r'/', String.Regex, 'regex'), + (r'"', String, 'string'), + # Operators + (r'[!%*/+:<=>?~|-]', Operator), + (r'([-+=&|]{2}|[+=!><-]=)', Operator), + (r'(in|match)\b', Operator.Word), + (r'[{}()\[\]$.,;]', Punctuation), + # Identfier + (r'([_a-zA-Z]\w*)(::)', bygroups(Name, Name.Namespace)), + (r'[a-zA-Z_]\w*', Name) + ], + 'string': [ + (r'"', String, '#pop'), + (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), + (r'[^\\"\n]+', String), + (r'\\\n', String), + (r'\\', String) + ], + 'regex': [ + (r'/', String.Regex, '#pop'), + (r'\\[\\nt/]', String.Regex), # String.Escape is too intense here. + (r'[^\\/\n]+', String.Regex), + (r'\\\n', String.Regex), + (r'\\', String.Regex) + ] + } + + +class PuppetLexer(RegexLexer): + """ + For `Puppet `__ configuration DSL. + + .. versionadded:: 1.6 + """ + name = 'Puppet' + aliases = ['puppet'] + filenames = ['*.pp'] + + tokens = { + 'root': [ + include('comments'), + include('keywords'), + include('names'), + include('numbers'), + include('operators'), + include('strings'), + + (r'[]{}:(),;[]', Punctuation), + (r'[^\S\n]+', Text), + ], + + 'comments': [ + (r'\s*#.*$', Comment), + (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), + ], + + 'operators': [ + (r'(=>|\?|<|>|=|\+|-|/|\*|~|!|\|)', Operator), + (r'(in|and|or|not)\b', Operator.Word), + ], + + 'names': [ + ('[a-zA-Z_]\w*', Name.Attribute), + (r'(\$\S+)(\[)(\S+)(\])', bygroups(Name.Variable, Punctuation, + String, Punctuation)), + (r'\$\S+', Name.Variable), + ], + + 'numbers': [ + # Copypasta from the Python lexer + (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float), + (r'\d+[eE][+-]?[0-9]+j?', Number.Float), + (r'0[0-7]+j?', Number.Oct), + (r'0[xX][a-fA-F0-9]+', Number.Hex), + (r'\d+L', Number.Integer.Long), + (r'\d+j?', Number.Integer) + ], + + 
'keywords': [ + # Left out 'group' and 'require' + # Since they're often used as attributes + (words(( + 'absent', 'alert', 'alias', 'audit', 'augeas', 'before', 'case', + 'check', 'class', 'computer', 'configured', 'contained', + 'create_resources', 'crit', 'cron', 'debug', 'default', + 'define', 'defined', 'directory', 'else', 'elsif', 'emerg', + 'err', 'exec', 'extlookup', 'fail', 'false', 'file', + 'filebucket', 'fqdn_rand', 'generate', 'host', 'if', 'import', + 'include', 'info', 'inherits', 'inline_template', 'installed', + 'interface', 'k5login', 'latest', 'link', 'loglevel', + 'macauthorization', 'mailalias', 'maillist', 'mcx', 'md5', + 'mount', 'mounted', 'nagios_command', 'nagios_contact', + 'nagios_contactgroup', 'nagios_host', 'nagios_hostdependency', + 'nagios_hostescalation', 'nagios_hostextinfo', 'nagios_hostgroup', + 'nagios_service', 'nagios_servicedependency', 'nagios_serviceescalation', + 'nagios_serviceextinfo', 'nagios_servicegroup', 'nagios_timeperiod', + 'node', 'noop', 'notice', 'notify', 'package', 'present', 'purged', + 'realize', 'regsubst', 'resources', 'role', 'router', 'running', + 'schedule', 'scheduled_task', 'search', 'selboolean', 'selmodule', + 'service', 'sha1', 'shellquote', 'split', 'sprintf', + 'ssh_authorized_key', 'sshkey', 'stage', 'stopped', 'subscribe', + 'tag', 'tagged', 'template', 'tidy', 'true', 'undef', 'unmounted', + 'user', 'versioncmp', 'vlan', 'warning', 'yumrepo', 'zfs', 'zone', + 'zpool'), prefix='(?i)', suffix=r'\b'), + Keyword), + ], + + 'strings': [ + (r'"([^"])*"', String), + (r"'(\\'|[^'])*'", String), + ], + + } + + +class RslLexer(RegexLexer): + """ + `RSL `_ is the formal specification + language used in RAISE (Rigorous Approach to Industrial Software Engineering) + method. + + .. 
versionadded:: 2.0 + """ + name = 'RSL' + aliases = ['rsl'] + filenames = ['*.rsl'] + mimetypes = ['text/rsl'] + + flags = re.MULTILINE | re.DOTALL + + tokens = { + 'root': [ + (words(( + 'Bool', 'Char', 'Int', 'Nat', 'Real', 'Text', 'Unit', 'abs', + 'all', 'always', 'any', 'as', 'axiom', 'card', 'case', 'channel', + 'chaos', 'class', 'devt_relation', 'dom', 'elems', 'else', 'elif', + 'end', 'exists', 'extend', 'false', 'for', 'hd', 'hide', 'if', + 'in', 'is', 'inds', 'initialise', 'int', 'inter', 'isin', 'len', + 'let', 'local', 'ltl_assertion', 'object', 'of', 'out', 'post', + 'pre', 'read', 'real', 'rng', 'scheme', 'skip', 'stop', 'swap', + 'then', 'theory', 'test_case', 'tl', 'transition_system', 'true', + 'type', 'union', 'until', 'use', 'value', 'variable', 'while', + 'with', 'write', '~isin', '-inflist', '-infset', '-list', + '-set'), prefix=r'\b', suffix=r'\b'), + Keyword), + (r'(variable|value)\b', Keyword.Declaration), + (r'--.*?\n', Comment), + (r'<:.*?:>', Comment), + (r'\{!.*?!\}', Comment), + (r'/\*.*?\*/', Comment), + (r'^[ \t]*([\w]+)[ \t]*:[^:]', Name.Function), + (r'(^[ \t]*)([\w]+)([ \t]*\([\w\s,]*\)[ \t]*)(is|as)', + bygroups(Text, Name.Function, Text, Keyword)), + (r'\b[A-Z]\w*\b', Keyword.Type), + (r'(true|false)\b', Keyword.Constant), + (r'".*"', String), + (r'\'.\'', String.Char), + (r'(><|->|-m->|/\\|<=|<<=|<\.|\|\||\|\^\||-~->|-~m->|\\/|>=|>>|' + r'\.>|\+\+|-\\|<->|=>|:-|~=|\*\*|<<|>>=|\+>|!!|\|=\||#)', + Operator), + (r'[0-9]+\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), + (r'0x[0-9a-f]+', Number.Hex), + (r'[0-9]+', Number.Integer), + (r'.', Text), + ], + } + + def analyse_text(text): + """ + Check for the most common text in the beginning of a RSL file. + """ + if re.search(r'scheme\s*.*?=\s*class\s*type', text, re.I) is not None: + return 1.0 + + +class MscgenLexer(RegexLexer): + """ + For `Mscgen `_ files. + + .. 
versionadded:: 1.6 + """ + name = 'Mscgen' + aliases = ['mscgen', 'msc'] + filenames = ['*.msc'] + + _var = r'(\w+|"(?:\\"|[^"])*")' + + tokens = { + 'root': [ + (r'msc\b', Keyword.Type), + # Options + (r'(hscale|HSCALE|width|WIDTH|wordwraparcs|WORDWRAPARCS' + r'|arcgradient|ARCGRADIENT)\b', Name.Property), + # Operators + (r'(abox|ABOX|rbox|RBOX|box|BOX|note|NOTE)\b', Operator.Word), + (r'(\.|-|\|){3}', Keyword), + (r'(?:-|=|\.|:){2}' + r'|<<=>>|<->|<=>|<<>>|<:>' + r'|->|=>>|>>|=>|:>|-x|-X' + r'|<-|<<=|<<|<=|<:|x-|X-|=', Operator), + # Names + (r'\*', Name.Builtin), + (_var, Name.Variable), + # Other + (r'\[', Punctuation, 'attrs'), + (r'\{|\}|,|;', Punctuation), + include('comments') + ], + 'attrs': [ + (r'\]', Punctuation, '#pop'), + (_var + r'(\s*)(=)(\s*)' + _var, + bygroups(Name.Attribute, Text.Whitespace, Operator, Text.Whitespace, + String)), + (r',', Punctuation), + include('comments') + ], + 'comments': [ + (r'(?://|#).*?\n', Comment.Single), + (r'/\*(?:.|\n)*?\*/', Comment.Multiline), + (r'[ \t\r\n]+', Text.Whitespace) + ] + } + + +class VGLLexer(RegexLexer): + """ + For `SampleManager VGL `_ + source code. + + .. versionadded:: 1.6 + """ + name = 'VGL' + aliases = ['vgl'] + filenames = ['*.rpf'] + + flags = re.MULTILINE | re.DOTALL | re.IGNORECASE + + tokens = { + 'root': [ + (r'\{[^}]*\}', Comment.Multiline), + (r'declare', Keyword.Constant), + (r'(if|then|else|endif|while|do|endwhile|and|or|prompt|object' + r'|create|on|line|with|global|routine|value|endroutine|constant' + r'|global|set|join|library|compile_option|file|exists|create|copy' + r'|delete|enable|windows|name|notprotected)(?! 
*[=<>.,()])', + Keyword), + (r'(true|false|null|empty|error|locked)', Keyword.Constant), + (r'[~^*#!%&\[\]()<>|+=:;,./?-]', Operator), + (r'"[^"]*"', String), + (r'(\.)([a-z_$][\w$]*)', bygroups(Operator, Name.Attribute)), + (r'[0-9][0-9]*(\.[0-9]+(e[+\-]?[0-9]+)?)?', Number), + (r'[a-z_$][\w$]*', Name), + (r'[\r\n]+', Text), + (r'\s+', Text) + ] + } + + +class AlloyLexer(RegexLexer): + """ + For `Alloy `_ source code. + + .. versionadded:: 2.0 + """ + + name = 'Alloy' + aliases = ['alloy'] + filenames = ['*.als'] + mimetypes = ['text/x-alloy'] + + flags = re.MULTILINE | re.DOTALL + + iden_rex = r'[a-zA-Z_][\w\']*' + text_tuple = (r'[^\S\n]+', Text) + + tokens = { + 'sig': [ + (r'(extends)\b', Keyword, '#pop'), + (iden_rex, Name), + text_tuple, + (r',', Punctuation), + (r'\{', Operator, '#pop'), + ], + 'module': [ + text_tuple, + (iden_rex, Name, '#pop'), + ], + 'fun': [ + text_tuple, + (r'\{', Operator, '#pop'), + (iden_rex, Name, '#pop'), + ], + 'root': [ + (r'--.*?$', Comment.Single), + (r'//.*?$', Comment.Single), + (r'/\*.*?\*/', Comment.Multiline), + text_tuple, + (r'(module|open)(\s+)', bygroups(Keyword.Namespace, Text), + 'module'), + (r'(sig|enum)(\s+)', bygroups(Keyword.Declaration, Text), 'sig'), + (r'(iden|univ|none)\b', Keyword.Constant), + (r'(int|Int)\b', Keyword.Type), + (r'(this|abstract|extends|set|seq|one|lone|let)\b', Keyword), + (r'(all|some|no|sum|disj|when|else)\b', Keyword), + (r'(run|check|for|but|exactly|expect|as)\b', Keyword), + (r'(and|or|implies|iff|in)\b', Operator.Word), + (r'(fun|pred|fact|assert)(\s+)', bygroups(Keyword, Text), 'fun'), + (r'!|#|&&|\+\+|<<|>>|>=|<=>|<=|\.|->', Operator), + (r'[-+/*%=<>&!^|~{}\[\]().]', Operator), + (iden_rex, Name), + (r'[:,]', Punctuation), + (r'[0-9]+', Number.Integer), + (r'"(\\\\|\\"|[^"])*"', String), + (r'\n', Text), + ] + } + + +class PanLexer(RegexLexer): + """ + Lexer for `pan `_ source files. + + Based on tcsh lexer. + + .. 
versionadded:: 2.0 + """ + + name = 'Pan' + aliases = ['pan'] + filenames = ['*.pan'] + + tokens = { + 'root': [ + include('basic'), + (r'\(', Keyword, 'paren'), + (r'\{', Keyword, 'curly'), + include('data'), + ], + 'basic': [ + (words(( + 'if', 'for', 'with', 'else', 'type', 'bind', 'while', 'valid', 'final', 'prefix', + 'unique', 'object', 'foreach', 'include', 'template', 'function', 'variable', + 'structure', 'extensible', 'declaration'), prefix=r'\b', suffix=r'\s*\b'), + Keyword), + (words(( + 'file_contents', 'format', 'index', 'length', 'match', 'matches', 'replace', + 'splice', 'split', 'substr', 'to_lowercase', 'to_uppercase', 'debug', 'error', + 'traceback', 'deprecated', 'base64_decode', 'base64_encode', 'digest', 'escape', + 'unescape', 'append', 'create', 'first', 'nlist', 'key', 'list', 'merge', 'next', + 'prepend', 'is_boolean', 'is_defined', 'is_double', 'is_list', 'is_long', + 'is_nlist', 'is_null', 'is_number', 'is_property', 'is_resource', 'is_string', + 'to_boolean', 'to_double', 'to_long', 'to_string', 'clone', 'delete', 'exists', + 'path_exists', 'if_exists', 'return', 'value'), prefix=r'\b', suffix=r'\s*\b'), + Name.Builtin), + (r'#.*', Comment), + (r'\\[\w\W]', String.Escape), + (r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)), + (r'[\[\]{}()=]+', Operator), + (r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String), + (r';', Punctuation), + ], + 'data': [ + (r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double), + (r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single), + (r'\s+', Text), + (r'[^=\s\[\]{}()$"\'`\\;#]+', Text), + (r'\d+(?= |\Z)', Number), + ], + 'curly': [ + (r'\}', Keyword, '#pop'), + (r':-', Keyword), + (r'\w+', Name.Variable), + (r'[^}:"\'`$]+', Punctuation), + (r':', Punctuation), + include('root'), + ], + 'paren': [ + (r'\)', Keyword, '#pop'), + include('root'), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/dylan.py 
b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/dylan.py new file mode 100644 index 0000000..9875fc0 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/dylan.py @@ -0,0 +1,289 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.dylan + ~~~~~~~~~~~~~~~~~~~~~ + + Lexers for the Dylan language. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import Lexer, RegexLexer, bygroups, do_insertions, default +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Generic, Literal + +__all__ = ['DylanLexer', 'DylanConsoleLexer', 'DylanLidLexer'] + + +class DylanLexer(RegexLexer): + """ + For the `Dylan `_ language. + + .. versionadded:: 0.7 + """ + + name = 'Dylan' + aliases = ['dylan'] + filenames = ['*.dylan', '*.dyl', '*.intr'] + mimetypes = ['text/x-dylan'] + + flags = re.IGNORECASE + + builtins = set(( + 'subclass', 'abstract', 'block', 'concrete', 'constant', 'class', + 'compiler-open', 'compiler-sideways', 'domain', 'dynamic', + 'each-subclass', 'exception', 'exclude', 'function', 'generic', + 'handler', 'inherited', 'inline', 'inline-only', 'instance', + 'interface', 'import', 'keyword', 'library', 'macro', 'method', + 'module', 'open', 'primary', 'required', 'sealed', 'sideways', + 'singleton', 'slot', 'thread', 'variable', 'virtual')) + + keywords = set(( + 'above', 'afterwards', 'begin', 'below', 'by', 'case', 'cleanup', + 'create', 'define', 'else', 'elseif', 'end', 'export', 'finally', + 'for', 'from', 'if', 'in', 'let', 'local', 'otherwise', 'rename', + 'select', 'signal', 'then', 'to', 'unless', 'until', 'use', 'when', + 'while')) + + operators = set(( + '~', '+', '-', '*', '|', '^', '=', '==', '~=', '~==', '<', '<=', + '>', '>=', '&', '|')) + + functions = set(( + 'abort', 'abs', 'add', 'add!', 'add-method', 'add-new', 'add-new!', + 
'all-superclasses', 'always', 'any?', 'applicable-method?', 'apply', + 'aref', 'aref-setter', 'as', 'as-lowercase', 'as-lowercase!', + 'as-uppercase', 'as-uppercase!', 'ash', 'backward-iteration-protocol', + 'break', 'ceiling', 'ceiling/', 'cerror', 'check-type', 'choose', + 'choose-by', 'complement', 'compose', 'concatenate', 'concatenate-as', + 'condition-format-arguments', 'condition-format-string', 'conjoin', + 'copy-sequence', 'curry', 'default-handler', 'dimension', 'dimensions', + 'direct-subclasses', 'direct-superclasses', 'disjoin', 'do', + 'do-handlers', 'element', 'element-setter', 'empty?', 'error', 'even?', + 'every?', 'false-or', 'fill!', 'find-key', 'find-method', 'first', + 'first-setter', 'floor', 'floor/', 'forward-iteration-protocol', + 'function-arguments', 'function-return-values', + 'function-specializers', 'gcd', 'generic-function-mandatory-keywords', + 'generic-function-methods', 'head', 'head-setter', 'identity', + 'initialize', 'instance?', 'integral?', 'intersection', + 'key-sequence', 'key-test', 'last', 'last-setter', 'lcm', 'limited', + 'list', 'logand', 'logbit?', 'logior', 'lognot', 'logxor', 'make', + 'map', 'map-as', 'map-into', 'max', 'member?', 'merge-hash-codes', + 'min', 'modulo', 'negative', 'negative?', 'next-method', + 'object-class', 'object-hash', 'odd?', 'one-of', 'pair', 'pop', + 'pop-last', 'positive?', 'push', 'push-last', 'range', 'rank', + 'rcurry', 'reduce', 'reduce1', 'remainder', 'remove', 'remove!', + 'remove-duplicates', 'remove-duplicates!', 'remove-key!', + 'remove-method', 'replace-elements!', 'replace-subsequence!', + 'restart-query', 'return-allowed?', 'return-description', + 'return-query', 'reverse', 'reverse!', 'round', 'round/', + 'row-major-index', 'second', 'second-setter', 'shallow-copy', + 'signal', 'singleton', 'size', 'size-setter', 'slot-initialized?', + 'sort', 'sort!', 'sorted-applicable-methods', 'subsequence-position', + 'subtype?', 'table-protocol', 'tail', 'tail-setter', 'third', + 
'third-setter', 'truncate', 'truncate/', 'type-error-expected-type', + 'type-error-value', 'type-for-copy', 'type-union', 'union', 'values', + 'vector', 'zero?')) + + valid_name = '\\\\?[\\w!&*<>|^$%@\\-+~?/=]+' + + def get_tokens_unprocessed(self, text): + for index, token, value in RegexLexer.get_tokens_unprocessed(self, text): + if token is Name: + lowercase_value = value.lower() + if lowercase_value in self.builtins: + yield index, Name.Builtin, value + continue + if lowercase_value in self.keywords: + yield index, Keyword, value + continue + if lowercase_value in self.functions: + yield index, Name.Builtin, value + continue + if lowercase_value in self.operators: + yield index, Operator, value + continue + yield index, token, value + + tokens = { + 'root': [ + # Whitespace + (r'\s+', Text), + + # single line comment + (r'//.*?\n', Comment.Single), + + # lid header + (r'([a-z0-9-]+)(:)([ \t]*)(.*(?:\n[ \t].+)*)', + bygroups(Name.Attribute, Operator, Text, String)), + + default('code') # no header match, switch to code + ], + 'code': [ + # Whitespace + (r'\s+', Text), + + # single line comment + (r'//.*?\n', Comment.Single), + + # multi-line comment + (r'/\*', Comment.Multiline, 'comment'), + + # strings and characters + (r'"', String, 'string'), + (r"'(\\.|\\[0-7]{1,3}|\\x[a-f0-9]{1,2}|[^\\\'\n])'", String.Char), + + # binary integer + (r'#b[01]+', Number.Bin), + + # octal integer + (r'#o[0-7]+', Number.Oct), + + # floating point + (r'[-+]?(\d*\.\d+(e[-+]?\d+)?|\d+(\.\d*)?e[-+]?\d+)', Number.Float), + + # decimal integer + (r'[-+]?\d+', Number.Integer), + + # hex integer + (r'#x[0-9a-f]+', Number.Hex), + + # Macro parameters + (r'(\?' + valid_name + ')(:)' + r'(token|name|variable|expression|body|case-body|\*)', + bygroups(Name.Tag, Operator, Name.Builtin)), + (r'(\?)(:)(token|name|variable|expression|body|case-body|\*)', + bygroups(Name.Tag, Operator, Name.Builtin)), + (r'\?' 
+ valid_name, Name.Tag), + + # Punctuation + (r'(=>|::|#\(|#\[|##|\?\?|\?=|\?|[(){}\[\],.;])', Punctuation), + + # Most operators are picked up as names and then re-flagged. + # This one isn't valid in a name though, so we pick it up now. + (r':=', Operator), + + # Pick up #t / #f before we match other stuff with #. + (r'#[tf]', Literal), + + # #"foo" style keywords + (r'#"', String.Symbol, 'keyword'), + + # #rest, #key, #all-keys, etc. + (r'#[a-z0-9-]+', Keyword), + + # required-init-keyword: style keywords. + (valid_name + ':', Keyword), + + # class names + (r'<' + valid_name + '>', Name.Class), + + # define variable forms. + (r'\*' + valid_name + '\*', Name.Variable.Global), + + # define constant forms. + (r'\$' + valid_name, Name.Constant), + + # everything else. We re-flag some of these in the method above. + (valid_name, Name), + ], + 'comment': [ + (r'[^*/]', Comment.Multiline), + (r'/\*', Comment.Multiline, '#push'), + (r'\*/', Comment.Multiline, '#pop'), + (r'[*/]', Comment.Multiline) + ], + 'keyword': [ + (r'"', String.Symbol, '#pop'), + (r'[^\\"]+', String.Symbol), # all other characters + ], + 'string': [ + (r'"', String, '#pop'), + (r'\\([\\abfnrtv"\']|x[a-f0-9]{2,4}|[0-7]{1,3})', String.Escape), + (r'[^\\"\n]+', String), # all other characters + (r'\\\n', String), # line continuation + (r'\\', String), # stray backslash + ] + } + + +class DylanLidLexer(RegexLexer): + """ + For Dylan LID (Library Interchange Definition) files. + + .. versionadded:: 1.6 + """ + + name = 'DylanLID' + aliases = ['dylan-lid', 'lid'] + filenames = ['*.lid', '*.hdp'] + mimetypes = ['text/x-dylan-lid'] + + flags = re.IGNORECASE + + tokens = { + 'root': [ + # Whitespace + (r'\s+', Text), + + # single line comment + (r'//.*?\n', Comment.Single), + + # lid header + (r'(.*?)(:)([ \t]*)(.*(?:\n[ \t].+)*)', + bygroups(Name.Attribute, Operator, Text, String)), + ] + } + + +class DylanConsoleLexer(Lexer): + """ + For Dylan interactive console output like: + + .. 
sourcecode:: dylan-console + + ? let a = 1; + => 1 + ? a + => 1 + + This is based on a copy of the RubyConsoleLexer. + + .. versionadded:: 1.6 + """ + name = 'Dylan session' + aliases = ['dylan-console', 'dylan-repl'] + filenames = ['*.dylan-console'] + mimetypes = ['text/x-dylan-console'] + + _line_re = re.compile('.*?\n') + _prompt_re = re.compile('\?| ') + + def get_tokens_unprocessed(self, text): + dylexer = DylanLexer(**self.options) + + curcode = '' + insertions = [] + for match in self._line_re.finditer(text): + line = match.group() + m = self._prompt_re.match(line) + if m is not None: + end = m.end() + insertions.append((len(curcode), + [(0, Generic.Prompt, line[:end])])) + curcode += line[end:] + else: + if curcode: + for item in do_insertions(insertions, + dylexer.get_tokens_unprocessed(curcode)): + yield item + curcode = '' + insertions = [] + yield match.start(), Generic.Output, line + if curcode: + for item in do_insertions(insertions, + dylexer.get_tokens_unprocessed(curcode)): + yield item diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/ecl.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/ecl.py new file mode 100644 index 0000000..5c9b3bd --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/ecl.py @@ -0,0 +1,125 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.ecl + ~~~~~~~~~~~~~~~~~~~ + + Lexers for the ECL language. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include, bygroups, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Error + +__all__ = ['ECLLexer'] + + +class ECLLexer(RegexLexer): + """ + Lexer for the declarative big-data `ECL + `_ + language. + + .. 
versionadded:: 1.5 + """ + + name = 'ECL' + aliases = ['ecl'] + filenames = ['*.ecl'] + mimetypes = ['application/x-ecl'] + + flags = re.IGNORECASE | re.MULTILINE + + tokens = { + 'root': [ + include('whitespace'), + include('statements'), + ], + 'whitespace': [ + (r'\s+', Text), + (r'\/\/.*', Comment.Single), + (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline), + ], + 'statements': [ + include('types'), + include('keywords'), + include('functions'), + include('hash'), + (r'"', String, 'string'), + (r'\'', String, 'string'), + (r'(\d+\.\d*|\.\d+|\d+)e[+-]?\d+[lu]*', Number.Float), + (r'(\d+\.\d*|\.\d+|\d+f)f?', Number.Float), + (r'0x[0-9a-f]+[lu]*', Number.Hex), + (r'0[0-7]+[lu]*', Number.Oct), + (r'\d+[lu]*', Number.Integer), + (r'\*/', Error), + (r'[~!%^&*+=|?:<>/-]+', Operator), + (r'[{}()\[\],.;]', Punctuation), + (r'[a-z_]\w*', Name), + ], + 'hash': [ + (r'^#.*$', Comment.Preproc), + ], + 'types': [ + (r'(RECORD|END)\D', Keyword.Declaration), + (r'((?:ASCII|BIG_ENDIAN|BOOLEAN|DATA|DECIMAL|EBCDIC|INTEGER|PATTERN|' + r'QSTRING|REAL|RECORD|RULE|SET OF|STRING|TOKEN|UDECIMAL|UNICODE|' + r'UNSIGNED|VARSTRING|VARUNICODE)\d*)(\s+)', + bygroups(Keyword.Type, Text)), + ], + 'keywords': [ + (words(( + 'APPLY', 'ASSERT', 'BUILD', 'BUILDINDEX', 'EVALUATE', 'FAIL', + 'KEYDIFF', 'KEYPATCH', 'LOADXML', 'NOTHOR', 'NOTIFY', 'OUTPUT', + 'PARALLEL', 'SEQUENTIAL', 'SOAPCALL', 'CHECKPOINT', 'DEPRECATED', + 'FAILCODE', 'FAILMESSAGE', 'FAILURE', 'GLOBAL', 'INDEPENDENT', + 'ONWARNING', 'PERSIST', 'PRIORITY', 'RECOVERY', 'STORED', 'SUCCESS', + 'WAIT', 'WHEN'), suffix=r'\b'), + Keyword.Reserved), + # These are classed differently, check later + (words(( + 'ALL', 'AND', 'ANY', 'AS', 'ATMOST', 'BEFORE', 'BEGINC++', 'BEST', 'BETWEEN', 'CASE', + 'CONST', 'COUNTER', 'CSV', 'DESCEND', 'ENCRYPT', 'ENDC++', 'ENDMACRO', 'EXCEPT', + 'EXCLUSIVE', 'EXPIRE', 'EXPORT', 'EXTEND', 'FALSE', 'FEW', 'FIRST', 'FLAT', 'FULL', + 'FUNCTION', 'GROUP', 'HEADER', 'HEADING', 'HOLE', 'IFBLOCK', 'IMPORT', 'IN', 
'JOINED', + 'KEEP', 'KEYED', 'LAST', 'LEFT', 'LIMIT', 'LOAD', 'LOCAL', 'LOCALE', 'LOOKUP', 'MACRO', + 'MANY', 'MAXCOUNT', 'MAXLENGTH', 'MIN SKEW', 'MODULE', 'INTERFACE', 'NAMED', 'NOCASE', + 'NOROOT', 'NOSCAN', 'NOSORT', 'NOT', 'OF', 'ONLY', 'OPT', 'OR', 'OUTER', 'OVERWRITE', + 'PACKED', 'PARTITION', 'PENALTY', 'PHYSICALLENGTH', 'PIPE', 'QUOTE', 'RELATIONSHIP', + 'REPEAT', 'RETURN', 'RIGHT', 'SCAN', 'SELF', 'SEPARATOR', 'SERVICE', 'SHARED', 'SKEW', + 'SKIP', 'SQL', 'STORE', 'TERMINATOR', 'THOR', 'THRESHOLD', 'TOKEN', 'TRANSFORM', 'TRIM', + 'TRUE', 'TYPE', 'UNICODEORDER', 'UNSORTED', 'VALIDATE', 'VIRTUAL', 'WHOLE', 'WILD', + 'WITHIN', 'XML', 'XPATH', '__COMPRESSED__'), suffix=r'\b'), + Keyword.Reserved), + ], + 'functions': [ + (words(( + 'ABS', 'ACOS', 'ALLNODES', 'ASCII', 'ASIN', 'ASSTRING', 'ATAN', 'ATAN2', 'AVE', 'CASE', + 'CHOOSE', 'CHOOSEN', 'CHOOSESETS', 'CLUSTERSIZE', 'COMBINE', 'CORRELATION', 'COS', + 'COSH', 'COUNT', 'COVARIANCE', 'CRON', 'DATASET', 'DEDUP', 'DEFINE', 'DENORMALIZE', + 'DISTRIBUTE', 'DISTRIBUTED', 'DISTRIBUTION', 'EBCDIC', 'ENTH', 'ERROR', 'EVALUATE', + 'EVENT', 'EVENTEXTRA', 'EVENTNAME', 'EXISTS', 'EXP', 'FAILCODE', 'FAILMESSAGE', + 'FETCH', 'FROMUNICODE', 'GETISVALID', 'GLOBAL', 'GRAPH', 'GROUP', 'HASH', 'HASH32', + 'HASH64', 'HASHCRC', 'HASHMD5', 'HAVING', 'IF', 'INDEX', 'INTFORMAT', 'ISVALID', + 'ITERATE', 'JOIN', 'KEYUNICODE', 'LENGTH', 'LIBRARY', 'LIMIT', 'LN', 'LOCAL', 'LOG', 'LOOP', + 'MAP', 'MATCHED', 'MATCHLENGTH', 'MATCHPOSITION', 'MATCHTEXT', 'MATCHUNICODE', + 'MAX', 'MERGE', 'MERGEJOIN', 'MIN', 'NOLOCAL', 'NONEMPTY', 'NORMALIZE', 'PARSE', 'PIPE', + 'POWER', 'PRELOAD', 'PROCESS', 'PROJECT', 'PULL', 'RANDOM', 'RANGE', 'RANK', 'RANKED', + 'REALFORMAT', 'RECORDOF', 'REGEXFIND', 'REGEXREPLACE', 'REGROUP', 'REJECTED', + 'ROLLUP', 'ROUND', 'ROUNDUP', 'ROW', 'ROWDIFF', 'SAMPLE', 'SET', 'SIN', 'SINH', 'SIZEOF', + 'SOAPCALL', 'SORT', 'SORTED', 'SQRT', 'STEPPED', 'STORED', 'SUM', 'TABLE', 'TAN', 'TANH', + 'THISNODE', 'TOPN', 'TOUNICODE', 
'TRANSFER', 'TRIM', 'TRUNCATE', 'TYPEOF', 'UNGROUP', + 'UNICODEORDER', 'VARIANCE', 'WHICH', 'WORKUNIT', 'XMLDECODE', 'XMLENCODE', + 'XMLTEXT', 'XMLUNICODE'), suffix=r'\b'), + Name.Function), + ], + 'string': [ + (r'"', String, '#pop'), + (r'\'', String, '#pop'), + (r'[^"\']+', String), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/eiffel.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/eiffel.py new file mode 100644 index 0000000..e3bf81f --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/eiffel.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.eiffel + ~~~~~~~~~~~~~~~~~~~~~~ + + Lexer for the Eiffel language. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, include, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['EiffelLexer'] + + +class EiffelLexer(RegexLexer): + """ + For `Eiffel `_ source code. + + .. versionadded:: 2.0 + """ + name = 'Eiffel' + aliases = ['eiffel'] + filenames = ['*.e'] + mimetypes = ['text/x-eiffel'] + + tokens = { + 'root': [ + (r'[^\S\n]+', Text), + (r'--.*?\n', Comment.Single), + (r'[^\S\n]+', Text), + # Please note that keyword and operator are case insensitive. 
+ (r'(?i)(true|false|void|current|result|precursor)\b', Keyword.Constant), + (r'(?i)(and(\s+then)?|not|xor|implies|or(\s+else)?)\b', Operator.Word), + (words(( + 'across', 'agent', 'alias', 'all', 'as', 'assign', 'attached', + 'attribute', 'check', 'class', 'convert', 'create', 'debug', + 'deferred', 'detachable', 'do', 'else', 'elseif', 'end', 'ensure', + 'expanded', 'export', 'external', 'feature', 'from', 'frozen', 'if', + 'inherit', 'inspect', 'invariant', 'like', 'local', 'loop', 'none', + 'note', 'obsolete', 'old', 'once', 'only', 'redefine', 'rename', + 'require', 'rescue', 'retry', 'select', 'separate', 'then', + 'undefine', 'until', 'variant', 'when'), prefix=r'(?i)\b', suffix=r'\b'), + Keyword.Reserved), + (r'"\[(([^\]%]|\n)|%(.|\n)|\][^"])*?\]"', String), + (r'"([^"%\n]|%.)*?"', String), + include('numbers'), + (r"'([^'%]|%'|%%)'", String.Char), + (r"(//|\\\\|>=|<=|:=|/=|~|/~|[\\?!#%&@|+/\-=>*$<^\[\]])", Operator), + (r"([{}():;,.])", Punctuation), + (r'([a-z]\w*)|([A-Z][A-Z0-9_]*[a-z]\w*)', Name), + (r'([A-Z][A-Z0-9_]*)', Name.Class), + (r'\n+', Text), + ], + 'numbers': [ + (r'0[xX][a-fA-F0-9]+', Number.Hex), + (r'0[bB][01]+', Number.Bin), + (r'0[cC][0-7]+', Number.Oct), + (r'([0-9]+\.[0-9]*)|([0-9]*\.[0-9]+)', Number.Float), + (r'[0-9]+', Number.Integer), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/erlang.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/erlang.py new file mode 100644 index 0000000..563f774 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/erlang.py @@ -0,0 +1,511 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.erlang + ~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for Erlang. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import re + +from pygments.lexer import Lexer, RegexLexer, bygroups, words, do_insertions, \ + include, default +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Generic + +__all__ = ['ErlangLexer', 'ErlangShellLexer', 'ElixirConsoleLexer', + 'ElixirLexer'] + + +line_re = re.compile('.*?\n') + + +class ErlangLexer(RegexLexer): + """ + For the Erlang functional programming language. + + Blame Jeremy Thurgood (http://jerith.za.net/). + + .. versionadded:: 0.9 + """ + + name = 'Erlang' + aliases = ['erlang'] + filenames = ['*.erl', '*.hrl', '*.es', '*.escript'] + mimetypes = ['text/x-erlang'] + + keywords = ( + 'after', 'begin', 'case', 'catch', 'cond', 'end', 'fun', 'if', + 'let', 'of', 'query', 'receive', 'try', 'when', + ) + + builtins = ( # See erlang(3) man page + 'abs', 'append_element', 'apply', 'atom_to_list', 'binary_to_list', + 'bitstring_to_list', 'binary_to_term', 'bit_size', 'bump_reductions', + 'byte_size', 'cancel_timer', 'check_process_code', 'delete_module', + 'demonitor', 'disconnect_node', 'display', 'element', 'erase', 'exit', + 'float', 'float_to_list', 'fun_info', 'fun_to_list', + 'function_exported', 'garbage_collect', 'get', 'get_keys', + 'group_leader', 'hash', 'hd', 'integer_to_list', 'iolist_to_binary', + 'iolist_size', 'is_atom', 'is_binary', 'is_bitstring', 'is_boolean', + 'is_builtin', 'is_float', 'is_function', 'is_integer', 'is_list', + 'is_number', 'is_pid', 'is_port', 'is_process_alive', 'is_record', + 'is_reference', 'is_tuple', 'length', 'link', 'list_to_atom', + 'list_to_binary', 'list_to_bitstring', 'list_to_existing_atom', + 'list_to_float', 'list_to_integer', 'list_to_pid', 'list_to_tuple', + 'load_module', 'localtime_to_universaltime', 'make_tuple', 'md5', + 'md5_final', 'md5_update', 'memory', 'module_loaded', 'monitor', + 'monitor_node', 'node', 'nodes', 'open_port', 'phash', 'phash2', + 'pid_to_list', 'port_close', 'port_command', 'port_connect', + 'port_control', 
'port_call', 'port_info', 'port_to_list', + 'process_display', 'process_flag', 'process_info', 'purge_module', + 'put', 'read_timer', 'ref_to_list', 'register', 'resume_process', + 'round', 'send', 'send_after', 'send_nosuspend', 'set_cookie', + 'setelement', 'size', 'spawn', 'spawn_link', 'spawn_monitor', + 'spawn_opt', 'split_binary', 'start_timer', 'statistics', + 'suspend_process', 'system_flag', 'system_info', 'system_monitor', + 'system_profile', 'term_to_binary', 'tl', 'trace', 'trace_delivered', + 'trace_info', 'trace_pattern', 'trunc', 'tuple_size', 'tuple_to_list', + 'universaltime_to_localtime', 'unlink', 'unregister', 'whereis' + ) + + operators = r'(\+\+?|--?|\*|/|<|>|/=|=:=|=/=|=<|>=|==?|<-|!|\?)' + word_operators = ( + 'and', 'andalso', 'band', 'bnot', 'bor', 'bsl', 'bsr', 'bxor', + 'div', 'not', 'or', 'orelse', 'rem', 'xor' + ) + + atom_re = r"(?:[a-z]\w*|'[^\n']*[^\\]')" + + variable_re = r'(?:[A-Z_]\w*)' + + escape_re = r'(?:\\(?:[bdefnrstv\'"\\/]|[0-7][0-7]?[0-7]?|\^[a-zA-Z]))' + + macro_re = r'(?:'+variable_re+r'|'+atom_re+r')' + + base_re = r'(?:[2-9]|[12][0-9]|3[0-6])' + + tokens = { + 'root': [ + (r'\s+', Text), + (r'%.*\n', Comment), + (words(keywords, suffix=r'\b'), Keyword), + (words(builtins, suffix=r'\b'), Name.Builtin), + (words(word_operators, suffix=r'\b'), Operator.Word), + (r'^-', Punctuation, 'directive'), + (operators, Operator), + (r'"', String, 'string'), + (r'<<', Name.Label), + (r'>>', Name.Label), + ('(' + atom_re + ')(:)', bygroups(Name.Namespace, Punctuation)), + ('(?:^|(?<=:))(' + atom_re + r')(\s*)(\()', + bygroups(Name.Function, Text, Punctuation)), + (r'[+-]?' 
+ base_re + r'#[0-9a-zA-Z]+', Number.Integer), + (r'[+-]?\d+', Number.Integer), + (r'[+-]?\d+.\d+', Number.Float), + (r'[]\[:_@\".{}()|;,]', Punctuation), + (variable_re, Name.Variable), + (atom_re, Name), + (r'\?'+macro_re, Name.Constant), + (r'\$(?:'+escape_re+r'|\\[ %]|[^\\])', String.Char), + (r'#'+atom_re+r'(:?\.'+atom_re+r')?', Name.Label), + ], + 'string': [ + (escape_re, String.Escape), + (r'"', String, '#pop'), + (r'~[0-9.*]*[~#+bBcdefginpPswWxX]', String.Interpol), + (r'[^"\\~]+', String), + (r'~', String), + ], + 'directive': [ + (r'(define)(\s*)(\()('+macro_re+r')', + bygroups(Name.Entity, Text, Punctuation, Name.Constant), '#pop'), + (r'(record)(\s*)(\()('+macro_re+r')', + bygroups(Name.Entity, Text, Punctuation, Name.Label), '#pop'), + (atom_re, Name.Entity, '#pop'), + ], + } + + +class ErlangShellLexer(Lexer): + """ + Shell sessions in erl (for Erlang code). + + .. versionadded:: 1.1 + """ + name = 'Erlang erl session' + aliases = ['erl'] + filenames = ['*.erl-sh'] + mimetypes = ['text/x-erl-shellsession'] + + _prompt_re = re.compile(r'\d+>(?=\s|\Z)') + + def get_tokens_unprocessed(self, text): + erlexer = ErlangLexer(**self.options) + + curcode = '' + insertions = [] + for match in line_re.finditer(text): + line = match.group() + m = self._prompt_re.match(line) + if m is not None: + end = m.end() + insertions.append((len(curcode), + [(0, Generic.Prompt, line[:end])])) + curcode += line[end:] + else: + if curcode: + for item in do_insertions(insertions, + erlexer.get_tokens_unprocessed(curcode)): + yield item + curcode = '' + insertions = [] + if line.startswith('*'): + yield match.start(), Generic.Traceback, line + else: + yield match.start(), Generic.Output, line + if curcode: + for item in do_insertions(insertions, + erlexer.get_tokens_unprocessed(curcode)): + yield item + + +def gen_elixir_string_rules(name, symbol, token): + states = {} + states['string_' + name] = [ + (r'[^#%s\\]+' % (symbol,), token), + include('escapes'), + (r'\\.', token), + 
(r'(%s)' % (symbol,), bygroups(token), "#pop"), + include('interpol') + ] + return states + + +def gen_elixir_sigstr_rules(term, token, interpol=True): + if interpol: + return [ + (r'[^#%s\\]+' % (term,), token), + include('escapes'), + (r'\\.', token), + (r'%s[a-zA-Z]*' % (term,), token, '#pop'), + include('interpol') + ] + else: + return [ + (r'[^%s\\]+' % (term,), token), + (r'\\.', token), + (r'%s[a-zA-Z]*' % (term,), token, '#pop'), + ] + + +class ElixirLexer(RegexLexer): + """ + For the `Elixir language `_. + + .. versionadded:: 1.5 + """ + + name = 'Elixir' + aliases = ['elixir', 'ex', 'exs'] + filenames = ['*.ex', '*.exs'] + mimetypes = ['text/x-elixir'] + + KEYWORD = ('fn', 'do', 'end', 'after', 'else', 'rescue', 'catch') + KEYWORD_OPERATOR = ('not', 'and', 'or', 'when', 'in') + BUILTIN = ( + 'case', 'cond', 'for', 'if', 'unless', 'try', 'receive', 'raise', + 'quote', 'unquote', 'unquote_splicing', 'throw', 'super' + ) + BUILTIN_DECLARATION = ( + 'def', 'defp', 'defmodule', 'defprotocol', 'defmacro', 'defmacrop', + 'defdelegate', 'defexception', 'defstruct', 'defimpl', 'defcallback' + ) + + BUILTIN_NAMESPACE = ('import', 'require', 'use', 'alias') + CONSTANT = ('nil', 'true', 'false') + + PSEUDO_VAR = ('_', '__MODULE__', '__DIR__', '__ENV__', '__CALLER__') + + OPERATORS3 = ( + '<<<', '>>>', '|||', '&&&', '^^^', '~~~', '===', '!==', + '~>>', '<~>', '|~>', '<|>', + ) + OPERATORS2 = ( + '==', '!=', '<=', '>=', '&&', '||', '<>', '++', '--', '|>', '=~', + '->', '<-', '|', '.', '=', '~>', '<~', + ) + OPERATORS1 = ('<', '>', '+', '-', '*', '/', '!', '^', '&') + + PUNCTUATION = ( + '\\\\', '<<', '>>', '=>', '(', ')', ':', ';', ',', '[', ']' + ) + + def get_tokens_unprocessed(self, text): + for index, token, value in RegexLexer.get_tokens_unprocessed(self, text): + if token is Name: + if value in self.KEYWORD: + yield index, Keyword, value + elif value in self.KEYWORD_OPERATOR: + yield index, Operator.Word, value + elif value in self.BUILTIN: + yield index, 
Keyword, value + elif value in self.BUILTIN_DECLARATION: + yield index, Keyword.Declaration, value + elif value in self.BUILTIN_NAMESPACE: + yield index, Keyword.Namespace, value + elif value in self.CONSTANT: + yield index, Name.Constant, value + elif value in self.PSEUDO_VAR: + yield index, Name.Builtin.Pseudo, value + else: + yield index, token, value + else: + yield index, token, value + + def gen_elixir_sigil_rules(): + # all valid sigil terminators (excluding heredocs) + terminators = [ + (r'\{', r'\}', 'cb'), + (r'\[', r'\]', 'sb'), + (r'\(', r'\)', 'pa'), + (r'<', r'>', 'ab'), + (r'/', r'/', 'slas'), + (r'\|', r'\|', 'pipe'), + ('"', '"', 'quot'), + ("'", "'", 'apos'), + ] + + # heredocs have slightly different rules + triquotes = [(r'"""', 'triquot'), (r"'''", 'triapos')] + + token = String.Other + states = {'sigils': []} + + for term, name in triquotes: + states['sigils'] += [ + (r'(~[a-z])(%s)' % (term,), bygroups(token, String.Heredoc), + (name + '-end', name + '-intp')), + (r'(~[A-Z])(%s)' % (term,), bygroups(token, String.Heredoc), + (name + '-end', name + '-no-intp')), + ] + + states[name + '-end'] = [ + (r'[a-zA-Z]+', token, '#pop'), + default('#pop'), + ] + states[name + '-intp'] = [ + (r'^\s*' + term, String.Heredoc, '#pop'), + include('heredoc_interpol'), + ] + states[name + '-no-intp'] = [ + (r'^\s*' + term, String.Heredoc, '#pop'), + include('heredoc_no_interpol'), + ] + + for lterm, rterm, name in terminators: + states['sigils'] += [ + (r'~[a-z]' + lterm, token, name + '-intp'), + (r'~[A-Z]' + lterm, token, name + '-no-intp'), + ] + states[name + '-intp'] = gen_elixir_sigstr_rules(rterm, token) + states[name + '-no-intp'] = \ + gen_elixir_sigstr_rules(rterm, token, interpol=False) + + return states + + op3_re = "|".join(re.escape(s) for s in OPERATORS3) + op2_re = "|".join(re.escape(s) for s in OPERATORS2) + op1_re = "|".join(re.escape(s) for s in OPERATORS1) + ops_re = r'(?:%s|%s|%s)' % (op3_re, op2_re, op1_re) + punctuation_re = 
"|".join(re.escape(s) for s in PUNCTUATION) + alnum = '\w' + name_re = r'(?:\.\.\.|[a-z_]%s*[!?]?)' % alnum + modname_re = r'[A-Z]%(alnum)s*(?:\.[A-Z]%(alnum)s*)*' % {'alnum': alnum} + complex_name_re = r'(?:%s|%s|%s)' % (name_re, modname_re, ops_re) + special_atom_re = r'(?:\.\.\.|<<>>|%\{\}|%|\{\})' + + long_hex_char_re = r'(\\x\{)([\da-fA-F]+)(\})' + hex_char_re = r'(\\x[\da-fA-F]{1,2})' + escape_char_re = r'(\\[abdefnrstv])' + + tokens = { + 'root': [ + (r'\s+', Text), + (r'#.*$', Comment.Single), + + # Various kinds of characters + (r'(\?)' + long_hex_char_re, + bygroups(String.Char, + String.Escape, Number.Hex, String.Escape)), + (r'(\?)' + hex_char_re, + bygroups(String.Char, String.Escape)), + (r'(\?)' + escape_char_re, + bygroups(String.Char, String.Escape)), + (r'\?\\?.', String.Char), + + # '::' has to go before atoms + (r':::', String.Symbol), + (r'::', Operator), + + # atoms + (r':' + special_atom_re, String.Symbol), + (r':' + complex_name_re, String.Symbol), + (r':"', String.Symbol, 'string_double_atom'), + (r":'", String.Symbol, 'string_single_atom'), + + # [keywords: ...] 
+ (r'(%s|%s)(:)(?=\s|\n)' % (special_atom_re, complex_name_re), + bygroups(String.Symbol, Punctuation)), + + # @attributes + (r'@' + name_re, Name.Attribute), + + # identifiers + (name_re, Name), + (r'(%%?)(%s)' % (modname_re,), bygroups(Punctuation, Name.Class)), + + # operators and punctuation + (op3_re, Operator), + (op2_re, Operator), + (punctuation_re, Punctuation), + (r'&\d', Name.Entity), # anon func arguments + (op1_re, Operator), + + # numbers + (r'0b[01]+', Number.Bin), + (r'0o[0-7]+', Number.Oct), + (r'0x[\da-fA-F]+', Number.Hex), + (r'\d(_?\d)*\.\d(_?\d)*([eE][-+]?\d(_?\d)*)?', Number.Float), + (r'\d(_?\d)*', Number.Integer), + + # strings and heredocs + (r'"""\s*', String.Heredoc, 'heredoc_double'), + (r"'''\s*$", String.Heredoc, 'heredoc_single'), + (r'"', String.Double, 'string_double'), + (r"'", String.Single, 'string_single'), + + include('sigils'), + + (r'%\{', Punctuation, 'map_key'), + (r'\{', Punctuation, 'tuple'), + ], + 'heredoc_double': [ + (r'^\s*"""', String.Heredoc, '#pop'), + include('heredoc_interpol'), + ], + 'heredoc_single': [ + (r"^\s*'''", String.Heredoc, '#pop'), + include('heredoc_interpol'), + ], + 'heredoc_interpol': [ + (r'[^#\\\n]+', String.Heredoc), + include('escapes'), + (r'\\.', String.Heredoc), + (r'\n+', String.Heredoc), + include('interpol'), + ], + 'heredoc_no_interpol': [ + (r'[^\\\n]+', String.Heredoc), + (r'\\.', String.Heredoc), + (r'\n+', String.Heredoc), + ], + 'escapes': [ + (long_hex_char_re, + bygroups(String.Escape, Number.Hex, String.Escape)), + (hex_char_re, String.Escape), + (escape_char_re, String.Escape), + ], + 'interpol': [ + (r'#\{', String.Interpol, 'interpol_string'), + ], + 'interpol_string': [ + (r'\}', String.Interpol, "#pop"), + include('root') + ], + 'map_key': [ + include('root'), + (r':', Punctuation, 'map_val'), + (r'=>', Punctuation, 'map_val'), + (r'\}', Punctuation, '#pop'), + ], + 'map_val': [ + include('root'), + (r',', Punctuation, '#pop'), + (r'(?=\})', Punctuation, '#pop'), + ], + 
'tuple': [ + include('root'), + (r'\}', Punctuation, '#pop'), + ], + } + tokens.update(gen_elixir_string_rules('double', '"', String.Double)) + tokens.update(gen_elixir_string_rules('single', "'", String.Single)) + tokens.update(gen_elixir_string_rules('double_atom', '"', String.Symbol)) + tokens.update(gen_elixir_string_rules('single_atom', "'", String.Symbol)) + tokens.update(gen_elixir_sigil_rules()) + + +class ElixirConsoleLexer(Lexer): + """ + For Elixir interactive console (iex) output like: + + .. sourcecode:: iex + + iex> [head | tail] = [1,2,3] + [1,2,3] + iex> head + 1 + iex> tail + [2,3] + iex> [head | tail] + [1,2,3] + iex> length [head | tail] + 3 + + .. versionadded:: 1.5 + """ + + name = 'Elixir iex session' + aliases = ['iex'] + mimetypes = ['text/x-elixir-shellsession'] + + _prompt_re = re.compile('(iex|\.{3})(\(\d+\))?> ') + + def get_tokens_unprocessed(self, text): + exlexer = ElixirLexer(**self.options) + + curcode = '' + in_error = False + insertions = [] + for match in line_re.finditer(text): + line = match.group() + if line.startswith(u'** '): + in_error = True + insertions.append((len(curcode), + [(0, Generic.Error, line[:-1])])) + curcode += line[-1:] + else: + m = self._prompt_re.match(line) + if m is not None: + in_error = False + end = m.end() + insertions.append((len(curcode), + [(0, Generic.Prompt, line[:end])])) + curcode += line[end:] + else: + if curcode: + for item in do_insertions( + insertions, exlexer.get_tokens_unprocessed(curcode)): + yield item + curcode = '' + insertions = [] + token = Generic.Error if in_error else Generic.Output + yield match.start(), token, line + if curcode: + for item in do_insertions( + insertions, exlexer.get_tokens_unprocessed(curcode)): + yield item diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/esoteric.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/esoteric.py new file mode 100644 index 0000000..7a026ae --- /dev/null +++ 
b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/esoteric.py @@ -0,0 +1,114 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.esoteric + ~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for esoteric languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, include +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Error + +__all__ = ['BrainfuckLexer', 'BefungeLexer', 'RedcodeLexer'] + + +class BrainfuckLexer(RegexLexer): + """ + Lexer for the esoteric `BrainFuck `_ + language. + """ + + name = 'Brainfuck' + aliases = ['brainfuck', 'bf'] + filenames = ['*.bf', '*.b'] + mimetypes = ['application/x-brainfuck'] + + tokens = { + 'common': [ + # use different colors for different instruction types + (r'[.,]+', Name.Tag), + (r'[+-]+', Name.Builtin), + (r'[<>]+', Name.Variable), + (r'[^.,+\-<>\[\]]+', Comment), + ], + 'root': [ + (r'\[', Keyword, 'loop'), + (r'\]', Error), + include('common'), + ], + 'loop': [ + (r'\[', Keyword, '#push'), + (r'\]', Keyword, '#pop'), + include('common'), + ] + } + + +class BefungeLexer(RegexLexer): + """ + Lexer for the esoteric `Befunge `_ + language. + + .. versionadded:: 0.7 + """ + name = 'Befunge' + aliases = ['befunge'] + filenames = ['*.befunge'] + mimetypes = ['application/x-befunge'] + + tokens = { + 'root': [ + (r'[0-9a-f]', Number), + (r'[+*/%!`-]', Operator), # Traditional math + (r'[<>^v?\[\]rxjk]', Name.Variable), # Move, imperatives + (r'[:\\$.,n]', Name.Builtin), # Stack ops, imperatives + (r'[|_mw]', Keyword), + (r'[{}]', Name.Tag), # Befunge-98 stack ops + (r'".*?"', String.Double), # Strings don't appear to allow escapes + (r'\'.', String.Single), # Single character + (r'[#;]', Comment), # Trampoline... 
depends on direction hit + (r'[pg&~=@iotsy]', Keyword), # Misc + (r'[()A-Z]', Comment), # Fingerprints + (r'\s+', Text), # Whitespace doesn't matter + ], + } + + +class RedcodeLexer(RegexLexer): + """ + A simple Redcode lexer based on ICWS'94. + Contributed by Adam Blinkinsop . + + .. versionadded:: 0.8 + """ + name = 'Redcode' + aliases = ['redcode'] + filenames = ['*.cw'] + + opcodes = ('DAT', 'MOV', 'ADD', 'SUB', 'MUL', 'DIV', 'MOD', + 'JMP', 'JMZ', 'JMN', 'DJN', 'CMP', 'SLT', 'SPL', + 'ORG', 'EQU', 'END') + modifiers = ('A', 'B', 'AB', 'BA', 'F', 'X', 'I') + + tokens = { + 'root': [ + # Whitespace: + (r'\s+', Text), + (r';.*$', Comment.Single), + # Lexemes: + # Identifiers + (r'\b(%s)\b' % '|'.join(opcodes), Name.Function), + (r'\b(%s)\b' % '|'.join(modifiers), Name.Decorator), + (r'[A-Za-z_]\w+', Name), + # Operators + (r'[-+*/%]', Operator), + (r'[#$@<>]', Operator), # mode + (r'[.,]', Punctuation), # mode + # Numbers + (r'[-+]?\d+', Number.Integer), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/factor.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/factor.py new file mode 100644 index 0000000..402fc12 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/factor.py @@ -0,0 +1,344 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.factor + ~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for the Factor language. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, bygroups, default, words +from pygments.token import Text, Comment, Keyword, Name, String, Number + +__all__ = ['FactorLexer'] + + +class FactorLexer(RegexLexer): + """ + Lexer for the `Factor `_ language. + + .. 
versionadded:: 1.4 + """ + name = 'Factor' + aliases = ['factor'] + filenames = ['*.factor'] + mimetypes = ['text/x-factor'] + + flags = re.MULTILINE | re.UNICODE + + builtin_kernel = words(( + '-rot', '2bi', '2bi@', '2bi*', '2curry', '2dip', '2drop', '2dup', '2keep', '2nip', + '2over', '2tri', '2tri@', '2tri*', '3bi', '3curry', '3dip', '3drop', '3dup', '3keep', + '3tri', '4dip', '4drop', '4dup', '4keep', '', '=', '>boolean', 'clone', + '?', '?execute', '?if', 'and', 'assert', 'assert=', 'assert?', 'bi', 'bi-curry', + 'bi-curry@', 'bi-curry*', 'bi@', 'bi*', 'boa', 'boolean', 'boolean?', 'both?', + 'build', 'call', 'callstack', 'callstack>array', 'callstack?', 'clear', '(clone)', + 'compose', 'compose?', 'curry', 'curry?', 'datastack', 'die', 'dip', 'do', 'drop', + 'dup', 'dupd', 'either?', 'eq?', 'equal?', 'execute', 'hashcode', 'hashcode*', + 'identity-hashcode', 'identity-tuple', 'identity-tuple?', 'if', 'if*', + 'keep', 'loop', 'most', 'new', 'nip', 'not', 'null', 'object', 'or', 'over', + 'pick', 'prepose', 'retainstack', 'rot', 'same?', 'swap', 'swapd', 'throw', + 'tri', 'tri-curry', 'tri-curry@', 'tri-curry*', 'tri@', 'tri*', 'tuple', + 'tuple?', 'unless', 'unless*', 'until', 'when', 'when*', 'while', 'with', + 'wrapper', 'wrapper?', 'xor'), suffix=r'\s') + + builtin_assocs = words(( + '2cache', '', '>alist', '?at', '?of', 'assoc', 'assoc-all?', + 'assoc-any?', 'assoc-clone-like', 'assoc-combine', 'assoc-diff', + 'assoc-diff!', 'assoc-differ', 'assoc-each', 'assoc-empty?', + 'assoc-filter', 'assoc-filter!', 'assoc-filter-as', 'assoc-find', + 'assoc-hashcode', 'assoc-intersect', 'assoc-like', 'assoc-map', + 'assoc-map-as', 'assoc-partition', 'assoc-refine', 'assoc-size', + 'assoc-stack', 'assoc-subset?', 'assoc-union', 'assoc-union!', + 'assoc=', 'assoc>map', 'assoc?', 'at', 'at+', 'at*', 'cache', 'change-at', + 'clear-assoc', 'delete-at', 'delete-at*', 'enum', 'enum?', 'extract-keys', + 'inc-at', 'key?', 'keys', 'map>assoc', 'maybe-set-at', 'new-assoc', 'of', 
+ 'push-at', 'rename-at', 'set-at', 'sift-keys', 'sift-values', 'substitute', + 'unzip', 'value-at', 'value-at*', 'value?', 'values', 'zip'), suffix=r'\s') + + builtin_combinators = words(( + '2cleave', '2cleave>quot', '3cleave', '3cleave>quot', '4cleave', + '4cleave>quot', 'alist>quot', 'call-effect', 'case', 'case-find', + 'case>quot', 'cleave', 'cleave>quot', 'cond', 'cond>quot', 'deep-spread>quot', + 'execute-effect', 'linear-case-quot', 'no-case', 'no-case?', 'no-cond', + 'no-cond?', 'recursive-hashcode', 'shallow-spread>quot', 'spread', + 'to-fixed-point', 'wrong-values', 'wrong-values?'), suffix=r'\s') + + builtin_math = words(( + '-', '/', '/f', '/i', '/mod', '2/', '2^', '<', '<=', '', '>', + '>=', '>bignum', '>fixnum', '>float', '>integer', '(all-integers?)', + '(each-integer)', '(find-integer)', '*', '+', '?1+', + 'abs', 'align', 'all-integers?', 'bignum', 'bignum?', 'bit?', 'bitand', + 'bitnot', 'bitor', 'bits>double', 'bits>float', 'bitxor', 'complex', + 'complex?', 'denominator', 'double>bits', 'each-integer', 'even?', + 'find-integer', 'find-last-integer', 'fixnum', 'fixnum?', 'float', + 'float>bits', 'float?', 'fp-bitwise=', 'fp-infinity?', 'fp-nan-payload', + 'fp-nan?', 'fp-qnan?', 'fp-sign', 'fp-snan?', 'fp-special?', + 'if-zero', 'imaginary-part', 'integer', 'integer>fixnum', + 'integer>fixnum-strict', 'integer?', 'log2', 'log2-expects-positive', + 'log2-expects-positive?', 'mod', 'neg', 'neg?', 'next-float', + 'next-power-of-2', 'number', 'number=', 'number?', 'numerator', 'odd?', + 'out-of-fixnum-range', 'out-of-fixnum-range?', 'power-of-2?', + 'prev-float', 'ratio', 'ratio?', 'rational', 'rational?', 'real', + 'real-part', 'real?', 'recip', 'rem', 'sgn', 'shift', 'sq', 'times', + 'u<', 'u<=', 'u>', 'u>=', 'unless-zero', 'unordered?', 'when-zero', + 'zero?'), suffix=r'\s') + + builtin_sequences = words(( + '1sequence', '2all?', '2each', '2map', '2map-as', '2map-reduce', '2reduce', + '2selector', '2sequence', '3append', '3append-as', '3each', 
'3map', '3map-as', + '3sequence', '4sequence', '', '', '', '?first', + '?last', '?nth', '?second', '?set-nth', 'accumulate', 'accumulate!', + 'accumulate-as', 'all?', 'any?', 'append', 'append!', 'append-as', + 'assert-sequence', 'assert-sequence=', 'assert-sequence?', + 'binary-reduce', 'bounds-check', 'bounds-check?', 'bounds-error', + 'bounds-error?', 'but-last', 'but-last-slice', 'cartesian-each', + 'cartesian-map', 'cartesian-product', 'change-nth', 'check-slice', + 'check-slice-error', 'clone-like', 'collapse-slice', 'collector', + 'collector-for', 'concat', 'concat-as', 'copy', 'count', 'cut', 'cut-slice', + 'cut*', 'delete-all', 'delete-slice', 'drop-prefix', 'each', 'each-from', + 'each-index', 'empty?', 'exchange', 'filter', 'filter!', 'filter-as', 'find', + 'find-from', 'find-index', 'find-index-from', 'find-last', 'find-last-from', + 'first', 'first2', 'first3', 'first4', 'flip', 'follow', 'fourth', 'glue', 'halves', + 'harvest', 'head', 'head-slice', 'head-slice*', 'head*', 'head?', + 'if-empty', 'immutable', 'immutable-sequence', 'immutable-sequence?', + 'immutable?', 'index', 'index-from', 'indices', 'infimum', 'infimum-by', + 'insert-nth', 'interleave', 'iota', 'iota-tuple', 'iota-tuple?', 'join', + 'join-as', 'last', 'last-index', 'last-index-from', 'length', 'lengthen', + 'like', 'longer', 'longer?', 'longest', 'map', 'map!', 'map-as', 'map-find', + 'map-find-last', 'map-index', 'map-integers', 'map-reduce', 'map-sum', + 'max-length', 'member-eq?', 'member?', 'midpoint@', 'min-length', + 'mismatch', 'move', 'new-like', 'new-resizable', 'new-sequence', + 'non-negative-integer-expected', 'non-negative-integer-expected?', + 'nth', 'nths', 'pad-head', 'pad-tail', 'padding', 'partition', 'pop', 'pop*', + 'prefix', 'prepend', 'prepend-as', 'produce', 'produce-as', 'product', 'push', + 'push-all', 'push-either', 'push-if', 'reduce', 'reduce-index', 'remove', + 'remove!', 'remove-eq', 'remove-eq!', 'remove-nth', 'remove-nth!', 'repetition', + 
'repetition?', 'replace-slice', 'replicate', 'replicate-as', 'rest', + 'rest-slice', 'reverse', 'reverse!', 'reversed', 'reversed?', 'second', + 'selector', 'selector-for', 'sequence', 'sequence-hashcode', 'sequence=', + 'sequence?', 'set-first', 'set-fourth', 'set-last', 'set-length', 'set-nth', + 'set-second', 'set-third', 'short', 'shorten', 'shorter', 'shorter?', + 'shortest', 'sift', 'slice', 'slice-error', 'slice-error?', 'slice?', + 'snip', 'snip-slice', 'start', 'start*', 'subseq', 'subseq?', 'suffix', + 'suffix!', 'sum', 'sum-lengths', 'supremum', 'supremum-by', 'surround', 'tail', + 'tail-slice', 'tail-slice*', 'tail*', 'tail?', 'third', 'trim', + 'trim-head', 'trim-head-slice', 'trim-slice', 'trim-tail', 'trim-tail-slice', + 'unclip', 'unclip-last', 'unclip-last-slice', 'unclip-slice', 'unless-empty', + 'virtual-exemplar', 'virtual-sequence', 'virtual-sequence?', 'virtual@', + 'when-empty'), suffix=r'\s') + + builtin_namespaces = words(( + '+@', 'change', 'change-global', 'counter', 'dec', 'get', 'get-global', + 'global', 'inc', 'init-namespaces', 'initialize', 'is-global', 'make-assoc', + 'namespace', 'namestack', 'off', 'on', 'set', 'set-global', 'set-namestack', + 'toggle', 'with-global', 'with-scope', 'with-variable', 'with-variables'), + suffix=r'\s') + + builtin_arrays = words(( + '1array', '2array', '3array', '4array', '', '>array', 'array', + 'array?', 'pair', 'pair?', 'resize-array'), suffix=r'\s') + + builtin_io = words(( + '(each-stream-block-slice)', '(each-stream-block)', + '(stream-contents-by-block)', '(stream-contents-by-element)', + '(stream-contents-by-length-or-block)', + '(stream-contents-by-length)', '+byte+', '+character+', + 'bad-seek-type', 'bad-seek-type?', 'bl', 'contents', 'each-block', + 'each-block-size', 'each-block-slice', 'each-line', 'each-morsel', + 'each-stream-block', 'each-stream-block-slice', 'each-stream-line', + 'error-stream', 'flush', 'input-stream', 'input-stream?', + 'invalid-read-buffer', 
'invalid-read-buffer?', 'lines', 'nl', + 'output-stream', 'output-stream?', 'print', 'read', 'read-into', + 'read-partial', 'read-partial-into', 'read-until', 'read1', 'readln', + 'seek-absolute', 'seek-absolute?', 'seek-end', 'seek-end?', + 'seek-input', 'seek-output', 'seek-relative', 'seek-relative?', + 'stream-bl', 'stream-contents', 'stream-contents*', 'stream-copy', + 'stream-copy*', 'stream-element-type', 'stream-flush', + 'stream-length', 'stream-lines', 'stream-nl', 'stream-print', + 'stream-read', 'stream-read-into', 'stream-read-partial', + 'stream-read-partial-into', 'stream-read-partial-unsafe', + 'stream-read-unsafe', 'stream-read-until', 'stream-read1', + 'stream-readln', 'stream-seek', 'stream-seekable?', 'stream-tell', + 'stream-write', 'stream-write1', 'tell-input', 'tell-output', + 'with-error-stream', 'with-error-stream*', 'with-error>output', + 'with-input-output+error-streams', + 'with-input-output+error-streams*', 'with-input-stream', + 'with-input-stream*', 'with-output-stream', 'with-output-stream*', + 'with-output>error', 'with-output+error-stream', + 'with-output+error-stream*', 'with-streams', 'with-streams*', + 'write', 'write1'), suffix=r'\s') + + builtin_strings = words(( + '1string', '', '>string', 'resize-string', 'string', + 'string?'), suffix=r'\s') + + builtin_vectors = words(( + '1vector', '', '>vector', '?push', 'vector', 'vector?'), + suffix=r'\s') + + builtin_continuations = words(( + '', '', '', 'attempt-all', + 'attempt-all-error', 'attempt-all-error?', 'callback-error-hook', + 'callcc0', 'callcc1', 'cleanup', 'compute-restarts', 'condition', + 'condition?', 'continuation', 'continuation?', 'continue', + 'continue-restart', 'continue-with', 'current-continuation', + 'error', 'error-continuation', 'error-in-thread', 'error-thread', + 'ifcc', 'ignore-errors', 'in-callback?', 'original-error', 'recover', + 'restart', 'restart?', 'restarts', 'rethrow', 'rethrow-restarts', + 'return', 'return-continuation', 'thread-error-hook', 
'throw-continue', + 'throw-restarts', 'with-datastack', 'with-return'), suffix=r'\s') + + tokens = { + 'root': [ + # factor allows a file to start with a shebang + (r'#!.*$', Comment.Preproc), + default('base'), + ], + 'base': [ + (r'\s+', Text), + + # defining words + (r'((?:MACRO|MEMO|TYPED)?:[:]?)(\s+)(\S+)', + bygroups(Keyword, Text, Name.Function)), + (r'(M:[:]?)(\s+)(\S+)(\s+)(\S+)', + bygroups(Keyword, Text, Name.Class, Text, Name.Function)), + (r'(C:)(\s+)(\S+)(\s+)(\S+)', + bygroups(Keyword, Text, Name.Function, Text, Name.Class)), + (r'(GENERIC:)(\s+)(\S+)', + bygroups(Keyword, Text, Name.Function)), + (r'(HOOK:|GENERIC#)(\s+)(\S+)(\s+)(\S+)', + bygroups(Keyword, Text, Name.Function, Text, Name.Function)), + (r'\(\s', Name.Function, 'stackeffect'), + (r';\s', Keyword), + + # imports and namespaces + (r'(USING:)(\s+)', + bygroups(Keyword.Namespace, Text), 'vocabs'), + (r'(USE:|UNUSE:|IN:|QUALIFIED:)(\s+)(\S+)', + bygroups(Keyword.Namespace, Text, Name.Namespace)), + (r'(QUALIFIED-WITH:)(\s+)(\S+)(\s+)(\S+)', + bygroups(Keyword.Namespace, Text, Name.Namespace, Text, Name.Namespace)), + (r'(FROM:|EXCLUDE:)(\s+)(\S+)(\s+=>\s)', + bygroups(Keyword.Namespace, Text, Name.Namespace, Text), 'words'), + (r'(RENAME:)(\s+)(\S+)(\s+)(\S+)(\s+=>\s+)(\S+)', + bygroups(Keyword.Namespace, Text, Name.Function, Text, Name.Namespace, Text, Name.Function)), + (r'(ALIAS:|TYPEDEF:)(\s+)(\S+)(\s+)(\S+)', + bygroups(Keyword.Namespace, Text, Name.Function, Text, Name.Function)), + (r'(DEFER:|FORGET:|POSTPONE:)(\s+)(\S+)', + bygroups(Keyword.Namespace, Text, Name.Function)), + + # tuples and classes + (r'(TUPLE:|ERROR:)(\s+)(\S+)(\s+<\s+)(\S+)', + bygroups(Keyword, Text, Name.Class, Text, Name.Class), 'slots'), + (r'(TUPLE:|ERROR:|BUILTIN:)(\s+)(\S+)', + bygroups(Keyword, Text, Name.Class), 'slots'), + (r'(MIXIN:|UNION:|INTERSECTION:)(\s+)(\S+)', + bygroups(Keyword, Text, Name.Class)), + (r'(PREDICATE:)(\s+)(\S+)(\s+<\s+)(\S+)', + bygroups(Keyword, Text, Name.Class, Text, 
Name.Class)), + (r'(C:)(\s+)(\S+)(\s+)(\S+)', + bygroups(Keyword, Text, Name.Function, Text, Name.Class)), + (r'(INSTANCE:)(\s+)(\S+)(\s+)(\S+)', + bygroups(Keyword, Text, Name.Class, Text, Name.Class)), + (r'(SLOT:)(\s+)(\S+)', bygroups(Keyword, Text, Name.Function)), + (r'(SINGLETON:)(\s+)(\S+)', bygroups(Keyword, Text, Name.Class)), + (r'SINGLETONS:', Keyword, 'classes'), + + # other syntax + (r'(CONSTANT:|SYMBOL:|MAIN:|HELP:)(\s+)(\S+)', + bygroups(Keyword, Text, Name.Function)), + (r'SYMBOLS:\s', Keyword, 'words'), + (r'SYNTAX:\s', Keyword), + (r'ALIEN:\s', Keyword), + (r'(STRUCT:)(\s+)(\S+)', bygroups(Keyword, Text, Name.Class)), + (r'(FUNCTION:)(\s+\S+\s+)(\S+)(\s+\(\s+[^)]+\)\s)', + bygroups(Keyword.Namespace, Text, Name.Function, Text)), + (r'(FUNCTION-ALIAS:)(\s+)(\S+)(\s+\S+\s+)(\S+)(\s+\(\s+[^)]+\)\s)', + bygroups(Keyword.Namespace, Text, Name.Function, Text, Name.Function, Text)), + + # vocab.private + (r'(?:)\s', Keyword.Namespace), + + # strings + (r'"""\s+(?:.|\n)*?\s+"""', String), + (r'"(?:\\\\|\\"|[^"])*"', String), + (r'\S+"\s+(?:\\\\|\\"|[^"])*"', String), + (r'CHAR:\s+(?:\\[\\abfnrstv]|[^\\]\S*)\s', String.Char), + + # comments + (r'!\s+.*$', Comment), + (r'#!\s+.*$', Comment), + (r'/\*\s+(?:.|\n)*?\s\*/\s', Comment), + + # boolean constants + (r'[tf]\s', Name.Constant), + + # symbols and literals + (r'[\\$]\s+\S+', Name.Constant), + (r'M\\\s+\S+\s+\S+', Name.Constant), + + # numbers + (r'[+-]?(?:[\d,]*\d)?\.(?:\d([\d,]*\d)?)?(?:[eE][+-]?\d+)?\s', Number), + (r'[+-]?\d(?:[\d,]*\d)?(?:[eE][+-]?\d+)?\s', Number), + (r'0x[a-fA-F\d](?:[a-fA-F\d,]*[a-fA-F\d])?(?:p\d([\d,]*\d)?)?\s', Number), + (r'NAN:\s+[a-fA-F\d](?:[a-fA-F\d,]*[a-fA-F\d])?(?:p\d([\d,]*\d)?)?\s', Number), + (r'0b[01]+\s', Number.Bin), + (r'0o[0-7]+\s', Number.Oct), + (r'(?:\d([\d,]*\d)?)?\+\d(?:[\d,]*\d)?/\d(?:[\d,]*\d)?\s', Number), + (r'(?:\-\d([\d,]*\d)?)?\-\d(?:[\d,]*\d)?/\d(?:[\d,]*\d)?\s', Number), + + # keywords + 
(r'(?:deprecated|final|foldable|flushable|inline|recursive)\s', + Keyword), + + # builtins + (builtin_kernel, Name.Builtin), + (builtin_assocs, Name.Builtin), + (builtin_combinators, Name.Builtin), + (builtin_math, Name.Builtin), + (builtin_sequences, Name.Builtin), + (builtin_namespaces, Name.Builtin), + (builtin_arrays, Name.Builtin), + (builtin_io, Name.Builtin), + (builtin_strings, Name.Builtin), + (builtin_vectors, Name.Builtin), + (builtin_continuations, Name.Builtin), + + # everything else is text + (r'\S+', Text), + ], + 'stackeffect': [ + (r'\s+', Text), + (r'\(\s+', Name.Function, 'stackeffect'), + (r'\)\s', Name.Function, '#pop'), + (r'--\s', Name.Function), + (r'\S+', Name.Variable), + ], + 'slots': [ + (r'\s+', Text), + (r';\s', Keyword, '#pop'), + (r'(\{\s+)(\S+)(\s+[^}]+\s+\}\s)', + bygroups(Text, Name.Variable, Text)), + (r'\S+', Name.Variable), + ], + 'vocabs': [ + (r'\s+', Text), + (r';\s', Keyword, '#pop'), + (r'\S+', Name.Namespace), + ], + 'classes': [ + (r'\s+', Text), + (r';\s', Keyword, '#pop'), + (r'\S+', Name.Class), + ], + 'words': [ + (r'\s+', Text), + (r';\s', Keyword, '#pop'), + (r'\S+', Name.Function), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/fantom.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/fantom.py new file mode 100644 index 0000000..953b324 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/fantom.py @@ -0,0 +1,250 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.fantom + ~~~~~~~~~~~~~~~~~~~~~~ + + Lexer for the Fantom language. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +from string import Template + +from pygments.lexer import RegexLexer, include, bygroups, using, \ + this, default, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Literal + +__all__ = ['FantomLexer'] + + +class FantomLexer(RegexLexer): + """ + For Fantom source code. + + .. versionadded:: 1.5 + """ + name = 'Fantom' + aliases = ['fan'] + filenames = ['*.fan'] + mimetypes = ['application/x-fantom'] + + # often used regexes + def s(str): + return Template(str).substitute( + dict( + pod=r'[\"\w\.]+', + eos=r'\n|;', + id=r'[a-zA-Z_]\w*', + # all chars which can be part of type definition. Starts with + # either letter, or [ (maps), or | (funcs) + type=r'(?:\[|[a-zA-Z_]|\|)[:\w\[\]|\->?]*?', + ) + ) + + tokens = { + 'comments': [ + (r'(?s)/\*.*?\*/', Comment.Multiline), # Multiline + (r'//.*?\n', Comment.Single), # Single line + # TODO: highlight references in fandocs + (r'\*\*.*?\n', Comment.Special), # Fandoc + (r'#.*\n', Comment.Single) # Shell-style + ], + 'literals': [ + (r'\b-?[\d_]+(ns|ms|sec|min|hr|day)', Number), # Duration + (r'\b-?[\d_]*\.[\d_]+(ns|ms|sec|min|hr|day)', Number), # Duration with dot + (r'\b-?(\d+)?\.\d+(f|F|d|D)?', Number.Float), # Float/Decimal + (r'\b-?0x[0-9a-fA-F_]+', Number.Hex), # Hex + (r'\b-?[\d_]+', Number.Integer), # Int + (r"'\\.'|'[^\\]'|'\\u[0-9a-f]{4}'", String.Char), # Char + (r'"', Punctuation, 'insideStr'), # Opening quote + (r'`', Punctuation, 'insideUri'), # Opening accent + (r'\b(true|false|null)\b', Keyword.Constant), # Bool & null + (r'(?:(\w+)(::))?(\w+)(<\|)(.*?)(\|>)', # DSL + bygroups(Name.Namespace, Punctuation, Name.Class, + Punctuation, String, Punctuation)), + (r'(?:(\w+)(::))?(\w+)?(#)(\w+)?', # Type/slot literal + bygroups(Name.Namespace, Punctuation, Name.Class, + Punctuation, Name.Function)), + (r'\[,\]', Literal), # Empty list + (s(r'($type)(\[,\])'), # Typed empty list + bygroups(using(this, state='inType'), Literal)), + (r'\[:\]', Literal), # 
Empty Map + (s(r'($type)(\[:\])'), + bygroups(using(this, state='inType'), Literal)), + ], + 'insideStr': [ + (r'\\\\', String.Escape), # Escaped backslash + (r'\\"', String.Escape), # Escaped " + (r'\\`', String.Escape), # Escaped ` + (r'\$\w+', String.Interpol), # Subst var + (r'\$\{.*?\}', String.Interpol), # Subst expr + (r'"', Punctuation, '#pop'), # Closing quot + (r'.', String) # String content + ], + 'insideUri': [ # TODO: remove copy/paste str/uri + (r'\\\\', String.Escape), # Escaped backslash + (r'\\"', String.Escape), # Escaped " + (r'\\`', String.Escape), # Escaped ` + (r'\$\w+', String.Interpol), # Subst var + (r'\$\{.*?\}', String.Interpol), # Subst expr + (r'`', Punctuation, '#pop'), # Closing tick + (r'.', String.Backtick) # URI content + ], + 'protectionKeywords': [ + (r'\b(public|protected|private|internal)\b', Keyword), + ], + 'typeKeywords': [ + (r'\b(abstract|final|const|native|facet|enum)\b', Keyword), + ], + 'methodKeywords': [ + (r'\b(abstract|native|once|override|static|virtual|final)\b', + Keyword), + ], + 'fieldKeywords': [ + (r'\b(abstract|const|final|native|override|static|virtual|' + r'readonly)\b', Keyword) + ], + 'otherKeywords': [ + (words(( + 'try', 'catch', 'throw', 'finally', 'for', 'if', 'else', 'while', + 'as', 'is', 'isnot', 'switch', 'case', 'default', 'continue', + 'break', 'do', 'return', 'get', 'set'), prefix=r'\b', suffix=r'\b'), + Keyword), + (r'\b(it|this|super)\b', Name.Builtin.Pseudo), + ], + 'operators': [ + (r'\+\+|\-\-|\+|\-|\*|/|\|\||&&|<=>|<=|<|>=|>|=|!|\[|\]', Operator) + ], + 'inType': [ + (r'[\[\]|\->:?]', Punctuation), + (s(r'$id'), Name.Class), + default('#pop'), + + ], + 'root': [ + include('comments'), + include('protectionKeywords'), + include('typeKeywords'), + include('methodKeywords'), + include('fieldKeywords'), + include('literals'), + include('otherKeywords'), + include('operators'), + (r'using\b', Keyword.Namespace, 'using'), # Using stmt + (r'@\w+', Name.Decorator, 'facet'), # Symbol + 
(r'(class|mixin)(\s+)(\w+)', bygroups(Keyword, Text, Name.Class), + 'inheritance'), # Inheritance list + + # Type var := val + (s(r'($type)([ \t]+)($id)(\s*)(:=)'), + bygroups(using(this, state='inType'), Text, + Name.Variable, Text, Operator)), + + # var := val + (s(r'($id)(\s*)(:=)'), + bygroups(Name.Variable, Text, Operator)), + + # .someId( or ->someId( ### + (s(r'(\.|(?:\->))($id)(\s*)(\()'), + bygroups(Operator, Name.Function, Text, Punctuation), + 'insideParen'), + + # .someId or ->someId + (s(r'(\.|(?:\->))($id)'), + bygroups(Operator, Name.Function)), + + # new makeXXX ( + (r'(new)(\s+)(make\w*)(\s*)(\()', + bygroups(Keyword, Text, Name.Function, Text, Punctuation), + 'insideMethodDeclArgs'), + + # Type name ( + (s(r'($type)([ \t]+)' # Return type and whitespace + r'($id)(\s*)(\()'), # method name + open brace + bygroups(using(this, state='inType'), Text, + Name.Function, Text, Punctuation), + 'insideMethodDeclArgs'), + + # ArgType argName, + (s(r'($type)(\s+)($id)(\s*)(,)'), + bygroups(using(this, state='inType'), Text, Name.Variable, + Text, Punctuation)), + + # ArgType argName) + # Covered in 'insideParen' state + + # ArgType argName -> ArgType| + (s(r'($type)(\s+)($id)(\s*)(\->)(\s*)($type)(\|)'), + bygroups(using(this, state='inType'), Text, Name.Variable, + Text, Punctuation, Text, using(this, state='inType'), + Punctuation)), + + # ArgType argName| + (s(r'($type)(\s+)($id)(\s*)(\|)'), + bygroups(using(this, state='inType'), Text, Name.Variable, + Text, Punctuation)), + + # Type var + (s(r'($type)([ \t]+)($id)'), + bygroups(using(this, state='inType'), Text, + Name.Variable)), + + (r'\(', Punctuation, 'insideParen'), + (r'\{', Punctuation, 'insideBrace'), + (r'.', Text) + ], + 'insideParen': [ + (r'\)', Punctuation, '#pop'), + include('root'), + ], + 'insideMethodDeclArgs': [ + (r'\)', Punctuation, '#pop'), + (s(r'($type)(\s+)($id)(\s*)(\))'), + bygroups(using(this, state='inType'), Text, Name.Variable, + Text, Punctuation), '#pop'), + 
include('root'), + ], + 'insideBrace': [ + (r'\}', Punctuation, '#pop'), + include('root'), + ], + 'inheritance': [ + (r'\s+', Text), # Whitespace + (r':|,', Punctuation), + (r'(?:(\w+)(::))?(\w+)', + bygroups(Name.Namespace, Punctuation, Name.Class)), + (r'\{', Punctuation, '#pop') + ], + 'using': [ + (r'[ \t]+', Text), # consume whitespaces + (r'(\[)(\w+)(\])', + bygroups(Punctuation, Comment.Special, Punctuation)), # ffi + (r'(\")?([\w.]+)(\")?', + bygroups(Punctuation, Name.Namespace, Punctuation)), # podname + (r'::', Punctuation, 'usingClass'), + default('#pop') + ], + 'usingClass': [ + (r'[ \t]+', Text), # consume whitespaces + (r'(as)(\s+)(\w+)', + bygroups(Keyword.Declaration, Text, Name.Class), '#pop:2'), + (r'[\w$]+', Name.Class), + default('#pop:2') # jump out to root state + ], + 'facet': [ + (r'\s+', Text), + (r'\{', Punctuation, 'facetFields'), + default('#pop') + ], + 'facetFields': [ + include('comments'), + include('literals'), + include('operators'), + (r'\s+', Text), + (r'(\s*)(\w+)(\s*)(=)', bygroups(Text, Name, Text, Operator)), + (r'\}', Punctuation, '#pop'), + (r'.', Text) + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/felix.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/felix.py new file mode 100644 index 0000000..ca8df57 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/felix.py @@ -0,0 +1,273 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.felix + ~~~~~~~~~~~~~~~~~~~~~ + + Lexer for the Felix language. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, include, bygroups, default, words, \ + combined +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['FelixLexer'] + + +class FelixLexer(RegexLexer): + """ + For `Felix `_ source code. + + .. 
versionadded:: 1.2 + """ + + name = 'Felix' + aliases = ['felix', 'flx'] + filenames = ['*.flx', '*.flxh'] + mimetypes = ['text/x-felix'] + + preproc = ( + 'elif', 'else', 'endif', 'if', 'ifdef', 'ifndef', + ) + + keywords = ( + '_', '_deref', 'all', 'as', + 'assert', 'attempt', 'call', 'callback', 'case', 'caseno', 'cclass', + 'code', 'compound', 'ctypes', 'do', 'done', 'downto', 'elif', 'else', + 'endattempt', 'endcase', 'endif', 'endmatch', 'enum', 'except', + 'exceptions', 'expect', 'finally', 'for', 'forall', 'forget', 'fork', + 'functor', 'goto', 'ident', 'if', 'incomplete', 'inherit', 'instance', + 'interface', 'jump', 'lambda', 'loop', 'match', 'module', 'namespace', + 'new', 'noexpand', 'nonterm', 'obj', 'of', 'open', 'parse', 'raise', + 'regexp', 'reglex', 'regmatch', 'rename', 'return', 'the', 'then', + 'to', 'type', 'typecase', 'typedef', 'typematch', 'typeof', 'upto', + 'when', 'whilst', 'with', 'yield', + ) + + keyword_directives = ( + '_gc_pointer', '_gc_type', 'body', 'comment', 'const', 'export', + 'header', 'inline', 'lval', 'macro', 'noinline', 'noreturn', + 'package', 'private', 'pod', 'property', 'public', 'publish', + 'requires', 'todo', 'virtual', 'use', + ) + + keyword_declarations = ( + 'def', 'let', 'ref', 'val', 'var', + ) + + keyword_types = ( + 'unit', 'void', 'any', 'bool', + 'byte', 'offset', + 'address', 'caddress', 'cvaddress', 'vaddress', + 'tiny', 'short', 'int', 'long', 'vlong', + 'utiny', 'ushort', 'vshort', 'uint', 'ulong', 'uvlong', + 'int8', 'int16', 'int32', 'int64', + 'uint8', 'uint16', 'uint32', 'uint64', + 'float', 'double', 'ldouble', + 'complex', 'dcomplex', 'lcomplex', + 'imaginary', 'dimaginary', 'limaginary', + 'char', 'wchar', 'uchar', + 'charp', 'charcp', 'ucharp', 'ucharcp', + 'string', 'wstring', 'ustring', + 'cont', + 'array', 'varray', 'list', + 'lvalue', 'opt', 'slice', + ) + + keyword_constants = ( + 'false', 'true', + ) + + operator_words = ( + 'and', 'not', 'in', 'is', 'isin', 'or', 'xor', + ) + + 
name_builtins = ( + '_svc', 'while', + ) + + name_pseudo = ( + 'root', 'self', 'this', + ) + + decimal_suffixes = '([tTsSiIlLvV]|ll|LL|([iIuU])(8|16|32|64))?' + + tokens = { + 'root': [ + include('whitespace'), + + # Keywords + (words(('axiom', 'ctor', 'fun', 'gen', 'proc', 'reduce', + 'union'), suffix=r'\b'), + Keyword, 'funcname'), + (words(('class', 'cclass', 'cstruct', 'obj', 'struct'), suffix=r'\b'), + Keyword, 'classname'), + (r'(instance|module|typeclass)\b', Keyword, 'modulename'), + + (words(keywords, suffix=r'\b'), Keyword), + (words(keyword_directives, suffix=r'\b'), Name.Decorator), + (words(keyword_declarations, suffix=r'\b'), Keyword.Declaration), + (words(keyword_types, suffix=r'\b'), Keyword.Type), + (words(keyword_constants, suffix=r'\b'), Keyword.Constant), + + # Operators + include('operators'), + + # Float Literal + # -- Hex Float + (r'0[xX]([0-9a-fA-F_]*\.[0-9a-fA-F_]+|[0-9a-fA-F_]+)' + r'[pP][+\-]?[0-9_]+[lLfFdD]?', Number.Float), + # -- DecimalFloat + (r'[0-9_]+(\.[0-9_]+[eE][+\-]?[0-9_]+|' + r'\.[0-9_]*|[eE][+\-]?[0-9_]+)[lLfFdD]?', Number.Float), + (r'\.(0|[1-9][0-9_]*)([eE][+\-]?[0-9_]+)?[lLfFdD]?', + Number.Float), + + # IntegerLiteral + # -- Binary + (r'0[Bb][01_]+%s' % decimal_suffixes, Number.Bin), + # -- Octal + (r'0[0-7_]+%s' % decimal_suffixes, Number.Oct), + # -- Hexadecimal + (r'0[xX][0-9a-fA-F_]+%s' % decimal_suffixes, Number.Hex), + # -- Decimal + (r'(0|[1-9][0-9_]*)%s' % decimal_suffixes, Number.Integer), + + # Strings + ('([rR][cC]?|[cC][rR])"""', String, 'tdqs'), + ("([rR][cC]?|[cC][rR])'''", String, 'tsqs'), + ('([rR][cC]?|[cC][rR])"', String, 'dqs'), + ("([rR][cC]?|[cC][rR])'", String, 'sqs'), + ('[cCfFqQwWuU]?"""', String, combined('stringescape', 'tdqs')), + ("[cCfFqQwWuU]?'''", String, combined('stringescape', 'tsqs')), + ('[cCfFqQwWuU]?"', String, combined('stringescape', 'dqs')), + ("[cCfFqQwWuU]?'", String, combined('stringescape', 'sqs')), + + # Punctuation + (r'[\[\]{}:(),;?]', Punctuation), + + # Labels + 
(r'[a-zA-Z_]\w*:>', Name.Label), + + # Identifiers + (r'(%s)\b' % '|'.join(name_builtins), Name.Builtin), + (r'(%s)\b' % '|'.join(name_pseudo), Name.Builtin.Pseudo), + (r'[a-zA-Z_]\w*', Name), + ], + 'whitespace': [ + (r'\n', Text), + (r'\s+', Text), + + include('comment'), + + # Preprocessor + (r'#\s*if\s+0', Comment.Preproc, 'if0'), + (r'#', Comment.Preproc, 'macro'), + ], + 'operators': [ + (r'(%s)\b' % '|'.join(operator_words), Operator.Word), + (r'!=|==|<<|>>|\|\||&&|[-~+/*%=<>&^|.$]', Operator), + ], + 'comment': [ + (r'//(.*?)\n', Comment.Single), + (r'/[*]', Comment.Multiline, 'comment2'), + ], + 'comment2': [ + (r'[^/*]', Comment.Multiline), + (r'/[*]', Comment.Multiline, '#push'), + (r'[*]/', Comment.Multiline, '#pop'), + (r'[/*]', Comment.Multiline), + ], + 'if0': [ + (r'^\s*#if.*?(?]*?>)', + bygroups(Comment.Preproc, Text, String), '#pop'), + (r'(import|include)(\s+)("[^"]*?")', + bygroups(Comment.Preproc, Text, String), '#pop'), + (r"(import|include)(\s+)('[^']*?')", + bygroups(Comment.Preproc, Text, String), '#pop'), + (r'[^/\n]+', Comment.Preproc), + # (r'/[*](.|\n)*?[*]/', Comment), + # (r'//.*?\n', Comment, '#pop'), + (r'/', Comment.Preproc), + (r'(?<=\\)\n', Comment.Preproc), + (r'\n', Comment.Preproc, '#pop'), + ], + 'funcname': [ + include('whitespace'), + (r'[a-zA-Z_]\w*', Name.Function, '#pop'), + # anonymous functions + (r'(?=\()', Text, '#pop'), + ], + 'classname': [ + include('whitespace'), + (r'[a-zA-Z_]\w*', Name.Class, '#pop'), + # anonymous classes + (r'(?=\{)', Text, '#pop'), + ], + 'modulename': [ + include('whitespace'), + (r'\[', Punctuation, ('modulename2', 'tvarlist')), + default('modulename2'), + ], + 'modulename2': [ + include('whitespace'), + (r'([a-zA-Z_]\w*)', Name.Namespace, '#pop:2'), + ], + 'tvarlist': [ + include('whitespace'), + include('operators'), + (r'\[', Punctuation, '#push'), + (r'\]', Punctuation, '#pop'), + (r',', Punctuation), + (r'(with|where)\b', Keyword), + (r'[a-zA-Z_]\w*', Name), + ], + 'stringescape': [ + 
(r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|' + r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape) + ], + 'strings': [ + (r'%(\([a-zA-Z0-9]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?' + '[hlL]?[diouxXeEfFgGcrs%]', String.Interpol), + (r'[^\\\'"%\n]+', String), + # quotes, percents and backslashes must be parsed one at a time + (r'[\'"\\]', String), + # unhandled string formatting sign + (r'%', String) + # newlines are an error (use "nl" state) + ], + 'nl': [ + (r'\n', String) + ], + 'dqs': [ + (r'"', String, '#pop'), + # included here again for raw strings + (r'\\\\|\\"|\\\n', String.Escape), + include('strings') + ], + 'sqs': [ + (r"'", String, '#pop'), + # included here again for raw strings + (r"\\\\|\\'|\\\n", String.Escape), + include('strings') + ], + 'tdqs': [ + (r'"""', String, '#pop'), + include('strings'), + include('nl') + ], + 'tsqs': [ + (r"'''", String, '#pop'), + include('strings'), + include('nl') + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/fortran.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/fortran.py new file mode 100644 index 0000000..3ef6ff4 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/fortran.py @@ -0,0 +1,161 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.fortran + ~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for Fortran languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['FortranLexer'] + + +class FortranLexer(RegexLexer): + """ + Lexer for FORTRAN 90 code. + + .. 
versionadded:: 0.10 + """ + name = 'Fortran' + aliases = ['fortran'] + filenames = ['*.f', '*.f90', '*.F', '*.F90'] + mimetypes = ['text/x-fortran'] + flags = re.IGNORECASE | re.MULTILINE + + # Data Types: INTEGER, REAL, COMPLEX, LOGICAL, CHARACTER and DOUBLE PRECISION + # Operators: **, *, +, -, /, <, >, <=, >=, ==, /= + # Logical (?): NOT, AND, OR, EQV, NEQV + + # Builtins: + # http://gcc.gnu.org/onlinedocs/gcc-3.4.6/g77/Table-of-Intrinsic-Functions.html + + tokens = { + 'root': [ + (r'^#.*\n', Comment.Preproc), + (r'!.*\n', Comment), + include('strings'), + include('core'), + (r'[a-z][\w$]*', Name), + include('nums'), + (r'[\s]+', Text), + ], + 'core': [ + # Statements + (words(( + 'ABSTRACT', 'ACCEPT', 'ALL', 'ALLSTOP', 'ALLOCATABLE', 'ALLOCATE', + 'ARRAY', 'ASSIGN', 'ASSOCIATE', 'ASYNCHRONOUS', 'BACKSPACE', 'BIND', + 'BLOCK', 'BLOCKDATA', 'BYTE', 'CALL', 'CASE', 'CLASS', 'CLOSE', + 'CODIMENSION', 'COMMON', 'CONCURRRENT', 'CONTIGUOUS', 'CONTAINS', + 'CONTINUE', 'CRITICAL', 'CYCLE', 'DATA', 'DEALLOCATE', 'DECODE', + 'DEFERRED', 'DIMENSION', 'DO', 'ELEMENTAL', 'ELSE', 'ENCODE', 'END', + 'ENTRY', 'ENUM', 'ENUMERATOR', 'EQUIVALENCE', 'EXIT', 'EXTENDS', + 'EXTERNAL', 'EXTRINSIC', 'FILE', 'FINAL', 'FORALL', 'FORMAT', + 'FUNCTION', 'GENERIC', 'GOTO', 'IF', 'IMAGES', 'IMPLICIT', + 'IMPORT', 'IMPURE', 'INCLUDE', 'INQUIRE', 'INTENT', 'INTERFACE', + 'INTRINSIC', 'IS', 'LOCK', 'MEMORY', 'MODULE', 'NAMELIST', 'NULLIFY', + 'NONE', 'NON_INTRINSIC', 'NON_OVERRIDABLE', 'NOPASS', 'OPEN', 'OPTIONAL', + 'OPTIONS', 'PARAMETER', 'PASS', 'PAUSE', 'POINTER', 'PRINT', 'PRIVATE', + 'PROGRAM', 'PROCEDURE', 'PROTECTED', 'PUBLIC', 'PURE', 'READ', + 'RECURSIVE', 'RESULT', 'RETURN', 'REWIND', 'SAVE', 'SELECT', 'SEQUENCE', + 'STOP', 'SUBMODULE', 'SUBROUTINE', 'SYNC', 'SYNCALL', 'SYNCIMAGES', + 'SYNCMEMORY', 'TARGET', 'THEN', 'TYPE', 'UNLOCK', 'USE', 'VALUE', + 'VOLATILE', 'WHERE', 'WRITE', 'WHILE'), prefix=r'\b', suffix=r'\s*\b'), + Keyword), + + # Data Types + (words(( + 'CHARACTER', 
'COMPLEX', 'DOUBLE PRECISION', 'DOUBLE COMPLEX', 'INTEGER', + 'LOGICAL', 'REAL', 'C_INT', 'C_SHORT', 'C_LONG', 'C_LONG_LONG', 'C_SIGNED_CHAR', + 'C_SIZE_T', 'C_INT8_T', 'C_INT16_T', 'C_INT32_T', 'C_INT64_T', 'C_INT_LEAST8_T', + 'C_INT_LEAST16_T', 'C_INT_LEAST32_T', 'C_INT_LEAST64_T', 'C_INT_FAST8_T', + 'C_INT_FAST16_T', 'C_INT_FAST32_T', 'C_INT_FAST64_T', 'C_INTMAX_T', + 'C_INTPTR_T', 'C_FLOAT', 'C_DOUBLE', 'C_LONG_DOUBLE', 'C_FLOAT_COMPLEX', + 'C_DOUBLE_COMPLEX', 'C_LONG_DOUBLE_COMPLEX', 'C_BOOL', 'C_CHAR', 'C_PTR', + 'C_FUNPTR'), prefix=r'\b', suffix=r'\s*\b'), + Keyword.Type), + + # Operators + (r'(\*\*|\*|\+|-|\/|<|>|<=|>=|==|\/=|=)', Operator), + + (r'(::)', Keyword.Declaration), + + (r'[()\[\],:&%;.]', Punctuation), + # Intrinsics + (words(( + 'Abort', 'Abs', 'Access', 'AChar', 'ACos', 'ACosH', 'AdjustL', + 'AdjustR', 'AImag', 'AInt', 'Alarm', 'All', 'Allocated', 'ALog', + 'AMax', 'AMin', 'AMod', 'And', 'ANInt', 'Any', 'ASin', 'ASinH', + 'Associated', 'ATan', 'ATanH', 'Atomic_Define', 'Atomic_Ref', + 'BesJ', 'BesJN', 'Bessel_J0', 'Bessel_J1', 'Bessel_JN', 'Bessel_Y0', + 'Bessel_Y1', 'Bessel_YN', 'BesY', 'BesYN', 'BGE', 'BGT', 'BLE', + 'BLT', 'Bit_Size', 'BTest', 'CAbs', 'CCos', 'Ceiling', 'CExp', + 'Char', 'ChDir', 'ChMod', 'CLog', 'Cmplx', 'Command_Argument_Count', + 'Complex', 'Conjg', 'Cos', 'CosH', 'Count', 'CPU_Time', 'CShift', + 'CSin', 'CSqRt', 'CTime', 'C_Loc', 'C_Associated', + 'C_Null_Ptr', 'C_Null_Funptr', 'C_F_Pointer', 'C_F_ProcPointer', + 'C_Null_Char', 'C_Alert', 'C_Backspace', 'C_Form_Feed', 'C_FunLoc', + 'C_Sizeof', 'C_New_Line', 'C_Carriage_Return', + 'C_Horizontal_Tab', 'C_Vertical_Tab', 'DAbs', 'DACos', 'DASin', + 'DATan', 'Date_and_Time', 'DbesJ', 'DbesJN', 'DbesY', + 'DbesYN', 'Dble', 'DCos', 'DCosH', 'DDiM', 'DErF', + 'DErFC', 'DExp', 'Digits', 'DiM', 'DInt', 'DLog', 'DMax', + 'DMin', 'DMod', 'DNInt', 'Dot_Product', 'DProd', 'DSign', 'DSinH', + 'DShiftL', 'DShiftR', 'DSin', 'DSqRt', 'DTanH', 'DTan', 'DTime', + 'EOShift', 'Epsilon', 
'ErF', 'ErFC', 'ErFC_Scaled', 'ETime', + 'Execute_Command_Line', 'Exit', 'Exp', 'Exponent', 'Extends_Type_Of', + 'FDate', 'FGet', 'FGetC', 'FindLoc', 'Float', 'Floor', 'Flush', + 'FNum', 'FPutC', 'FPut', 'Fraction', 'FSeek', 'FStat', 'FTell', + 'Gamma', 'GError', 'GetArg', 'Get_Command', 'Get_Command_Argument', + 'Get_Environment_Variable', 'GetCWD', 'GetEnv', 'GetGId', 'GetLog', + 'GetPId', 'GetUId', 'GMTime', 'HostNm', 'Huge', 'Hypot', 'IAbs', + 'IAChar', 'IAll', 'IAnd', 'IAny', 'IArgC', 'IBClr', 'IBits', + 'IBSet', 'IChar', 'IDate', 'IDiM', 'IDInt', 'IDNInt', 'IEOr', + 'IErrNo', 'IFix', 'Imag', 'ImagPart', 'Image_Index', 'Index', + 'Int', 'IOr', 'IParity', 'IRand', 'IsaTty', 'IShft', 'IShftC', + 'ISign', 'Iso_C_Binding', 'Is_Contiguous', 'Is_Iostat_End', + 'Is_Iostat_Eor', 'ITime', 'Kill', 'Kind', 'LBound', 'LCoBound', + 'Len', 'Len_Trim', 'LGe', 'LGt', 'Link', 'LLe', 'LLt', 'LnBlnk', + 'Loc', 'Log', 'Log_Gamma', 'Logical', 'Long', 'LShift', 'LStat', + 'LTime', 'MaskL', 'MaskR', 'MatMul', 'Max', 'MaxExponent', + 'MaxLoc', 'MaxVal', 'MClock', 'Merge', 'Merge_Bits', 'Move_Alloc', + 'Min', 'MinExponent', 'MinLoc', 'MinVal', 'Mod', 'Modulo', 'MvBits', + 'Nearest', 'New_Line', 'NInt', 'Norm2', 'Not', 'Null', 'Num_Images', + 'Or', 'Pack', 'Parity', 'PError', 'Precision', 'Present', 'Product', + 'Radix', 'Rand', 'Random_Number', 'Random_Seed', 'Range', 'Real', + 'RealPart', 'Rename', 'Repeat', 'Reshape', 'RRSpacing', 'RShift', + 'Same_Type_As', 'Scale', 'Scan', 'Second', 'Selected_Char_Kind', + 'Selected_Int_Kind', 'Selected_Real_Kind', 'Set_Exponent', 'Shape', + 'ShiftA', 'ShiftL', 'ShiftR', 'Short', 'Sign', 'Signal', 'SinH', + 'Sin', 'Sleep', 'Sngl', 'Spacing', 'Spread', 'SqRt', 'SRand', + 'Stat', 'Storage_Size', 'Sum', 'SymLnk', 'System', 'System_Clock', + 'Tan', 'TanH', 'Time', 'This_Image', 'Tiny', 'TrailZ', 'Transfer', + 'Transpose', 'Trim', 'TtyNam', 'UBound', 'UCoBound', 'UMask', + 'Unlink', 'Unpack', 'Verify', 'XOr', 'ZAbs', 'ZCos', 'ZExp', + 'ZLog', 'ZSin', 
'ZSqRt'), prefix=r'\b', suffix=r'\s*\b'), + Name.Builtin), + + # Booleans + (r'\.(true|false)\.', Name.Builtin), + # Comparing Operators + (r'\.(eq|ne|lt|le|gt|ge|not|and|or|eqv|neqv)\.', Operator.Word), + ], + + 'strings': [ + (r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double), + (r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single), + ], + + 'nums': [ + (r'\d+(?![.e])(_[a-z]\w+)?', Number.Integer), + (r'[+-]?\d*\.\d+(e[-+]?\d+)?(_[a-z]\w+)?', Number.Float), + (r'[+-]?\d+\.\d*(e[-+]?\d+)?(_[a-z]\w+)?', Number.Float), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/foxpro.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/foxpro.py similarity index 99% rename from plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/foxpro.py rename to plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/foxpro.py index 51cd499..99a65ce 100644 --- a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/foxpro.py +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/foxpro.py @@ -5,7 +5,7 @@ Simple lexer for Microsoft Visual FoxPro source code. - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ @@ -24,11 +24,11 @@ class FoxProLexer(RegexLexer): FoxPro syntax allows to shorten all keywords and function names to 4 characters. Shortened forms are not recognized by this lexer. - *New in Pygments 1.6.* + .. 
versionadded:: 1.6 """ name = 'FoxPro' - aliases = ['Clipper', 'XBase'] + aliases = ['foxpro', 'vfp', 'clipper', 'xbase'] filenames = ['*.PRG', '*.prg'] mimetype = [] diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/functional.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/functional.py new file mode 100644 index 0000000..791e8b6 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/functional.py @@ -0,0 +1,21 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.functional + ~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Just export lexer classes previously contained in this module. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexers.lisp import SchemeLexer, CommonLispLexer, RacketLexer, \ + NewLispLexer +from pygments.lexers.haskell import HaskellLexer, LiterateHaskellLexer, \ + KokaLexer +from pygments.lexers.theorem import CoqLexer +from pygments.lexers.erlang import ErlangLexer, ErlangShellLexer, \ + ElixirConsoleLexer, ElixirLexer +from pygments.lexers.ml import SMLLexer, OcamlLexer, OpaLexer + +__all__ = [] diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/go.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/go.py new file mode 100644 index 0000000..11e2935 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/go.py @@ -0,0 +1,101 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.go + ~~~~~~~~~~~~~~~~~~ + + Lexers for the Google Go language. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import re + +from pygments.lexer import RegexLexer, bygroups, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['GoLexer'] + + +class GoLexer(RegexLexer): + """ + For `Go `_ source. + + .. versionadded:: 1.2 + """ + name = 'Go' + filenames = ['*.go'] + aliases = ['go'] + mimetypes = ['text/x-gosrc'] + + flags = re.MULTILINE | re.UNICODE + + tokens = { + 'root': [ + (r'\n', Text), + (r'\s+', Text), + (r'\\\n', Text), # line continuations + (r'//(.*?)\n', Comment.Single), + (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), + (r'(import|package)\b', Keyword.Namespace), + (r'(var|func|struct|map|chan|type|interface|const)\b', + Keyword.Declaration), + (words(( + 'break', 'default', 'select', 'case', 'defer', 'go', + 'else', 'goto', 'switch', 'fallthrough', 'if', 'range', + 'continue', 'for', 'return'), suffix=r'\b'), + Keyword), + (r'(true|false|iota|nil)\b', Keyword.Constant), + # It seems the builtin types aren't actually keywords, but + # can be used as functions. So we need two declarations. 
+ (words(( + 'uint', 'uint8', 'uint16', 'uint32', 'uint64', + 'int', 'int8', 'int16', 'int32', 'int64', + 'float', 'float32', 'float64', + 'complex64', 'complex128', 'byte', 'rune', + 'string', 'bool', 'error', 'uintptr', + 'print', 'println', 'panic', 'recover', 'close', 'complex', + 'real', 'imag', 'len', 'cap', 'append', 'copy', 'delete', + 'new', 'make'), suffix=r'\b(\()'), + bygroups(Name.Builtin, Punctuation)), + (words(( + 'uint', 'uint8', 'uint16', 'uint32', 'uint64', + 'int', 'int8', 'int16', 'int32', 'int64', + 'float', 'float32', 'float64', + 'complex64', 'complex128', 'byte', 'rune', + 'string', 'bool', 'error', 'uintptr'), suffix=r'\b'), + Keyword.Type), + # imaginary_lit + (r'\d+i', Number), + (r'\d+\.\d*([Ee][-+]\d+)?i', Number), + (r'\.\d+([Ee][-+]\d+)?i', Number), + (r'\d+[Ee][-+]\d+i', Number), + # float_lit + (r'\d+(\.\d+[eE][+\-]?\d+|' + r'\.\d*|[eE][+\-]?\d+)', Number.Float), + (r'\.\d+([eE][+\-]?\d+)?', Number.Float), + # int_lit + # -- octal_lit + (r'0[0-7]+', Number.Oct), + # -- hex_lit + (r'0[xX][0-9a-fA-F]+', Number.Hex), + # -- decimal_lit + (r'(0|[1-9][0-9]*)', Number.Integer), + # char_lit + (r"""'(\\['"\\abfnrtv]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}""" + r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|[^\\])'""", + String.Char), + # StringLiteral + # -- raw_string_lit + (r'`[^`]*`', String), + # -- interpreted_string_lit + (r'"(\\\\|\\"|[^"])*"', String), + # Tokens + (r'(<<=|>>=|<<|>>|<=|>=|&\^=|&\^|\+=|-=|\*=|/=|%=|&=|\|=|&&|\|\|' + r'|<-|\+\+|--|==|!=|:=|\.\.\.|[+\-*/%&])', Operator), + (r'[|^<>=!()\[\]{}.,;:]', Punctuation), + # identifier + (r'[^\W\d]\w*', Name.Other), + ] + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/graph.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/graph.py new file mode 100644 index 0000000..6137363 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/graph.py @@ -0,0 +1,79 @@ +# -*- coding: utf-8 -*- +""" + 
pygments.lexers.graph + ~~~~~~~~~~~~~~~~~~~~~ + + Lexers for graph query languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include, bygroups, using, this +from pygments.token import Keyword, Punctuation, Comment, Operator, Name,\ + String, Number, Whitespace + + +__all__ = ['CypherLexer'] + + +class CypherLexer(RegexLexer): + """ + For `Cypher Query Language + `_ + + For the Cypher version in Neo4J 2.0 + + .. versionadded:: 2.0 + """ + name = 'Cypher' + aliases = ['cypher'] + filenames = ['*.cyp', '*.cypher'] + + flags = re.MULTILINE | re.IGNORECASE + + tokens = { + 'root': [ + include('comment'), + include('keywords'), + include('clauses'), + include('relations'), + include('strings'), + include('whitespace'), + include('barewords'), + ], + 'comment': [ + (r'^.*//.*\n', Comment.Single), + ], + 'keywords': [ + (r'(create|order|match|limit|set|skip|start|return|with|where|' + r'delete|foreach|not|by)\b', Keyword), + ], + 'clauses': [ + # TODO: many missing ones, see http://docs.neo4j.org/refcard/2.0/ + (r'(all|any|as|asc|create|create\s+unique|delete|' + r'desc|distinct|foreach|in|is\s+null|limit|match|none|' + r'order\s+by|return|set|skip|single|start|union|where|with)\b', + Keyword), + ], + 'relations': [ + (r'(-\[)(.*?)(\]->)', bygroups(Operator, using(this), Operator)), + (r'(<-\[)(.*?)(\]-)', bygroups(Operator, using(this), Operator)), + (r'-->|<--|\[|\]', Operator), + (r'<|>|<>|=|<=|=>|\(|\)|\||:|,|;', Punctuation), + (r'[.*{}]', Punctuation), + ], + 'strings': [ + (r'"(?:\\[tbnrf\'"\\]|[^\\"])*"', String), + (r'`(?:``|[^`])+`', Name.Variable), + ], + 'whitespace': [ + (r'\s+', Whitespace), + ], + 'barewords': [ + (r'[a-z]\w*', Name), + (r'\d+', Number), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/graphics.py 
b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/graphics.py new file mode 100644 index 0000000..0b8bf5d --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/graphics.py @@ -0,0 +1,553 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.graphics + ~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for computer graphics and plotting related languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, words, include, bygroups, using, \ + this, default +from pygments.token import Text, Comment, Operator, Keyword, Name, \ + Number, Punctuation, String + +__all__ = ['GLShaderLexer', 'PostScriptLexer', 'AsymptoteLexer', 'GnuplotLexer', + 'PovrayLexer'] + + +class GLShaderLexer(RegexLexer): + """ + GLSL (OpenGL Shader) lexer. + + .. versionadded:: 1.1 + """ + name = 'GLSL' + aliases = ['glsl'] + filenames = ['*.vert', '*.frag', '*.geo'] + mimetypes = ['text/x-glslsrc'] + + tokens = { + 'root': [ + (r'^#.*', Comment.Preproc), + (r'//.*', Comment.Single), + (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), + (r'\+|-|~|!=?|\*|/|%|<<|>>|<=?|>=?|==?|&&?|\^|\|\|?', + Operator), + (r'[?:]', Operator), # quick hack for ternary + (r'\bdefined\b', Operator), + (r'[;{}(),\[\]]', Punctuation), + # FIXME when e is present, no decimal point needed + (r'[+-]?\d*\.\d+([eE][-+]?\d+)?', Number.Float), + (r'[+-]?\d+\.\d*([eE][-+]?\d+)?', Number.Float), + (r'0[xX][0-9a-fA-F]*', Number.Hex), + (r'0[0-7]*', Number.Oct), + (r'[1-9][0-9]*', Number.Integer), + (words(( + 'attribute', 'const', 'uniform', 'varying', 'centroid', 'break', + 'continue', 'do', 'for', 'while', 'if', 'else', 'in', 'out', + 'inout', 'float', 'int', 'void', 'bool', 'true', 'false', + 'invariant', 'discard', 'return', 'mat2', 'mat3' 'mat4', + 'mat2x2', 'mat3x2', 'mat4x2', 'mat2x3', 'mat3x3', 'mat4x3', + 'mat2x4', 'mat3x4', 'mat4x4', 'vec2', 'vec3', 'vec4', + 
'ivec2', 'ivec3', 'ivec4', 'bvec2', 'bvec3', 'bvec4', + 'sampler1D', 'sampler2D', 'sampler3D' 'samplerCube', + 'sampler1DShadow', 'sampler2DShadow', 'struct'), + prefix=r'\b', suffix=r'\b'), + Keyword), + (words(( + 'asm', 'class', 'union', 'enum', 'typedef', 'template', 'this', + 'packed', 'goto', 'switch', 'default', 'inline', 'noinline', + 'volatile', 'public', 'static', 'extern', 'external', 'interface', + 'long', 'short', 'double', 'half', 'fixed', 'unsigned', 'lowp', + 'mediump', 'highp', 'precision', 'input', 'output', + 'hvec2', 'hvec3', 'hvec4', 'dvec2', 'dvec3', 'dvec4', + 'fvec2', 'fvec3', 'fvec4', 'sampler2DRect', 'sampler3DRect', + 'sampler2DRectShadow', 'sizeof', 'cast', 'namespace', 'using'), + prefix=r'\b', suffix=r'\b'), + Keyword), # future use + (r'[a-zA-Z_]\w*', Name), + (r'\.', Punctuation), + (r'\s+', Text), + ], + } + + +class PostScriptLexer(RegexLexer): + """ + Lexer for PostScript files. + + The PostScript Language Reference published by Adobe at + + is the authority for this. + + .. versionadded:: 1.4 + """ + name = 'PostScript' + aliases = ['postscript', 'postscr'] + filenames = ['*.ps', '*.eps'] + mimetypes = ['application/postscript'] + + delimiter = r'()<>\[\]{}/%\s' + delimiter_end = r'(?=[%s])' % delimiter + + valid_name_chars = r'[^%s]' % delimiter + valid_name = r"%s+%s" % (valid_name_chars, delimiter_end) + + tokens = { + 'root': [ + # All comment types + (r'^%!.+\n', Comment.Preproc), + (r'%%.*\n', Comment.Special), + (r'(^%.*\n){2,}', Comment.Multiline), + (r'%.*\n', Comment.Single), + + # String literals are awkward; enter separate state. + (r'\(', String, 'stringliteral'), + + (r'[{}<>\[\]]', Punctuation), + + # Numbers + (r'<[0-9A-Fa-f]+>' + delimiter_end, Number.Hex), + # Slight abuse: use Oct to signify any explicit base system + (r'[0-9]+\#(\-|\+)?([0-9]+\.?|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)' + r'((e|E)[0-9]+)?' + delimiter_end, Number.Oct), + (r'(\-|\+)?([0-9]+\.?|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)((e|E)[0-9]+)?' 
+ + delimiter_end, Number.Float), + (r'(\-|\+)?[0-9]+' + delimiter_end, Number.Integer), + + # References + (r'\/%s' % valid_name, Name.Variable), + + # Names + (valid_name, Name.Function), # Anything else is executed + + # These keywords taken from + # + # Is there an authoritative list anywhere that doesn't involve + # trawling documentation? + + (r'(false|true)' + delimiter_end, Keyword.Constant), + + # Conditionals / flow control + (r'(eq|ne|g[et]|l[et]|and|or|not|if(?:else)?|for(?:all)?)' + + delimiter_end, Keyword.Reserved), + + (words(( + 'abs', 'add', 'aload', 'arc', 'arcn', 'array', 'atan', 'begin', + 'bind', 'ceiling', 'charpath', 'clip', 'closepath', 'concat', + 'concatmatrix', 'copy', 'cos', 'currentlinewidth', 'currentmatrix', + 'currentpoint', 'curveto', 'cvi', 'cvs', 'def', 'defaultmatrix', + 'dict', 'dictstackoverflow', 'div', 'dtransform', 'dup', 'end', + 'exch', 'exec', 'exit', 'exp', 'fill', 'findfont', 'floor', 'get', + 'getinterval', 'grestore', 'gsave', 'gt', 'identmatrix', 'idiv', + 'idtransform', 'index', 'invertmatrix', 'itransform', 'length', + 'lineto', 'ln', 'load', 'log', 'loop', 'matrix', 'mod', 'moveto', + 'mul', 'neg', 'newpath', 'pathforall', 'pathbbox', 'pop', 'print', + 'pstack', 'put', 'quit', 'rand', 'rangecheck', 'rcurveto', 'repeat', + 'restore', 'rlineto', 'rmoveto', 'roll', 'rotate', 'round', 'run', + 'save', 'scale', 'scalefont', 'setdash', 'setfont', 'setgray', + 'setlinecap', 'setlinejoin', 'setlinewidth', 'setmatrix', + 'setrgbcolor', 'shfill', 'show', 'showpage', 'sin', 'sqrt', + 'stack', 'stringwidth', 'stroke', 'strokepath', 'sub', 'syntaxerror', + 'transform', 'translate', 'truncate', 'typecheck', 'undefined', + 'undefinedfilename', 'undefinedresult'), suffix=delimiter_end), + Name.Builtin), + + (r'\s+', Text), + ], + + 'stringliteral': [ + (r'[^()\\]+', String), + (r'\\', String.Escape, 'escape'), + (r'\(', String, '#push'), + (r'\)', String, '#pop'), + ], + + 'escape': [ + (r'[0-8]{3}|n|r|t|b|f|\\|\(|\)', 
String.Escape, '#pop'), + default('#pop'), + ], + } + + +class AsymptoteLexer(RegexLexer): + """ + For `Asymptote `_ source code. + + .. versionadded:: 1.2 + """ + name = 'Asymptote' + aliases = ['asy', 'asymptote'] + filenames = ['*.asy'] + mimetypes = ['text/x-asymptote'] + + #: optional Comment or Whitespace + _ws = r'(?:\s|//.*?\n|/\*.*?\*/)+' + + tokens = { + 'whitespace': [ + (r'\n', Text), + (r'\s+', Text), + (r'\\\n', Text), # line continuation + (r'//(\n|(.|\n)*?[^\\]\n)', Comment), + (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment), + ], + 'statements': [ + # simple string (TeX friendly) + (r'"(\\\\|\\"|[^"])*"', String), + # C style string (with character escapes) + (r"'", String, 'string'), + (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float), + (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), + (r'0x[0-9a-fA-F]+[Ll]?', Number.Hex), + (r'0[0-7]+[Ll]?', Number.Oct), + (r'\d+[Ll]?', Number.Integer), + (r'[~!%^&*+=|?:<>/-]', Operator), + (r'[()\[\],.]', Punctuation), + (r'\b(case)(.+?)(:)', bygroups(Keyword, using(this), Text)), + (r'(and|controls|tension|atleast|curl|if|else|while|for|do|' + r'return|break|continue|struct|typedef|new|access|import|' + r'unravel|from|include|quote|static|public|private|restricted|' + r'this|explicit|true|false|null|cycle|newframe|operator)\b', Keyword), + # Since an asy-type-name can be also an asy-function-name, + # in the following we test if the string " [a-zA-Z]" follows + # the Keyword.Type. + # Of course it is not perfect ! 
+ (r'(Braid|FitResult|Label|Legend|TreeNode|abscissa|arc|arrowhead|' + r'binarytree|binarytreeNode|block|bool|bool3|bounds|bqe|circle|' + r'conic|coord|coordsys|cputime|ellipse|file|filltype|frame|grid3|' + r'guide|horner|hsv|hyperbola|indexedTransform|int|inversion|key|' + r'light|line|linefit|marginT|marker|mass|object|pair|parabola|path|' + r'path3|pen|picture|point|position|projection|real|revolution|' + r'scaleT|scientific|segment|side|slice|splitface|string|surface|' + r'tensionSpecifier|ticklocate|ticksgridT|tickvalues|transform|' + r'transformation|tree|triangle|trilinear|triple|vector|' + r'vertex|void)(?=\s+[a-zA-Z])', Keyword.Type), + # Now the asy-type-name which are not asy-function-name + # except yours ! + # Perhaps useless + (r'(Braid|FitResult|TreeNode|abscissa|arrowhead|block|bool|bool3|' + r'bounds|coord|frame|guide|horner|int|linefit|marginT|pair|pen|' + r'picture|position|real|revolution|slice|splitface|ticksgridT|' + r'tickvalues|tree|triple|vertex|void)\b', Keyword.Type), + ('[a-zA-Z_]\w*:(?!:)', Name.Label), + ('[a-zA-Z_]\w*', Name), + ], + 'root': [ + include('whitespace'), + # functions + (r'((?:[\w*\s])+?(?:\s|\*))' # return arguments + r'([a-zA-Z_]\w*)' # method name + r'(\s*\([^;]*?\))' # signature + r'(' + _ws + r')(\{)', + bygroups(using(this), Name.Function, using(this), using(this), + Punctuation), + 'function'), + # function declarations + (r'((?:[\w*\s])+?(?:\s|\*))' # return arguments + r'([a-zA-Z_]\w*)' # method name + r'(\s*\([^;]*?\))' # signature + r'(' + _ws + r')(;)', + bygroups(using(this), Name.Function, using(this), using(this), + Punctuation)), + default('statement'), + ], + 'statement': [ + include('whitespace'), + include('statements'), + ('[{}]', Punctuation), + (';', Punctuation, '#pop'), + ], + 'function': [ + include('whitespace'), + include('statements'), + (';', Punctuation), + (r'\{', Punctuation, '#push'), + (r'\}', Punctuation, '#pop'), + ], + 'string': [ + (r"'", String, '#pop'), + 
(r'\\([\\abfnrtv"\'?]|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), + (r'\n', String), + (r"[^\\'\n]+", String), # all other characters + (r'\\\n', String), + (r'\\n', String), # line continuation + (r'\\', String), # stray backslash + ], + } + + def get_tokens_unprocessed(self, text): + from pygments.lexers._asy_builtins import ASYFUNCNAME, ASYVARNAME + for index, token, value in \ + RegexLexer.get_tokens_unprocessed(self, text): + if token is Name and value in ASYFUNCNAME: + token = Name.Function + elif token is Name and value in ASYVARNAME: + token = Name.Variable + yield index, token, value + + +def _shortened(word): + dpos = word.find('$') + return '|'.join(word[:dpos] + word[dpos+1:i] + r'\b' + for i in range(len(word), dpos, -1)) + + +def _shortened_many(*words): + return '|'.join(map(_shortened, words)) + + +class GnuplotLexer(RegexLexer): + """ + For `Gnuplot `_ plotting scripts. + + .. versionadded:: 0.11 + """ + + name = 'Gnuplot' + aliases = ['gnuplot'] + filenames = ['*.plot', '*.plt'] + mimetypes = ['text/x-gnuplot'] + + tokens = { + 'root': [ + include('whitespace'), + (_shortened('bi$nd'), Keyword, 'bind'), + (_shortened_many('ex$it', 'q$uit'), Keyword, 'quit'), + (_shortened('f$it'), Keyword, 'fit'), + (r'(if)(\s*)(\()', bygroups(Keyword, Text, Punctuation), 'if'), + (r'else\b', Keyword), + (_shortened('pa$use'), Keyword, 'pause'), + (_shortened_many('p$lot', 'rep$lot', 'sp$lot'), Keyword, 'plot'), + (_shortened('sa$ve'), Keyword, 'save'), + (_shortened('se$t'), Keyword, ('genericargs', 'optionarg')), + (_shortened_many('sh$ow', 'uns$et'), + Keyword, ('noargs', 'optionarg')), + (_shortened_many('low$er', 'ra$ise', 'ca$ll', 'cd$', 'cl$ear', + 'h$elp', '\\?$', 'hi$story', 'l$oad', 'pr$int', + 'pwd$', 're$read', 'res$et', 'scr$eendump', + 'she$ll', 'sy$stem', 'up$date'), + Keyword, 'genericargs'), + (_shortened_many('pwd$', 're$read', 'res$et', 'scr$eendump', + 'she$ll', 'test$'), + Keyword, 'noargs'), + ('([a-zA-Z_]\w*)(\s*)(=)', + 
bygroups(Name.Variable, Text, Operator), 'genericargs'), + ('([a-zA-Z_]\w*)(\s*\(.*?\)\s*)(=)', + bygroups(Name.Function, Text, Operator), 'genericargs'), + (r'@[a-zA-Z_]\w*', Name.Constant), # macros + (r';', Keyword), + ], + 'comment': [ + (r'[^\\\n]', Comment), + (r'\\\n', Comment), + (r'\\', Comment), + # don't add the newline to the Comment token + default('#pop'), + ], + 'whitespace': [ + ('#', Comment, 'comment'), + (r'[ \t\v\f]+', Text), + ], + 'noargs': [ + include('whitespace'), + # semicolon and newline end the argument list + (r';', Punctuation, '#pop'), + (r'\n', Text, '#pop'), + ], + 'dqstring': [ + (r'"', String, '#pop'), + (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), + (r'[^\\"\n]+', String), # all other characters + (r'\\\n', String), # line continuation + (r'\\', String), # stray backslash + (r'\n', String, '#pop'), # newline ends the string too + ], + 'sqstring': [ + (r"''", String), # escaped single quote + (r"'", String, '#pop'), + (r"[^\\'\n]+", String), # all other characters + (r'\\\n', String), # line continuation + (r'\\', String), # normal backslash + (r'\n', String, '#pop'), # newline ends the string too + ], + 'genericargs': [ + include('noargs'), + (r'"', String, 'dqstring'), + (r"'", String, 'sqstring'), + (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float), + (r'(\d+\.\d*|\.\d+)', Number.Float), + (r'-?\d+', Number.Integer), + ('[,.~!%^&*+=|?:<>/-]', Operator), + ('[{}()\[\]]', Punctuation), + (r'(eq|ne)\b', Operator.Word), + (r'([a-zA-Z_]\w*)(\s*)(\()', + bygroups(Name.Function, Text, Punctuation)), + (r'[a-zA-Z_]\w*', Name), + (r'@[a-zA-Z_]\w*', Name.Constant), # macros + (r'\\\n', Text), + ], + 'optionarg': [ + include('whitespace'), + (_shortened_many( + "a$ll", "an$gles", "ar$row", "au$toscale", "b$ars", "bor$der", + "box$width", "cl$abel", "c$lip", "cn$trparam", "co$ntour", "da$ta", + "data$file", "dg$rid3d", "du$mmy", "enc$oding", "dec$imalsign", + "fit$", "font$path", "fo$rmat", "fu$nction", 
"fu$nctions", "g$rid", + "hid$den3d", "his$torysize", "is$osamples", "k$ey", "keyt$itle", + "la$bel", "li$nestyle", "ls$", "loa$dpath", "loc$ale", "log$scale", + "mac$ros", "map$ping", "map$ping3d", "mar$gin", "lmar$gin", + "rmar$gin", "tmar$gin", "bmar$gin", "mo$use", "multi$plot", + "mxt$ics", "nomxt$ics", "mx2t$ics", "nomx2t$ics", "myt$ics", + "nomyt$ics", "my2t$ics", "nomy2t$ics", "mzt$ics", "nomzt$ics", + "mcbt$ics", "nomcbt$ics", "of$fsets", "or$igin", "o$utput", + "pa$rametric", "pm$3d", "pal$ette", "colorb$ox", "p$lot", + "poi$ntsize", "pol$ar", "pr$int", "obj$ect", "sa$mples", "si$ze", + "st$yle", "su$rface", "table$", "t$erminal", "termo$ptions", "ti$cs", + "ticsc$ale", "ticsl$evel", "timef$mt", "tim$estamp", "tit$le", + "v$ariables", "ve$rsion", "vi$ew", "xyp$lane", "xda$ta", "x2da$ta", + "yda$ta", "y2da$ta", "zda$ta", "cbda$ta", "xl$abel", "x2l$abel", + "yl$abel", "y2l$abel", "zl$abel", "cbl$abel", "xti$cs", "noxti$cs", + "x2ti$cs", "nox2ti$cs", "yti$cs", "noyti$cs", "y2ti$cs", "noy2ti$cs", + "zti$cs", "nozti$cs", "cbti$cs", "nocbti$cs", "xdti$cs", "noxdti$cs", + "x2dti$cs", "nox2dti$cs", "ydti$cs", "noydti$cs", "y2dti$cs", + "noy2dti$cs", "zdti$cs", "nozdti$cs", "cbdti$cs", "nocbdti$cs", + "xmti$cs", "noxmti$cs", "x2mti$cs", "nox2mti$cs", "ymti$cs", + "noymti$cs", "y2mti$cs", "noy2mti$cs", "zmti$cs", "nozmti$cs", + "cbmti$cs", "nocbmti$cs", "xr$ange", "x2r$ange", "yr$ange", + "y2r$ange", "zr$ange", "cbr$ange", "rr$ange", "tr$ange", "ur$ange", + "vr$ange", "xzeroa$xis", "x2zeroa$xis", "yzeroa$xis", "y2zeroa$xis", + "zzeroa$xis", "zeroa$xis", "z$ero"), Name.Builtin, '#pop'), + ], + 'bind': [ + ('!', Keyword, '#pop'), + (_shortened('all$windows'), Name.Builtin), + include('genericargs'), + ], + 'quit': [ + (r'gnuplot\b', Keyword), + include('noargs'), + ], + 'fit': [ + (r'via\b', Name.Builtin), + include('plot'), + ], + 'if': [ + (r'\)', Punctuation, '#pop'), + include('genericargs'), + ], + 'pause': [ + (r'(mouse|any|button1|button2|button3)\b', 
Name.Builtin), + (_shortened('key$press'), Name.Builtin), + include('genericargs'), + ], + 'plot': [ + (_shortened_many('ax$es', 'axi$s', 'bin$ary', 'ev$ery', 'i$ndex', + 'mat$rix', 's$mooth', 'thru$', 't$itle', + 'not$itle', 'u$sing', 'w$ith'), + Name.Builtin), + include('genericargs'), + ], + 'save': [ + (_shortened_many('f$unctions', 's$et', 't$erminal', 'v$ariables'), + Name.Builtin), + include('genericargs'), + ], + } + + +class PovrayLexer(RegexLexer): + """ + For `Persistence of Vision Raytracer `_ files. + + .. versionadded:: 0.11 + """ + name = 'POVRay' + aliases = ['pov'] + filenames = ['*.pov', '*.inc'] + mimetypes = ['text/x-povray'] + + tokens = { + 'root': [ + (r'/\*[\w\W]*?\*/', Comment.Multiline), + (r'//.*\n', Comment.Single), + (r'(?s)"(?:\\.|[^"\\])+"', String.Double), + (words(( + 'break', 'case', 'debug', 'declare', 'default', 'define', 'else', + 'elseif', 'end', 'error', 'fclose', 'fopen', 'for', 'if', 'ifdef', + 'ifndef', 'include', 'local', 'macro', 'range', 'read', 'render', + 'statistics', 'switch', 'undef', 'version', 'warning', 'while', + 'write'), prefix=r'#', suffix=r'\b'), + Comment.Preproc), + (words(( + 'aa_level', 'aa_threshold', 'abs', 'acos', 'acosh', 'adaptive', 'adc_bailout', + 'agate', 'agate_turb', 'all', 'alpha', 'ambient', 'ambient_light', 'angle', + 'aperture', 'arc_angle', 'area_light', 'asc', 'asin', 'asinh', 'assumed_gamma', + 'atan', 'atan2', 'atanh', 'atmosphere', 'atmospheric_attenuation', + 'attenuating', 'average', 'background', 'black_hole', 'blue', 'blur_samples', + 'bounded_by', 'box_mapping', 'bozo', 'break', 'brick', 'brick_size', + 'brightness', 'brilliance', 'bumps', 'bumpy1', 'bumpy2', 'bumpy3', 'bump_map', + 'bump_size', 'case', 'caustics', 'ceil', 'checker', 'chr', 'clipped_by', 'clock', + 'color', 'color_map', 'colour', 'colour_map', 'component', 'composite', 'concat', + 'confidence', 'conic_sweep', 'constant', 'control0', 'control1', 'cos', 'cosh', + 'count', 'crackle', 'crand', 'cube', 'cubic_spline', 
'cylindrical_mapping', + 'debug', 'declare', 'default', 'degrees', 'dents', 'diffuse', 'direction', + 'distance', 'distance_maximum', 'div', 'dust', 'dust_type', 'eccentricity', + 'else', 'emitting', 'end', 'error', 'error_bound', 'exp', 'exponent', + 'fade_distance', 'fade_power', 'falloff', 'falloff_angle', 'false', + 'file_exists', 'filter', 'finish', 'fisheye', 'flatness', 'flip', 'floor', + 'focal_point', 'fog', 'fog_alt', 'fog_offset', 'fog_type', 'frequency', 'gif', + 'global_settings', 'glowing', 'gradient', 'granite', 'gray_threshold', + 'green', 'halo', 'hexagon', 'hf_gray_16', 'hierarchy', 'hollow', 'hypercomplex', + 'if', 'ifdef', 'iff', 'image_map', 'incidence', 'include', 'int', 'interpolate', + 'inverse', 'ior', 'irid', 'irid_wavelength', 'jitter', 'lambda', 'leopard', + 'linear', 'linear_spline', 'linear_sweep', 'location', 'log', 'looks_like', + 'look_at', 'low_error_factor', 'mandel', 'map_type', 'marble', 'material_map', + 'matrix', 'max', 'max_intersections', 'max_iteration', 'max_trace_level', + 'max_value', 'metallic', 'min', 'minimum_reuse', 'mod', 'mortar', + 'nearest_count', 'no', 'normal', 'normal_map', 'no_shadow', 'number_of_waves', + 'octaves', 'off', 'offset', 'omega', 'omnimax', 'on', 'once', 'onion', 'open', + 'orthographic', 'panoramic', 'pattern1', 'pattern2', 'pattern3', + 'perspective', 'pgm', 'phase', 'phong', 'phong_size', 'pi', 'pigment', + 'pigment_map', 'planar_mapping', 'png', 'point_at', 'pot', 'pow', 'ppm', + 'precision', 'pwr', 'quadratic_spline', 'quaternion', 'quick_color', + 'quick_colour', 'quilted', 'radial', 'radians', 'radiosity', 'radius', 'rainbow', + 'ramp_wave', 'rand', 'range', 'reciprocal', 'recursion_limit', 'red', + 'reflection', 'refraction', 'render', 'repeat', 'rgb', 'rgbf', 'rgbft', 'rgbt', + 'right', 'ripples', 'rotate', 'roughness', 'samples', 'scale', 'scallop_wave', + 'scattering', 'seed', 'shadowless', 'sin', 'sine_wave', 'sinh', 'sky', 'sky_sphere', + 'slice', 'slope_map', 'smooth', 'specular', 
'spherical_mapping', 'spiral', + 'spiral1', 'spiral2', 'spotlight', 'spotted', 'sqr', 'sqrt', 'statistics', 'str', + 'strcmp', 'strength', 'strlen', 'strlwr', 'strupr', 'sturm', 'substr', 'switch', 'sys', + 't', 'tan', 'tanh', 'test_camera_1', 'test_camera_2', 'test_camera_3', + 'test_camera_4', 'texture', 'texture_map', 'tga', 'thickness', 'threshold', + 'tightness', 'tile2', 'tiles', 'track', 'transform', 'translate', 'transmit', + 'triangle_wave', 'true', 'ttf', 'turbulence', 'turb_depth', 'type', + 'ultra_wide_angle', 'up', 'use_color', 'use_colour', 'use_index', 'u_steps', + 'val', 'variance', 'vaxis_rotate', 'vcross', 'vdot', 'version', 'vlength', + 'vnormalize', 'volume_object', 'volume_rendered', 'vol_with_light', + 'vrotate', 'v_steps', 'warning', 'warp', 'water_level', 'waves', 'while', 'width', + 'wood', 'wrinkles', 'yes'), prefix=r'\b', suffix=r'\b'), + Keyword), + (words(( + 'bicubic_patch', 'blob', 'box', 'camera', 'cone', 'cubic', 'cylinder', 'difference', + 'disc', 'height_field', 'intersection', 'julia_fractal', 'lathe', + 'light_source', 'merge', 'mesh', 'object', 'plane', 'poly', 'polygon', 'prism', + 'quadric', 'quartic', 'smooth_triangle', 'sor', 'sphere', 'superellipsoid', + 'text', 'torus', 'triangle', 'union'), suffix=r'\b'), + Name.Builtin), + # TODO: <=, etc + (r'[\[\](){}<>;,]', Punctuation), + (r'[-+*/=]', Operator), + (r'\b(x|y|z|u|v)\b', Name.Builtin.Pseudo), + (r'[a-zA-Z_]\w*', Name), + (r'[0-9]+\.[0-9]*', Number.Float), + (r'\.[0-9]+', Number.Float), + (r'[0-9]+', Number.Integer), + (r'"(\\\\|\\"|[^"])*"', String), + (r'\s+', Text), + ] + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/haskell.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/haskell.py new file mode 100644 index 0000000..089cdf4 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/haskell.py @@ -0,0 +1,840 @@ +# -*- coding: utf-8 -*- +""" + 
pygments.lexers.haskell + ~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for Haskell and related languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import Lexer, RegexLexer, bygroups, do_insertions, \ + default, include +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Generic +from pygments import unistring as uni + +__all__ = ['HaskellLexer', 'IdrisLexer', 'AgdaLexer', 'CryptolLexer', + 'LiterateHaskellLexer', 'LiterateIdrisLexer', 'LiterateAgdaLexer', + 'LiterateCryptolLexer', 'KokaLexer'] + + +line_re = re.compile('.*?\n') + + +class HaskellLexer(RegexLexer): + """ + A Haskell lexer based on the lexemes defined in the Haskell 98 Report. + + .. versionadded:: 0.8 + """ + name = 'Haskell' + aliases = ['haskell', 'hs'] + filenames = ['*.hs'] + mimetypes = ['text/x-haskell'] + + flags = re.MULTILINE | re.UNICODE + + reserved = ('case', 'class', 'data', 'default', 'deriving', 'do', 'else', + 'if', 'in', 'infix[lr]?', 'instance', + 'let', 'newtype', 'of', 'then', 'type', 'where', '_') + ascii = ('NUL', 'SOH', '[SE]TX', 'EOT', 'ENQ', 'ACK', + 'BEL', 'BS', 'HT', 'LF', 'VT', 'FF', 'CR', 'S[OI]', 'DLE', + 'DC[1-4]', 'NAK', 'SYN', 'ETB', 'CAN', + 'EM', 'SUB', 'ESC', '[FGRU]S', 'SP', 'DEL') + + tokens = { + 'root': [ + # Whitespace: + (r'\s+', Text), + # (r'--\s*|.*$', Comment.Doc), + (r'--(?![!#$%&*+./<=>?@^|_~:\\]).*?$', Comment.Single), + (r'\{-', Comment.Multiline, 'comment'), + # Lexemes: + # Identifiers + (r'\bimport\b', Keyword.Reserved, 'import'), + (r'\bmodule\b', Keyword.Reserved, 'module'), + (r'\berror\b', Name.Exception), + (r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved), + (r"'[^\\]'", String.Char), # this has to come before the TH quote + (r'^[_' + uni.Ll + r'][\w\']*', Name.Function), + (r"'?[_" + uni.Ll + r"][\w']*", Name), + (r"('')?[" + uni.Lu + r"][\w\']*", Keyword.Type), + # Operators + 
(r'\\(?![:!#$%&*+.\\/<=>?@^|~-]+)', Name.Function), # lambda operator + (r'(<-|::|->|=>|=)(?![:!#$%&*+.\\/<=>?@^|~-]+)', Operator.Word), # specials + (r':[:!#$%&*+.\\/<=>?@^|~-]*', Keyword.Type), # Constructor operators + (r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator), # Other operators + # Numbers + (r'\d+[eE][+-]?\d+', Number.Float), + (r'\d+\.\d+([eE][+-]?\d+)?', Number.Float), + (r'0[oO][0-7]+', Number.Oct), + (r'0[xX][\da-fA-F]+', Number.Hex), + (r'\d+', Number.Integer), + # Character/String Literals + (r"'", String.Char, 'character'), + (r'"', String, 'string'), + # Special + (r'\[\]', Keyword.Type), + (r'\(\)', Name.Builtin), + (r'[][(),;`{}]', Punctuation), + ], + 'import': [ + # Import statements + (r'\s+', Text), + (r'"', String, 'string'), + # after "funclist" state + (r'\)', Punctuation, '#pop'), + (r'qualified\b', Keyword), + # import X as Y + (r'([' + uni.Lu + r'][\w.]*)(\s+)(as)(\s+)([' + uni.Lu + r'][\w.]*)', + bygroups(Name.Namespace, Text, Keyword, Text, Name), '#pop'), + # import X hiding (functions) + (r'([' + uni.Lu + r'][\w.]*)(\s+)(hiding)(\s+)(\()', + bygroups(Name.Namespace, Text, Keyword, Text, Punctuation), 'funclist'), + # import X (functions) + (r'([' + uni.Lu + r'][\w.]*)(\s+)(\()', + bygroups(Name.Namespace, Text, Punctuation), 'funclist'), + # import X + (r'[\w.]+', Name.Namespace, '#pop'), + ], + 'module': [ + (r'\s+', Text), + (r'([' + uni.Lu + r'][\w.]*)(\s+)(\()', + bygroups(Name.Namespace, Text, Punctuation), 'funclist'), + (r'[' + uni.Lu + r'][\w.]*', Name.Namespace, '#pop'), + ], + 'funclist': [ + (r'\s+', Text), + (r'[' + uni.Lu + r']\w*', Keyword.Type), + (r'(_[\w\']+|[' + uni.Ll + r'][\w\']*)', Name.Function), + (r'--(?![!#$%&*+./<=>?@^|_~:\\]).*?$', Comment.Single), + (r'\{-', Comment.Multiline, 'comment'), + (r',', Punctuation), + (r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator), + # (HACK, but it makes sense to push two instances, believe me) + (r'\(', Punctuation, ('funclist', 'funclist')), + (r'\)', Punctuation, '#pop:2'), + ], + # 
NOTE: the next four states are shared in the AgdaLexer; make sure + # any change is compatible with Agda as well or copy over and change + 'comment': [ + # Multiline Comments + (r'[^-{}]+', Comment.Multiline), + (r'\{-', Comment.Multiline, '#push'), + (r'-\}', Comment.Multiline, '#pop'), + (r'[-{}]', Comment.Multiline), + ], + 'character': [ + # Allows multi-chars, incorrectly. + (r"[^\\']'", String.Char, '#pop'), + (r"\\", String.Escape, 'escape'), + ("'", String.Char, '#pop'), + ], + 'string': [ + (r'[^\\"]+', String), + (r"\\", String.Escape, 'escape'), + ('"', String, '#pop'), + ], + 'escape': [ + (r'[abfnrtv"\'&\\]', String.Escape, '#pop'), + (r'\^[][' + uni.Lu + r'@^_]', String.Escape, '#pop'), + ('|'.join(ascii), String.Escape, '#pop'), + (r'o[0-7]+', String.Escape, '#pop'), + (r'x[\da-fA-F]+', String.Escape, '#pop'), + (r'\d+', String.Escape, '#pop'), + (r'\s+\\', String.Escape, '#pop'), + ], + } + + +class IdrisLexer(RegexLexer): + """ + A lexer for the dependently typed programming language Idris. + + Based on the Haskell and Agda Lexer. + + .. 
versionadded:: 2.0 + """ + name = 'Idris' + aliases = ['idris', 'idr'] + filenames = ['*.idr'] + mimetypes = ['text/x-idris'] + + reserved = ('case', 'class', 'data', 'default', 'using', 'do', 'else', + 'if', 'in', 'infix[lr]?', 'instance', 'rewrite', 'auto', + 'namespace', 'codata', 'mutual', 'private', 'public', 'abstract', + 'total', 'partial', + 'let', 'proof', 'of', 'then', 'static', 'where', '_', 'with', + 'pattern', 'term', 'syntax', 'prefix', + 'postulate', 'parameters', 'record', 'dsl', 'impossible', 'implicit', + 'tactics', 'intros', 'intro', 'compute', 'refine', 'exact', 'trivial') + + ascii = ('NUL', 'SOH', '[SE]TX', 'EOT', 'ENQ', 'ACK', + 'BEL', 'BS', 'HT', 'LF', 'VT', 'FF', 'CR', 'S[OI]', 'DLE', + 'DC[1-4]', 'NAK', 'SYN', 'ETB', 'CAN', + 'EM', 'SUB', 'ESC', '[FGRU]S', 'SP', 'DEL') + + directives = ('lib', 'link', 'flag', 'include', 'hide', 'freeze', 'access', + 'default', 'logging', 'dynamic', 'name', 'error_handlers', 'language') + + tokens = { + 'root': [ + # Comments + (r'^(\s*)(%%%s)' % '|'.join(directives), + bygroups(Text, Keyword.Reserved)), + (r'(\s*)(--(?![!#$%&*+./<=>?@^|_~:\\]).*?)$', bygroups(Text, Comment.Single)), + (r'(\s*)(\|{3}.*?)$', bygroups(Text, Comment.Single)), + (r'(\s*)(\{-)', bygroups(Text, Comment.Multiline), 'comment'), + # Declaration + (r'^(\s*)([^\s(){}]+)(\s*)(:)(\s*)', + bygroups(Text, Name.Function, Text, Operator.Word, Text)), + # Identifiers + (r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved), + (r'(import|module)(\s+)', bygroups(Keyword.Reserved, Text), 'module'), + (r"('')?[A-Z][\w\']*", Keyword.Type), + (r'[a-z][\w\']*', Text), + # Special Symbols + (r'(<-|::|->|=>|=)', Operator.Word), # specials + (r'([(){}\[\]:!#$%&*+.\\/<=>?@^|~-]+)', Operator.Word), # specials + # Numbers + (r'\d+[eE][+-]?\d+', Number.Float), + (r'\d+\.\d+([eE][+-]?\d+)?', Number.Float), + (r'0[xX][\da-fA-F]+', Number.Hex), + (r'\d+', Number.Integer), + # Strings + (r"'", String.Char, 'character'), + (r'"', String, 'string'), + 
(r'[^\s(){}]+', Text), + (r'\s+?', Text), # Whitespace + ], + 'module': [ + (r'\s+', Text), + (r'([A-Z][\w.]*)(\s+)(\()', + bygroups(Name.Namespace, Text, Punctuation), 'funclist'), + (r'[A-Z][\w.]*', Name.Namespace, '#pop'), + ], + 'funclist': [ + (r'\s+', Text), + (r'[A-Z]\w*', Keyword.Type), + (r'(_[\w\']+|[a-z][\w\']*)', Name.Function), + (r'--.*$', Comment.Single), + (r'\{-', Comment.Multiline, 'comment'), + (r',', Punctuation), + (r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator), + # (HACK, but it makes sense to push two instances, believe me) + (r'\(', Punctuation, ('funclist', 'funclist')), + (r'\)', Punctuation, '#pop:2'), + ], + # NOTE: the next four states are shared in the AgdaLexer; make sure + # any change is compatible with Agda as well or copy over and change + 'comment': [ + # Multiline Comments + (r'[^-{}]+', Comment.Multiline), + (r'\{-', Comment.Multiline, '#push'), + (r'-\}', Comment.Multiline, '#pop'), + (r'[-{}]', Comment.Multiline), + ], + 'character': [ + # Allows multi-chars, incorrectly. + (r"[^\\']", String.Char), + (r"\\", String.Escape, 'escape'), + ("'", String.Char, '#pop'), + ], + 'string': [ + (r'[^\\"]+', String), + (r"\\", String.Escape, 'escape'), + ('"', String, '#pop'), + ], + 'escape': [ + (r'[abfnrtv"\'&\\]', String.Escape, '#pop'), + (r'\^[][A-Z@^_]', String.Escape, '#pop'), + ('|'.join(ascii), String.Escape, '#pop'), + (r'o[0-7]+', String.Escape, '#pop'), + (r'x[\da-fA-F]+', String.Escape, '#pop'), + (r'\d+', String.Escape, '#pop'), + (r'\s+\\', String.Escape, '#pop') + ], + } + + +class AgdaLexer(RegexLexer): + """ + For the `Agda `_ + dependently typed functional programming language and proof assistant. + + .. 
versionadded:: 2.0 + """ + + name = 'Agda' + aliases = ['agda'] + filenames = ['*.agda'] + mimetypes = ['text/x-agda'] + + reserved = ['abstract', 'codata', 'coinductive', 'constructor', 'data', + 'field', 'forall', 'hiding', 'in', 'inductive', 'infix', + 'infixl', 'infixr', 'instance', 'let', 'mutual', 'open', + 'pattern', 'postulate', 'primitive', 'private', + 'quote', 'quoteGoal', 'quoteTerm', + 'record', 'renaming', 'rewrite', 'syntax', 'tactic', + 'unquote', 'unquoteDecl', 'using', 'where', 'with'] + + tokens = { + 'root': [ + # Declaration + (r'^(\s*)([^\s(){}]+)(\s*)(:)(\s*)', + bygroups(Text, Name.Function, Text, Operator.Word, Text)), + # Comments + (r'--(?![!#$%&*+./<=>?@^|_~:\\]).*?$', Comment.Single), + (r'\{-', Comment.Multiline, 'comment'), + # Holes + (r'\{!', Comment.Directive, 'hole'), + # Lexemes: + # Identifiers + (r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved), + (r'(import|module)(\s+)', bygroups(Keyword.Reserved, Text), 'module'), + (r'\b(Set|Prop)\b', Keyword.Type), + # Special Symbols + (r'(\(|\)|\{|\})', Operator), + (u'(\\.{1,3}|\\||\u039B|\u2200|\u2192|:|=|->)', Operator.Word), + # Numbers + (r'\d+[eE][+-]?\d+', Number.Float), + (r'\d+\.\d+([eE][+-]?\d+)?', Number.Float), + (r'0[xX][\da-fA-F]+', Number.Hex), + (r'\d+', Number.Integer), + # Strings + (r"'", String.Char, 'character'), + (r'"', String, 'string'), + (r'[^\s(){}]+', Text), + (r'\s+?', Text), # Whitespace + ], + 'hole': [ + # Holes + (r'[^!{}]+', Comment.Directive), + (r'\{!', Comment.Directive, '#push'), + (r'!\}', Comment.Directive, '#pop'), + (r'[!{}]', Comment.Directive), + ], + 'module': [ + (r'\{-', Comment.Multiline, 'comment'), + (r'[a-zA-Z][\w.]*', Name, '#pop'), + (r'[^a-zA-Z]+', Text) + ], + 'comment': HaskellLexer.tokens['comment'], + 'character': HaskellLexer.tokens['character'], + 'string': HaskellLexer.tokens['string'], + 'escape': HaskellLexer.tokens['escape'] + } + + +class CryptolLexer(RegexLexer): + """ + FIXME: A Cryptol2 lexer based on the 
lexemes defined in the Haskell 98 Report. + + .. versionadded:: 2.0 + """ + name = 'Cryptol' + aliases = ['cryptol', 'cry'] + filenames = ['*.cry'] + mimetypes = ['text/x-cryptol'] + + reserved = ('Arith', 'Bit', 'Cmp', 'False', 'Inf', 'True', 'else', + 'export', 'extern', 'fin', 'if', 'import', 'inf', 'lg2', + 'max', 'min', 'module', 'newtype', 'pragma', 'property', + 'then', 'type', 'where', 'width') + ascii = ('NUL', 'SOH', '[SE]TX', 'EOT', 'ENQ', 'ACK', + 'BEL', 'BS', 'HT', 'LF', 'VT', 'FF', 'CR', 'S[OI]', 'DLE', + 'DC[1-4]', 'NAK', 'SYN', 'ETB', 'CAN', + 'EM', 'SUB', 'ESC', '[FGRU]S', 'SP', 'DEL') + + tokens = { + 'root': [ + # Whitespace: + (r'\s+', Text), + # (r'--\s*|.*$', Comment.Doc), + (r'//.*$', Comment.Single), + (r'/\*', Comment.Multiline, 'comment'), + # Lexemes: + # Identifiers + (r'\bimport\b', Keyword.Reserved, 'import'), + (r'\bmodule\b', Keyword.Reserved, 'module'), + (r'\berror\b', Name.Exception), + (r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved), + (r'^[_a-z][\w\']*', Name.Function), + (r"'?[_a-z][\w']*", Name), + (r"('')?[A-Z][\w\']*", Keyword.Type), + # Operators + (r'\\(?![:!#$%&*+.\\/<=>?@^|~-]+)', Name.Function), # lambda operator + (r'(<-|::|->|=>|=)(?![:!#$%&*+.\\/<=>?@^|~-]+)', Operator.Word), # specials + (r':[:!#$%&*+.\\/<=>?@^|~-]*', Keyword.Type), # Constructor operators + (r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator), # Other operators + # Numbers + (r'\d+[eE][+-]?\d+', Number.Float), + (r'\d+\.\d+([eE][+-]?\d+)?', Number.Float), + (r'0[oO][0-7]+', Number.Oct), + (r'0[xX][\da-fA-F]+', Number.Hex), + (r'\d+', Number.Integer), + # Character/String Literals + (r"'", String.Char, 'character'), + (r'"', String, 'string'), + # Special + (r'\[\]', Keyword.Type), + (r'\(\)', Name.Builtin), + (r'[][(),;`{}]', Punctuation), + ], + 'import': [ + # Import statements + (r'\s+', Text), + (r'"', String, 'string'), + # after "funclist" state + (r'\)', Punctuation, '#pop'), + (r'qualified\b', Keyword), + # import X as Y + 
(r'([A-Z][\w.]*)(\s+)(as)(\s+)([A-Z][\w.]*)', + bygroups(Name.Namespace, Text, Keyword, Text, Name), '#pop'), + # import X hiding (functions) + (r'([A-Z][\w.]*)(\s+)(hiding)(\s+)(\()', + bygroups(Name.Namespace, Text, Keyword, Text, Punctuation), 'funclist'), + # import X (functions) + (r'([A-Z][\w.]*)(\s+)(\()', + bygroups(Name.Namespace, Text, Punctuation), 'funclist'), + # import X + (r'[\w.]+', Name.Namespace, '#pop'), + ], + 'module': [ + (r'\s+', Text), + (r'([A-Z][\w.]*)(\s+)(\()', + bygroups(Name.Namespace, Text, Punctuation), 'funclist'), + (r'[A-Z][\w.]*', Name.Namespace, '#pop'), + ], + 'funclist': [ + (r'\s+', Text), + (r'[A-Z]\w*', Keyword.Type), + (r'(_[\w\']+|[a-z][\w\']*)', Name.Function), + # TODO: these don't match the comments in docs, remove. + #(r'--(?![!#$%&*+./<=>?@^|_~:\\]).*?$', Comment.Single), + #(r'{-', Comment.Multiline, 'comment'), + (r',', Punctuation), + (r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator), + # (HACK, but it makes sense to push two instances, believe me) + (r'\(', Punctuation, ('funclist', 'funclist')), + (r'\)', Punctuation, '#pop:2'), + ], + 'comment': [ + # Multiline Comments + (r'[^/*]+', Comment.Multiline), + (r'/\*', Comment.Multiline, '#push'), + (r'\*/', Comment.Multiline, '#pop'), + (r'[*/]', Comment.Multiline), + ], + 'character': [ + # Allows multi-chars, incorrectly. 
+ (r"[^\\']'", String.Char, '#pop'), + (r"\\", String.Escape, 'escape'), + ("'", String.Char, '#pop'), + ], + 'string': [ + (r'[^\\"]+', String), + (r"\\", String.Escape, 'escape'), + ('"', String, '#pop'), + ], + 'escape': [ + (r'[abfnrtv"\'&\\]', String.Escape, '#pop'), + (r'\^[][A-Z@^_]', String.Escape, '#pop'), + ('|'.join(ascii), String.Escape, '#pop'), + (r'o[0-7]+', String.Escape, '#pop'), + (r'x[\da-fA-F]+', String.Escape, '#pop'), + (r'\d+', String.Escape, '#pop'), + (r'\s+\\', String.Escape, '#pop'), + ], + } + + EXTRA_KEYWORDS = set(('join', 'split', 'reverse', 'transpose', 'width', + 'length', 'tail', '<<', '>>', '<<<', '>>>', 'const', + 'reg', 'par', 'seq', 'ASSERT', 'undefined', 'error', + 'trace')) + + def get_tokens_unprocessed(self, text): + stack = ['root'] + for index, token, value in \ + RegexLexer.get_tokens_unprocessed(self, text, stack): + if token is Name and value in self.EXTRA_KEYWORDS: + yield index, Name.Builtin, value + else: + yield index, token, value + + +class LiterateLexer(Lexer): + """ + Base class for lexers of literate file formats based on LaTeX or Bird-style + (prefixing each code line with ">"). + + Additional options accepted: + + `litstyle` + If given, must be ``"bird"`` or ``"latex"``. If not given, the style + is autodetected: if the first non-whitespace character in the source + is a backslash or percent character, LaTeX is assumed, else Bird. 
+ """ + + bird_re = re.compile(r'(>[ \t]*)(.*\n)') + + def __init__(self, baselexer, **options): + self.baselexer = baselexer + Lexer.__init__(self, **options) + + def get_tokens_unprocessed(self, text): + style = self.options.get('litstyle') + if style is None: + style = (text.lstrip()[0:1] in '%\\') and 'latex' or 'bird' + + code = '' + insertions = [] + if style == 'bird': + # bird-style + for match in line_re.finditer(text): + line = match.group() + m = self.bird_re.match(line) + if m: + insertions.append((len(code), + [(0, Comment.Special, m.group(1))])) + code += m.group(2) + else: + insertions.append((len(code), [(0, Text, line)])) + else: + # latex-style + from pygments.lexers.markup import TexLexer + lxlexer = TexLexer(**self.options) + codelines = 0 + latex = '' + for match in line_re.finditer(text): + line = match.group() + if codelines: + if line.lstrip().startswith('\\end{code}'): + codelines = 0 + latex += line + else: + code += line + elif line.lstrip().startswith('\\begin{code}'): + codelines = 1 + latex += line + insertions.append((len(code), + list(lxlexer.get_tokens_unprocessed(latex)))) + latex = '' + else: + latex += line + insertions.append((len(code), + list(lxlexer.get_tokens_unprocessed(latex)))) + for item in do_insertions(insertions, self.baselexer.get_tokens_unprocessed(code)): + yield item + + +class LiterateHaskellLexer(LiterateLexer): + """ + For Literate Haskell (Bird-style or LaTeX) source. + + Additional options accepted: + + `litstyle` + If given, must be ``"bird"`` or ``"latex"``. If not given, the style + is autodetected: if the first non-whitespace character in the source + is a backslash or percent character, LaTeX is assumed, else Bird. + + .. 
class LiterateHaskellLexer(LiterateLexer):
    """
    For Literate Haskell (Bird-style or LaTeX) source.

    Additional options accepted:

    `litstyle`
        If given, must be ``"bird"`` or ``"latex"``.  If not given, the style
        is autodetected: if the first non-whitespace character in the source
        is a backslash or percent character, LaTeX is assumed, else Bird.

    .. versionadded:: 0.9
    """
    name = 'Literate Haskell'
    aliases = ['lhs', 'literate-haskell', 'lhaskell']
    filenames = ['*.lhs']
    mimetypes = ['text/x-literate-haskell']

    def __init__(self, **options):
        hslexer = HaskellLexer(**options)
        LiterateLexer.__init__(self, hslexer, **options)


class LiterateIdrisLexer(LiterateLexer):
    """
    For Literate Idris (Bird-style or LaTeX) source.

    Additional options accepted:

    `litstyle`
        If given, must be ``"bird"`` or ``"latex"``.  If not given, the style
        is autodetected: if the first non-whitespace character in the source
        is a backslash or percent character, LaTeX is assumed, else Bird.

    .. versionadded:: 2.0
    """
    name = 'Literate Idris'
    aliases = ['lidr', 'literate-idris', 'lidris']
    filenames = ['*.lidr']
    mimetypes = ['text/x-literate-idris']

    def __init__(self, **options):
        hslexer = IdrisLexer(**options)
        LiterateLexer.__init__(self, hslexer, **options)


class LiterateAgdaLexer(LiterateLexer):
    """
    For Literate Agda source.

    Additional options accepted:

    `litstyle`
        If given, must be ``"bird"`` or ``"latex"``.  If not given, the style
        is autodetected: if the first non-whitespace character in the source
        is a backslash or percent character, LaTeX is assumed, else Bird.

    .. versionadded:: 2.0
    """
    name = 'Literate Agda'
    aliases = ['lagda', 'literate-agda']
    filenames = ['*.lagda']
    mimetypes = ['text/x-literate-agda']

    def __init__(self, **options):
        agdalexer = AgdaLexer(**options)
        # Agda's literate style is always LaTeX, so force it here.
        LiterateLexer.__init__(self, agdalexer, litstyle='latex', **options)


class LiterateCryptolLexer(LiterateLexer):
    """
    For Literate Cryptol (Bird-style or LaTeX) source.

    Additional options accepted:

    `litstyle`
        If given, must be ``"bird"`` or ``"latex"``.  If not given, the style
        is autodetected: if the first non-whitespace character in the source
        is a backslash or percent character, LaTeX is assumed, else Bird.

    .. versionadded:: 2.0
    """
    name = 'Literate Cryptol'
    aliases = ['lcry', 'literate-cryptol', 'lcryptol']
    filenames = ['*.lcry']
    mimetypes = ['text/x-literate-cryptol']

    def __init__(self, **options):
        crylexer = CryptolLexer(**options)
        LiterateLexer.__init__(self, crylexer, **options)


class KokaLexer(RegexLexer):
    """
    Lexer for the `Koka <http://koka.codeplex.com>`_
    language.

    .. versionadded:: 1.6
    """

    name = 'Koka'
    aliases = ['koka']
    filenames = ['*.kk', '*.kki']
    mimetypes = ['text/x-koka']

    # NOTE(review): the original list contained 'private' twice; the duplicate
    # alternation branch was redundant and has been removed (same matches).
    keywords = [
        'infix', 'infixr', 'infixl',
        'type', 'cotype', 'rectype', 'alias',
        'struct', 'con',
        'fun', 'function', 'val', 'var',
        'external',
        'if', 'then', 'else', 'elif', 'return', 'match',
        'private', 'public',
        'module', 'import', 'as',
        'include', 'inline',
        'rec',
        'try', 'yield', 'enum',
        'interface', 'instance',
    ]

    # keywords that are followed by a type
    typeStartKeywords = [
        'type', 'cotype', 'rectype', 'alias', 'struct', 'enum',
    ]

    # keywords valid in a type
    typekeywords = [
        'forall', 'exists', 'some', 'with',
    ]

    # builtin names and special names
    builtin = [
        'for', 'while', 'repeat',
        'foreach', 'foreach-indexed',
        'error', 'catch', 'finally',
        'cs', 'js', 'file', 'ref', 'assigned',
    ]

    # symbols that can be in an operator
    symbols = r'[$%&*+@!/\\^~=.:\-?|<>]+'

    # symbol boundary: an operator keyword should not be followed by any of these
    sboundary = '(?!' + symbols + ')'

    # name boundary: a keyword should not be followed by any of these
    boundary = '(?![\w/])'

    # koka token abstractions
    tokenType = Name.Attribute
    tokenTypeDef = Name.Class
    tokenConstructor = Generic.Emph

    # main lexer
    tokens = {
        'root': [
            include('whitespace'),

            # go into type mode
            (r'::?' + sboundary, tokenType, 'type'),
            (r'(alias)(\s+)([a-z]\w*)?', bygroups(Keyword, Text, tokenTypeDef),
             'alias-type'),
            (r'(struct)(\s+)([a-z]\w*)?', bygroups(Keyword, Text, tokenTypeDef),
             'struct-type'),
            ((r'(%s)' % '|'.join(typeStartKeywords)) +
             r'(\s+)([a-z]\w*)?', bygroups(Keyword, Text, tokenTypeDef),
             'type'),

            # special sequences of tokens (we use ?: for non-capturing group as
            # required by 'bygroups')
            (r'(module)(\s+)(interface\s+)?((?:[a-z]\w*/)*[a-z]\w*)',
             bygroups(Keyword, Text, Keyword, Name.Namespace)),
            (r'(import)(\s+)((?:[a-z]\w*/)*[a-z]\w*)'
             r'(?:(\s*)(=)(\s*)((?:qualified\s*)?)'
             r'((?:[a-z]\w*/)*[a-z]\w*))?',
             bygroups(Keyword, Text, Name.Namespace, Text, Keyword, Text,
                      Keyword, Name.Namespace)),

            (r'(^(?:(?:public|private)\s*)?(?:function|fun|val))'
             r'(\s+)([a-z]\w*|\((?:' + symbols + r'|/)\))',
             bygroups(Keyword, Text, Name.Function)),
            (r'(^(?:(?:public|private)\s*)?external)(\s+)(inline\s+)?'
             r'([a-z]\w*|\((?:' + symbols + r'|/)\))',
             bygroups(Keyword, Text, Keyword, Name.Function)),

            # keywords
            (r'(%s)' % '|'.join(typekeywords) + boundary, Keyword.Type),
            (r'(%s)' % '|'.join(keywords) + boundary, Keyword),
            (r'(%s)' % '|'.join(builtin) + boundary, Keyword.Pseudo),
            (r'::?|:=|\->|[=.]' + sboundary, Keyword),

            # names
            (r'((?:[a-z]\w*/)*)([A-Z]\w*)',
             bygroups(Name.Namespace, tokenConstructor)),
            (r'((?:[a-z]\w*/)*)([a-z]\w*)', bygroups(Name.Namespace, Name)),
            (r'((?:[a-z]\w*/)*)(\((?:' + symbols + r'|/)\))',
             bygroups(Name.Namespace, Name)),
            (r'_\w*', Name.Variable),

            # literal string
            (r'@"', String.Double, 'litstring'),

            # operators
            (symbols + "|/(?![*/])", Operator),
            (r'`', Operator),
            (r'[{}()\[\];,]', Punctuation),

            # literals. No check for literal characters with len > 1
            (r'[0-9]+\.[0-9]+([eE][\-+]?[0-9]+)?', Number.Float),
            (r'0[xX][0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),

            (r"'", String.Char, 'char'),
            (r'"', String.Double, 'string'),
        ],

        # type started by alias
        'alias-type': [
            (r'=', Keyword),
            include('type')
        ],

        # type started by struct
        'struct-type': [
            (r'(?=\((?!,*\)))', Punctuation, '#pop'),
            include('type')
        ],

        # type started by colon
        'type': [
            (r'[(\[<]', tokenType, 'type-nested'),
            include('type-content')
        ],

        # type nested in brackets: can contain parameters, comma etc.
        'type-nested': [
            (r'[)\]>]', tokenType, '#pop'),
            (r'[(\[<]', tokenType, 'type-nested'),
            (r',', tokenType),
            (r'([a-z]\w*)(\s*)(:)(?!:)',
             bygroups(Name, Text, tokenType)),  # parameter name
            include('type-content')
        ],

        # shared contents of a type
        'type-content': [
            include('whitespace'),

            # keywords
            (r'(%s)' % '|'.join(typekeywords) + boundary, Keyword),
            (r'(?=((%s)' % '|'.join(keywords) + boundary + '))',
             Keyword, '#pop'),  # need to match because names overlap...

            # kinds
            (r'[EPHVX]' + boundary, tokenType),

            # type names
            (r'[a-z][0-9]*(?![\w/])', tokenType),
            (r'_\w*', tokenType.Variable),  # Generic.Emph
            (r'((?:[a-z]\w*/)*)([A-Z]\w*)',
             bygroups(Name.Namespace, tokenType)),
            (r'((?:[a-z]\w*/)*)([a-z]\w+)',
             bygroups(Name.Namespace, tokenType)),

            # type keyword operators
            (r'::|->|[.:|]', tokenType),

            # catchall
            default('#pop')
        ],

        # comments and literals
        'whitespace': [
            (r'\n\s*#.*$', Comment.Preproc),
            (r'\s+', Text),
            (r'/\*', Comment.Multiline, 'comment'),
            (r'//.*$', Comment.Single)
        ],
        'comment': [
            (r'[^/*]+', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline),
        ],
        'litstring': [
            (r'[^"]+', String.Double),
            (r'""', String.Escape),
            (r'"', String.Double, '#pop'),
        ],
        'string': [
            (r'[^\\"\n]+', String.Double),
            include('escape-sequence'),
            (r'["\n]', String.Double, '#pop'),
        ],
        'char': [
            (r'[^\\\'\n]+', String.Char),
            include('escape-sequence'),
            (r'[\'\n]', String.Char, '#pop'),
        ],
        'escape-sequence': [
            (r'\\[nrt\\"\']', String.Escape),
            (r'\\x[0-9a-fA-F]{2}', String.Escape),
            (r'\\u[0-9a-fA-F]{4}', String.Escape),
            # Yes, \U literals are 6 hex digits.
            (r'\\U[0-9a-fA-F]{6}', String.Escape)
        ]
    }
+""" + +import re + +from pygments.lexer import ExtendedRegexLexer, RegexLexer, include, bygroups, \ + default +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Generic, Whitespace + +__all__ = ['HaxeLexer', 'HxmlLexer'] + + +class HaxeLexer(ExtendedRegexLexer): + """ + For Haxe source code (http://haxe.org/). + + .. versionadded:: 1.3 + """ + + name = 'Haxe' + aliases = ['hx', 'haxe', 'hxsl'] + filenames = ['*.hx', '*.hxsl'] + mimetypes = ['text/haxe', 'text/x-haxe', 'text/x-hx'] + + # keywords extracted from lexer.mll in the haxe compiler source + keyword = (r'(?:function|class|static|var|if|else|while|do|for|' + r'break|return|continue|extends|implements|import|' + r'switch|case|default|public|private|try|untyped|' + r'catch|new|this|throw|extern|enum|in|interface|' + r'cast|override|dynamic|typedef|package|' + r'inline|using|null|true|false|abstract)\b') + + # idtype in lexer.mll + typeid = r'_*[A-Z]\w*' + + # combined ident and dollar and idtype + ident = r'(?:_*[a-z]\w*|_+[0-9]\w*|' + typeid + '|_+|\$\w+)' + + binop = (r'(?:%=|&=|\|=|\^=|\+=|\-=|\*=|/=|<<=|>\s*>\s*=|>\s*>\s*>\s*=|==|' + r'!=|<=|>\s*=|&&|\|\||<<|>>>|>\s*>|\.\.\.|<|>|%|&|\||\^|\+|\*|' + r'/|\-|=>|=)') + + # ident except keywords + ident_no_keyword = r'(?!' 
+ keyword + ')' + ident + + flags = re.DOTALL | re.MULTILINE + + preproc_stack = [] + + def preproc_callback(self, match, ctx): + proc = match.group(2) + + if proc == 'if': + # store the current stack + self.preproc_stack.append(ctx.stack[:]) + elif proc in ['else', 'elseif']: + # restore the stack back to right before #if + if self.preproc_stack: + ctx.stack = self.preproc_stack[-1][:] + elif proc == 'end': + # remove the saved stack of previous #if + if self.preproc_stack: + self.preproc_stack.pop() + + # #if and #elseif should follow by an expr + if proc in ['if', 'elseif']: + ctx.stack.append('preproc-expr') + + # #error can be optionally follow by the error msg + if proc in ['error']: + ctx.stack.append('preproc-error') + + yield match.start(), Comment.Preproc, '#' + proc + ctx.pos = match.end() + + tokens = { + 'root': [ + include('spaces'), + include('meta'), + (r'(?:package)\b', Keyword.Namespace, ('semicolon', 'package')), + (r'(?:import)\b', Keyword.Namespace, ('semicolon', 'import')), + (r'(?:using)\b', Keyword.Namespace, ('semicolon', 'using')), + (r'(?:extern|private)\b', Keyword.Declaration), + (r'(?:abstract)\b', Keyword.Declaration, 'abstract'), + (r'(?:class|interface)\b', Keyword.Declaration, 'class'), + (r'(?:enum)\b', Keyword.Declaration, 'enum'), + (r'(?:typedef)\b', Keyword.Declaration, 'typedef'), + + # top-level expression + # although it is not supported in haxe, but it is common to write + # expression in web pages the positive lookahead here is to prevent + # an infinite loop at the EOF + (r'(?=.)', Text, 'expr-statement'), + ], + + # space/tab/comment/preproc + 'spaces': [ + (r'\s+', Text), + (r'//[^\n\r]*', Comment.Single), + (r'/\*.*?\*/', Comment.Multiline), + (r'(#)(if|elseif|else|end|error)\b', preproc_callback), + ], + + 'string-single-interpol': [ + (r'\$\{', String.Interpol, ('string-interpol-close', 'expr')), + (r'\$\$', String.Escape), + (r'\$(?=' + ident + ')', String.Interpol, 'ident'), + include('string-single'), + ], + + 
'string-single': [ + (r"'", String.Single, '#pop'), + (r'\\.', String.Escape), + (r'.', String.Single), + ], + + 'string-double': [ + (r'"', String.Double, '#pop'), + (r'\\.', String.Escape), + (r'.', String.Double), + ], + + 'string-interpol-close': [ + (r'\$'+ident, String.Interpol), + (r'\}', String.Interpol, '#pop'), + ], + + 'package': [ + include('spaces'), + (ident, Name.Namespace), + (r'\.', Punctuation, 'import-ident'), + default('#pop'), + ], + + 'import': [ + include('spaces'), + (ident, Name.Namespace), + (r'\*', Keyword), # wildcard import + (r'\.', Punctuation, 'import-ident'), + (r'in', Keyword.Namespace, 'ident'), + default('#pop'), + ], + + 'import-ident': [ + include('spaces'), + (r'\*', Keyword, '#pop'), # wildcard import + (ident, Name.Namespace, '#pop'), + ], + + 'using': [ + include('spaces'), + (ident, Name.Namespace), + (r'\.', Punctuation, 'import-ident'), + default('#pop'), + ], + + 'preproc-error': [ + (r'\s+', Comment.Preproc), + (r"'", String.Single, ('#pop', 'string-single')), + (r'"', String.Double, ('#pop', 'string-double')), + default('#pop'), + ], + + 'preproc-expr': [ + (r'\s+', Comment.Preproc), + (r'\!', Comment.Preproc), + (r'\(', Comment.Preproc, ('#pop', 'preproc-parenthesis')), + + (ident, Comment.Preproc, '#pop'), + + # Float + (r'\.[0-9]+', Number.Float), + (r'[0-9]+[eE][+\-]?[0-9]+', Number.Float), + (r'[0-9]+\.[0-9]*[eE][+\-]?[0-9]+', Number.Float), + (r'[0-9]+\.[0-9]+', Number.Float), + (r'[0-9]+\.(?!' 
+ ident + '|\.\.)', Number.Float), + + # Int + (r'0x[0-9a-fA-F]+', Number.Hex), + (r'[0-9]+', Number.Integer), + + # String + (r"'", String.Single, ('#pop', 'string-single')), + (r'"', String.Double, ('#pop', 'string-double')), + ], + + 'preproc-parenthesis': [ + (r'\s+', Comment.Preproc), + (r'\)', Comment.Preproc, '#pop'), + default('preproc-expr-in-parenthesis'), + ], + + 'preproc-expr-chain': [ + (r'\s+', Comment.Preproc), + (binop, Comment.Preproc, ('#pop', 'preproc-expr-in-parenthesis')), + default('#pop'), + ], + + # same as 'preproc-expr' but able to chain 'preproc-expr-chain' + 'preproc-expr-in-parenthesis': [ + (r'\s+', Comment.Preproc), + (r'\!', Comment.Preproc), + (r'\(', Comment.Preproc, + ('#pop', 'preproc-expr-chain', 'preproc-parenthesis')), + + (ident, Comment.Preproc, ('#pop', 'preproc-expr-chain')), + + # Float + (r'\.[0-9]+', Number.Float, ('#pop', 'preproc-expr-chain')), + (r'[0-9]+[eE][+\-]?[0-9]+', Number.Float, ('#pop', 'preproc-expr-chain')), + (r'[0-9]+\.[0-9]*[eE][+\-]?[0-9]+', Number.Float, ('#pop', 'preproc-expr-chain')), + (r'[0-9]+\.[0-9]+', Number.Float, ('#pop', 'preproc-expr-chain')), + (r'[0-9]+\.(?!' 
+ ident + '|\.\.)', Number.Float, ('#pop', 'preproc-expr-chain')), + + # Int + (r'0x[0-9a-fA-F]+', Number.Hex, ('#pop', 'preproc-expr-chain')), + (r'[0-9]+', Number.Integer, ('#pop', 'preproc-expr-chain')), + + # String + (r"'", String.Single, + ('#pop', 'preproc-expr-chain', 'string-single')), + (r'"', String.Double, + ('#pop', 'preproc-expr-chain', 'string-double')), + ], + + 'abstract': [ + include('spaces'), + default(('#pop', 'abstract-body', 'abstract-relation', + 'abstract-opaque', 'type-param-constraint', 'type-name')), + ], + + 'abstract-body': [ + include('spaces'), + (r'\{', Punctuation, ('#pop', 'class-body')), + ], + + 'abstract-opaque': [ + include('spaces'), + (r'\(', Punctuation, ('#pop', 'parenthesis-close', 'type')), + default('#pop'), + ], + + 'abstract-relation': [ + include('spaces'), + (r'(?:to|from)', Keyword.Declaration, 'type'), + (r',', Punctuation), + default('#pop'), + ], + + 'meta': [ + include('spaces'), + (r'@', Name.Decorator, ('meta-body', 'meta-ident', 'meta-colon')), + ], + + # optional colon + 'meta-colon': [ + include('spaces'), + (r':', Name.Decorator, '#pop'), + default('#pop'), + ], + + # same as 'ident' but set token as Name.Decorator instead of Name + 'meta-ident': [ + include('spaces'), + (ident, Name.Decorator, '#pop'), + ], + + 'meta-body': [ + include('spaces'), + (r'\(', Name.Decorator, ('#pop', 'meta-call')), + default('#pop'), + ], + + 'meta-call': [ + include('spaces'), + (r'\)', Name.Decorator, '#pop'), + default(('#pop', 'meta-call-sep', 'expr')), + ], + + 'meta-call-sep': [ + include('spaces'), + (r'\)', Name.Decorator, '#pop'), + (r',', Punctuation, ('#pop', 'meta-call')), + ], + + 'typedef': [ + include('spaces'), + default(('#pop', 'typedef-body', 'type-param-constraint', + 'type-name')), + ], + + 'typedef-body': [ + include('spaces'), + (r'=', Operator, ('#pop', 'optional-semicolon', 'type')), + ], + + 'enum': [ + include('spaces'), + default(('#pop', 'enum-body', 'bracket-open', + 'type-param-constraint', 
'type-name')), + ], + + 'enum-body': [ + include('spaces'), + include('meta'), + (r'\}', Punctuation, '#pop'), + (ident_no_keyword, Name, ('enum-member', 'type-param-constraint')), + ], + + 'enum-member': [ + include('spaces'), + (r'\(', Punctuation, + ('#pop', 'semicolon', 'flag', 'function-param')), + default(('#pop', 'semicolon', 'flag')), + ], + + 'class': [ + include('spaces'), + default(('#pop', 'class-body', 'bracket-open', 'extends', + 'type-param-constraint', 'type-name')), + ], + + 'extends': [ + include('spaces'), + (r'(?:extends|implements)\b', Keyword.Declaration, 'type'), + (r',', Punctuation), # the comma is made optional here, since haxe2 + # requires the comma but haxe3 does not allow it + default('#pop'), + ], + + 'bracket-open': [ + include('spaces'), + (r'\{', Punctuation, '#pop'), + ], + + 'bracket-close': [ + include('spaces'), + (r'\}', Punctuation, '#pop'), + ], + + 'class-body': [ + include('spaces'), + include('meta'), + (r'\}', Punctuation, '#pop'), + (r'(?:static|public|private|override|dynamic|inline|macro)\b', + Keyword.Declaration), + default('class-member'), + ], + + 'class-member': [ + include('spaces'), + (r'(var)\b', Keyword.Declaration, + ('#pop', 'optional-semicolon', 'var')), + (r'(function)\b', Keyword.Declaration, + ('#pop', 'optional-semicolon', 'class-method')), + ], + + # local function, anonymous or not + 'function-local': [ + include('spaces'), + (ident_no_keyword, Name.Function, + ('#pop', 'optional-expr', 'flag', 'function-param', + 'parenthesis-open', 'type-param-constraint')), + default(('#pop', 'optional-expr', 'flag', 'function-param', + 'parenthesis-open', 'type-param-constraint')), + ], + + 'optional-expr': [ + include('spaces'), + include('expr'), + default('#pop'), + ], + + 'class-method': [ + include('spaces'), + (ident, Name.Function, ('#pop', 'optional-expr', 'flag', + 'function-param', 'parenthesis-open', + 'type-param-constraint')), + ], + + # function arguments + 'function-param': [ + include('spaces'), + 
(r'\)', Punctuation, '#pop'), + (r'\?', Punctuation), + (ident_no_keyword, Name, + ('#pop', 'function-param-sep', 'assign', 'flag')), + ], + + 'function-param-sep': [ + include('spaces'), + (r'\)', Punctuation, '#pop'), + (r',', Punctuation, ('#pop', 'function-param')), + ], + + 'prop-get-set': [ + include('spaces'), + (r'\(', Punctuation, ('#pop', 'parenthesis-close', + 'prop-get-set-opt', 'comma', 'prop-get-set-opt')), + default('#pop'), + ], + + 'prop-get-set-opt': [ + include('spaces'), + (r'(?:default|null|never|dynamic|get|set)\b', Keyword, '#pop'), + (ident_no_keyword, Text, '#pop'), # custom getter/setter + ], + + 'expr-statement': [ + include('spaces'), + # makes semicolon optional here, just to avoid checking the last + # one is bracket or not. + default(('#pop', 'optional-semicolon', 'expr')), + ], + + 'expr': [ + include('spaces'), + (r'@', Name.Decorator, ('#pop', 'optional-expr', 'meta-body', + 'meta-ident', 'meta-colon')), + (r'(?:\+\+|\-\-|~(?!/)|!|\-)', Operator), + (r'\(', Punctuation, ('#pop', 'expr-chain', 'parenthesis')), + (r'(?:static|public|private|override|dynamic|inline)\b', + Keyword.Declaration), + (r'(?:function)\b', Keyword.Declaration, ('#pop', 'expr-chain', + 'function-local')), + (r'\{', Punctuation, ('#pop', 'expr-chain', 'bracket')), + (r'(?:true|false|null)\b', Keyword.Constant, ('#pop', 'expr-chain')), + (r'(?:this)\b', Keyword, ('#pop', 'expr-chain')), + (r'(?:cast)\b', Keyword, ('#pop', 'expr-chain', 'cast')), + (r'(?:try)\b', Keyword, ('#pop', 'catch', 'expr')), + (r'(?:var)\b', Keyword.Declaration, ('#pop', 'var')), + (r'(?:new)\b', Keyword, ('#pop', 'expr-chain', 'new')), + (r'(?:switch)\b', Keyword, ('#pop', 'switch')), + (r'(?:if)\b', Keyword, ('#pop', 'if')), + (r'(?:do)\b', Keyword, ('#pop', 'do')), + (r'(?:while)\b', Keyword, ('#pop', 'while')), + (r'(?:for)\b', Keyword, ('#pop', 'for')), + (r'(?:untyped|throw)\b', Keyword), + (r'(?:return)\b', Keyword, ('#pop', 'optional-expr')), + (r'(?:macro)\b', Keyword, ('#pop', 
'macro')), + (r'(?:continue|break)\b', Keyword, '#pop'), + (r'(?:\$\s*[a-z]\b|\$(?!'+ident+'))', Name, ('#pop', 'dollar')), + (ident_no_keyword, Name, ('#pop', 'expr-chain')), + + # Float + (r'\.[0-9]+', Number.Float, ('#pop', 'expr-chain')), + (r'[0-9]+[eE][+\-]?[0-9]+', Number.Float, ('#pop', 'expr-chain')), + (r'[0-9]+\.[0-9]*[eE][+\-]?[0-9]+', Number.Float, ('#pop', 'expr-chain')), + (r'[0-9]+\.[0-9]+', Number.Float, ('#pop', 'expr-chain')), + (r'[0-9]+\.(?!' + ident + '|\.\.)', Number.Float, ('#pop', 'expr-chain')), + + # Int + (r'0x[0-9a-fA-F]+', Number.Hex, ('#pop', 'expr-chain')), + (r'[0-9]+', Number.Integer, ('#pop', 'expr-chain')), + + # String + (r"'", String.Single, ('#pop', 'expr-chain', 'string-single-interpol')), + (r'"', String.Double, ('#pop', 'expr-chain', 'string-double')), + + # EReg + (r'~/(\\\\|\\/|[^/\n])*/[gimsu]*', String.Regex, ('#pop', 'expr-chain')), + + # Array + (r'\[', Punctuation, ('#pop', 'expr-chain', 'array-decl')), + ], + + 'expr-chain': [ + include('spaces'), + (r'(?:\+\+|\-\-)', Operator), + (binop, Operator, ('#pop', 'expr')), + (r'(?:in)\b', Keyword, ('#pop', 'expr')), + (r'\?', Operator, ('#pop', 'expr', 'ternary', 'expr')), + (r'(\.)(' + ident_no_keyword + ')', bygroups(Punctuation, Name)), + (r'\[', Punctuation, 'array-access'), + (r'\(', Punctuation, 'call'), + default('#pop'), + ], + + # macro reification + 'macro': [ + include('spaces'), + include('meta'), + (r':', Punctuation, ('#pop', 'type')), + + (r'(?:extern|private)\b', Keyword.Declaration), + (r'(?:abstract)\b', Keyword.Declaration, ('#pop', 'optional-semicolon', 'abstract')), + (r'(?:class|interface)\b', Keyword.Declaration, ('#pop', 'optional-semicolon', 'macro-class')), + (r'(?:enum)\b', Keyword.Declaration, ('#pop', 'optional-semicolon', 'enum')), + (r'(?:typedef)\b', Keyword.Declaration, ('#pop', 'optional-semicolon', 'typedef')), + + default(('#pop', 'expr')), + ], + + 'macro-class': [ + (r'\{', Punctuation, ('#pop', 'class-body')), + include('class') + ], 
+ + # cast can be written as "cast expr" or "cast(expr, type)" + 'cast': [ + include('spaces'), + (r'\(', Punctuation, ('#pop', 'parenthesis-close', + 'cast-type', 'expr')), + default(('#pop', 'expr')), + ], + + # optionally give a type as the 2nd argument of cast() + 'cast-type': [ + include('spaces'), + (r',', Punctuation, ('#pop', 'type')), + default('#pop'), + ], + + 'catch': [ + include('spaces'), + (r'(?:catch)\b', Keyword, ('expr', 'function-param', + 'parenthesis-open')), + default('#pop'), + ], + + # do-while loop + 'do': [ + include('spaces'), + default(('#pop', 'do-while', 'expr')), + ], + + # the while after do + 'do-while': [ + include('spaces'), + (r'(?:while)\b', Keyword, ('#pop', 'parenthesis', + 'parenthesis-open')), + ], + + 'while': [ + include('spaces'), + (r'\(', Punctuation, ('#pop', 'expr', 'parenthesis')), + ], + + 'for': [ + include('spaces'), + (r'\(', Punctuation, ('#pop', 'expr', 'parenthesis')), + ], + + 'if': [ + include('spaces'), + (r'\(', Punctuation, ('#pop', 'else', 'optional-semicolon', 'expr', + 'parenthesis')), + ], + + 'else': [ + include('spaces'), + (r'(?:else)\b', Keyword, ('#pop', 'expr')), + default('#pop'), + ], + + 'switch': [ + include('spaces'), + default(('#pop', 'switch-body', 'bracket-open', 'expr')), + ], + + 'switch-body': [ + include('spaces'), + (r'(?:case|default)\b', Keyword, ('case-block', 'case')), + (r'\}', Punctuation, '#pop'), + ], + + 'case': [ + include('spaces'), + (r':', Punctuation, '#pop'), + default(('#pop', 'case-sep', 'case-guard', 'expr')), + ], + + 'case-sep': [ + include('spaces'), + (r':', Punctuation, '#pop'), + (r',', Punctuation, ('#pop', 'case')), + ], + + 'case-guard': [ + include('spaces'), + (r'(?:if)\b', Keyword, ('#pop', 'parenthesis', 'parenthesis-open')), + default('#pop'), + ], + + # optional multiple expr under a case + 'case-block': [ + include('spaces'), + (r'(?!(?:case|default)\b|\})', Keyword, 'expr-statement'), + default('#pop'), + ], + + 'new': [ + include('spaces'), + 
default(('#pop', 'call', 'parenthesis-open', 'type')), + ], + + 'array-decl': [ + include('spaces'), + (r'\]', Punctuation, '#pop'), + default(('#pop', 'array-decl-sep', 'expr')), + ], + + 'array-decl-sep': [ + include('spaces'), + (r'\]', Punctuation, '#pop'), + (r',', Punctuation, ('#pop', 'array-decl')), + ], + + 'array-access': [ + include('spaces'), + default(('#pop', 'array-access-close', 'expr')), + ], + + 'array-access-close': [ + include('spaces'), + (r'\]', Punctuation, '#pop'), + ], + + 'comma': [ + include('spaces'), + (r',', Punctuation, '#pop'), + ], + + 'colon': [ + include('spaces'), + (r':', Punctuation, '#pop'), + ], + + 'semicolon': [ + include('spaces'), + (r';', Punctuation, '#pop'), + ], + + 'optional-semicolon': [ + include('spaces'), + (r';', Punctuation, '#pop'), + default('#pop'), + ], + + # identity that CAN be a Haxe keyword + 'ident': [ + include('spaces'), + (ident, Name, '#pop'), + ], + + 'dollar': [ + include('spaces'), + (r'\{', Punctuation, ('#pop', 'expr-chain', 'bracket-close', 'expr')), + default(('#pop', 'expr-chain')), + ], + + 'type-name': [ + include('spaces'), + (typeid, Name, '#pop'), + ], + + 'type-full-name': [ + include('spaces'), + (r'\.', Punctuation, 'ident'), + default('#pop'), + ], + + 'type': [ + include('spaces'), + (r'\?', Punctuation), + (ident, Name, ('#pop', 'type-check', 'type-full-name')), + (r'\{', Punctuation, ('#pop', 'type-check', 'type-struct')), + (r'\(', Punctuation, ('#pop', 'type-check', 'type-parenthesis')), + ], + + 'type-parenthesis': [ + include('spaces'), + default(('#pop', 'parenthesis-close', 'type')), + ], + + 'type-check': [ + include('spaces'), + (r'->', Punctuation, ('#pop', 'type')), + (r'<(?!=)', Punctuation, 'type-param'), + default('#pop'), + ], + + 'type-struct': [ + include('spaces'), + (r'\}', Punctuation, '#pop'), + (r'\?', Punctuation), + (r'>', Punctuation, ('comma', 'type')), + (ident_no_keyword, Name, ('#pop', 'type-struct-sep', 'type', 'colon')), + include('class-body'), + 
], + + 'type-struct-sep': [ + include('spaces'), + (r'\}', Punctuation, '#pop'), + (r',', Punctuation, ('#pop', 'type-struct')), + ], + + # type-param can be a normal type or a constant literal... + 'type-param-type': [ + # Float + (r'\.[0-9]+', Number.Float, '#pop'), + (r'[0-9]+[eE][+\-]?[0-9]+', Number.Float, '#pop'), + (r'[0-9]+\.[0-9]*[eE][+\-]?[0-9]+', Number.Float, '#pop'), + (r'[0-9]+\.[0-9]+', Number.Float, '#pop'), + (r'[0-9]+\.(?!' + ident + '|\.\.)', Number.Float, '#pop'), + + # Int + (r'0x[0-9a-fA-F]+', Number.Hex, '#pop'), + (r'[0-9]+', Number.Integer, '#pop'), + + # String + (r"'", String.Single, ('#pop', 'string-single')), + (r'"', String.Double, ('#pop', 'string-double')), + + # EReg + (r'~/(\\\\|\\/|[^/\n])*/[gim]*', String.Regex, '#pop'), + + # Array + (r'\[', Operator, ('#pop', 'array-decl')), + + include('type'), + ], + + # type-param part of a type + # ie. the path in Map + 'type-param': [ + include('spaces'), + default(('#pop', 'type-param-sep', 'type-param-type')), + ], + + 'type-param-sep': [ + include('spaces'), + (r'>', Punctuation, '#pop'), + (r',', Punctuation, ('#pop', 'type-param')), + ], + + # optional type-param that may include constraint + # ie. 
+ 'type-param-constraint': [ + include('spaces'), + (r'<(?!=)', Punctuation, ('#pop', 'type-param-constraint-sep', + 'type-param-constraint-flag', 'type-name')), + default('#pop'), + ], + + 'type-param-constraint-sep': [ + include('spaces'), + (r'>', Punctuation, '#pop'), + (r',', Punctuation, ('#pop', 'type-param-constraint-sep', + 'type-param-constraint-flag', 'type-name')), + ], + + # the optional constraint inside type-param + 'type-param-constraint-flag': [ + include('spaces'), + (r':', Punctuation, ('#pop', 'type-param-constraint-flag-type')), + default('#pop'), + ], + + 'type-param-constraint-flag-type': [ + include('spaces'), + (r'\(', Punctuation, ('#pop', 'type-param-constraint-flag-type-sep', + 'type')), + default(('#pop', 'type')), + ], + + 'type-param-constraint-flag-type-sep': [ + include('spaces'), + (r'\)', Punctuation, '#pop'), + (r',', Punctuation, 'type'), + ], + + # a parenthesis expr that contain exactly one expr + 'parenthesis': [ + include('spaces'), + default(('#pop', 'parenthesis-close', 'flag', 'expr')), + ], + + 'parenthesis-open': [ + include('spaces'), + (r'\(', Punctuation, '#pop'), + ], + + 'parenthesis-close': [ + include('spaces'), + (r'\)', Punctuation, '#pop'), + ], + + 'var': [ + include('spaces'), + (ident_no_keyword, Text, ('#pop', 'var-sep', 'assign', 'flag', 'prop-get-set')), + ], + + # optional more var decl. 
+ 'var-sep': [ + include('spaces'), + (r',', Punctuation, ('#pop', 'var')), + default('#pop'), + ], + + # optional assignment + 'assign': [ + include('spaces'), + (r'=', Operator, ('#pop', 'expr')), + default('#pop'), + ], + + # optional type flag + 'flag': [ + include('spaces'), + (r':', Punctuation, ('#pop', 'type')), + default('#pop'), + ], + + # colon as part of a ternary operator (?:) + 'ternary': [ + include('spaces'), + (r':', Operator, '#pop'), + ], + + # function call + 'call': [ + include('spaces'), + (r'\)', Punctuation, '#pop'), + default(('#pop', 'call-sep', 'expr')), + ], + + # after a call param + 'call-sep': [ + include('spaces'), + (r'\)', Punctuation, '#pop'), + (r',', Punctuation, ('#pop', 'call')), + ], + + # bracket can be block or object + 'bracket': [ + include('spaces'), + (r'(?!(?:\$\s*[a-z]\b|\$(?!'+ident+')))' + ident_no_keyword, Name, + ('#pop', 'bracket-check')), + (r"'", String.Single, ('#pop', 'bracket-check', 'string-single')), + (r'"', String.Double, ('#pop', 'bracket-check', 'string-double')), + default(('#pop', 'block')), + ], + + 'bracket-check': [ + include('spaces'), + (r':', Punctuation, ('#pop', 'object-sep', 'expr')), # is object + default(('#pop', 'block', 'optional-semicolon', 'expr-chain')), # is block + ], + + # code block + 'block': [ + include('spaces'), + (r'\}', Punctuation, '#pop'), + default('expr-statement'), + ], + + # object in key-value pairs + 'object': [ + include('spaces'), + (r'\}', Punctuation, '#pop'), + default(('#pop', 'object-sep', 'expr', 'colon', 'ident-or-string')) + ], + + # a key of an object + 'ident-or-string': [ + include('spaces'), + (ident_no_keyword, Name, '#pop'), + (r"'", String.Single, ('#pop', 'string-single')), + (r'"', String.Double, ('#pop', 'string-double')), + ], + + # after a key-value pair in object + 'object-sep': [ + include('spaces'), + (r'\}', Punctuation, '#pop'), + (r',', Punctuation, ('#pop', 'object')), + ], + + + + } + + def analyse_text(text): + if 
re.match(r'\w+\s*:\s*\w', text): + return 0.3 + + +class HxmlLexer(RegexLexer): + """ + Lexer for `haXe build `_ files. + + .. versionadded:: 1.6 + """ + name = 'Hxml' + aliases = ['haxeml', 'hxml'] + filenames = ['*.hxml'] + + tokens = { + 'root': [ + # Seperator + (r'(--)(next)', bygroups(Punctuation, Generic.Heading)), + # Compiler switches with one dash + (r'(-)(prompt|debug|v)', bygroups(Punctuation, Keyword.Keyword)), + # Compilerswitches with two dashes + (r'(--)(neko-source|flash-strict|flash-use-stage|no-opt|no-traces|' + r'no-inline|times|no-output)', bygroups(Punctuation, Keyword)), + # Targets and other options that take an argument + (r'(-)(cpp|js|neko|x|as3|swf9?|swf-lib|php|xml|main|lib|D|resource|' + r'cp|cmd)( +)(.+)', + bygroups(Punctuation, Keyword, Whitespace, String)), + # Options that take only numerical arguments + (r'(-)(swf-version)( +)(\d+)', + bygroups(Punctuation, Keyword, Number.Integer)), + # An Option that defines the size, the fps and the background + # color of an flash movie + (r'(-)(swf-header)( +)(\d+)(:)(\d+)(:)(\d+)(:)([A-Fa-f0-9]{6})', + bygroups(Punctuation, Keyword, Whitespace, Number.Integer, + Punctuation, Number.Integer, Punctuation, Number.Integer, + Punctuation, Number.Hex)), + # options with two dashes that takes arguments + (r'(--)(js-namespace|php-front|php-lib|remap|gen-hx-classes)( +)' + r'(.+)', bygroups(Punctuation, Keyword, Whitespace, String)), + # Single line comment, multiline ones are not allowed. + (r'#.*', Comment.Single) + ] + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/hdl.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/hdl.py new file mode 100644 index 0000000..c8e3471 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/hdl.py @@ -0,0 +1,375 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.hdl + ~~~~~~~~~~~~~~~~~~~ + + Lexers for hardware descriptor languages. 
# -*- coding: utf-8 -*-
"""
    pygments.lexers.hdl
    ~~~~~~~~~~~~~~~~~~~

    Lexers for hardware descriptor languages.

    :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import re

from pygments.lexer import RegexLexer, bygroups, include, using, this, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
    Number, Punctuation, Error

__all__ = ['VerilogLexer', 'SystemVerilogLexer', 'VhdlLexer']


class VerilogLexer(RegexLexer):
    """
    For verilog source code with preprocessor directives.

    .. versionadded:: 1.4
    """
    name = 'verilog'
    aliases = ['verilog', 'v']
    filenames = ['*.v']
    mimetypes = ['text/x-verilog']

    #: optional Comment or Whitespace
    _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'

    tokens = {
        'root': [
            (r'^\s*`define', Comment.Preproc, 'macro'),
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text),  # line continuation
            (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'[{}#@]', Punctuation),
            (r'L?"', String, 'string'),
            (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            # NOTE(review): the leading ([0-9]+)| alternative makes plain
            # integers match as Hex/Bin/Oct too; kept as-is to preserve
            # upstream behaviour.
            (r'([0-9]+)|(\'h)[0-9a-fA-F]+', Number.Hex),
            (r'([0-9]+)|(\'b)[01]+', Number.Bin),
            (r'([0-9]+)|(\'d)[0-9]+', Number.Integer),
            (r'([0-9]+)|(\'o)[0-7]+', Number.Oct),
            (r'\'[01xz]', Number),
            (r'\d+[Ll]?', Number.Integer),
            (r'\*/', Error),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'[()\[\],.;\']', Punctuation),
            (r'`[a-zA-Z_]\w*', Name.Constant),

            (r'^(\s*)(package)(\s+)', bygroups(Text, Keyword.Namespace, Text)),
            (r'^(\s*)(import)(\s+)', bygroups(Text, Keyword.Namespace, Text),
             'import'),

            (words((
                'always', 'always_comb', 'always_ff', 'always_latch', 'and',
                'assign', 'automatic', 'begin', 'break', 'buf', 'bufif0', 'bufif1',
                'case', 'casex', 'casez', 'cmos', 'const', 'continue', 'deassign',
                'default', 'defparam', 'disable', 'do', 'edge', 'else', 'end', 'endcase',
                'endfunction', 'endgenerate', 'endmodule', 'endpackage', 'endprimitive',
                'endspecify', 'endtable', 'endtask', 'enum', 'event', 'final', 'for',
                'force', 'forever', 'fork', 'function', 'generate', 'genvar', 'highz0',
                'highz1', 'if', 'initial', 'inout', 'input', 'integer', 'join', 'large',
                'localparam', 'macromodule', 'medium', 'module', 'nand', 'negedge',
                'nmos', 'nor', 'not', 'notif0', 'notif1', 'or', 'output', 'packed',
                'parameter', 'pmos', 'posedge', 'primitive', 'pull0', 'pull1',
                'pulldown', 'pullup', 'rcmos', 'ref', 'release', 'repeat', 'return',
                'rnmos', 'rpmos', 'rtran', 'rtranif0', 'rtranif1', 'scalared', 'signed',
                'small', 'specify', 'specparam', 'strength', 'string', 'strong0',
                'strong1', 'struct', 'table', 'task', 'tran', 'tranif0', 'tranif1',
                'type', 'typedef', 'unsigned', 'var', 'vectored', 'void', 'wait',
                'weak0', 'weak1', 'while', 'xnor', 'xor'), suffix=r'\b'),
             Keyword),

            (words((
                'accelerate', 'autoexpand_vectornets', 'celldefine', 'default_nettype',
                'else', 'elsif', 'endcelldefine', 'endif', 'endprotect', 'endprotected',
                'expand_vectornets', 'ifdef', 'ifndef', 'include', 'noaccelerate',
                'noexpand_vectornets', 'noremove_gatenames', 'noremove_netnames',
                'nounconnected_drive', 'protect', 'protected', 'remove_gatenames',
                'remove_netnames', 'resetall', 'timescale', 'unconnected_drive',
                'undef'), prefix=r'`', suffix=r'\b'),
             Comment.Preproc),

            (words((
                'bits', 'bitstoreal', 'bitstoshortreal', 'countdrivers', 'display', 'fclose',
                'fdisplay', 'finish', 'floor', 'fmonitor', 'fopen', 'fstrobe', 'fwrite',
                'getpattern', 'history', 'incsave', 'input', 'itor', 'key', 'list', 'log',
                'monitor', 'monitoroff', 'monitoron', 'nokey', 'nolog', 'printtimescale',
                'random', 'readmemb', 'readmemh', 'realtime', 'realtobits', 'reset',
                'reset_count', 'reset_value', 'restart', 'rtoi', 'save', 'scale', 'scope',
                'shortrealtobits', 'showscopes', 'showvariables', 'showvars', 'sreadmemb',
                'sreadmemh', 'stime', 'stop', 'strobe', 'time', 'timeformat', 'write'),
                prefix=r'\$', suffix=r'\b'),
             Name.Builtin),

            # FIX: the original list had 'wo' 'shortreal' (implicit string
            # concatenation), producing the bogus keyword 'woshortreal' and
            # dropping both 'wor' and 'shortreal' from the net types.
            (words((
                'byte', 'shortint', 'int', 'longint', 'integer', 'time',
                'bit', 'logic', 'reg', 'supply0', 'supply1', 'tri', 'triand',
                'trior', 'tri0', 'tri1', 'trireg', 'uwire', 'wire', 'wand', 'wor',
                'shortreal', 'real', 'realtime'), suffix=r'\b'),
             Keyword.Type),
            ('[a-zA-Z_]\w*:(?!:)', Name.Label),
            ('[a-zA-Z_]\w*', Name),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),  # all other characters
            (r'\\\n', String),  # line continuation
            (r'\\', String),  # stray backslash
        ],
        'macro': [
            (r'[^/\n]+', Comment.Preproc),
            (r'/[*](.|\n)*?[*]/', Comment.Multiline),
            (r'//.*?\n', Comment.Single, '#pop'),
            (r'/', Comment.Preproc),
            (r'(?<=\\)\n', Comment.Preproc),
            (r'\n', Comment.Preproc, '#pop'),
        ],
        'import': [
            (r'[\w:]+\*?', Name.Namespace, '#pop')
        ]
    }

    def get_tokens_unprocessed(self, text):
        for index, token, value in \
                RegexLexer.get_tokens_unprocessed(self, text):
            # Convention: mark all upper case names as constants
            if token is Name:
                if value.isupper():
                    token = Name.Constant
            yield index, token, value
versionadded:: 1.5 + """ + name = 'systemverilog' + aliases = ['systemverilog', 'sv'] + filenames = ['*.sv', '*.svh'] + mimetypes = ['text/x-systemverilog'] + + #: optional Comment or Whitespace + _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+' + + tokens = { + 'root': [ + (r'^\s*`define', Comment.Preproc, 'macro'), + (r'^(\s*)(package)(\s+)', bygroups(Text, Keyword.Namespace, Text)), + (r'^(\s*)(import)(\s+)', bygroups(Text, Keyword.Namespace, Text), 'import'), + + (r'\n', Text), + (r'\s+', Text), + (r'\\\n', Text), # line continuation + (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single), + (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), + (r'[{}#@]', Punctuation), + (r'L?"', String, 'string'), + (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char), + (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float), + (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), + (r'([0-9]+)|(\'h)[0-9a-fA-F]+', Number.Hex), + (r'([0-9]+)|(\'b)[01]+', Number.Bin), + (r'([0-9]+)|(\'d)[0-9]+', Number.Integer), + (r'([0-9]+)|(\'o)[0-7]+', Number.Oct), + (r'\'[01xz]', Number), + (r'\d+[Ll]?', Number.Integer), + (r'\*/', Error), + (r'[~!%^&*+=|?:<>/-]', Operator), + (r'[()\[\],.;\']', Punctuation), + (r'`[a-zA-Z_]\w*', Name.Constant), + + (words(( + 'accept_on', 'alias', 'always', 'always_comb', 'always_ff', 'always_latch', + 'and', 'assert', 'assign', 'assume', 'automatic', 'before', 'begin', 'bind', 'bins', + 'binsof', 'bit', 'break', 'buf', 'bufif0', 'bufif1', 'byte', 'case', 'casex', 'casez', + 'cell', 'chandle', 'checker', 'class', 'clocking', 'cmos', 'config', 'const', 'constraint', + 'context', 'continue', 'cover', 'covergroup', 'coverpoint', 'cross', 'deassign', + 'default', 'defparam', 'design', 'disable', 'dist', 'do', 'edge', 'else', 'end', 'endcase', + 'endchecker', 'endclass', 'endclocking', 'endconfig', 'endfunction', 'endgenerate', + 'endgroup', 'endinterface', 'endmodule', 'endpackage', 'endprimitive', + 'endprogram', 'endproperty', 'endsequence', 
'endspecify', 'endtable', + 'endtask', 'enum', 'event', 'eventually', 'expect', 'export', 'extends', 'extern', + 'final', 'first_match', 'for', 'force', 'foreach', 'forever', 'fork', 'forkjoin', + 'function', 'generate', 'genvar', 'global', 'highz0', 'highz1', 'if', 'iff', 'ifnone', + 'ignore_bins', 'illegal_bins', 'implies', 'import', 'incdir', 'include', + 'initial', 'inout', 'input', 'inside', 'instance', 'int', 'integer', 'interface', + 'intersect', 'join', 'join_any', 'join_none', 'large', 'let', 'liblist', 'library', + 'local', 'localparam', 'logic', 'longint', 'macromodule', 'matches', 'medium', + 'modport', 'module', 'nand', 'negedge', 'new', 'nexttime', 'nmos', 'nor', 'noshowcancelled', + 'not', 'notif0', 'notif1', 'null', 'or', 'output', 'package', 'packed', 'parameter', + 'pmos', 'posedge', 'primitive', 'priority', 'program', 'property', 'protected', + 'pull0', 'pull1', 'pulldown', 'pullup', 'pulsestyle_ondetect', 'pulsestyle_onevent', + 'pure', 'rand', 'randc', 'randcase', 'randsequence', 'rcmos', 'real', 'realtime', + 'ref', 'reg', 'reject_on', 'release', 'repeat', 'restrict', 'return', 'rnmos', + 'rpmos', 'rtran', 'rtranif0', 'rtranif1', 's_always', 's_eventually', 's_nexttime', + 's_until', 's_until_with', 'scalared', 'sequence', 'shortint', 'shortreal', + 'showcancelled', 'signed', 'small', 'solve', 'specify', 'specparam', 'static', + 'string', 'strong', 'strong0', 'strong1', 'struct', 'super', 'supply0', 'supply1', + 'sync_accept_on', 'sync_reject_on', 'table', 'tagged', 'task', 'this', 'throughout', + 'time', 'timeprecision', 'timeunit', 'tran', 'tranif0', 'tranif1', 'tri', 'tri0', + 'tri1', 'triand', 'trior', 'trireg', 'type', 'typedef', 'union', 'unique', 'unique0', + 'unsigned', 'until', 'until_with', 'untyped', 'use', 'uwire', 'var', 'vectored', + 'virtual', 'void', 'wait', 'wait_order', 'wand', 'weak', 'weak0', 'weak1', 'while', + 'wildcard', 'wire', 'with', 'within', 'wor', 'xnor', 'xor'), suffix=r'\b'), + Keyword), + + (words(( + 
'`__FILE__', '`__LINE__', '`begin_keywords', '`celldefine', '`default_nettype', + '`define', '`else', '`elsif', '`end_keywords', '`endcelldefine', '`endif', + '`ifdef', '`ifndef', '`include', '`line', '`nounconnected_drive', '`pragma', + '`resetall', '`timescale', '`unconnected_drive', '`undef', '`undefineall'), + suffix=r'\b'), + Comment.Preproc), + + (words(( + '$display', '$displayb', '$displayh', '$displayo', '$dumpall', '$dumpfile', + '$dumpflush', '$dumplimit', '$dumpoff', '$dumpon', '$dumpports', + '$dumpportsall', '$dumpportsflush', '$dumpportslimit', '$dumpportsoff', + '$dumpportson', '$dumpvars', '$fclose', '$fdisplay', '$fdisplayb', + '$fdisplayh', '$fdisplayo', '$feof', '$ferror', '$fflush', '$fgetc', + '$fgets', '$finish', '$fmonitor', '$fmonitorb', '$fmonitorh', '$fmonitoro', + '$fopen', '$fread', '$fscanf', '$fseek', '$fstrobe', '$fstrobeb', '$fstrobeh', + '$fstrobeo', '$ftell', '$fwrite', '$fwriteb', '$fwriteh', '$fwriteo', + '$monitor', '$monitorb', '$monitorh', '$monitoro', '$monitoroff', + '$monitoron', '$plusargs', '$random', '$readmemb', '$readmemh', '$rewind', + '$sformat', '$sformatf', '$sscanf', '$strobe', '$strobeb', '$strobeh', '$strobeo', + '$swrite', '$swriteb', '$swriteh', '$swriteo', '$test', '$ungetc', + '$value$plusargs', '$write', '$writeb', '$writeh', '$writememb', + '$writememh', '$writeo'), suffix=r'\b'), + Name.Builtin), + + (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'), + (words(( + 'byte', 'shortint', 'int', 'longint', 'integer', 'time', + 'bit', 'logic', 'reg', 'supply0', 'supply1', 'tri', 'triand', + 'trior', 'tri0', 'tri1', 'trireg', 'uwire', 'wire', 'wand', 'wo' + 'shortreal', 'real', 'realtime'), suffix=r'\b'), + Keyword.Type), + ('[a-zA-Z_]\w*:(?!:)', Name.Label), + ('[a-zA-Z_]\w*', Name), + ], + 'classname': [ + (r'[a-zA-Z_]\w*', Name.Class, '#pop'), + ], + 'string': [ + (r'"', String, '#pop'), + (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), + (r'[^\\"\n]+', String), # all other 
characters + (r'\\\n', String), # line continuation + (r'\\', String), # stray backslash + ], + 'macro': [ + (r'[^/\n]+', Comment.Preproc), + (r'/[*](.|\n)*?[*]/', Comment.Multiline), + (r'//.*?\n', Comment.Single, '#pop'), + (r'/', Comment.Preproc), + (r'(?<=\\)\n', Comment.Preproc), + (r'\n', Comment.Preproc, '#pop'), + ], + 'import': [ + (r'[\w:]+\*?', Name.Namespace, '#pop') + ] + } + + def get_tokens_unprocessed(self, text): + for index, token, value in \ + RegexLexer.get_tokens_unprocessed(self, text): + # Convention: mark all upper case names as constants + if token is Name: + if value.isupper(): + token = Name.Constant + yield index, token, value + + +class VhdlLexer(RegexLexer): + """ + For VHDL source code. + + .. versionadded:: 1.5 + """ + name = 'vhdl' + aliases = ['vhdl'] + filenames = ['*.vhdl', '*.vhd'] + mimetypes = ['text/x-vhdl'] + flags = re.MULTILINE | re.IGNORECASE + + tokens = { + 'root': [ + (r'\n', Text), + (r'\s+', Text), + (r'\\\n', Text), # line continuation + (r'--.*?$', Comment.Single), + (r"'(U|X|0|1|Z|W|L|H|-)'", String.Char), + (r'[~!%^&*+=|?:<>/-]', Operator), + (r"'[a-z_]\w*", Name.Attribute), + (r'[()\[\],.;\']', Punctuation), + (r'"[^\n\\]*"', String), + + (r'(library)(\s+)([a-z_]\w*)', + bygroups(Keyword, Text, Name.Namespace)), + (r'(use)(\s+)(entity)', bygroups(Keyword, Text, Keyword)), + (r'(use)(\s+)([a-z_][\w.]*)', + bygroups(Keyword, Text, Name.Namespace)), + (r'(entity|component)(\s+)([a-z_]\w*)', + bygroups(Keyword, Text, Name.Class)), + (r'(architecture|configuration)(\s+)([a-z_]\w*)(\s+)' + r'(of)(\s+)([a-z_]\w*)(\s+)(is)', + bygroups(Keyword, Text, Name.Class, Text, Keyword, Text, + Name.Class, Text, Keyword)), + + (r'(end)(\s+)', bygroups(using(this), Text), 'endblock'), + + include('types'), + include('keywords'), + include('numbers'), + + (r'[a-z_]\w*', Name), + ], + 'endblock': [ + include('keywords'), + (r'[a-z_]\w*', Name.Class), + (r'(\s+)', Text), + (r';', Punctuation, '#pop'), + ], + 'types': [ + (words(( + 
'boolean', 'bit', 'character', 'severity_level', 'integer', 'time', + 'delay_length', 'natural', 'positive', 'string', 'bit_vector', + 'file_open_kind', 'file_open_status', 'std_ulogic', 'std_ulogic_vector', + 'std_logic', 'std_logic_vector'), suffix=r'\b'), + Keyword.Type), + ], + 'keywords': [ + (words(( + 'abs', 'access', 'after', 'alias', 'all', 'and', + 'architecture', 'array', 'assert', 'attribute', 'begin', 'block', + 'body', 'buffer', 'bus', 'case', 'component', 'configuration', + 'constant', 'disconnect', 'downto', 'else', 'elsif', 'end', + 'entity', 'exit', 'file', 'for', 'function', 'generate', + 'generic', 'group', 'guarded', 'if', 'impure', 'in', + 'inertial', 'inout', 'is', 'label', 'library', 'linkage', + 'literal', 'loop', 'map', 'mod', 'nand', 'new', + 'next', 'nor', 'not', 'null', 'of', 'on', + 'open', 'or', 'others', 'out', 'package', 'port', + 'postponed', 'procedure', 'process', 'pure', 'range', 'record', + 'register', 'reject', 'return', 'rol', 'ror', 'select', + 'severity', 'signal', 'shared', 'sla', 'sli', 'sra', + 'srl', 'subtype', 'then', 'to', 'transport', 'type', + 'units', 'until', 'use', 'variable', 'wait', 'when', + 'while', 'with', 'xnor', 'xor'), suffix=r'\b'), + Keyword), + ], + 'numbers': [ + (r'\d{1,2}#[0-9a-f_]+#?', Number.Integer), + (r'\d+', Number.Integer), + (r'(\d+\.\d*|\.\d+|\d+)E[+-]?\d+', Number.Float), + (r'X"[0-9a-f_]+"', Number.Hex), + (r'O"[0-7_]+"', Number.Oct), + (r'B"[01_]+"', Number.Bin), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/html.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/html.py new file mode 100644 index 0000000..8cf331d --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/html.py @@ -0,0 +1,589 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.html + ~~~~~~~~~~~~~~~~~~~~ + + Lexers for HTML, XML and related markup. 
+ + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, bygroups, \ + default, using +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Punctuation +from pygments.util import looks_like_xml, html_doctype_matches + +from pygments.lexers.javascript import JavascriptLexer +from pygments.lexers.jvm import ScalaLexer +from pygments.lexers.css import CssLexer, _indentation, _starts_block +from pygments.lexers.ruby import RubyLexer + +__all__ = ['HtmlLexer', 'DtdLexer', 'XmlLexer', 'XsltLexer', 'HamlLexer', + 'ScamlLexer', 'JadeLexer'] + + +class HtmlLexer(RegexLexer): + """ + For HTML 4 and XHTML 1 markup. Nested JavaScript and CSS is highlighted + by the appropriate lexer. + """ + + name = 'HTML' + aliases = ['html'] + filenames = ['*.html', '*.htm', '*.xhtml', '*.xslt'] + mimetypes = ['text/html', 'application/xhtml+xml'] + + flags = re.IGNORECASE | re.DOTALL + tokens = { + 'root': [ + ('[^<&]+', Text), + (r'&\S*?;', Name.Entity), + (r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc), + ('', Comment, '#pop'), + ('-', Comment), + ], + 'tag': [ + (r'\s+', Text), + (r'([\w:-]+\s*=)(\s*)', bygroups(Name.Attribute, Text), 'attr'), + (r'[\w:-]+', Name.Attribute), + (r'/?\s*>', Name.Tag, '#pop'), + ], + 'script-content': [ + (r'<\s*/\s*script\s*>', Name.Tag, '#pop'), + (r'.+?(?=<\s*/\s*script\s*>)', using(JavascriptLexer)), + ], + 'style-content': [ + (r'<\s*/\s*style\s*>', Name.Tag, '#pop'), + (r'.+?(?=<\s*/\s*style\s*>)', using(CssLexer)), + ], + 'attr': [ + ('".*?"', String, '#pop'), + ("'.*?'", String, '#pop'), + (r'[^\s>]+', String, '#pop'), + ], + } + + def analyse_text(text): + if html_doctype_matches(text): + return 0.5 + + +class DtdLexer(RegexLexer): + """ + A lexer for DTDs (Document Type Definitions). + + .. 
versionadded:: 1.5 + """ + + flags = re.MULTILINE | re.DOTALL + + name = 'DTD' + aliases = ['dtd'] + filenames = ['*.dtd'] + mimetypes = ['application/xml-dtd'] + + tokens = { + 'root': [ + include('common'), + + (r'(\s]+)', + bygroups(Keyword, Text, Name.Tag)), + (r'PUBLIC|SYSTEM', Keyword.Constant), + (r'[\[\]>]', Keyword), + ], + + 'common': [ + (r'\s+', Text), + (r'(%|&)[^;]*;', Name.Entity), + ('', Comment, '#pop'), + ('-', Comment), + ], + + 'element': [ + include('common'), + (r'EMPTY|ANY|#PCDATA', Keyword.Constant), + (r'[^>\s|()?+*,]+', Name.Tag), + (r'>', Keyword, '#pop'), + ], + + 'attlist': [ + include('common'), + (r'CDATA|IDREFS|IDREF|ID|NMTOKENS|NMTOKEN|ENTITIES|ENTITY|NOTATION', + Keyword.Constant), + (r'#REQUIRED|#IMPLIED|#FIXED', Keyword.Constant), + (r'xml:space|xml:lang', Keyword.Reserved), + (r'[^>\s|()?+*,]+', Name.Attribute), + (r'>', Keyword, '#pop'), + ], + + 'entity': [ + include('common'), + (r'SYSTEM|PUBLIC|NDATA', Keyword.Constant), + (r'[^>\s|()?+*,]+', Name.Entity), + (r'>', Keyword, '#pop'), + ], + + 'notation': [ + include('common'), + (r'SYSTEM|PUBLIC', Keyword.Constant), + (r'[^>\s|()?+*,]+', Name.Attribute), + (r'>', Keyword, '#pop'), + ], + } + + def analyse_text(text): + if not looks_like_xml(text) and \ + ('', Comment.Preproc), + ('', Comment, '#pop'), + ('-', Comment), + ], + 'tag': [ + (r'\s+', Text), + (r'[\w.:-]+\s*=', Name.Attribute, 'attr'), + (r'/?\s*>', Name.Tag, '#pop'), + ], + 'attr': [ + ('\s+', Text), + ('".*?"', String, '#pop'), + ("'.*?'", String, '#pop'), + (r'[^\s>]+', String, '#pop'), + ], + } + + def analyse_text(text): + if looks_like_xml(text): + return 0.45 # less than HTML + + +class XsltLexer(XmlLexer): + """ + A lexer for XSLT. + + .. 
versionadded:: 0.10 + """ + + name = 'XSLT' + aliases = ['xslt'] + filenames = ['*.xsl', '*.xslt', '*.xpl'] # xpl is XProc + mimetypes = ['application/xsl+xml', 'application/xslt+xml'] + + EXTRA_KEYWORDS = set(( + 'apply-imports', 'apply-templates', 'attribute', + 'attribute-set', 'call-template', 'choose', 'comment', + 'copy', 'copy-of', 'decimal-format', 'element', 'fallback', + 'for-each', 'if', 'import', 'include', 'key', 'message', + 'namespace-alias', 'number', 'otherwise', 'output', 'param', + 'preserve-space', 'processing-instruction', 'sort', + 'strip-space', 'stylesheet', 'template', 'text', 'transform', + 'value-of', 'variable', 'when', 'with-param' + )) + + def get_tokens_unprocessed(self, text): + for index, token, value in XmlLexer.get_tokens_unprocessed(self, text): + m = re.match(']*)/?>?', value) + + if token is Name.Tag and m and m.group(1) in self.EXTRA_KEYWORDS: + yield index, Keyword, value + else: + yield index, token, value + + def analyse_text(text): + if looks_like_xml(text) and ']{1,2}(?=[ \t=])', Punctuation), + include('eval-or-plain'), + ], + + 'plain': [ + (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text), + (r'(#\{)(' + _dot + '*?)(\})', + bygroups(String.Interpol, using(RubyLexer), String.Interpol)), + (r'\n', Text, 'root'), + ], + + 'html-attributes': [ + (r'\s+', Text), + (r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'), + (r'[\w:-]+', Name.Attribute), + (r'\)', Text, '#pop'), + ], + + 'html-attribute-value': [ + (r'[ \t]+', Text), + (r'\w+', Name.Variable, '#pop'), + (r'@\w+', Name.Variable.Instance, '#pop'), + (r'\$\w+', Name.Variable.Global, '#pop'), + (r"'(\\\\|\\'|[^'\n])*'", String, '#pop'), + (r'"(\\\\|\\"|[^"\n])*"', String, '#pop'), + ], + + 'html-comment-block': [ + (_dot + '+', Comment), + (r'\n', Text, 'root'), + ], + + 'haml-comment-block': [ + (_dot + '+', Comment.Preproc), + (r'\n', Text, 'root'), + ], + + 'filter-block': [ + (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator), + (r'(#\{)(' + _dot + '*?)(\})', + 
bygroups(String.Interpol, using(RubyLexer), String.Interpol)), + (r'\n', Text, 'root'), + ], + } + + +class ScamlLexer(ExtendedRegexLexer): + """ + For `Scaml markup `_. Scaml is Haml for Scala. + + .. versionadded:: 1.4 + """ + + name = 'Scaml' + aliases = ['scaml'] + filenames = ['*.scaml'] + mimetypes = ['text/x-scaml'] + + flags = re.IGNORECASE + # Scaml does not yet support the " |\n" notation to + # wrap long lines. Once it does, use the custom faux + # dot instead. + # _dot = r'(?: \|\n(?=.* \|)|.)' + _dot = r'.' + + tokens = { + 'root': [ + (r'[ \t]*\n', Text), + (r'[ \t]*', _indentation), + ], + + 'css': [ + (r'\.[\w:-]+', Name.Class, 'tag'), + (r'\#[\w:-]+', Name.Function, 'tag'), + ], + + 'eval-or-plain': [ + (r'[&!]?==', Punctuation, 'plain'), + (r'([&!]?[=~])(' + _dot + r'*\n)', + bygroups(Punctuation, using(ScalaLexer)), + 'root'), + default('plain'), + ], + + 'content': [ + include('css'), + (r'%[\w:-]+', Name.Tag, 'tag'), + (r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'), + (r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)', + bygroups(Comment, Comment.Special, Comment), + '#pop'), + (r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'), + '#pop'), + (r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc, + 'scaml-comment-block'), '#pop'), + (r'(-@\s*)(import)?(' + _dot + r'*\n)', + bygroups(Punctuation, Keyword, using(ScalaLexer)), + '#pop'), + (r'(-)(' + _dot + r'*\n)', + bygroups(Punctuation, using(ScalaLexer)), + '#pop'), + (r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'), + '#pop'), + include('eval-or-plain'), + ], + + 'tag': [ + include('css'), + (r'\{(,\n|' + _dot + ')*?\}', using(ScalaLexer)), + (r'\[' + _dot + '*?\]', using(ScalaLexer)), + (r'\(', Text, 'html-attributes'), + (r'/[ \t]*\n', Punctuation, '#pop:2'), + (r'[<>]{1,2}(?=[ \t=])', Punctuation), + include('eval-or-plain'), + ], + + 'plain': [ + (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text), + (r'(#\{)(' + _dot + '*?)(\})', + bygroups(String.Interpol, 
using(ScalaLexer), String.Interpol)), + (r'\n', Text, 'root'), + ], + + 'html-attributes': [ + (r'\s+', Text), + (r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'), + (r'[\w:-]+', Name.Attribute), + (r'\)', Text, '#pop'), + ], + + 'html-attribute-value': [ + (r'[ \t]+', Text), + (r'\w+', Name.Variable, '#pop'), + (r'@\w+', Name.Variable.Instance, '#pop'), + (r'\$\w+', Name.Variable.Global, '#pop'), + (r"'(\\\\|\\'|[^'\n])*'", String, '#pop'), + (r'"(\\\\|\\"|[^"\n])*"', String, '#pop'), + ], + + 'html-comment-block': [ + (_dot + '+', Comment), + (r'\n', Text, 'root'), + ], + + 'scaml-comment-block': [ + (_dot + '+', Comment.Preproc), + (r'\n', Text, 'root'), + ], + + 'filter-block': [ + (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator), + (r'(#\{)(' + _dot + '*?)(\})', + bygroups(String.Interpol, using(ScalaLexer), String.Interpol)), + (r'\n', Text, 'root'), + ], + } + + +class JadeLexer(ExtendedRegexLexer): + """ + For Jade markup. + Jade is a variant of Scaml, see: + http://scalate.fusesource.org/documentation/scaml-reference.html + + .. versionadded:: 1.4 + """ + + name = 'Jade' + aliases = ['jade'] + filenames = ['*.jade'] + mimetypes = ['text/x-jade'] + + flags = re.IGNORECASE + _dot = r'.' + + tokens = { + 'root': [ + (r'[ \t]*\n', Text), + (r'[ \t]*', _indentation), + ], + + 'css': [ + (r'\.[\w:-]+', Name.Class, 'tag'), + (r'\#[\w:-]+', Name.Function, 'tag'), + ], + + 'eval-or-plain': [ + (r'[&!]?==', Punctuation, 'plain'), + (r'([&!]?[=~])(' + _dot + r'*\n)', + bygroups(Punctuation, using(ScalaLexer)), 'root'), + default('plain'), + ], + + 'content': [ + include('css'), + (r'!!!' 
+ _dot + r'*\n', Name.Namespace, '#pop'), + (r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)', + bygroups(Comment, Comment.Special, Comment), + '#pop'), + (r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'), + '#pop'), + (r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc, + 'scaml-comment-block'), '#pop'), + (r'(-@\s*)(import)?(' + _dot + r'*\n)', + bygroups(Punctuation, Keyword, using(ScalaLexer)), + '#pop'), + (r'(-)(' + _dot + r'*\n)', + bygroups(Punctuation, using(ScalaLexer)), + '#pop'), + (r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'), + '#pop'), + (r'[\w:-]+', Name.Tag, 'tag'), + (r'\|', Text, 'eval-or-plain'), + ], + + 'tag': [ + include('css'), + (r'\{(,\n|' + _dot + ')*?\}', using(ScalaLexer)), + (r'\[' + _dot + '*?\]', using(ScalaLexer)), + (r'\(', Text, 'html-attributes'), + (r'/[ \t]*\n', Punctuation, '#pop:2'), + (r'[<>]{1,2}(?=[ \t=])', Punctuation), + include('eval-or-plain'), + ], + + 'plain': [ + (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text), + (r'(#\{)(' + _dot + '*?)(\})', + bygroups(String.Interpol, using(ScalaLexer), String.Interpol)), + (r'\n', Text, 'root'), + ], + + 'html-attributes': [ + (r'\s+', Text), + (r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'), + (r'[\w:-]+', Name.Attribute), + (r'\)', Text, '#pop'), + ], + + 'html-attribute-value': [ + (r'[ \t]+', Text), + (r'\w+', Name.Variable, '#pop'), + (r'@\w+', Name.Variable.Instance, '#pop'), + (r'\$\w+', Name.Variable.Global, '#pop'), + (r"'(\\\\|\\'|[^'\n])*'", String, '#pop'), + (r'"(\\\\|\\"|[^"\n])*"', String, '#pop'), + ], + + 'html-comment-block': [ + (_dot + '+', Comment), + (r'\n', Text, 'root'), + ], + + 'scaml-comment-block': [ + (_dot + '+', Comment.Preproc), + (r'\n', Text, 'root'), + ], + + 'filter-block': [ + (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator), + (r'(#\{)(' + _dot + '*?)(\})', + bygroups(String.Interpol, using(ScalaLexer), String.Interpol)), + (r'\n', Text, 'root'), + ], + } diff --git 
a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/idl.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/idl.py new file mode 100644 index 0000000..7941028 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/idl.py @@ -0,0 +1,262 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.idl + ~~~~~~~~~~~~~~~~~~~ + + Lexers for IDL. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, words +from pygments.token import Text, Comment, Operator, Keyword, Name, Number + +__all__ = ['IDLLexer'] + + +class IDLLexer(RegexLexer): + """ + Pygments Lexer for IDL (Interactive Data Language). + + .. versionadded:: 1.6 + """ + name = 'IDL' + aliases = ['idl'] + filenames = ['*.pro'] + mimetypes = ['text/idl'] + + flags = re.IGNORECASE | re.MULTILINE + + _RESERVED = ( + 'and', 'begin', 'break', 'case', 'common', 'compile_opt', + 'continue', 'do', 'else', 'end', 'endcase', 'elseelse', + 'endfor', 'endforeach', 'endif', 'endrep', 'endswitch', + 'endwhile', 'eq', 'for', 'foreach', 'forward_function', + 'function', 'ge', 'goto', 'gt', 'if', 'inherits', 'le', + 'lt', 'mod', 'ne', 'not', 'of', 'on_ioerror', 'or', 'pro', + 'repeat', 'switch', 'then', 'until', 'while', 'xor') + """Reserved words from: http://www.exelisvis.com/docs/reswords.html""" + + _BUILTIN_LIB = ( + 'abs', 'acos', 'adapt_hist_equal', 'alog', 'alog10', + 'amoeba', 'annotate', 'app_user_dir', 'app_user_dir_query', + 'arg_present', 'array_equal', 'array_indices', 'arrow', + 'ascii_template', 'asin', 'assoc', 'atan', 'axis', + 'a_correlate', 'bandpass_filter', 'bandreject_filter', + 'barplot', 'bar_plot', 'beseli', 'beselj', 'beselk', + 'besely', 'beta', 'bilinear', 'binary_template', 'bindgen', + 'binomial', 'bin_date', 'bit_ffs', 'bit_population', + 'blas_axpy', 'blk_con', 'box_cursor', 'breakpoint', + 'broyden', 
'butterworth', 'bytarr', 'byte', 'byteorder', + 'bytscl', 'caldat', 'calendar', 'call_external', + 'call_function', 'call_method', 'call_procedure', 'canny', + 'catch', 'cd', 'cdf_\w*', 'ceil', 'chebyshev', + 'check_math', + 'chisqr_cvf', 'chisqr_pdf', 'choldc', 'cholsol', 'cindgen', + 'cir_3pnt', 'close', 'cluster', 'cluster_tree', 'clust_wts', + 'cmyk_convert', 'colorbar', 'colorize_sample', + 'colormap_applicable', 'colormap_gradient', + 'colormap_rotation', 'colortable', 'color_convert', + 'color_exchange', 'color_quan', 'color_range_map', 'comfit', + 'command_line_args', 'complex', 'complexarr', 'complexround', + 'compute_mesh_normals', 'cond', 'congrid', 'conj', + 'constrained_min', 'contour', 'convert_coord', 'convol', + 'convol_fft', 'coord2to3', 'copy_lun', 'correlate', 'cos', + 'cosh', 'cpu', 'cramer', 'create_cursor', 'create_struct', + 'create_view', 'crossp', 'crvlength', 'cti_test', + 'ct_luminance', 'cursor', 'curvefit', 'cvttobm', 'cv_coord', + 'cw_animate', 'cw_animate_getp', 'cw_animate_load', + 'cw_animate_run', 'cw_arcball', 'cw_bgroup', 'cw_clr_index', + 'cw_colorsel', 'cw_defroi', 'cw_field', 'cw_filesel', + 'cw_form', 'cw_fslider', 'cw_light_editor', + 'cw_light_editor_get', 'cw_light_editor_set', 'cw_orient', + 'cw_palette_editor', 'cw_palette_editor_get', + 'cw_palette_editor_set', 'cw_pdmenu', 'cw_rgbslider', + 'cw_tmpl', 'cw_zoom', 'c_correlate', 'dblarr', 'db_exists', + 'dcindgen', 'dcomplex', 'dcomplexarr', 'define_key', + 'define_msgblk', 'define_msgblk_from_file', 'defroi', + 'defsysv', 'delvar', 'dendrogram', 'dendro_plot', 'deriv', + 'derivsig', 'determ', 'device', 'dfpmin', 'diag_matrix', + 'dialog_dbconnect', 'dialog_message', 'dialog_pickfile', + 'dialog_printersetup', 'dialog_printjob', + 'dialog_read_image', 'dialog_write_image', 'digital_filter', + 'dilate', 'dindgen', 'dissolve', 'dist', 'distance_measure', + 'dlm_load', 'dlm_register', 'doc_library', 'double', + 'draw_roi', 'edge_dog', 'efont', 'eigenql', 'eigenvec', + 
'ellipse', 'elmhes', 'emboss', 'empty', 'enable_sysrtn', + 'eof', 'eos_\w*', 'erase', 'erf', 'erfc', 'erfcx', + 'erode', 'errorplot', 'errplot', 'estimator_filter', + 'execute', 'exit', 'exp', 'expand', 'expand_path', 'expint', + 'extrac', 'extract_slice', 'factorial', 'fft', 'filepath', + 'file_basename', 'file_chmod', 'file_copy', 'file_delete', + 'file_dirname', 'file_expand_path', 'file_info', + 'file_lines', 'file_link', 'file_mkdir', 'file_move', + 'file_poll_input', 'file_readlink', 'file_same', + 'file_search', 'file_test', 'file_which', 'findgen', + 'finite', 'fix', 'flick', 'float', 'floor', 'flow3', + 'fltarr', 'flush', 'format_axis_values', 'free_lun', + 'fstat', 'fulstr', 'funct', 'fv_test', 'fx_root', + 'fz_roots', 'f_cvf', 'f_pdf', 'gamma', 'gamma_ct', + 'gauss2dfit', 'gaussfit', 'gaussian_function', 'gaussint', + 'gauss_cvf', 'gauss_pdf', 'gauss_smooth', 'getenv', + 'getwindows', 'get_drive_list', 'get_dxf_objects', + 'get_kbrd', 'get_login_info', 'get_lun', 'get_screen_size', + 'greg2jul', 'grib_\w*', 'grid3', 'griddata', + 'grid_input', 'grid_tps', 'gs_iter', + 'h5[adfgirst]_\w*', 'h5_browser', 'h5_close', + 'h5_create', 'h5_get_libversion', 'h5_open', 'h5_parse', + 'hanning', 'hash', 'hdf_\w*', 'heap_free', + 'heap_gc', 'heap_nosave', 'heap_refcount', 'heap_save', + 'help', 'hilbert', 'histogram', 'hist_2d', 'hist_equal', + 'hls', 'hough', 'hqr', 'hsv', 'h_eq_ct', 'h_eq_int', + 'i18n_multibytetoutf8', 'i18n_multibytetowidechar', + 'i18n_utf8tomultibyte', 'i18n_widechartomultibyte', + 'ibeta', 'icontour', 'iconvertcoord', 'idelete', 'identity', + 'idlexbr_assistant', 'idlitsys_createtool', 'idl_base64', + 'idl_validname', 'iellipse', 'igamma', 'igetcurrent', + 'igetdata', 'igetid', 'igetproperty', 'iimage', 'image', + 'image_cont', 'image_statistics', 'imaginary', 'imap', + 'indgen', 'intarr', 'interpol', 'interpolate', + 'interval_volume', 'int_2d', 'int_3d', 'int_tabulated', + 'invert', 'ioctl', 'iopen', 'iplot', 'ipolygon', + 'ipolyline', 
'iputdata', 'iregister', 'ireset', 'iresolve', + 'irotate', 'ir_filter', 'isa', 'isave', 'iscale', + 'isetcurrent', 'isetproperty', 'ishft', 'isocontour', + 'isosurface', 'isurface', 'itext', 'itranslate', 'ivector', + 'ivolume', 'izoom', 'i_beta', 'journal', 'json_parse', + 'json_serialize', 'jul2greg', 'julday', 'keyword_set', + 'krig2d', 'kurtosis', 'kw_test', 'l64indgen', 'label_date', + 'label_region', 'ladfit', 'laguerre', 'laplacian', + 'la_choldc', 'la_cholmprove', 'la_cholsol', 'la_determ', + 'la_eigenproblem', 'la_eigenql', 'la_eigenvec', 'la_elmhes', + 'la_gm_linear_model', 'la_hqr', 'la_invert', + 'la_least_squares', 'la_least_square_equality', + 'la_linear_equation', 'la_ludc', 'la_lumprove', 'la_lusol', + 'la_svd', 'la_tridc', 'la_trimprove', 'la_triql', + 'la_trired', 'la_trisol', 'least_squares_filter', 'leefilt', + 'legend', 'legendre', 'linbcg', 'lindgen', 'linfit', + 'linkimage', 'list', 'll_arc_distance', 'lmfit', 'lmgr', + 'lngamma', 'lnp_test', 'loadct', 'locale_get', + 'logical_and', 'logical_or', 'logical_true', 'lon64arr', + 'lonarr', 'long', 'long64', 'lsode', 'ludc', 'lumprove', + 'lusol', 'lu_complex', 'machar', 'make_array', 'make_dll', + 'make_rt', 'map', 'mapcontinents', 'mapgrid', 'map_2points', + 'map_continents', 'map_grid', 'map_image', 'map_patch', + 'map_proj_forward', 'map_proj_image', 'map_proj_info', + 'map_proj_init', 'map_proj_inverse', 'map_set', + 'matrix_multiply', 'matrix_power', 'max', 'md_test', + 'mean', 'meanabsdev', 'mean_filter', 'median', 'memory', + 'mesh_clip', 'mesh_decimate', 'mesh_issolid', 'mesh_merge', + 'mesh_numtriangles', 'mesh_obj', 'mesh_smooth', + 'mesh_surfacearea', 'mesh_validate', 'mesh_volume', + 'message', 'min', 'min_curve_surf', 'mk_html_help', + 'modifyct', 'moment', 'morph_close', 'morph_distance', + 'morph_gradient', 'morph_hitormiss', 'morph_open', + 'morph_thin', 'morph_tophat', 'multi', 'm_correlate', + 'ncdf_\w*', 'newton', 'noise_hurl', 'noise_pick', + 'noise_scatter', 'noise_slur', 
'norm', 'n_elements', + 'n_params', 'n_tags', 'objarr', 'obj_class', 'obj_destroy', + 'obj_hasmethod', 'obj_isa', 'obj_new', 'obj_valid', + 'online_help', 'on_error', 'open', 'oplot', 'oploterr', + 'parse_url', 'particle_trace', 'path_cache', 'path_sep', + 'pcomp', 'plot', 'plot3d', 'ploterr', 'plots', 'plot_3dbox', + 'plot_field', 'pnt_line', 'point_lun', 'polarplot', + 'polar_contour', 'polar_surface', 'poly', 'polyfill', + 'polyfillv', 'polygon', 'polyline', 'polyshade', 'polywarp', + 'poly_2d', 'poly_area', 'poly_fit', 'popd', 'powell', + 'pref_commit', 'pref_get', 'pref_set', 'prewitt', 'primes', + 'print', 'printd', 'product', 'profile', 'profiler', + 'profiles', 'project_vol', 'psafm', 'pseudo', + 'ps_show_fonts', 'ptrarr', 'ptr_free', 'ptr_new', + 'ptr_valid', 'pushd', 'p_correlate', 'qgrid3', 'qhull', + 'qromb', 'qromo', 'qsimp', 'query_ascii', 'query_bmp', + 'query_csv', 'query_dicom', 'query_gif', 'query_image', + 'query_jpeg', 'query_jpeg2000', 'query_mrsid', 'query_pict', + 'query_png', 'query_ppm', 'query_srf', 'query_tiff', + 'query_wav', 'radon', 'randomn', 'randomu', 'ranks', + 'rdpix', 'read', 'reads', 'readu', 'read_ascii', + 'read_binary', 'read_bmp', 'read_csv', 'read_dicom', + 'read_gif', 'read_image', 'read_interfile', 'read_jpeg', + 'read_jpeg2000', 'read_mrsid', 'read_pict', 'read_png', + 'read_ppm', 'read_spr', 'read_srf', 'read_sylk', + 'read_tiff', 'read_wav', 'read_wave', 'read_x11_bitmap', + 'read_xwd', 'real_part', 'rebin', 'recall_commands', + 'recon3', 'reduce_colors', 'reform', 'region_grow', + 'register_cursor', 'regress', 'replicate', + 'replicate_inplace', 'resolve_all', 'resolve_routine', + 'restore', 'retall', 'return', 'reverse', 'rk4', 'roberts', + 'rot', 'rotate', 'round', 'routine_filepath', + 'routine_info', 'rs_test', 'r_correlate', 'r_test', + 'save', 'savgol', 'scale3', 'scale3d', 'scope_level', + 'scope_traceback', 'scope_varfetch', 'scope_varname', + 'search2d', 'search3d', 'sem_create', 'sem_delete', + 'sem_lock', 
'sem_release', 'setenv', 'set_plot', + 'set_shading', 'sfit', 'shade_surf', 'shade_surf_irr', + 'shade_volume', 'shift', 'shift_diff', 'shmdebug', 'shmmap', + 'shmunmap', 'shmvar', 'show3', 'showfont', 'simplex', 'sin', + 'sindgen', 'sinh', 'size', 'skewness', 'skip_lun', + 'slicer3', 'slide_image', 'smooth', 'sobel', 'socket', + 'sort', 'spawn', 'spher_harm', 'sph_4pnt', 'sph_scat', + 'spline', 'spline_p', 'spl_init', 'spl_interp', 'sprsab', + 'sprsax', 'sprsin', 'sprstp', 'sqrt', 'standardize', + 'stddev', 'stop', 'strarr', 'strcmp', 'strcompress', + 'streamline', 'stregex', 'stretch', 'string', 'strjoin', + 'strlen', 'strlowcase', 'strmatch', 'strmessage', 'strmid', + 'strpos', 'strput', 'strsplit', 'strtrim', 'struct_assign', + 'struct_hide', 'strupcase', 'surface', 'surfr', 'svdc', + 'svdfit', 'svsol', 'swap_endian', 'swap_endian_inplace', + 'symbol', 'systime', 's_test', 't3d', 'tag_names', 'tan', + 'tanh', 'tek_color', 'temporary', 'tetra_clip', + 'tetra_surface', 'tetra_volume', 'text', 'thin', 'threed', + 'timegen', 'time_test2', 'tm_test', 'total', 'trace', + 'transpose', 'triangulate', 'trigrid', 'triql', 'trired', + 'trisol', 'tri_surf', 'truncate_lun', 'ts_coef', 'ts_diff', + 'ts_fcast', 'ts_smooth', 'tv', 'tvcrs', 'tvlct', 'tvrd', + 'tvscl', 'typename', 't_cvt', 't_pdf', 'uindgen', 'uint', + 'uintarr', 'ul64indgen', 'ulindgen', 'ulon64arr', 'ulonarr', + 'ulong', 'ulong64', 'uniq', 'unsharp_mask', 'usersym', + 'value_locate', 'variance', 'vector', 'vector_field', 'vel', + 'velovect', 'vert_t3d', 'voigt', 'voronoi', 'voxel_proj', + 'wait', 'warp_tri', 'watershed', 'wdelete', 'wf_draw', + 'where', 'widget_base', 'widget_button', 'widget_combobox', + 'widget_control', 'widget_displaycontextmen', 'widget_draw', + 'widget_droplist', 'widget_event', 'widget_info', + 'widget_label', 'widget_list', 'widget_propertysheet', + 'widget_slider', 'widget_tab', 'widget_table', + 'widget_text', 'widget_tree', 'widget_tree_move', + 'widget_window', 'wiener_filter', 
'window', 'writeu', + 'write_bmp', 'write_csv', 'write_gif', 'write_image', + 'write_jpeg', 'write_jpeg2000', 'write_nrif', 'write_pict', + 'write_png', 'write_ppm', 'write_spr', 'write_srf', + 'write_sylk', 'write_tiff', 'write_wav', 'write_wave', + 'wset', 'wshow', 'wtn', 'wv_applet', 'wv_cwt', + 'wv_cw_wavelet', 'wv_denoise', 'wv_dwt', 'wv_fn_coiflet', + 'wv_fn_daubechies', 'wv_fn_gaussian', 'wv_fn_haar', + 'wv_fn_morlet', 'wv_fn_paul', 'wv_fn_symlet', + 'wv_import_data', 'wv_import_wavelet', 'wv_plot3d_wps', + 'wv_plot_multires', 'wv_pwt', 'wv_tool_denoise', + 'xbm_edit', 'xdisplayfile', 'xdxf', 'xfont', + 'xinteranimate', 'xloadct', 'xmanager', 'xmng_tmpl', + 'xmtool', 'xobjview', 'xobjview_rotate', + 'xobjview_write_image', 'xpalette', 'xpcolor', 'xplot3d', + 'xregistered', 'xroi', 'xsq_test', 'xsurface', 'xvaredit', + 'xvolume', 'xvolume_rotate', 'xvolume_write_image', + 'xyouts', 'zoom', 'zoom_24') + """Functions from: http://www.exelisvis.com/docs/routines-1.html""" + + tokens = { + 'root': [ + (r'^\s*;.*?\n', Comment.Singleline), + (words(_RESERVED, prefix=r'\b', suffix=r'\b'), Keyword), + (words(_BUILTIN_LIB, prefix=r'\b', suffix=r'\b'), Name.Builtin), + (r'\+=|-=|\^=|\*=|/=|#=|##=|<=|>=|=', Operator), + (r'\+\+|--|->|\+|-|##|#|\*|/|<|>|&&|\^|~|\|\|\?|:', Operator), + (r'\b(mod=|lt=|le=|eq=|ne=|ge=|gt=|not=|and=|or=|xor=)', Operator), + (r'\b(mod|lt|le|eq|ne|ge|gt|not|and|or|xor)\b', Operator), + (r'\b[0-9](L|B|S|UL|ULL|LL)?\b', Number), + (r'.', Text), + ] + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/igor.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/igor.py new file mode 100644 index 0000000..dcf9770 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/igor.py @@ -0,0 +1,279 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.igor + ~~~~~~~~~~~~~~~~~~~~ + + Lexers for Igor Pro. 
+ + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, words +from pygments.token import Text, Comment, Keyword, Name, String + +__all__ = ['IgorLexer'] + + +class IgorLexer(RegexLexer): + """ + Pygments Lexer for Igor Pro procedure files (.ipf). + See http://www.wavemetrics.com/ and http://www.igorexchange.com/. + + .. versionadded:: 2.0 + """ + + name = 'Igor' + aliases = ['igor', 'igorpro'] + filenames = ['*.ipf'] + mimetypes = ['text/ipf'] + + flags = re.IGNORECASE | re.MULTILINE + + flowControl = ( + 'if', 'else', 'elseif', 'endif', 'for', 'endfor', 'strswitch', 'switch', + 'case', 'default', 'endswitch', 'do', 'while', 'try', 'catch', 'endtry', + 'break', 'continue', 'return', + ) + types = ( + 'variable', 'string', 'constant', 'strconstant', 'NVAR', 'SVAR', 'WAVE', + 'STRUCT', 'dfref' + ) + keywords = ( + 'override', 'ThreadSafe', 'static', 'FuncFit', 'Proc', 'Picture', + 'Prompt', 'DoPrompt', 'macro', 'window', 'graph', 'function', 'end', + 'Structure', 'EndStructure', 'EndMacro', 'Menu', 'SubMenu', + ) + operations = ( + 'Abort', 'AddFIFOData', 'AddFIFOVectData', 'AddMovieAudio', + 'AddMovieFrame', 'APMath', 'Append', 'AppendImage', + 'AppendLayoutObject', 'AppendMatrixContour', 'AppendText', + 'AppendToGraph', 'AppendToLayout', 'AppendToTable', 'AppendXYZContour', + 'AutoPositionWindow', 'BackgroundInfo', 'Beep', 'BoundingBall', + 'BrowseURL', 'BuildMenu', 'Button', 'cd', 'Chart', 'CheckBox', + 'CheckDisplayed', 'ChooseColor', 'Close', 'CloseMovie', 'CloseProc', + 'ColorScale', 'ColorTab2Wave', 'Concatenate', 'ControlBar', + 'ControlInfo', 'ControlUpdate', 'ConvexHull', 'Convolve', 'CopyFile', + 'CopyFolder', 'CopyScales', 'Correlate', 'CreateAliasShortcut', 'Cross', + 'CtrlBackground', 'CtrlFIFO', 'CtrlNamedBackground', 'Cursor', + 'CurveFit', 'CustomControl', 'CWT', 'Debugger', 'DebuggerOptions', + 'DefaultFont', 'DefaultGuiControls', 
'DefaultGuiFont', 'DefineGuide', + 'DelayUpdate', 'DeleteFile', 'DeleteFolder', 'DeletePoints', + 'Differentiate', 'dir', 'Display', 'DisplayHelpTopic', + 'DisplayProcedure', 'DoAlert', 'DoIgorMenu', 'DoUpdate', 'DoWindow', + 'DoXOPIdle', 'DrawAction', 'DrawArc', 'DrawBezier', 'DrawLine', + 'DrawOval', 'DrawPICT', 'DrawPoly', 'DrawRect', 'DrawRRect', 'DrawText', + 'DSPDetrend', 'DSPPeriodogram', 'Duplicate', 'DuplicateDataFolder', + 'DWT', 'EdgeStats', 'Edit', 'ErrorBars', 'Execute', 'ExecuteScriptText', + 'ExperimentModified', 'Extract', 'FastGaussTransform', 'FastOp', + 'FBinRead', 'FBinWrite', 'FFT', 'FIFO2Wave', 'FIFOStatus', 'FilterFIR', + 'FilterIIR', 'FindLevel', 'FindLevels', 'FindPeak', 'FindPointsInPoly', + 'FindRoots', 'FindSequence', 'FindValue', 'FPClustering', 'fprintf', + 'FReadLine', 'FSetPos', 'FStatus', 'FTPDelete', 'FTPDownload', + 'FTPUpload', 'FuncFit', 'FuncFitMD', 'GetAxis', 'GetFileFolderInfo', + 'GetLastUserMenuInfo', 'GetMarquee', 'GetSelection', 'GetWindow', + 'GraphNormal', 'GraphWaveDraw', 'GraphWaveEdit', 'Grep', 'GroupBox', + 'Hanning', 'HideIgorMenus', 'HideInfo', 'HideProcedures', 'HideTools', + 'HilbertTransform', 'Histogram', 'IFFT', 'ImageAnalyzeParticles', + 'ImageBlend', 'ImageBoundaryToMask', 'ImageEdgeDetection', + 'ImageFileInfo', 'ImageFilter', 'ImageFocus', 'ImageGenerateROIMask', + 'ImageHistModification', 'ImageHistogram', 'ImageInterpolate', + 'ImageLineProfile', 'ImageLoad', 'ImageMorphology', 'ImageRegistration', + 'ImageRemoveBackground', 'ImageRestore', 'ImageRotate', 'ImageSave', + 'ImageSeedFill', 'ImageSnake', 'ImageStats', 'ImageThreshold', + 'ImageTransform', 'ImageUnwrapPhase', 'ImageWindow', 'IndexSort', + 'InsertPoints', 'Integrate', 'IntegrateODE', 'Interp3DPath', + 'Interpolate3D', 'KillBackground', 'KillControl', 'KillDataFolder', + 'KillFIFO', 'KillFreeAxis', 'KillPath', 'KillPICTs', 'KillStrings', + 'KillVariables', 'KillWaves', 'KillWindow', 'KMeans', 'Label', 'Layout', + 'Legend', 
'LinearFeedbackShiftRegister', 'ListBox', 'LoadData', + 'LoadPackagePreferences', 'LoadPICT', 'LoadWave', 'Loess', + 'LombPeriodogram', 'Make', 'MakeIndex', 'MarkPerfTestTime', + 'MatrixConvolve', 'MatrixCorr', 'MatrixEigenV', 'MatrixFilter', + 'MatrixGaussJ', 'MatrixInverse', 'MatrixLinearSolve', + 'MatrixLinearSolveTD', 'MatrixLLS', 'MatrixLUBkSub', 'MatrixLUD', + 'MatrixMultiply', 'MatrixOP', 'MatrixSchur', 'MatrixSolve', + 'MatrixSVBkSub', 'MatrixSVD', 'MatrixTranspose', 'MeasureStyledText', + 'Modify', 'ModifyContour', 'ModifyControl', 'ModifyControlList', + 'ModifyFreeAxis', 'ModifyGraph', 'ModifyImage', 'ModifyLayout', + 'ModifyPanel', 'ModifyTable', 'ModifyWaterfall', 'MoveDataFolder', + 'MoveFile', 'MoveFolder', 'MoveString', 'MoveSubwindow', 'MoveVariable', + 'MoveWave', 'MoveWindow', 'NeuralNetworkRun', 'NeuralNetworkTrain', + 'NewDataFolder', 'NewFIFO', 'NewFIFOChan', 'NewFreeAxis', 'NewImage', + 'NewLayout', 'NewMovie', 'NewNotebook', 'NewPanel', 'NewPath', + 'NewWaterfall', 'Note', 'Notebook', 'NotebookAction', 'Open', + 'OpenNotebook', 'Optimize', 'ParseOperationTemplate', 'PathInfo', + 'PauseForUser', 'PauseUpdate', 'PCA', 'PlayMovie', 'PlayMovieAction', + 'PlaySnd', 'PlaySound', 'PopupContextualMenu', 'PopupMenu', + 'Preferences', 'PrimeFactors', 'Print', 'printf', 'PrintGraphs', + 'PrintLayout', 'PrintNotebook', 'PrintSettings', 'PrintTable', + 'Project', 'PulseStats', 'PutScrapText', 'pwd', 'Quit', + 'RatioFromNumber', 'Redimension', 'Remove', 'RemoveContour', + 'RemoveFromGraph', 'RemoveFromLayout', 'RemoveFromTable', 'RemoveImage', + 'RemoveLayoutObjects', 'RemovePath', 'Rename', 'RenameDataFolder', + 'RenamePath', 'RenamePICT', 'RenameWindow', 'ReorderImages', + 'ReorderTraces', 'ReplaceText', 'ReplaceWave', 'Resample', + 'ResumeUpdate', 'Reverse', 'Rotate', 'Save', 'SaveData', + 'SaveExperiment', 'SaveGraphCopy', 'SaveNotebook', + 'SavePackagePreferences', 'SavePICT', 'SaveTableCopy', + 'SetActiveSubwindow', 'SetAxis', 'SetBackground', 
'SetDashPattern', + 'SetDataFolder', 'SetDimLabel', 'SetDrawEnv', 'SetDrawLayer', + 'SetFileFolderInfo', 'SetFormula', 'SetIgorHook', 'SetIgorMenuMode', + 'SetIgorOption', 'SetMarquee', 'SetProcessSleep', 'SetRandomSeed', + 'SetScale', 'SetVariable', 'SetWaveLock', 'SetWindow', 'ShowIgorMenus', + 'ShowInfo', 'ShowTools', 'Silent', 'Sleep', 'Slider', 'Smooth', + 'SmoothCustom', 'Sort', 'SoundInRecord', 'SoundInSet', + 'SoundInStartChart', 'SoundInStatus', 'SoundInStopChart', + 'SphericalInterpolate', 'SphericalTriangulate', 'SplitString', + 'sprintf', 'sscanf', 'Stack', 'StackWindows', + 'StatsAngularDistanceTest', 'StatsANOVA1Test', 'StatsANOVA2NRTest', + 'StatsANOVA2RMTest', 'StatsANOVA2Test', 'StatsChiTest', + 'StatsCircularCorrelationTest', 'StatsCircularMeans', + 'StatsCircularMoments', 'StatsCircularTwoSampleTest', + 'StatsCochranTest', 'StatsContingencyTable', 'StatsDIPTest', + 'StatsDunnettTest', 'StatsFriedmanTest', 'StatsFTest', + 'StatsHodgesAjneTest', 'StatsJBTest', 'StatsKendallTauTest', + 'StatsKSTest', 'StatsKWTest', 'StatsLinearCorrelationTest', + 'StatsLinearRegression', 'StatsMultiCorrelationTest', + 'StatsNPMCTest', 'StatsNPNominalSRTest', 'StatsQuantiles', + 'StatsRankCorrelationTest', 'StatsResample', 'StatsSample', + 'StatsScheffeTest', 'StatsSignTest', 'StatsSRTest', 'StatsTTest', + 'StatsTukeyTest', 'StatsVariancesTest', 'StatsWatsonUSquaredTest', + 'StatsWatsonWilliamsTest', 'StatsWheelerWatsonTest', + 'StatsWilcoxonRankTest', 'StatsWRCorrelationTest', 'String', + 'StructGet', 'StructPut', 'TabControl', 'Tag', 'TextBox', 'Tile', + 'TileWindows', 'TitleBox', 'ToCommandLine', 'ToolsGrid', + 'Triangulate3d', 'Unwrap', 'ValDisplay', 'Variable', 'WaveMeanStdv', + 'WaveStats', 'WaveTransform', 'wfprintf', 'WignerTransform', + 'WindowFunction', + ) + functions = ( + 'abs', 'acos', 'acosh', 'AiryA', 'AiryAD', 'AiryB', 'AiryBD', 'alog', + 'area', 'areaXY', 'asin', 'asinh', 'atan', 'atan2', 'atanh', + 'AxisValFromPixel', 'Besseli', 'Besselj', 
'Besselk', 'Bessely', 'bessi', + 'bessj', 'bessk', 'bessy', 'beta', 'betai', 'BinarySearch', + 'BinarySearchInterp', 'binomial', 'binomialln', 'binomialNoise', 'cabs', + 'CaptureHistoryStart', 'ceil', 'cequal', 'char2num', 'chebyshev', + 'chebyshevU', 'CheckName', 'cmplx', 'cmpstr', 'conj', 'ContourZ', 'cos', + 'cosh', 'cot', 'CountObjects', 'CountObjectsDFR', 'cpowi', + 'CreationDate', 'csc', 'DataFolderExists', 'DataFolderRefsEqual', + 'DataFolderRefStatus', 'date2secs', 'datetime', 'DateToJulian', + 'Dawson', 'DDEExecute', 'DDEInitiate', 'DDEPokeString', 'DDEPokeWave', + 'DDERequestWave', 'DDEStatus', 'DDETerminate', 'deltax', 'digamma', + 'DimDelta', 'DimOffset', 'DimSize', 'ei', 'enoise', 'equalWaves', 'erf', + 'erfc', 'exists', 'exp', 'expInt', 'expNoise', 'factorial', 'fakedata', + 'faverage', 'faverageXY', 'FindDimLabel', 'FindListItem', 'floor', + 'FontSizeHeight', 'FontSizeStringWidth', 'FresnelCos', 'FresnelSin', + 'gamma', 'gammaInc', 'gammaNoise', 'gammln', 'gammp', 'gammq', 'Gauss', + 'Gauss1D', 'Gauss2D', 'gcd', 'GetDefaultFontSize', + 'GetDefaultFontStyle', 'GetKeyState', 'GetRTError', 'gnoise', + 'GrepString', 'hcsr', 'hermite', 'hermiteGauss', 'HyperG0F1', + 'HyperG1F1', 'HyperG2F1', 'HyperGNoise', 'HyperGPFQ', 'IgorVersion', + 'ilim', 'imag', 'Inf', 'Integrate1D', 'interp', 'Interp2D', 'Interp3D', + 'inverseERF', 'inverseERFC', 'ItemsInList', 'jlim', 'Laguerre', + 'LaguerreA', 'LaguerreGauss', 'leftx', 'LegendreA', 'limit', 'ln', + 'log', 'logNormalNoise', 'lorentzianNoise', 'magsqr', 'MandelbrotPoint', + 'MarcumQ', 'MatrixDet', 'MatrixDot', 'MatrixRank', 'MatrixTrace', 'max', + 'mean', 'min', 'mod', 'ModDate', 'NaN', 'norm', 'NumberByKey', + 'numpnts', 'numtype', 'NumVarOrDefault', 'NVAR_Exists', 'p2rect', + 'ParamIsDefault', 'pcsr', 'Pi', 'PixelFromAxisVal', 'pnt2x', + 'poissonNoise', 'poly', 'poly2D', 'PolygonArea', 'qcsr', 'r2polar', + 'real', 'rightx', 'round', 'sawtooth', 'ScreenResolution', 'sec', + 'SelectNumber', 'sign', 'sin', 'sinc', 
'sinh', 'SphericalBessJ', + 'SphericalBessJD', 'SphericalBessY', 'SphericalBessYD', + 'SphericalHarmonics', 'sqrt', 'StartMSTimer', 'StatsBetaCDF', + 'StatsBetaPDF', 'StatsBinomialCDF', 'StatsBinomialPDF', + 'StatsCauchyCDF', 'StatsCauchyPDF', 'StatsChiCDF', 'StatsChiPDF', + 'StatsCMSSDCDF', 'StatsCorrelation', 'StatsDExpCDF', 'StatsDExpPDF', + 'StatsErlangCDF', 'StatsErlangPDF', 'StatsErrorPDF', 'StatsEValueCDF', + 'StatsEValuePDF', 'StatsExpCDF', 'StatsExpPDF', 'StatsFCDF', + 'StatsFPDF', 'StatsFriedmanCDF', 'StatsGammaCDF', 'StatsGammaPDF', + 'StatsGeometricCDF', 'StatsGeometricPDF', 'StatsHyperGCDF', + 'StatsHyperGPDF', 'StatsInvBetaCDF', 'StatsInvBinomialCDF', + 'StatsInvCauchyCDF', 'StatsInvChiCDF', 'StatsInvCMSSDCDF', + 'StatsInvDExpCDF', 'StatsInvEValueCDF', 'StatsInvExpCDF', + 'StatsInvFCDF', 'StatsInvFriedmanCDF', 'StatsInvGammaCDF', + 'StatsInvGeometricCDF', 'StatsInvKuiperCDF', 'StatsInvLogisticCDF', + 'StatsInvLogNormalCDF', 'StatsInvMaxwellCDF', 'StatsInvMooreCDF', + 'StatsInvNBinomialCDF', 'StatsInvNCChiCDF', 'StatsInvNCFCDF', + 'StatsInvNormalCDF', 'StatsInvParetoCDF', 'StatsInvPoissonCDF', + 'StatsInvPowerCDF', 'StatsInvQCDF', 'StatsInvQpCDF', + 'StatsInvRayleighCDF', 'StatsInvRectangularCDF', 'StatsInvSpearmanCDF', + 'StatsInvStudentCDF', 'StatsInvTopDownCDF', 'StatsInvTriangularCDF', + 'StatsInvUsquaredCDF', 'StatsInvVonMisesCDF', 'StatsInvWeibullCDF', + 'StatsKuiperCDF', 'StatsLogisticCDF', 'StatsLogisticPDF', + 'StatsLogNormalCDF', 'StatsLogNormalPDF', 'StatsMaxwellCDF', + 'StatsMaxwellPDF', 'StatsMedian', 'StatsMooreCDF', 'StatsNBinomialCDF', + 'StatsNBinomialPDF', 'StatsNCChiCDF', 'StatsNCChiPDF', 'StatsNCFCDF', + 'StatsNCFPDF', 'StatsNCTCDF', 'StatsNCTPDF', 'StatsNormalCDF', + 'StatsNormalPDF', 'StatsParetoCDF', 'StatsParetoPDF', 'StatsPermute', + 'StatsPoissonCDF', 'StatsPoissonPDF', 'StatsPowerCDF', + 'StatsPowerNoise', 'StatsPowerPDF', 'StatsQCDF', 'StatsQpCDF', + 'StatsRayleighCDF', 'StatsRayleighPDF', 'StatsRectangularCDF', + 
'StatsRectangularPDF', 'StatsRunsCDF', 'StatsSpearmanRhoCDF', + 'StatsStudentCDF', 'StatsStudentPDF', 'StatsTopDownCDF', + 'StatsTriangularCDF', 'StatsTriangularPDF', 'StatsTrimmedMean', + 'StatsUSquaredCDF', 'StatsVonMisesCDF', 'StatsVonMisesNoise', + 'StatsVonMisesPDF', 'StatsWaldCDF', 'StatsWaldPDF', 'StatsWeibullCDF', + 'StatsWeibullPDF', 'StopMSTimer', 'str2num', 'stringCRC', 'stringmatch', + 'strlen', 'strsearch', 'StudentA', 'StudentT', 'sum', 'SVAR_Exists', + 'TagVal', 'tan', 'tanh', 'ThreadGroupCreate', 'ThreadGroupRelease', + 'ThreadGroupWait', 'ThreadProcessorCount', 'ThreadReturnValue', 'ticks', + 'trunc', 'Variance', 'vcsr', 'WaveCRC', 'WaveDims', 'WaveExists', + 'WaveMax', 'WaveMin', 'WaveRefsEqual', 'WaveType', 'WhichListItem', + 'WinType', 'WNoise', 'x', 'x2pnt', 'xcsr', 'y', 'z', 'zcsr', 'ZernikeR', + ) + functions += ( + 'AddListItem', 'AnnotationInfo', 'AnnotationList', 'AxisInfo', + 'AxisList', 'CaptureHistory', 'ChildWindowList', 'CleanupName', + 'ContourInfo', 'ContourNameList', 'ControlNameList', 'CsrInfo', + 'CsrWave', 'CsrXWave', 'CTabList', 'DataFolderDir', 'date', + 'DDERequestString', 'FontList', 'FuncRefInfo', 'FunctionInfo', + 'FunctionList', 'FunctionPath', 'GetDataFolder', 'GetDefaultFont', + 'GetDimLabel', 'GetErrMessage', 'GetFormula', + 'GetIndependentModuleName', 'GetIndexedObjName', 'GetIndexedObjNameDFR', + 'GetRTErrMessage', 'GetRTStackInfo', 'GetScrapText', 'GetUserData', + 'GetWavesDataFolder', 'GrepList', 'GuideInfo', 'GuideNameList', 'Hash', + 'IgorInfo', 'ImageInfo', 'ImageNameList', 'IndexedDir', 'IndexedFile', + 'JulianToDate', 'LayoutInfo', 'ListMatch', 'LowerStr', 'MacroList', + 'NameOfWave', 'note', 'num2char', 'num2istr', 'num2str', + 'OperationList', 'PadString', 'ParseFilePath', 'PathList', 'PICTInfo', + 'PICTList', 'PossiblyQuoteName', 'ProcedureText', 'RemoveByKey', + 'RemoveEnding', 'RemoveFromList', 'RemoveListItem', + 'ReplaceNumberByKey', 'ReplaceString', 'ReplaceStringByKey', + 'Secs2Date', 'Secs2Time', 
'SelectString', 'SortList', + 'SpecialCharacterInfo', 'SpecialCharacterList', 'SpecialDirPath', + 'StringByKey', 'StringFromList', 'StringList', 'StrVarOrDefault', + 'TableInfo', 'TextFile', 'ThreadGroupGetDF', 'time', 'TraceFromPixel', + 'TraceInfo', 'TraceNameList', 'UniqueName', 'UnPadString', 'UpperStr', + 'VariableList', 'WaveInfo', 'WaveList', 'WaveName', 'WaveUnits', + 'WinList', 'WinName', 'WinRecreation', 'XWaveName', + 'ContourNameToWaveRef', 'CsrWaveRef', 'CsrXWaveRef', + 'ImageNameToWaveRef', 'NewFreeWave', 'TagWaveRef', 'TraceNameToWaveRef', + 'WaveRefIndexed', 'XWaveRefFromTrace', 'GetDataFolderDFR', + 'GetWavesDataFolderDFR', 'NewFreeDataFolder', 'ThreadGroupGetDFR', + ) + + tokens = { + 'root': [ + (r'//.*$', Comment.Single), + (r'"([^"\\]|\\.)*"', String), + # Flow Control. + (words(flowControl, prefix=r'\b', suffix=r'\b'), Keyword), + # Types. + (words(types, prefix=r'\b', suffix=r'\b'), Keyword.Type), + # Keywords. + (words(keywords, prefix=r'\b', suffix=r'\b'), Keyword.Reserved), + # Built-in operations. + (words(operations, prefix=r'\b', suffix=r'\b'), Name.Class), + # Built-in functions. + (words(functions, prefix=r'\b', suffix=r'\b'), Name.Function), + # Compiler directives. + (r'^#(include|pragma|define|ifdef|ifndef|endif)', + Name.Decorator), + (r'[^a-z"/]+$', Text), + (r'.', Text), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/inferno.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/inferno.py new file mode 100644 index 0000000..706be0c --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/inferno.py @@ -0,0 +1,96 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.inferno + ~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for Inferno os and all the related stuff. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import re + +from pygments.lexer import RegexLexer, include, bygroups, default +from pygments.token import Punctuation, Text, Comment, Operator, Keyword, \ + Name, String, Number + +__all__ = ['LimboLexer'] + + +class LimboLexer(RegexLexer): + """ + Lexer for `Limbo programming language `_ + + TODO: + - maybe implement better var declaration highlighting + - some simple syntax error highlighting + + .. versionadded:: 2.0 + """ + name = 'Limbo' + aliases = ['limbo'] + filenames = ['*.b'] + mimetypes = ['text/limbo'] + + tokens = { + 'whitespace': [ + (r'^(\s*)([a-zA-Z_]\w*:(\s*)\n)', + bygroups(Text, Name.Label)), + (r'\n', Text), + (r'\s+', Text), + (r'#(\n|(.|\n)*?[^\\]\n)', Comment.Single), + ], + 'string': [ + (r'"', String, '#pop'), + (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|' + r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape), + (r'[^\\"\n]+', String), # all other characters + (r'\\', String), # stray backslash + ], + 'statements': [ + (r'"', String, 'string'), + (r"'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char), + (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float), + (r'(\d+\.\d*|\.\d+|\d+[fF])', Number.Float), + (r'16r[0-9a-fA-F]+', Number.Hex), + (r'8r[0-7]+', Number.Oct), + (r'((([1-3]\d)|([2-9]))r)?(\d+)', Number.Integer), + (r'[()\[\],.]', Punctuation), + (r'[~!%^&*+=|?:<>/-]|(->)|(<-)|(=>)|(::)', Operator), + (r'(alt|break|case|continue|cyclic|do|else|exit' + r'for|hd|if|implement|import|include|len|load|or' + r'pick|return|spawn|tagof|tl|to|while)\b', Keyword), + (r'(byte|int|big|real|string|array|chan|list|adt' + r'|fn|ref|of|module|self|type)\b', Keyword.Type), + (r'(con|iota|nil)\b', Keyword.Constant), + ('[a-zA-Z_]\w*', Name), + ], + 'statement' : [ + include('whitespace'), + include('statements'), + ('[{}]', Punctuation), + (';', Punctuation, '#pop'), + ], + 'root': [ + include('whitespace'), + default('statement'), + ], + } + + def analyse_text(text): + # Any limbo module implements something + if 
re.search(r'^implement \w+;', text, re.MULTILINE): + return 0.7 + +# TODO: +# - Make lexers for: +# - asm sources +# - man pages +# - mkfiles +# - module definitions +# - namespace definitions +# - shell scripts +# - maybe keyfiles and fonts +# they all seem to be quite similar to their equivalents +# from unix world, so there should not be a lot of problems diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/installers.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/installers.py new file mode 100644 index 0000000..769bce6 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/installers.py @@ -0,0 +1,322 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.installers + ~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for installer/packager DSLs and formats. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include, bygroups, using, this, default +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Punctuation, Generic, Number, Whitespace + +__all__ = ['NSISLexer', 'RPMSpecLexer', 'SourcesListLexer', + 'DebianControlLexer'] + + +class NSISLexer(RegexLexer): + """ + For `NSIS `_ scripts. + + .. 
versionadded:: 1.6 + """ + name = 'NSIS' + aliases = ['nsis', 'nsi', 'nsh'] + filenames = ['*.nsi', '*.nsh'] + mimetypes = ['text/x-nsis'] + + flags = re.IGNORECASE + + tokens = { + 'root': [ + (r'[;#].*\n', Comment), + (r"'.*?'", String.Single), + (r'"', String.Double, 'str_double'), + (r'`', String.Backtick, 'str_backtick'), + include('macro'), + include('interpol'), + include('basic'), + (r'\$\{[a-z_|][\w|]*\}', Keyword.Pseudo), + (r'/[a-z_]\w*', Name.Attribute), + ('.', Text), + ], + 'basic': [ + (r'(\n)(Function)(\s+)([._a-z][.\w]*)\b', + bygroups(Text, Keyword, Text, Name.Function)), + (r'\b([_a-z]\w*)(::)([a-z][a-z0-9]*)\b', + bygroups(Keyword.Namespace, Punctuation, Name.Function)), + (r'\b([_a-z]\w*)(:)', bygroups(Name.Label, Punctuation)), + (r'(\b[ULS]|\B)([!<>=]?=|\<\>?|\>)\B', Operator), + (r'[|+-]', Operator), + (r'\\', Punctuation), + (r'\b(Abort|Add(?:BrandingImage|Size)|' + r'Allow(?:RootDirInstall|SkipFiles)|AutoCloseWindow|' + r'BG(?:Font|Gradient)|BrandingText|BringToFront|Call(?:InstDLL)?|' + r'(?:Sub)?Caption|ChangeUI|CheckBitmap|ClearErrors|CompletedText|' + r'ComponentText|CopyFiles|CRCCheck|' + r'Create(?:Directory|Font|Shortcut)|Delete(?:INI(?:Sec|Str)|' + r'Reg(?:Key|Value))?|DetailPrint|DetailsButtonText|' + r'Dir(?:Show|Text|Var|Verify)|(?:Disabled|Enabled)Bitmap|' + r'EnableWindow|EnumReg(?:Key|Value)|Exch|Exec(?:Shell|Wait)?|' + r'ExpandEnvStrings|File(?:BufSize|Close|ErrorText|Open|' + r'Read(?:Byte)?|Seek|Write(?:Byte)?)?|' + r'Find(?:Close|First|Next|Window)|FlushINI|Function(?:End)?|' + r'Get(?:CurInstType|CurrentAddress|DlgItem|DLLVersion(?:Local)?|' + r'ErrorLevel|FileTime(?:Local)?|FullPathName|FunctionAddress|' + r'InstDirError|LabelAddress|TempFileName)|' + r'Goto|HideWindow|Icon|' + r'If(?:Abort|Errors|FileExists|RebootFlag|Silent)|' + r'InitPluginsDir|Install(?:ButtonText|Colors|Dir(?:RegKey)?)|' + r'Inst(?:ProgressFlags|Type(?:[GS]etText)?)|Int(?:CmpU?|Fmt|Op)|' + r'IsWindow|LangString(?:UP)?|' + 
r'License(?:BkColor|Data|ForceSelection|LangString|Text)|' + r'LoadLanguageFile|LockWindow|Log(?:Set|Text)|MessageBox|' + r'MiscButtonText|Name|Nop|OutFile|(?:Uninst)?Page(?:Ex(?:End)?)?|' + r'PluginDir|Pop|Push|Quit|Read(?:(?:Env|INI|Reg)Str|RegDWORD)|' + r'Reboot|(?:Un)?RegDLL|Rename|RequestExecutionLevel|ReserveFile|' + r'Return|RMDir|SearchPath|Section(?:Divider|End|' + r'(?:(?:Get|Set)(?:Flags|InstTypes|Size|Text))|Group(?:End)?|In)?|' + r'SendMessage|Set(?:AutoClose|BrandingImage|Compress(?:ionLevel|' + r'or(?:DictSize)?)?|CtlColors|CurInstType|DatablockOptimize|' + r'DateSave|Details(?:Print|View)|Error(?:s|Level)|FileAttributes|' + r'Font|OutPath|Overwrite|PluginUnload|RebootFlag|ShellVarContext|' + r'Silent|StaticBkColor)|' + r'Show(?:(?:I|Uni)nstDetails|Window)|Silent(?:Un)?Install|Sleep|' + r'SpaceTexts|Str(?:CmpS?|Cpy|Len)|SubSection(?:End)?|' + r'Uninstall(?:ButtonText|(?:Sub)?Caption|EXEName|Icon|Text)|' + r'UninstPage|Var|VI(?:AddVersionKey|ProductVersion)|WindowIcon|' + r'Write(?:INIStr|Reg(:?Bin|DWORD|(?:Expand)?Str)|Uninstaller)|' + r'XPStyle)\b', Keyword), + (r'\b(CUR|END|(?:FILE_ATTRIBUTE_)?' 
+ r'(?:ARCHIVE|HIDDEN|NORMAL|OFFLINE|READONLY|SYSTEM|TEMPORARY)|' + r'HK(CC|CR|CU|DD|LM|PD|U)|' + r'HKEY_(?:CLASSES_ROOT|CURRENT_(?:CONFIG|USER)|DYN_DATA|' + r'LOCAL_MACHINE|PERFORMANCE_DATA|USERS)|' + r'ID(?:ABORT|CANCEL|IGNORE|NO|OK|RETRY|YES)|' + r'MB_(?:ABORTRETRYIGNORE|DEFBUTTON[1-4]|' + r'ICON(?:EXCLAMATION|INFORMATION|QUESTION|STOP)|' + r'OK(?:CANCEL)?|RETRYCANCEL|RIGHT|SETFOREGROUND|TOPMOST|USERICON|' + r'YESNO(?:CANCEL)?)|SET|SHCTX|' + r'SW_(?:HIDE|SHOW(?:MAXIMIZED|MINIMIZED|NORMAL))|' + r'admin|all|auto|both|bottom|bzip2|checkbox|colored|current|false|' + r'force|hide|highest|if(?:diff|newer)|lastused|leave|left|' + r'listonly|lzma|nevershow|none|normal|off|on|pop|push|' + r'radiobuttons|right|show|silent|silentlog|smooth|textonly|top|' + r'true|try|user|zlib)\b', Name.Constant), + ], + 'macro': [ + (r'\!(addincludedir(?:dir)?|addplugindir|appendfile|cd|define|' + r'delfilefile|echo(?:message)?|else|endif|error|execute|' + r'if(?:macro)?n?(?:def)?|include|insertmacro|macro(?:end)?|packhdr|' + r'search(?:parse|replace)|system|tempfilesymbol|undef|verbose|' + r'warning)\b', Comment.Preproc), + ], + 'interpol': [ + (r'\$(R?[0-9])', Name.Builtin.Pseudo), # registers + (r'\$(ADMINTOOLS|APPDATA|CDBURN_AREA|COOKIES|COMMONFILES(?:32|64)|' + r'DESKTOP|DOCUMENTS|EXE(?:DIR|FILE|PATH)|FAVORITES|FONTS|HISTORY|' + r'HWNDPARENT|INTERNET_CACHE|LOCALAPPDATA|MUSIC|NETHOOD|PICTURES|' + r'PLUGINSDIR|PRINTHOOD|PROFILE|PROGRAMFILES(?:32|64)|QUICKLAUNCH|' + r'RECENT|RESOURCES(?:_LOCALIZED)?|SENDTO|SM(?:PROGRAMS|STARTUP)|' + r'STARTMENU|SYSDIR|TEMP(?:LATES)?|VIDEOS|WINDIR|\{NSISDIR\})', + Name.Builtin), + (r'\$(CMDLINE|INSTDIR|OUTDIR|LANGUAGE)', Name.Variable.Global), + (r'\$[a-z_]\w*', Name.Variable), + ], + 'str_double': [ + (r'"', String, '#pop'), + (r'\$(\\[nrt"]|\$)', String.Escape), + include('interpol'), + (r'.', String.Double), + ], + 'str_backtick': [ + (r'`', String, '#pop'), + (r'\$(\\[nrt"]|\$)', String.Escape), + include('interpol'), + (r'.', String.Double), + ], + 
} + + +class RPMSpecLexer(RegexLexer): + """ + For RPM ``.spec`` files. + + .. versionadded:: 1.6 + """ + + name = 'RPMSpec' + aliases = ['spec'] + filenames = ['*.spec'] + mimetypes = ['text/x-rpm-spec'] + + _directives = ('(?:package|prep|build|install|clean|check|pre[a-z]*|' + 'post[a-z]*|trigger[a-z]*|files)') + + tokens = { + 'root': [ + (r'#.*\n', Comment), + include('basic'), + ], + 'description': [ + (r'^(%' + _directives + ')(.*)$', + bygroups(Name.Decorator, Text), '#pop'), + (r'\n', Text), + (r'.', Text), + ], + 'changelog': [ + (r'\*.*\n', Generic.Subheading), + (r'^(%' + _directives + ')(.*)$', + bygroups(Name.Decorator, Text), '#pop'), + (r'\n', Text), + (r'.', Text), + ], + 'string': [ + (r'"', String.Double, '#pop'), + (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), + include('interpol'), + (r'.', String.Double), + ], + 'basic': [ + include('macro'), + (r'(?i)^(Name|Version|Release|Epoch|Summary|Group|License|Packager|' + r'Vendor|Icon|URL|Distribution|Prefix|Patch[0-9]*|Source[0-9]*|' + r'Requires\(?[a-z]*\)?|[a-z]+Req|Obsoletes|Suggests|Provides|Conflicts|' + r'Build[a-z]+|[a-z]+Arch|Auto[a-z]+)(:)(.*)$', + bygroups(Generic.Heading, Punctuation, using(this))), + (r'^%description', Name.Decorator, 'description'), + (r'^%changelog', Name.Decorator, 'changelog'), + (r'^(%' + _directives + ')(.*)$', bygroups(Name.Decorator, Text)), + (r'%(attr|defattr|dir|doc(?:dir)?|setup|config(?:ure)?|' + r'make(?:install)|ghost|patch[0-9]+|find_lang|exclude|verify)', + Keyword), + include('interpol'), + (r"'.*?'", String.Single), + (r'"', String.Double, 'string'), + (r'.', Text), + ], + 'macro': [ + (r'%define.*\n', Comment.Preproc), + (r'%\{\!\?.*%define.*\}', Comment.Preproc), + (r'(%(?:if(?:n?arch)?|else(?:if)?|endif))(.*)$', + bygroups(Comment.Preproc, Text)), + ], + 'interpol': [ + (r'%\{?__[a-z_]+\}?', Name.Function), + (r'%\{?_([a-z_]+dir|[a-z_]+path|prefix)\}?', Keyword.Pseudo), + (r'%\{\?\w+\}', Name.Variable), + 
(r'\$\{?RPM_[A-Z0-9_]+\}?', Name.Variable.Global), + (r'%\{[a-zA-Z]\w+\}', Keyword.Constant), + ] + } + + +class SourcesListLexer(RegexLexer): + """ + Lexer that highlights debian sources.list files. + + .. versionadded:: 0.7 + """ + + name = 'Debian Sourcelist' + aliases = ['sourceslist', 'sources.list', 'debsources'] + filenames = ['sources.list'] + mimetype = ['application/x-debian-sourceslist'] + + tokens = { + 'root': [ + (r'\s+', Text), + (r'#.*?$', Comment), + (r'^(deb(?:-src)?)(\s+)', + bygroups(Keyword, Text), 'distribution') + ], + 'distribution': [ + (r'#.*?$', Comment, '#pop'), + (r'\$\(ARCH\)', Name.Variable), + (r'[^\s$[]+', String), + (r'\[', String.Other, 'escaped-distribution'), + (r'\$', String), + (r'\s+', Text, 'components') + ], + 'escaped-distribution': [ + (r'\]', String.Other, '#pop'), + (r'\$\(ARCH\)', Name.Variable), + (r'[^\]$]+', String.Other), + (r'\$', String.Other) + ], + 'components': [ + (r'#.*?$', Comment, '#pop:2'), + (r'$', Text, '#pop:2'), + (r'\s+', Text), + (r'\S+', Keyword.Pseudo), + ] + } + + def analyse_text(text): + for line in text.splitlines(): + line = line.strip() + if line.startswith('deb ') or line.startswith('deb-src '): + return True + + +class DebianControlLexer(RegexLexer): + """ + Lexer for Debian ``control`` files and ``apt-cache show `` outputs. + + .. 
versionadded:: 0.9 + """ + name = 'Debian Control file' + aliases = ['control', 'debcontrol'] + filenames = ['control'] + + tokens = { + 'root': [ + (r'^(Description)', Keyword, 'description'), + (r'^(Maintainer)(:\s*)', bygroups(Keyword, Text), 'maintainer'), + (r'^((Build-)?Depends)', Keyword, 'depends'), + (r'^((?:Python-)?Version)(:\s*)(\S+)$', + bygroups(Keyword, Text, Number)), + (r'^((?:Installed-)?Size)(:\s*)(\S+)$', + bygroups(Keyword, Text, Number)), + (r'^(MD5Sum|SHA1|SHA256)(:\s*)(\S+)$', + bygroups(Keyword, Text, Number)), + (r'^([a-zA-Z\-0-9\.]*?)(:\s*)(.*?)$', + bygroups(Keyword, Whitespace, String)), + ], + 'maintainer': [ + (r'<[^>]+>', Generic.Strong), + (r'<[^>]+>$', Generic.Strong, '#pop'), + (r',\n?', Text), + (r'.', Text), + ], + 'description': [ + (r'(.*)(Homepage)(: )(\S+)', + bygroups(Text, String, Name, Name.Class)), + (r':.*\n', Generic.Strong), + (r' .*\n', Text), + default('#pop'), + ], + 'depends': [ + (r':\s*', Text), + (r'(\$)(\{)(\w+\s*:\s*\w+)', bygroups(Operator, Text, Name.Entity)), + (r'\(', Text, 'depend_vers'), + (r',', Text), + (r'\|', Operator), + (r'[\s]+', Text), + (r'[})]\s*$', Text, '#pop'), + (r'\}', Text), + (r'[^,]$', Name.Function, '#pop'), + (r'([+.a-zA-Z0-9-])(\s*)', bygroups(Name.Function, Text)), + (r'\[.*?\]', Name.Entity), + ], + 'depend_vers': [ + (r'\),', Text, '#pop'), + (r'\)[^,]', Text, '#pop:2'), + (r'([><=]+)(\s*)([^)]+)', bygroups(Operator, Text, Number)) + ] + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/int_fiction.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/int_fiction.py new file mode 100644 index 0000000..7b004c2 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/int_fiction.py @@ -0,0 +1,1342 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.int_fiction + ~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for interactive fiction languages. 
+ + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include, bygroups, using, \ + this, default, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Error, Generic + +__all__ = ['Inform6Lexer', 'Inform6TemplateLexer', 'Inform7Lexer', + 'Tads3Lexer'] + + +class Inform6Lexer(RegexLexer): + """ + For `Inform 6 `_ source code. + + .. versionadded:: 2.0 + """ + + name = 'Inform 6' + aliases = ['inform6', 'i6'] + filenames = ['*.inf'] + + flags = re.MULTILINE | re.DOTALL | re.UNICODE + + _name = r'[a-zA-Z_]\w*' + + # Inform 7 maps these four character classes to their ASCII + # equivalents. To support Inform 6 inclusions within Inform 7, + # Inform6Lexer maps them too. + _dash = u'\\-\u2010-\u2014' + _dquote = u'"\u201c\u201d' + _squote = u"'\u2018\u2019" + _newline = u'\\n\u0085\u2028\u2029' + + tokens = { + 'root': [ + (r'\A(!%%[^%s]*[%s])+' % (_newline, _newline), Comment.Preproc, + 'directive'), + default('directive') + ], + '_whitespace': [ + (r'\s+', Text), + (r'![^%s]*' % _newline, Comment.Single) + ], + 'default': [ + include('_whitespace'), + (r'\[', Punctuation, 'many-values'), # Array initialization + (r':|(?=;)', Punctuation, '#pop'), + (r'<', Punctuation), # Second angle bracket in an action statement + default(('expression', '_expression')) + ], + + # Expressions + '_expression': [ + include('_whitespace'), + (r'(?=sp\b)', Text, '#pop'), + (r'(?=[%s%s$0-9#a-zA-Z_])' % (_dquote, _squote), Text, + ('#pop', 'value')), + (r'\+\+|[%s]{1,2}(?!>)|~~?' 
% _dash, Operator), + (r'(?=[()\[%s,?@{:;])' % _dash, Text, '#pop') + ], + 'expression': [ + include('_whitespace'), + (r'\(', Punctuation, ('expression', '_expression')), + (r'\)', Punctuation, '#pop'), + (r'\[', Punctuation, ('#pop', 'statements', 'locals')), + (r'>(?=(\s+|(![^%s]*))*[>;])' % _newline, Punctuation), + (r'\+\+|[%s]{2}(?!>)' % _dash, Operator), + (r',', Punctuation, '_expression'), + (r'&&?|\|\|?|[=~><]?=|[%s]{1,2}>?|\.\.?[&#]?|::|[<>+*/%%]' % _dash, + Operator, '_expression'), + (r'(has|hasnt|in|notin|ofclass|or|provides)\b', Operator.Word, + '_expression'), + (r'sp\b', Name), + (r'\?~?', Name.Label, 'label?'), + (r'[@{]', Error), + default('#pop') + ], + '_assembly-expression': [ + (r'\(', Punctuation, ('#push', '_expression')), + (r'[\[\]]', Punctuation), + (r'[%s]>' % _dash, Punctuation, '_expression'), + (r'sp\b', Keyword.Pseudo), + (r';', Punctuation, '#pop:3'), + include('expression') + ], + '_for-expression': [ + (r'\)', Punctuation, '#pop:2'), + (r':', Punctuation, '#pop'), + include('expression') + ], + '_keyword-expression': [ + (r'(from|near|to)\b', Keyword, '_expression'), + include('expression') + ], + '_list-expression': [ + (r',', Punctuation, '#pop'), + include('expression') + ], + '_object-expression': [ + (r'has\b', Keyword.Declaration, '#pop'), + include('_list-expression') + ], + + # Values + 'value': [ + include('_whitespace'), + # Strings + (r'[%s][^@][%s]' % (_squote, _squote), String.Char, '#pop'), + (r'([%s])(@\{[0-9a-fA-F]{1,4}\})([%s])' % (_squote, _squote), + bygroups(String.Char, String.Escape, String.Char), '#pop'), + (r'([%s])(@.{2})([%s])' % (_squote, _squote), + bygroups(String.Char, String.Escape, String.Char), '#pop'), + (r'[%s]' % _squote, String.Single, ('#pop', 'dictionary-word')), + (r'[%s]' % _dquote, String.Double, ('#pop', 'string')), + # Numbers + (r'\$[+%s][0-9]*\.?[0-9]*([eE][+%s]?[0-9]+)?' 
% (_dash, _dash), + Number.Float, '#pop'), + (r'\$[0-9a-fA-F]+', Number.Hex, '#pop'), + (r'\$\$[01]+', Number.Bin, '#pop'), + (r'[0-9]+', Number.Integer, '#pop'), + # Values prefixed by hashes + (r'(##|#a\$)(%s)' % _name, bygroups(Operator, Name), '#pop'), + (r'(#g\$)(%s)' % _name, + bygroups(Operator, Name.Variable.Global), '#pop'), + (r'#[nw]\$', Operator, ('#pop', 'obsolete-dictionary-word')), + (r'(#r\$)(%s)' % _name, bygroups(Operator, Name.Function), '#pop'), + (r'#', Name.Builtin, ('#pop', 'system-constant')), + # System functions + (words(( + 'child', 'children', 'elder', 'eldest', 'glk', 'indirect', 'metaclass', + 'parent', 'random', 'sibling', 'younger', 'youngest'), suffix=r'\b'), + Name.Builtin, '#pop'), + # Metaclasses + (r'(?i)(Class|Object|Routine|String)\b', Name.Builtin, '#pop'), + # Veneer routines + (words(( + 'Box__Routine', 'CA__Pr', 'CDefArt', 'CInDefArt', 'Cl__Ms', + 'Copy__Primitive', 'CP__Tab', 'DA__Pr', 'DB__Pr', 'DefArt', 'Dynam__String', + 'EnglishNumber', 'Glk__Wrap', 'IA__Pr', 'IB__Pr', 'InDefArt', 'Main__', + 'Meta__class', 'OB__Move', 'OB__Remove', 'OC__Cl', 'OP__Pr', 'Print__Addr', + 'Print__PName', 'PrintShortName', 'RA__Pr', 'RA__Sc', 'RL__Pr', 'R_Process', + 'RT__ChG', 'RT__ChGt', 'RT__ChLDB', 'RT__ChLDW', 'RT__ChPR', 'RT__ChPrintA', + 'RT__ChPrintC', 'RT__ChPrintO', 'RT__ChPrintS', 'RT__ChPS', 'RT__ChR', + 'RT__ChSTB', 'RT__ChSTW', 'RT__ChT', 'RT__Err', 'RT__TrPS', 'RV__Pr', + 'Symb__Tab', 'Unsigned__Compare', 'WV__Pr', 'Z__Region'), + prefix='(?i)', suffix=r'\b'), + Name.Builtin, '#pop'), + # Other built-in symbols + (words(( + 'call', 'copy', 'create', 'DEBUG', 'destroy', 'DICT_CHAR_SIZE', + 'DICT_ENTRY_BYTES', 'DICT_IS_UNICODE', 'DICT_WORD_SIZE', 'false', + 'FLOAT_INFINITY', 'FLOAT_NAN', 'FLOAT_NINFINITY', 'GOBJFIELD_CHAIN', + 'GOBJFIELD_CHILD', 'GOBJFIELD_NAME', 'GOBJFIELD_PARENT', + 'GOBJFIELD_PROPTAB', 'GOBJFIELD_SIBLING', 'GOBJ_EXT_START', + 'GOBJ_TOTAL_LENGTH', 'Grammar__Version', 'INDIV_PROP_START', 'INFIX', + 
'infix__watching', 'MODULE_MODE', 'name', 'nothing', 'NUM_ATTR_BYTES', 'print', + 'print_to_array', 'recreate', 'remaining', 'self', 'sender', 'STRICT_MODE', + 'sw__var', 'sys__glob0', 'sys__glob1', 'sys__glob2', 'sys_statusline_flag', + 'TARGET_GLULX', 'TARGET_ZCODE', 'temp__global2', 'temp__global3', + 'temp__global4', 'temp_global', 'true', 'USE_MODULES', 'WORDSIZE'), + prefix='(?i)', suffix=r'\b'), + Name.Builtin, '#pop'), + # Other values + (_name, Name, '#pop') + ], + # Strings + 'dictionary-word': [ + (r'[~^]+', String.Escape), + (r'[^~^\\@({%s]+' % _squote, String.Single), + (r'[({]', String.Single), + (r'@\{[0-9a-fA-F]{,4}\}', String.Escape), + (r'@.{2}', String.Escape), + (r'[%s]' % _squote, String.Single, '#pop') + ], + 'string': [ + (r'[~^]+', String.Escape), + (r'[^~^\\@({%s]+' % _dquote, String.Double), + (r'[({]', String.Double), + (r'\\', String.Escape), + (r'@(\\\s*[%s]\s*)*@((\\\s*[%s]\s*)*[0-9])*' % + (_newline, _newline), String.Escape), + (r'@(\\\s*[%s]\s*)*\{((\\\s*[%s]\s*)*[0-9a-fA-F]){,4}' + r'(\\\s*[%s]\s*)*\}' % (_newline, _newline, _newline), + String.Escape), + (r'@(\\\s*[%s]\s*)*.(\\\s*[%s]\s*)*.' 
% (_newline, _newline), + String.Escape), + (r'[%s]' % _dquote, String.Double, '#pop') + ], + 'plain-string': [ + (r'[^~^\\({\[\]%s]+' % _dquote, String.Double), + (r'[~^({\[\]]', String.Double), + (r'\\', String.Escape), + (r'[%s]' % _dquote, String.Double, '#pop') + ], + # Names + '_constant': [ + include('_whitespace'), + (_name, Name.Constant, '#pop'), + include('value') + ], + '_global': [ + include('_whitespace'), + (_name, Name.Variable.Global, '#pop'), + include('value') + ], + 'label?': [ + include('_whitespace'), + (_name, Name.Label, '#pop'), + default('#pop') + ], + 'variable?': [ + include('_whitespace'), + (_name, Name.Variable, '#pop'), + default('#pop') + ], + # Values after hashes + 'obsolete-dictionary-word': [ + (r'\S\w*', String.Other, '#pop') + ], + 'system-constant': [ + include('_whitespace'), + (_name, Name.Builtin, '#pop') + ], + + # Directives + 'directive': [ + include('_whitespace'), + (r'#', Punctuation), + (r';', Punctuation, '#pop'), + (r'\[', Punctuation, + ('default', 'statements', 'locals', 'routine-name?')), + (words(( + 'abbreviate', 'endif', 'dictionary', 'ifdef', 'iffalse', 'ifndef', 'ifnot', + 'iftrue', 'ifv3', 'ifv5', 'release', 'serial', 'switches', 'system_file', + 'version'), prefix='(?i)', suffix=r'\b'), + Keyword, 'default'), + (r'(?i)(array|global)\b', Keyword, + ('default', 'directive-keyword?', '_global')), + (r'(?i)attribute\b', Keyword, ('default', 'alias?', '_constant')), + (r'(?i)class\b', Keyword, + ('object-body', 'duplicates', 'class-name')), + (r'(?i)(constant|default)\b', Keyword, + ('default', 'expression', '_constant')), + (r'(?i)(end\b)(.*)', bygroups(Keyword, Text)), + (r'(?i)(extend|verb)\b', Keyword, 'grammar'), + (r'(?i)fake_action\b', Keyword, ('default', '_constant')), + (r'(?i)import\b', Keyword, 'manifest'), + (r'(?i)(include|link)\b', Keyword, + ('default', 'before-plain-string')), + (r'(?i)(lowstring|undef)\b', Keyword, ('default', '_constant')), + (r'(?i)message\b', Keyword, ('default', 
'diagnostic')), + (r'(?i)(nearby|object)\b', Keyword, + ('object-body', '_object-head')), + (r'(?i)property\b', Keyword, + ('default', 'alias?', '_constant', 'property-keyword*')), + (r'(?i)replace\b', Keyword, + ('default', 'routine-name?', 'routine-name?')), + (r'(?i)statusline\b', Keyword, ('default', 'directive-keyword?')), + (r'(?i)stub\b', Keyword, ('default', 'routine-name?')), + (r'(?i)trace\b', Keyword, + ('default', 'trace-keyword?', 'trace-keyword?')), + (r'(?i)zcharacter\b', Keyword, + ('default', 'directive-keyword?', 'directive-keyword?')), + (_name, Name.Class, ('object-body', '_object-head')) + ], + # [, Replace, Stub + 'routine-name?': [ + include('_whitespace'), + (_name, Name.Function, '#pop'), + default('#pop') + ], + 'locals': [ + include('_whitespace'), + (r';', Punctuation, '#pop'), + (r'\*', Punctuation), + (_name, Name.Variable) + ], + # Array + 'many-values': [ + include('_whitespace'), + (r';', Punctuation), + (r'\]', Punctuation, '#pop'), + (r':', Error), + default(('expression', '_expression')) + ], + # Attribute, Property + 'alias?': [ + include('_whitespace'), + (r'alias\b', Keyword, ('#pop', '_constant')), + default('#pop') + ], + # Class, Object, Nearby + 'class-name': [ + include('_whitespace'), + (r'(?=[,;]|(class|has|private|with)\b)', Text, '#pop'), + (_name, Name.Class, '#pop') + ], + 'duplicates': [ + include('_whitespace'), + (r'\(', Punctuation, ('#pop', 'expression', '_expression')), + default('#pop') + ], + '_object-head': [ + (r'[%s]>' % _dash, Punctuation), + (r'(class|has|private|with)\b', Keyword.Declaration, '#pop'), + include('_global') + ], + 'object-body': [ + include('_whitespace'), + (r';', Punctuation, '#pop:2'), + (r',', Punctuation), + (r'class\b', Keyword.Declaration, 'class-segment'), + (r'(has|private|with)\b', Keyword.Declaration), + (r':', Error), + default(('_object-expression', '_expression')) + ], + 'class-segment': [ + include('_whitespace'), + (r'(?=[,;]|(class|has|private|with)\b)', Text, '#pop'), + 
(_name, Name.Class), + default('value') + ], + # Extend, Verb + 'grammar': [ + include('_whitespace'), + (r'=', Punctuation, ('#pop', 'default')), + (r'\*', Punctuation, ('#pop', 'grammar-line')), + default('_directive-keyword') + ], + 'grammar-line': [ + include('_whitespace'), + (r';', Punctuation, '#pop'), + (r'[/*]', Punctuation), + (r'[%s]>' % _dash, Punctuation, 'value'), + (r'(noun|scope)\b', Keyword, '=routine'), + default('_directive-keyword') + ], + '=routine': [ + include('_whitespace'), + (r'=', Punctuation, 'routine-name?'), + default('#pop') + ], + # Import + 'manifest': [ + include('_whitespace'), + (r';', Punctuation, '#pop'), + (r',', Punctuation), + (r'(?i)global\b', Keyword, '_global'), + default('_global') + ], + # Include, Link, Message + 'diagnostic': [ + include('_whitespace'), + (r'[%s]' % _dquote, String.Double, ('#pop', 'message-string')), + default(('#pop', 'before-plain-string', 'directive-keyword?')) + ], + 'before-plain-string': [ + include('_whitespace'), + (r'[%s]' % _dquote, String.Double, ('#pop', 'plain-string')) + ], + 'message-string': [ + (r'[~^]+', String.Escape), + include('plain-string') + ], + + # Keywords used in directives + '_directive-keyword!': [ + include('_whitespace'), + (words(( + 'additive', 'alias', 'buffer', 'class', 'creature', 'data', 'error', 'fatalerror', + 'first', 'has', 'held', 'initial', 'initstr', 'last', 'long', 'meta', 'multi', + 'multiexcept', 'multiheld', 'multiinside', 'noun', 'number', 'only', 'private', + 'replace', 'reverse', 'scope', 'score', 'special', 'string', 'table', 'terminating', + 'time', 'topic', 'warning', 'with'), suffix=r'\b'), + Keyword, '#pop'), + (r'[%s]{1,2}>|[+=]' % _dash, Punctuation, '#pop') + ], + '_directive-keyword': [ + include('_directive-keyword!'), + include('value') + ], + 'directive-keyword?': [ + include('_directive-keyword!'), + default('#pop') + ], + 'property-keyword*': [ + include('_whitespace'), + (r'(additive|long)\b', Keyword), + default('#pop') + ], + 
'trace-keyword?': [ + include('_whitespace'), + (words(( + 'assembly', 'dictionary', 'expressions', 'lines', 'linker', + 'objects', 'off', 'on', 'symbols', 'tokens', 'verbs'), suffix=r'\b'), + Keyword, '#pop'), + default('#pop') + ], + + # Statements + 'statements': [ + include('_whitespace'), + (r'\]', Punctuation, '#pop'), + (r'[;{}]', Punctuation), + (words(( + 'box', 'break', 'continue', 'default', 'give', 'inversion', + 'new_line', 'quit', 'read', 'remove', 'return', 'rfalse', 'rtrue', + 'spaces', 'string', 'until'), suffix=r'\b'), + Keyword, 'default'), + (r'(do|else)\b', Keyword), + (r'(font|style)\b', Keyword, + ('default', 'miscellaneous-keyword?')), + (r'for\b', Keyword, ('for', '(?')), + (r'(if|switch|while)', Keyword, + ('expression', '_expression', '(?')), + (r'(jump|save|restore)\b', Keyword, ('default', 'label?')), + (r'objectloop\b', Keyword, + ('_keyword-expression', 'variable?', '(?')), + (r'print(_ret)?\b|(?=[%s])' % _dquote, Keyword, 'print-list'), + (r'\.', Name.Label, 'label?'), + (r'@', Keyword, 'opcode'), + (r'#(?![agrnw]\$|#)', Punctuation, 'directive'), + (r'<', Punctuation, 'default'), + (r'move\b', Keyword, + ('default', '_keyword-expression', '_expression')), + default(('default', '_keyword-expression', '_expression')) + ], + 'miscellaneous-keyword?': [ + include('_whitespace'), + (r'(bold|fixed|from|near|off|on|reverse|roman|to|underline)\b', + Keyword, '#pop'), + (r'(a|A|an|address|char|name|number|object|property|string|the|' + r'The)\b(?=(\s+|(![^%s]*))*\))' % _newline, Keyword.Pseudo, + '#pop'), + (r'%s(?=(\s+|(![^%s]*))*\))' % (_name, _newline), Name.Function, + '#pop'), + default('#pop') + ], + '(?': [ + include('_whitespace'), + (r'\(', Punctuation, '#pop'), + default('#pop') + ], + 'for': [ + include('_whitespace'), + (r';', Punctuation, ('_for-expression', '_expression')), + default(('_for-expression', '_expression')) + ], + 'print-list': [ + include('_whitespace'), + (r';', Punctuation, '#pop'), + (r':', Error), + 
default(('_list-expression', '_expression', '_list-expression', 'form')) + ], + 'form': [ + include('_whitespace'), + (r'\(', Punctuation, ('#pop', 'miscellaneous-keyword?')), + default('#pop') + ], + + # Assembly + 'opcode': [ + include('_whitespace'), + (r'[%s]' % _dquote, String.Double, ('operands', 'plain-string')), + (_name, Keyword, 'operands') + ], + 'operands': [ + (r':', Error), + default(('_assembly-expression', '_expression')) + ] + } + + def get_tokens_unprocessed(self, text): + # 'in' is either a keyword or an operator. + # If the token two tokens after 'in' is ')', 'in' is a keyword: + # objectloop(a in b) + # Otherwise, it is an operator: + # objectloop(a in b && true) + objectloop_queue = [] + objectloop_token_count = -1 + previous_token = None + for index, token, value in RegexLexer.get_tokens_unprocessed(self, + text): + if previous_token is Name.Variable and value == 'in': + objectloop_queue = [[index, token, value]] + objectloop_token_count = 2 + elif objectloop_token_count > 0: + if token not in Comment and token not in Text: + objectloop_token_count -= 1 + objectloop_queue.append((index, token, value)) + else: + if objectloop_token_count == 0: + if objectloop_queue[-1][2] == ')': + objectloop_queue[0][1] = Keyword + while objectloop_queue: + yield objectloop_queue.pop(0) + objectloop_token_count = -1 + yield index, token, value + if token not in Comment and token not in Text: + previous_token = token + while objectloop_queue: + yield objectloop_queue.pop(0) + + +class Inform7Lexer(RegexLexer): + """ + For `Inform 7 `_ source code. + + .. versionadded:: 2.0 + """ + + name = 'Inform 7' + aliases = ['inform7', 'i7'] + filenames = ['*.ni', '*.i7x'] + + flags = re.MULTILINE | re.DOTALL | re.UNICODE + + _dash = Inform6Lexer._dash + _dquote = Inform6Lexer._dquote + _newline = Inform6Lexer._newline + _start = r'\A|(?<=[%s])' % _newline + + # There are three variants of Inform 7, differing in how to + # interpret at signs and braces in I6T. 
In top-level inclusions, at + # signs in the first column are inweb syntax. In phrase definitions + # and use options, tokens in braces are treated as I7. Use options + # also interpret "{N}". + tokens = {} + token_variants = ['+i6t-not-inline', '+i6t-inline', '+i6t-use-option'] + + for level in token_variants: + tokens[level] = { + '+i6-root': list(Inform6Lexer.tokens['root']), + '+i6t-root': [ # For Inform6TemplateLexer + (r'[^%s]*' % Inform6Lexer._newline, Comment.Preproc, + ('directive', '+p')) + ], + 'root': [ + (r'(\|?\s)+', Text), + (r'\[', Comment.Multiline, '+comment'), + (r'[%s]' % _dquote, Generic.Heading, + ('+main', '+titling', '+titling-string')), + default(('+main', '+heading?')) + ], + '+titling-string': [ + (r'[^%s]+' % _dquote, Generic.Heading), + (r'[%s]' % _dquote, Generic.Heading, '#pop') + ], + '+titling': [ + (r'\[', Comment.Multiline, '+comment'), + (r'[^%s.;:|%s]+' % (_dquote, _newline), Generic.Heading), + (r'[%s]' % _dquote, Generic.Heading, '+titling-string'), + (r'[%s]{2}|(?<=[\s%s])\|[\s%s]' % (_newline, _dquote, _dquote), + Text, ('#pop', '+heading?')), + (r'[.;:]|(?<=[\s%s])\|' % _dquote, Text, '#pop'), + (r'[|%s]' % _newline, Generic.Heading) + ], + '+main': [ + (r'(?i)[^%s:a\[(|%s]+' % (_dquote, _newline), Text), + (r'[%s]' % _dquote, String.Double, '+text'), + (r':', Text, '+phrase-definition'), + (r'(?i)\bas\b', Text, '+use-option'), + (r'\[', Comment.Multiline, '+comment'), + (r'(\([%s])(.*?)([%s]\))' % (_dash, _dash), + bygroups(Punctuation, + using(this, state=('+i6-root', 'directive'), + i6t='+i6t-not-inline'), Punctuation)), + (r'(%s|(?<=[\s;:.%s]))\|\s|[%s]{2,}' % + (_start, _dquote, _newline), Text, '+heading?'), + (r'(?i)[a(|%s]' % _newline, Text) + ], + '+phrase-definition': [ + (r'\s+', Text), + (r'\[', Comment.Multiline, '+comment'), + (r'(\([%s])(.*?)([%s]\))' % (_dash, _dash), + bygroups(Punctuation, + using(this, state=('+i6-root', 'directive', + 'default', 'statements'), + i6t='+i6t-inline'), Punctuation), '#pop'), 
+ default('#pop') + ], + '+use-option': [ + (r'\s+', Text), + (r'\[', Comment.Multiline, '+comment'), + (r'(\([%s])(.*?)([%s]\))' % (_dash, _dash), + bygroups(Punctuation, + using(this, state=('+i6-root', 'directive'), + i6t='+i6t-use-option'), Punctuation), '#pop'), + default('#pop') + ], + '+comment': [ + (r'[^\[\]]+', Comment.Multiline), + (r'\[', Comment.Multiline, '#push'), + (r'\]', Comment.Multiline, '#pop') + ], + '+text': [ + (r'[^\[%s]+' % _dquote, String.Double), + (r'\[.*?\]', String.Interpol), + (r'[%s]' % _dquote, String.Double, '#pop') + ], + '+heading?': [ + (r'(\|?\s)+', Text), + (r'\[', Comment.Multiline, '+comment'), + (r'[%s]{4}\s+' % _dash, Text, '+documentation-heading'), + (r'[%s]{1,3}' % _dash, Text), + (r'(?i)(volume|book|part|chapter|section)\b[^%s]*' % _newline, + Generic.Heading, '#pop'), + default('#pop') + ], + '+documentation-heading': [ + (r'\s+', Text), + (r'\[', Comment.Multiline, '+comment'), + (r'(?i)documentation\s+', Text, '+documentation-heading2'), + default('#pop') + ], + '+documentation-heading2': [ + (r'\s+', Text), + (r'\[', Comment.Multiline, '+comment'), + (r'[%s]{4}\s' % _dash, Text, '+documentation'), + default('#pop:2') + ], + '+documentation': [ + (r'(?i)(%s)\s*(chapter|example)\s*:[^%s]*' % + (_start, _newline), Generic.Heading), + (r'(?i)(%s)\s*section\s*:[^%s]*' % (_start, _newline), + Generic.Subheading), + (r'((%s)\t.*?[%s])+' % (_start, _newline), + using(this, state='+main')), + (r'[^%s\[]+|[%s\[]' % (_newline, _newline), Text), + (r'\[', Comment.Multiline, '+comment'), + ], + '+i6t-not-inline': [ + (r'(%s)@c( .*?)?([%s]|\Z)' % (_start, _newline), + Comment.Preproc), + (r'(%s)@([%s]+|Purpose:)[^%s]*' % (_start, _dash, _newline), + Comment.Preproc), + (r'(%s)@p( .*?)?([%s]|\Z)' % (_start, _newline), + Generic.Heading, '+p') + ], + '+i6t-use-option': [ + include('+i6t-not-inline'), + (r'(\{)(N)(\})', bygroups(Punctuation, Text, Punctuation)) + ], + '+i6t-inline': [ + (r'(\{)(\S[^}]*)?(\})', + 
bygroups(Punctuation, using(this, state='+main'), + Punctuation)) + ], + '+i6t': [ + (r'(\{[%s])(![^}]*)(\}?)' % _dash, + bygroups(Punctuation, Comment.Single, Punctuation)), + (r'(\{[%s])(lines)(:)([^}]*)(\}?)' % _dash, + bygroups(Punctuation, Keyword, Punctuation, Text, + Punctuation), '+lines'), + (r'(\{[%s])([^:}]*)(:?)([^}]*)(\}?)' % _dash, + bygroups(Punctuation, Keyword, Punctuation, Text, + Punctuation)), + (r'(\(\+)(.*?)(\+\)|\Z)', + bygroups(Punctuation, using(this, state='+main'), + Punctuation)) + ], + '+p': [ + (r'[^@]+', Comment.Preproc), + (r'(%s)@c( .*?)?([%s]|\Z)' % (_start, _newline), + Comment.Preproc, '#pop'), + (r'(%s)@([%s]|Purpose:)' % (_start, _dash), Comment.Preproc), + (r'(%s)@p( .*?)?([%s]|\Z)' % (_start, _newline), + Generic.Heading), + (r'@', Comment.Preproc) + ], + '+lines': [ + (r'(%s)@c( .*?)?([%s]|\Z)' % (_start, _newline), + Comment.Preproc), + (r'(%s)@([%s]|Purpose:)[^%s]*' % (_start, _dash, _newline), + Comment.Preproc), + (r'(%s)@p( .*?)?([%s]|\Z)' % (_start, _newline), + Generic.Heading, '+p'), + (r'(%s)@\w*[ %s]' % (_start, _newline), Keyword), + (r'![^%s]*' % _newline, Comment.Single), + (r'(\{)([%s]endlines)(\})' % _dash, + bygroups(Punctuation, Keyword, Punctuation), '#pop'), + (r'[^@!{]+?([%s]|\Z)|.' % _newline, Text) + ] + } + # Inform 7 can include snippets of Inform 6 template language, + # so all of Inform6Lexer's states are copied here, with + # modifications to account for template syntax. Inform7Lexer's + # own states begin with '+' to avoid name conflicts. Some of + # Inform6Lexer's states begin with '_': these are not modified. + # They deal with template syntax either by including modified + # states, or by matching r'' then pushing to modified states. 
+ for token in Inform6Lexer.tokens: + if token == 'root': + continue + tokens[level][token] = list(Inform6Lexer.tokens[token]) + if not token.startswith('_'): + tokens[level][token][:0] = [include('+i6t'), include(level)] + + def __init__(self, **options): + level = options.get('i6t', '+i6t-not-inline') + if level not in self._all_tokens: + self._tokens = self.__class__.process_tokendef(level) + else: + self._tokens = self._all_tokens[level] + RegexLexer.__init__(self, **options) + + +class Inform6TemplateLexer(Inform7Lexer): + """ + For `Inform 6 template + `_ code. + + .. versionadded:: 2.0 + """ + + name = 'Inform 6 template' + aliases = ['i6t'] + filenames = ['*.i6t'] + + def get_tokens_unprocessed(self, text, stack=('+i6t-root',)): + return Inform7Lexer.get_tokens_unprocessed(self, text, stack) + + +class Tads3Lexer(RegexLexer): + """ + For `TADS 3 `_ source code. + """ + + name = 'TADS 3' + aliases = ['tads3'] + filenames = ['*.t'] + + flags = re.DOTALL | re.MULTILINE + + _comment_single = r'(?://(?:[^\\\n]|\\+[\w\W])*$)' + _comment_multiline = r'(?:/\*(?:[^*]|\*(?!/))*\*/)' + _escape = (r'(?:\\(?:[\n\\<>"\'^v bnrt]|u[\da-fA-F]{,4}|x[\da-fA-F]{,2}|' + r'[0-3]?[0-7]{1,2}))') + _name = r'(?:[_a-zA-Z]\w*)' + _no_quote = r'(?=\s|\\?>)' + _operator = (r'(?:&&|\|\||\+\+|--|\?\?|::|[.,@\[\]~]|' + r'(?:[=+\-*/%!&|^]|<>?>?)=?)') + _ws = r'(?:\\|\s|%s|%s)' % (_comment_single, _comment_multiline) + _ws_pp = r'(?:\\\n|[^\S\n]|%s|%s)' % (_comment_single, _comment_multiline) + + def _make_string_state(triple, double, verbatim=None, _escape=_escape): + if verbatim: + verbatim = ''.join(['(?:%s|%s)' % (re.escape(c.lower()), + re.escape(c.upper())) + for c in verbatim]) + char = r'"' if double else r"'" + token = String.Double if double else String.Single + escaped_quotes = r'+|%s(?!%s{2})' % (char, char) if triple else r'' + prefix = '%s%s' % ('t' if triple else '', 'd' if double else 's') + tag_state_name = '%sqt' % prefix + state = [] + if triple: + state += [ + (r'%s{3,}' 
% char, token, '#pop'), + (r'\\%s+' % char, String.Escape), + (char, token) + ] + else: + state.append((char, token, '#pop')) + state += [ + include('s/verbatim'), + (r'[^\\<&{}%s]+' % char, token) + ] + if verbatim: + # This regex can't use `(?i)` because escape sequences are + # case-sensitive. `<\XMP>` works; `<\xmp>` doesn't. + state.append((r'\\?<(/|\\\\|(?!%s)\\)%s(?=[\s=>])' % + (_escape, verbatim), + Name.Tag, ('#pop', '%sqs' % prefix, tag_state_name))) + else: + state += [ + (r'\\?<\\%s]|<(?!<)|\\%s%s|%s|\\.)*>?' % + (char, char, escaped_quotes, _escape), Comment.Multiline), + (r'(?i)\\?]|\\>)', Name.Tag, + ('#pop', '%sqs/listing' % prefix, tag_state_name)), + (r'(?i)\\?]|\\>)', Name.Tag, + ('#pop', '%sqs/xmp' % prefix, tag_state_name)), + (r'\\?<([^\s=><\\%s]|<(?!<)|\\%s%s|%s|\\.)*' % + (char, char, escaped_quotes, _escape), Name.Tag, + tag_state_name), + include('s/entity') + ] + state += [ + include('s/escape'), + (r'\{([^}<\\%s]|<(?!<)|\\%s%s|%s|\\.)*\}' % + (char, char, escaped_quotes, _escape), String.Interpol), + (r'[\\&{}<]', token) + ] + return state + + def _make_tag_state(triple, double, _escape=_escape): + char = r'"' if double else r"'" + quantifier = r'{3,}' if triple else r'' + state_name = '%s%sqt' % ('t' if triple else '', 'd' if double else 's') + token = String.Double if double else String.Single + escaped_quotes = r'+|%s(?!%s{2})' % (char, char) if triple else r'' + return [ + (r'%s%s' % (char, quantifier), token, '#pop:2'), + (r'(\s|\\\n)+', Text), + (r'(=)(\\?")', bygroups(Punctuation, String.Double), + 'dqs/%s' % state_name), + (r"(=)(\\?')", bygroups(Punctuation, String.Single), + 'sqs/%s' % state_name), + (r'=', Punctuation, 'uqs/%s' % state_name), + (r'\\?>', Name.Tag, '#pop'), + (r'\{([^}<\\%s]|<(?!<)|\\%s%s|%s|\\.)*\}' % + (char, char, escaped_quotes, _escape), String.Interpol), + (r'([^\s=><\\%s]|<(?!<)|\\%s%s|%s|\\.)+' % + (char, char, escaped_quotes, _escape), Name.Attribute), + include('s/escape'), + include('s/verbatim'), + 
include('s/entity'), + (r'[\\{}&]', Name.Attribute) + ] + + def _make_attribute_value_state(terminator, host_triple, host_double, + _escape=_escape): + token = (String.Double if terminator == r'"' else + String.Single if terminator == r"'" else String.Other) + host_char = r'"' if host_double else r"'" + host_quantifier = r'{3,}' if host_triple else r'' + host_token = String.Double if host_double else String.Single + escaped_quotes = (r'+|%s(?!%s{2})' % (host_char, host_char) + if host_triple else r'') + return [ + (r'%s%s' % (host_char, host_quantifier), host_token, '#pop:3'), + (r'%s%s' % (r'' if token is String.Other else r'\\?', terminator), + token, '#pop'), + include('s/verbatim'), + include('s/entity'), + (r'\{([^}<\\%s]|<(?!<)|\\%s%s|%s|\\.)*\}' % + (host_char, host_char, escaped_quotes, _escape), String.Interpol), + (r'([^\s"\'<%s{}\\&])+' % (r'>' if token is String.Other else r''), + token), + include('s/escape'), + (r'["\'\s&{<}\\]', token) + ] + + tokens = { + 'root': [ + (u'\ufeff', Text), + (r'\{', Punctuation, 'object-body'), + (r';+', Punctuation), + (r'(?=(argcount|break|case|catch|continue|default|definingobj|' + r'delegated|do|else|for|foreach|finally|goto|if|inherited|' + r'invokee|local|nil|new|operator|replaced|return|self|switch|' + r'targetobj|targetprop|throw|true|try|while)\b)', Text, 'block'), + (r'(%s)(%s*)(\()' % (_name, _ws), + bygroups(Name.Function, using(this, state='whitespace'), + Punctuation), + ('block?/root', 'more/parameters', 'main/parameters')), + include('whitespace'), + (r'\++', Punctuation), + (r'[^\s!"%-(*->@-_a-z{-~]+', Error), # Averts an infinite loop + (r'(?!\Z)', Text, 'main/root') + ], + 'main/root': [ + include('main/basic'), + default(('#pop', 'object-body/no-braces', 'classes', 'class')) + ], + 'object-body/no-braces': [ + (r';', Punctuation, '#pop'), + (r'\{', Punctuation, ('#pop', 'object-body')), + include('object-body') + ], + 'object-body': [ + (r';', Punctuation), + (r'\{', Punctuation, '#push'), + (r'\}', 
Punctuation, '#pop'), + (r':', Punctuation, ('classes', 'class')), + (r'(%s?)(%s*)(\()' % (_name, _ws), + bygroups(Name.Function, using(this, state='whitespace'), + Punctuation), + ('block?', 'more/parameters', 'main/parameters')), + (r'(%s)(%s*)(\{)' % (_name, _ws), + bygroups(Name.Function, using(this, state='whitespace'), + Punctuation), 'block'), + (r'(%s)(%s*)(:)' % (_name, _ws), + bygroups(Name.Variable, using(this, state='whitespace'), + Punctuation), + ('object-body/no-braces', 'classes', 'class')), + include('whitespace'), + (r'->|%s' % _operator, Punctuation, 'main'), + default('main/object-body') + ], + 'main/object-body': [ + include('main/basic'), + (r'(%s)(%s*)(=?)' % (_name, _ws), + bygroups(Name.Variable, using(this, state='whitespace'), + Punctuation), ('#pop', 'more', 'main')), + default('#pop:2') + ], + 'block?/root': [ + (r'\{', Punctuation, ('#pop', 'block')), + include('whitespace'), + (r'(?=[[\'"<(:])', Text, # It might be a VerbRule macro. + ('#pop', 'object-body/no-braces', 'grammar', 'grammar-rules')), + # It might be a macro like DefineAction. 
+ default(('#pop', 'object-body/no-braces')) + ], + 'block?': [ + (r'\{', Punctuation, ('#pop', 'block')), + include('whitespace'), + default('#pop') + ], + 'block/basic': [ + (r'[;:]+', Punctuation), + (r'\{', Punctuation, '#push'), + (r'\}', Punctuation, '#pop'), + (r'default\b', Keyword.Reserved), + (r'(%s)(%s*)(:)' % (_name, _ws), + bygroups(Name.Label, using(this, state='whitespace'), + Punctuation)), + include('whitespace') + ], + 'block': [ + include('block/basic'), + (r'(?!\Z)', Text, ('more', 'main')) + ], + 'block/embed': [ + (r'>>', String.Interpol, '#pop'), + include('block/basic'), + (r'(?!\Z)', Text, ('more/embed', 'main')) + ], + 'main/basic': [ + include('whitespace'), + (r'\(', Punctuation, ('#pop', 'more', 'main')), + (r'\[', Punctuation, ('#pop', 'more/list', 'main')), + (r'\{', Punctuation, ('#pop', 'more/inner', 'main/inner', + 'more/parameters', 'main/parameters')), + (r'\*|\.{3}', Punctuation, '#pop'), + (r'(?i)0x[\da-f]+', Number.Hex, '#pop'), + (r'(\d+\.(?!\.)\d*|\.\d+)([eE][-+]?\d+)?|\d+[eE][-+]?\d+', + Number.Float, '#pop'), + (r'0[0-7]+', Number.Oct, '#pop'), + (r'\d+', Number.Integer, '#pop'), + (r'"""', String.Double, ('#pop', 'tdqs')), + (r"'''", String.Single, ('#pop', 'tsqs')), + (r'"', String.Double, ('#pop', 'dqs')), + (r"'", String.Single, ('#pop', 'sqs')), + (r'R"""', String.Regex, ('#pop', 'tdqr')), + (r"R'''", String.Regex, ('#pop', 'tsqr')), + (r'R"', String.Regex, ('#pop', 'dqr')), + (r"R'", String.Regex, ('#pop', 'sqr')), + # Two-token keywords + (r'(extern)(%s+)(object\b)' % _ws, + bygroups(Keyword.Reserved, using(this, state='whitespace'), + Keyword.Reserved)), + (r'(function|method)(%s*)(\()' % _ws, + bygroups(Keyword.Reserved, using(this, state='whitespace'), + Punctuation), + ('#pop', 'block?', 'more/parameters', 'main/parameters')), + (r'(modify)(%s+)(grammar\b)' % _ws, + bygroups(Keyword.Reserved, using(this, state='whitespace'), + Keyword.Reserved), + ('#pop', 'object-body/no-braces', ':', 'grammar')), + 
(r'(new)(%s+(?=(?:function|method)\b))' % _ws, + bygroups(Keyword.Reserved, using(this, state='whitespace'))), + (r'(object)(%s+)(template\b)' % _ws, + bygroups(Keyword.Reserved, using(this, state='whitespace'), + Keyword.Reserved), ('#pop', 'template')), + (r'(string)(%s+)(template\b)' % _ws, + bygroups(Keyword, using(this, state='whitespace'), + Keyword.Reserved), ('#pop', 'function-name')), + # Keywords + (r'(argcount|definingobj|invokee|replaced|targetobj|targetprop)\b', + Name.Builtin, '#pop'), + (r'(break|continue|goto)\b', Keyword.Reserved, ('#pop', 'label')), + (r'(case|extern|if|intrinsic|return|static|while)\b', + Keyword.Reserved), + (r'catch\b', Keyword.Reserved, ('#pop', 'catch')), + (r'class\b', Keyword.Reserved, + ('#pop', 'object-body/no-braces', 'class')), + (r'(default|do|else|finally|try)\b', Keyword.Reserved, '#pop'), + (r'(dictionary|property)\b', Keyword.Reserved, + ('#pop', 'constants')), + (r'enum\b', Keyword.Reserved, ('#pop', 'enum')), + (r'export\b', Keyword.Reserved, ('#pop', 'main')), + (r'(for|foreach)\b', Keyword.Reserved, + ('#pop', 'more/inner', 'main/inner')), + (r'(function|method)\b', Keyword.Reserved, + ('#pop', 'block?', 'function-name')), + (r'grammar\b', Keyword.Reserved, + ('#pop', 'object-body/no-braces', 'grammar')), + (r'inherited\b', Keyword.Reserved, ('#pop', 'inherited')), + (r'local\b', Keyword.Reserved, + ('#pop', 'more/local', 'main/local')), + (r'(modify|replace|switch|throw|transient)\b', Keyword.Reserved, + '#pop'), + (r'new\b', Keyword.Reserved, ('#pop', 'class')), + (r'(nil|true)\b', Keyword.Constant, '#pop'), + (r'object\b', Keyword.Reserved, ('#pop', 'object-body/no-braces')), + (r'operator\b', Keyword.Reserved, ('#pop', 'operator')), + (r'propertyset\b', Keyword.Reserved, + ('#pop', 'propertyset', 'main')), + (r'self\b', Name.Builtin.Pseudo, '#pop'), + (r'template\b', Keyword.Reserved, ('#pop', 'template')), + # Operators + (r'(__objref|defined)(%s*)(\()' % _ws, + bygroups(Operator.Word, using(this, 
state='whitespace'), + Operator), ('#pop', 'more/__objref', 'main')), + (r'delegated\b', Operator.Word), + # Compiler-defined macros and built-in properties + (r'(__DATE__|__DEBUG|__LINE__|__FILE__|' + r'__TADS_MACRO_FORMAT_VERSION|__TADS_SYS_\w*|__TADS_SYSTEM_NAME|' + r'__TADS_VERSION_MAJOR|__TADS_VERSION_MINOR|__TADS3|__TIME__|' + r'construct|finalize|grammarInfo|grammarTag|lexicalParent|' + r'miscVocab|sourceTextGroup|sourceTextGroupName|' + r'sourceTextGroupOrder|sourceTextOrder)\b', Name.Builtin, '#pop') + ], + 'main': [ + include('main/basic'), + (_name, Name, '#pop'), + default('#pop') + ], + 'more/basic': [ + (r'\(', Punctuation, ('more/list', 'main')), + (r'\[', Punctuation, ('more', 'main')), + (r'\.{3}', Punctuation), + (r'->|\.\.', Punctuation, 'main'), + (r'(?=;)|[:)\]]', Punctuation, '#pop'), + include('whitespace'), + (_operator, Operator, 'main'), + (r'\?', Operator, ('main', 'more/conditional', 'main')), + (r'(is|not)(%s+)(in\b)' % _ws, + bygroups(Operator.Word, using(this, state='whitespace'), + Operator.Word)), + (r'[^\s!"%-_a-z{-~]+', Error) # Averts an infinite loop + ], + 'more': [ + include('more/basic'), + default('#pop') + ], + # Then expression (conditional operator) + 'more/conditional': [ + (r':(?!:)', Operator, '#pop'), + include('more') + ], + # Embedded expressions + 'more/embed': [ + (r'>>', String.Interpol, '#pop:2'), + include('more') + ], + # For/foreach loop initializer or short-form anonymous function + 'main/inner': [ + (r'\(', Punctuation, ('#pop', 'more/inner', 'main/inner')), + (r'local\b', Keyword.Reserved, ('#pop', 'main/local')), + include('main') + ], + 'more/inner': [ + (r'\}', Punctuation, '#pop'), + (r',', Punctuation, 'main/inner'), + (r'(in|step)\b', Keyword, 'main/inner'), + include('more') + ], + # Local + 'main/local': [ + (_name, Name.Variable, '#pop'), + include('whitespace') + ], + 'more/local': [ + (r',', Punctuation, 'main/local'), + include('more') + ], + # List + 'more/list': [ + (r'[,:]', Punctuation, 
'main'), + include('more') + ], + # Parameter list + 'main/parameters': [ + (r'(%s)(%s*)(?=:)' % (_name, _ws), + bygroups(Name.Variable, using(this, state='whitespace')), '#pop'), + (r'(%s)(%s+)(%s)' % (_name, _ws, _name), + bygroups(Name.Class, using(this, state='whitespace'), + Name.Variable), '#pop'), + (r'\[+', Punctuation), + include('main/basic'), + (_name, Name.Variable, '#pop'), + default('#pop') + ], + 'more/parameters': [ + (r'(:)(%s*(?=[?=,:)]))' % _ws, + bygroups(Punctuation, using(this, state='whitespace'))), + (r'[?\]]+', Punctuation), + (r'[:)]', Punctuation, ('#pop', 'multimethod?')), + (r',', Punctuation, 'main/parameters'), + (r'=', Punctuation, ('more/parameter', 'main')), + include('more') + ], + 'more/parameter': [ + (r'(?=[,)])', Text, '#pop'), + include('more') + ], + 'multimethod?': [ + (r'multimethod\b', Keyword, '#pop'), + include('whitespace'), + default('#pop') + ], + + # Statements and expressions + 'more/__objref': [ + (r',', Punctuation, 'mode'), + (r'\)', Operator, '#pop'), + include('more') + ], + 'mode': [ + (r'(error|warn)\b', Keyword, '#pop'), + include('whitespace') + ], + 'catch': [ + (r'\(+', Punctuation), + (_name, Name.Exception, ('#pop', 'variables')), + include('whitespace') + ], + 'enum': [ + include('whitespace'), + (r'token\b', Keyword, ('#pop', 'constants')), + default(('#pop', 'constants')) + ], + 'grammar': [ + (r'\)+', Punctuation), + (r'\(', Punctuation, 'grammar-tag'), + (r':', Punctuation, 'grammar-rules'), + (_name, Name.Class), + include('whitespace') + ], + 'grammar-tag': [ + include('whitespace'), + (r'"""([^\\"<]|""?(?!")|\\"+|\\.|<(?!<))+("{3,}|<<)|' + r'R"""([^\\"]|""?(?!")|\\"+|\\.)+"{3,}|' + r"'''([^\\'<]|''?(?!')|\\'+|\\.|<(?!<))+('{3,}|<<)|" + r"R'''([^\\']|''?(?!')|\\'+|\\.)+'{3,}|" + r'"([^\\"<]|\\.|<(?!<))+("|<<)|R"([^\\"]|\\.)+"|' + r"'([^\\'<]|\\.|<(?!<))+('|<<)|R'([^\\']|\\.)+'|" + r"([^)\s\\/]|/(?![/*]))+|\)", String.Other, '#pop') + ], + 'grammar-rules': [ + include('string'), + 
include('whitespace'), + (r'(\[)(%s*)(badness)' % _ws, + bygroups(Punctuation, using(this, state='whitespace'), Keyword), + 'main'), + (r'->|%s|[()]' % _operator, Punctuation), + (_name, Name.Constant), + default('#pop:2') + ], + ':': [ + (r':', Punctuation, '#pop') + ], + 'function-name': [ + (r'(<<([^>]|>>>|>(?!>))*>>)+', String.Interpol), + (r'(?=%s?%s*[({])' % (_name, _ws), Text, '#pop'), + (_name, Name.Function, '#pop'), + include('whitespace') + ], + 'inherited': [ + (r'<', Punctuation, ('#pop', 'classes', 'class')), + include('whitespace'), + (_name, Name.Class, '#pop'), + default('#pop') + ], + 'operator': [ + (r'negate\b', Operator.Word, '#pop'), + include('whitespace'), + (_operator, Operator), + default('#pop') + ], + 'propertyset': [ + (r'\(', Punctuation, ('more/parameters', 'main/parameters')), + (r'\{', Punctuation, ('#pop', 'object-body')), + include('whitespace') + ], + 'template': [ + (r'(?=;)', Text, '#pop'), + include('string'), + (r'inherited\b', Keyword.Reserved), + include('whitespace'), + (r'->|\?|%s' % _operator, Punctuation), + (_name, Name.Variable) + ], + + # Identifiers + 'class': [ + (r'\*|\.{3}', Punctuation, '#pop'), + (r'object\b', Keyword.Reserved, '#pop'), + (r'transient\b', Keyword.Reserved), + (_name, Name.Class, '#pop'), + include('whitespace'), + default('#pop') + ], + 'classes': [ + (r'[:,]', Punctuation, 'class'), + include('whitespace'), + (r'>', Punctuation, '#pop'), + default('#pop') + ], + 'constants': [ + (r',+', Punctuation), + (r';', Punctuation, '#pop'), + (r'property\b', Keyword.Reserved), + (_name, Name.Constant), + include('whitespace') + ], + 'label': [ + (_name, Name.Label, '#pop'), + include('whitespace'), + default('#pop') + ], + 'variables': [ + (r',+', Punctuation), + (r'\)', Punctuation, '#pop'), + include('whitespace'), + (_name, Name.Variable) + ], + + # Whitespace and comments + 'whitespace': [ + (r'^%s*#(%s|[^\n]|(?<=\\)\n)*\n?' 
% (_ws_pp, _comment_multiline), + Comment.Preproc), + (_comment_single, Comment.Single), + (_comment_multiline, Comment.Multiline), + (r'\\+\n+%s*#?|\n+|([^\S\n]|\\)+' % _ws_pp, Text) + ], + + # Strings + 'string': [ + (r'"""', String.Double, 'tdqs'), + (r"'''", String.Single, 'tsqs'), + (r'"', String.Double, 'dqs'), + (r"'", String.Single, 'sqs') + ], + 's/escape': [ + (r'\{\{|\}\}|%s' % _escape, String.Escape) + ], + 's/verbatim': [ + (r'<<\s*(as\s+decreasingly\s+likely\s+outcomes|cycling|else|end|' + r'first\s+time|one\s+of|only|or|otherwise|' + r'(sticky|(then\s+)?(purely\s+)?at)\s+random|stopping|' + r'(then\s+)?(half\s+)?shuffled|\|\|)\s*>>', String.Interpol), + (r'<<(%%(_(%s|\\?.)|[\-+ ,#]|\[\d*\]?)*\d*\.?\d*(%s|\\?.)|' + r'\s*((else|otherwise)\s+)?(if|unless)\b)?' % (_escape, _escape), + String.Interpol, ('block/embed', 'more/embed', 'main')) + ], + 's/entity': [ + (r'(?i)&(#(x[\da-f]+|\d+)|[a-z][\da-z]*);?', Name.Entity) + ], + 'tdqs': _make_string_state(True, True), + 'tsqs': _make_string_state(True, False), + 'dqs': _make_string_state(False, True), + 'sqs': _make_string_state(False, False), + 'tdqs/listing': _make_string_state(True, True, 'listing'), + 'tsqs/listing': _make_string_state(True, False, 'listing'), + 'dqs/listing': _make_string_state(False, True, 'listing'), + 'sqs/listing': _make_string_state(False, False, 'listing'), + 'tdqs/xmp': _make_string_state(True, True, 'xmp'), + 'tsqs/xmp': _make_string_state(True, False, 'xmp'), + 'dqs/xmp': _make_string_state(False, True, 'xmp'), + 'sqs/xmp': _make_string_state(False, False, 'xmp'), + + # Tags + 'tdqt': _make_tag_state(True, True), + 'tsqt': _make_tag_state(True, False), + 'dqt': _make_tag_state(False, True), + 'sqt': _make_tag_state(False, False), + 'dqs/tdqt': _make_attribute_value_state(r'"', True, True), + 'dqs/tsqt': _make_attribute_value_state(r'"', True, False), + 'dqs/dqt': _make_attribute_value_state(r'"', False, True), + 'dqs/sqt': _make_attribute_value_state(r'"', False, False), + 
'sqs/tdqt': _make_attribute_value_state(r"'", True, True), + 'sqs/tsqt': _make_attribute_value_state(r"'", True, False), + 'sqs/dqt': _make_attribute_value_state(r"'", False, True), + 'sqs/sqt': _make_attribute_value_state(r"'", False, False), + 'uqs/tdqt': _make_attribute_value_state(_no_quote, True, True), + 'uqs/tsqt': _make_attribute_value_state(_no_quote, True, False), + 'uqs/dqt': _make_attribute_value_state(_no_quote, False, True), + 'uqs/sqt': _make_attribute_value_state(_no_quote, False, False), + + # Regular expressions + 'tdqr': [ + (r'[^\\"]+', String.Regex), + (r'\\"*', String.Regex), + (r'"{3,}', String.Regex, '#pop'), + (r'"', String.Regex) + ], + 'tsqr': [ + (r"[^\\']+", String.Regex), + (r"\\'*", String.Regex), + (r"'{3,}", String.Regex, '#pop'), + (r"'", String.Regex) + ], + 'dqr': [ + (r'[^\\"]+', String.Regex), + (r'\\"?', String.Regex), + (r'"', String.Regex, '#pop') + ], + 'sqr': [ + (r"[^\\']+", String.Regex), + (r"\\'?", String.Regex), + (r"'", String.Regex, '#pop') + ] + } + + def get_tokens_unprocessed(self, text, **kwargs): + pp = r'^%s*#%s*' % (self._ws_pp, self._ws_pp) + if_false_level = 0 + for index, token, value in ( + RegexLexer.get_tokens_unprocessed(self, text, **kwargs)): + if if_false_level == 0: # Not in a false #if + if (token is Comment.Preproc and + re.match(r'%sif%s+(0|nil)%s*$\n?' 
% + (pp, self._ws_pp, self._ws_pp), value)): + if_false_level = 1 + else: # In a false #if + if token is Comment.Preproc: + if (if_false_level == 1 and + re.match(r'%sel(if|se)\b' % pp, value)): + if_false_level = 0 + elif re.match(r'%sif' % pp, value): + if_false_level += 1 + elif re.match(r'%sendif\b' % pp, value): + if_false_level -= 1 + else: + token = Comment + yield index, token, value diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/iolang.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/iolang.py new file mode 100644 index 0000000..0bf86f5 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/iolang.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.iolang + ~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for the Io language. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number + +__all__ = ['IoLexer'] + + +class IoLexer(RegexLexer): + """ + For `Io `_ (a small, prototype-based + programming language) source. + + .. 
versionadded:: 0.10 + """ + name = 'Io' + filenames = ['*.io'] + aliases = ['io'] + mimetypes = ['text/x-iosrc'] + tokens = { + 'root': [ + (r'\n', Text), + (r'\s+', Text), + # Comments + (r'//(.*?)\n', Comment.Single), + (r'#(.*?)\n', Comment.Single), + (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), + (r'/\+', Comment.Multiline, 'nestedcomment'), + # DoubleQuotedString + (r'"(\\\\|\\"|[^"])*"', String), + # Operators + (r'::=|:=|=|\(|\)|;|,|\*|-|\+|>|<|@|!|/|\||\^|\.|%|&|\[|\]|\{|\}', + Operator), + # keywords + (r'(clone|do|doFile|doString|method|for|if|else|elseif|then)\b', + Keyword), + # constants + (r'(nil|false|true)\b', Name.Constant), + # names + (r'(Object|list|List|Map|args|Sequence|Coroutine|File)\b', + Name.Builtin), + ('[a-zA-Z_]\w*', Name), + # numbers + (r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float), + (r'\d+', Number.Integer) + ], + 'nestedcomment': [ + (r'[^+/]+', Comment.Multiline), + (r'/\+', Comment.Multiline, '#push'), + (r'\+/', Comment.Multiline, '#pop'), + (r'[+/]', Comment.Multiline), + ] + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/javascript.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/javascript.py new file mode 100644 index 0000000..404bcf9 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/javascript.py @@ -0,0 +1,1199 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.javascript + ~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for JavaScript and related languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import re + +from pygments.lexer import RegexLexer, include, bygroups, default, using, this +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Other +from pygments.util import get_bool_opt, iteritems +import pygments.unistring as uni + +__all__ = ['JavascriptLexer', 'KalLexer', 'LiveScriptLexer', 'DartLexer', + 'TypeScriptLexer', 'LassoLexer', 'ObjectiveJLexer', + 'CoffeeScriptLexer', 'MaskLexer'] + +JS_IDENT_START = ('(?:[$_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + + ']|\\\\u[a-fA-F0-9]{4})') +JS_IDENT_PART = ('(?:[$' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl', + 'Mn', 'Mc', 'Nd', 'Pc') + + u'\u200c\u200d]|\\\\u[a-fA-F0-9]{4})') +JS_IDENT = JS_IDENT_START + '(?:' + JS_IDENT_PART + ')*' + + +class JavascriptLexer(RegexLexer): + """ + For JavaScript source code. + """ + + name = 'JavaScript' + aliases = ['js', 'javascript'] + filenames = ['*.js', ] + mimetypes = ['application/javascript', 'application/x-javascript', + 'text/x-javascript', 'text/javascript', ] + + flags = re.DOTALL | re.UNICODE | re.MULTILINE + + tokens = { + 'commentsandwhitespace': [ + (r'\s+', Text), + (r')?', Other, 'delimiters'), + (r'\s+', Other), + default(('delimiters', 'lassofile')), + ], + 'delimiters': [ + (r'\[no_square_brackets\]', Comment.Preproc, 'nosquarebrackets'), + (r'\[noprocess\]', Comment.Preproc, 'noprocess'), + (r'\[', Comment.Preproc, 'squarebrackets'), + (r'<\?(LassoScript|lasso|=)', Comment.Preproc, 'anglebrackets'), + (r'<(!--.*?-->)?', Other), + (r'[^[<]+', Other), + ], + 'nosquarebrackets': [ + (r'<\?(LassoScript|lasso|=)', Comment.Preproc, 'anglebrackets'), + (r'<', Other), + (r'[^<]+', Other), + ], + 'noprocess': [ + (r'\[/noprocess\]', Comment.Preproc, '#pop'), + (r'\[', Other), + (r'[^[]', Other), + ], + 'squarebrackets': [ + (r'\]', Comment.Preproc, '#pop'), + include('lasso'), + ], + 'anglebrackets': [ + (r'\?>', Comment.Preproc, '#pop'), + include('lasso'), + ], + 'lassofile': [ + 
(r'\]|\?>', Comment.Preproc, '#pop'), + include('lasso'), + ], + 'whitespacecomments': [ + (r'\s+', Text), + (r'//.*?\n', Comment.Single), + (r'/\*\*!.*?\*/', String.Doc), + (r'/\*.*?\*/', Comment.Multiline), + ], + 'lasso': [ + # whitespace/comments + include('whitespacecomments'), + + # literals + (r'\d*\.\d+(e[+-]?\d+)?', Number.Float), + (r'0x[\da-f]+', Number.Hex), + (r'\d+', Number.Integer), + (r'([+-]?)(infinity|NaN)\b', bygroups(Operator, Number)), + (r"'", String.Single, 'singlestring'), + (r'"', String.Double, 'doublestring'), + (r'`[^`]*`', String.Backtick), + + # names + (r'\$[a-z_][\w.]*', Name.Variable), + (r'#([a-z_][\w.]*|\d+)', Name.Variable.Instance), + (r"(\.)('[a-z_][\w.]*')", + bygroups(Name.Builtin.Pseudo, Name.Variable.Class)), + (r"(self)(\s*->\s*)('[a-z_][\w.]*')", + bygroups(Name.Builtin.Pseudo, Operator, Name.Variable.Class)), + (r'(\.\.?)([a-z_][\w.]*(=(?!=))?)', + bygroups(Name.Builtin.Pseudo, Name.Other.Member)), + (r'(->\\?\s*|&\s*)([a-z_][\w.]*(=(?!=))?)', + bygroups(Operator, Name.Other.Member)), + (r'(self|inherited)\b', Name.Builtin.Pseudo), + (r'-[a-z_][\w.]*', Name.Attribute), + (r'::\s*[a-z_][\w.]*', Name.Label), + (r'(error_(code|msg)_\w+|Error_AddError|Error_ColumnRestriction|' + r'Error_DatabaseConnectionUnavailable|Error_DatabaseTimeout|' + r'Error_DeleteError|Error_FieldRestriction|Error_FileNotFound|' + r'Error_InvalidDatabase|Error_InvalidPassword|' + r'Error_InvalidUsername|Error_ModuleNotFound|' + r'Error_NoError|Error_NoPermission|Error_OutOfMemory|' + r'Error_ReqColumnMissing|Error_ReqFieldMissing|' + r'Error_RequiredColumnMissing|Error_RequiredFieldMissing|' + r'Error_UpdateError)\b', Name.Exception), + + # definitions + (r'(define)(\s+)([a-z_][\w.]*)(\s*=>\s*)(type|trait|thread)\b', + bygroups(Keyword.Declaration, Text, Name.Class, Operator, Keyword)), + (r'(define)(\s+)([a-z_][\w.]*)(\s*->\s*)([a-z_][\w.]*=?|[-+*/%])', + bygroups(Keyword.Declaration, Text, Name.Class, Operator, + Name.Function), 'signature'), + 
(r'(define)(\s+)([a-z_][\w.]*)', + bygroups(Keyword.Declaration, Text, Name.Function), 'signature'), + (r'(public|protected|private|provide)(\s+)(([a-z_][\w.]*=?|[-+*/%])' + r'(?=\s*\())', bygroups(Keyword, Text, Name.Function), + 'signature'), + (r'(public|protected|private|provide)(\s+)([a-z_][\w.]*)', + bygroups(Keyword, Text, Name.Function)), + + # keywords + (r'(true|false|none|minimal|full|all|void)\b', Keyword.Constant), + (r'(local|var|variable|global|data(?=\s))\b', Keyword.Declaration), + (r'(array|date|decimal|duration|integer|map|pair|string|tag|xml|' + r'null|bytes|list|queue|set|stack|staticarray|tie)\b', Keyword.Type), + (r'([a-z_][\w.]*)(\s+)(in)\b', bygroups(Name, Text, Keyword)), + (r'(let|into)(\s+)([a-z_][\w.]*)', bygroups(Keyword, Text, Name)), + (r'require\b', Keyword, 'requiresection'), + (r'(/?)(Namespace_Using)\b', bygroups(Punctuation, Keyword.Namespace)), + (r'(/?)(Cache|Database_Names|Database_SchemaNames|' + r'Database_TableNames|Define_Tag|Define_Type|Email_Batch|' + r'Encode_Set|HTML_Comment|Handle|Handle_Error|Header|If|Inline|' + r'Iterate|LJAX_Target|Link|Link_CurrentAction|Link_CurrentGroup|' + r'Link_CurrentRecord|Link_Detail|Link_FirstGroup|' + r'Link_FirstRecord|Link_LastGroup|Link_LastRecord|Link_NextGroup|' + r'Link_NextRecord|Link_PrevGroup|Link_PrevRecord|Log|Loop|' + r'NoProcess|Output_None|Portal|Private|Protect|Records|Referer|' + r'Referrer|Repeating|ResultSet|Rows|Search_Args|Search_Arguments|' + r'Select|Sort_Args|Sort_Arguments|Thread_Atomic|Value_List|While|' + r'Abort|Case|Else|If_Empty|If_False|If_Null|If_True|Loop_Abort|' + r'Loop_Continue|Loop_Count|Params|Params_Up|Return|Return_Value|' + r'Run_Children|SOAP_DefineTag|SOAP_LastRequest|SOAP_LastResponse|' + r'Tag_Name|ascending|average|by|define|descending|do|equals|' + r'frozen|group|handle_failure|import|in|into|join|let|match|max|' + r'min|on|order|parent|protected|provide|public|require|returnhome|' + 
r'skip|split_thread|sum|take|thread|to|trait|type|where|with|' + r'yield|yieldhome)\b', + bygroups(Punctuation, Keyword)), + + # other + (r',', Punctuation, 'commamember'), + (r'(and|or|not)\b', Operator.Word), + (r'([a-z_][\w.]*)(\s*::\s*[a-z_][\w.]*)?(\s*=(?!=))', + bygroups(Name, Name.Label, Operator)), + (r'(/?)([\w.]+)', bygroups(Punctuation, Name.Other)), + (r'(=)(n?bw|n?ew|n?cn|lte?|gte?|n?eq|n?rx|ft)\b', + bygroups(Operator, Operator.Word)), + (r':=|[-+*/%=<>&|!?\\]+', Operator), + (r'[{}():;,@^]', Punctuation), + ], + 'singlestring': [ + (r"'", String.Single, '#pop'), + (r"[^'\\]+", String.Single), + include('escape'), + (r"\\", String.Single), + ], + 'doublestring': [ + (r'"', String.Double, '#pop'), + (r'[^"\\]+', String.Double), + include('escape'), + (r'\\', String.Double), + ], + 'escape': [ + (r'\\(U[\da-f]{8}|u[\da-f]{4}|x[\da-f]{1,2}|[0-7]{1,3}|:[^:]+:|' + r'[abefnrtv?"\'\\]|$)', String.Escape), + ], + 'signature': [ + (r'=>', Operator, '#pop'), + (r'\)', Punctuation, '#pop'), + (r'[(,]', Punctuation, 'parameter'), + include('lasso'), + ], + 'parameter': [ + (r'\)', Punctuation, '#pop'), + (r'-?[a-z_][\w.]*', Name.Attribute, '#pop'), + (r'\.\.\.', Name.Builtin.Pseudo), + include('lasso'), + ], + 'requiresection': [ + (r'(([a-z_][\w.]*=?|[-+*/%])(?=\s*\())', Name, 'requiresignature'), + (r'(([a-z_][\w.]*=?|[-+*/%])(?=(\s*::\s*[\w.]+)?\s*,))', Name), + (r'[a-z_][\w.]*=?|[-+*/%]', Name, '#pop'), + (r'::\s*[a-z_][\w.]*', Name.Label), + (r',', Punctuation), + include('whitespacecomments'), + ], + 'requiresignature': [ + (r'(\)(?=(\s*::\s*[\w.]+)?\s*,))', Punctuation, '#pop'), + (r'\)', Punctuation, '#pop:2'), + (r'-?[a-z_][\w.]*', Name.Attribute), + (r'::\s*[a-z_][\w.]*', Name.Label), + (r'\.\.\.', Name.Builtin.Pseudo), + (r'[(,]', Punctuation), + include('whitespacecomments'), + ], + 'commamember': [ + (r'(([a-z_][\w.]*=?|[-+*/%])' + r'(?=\s*(\(([^()]*\([^()]*\))*[^)]*\)\s*)?(::[\w.\s]+)?=>))', + Name.Function, 'signature'), + 
include('whitespacecomments'), + default('#pop'), + ], + } + + def __init__(self, **options): + self.builtinshighlighting = get_bool_opt( + options, 'builtinshighlighting', True) + self.requiredelimiters = get_bool_opt( + options, 'requiredelimiters', False) + + self._builtins = set() + self._members = set() + if self.builtinshighlighting: + from pygments.lexers._lasso_builtins import BUILTINS, MEMBERS + for key, value in iteritems(BUILTINS): + self._builtins.update(value) + for key, value in iteritems(MEMBERS): + self._members.update(value) + RegexLexer.__init__(self, **options) + + def get_tokens_unprocessed(self, text): + stack = ['root'] + if self.requiredelimiters: + stack.append('delimiters') + for index, token, value in \ + RegexLexer.get_tokens_unprocessed(self, text, stack): + if (token is Name.Other and value.lower() in self._builtins or + token is Name.Other.Member and + value.lower().rstrip('=') in self._members): + yield index, Name.Builtin, value + continue + yield index, token, value + + def analyse_text(text): + rv = 0.0 + if 'bin/lasso9' in text: + rv += 0.8 + if re.search(r'<\?lasso', text, re.I): + rv += 0.4 + if re.search(r'local\(', text, re.I): + rv += 0.4 + return rv + + +class ObjectiveJLexer(RegexLexer): + """ + For Objective-J source code with preprocessor directives. + + .. 
versionadded:: 1.3 + """ + + name = 'Objective-J' + aliases = ['objective-j', 'objectivej', 'obj-j', 'objj'] + filenames = ['*.j'] + mimetypes = ['text/x-objective-j'] + + #: optional Comment or Whitespace + _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)*' + + flags = re.DOTALL | re.MULTILINE + + tokens = { + 'root': [ + include('whitespace'), + + # function definition + (r'^(' + _ws + r'[+-]' + _ws + r')([(a-zA-Z_].*?[^(])(' + _ws + r'\{)', + bygroups(using(this), using(this, state='function_signature'), + using(this))), + + # class definition + (r'(@interface|@implementation)(\s+)', bygroups(Keyword, Text), + 'classname'), + (r'(@class|@protocol)(\s*)', bygroups(Keyword, Text), + 'forward_classname'), + (r'(\s*)(@end)(\s*)', bygroups(Text, Keyword, Text)), + + include('statements'), + ('[{()}]', Punctuation), + (';', Punctuation), + ], + 'whitespace': [ + (r'(@import)(\s+)("(?:\\\\|\\"|[^"])*")', + bygroups(Comment.Preproc, Text, String.Double)), + (r'(@import)(\s+)(<(?:\\\\|\\>|[^>])*>)', + bygroups(Comment.Preproc, Text, String.Double)), + (r'(#(?:include|import))(\s+)("(?:\\\\|\\"|[^"])*")', + bygroups(Comment.Preproc, Text, String.Double)), + (r'(#(?:include|import))(\s+)(<(?:\\\\|\\>|[^>])*>)', + bygroups(Comment.Preproc, Text, String.Double)), + + (r'#if\s+0', Comment.Preproc, 'if0'), + (r'#', Comment.Preproc, 'macro'), + + (r'\n', Text), + (r'\s+', Text), + (r'\\\n', Text), # line continuation + (r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single), + (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), + (r'', Comment, '#pop'), + (r'[^\-]+|-', Comment), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/modeling.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/modeling.py new file mode 100644 index 0000000..1552fbf --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/modeling.py @@ -0,0 +1,356 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.modeling + 
~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for modeling languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include, bygroups, using, default +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +from pygments.lexers.html import HtmlLexer +from pygments.lexers import _stan_builtins + +__all__ = ['ModelicaLexer', 'BugsLexer', 'JagsLexer', 'StanLexer'] + + +class ModelicaLexer(RegexLexer): + """ + For `Modelica `_ source code. + + .. versionadded:: 1.1 + """ + name = 'Modelica' + aliases = ['modelica'] + filenames = ['*.mo'] + mimetypes = ['text/x-modelica'] + + flags = re.DOTALL | re.MULTILINE + + _name = r"(?:'(?:[^\\']|\\.)+'|[a-zA-Z_]\w*)" + + tokens = { + 'whitespace': [ + (u'[\\s\ufeff]+', Text), + (r'//[^\n]*\n?', Comment.Single), + (r'/\*.*?\*/', Comment.Multiline) + ], + 'root': [ + include('whitespace'), + (r'"', String.Double, 'string'), + (r'[()\[\]{},;]+', Punctuation), + (r'\.?[*^/+-]|\.|<>|[<>:=]=?', Operator), + (r'\d+(\.?\d*[eE][-+]?\d+|\.\d*)', Number.Float), + (r'\d+', Number.Integer), + (r'(abs|acos|actualStream|array|asin|assert|AssertionLevel|atan|' + r'atan2|backSample|Boolean|cardinality|cat|ceil|change|Clock|' + r'Connections|cos|cosh|cross|delay|diagonal|div|edge|exp|' + r'ExternalObject|fill|floor|getInstanceName|hold|homotopy|' + r'identity|inStream|integer|Integer|interval|inverse|isPresent|' + r'linspace|log|log10|matrix|max|min|mod|ndims|noClock|noEvent|' + r'ones|outerProduct|pre|previous|product|Real|reinit|rem|rooted|' + r'sample|scalar|semiLinear|shiftSample|sign|sin|sinh|size|skew|' + r'smooth|spatialDistribution|sqrt|StateSelect|String|subSample|' + r'sum|superSample|symmetric|tan|tanh|terminal|terminate|time|' + r'transpose|vector|zeros)\b', Name.Builtin), + (r'(algorithm|annotation|break|connect|constant|constrainedby|der|' + 
r'discrete|each|else|elseif|elsewhen|encapsulated|enumeration|' + r'equation|exit|expandable|extends|external|final|flow|for|if|' + r'import|impure|in|initial|inner|input|loop|nondiscrete|outer|' + r'output|parameter|partial|protected|public|pure|redeclare|' + r'replaceable|return|stream|then|when|while)\b', + Keyword.Reserved), + (r'(and|not|or)\b', Operator.Word), + (r'(block|class|connector|end|function|model|operator|package|' + r'record|type)\b', Keyword.Reserved, 'class'), + (r'(false|true)\b', Keyword.Constant), + (r'within\b', Keyword.Reserved, 'package-prefix'), + (_name, Name) + ], + 'class': [ + include('whitespace'), + (r'(function|record)\b', Keyword.Reserved), + (r'(if|for|when|while)\b', Keyword.Reserved, '#pop'), + (_name, Name.Class, '#pop'), + default('#pop') + ], + 'package-prefix': [ + include('whitespace'), + (_name, Name.Namespace, '#pop'), + default('#pop') + ], + 'string': [ + (r'"', String.Double, '#pop'), + (r'\\[\'"?\\abfnrtv]', String.Escape), + (r'(?i)<\s*html\s*>([^\\"]|\\.)+?(<\s*/\s*html\s*>|(?="))', + using(HtmlLexer)), + (r'<|\\?[^"\\<]+', String.Double) + ] + } + + +class BugsLexer(RegexLexer): + """ + Pygments Lexer for `OpenBugs `_ and WinBugs + models. + + .. 
versionadded:: 1.6 + """ + + name = 'BUGS' + aliases = ['bugs', 'winbugs', 'openbugs'] + filenames = ['*.bug'] + + _FUNCTIONS = ( + # Scalar functions + 'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh', + 'cloglog', 'cos', 'cosh', 'cumulative', 'cut', 'density', 'deviance', + 'equals', 'expr', 'gammap', 'ilogit', 'icloglog', 'integral', 'log', + 'logfact', 'loggam', 'logit', 'max', 'min', 'phi', 'post.p.value', + 'pow', 'prior.p.value', 'probit', 'replicate.post', 'replicate.prior', + 'round', 'sin', 'sinh', 'solution', 'sqrt', 'step', 'tan', 'tanh', + 'trunc', + # Vector functions + 'inprod', 'interp.lin', 'inverse', 'logdet', 'mean', 'eigen.vals', + 'ode', 'prod', 'p.valueM', 'rank', 'ranked', 'replicate.postM', + 'sd', 'sort', 'sum', + # Special + 'D', 'I', 'F', 'T', 'C') + """ OpenBUGS built-in functions + + From http://www.openbugs.info/Manuals/ModelSpecification.html#ContentsAII + + This also includes + + - T, C, I : Truncation and censoring. + ``T`` and ``C`` are in OpenBUGS. ``I`` in WinBUGS. 
+ - D : ODE + - F : Functional http://www.openbugs.info/Examples/Functionals.html + + """ + + _DISTRIBUTIONS = ('dbern', 'dbin', 'dcat', 'dnegbin', 'dpois', + 'dhyper', 'dbeta', 'dchisqr', 'ddexp', 'dexp', + 'dflat', 'dgamma', 'dgev', 'df', 'dggamma', 'dgpar', + 'dloglik', 'dlnorm', 'dlogis', 'dnorm', 'dpar', + 'dt', 'dunif', 'dweib', 'dmulti', 'ddirch', 'dmnorm', + 'dmt', 'dwish') + """ OpenBUGS built-in distributions + + Functions from + http://www.openbugs.info/Manuals/ModelSpecification.html#ContentsAI + """ + + tokens = { + 'whitespace': [ + (r"\s+", Text), + ], + 'comments': [ + # Comments + (r'#.*$', Comment.Single), + ], + 'root': [ + # Comments + include('comments'), + include('whitespace'), + # Block start + (r'(model)(\s+)(\{)', + bygroups(Keyword.Namespace, Text, Punctuation)), + # Reserved Words + (r'(for|in)(?![\w.])', Keyword.Reserved), + # Built-in Functions + (r'(%s)(?=\s*\()' + % r'|'.join(_FUNCTIONS + _DISTRIBUTIONS), + Name.Builtin), + # Regular variable names + (r'[A-Za-z][\w.]*', Name), + # Number Literals + (r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', Number), + # Punctuation + (r'\[|\]|\(|\)|:|,|;', Punctuation), + # Assignment operators + # SLexer makes these tokens Operators. + (r'<-|~', Operator), + # Infix and prefix operators + (r'\+|-|\*|/', Operator), + # Block + (r'[{}]', Punctuation), + ] + } + + def analyse_text(text): + if re.search(r"^\s*model\s*{", text, re.M): + return 0.7 + else: + return 0.0 + + +class JagsLexer(RegexLexer): + """ + Pygments Lexer for JAGS. + + .. 
versionadded:: 1.6 + """ + + name = 'JAGS' + aliases = ['jags'] + filenames = ['*.jag', '*.bug'] + + # JAGS + _FUNCTIONS = ( + 'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh', + 'cos', 'cosh', 'cloglog', + 'equals', 'exp', 'icloglog', 'ifelse', 'ilogit', 'log', 'logfact', + 'loggam', 'logit', 'phi', 'pow', 'probit', 'round', 'sin', 'sinh', + 'sqrt', 'step', 'tan', 'tanh', 'trunc', 'inprod', 'interp.lin', + 'logdet', 'max', 'mean', 'min', 'prod', 'sum', 'sd', 'inverse', + 'rank', 'sort', 't', 'acos', 'acosh', 'asin', 'asinh', 'atan', + # Truncation/Censoring (should I include) + 'T', 'I') + # Distributions with density, probability and quartile functions + _DISTRIBUTIONS = tuple('[dpq]%s' % x for x in + ('bern', 'beta', 'dchiqsqr', 'ddexp', 'dexp', + 'df', 'gamma', 'gen.gamma', 'logis', 'lnorm', + 'negbin', 'nchisqr', 'norm', 'par', 'pois', 'weib')) + # Other distributions without density and probability + _OTHER_DISTRIBUTIONS = ( + 'dt', 'dunif', 'dbetabin', 'dbern', 'dbin', 'dcat', 'dhyper', + 'ddirch', 'dmnorm', 'dwish', 'dmt', 'dmulti', 'dbinom', 'dchisq', + 'dnbinom', 'dweibull', 'ddirich') + + tokens = { + 'whitespace': [ + (r"\s+", Text), + ], + 'names': [ + # Regular variable names + (r'[a-zA-Z][\w.]*\b', Name), + ], + 'comments': [ + # do not use stateful comments + (r'(?s)/\*.*?\*/', Comment.Multiline), + # Comments + (r'#.*$', Comment.Single), + ], + 'root': [ + # Comments + include('comments'), + include('whitespace'), + # Block start + (r'(model|data)(\s+)(\{)', + bygroups(Keyword.Namespace, Text, Punctuation)), + (r'var(?![\w.])', Keyword.Declaration), + # Reserved Words + (r'(for|in)(?![\w.])', Keyword.Reserved), + # Builtins + # Need to use lookahead because . 
is a valid char + (r'(%s)(?=\s*\()' % r'|'.join(_FUNCTIONS + + _DISTRIBUTIONS + + _OTHER_DISTRIBUTIONS), + Name.Builtin), + # Names + include('names'), + # Number Literals + (r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', Number), + (r'\[|\]|\(|\)|:|,|;', Punctuation), + # Assignment operators + (r'<-|~', Operator), + # # JAGS includes many more than OpenBUGS + (r'\+|-|\*|\/|\|\|[&]{2}|[<>=]=?|\^|%.*?%', Operator), + (r'[{}]', Punctuation), + ] + } + + def analyse_text(text): + if re.search(r'^\s*model\s*\{', text, re.M): + if re.search(r'^\s*data\s*\{', text, re.M): + return 0.9 + elif re.search(r'^\s*var', text, re.M): + return 0.9 + else: + return 0.3 + else: + return 0 + + +class StanLexer(RegexLexer): + """Pygments Lexer for Stan models. + + The Stan modeling language is specified in the *Stan Modeling Language + User's Guide and Reference Manual, v2.4.0*, + `pdf `__. + + .. versionadded:: 1.6 + """ + + name = 'Stan' + aliases = ['stan'] + filenames = ['*.stan'] + + tokens = { + 'whitespace': [ + (r"\s+", Text), + ], + 'comments': [ + (r'(?s)/\*.*?\*/', Comment.Multiline), + # Comments + (r'(//|#).*$', Comment.Single), + ], + 'root': [ + # Stan is more restrictive on strings than this regex + (r'"[^"]*"', String), + # Comments + include('comments'), + # block start + include('whitespace'), + # Block start + (r'(%s)(\s*)(\{)' % + r'|'.join(('functions', 'data', r'transformed\s+?data', + 'parameters', r'transformed\s+parameters', + 'model', r'generated\s+quantities')), + bygroups(Keyword.Namespace, Text, Punctuation)), + # Reserved Words + (r'(%s)\b' % r'|'.join(_stan_builtins.KEYWORDS), Keyword), + # Truncation + (r'T(?=\s*\[)', Keyword), + # Data types + (r'(%s)\b' % r'|'.join(_stan_builtins.TYPES), Keyword.Type), + # Punctuation + (r"[;:,\[\]()]", Punctuation), + # Builtin + (r'(%s)(?=\s*\()' + % r'|'.join(_stan_builtins.FUNCTIONS + + _stan_builtins.DISTRIBUTIONS), + Name.Builtin), + # Special names ending in __, like lp__ + (r'[A-Za-z]\w*__\b', 
Name.Builtin.Pseudo), + (r'(%s)\b' % r'|'.join(_stan_builtins.RESERVED), Keyword.Reserved), + # Regular variable names + (r'[A-Za-z]\w*\b', Name), + # Real Literals + (r'-?[0-9]+(\.[0-9]+)?[eE]-?[0-9]+', Number.Float), + (r'-?[0-9]*\.[0-9]*', Number.Float), + # Integer Literals + (r'-?[0-9]+', Number.Integer), + # Assignment operators + # SLexer makes these tokens Operators. + (r'<-|~', Operator), + # Infix, prefix and postfix operators (and = ) + (r"\+|-|\.?\*|\.?/|\\|'|\^|==?|!=?|<=?|>=?|\|\||&&", Operator), + # Block delimiters + (r'[{}]', Punctuation), + ] + } + + def analyse_text(text): + if re.search(r'^\s*parameters\s*\{', text, re.M): + return 1.0 + else: + return 0.0 diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/nimrod.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/nimrod.py new file mode 100644 index 0000000..de2eafb --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/nimrod.py @@ -0,0 +1,159 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.nimrod + ~~~~~~~~~~~~~~~~~~~~~~ + + Lexer for the Nimrod language. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include, default +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Error + +__all__ = ['NimrodLexer'] + + +class NimrodLexer(RegexLexer): + """ + For `Nimrod `_ source code. + + .. 
versionadded:: 1.5 + """ + + name = 'Nimrod' + aliases = ['nimrod', 'nim'] + filenames = ['*.nim', '*.nimrod'] + mimetypes = ['text/x-nimrod'] + + flags = re.MULTILINE | re.IGNORECASE | re.UNICODE + + def underscorize(words): + newWords = [] + new = "" + for word in words: + for ch in word: + new += (ch + "_?") + newWords.append(new) + new = "" + return "|".join(newWords) + + keywords = [ + 'addr', 'and', 'as', 'asm', 'atomic', 'bind', 'block', 'break', + 'case', 'cast', 'const', 'continue', 'converter', 'discard', + 'distinct', 'div', 'elif', 'else', 'end', 'enum', 'except', 'finally', + 'for', 'generic', 'if', 'implies', 'in', 'yield', + 'is', 'isnot', 'iterator', 'lambda', 'let', 'macro', 'method', + 'mod', 'not', 'notin', 'object', 'of', 'or', 'out', 'proc', + 'ptr', 'raise', 'ref', 'return', 'shl', 'shr', 'template', 'try', + 'tuple', 'type', 'when', 'while', 'with', 'without', 'xor' + ] + + keywordsPseudo = [ + 'nil', 'true', 'false' + ] + + opWords = [ + 'and', 'or', 'not', 'xor', 'shl', 'shr', 'div', 'mod', 'in', + 'notin', 'is', 'isnot' + ] + + types = [ + 'int', 'int8', 'int16', 'int32', 'int64', 'float', 'float32', 'float64', + 'bool', 'char', 'range', 'array', 'seq', 'set', 'string' + ] + + tokens = { + 'root': [ + (r'##.*$', String.Doc), + (r'#.*$', Comment), + (r'[*=><+\-/@$~&%!?|\\\[\]]', Operator), + (r'\.\.|\.|,|\[\.|\.\]|\{\.|\.\}|\(\.|\.\)|\{|\}|\(|\)|:|\^|`|;', + Punctuation), + + # Strings + (r'(?:[\w]+)"', String, 'rdqs'), + (r'"""', String, 'tdqs'), + ('"', String, 'dqs'), + + # Char + ("'", String.Char, 'chars'), + + # Keywords + (r'(%s)\b' % underscorize(opWords), Operator.Word), + (r'(p_?r_?o_?c_?\s)(?![(\[\]])', Keyword, 'funcname'), + (r'(%s)\b' % underscorize(keywords), Keyword), + (r'(%s)\b' % underscorize(['from', 'import', 'include']), + Keyword.Namespace), + (r'(v_?a_?r)\b', Keyword.Declaration), + (r'(%s)\b' % underscorize(types), Keyword.Type), + (r'(%s)\b' % underscorize(keywordsPseudo), Keyword.Pseudo), + # Identifiers + 
(r'\b((?![_\d])\w)(((?!_)\w)|(_(?!_)\w))*', Name), + # Numbers + (r'[0-9][0-9_]*(?=([e.]|\'f(32|64)))', + Number.Float, ('float-suffix', 'float-number')), + (r'0x[a-f0-9][a-f0-9_]*', Number.Hex, 'int-suffix'), + (r'0b[01][01_]*', Number.Bin, 'int-suffix'), + (r'0o[0-7][0-7_]*', Number.Oct, 'int-suffix'), + (r'[0-9][0-9_]*', Number.Integer, 'int-suffix'), + # Whitespace + (r'\s+', Text), + (r'.+$', Error), + ], + 'chars': [ + (r'\\([\\abcefnrtvl"\']|x[a-f0-9]{2}|[0-9]{1,3})', String.Escape), + (r"'", String.Char, '#pop'), + (r".", String.Char) + ], + 'strings': [ + (r'(?`_ source. + + .. versionadded:: 2.0 + """ + + name = 'Nit' + aliases = ['nit'] + filenames = ['*.nit'] + tokens = { + 'root': [ + (r'#.*?$', Comment.Single), + (words(( + 'package', 'module', 'import', 'class', 'abstract', 'interface', + 'universal', 'enum', 'end', 'fun', 'type', 'init', 'redef', + 'isa', 'do', 'readable', 'writable', 'var', 'intern', 'extern', + 'public', 'protected', 'private', 'intrude', 'if', 'then', + 'else', 'while', 'loop', 'for', 'in', 'and', 'or', 'not', + 'implies', 'return', 'continue', 'break', 'abort', 'assert', + 'new', 'is', 'once', 'super', 'self', 'true', 'false', 'nullable', + 'null', 'as', 'isset', 'label', '__debug__'), suffix=r'(?=[\r\n\t( ])'), + Keyword), + (r'[A-Z]\w*', Name.Class), + (r'"""(([^\'\\]|\\.)|\\r|\\n)*((\{\{?)?(""?\{\{?)*""""*)', String), # Simple long string + (r'\'\'\'(((\\.|[^\'\\])|\\r|\\n)|\'((\\.|[^\'\\])|\\r|\\n)|' + r'\'\'((\\.|[^\'\\])|\\r|\\n))*\'\'\'', String), # Simple long string alt + (r'"""(([^\'\\]|\\.)|\\r|\\n)*((""?)?(\{\{?""?)*\{\{\{\{*)', String), # Start long string + (r'\}\}\}(((\\.|[^\'\\])|\\r|\\n))*(""?)?(\{\{?""?)*\{\{\{\{*', String), # Mid long string + (r'\}\}\}(((\\.|[^\'\\])|\\r|\\n))*(\{\{?)?(""?\{\{?)*""""*', String), # End long string + (r'"(\\.|([^"}{\\]))*"', String), # Simple String + (r'"(\\.|([^"}{\\]))*\{', String), # Start string + (r'\}(\\.|([^"}{\\]))*\{', String), # Mid String + (r'\}(\\.|([^"}{\\]))*"', 
String), # End String + (r'(\'[^\'\\]\')|(\'\\.\')', String.Char), + (r'[0-9]+', Number.Integer), + (r'[0-9]*.[0-9]+', Number.Float), + (r'0(x|X)[0-9A-Fa-f]+', Number.Hex), + (r'[a-z]\w*', Name), + (r'_\w+', Name.Variable.Instance), + (r'==|!=|<==>|>=|>>|>|<=|<<|<|\+|-|=|/|\*|%|\+=|-=|!|@', Operator), + (r'\(|\)|\[|\]|,|\.\.\.|\.\.|\.|::|:', Punctuation), + (r'`\{[^`]*`\}', Text), # Extern blocks won't be Lexed by Nit + (r'[\r\n\t ]+', Text), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/nix.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/nix.py new file mode 100644 index 0000000..1bf533d --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/nix.py @@ -0,0 +1,136 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.nix + ~~~~~~~~~~~~~~~~~~~ + + Lexers for the NixOS Nix language. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Literal + +__all__ = ['NixLexer'] + + +class NixLexer(RegexLexer): + """ + For the `Nix language `_. + + .. 
versionadded:: 2.0 + """ + + name = 'Nix' + aliases = ['nixos', 'nix'] + filenames = ['*.nix'] + mimetypes = ['text/x-nix'] + + flags = re.MULTILINE | re.UNICODE + + keywords = ['rec', 'with', 'let', 'in', 'inherit', 'assert', 'if', + 'else', 'then', '...'] + builtins = ['import', 'abort', 'baseNameOf', 'dirOf', 'isNull', 'builtins', + 'map', 'removeAttrs', 'throw', 'toString', 'derivation'] + operators = ['++', '+', '?', '.', '!', '//', '==', + '!=', '&&', '||', '->', '='] + + punctuations = ["(", ")", "[", "]", ";", "{", "}", ":", ",", "@"] + + tokens = { + 'root': [ + # comments starting with # + (r'#.*$', Comment.Single), + + # multiline comments + (r'/\*', Comment.Multiline, 'comment'), + + # whitespace + (r'\s+', Text), + + # keywords + ('(%s)' % '|'.join(re.escape(entry) + '\\b' for entry in keywords), Keyword), + + # highlight the builtins + ('(%s)' % '|'.join(re.escape(entry) + '\\b' for entry in builtins), + Name.Builtin), + + (r'\b(true|false|null)\b', Name.Constant), + + # operators + ('(%s)' % '|'.join(re.escape(entry) for entry in operators), + Operator), + + # word operators + (r'\b(or|and)\b', Operator.Word), + + # punctuations + ('(%s)' % '|'.join(re.escape(entry) for entry in punctuations), Punctuation), + + # integers + (r'[0-9]+', Number.Integer), + + # strings + (r'"', String.Double, 'doublequote'), + (r"''", String.Single, 'singlequote'), + + # paths + (r'[\w.+-]*(\/[\w.+-]+)+', Literal), + (r'\<[\w.+-]+(\/[\w.+-]+)*\>', Literal), + + # urls + (r'[a-zA-Z][a-zA-Z0-9\+\-\.]*\:[\w%/?:@&=+$,\\.!~*\'-]+', Literal), + + # names of variables + (r'[\w-]+\s*=', String.Symbol), + (r'[a-zA-Z_][\w\'-]*', Text), + + ], + 'comment': [ + (r'[^/*]+', Comment.Multiline), + (r'/\*', Comment.Multiline, '#push'), + (r'\*/', Comment.Multiline, '#pop'), + (r'[*/]', Comment.Multiline), + ], + 'singlequote': [ + (r"'''", String.Escape), + (r"''\$\{", String.Escape), + (r"''\n", String.Escape), + (r"''\r", String.Escape), + (r"''\t", String.Escape), + (r"''", 
String.Single, '#pop'), + (r'\$\{', String.Interpol, 'antiquote'), + (r"[^']", String.Single), + ], + 'doublequote': [ + (r'\\', String.Escape), + (r'\\"', String.Escape), + (r'\\$\{', String.Escape), + (r'"', String.Double, '#pop'), + (r'\$\{', String.Interpol, 'antiquote'), + (r'[^"]', String.Double), + ], + 'antiquote': [ + (r"\}", String.Interpol, '#pop'), + # TODO: we should probably escape also here ''${ \${ + (r"\$\{", String.Interpol, '#push'), + include('root'), + ], + } + + def analyse_text(text): + rv = 0.0 + # TODO: let/in + if re.search(r'import.+?<[^>]+>', text): + rv += 0.4 + if re.search(r'mkDerivation\s+(\(|\{|rec)', text): + rv += 0.4 + if re.search(r'=\s+mkIf\s+', text): + rv += 0.4 + if re.search(r'\{[a-zA-Z,\s]+\}:', text): + rv += 0.1 + return rv diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/objective.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/objective.py new file mode 100644 index 0000000..4e0ecf0 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/objective.py @@ -0,0 +1,487 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.objective + ~~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for Objective-C family languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include, bygroups, using, this, words, \ + inherit, default +from pygments.token import Text, Keyword, Name, String, Operator, \ + Number, Punctuation, Literal, Comment + +from pygments.lexers.c_cpp import CLexer, CppLexer + +__all__ = ['ObjectiveCLexer', 'ObjectiveCppLexer', 'LogosLexer', 'SwiftLexer'] + + +def objective(baselexer): + """ + Generate a subclass of baselexer that accepts the Objective-C syntax + extensions. 
+ """ + + # Have to be careful not to accidentally match JavaDoc/Doxygen syntax here, + # since that's quite common in ordinary C/C++ files. It's OK to match + # JavaDoc/Doxygen keywords that only apply to Objective-C, mind. + # + # The upshot of this is that we CANNOT match @class or @interface + _oc_keywords = re.compile(r'@(?:end|implementation|protocol)') + + # Matches [ ? identifier ( identifier ? ] | identifier? : ) + # (note the identifier is *optional* when there is a ':'!) + _oc_message = re.compile(r'\[\s*[a-zA-Z_]\w*\s+' + r'(?:[a-zA-Z_]\w*\s*\]|' + r'(?:[a-zA-Z_]\w*)?:)') + + class GeneratedObjectiveCVariant(baselexer): + """ + Implements Objective-C syntax on top of an existing C family lexer. + """ + + tokens = { + 'statements': [ + (r'@"', String, 'string'), + (r'@(YES|NO)', Number), + (r"@'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char), + (r'@(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float), + (r'@(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), + (r'@0x[0-9a-fA-F]+[Ll]?', Number.Hex), + (r'@0[0-7]+[Ll]?', Number.Oct), + (r'@\d+[Ll]?', Number.Integer), + (r'@\(', Literal, 'literal_number'), + (r'@\[', Literal, 'literal_array'), + (r'@\{', Literal, 'literal_dictionary'), + (words(( + '@selector', '@private', '@protected', '@public', '@encode', + '@synchronized', '@try', '@throw', '@catch', '@finally', + '@end', '@property', '@synthesize', '__bridge', '__bridge_transfer', + '__autoreleasing', '__block', '__weak', '__strong', 'weak', 'strong', + 'copy', 'retain', 'assign', 'unsafe_unretained', 'atomic', 'nonatomic', + 'readonly', 'readwrite', 'setter', 'getter', 'typeof', 'in', + 'out', 'inout', 'release', 'class', '@dynamic', '@optional', + '@required', '@autoreleasepool'), suffix=r'\b'), + Keyword), + (words(('id', 'instancetype', 'Class', 'IMP', 'SEL', 'BOOL', + 'IBOutlet', 'IBAction', 'unichar'), suffix=r'\b'), + Keyword.Type), + (r'@(true|false|YES|NO)\n', Name.Builtin), + (r'(YES|NO|nil|self|super)\b', Name.Builtin), + # 
Carbon types + (r'(Boolean|UInt8|SInt8|UInt16|SInt16|UInt32|SInt32)\b', Keyword.Type), + # Carbon built-ins + (r'(TRUE|FALSE)\b', Name.Builtin), + (r'(@interface|@implementation)(\s+)', bygroups(Keyword, Text), + ('#pop', 'oc_classname')), + (r'(@class|@protocol)(\s+)', bygroups(Keyword, Text), + ('#pop', 'oc_forward_classname')), + # @ can also prefix other expressions like @{...} or @(...) + (r'@', Punctuation), + inherit, + ], + 'oc_classname': [ + # interface definition that inherits + ('([a-zA-Z$_][\w$]*)(\s*:\s*)([a-zA-Z$_][\w$]*)?(\s*)(\{)', + bygroups(Name.Class, Text, Name.Class, Text, Punctuation), + ('#pop', 'oc_ivars')), + ('([a-zA-Z$_][\w$]*)(\s*:\s*)([a-zA-Z$_][\w$]*)?', + bygroups(Name.Class, Text, Name.Class), '#pop'), + # interface definition for a category + ('([a-zA-Z$_][\w$]*)(\s*)(\([a-zA-Z$_][\w$]*\))(\s*)(\{)', + bygroups(Name.Class, Text, Name.Label, Text, Punctuation), + ('#pop', 'oc_ivars')), + ('([a-zA-Z$_][\w$]*)(\s*)(\([a-zA-Z$_][\w$]*\))', + bygroups(Name.Class, Text, Name.Label), '#pop'), + # simple interface / implementation + ('([a-zA-Z$_][\w$]*)(\s*)(\{)', + bygroups(Name.Class, Text, Punctuation), ('#pop', 'oc_ivars')), + ('([a-zA-Z$_][\w$]*)', Name.Class, '#pop') + ], + 'oc_forward_classname': [ + ('([a-zA-Z$_][\w$]*)(\s*,\s*)', + bygroups(Name.Class, Text), 'oc_forward_classname'), + ('([a-zA-Z$_][\w$]*)(\s*;?)', + bygroups(Name.Class, Text), '#pop') + ], + 'oc_ivars': [ + include('whitespace'), + include('statements'), + (';', Punctuation), + (r'\{', Punctuation, '#push'), + (r'\}', Punctuation, '#pop'), + ], + 'root': [ + # methods + (r'^([-+])(\s*)' # method marker + r'(\(.*?\))?(\s*)' # return type + r'([a-zA-Z$_][\w$]*:?)', # begin of method name + bygroups(Punctuation, Text, using(this), + Text, Name.Function), + 'method'), + inherit, + ], + 'method': [ + include('whitespace'), + # TODO unsure if ellipses are allowed elsewhere, see + # discussion in Issue 789 + (r',', Punctuation), + (r'\.\.\.', Punctuation), + 
(r'(\(.*?\))(\s*)([a-zA-Z$_][\w$]*)', + bygroups(using(this), Text, Name.Variable)), + (r'[a-zA-Z$_][\w$]*:', Name.Function), + (';', Punctuation, '#pop'), + (r'\{', Punctuation, 'function'), + default('#pop'), + ], + 'literal_number': [ + (r'\(', Punctuation, 'literal_number_inner'), + (r'\)', Literal, '#pop'), + include('statement'), + ], + 'literal_number_inner': [ + (r'\(', Punctuation, '#push'), + (r'\)', Punctuation, '#pop'), + include('statement'), + ], + 'literal_array': [ + (r'\[', Punctuation, 'literal_array_inner'), + (r'\]', Literal, '#pop'), + include('statement'), + ], + 'literal_array_inner': [ + (r'\[', Punctuation, '#push'), + (r'\]', Punctuation, '#pop'), + include('statement'), + ], + 'literal_dictionary': [ + (r'\}', Literal, '#pop'), + include('statement'), + ], + } + + def analyse_text(text): + if _oc_keywords.search(text): + return 1.0 + elif '@"' in text: # strings + return 0.8 + elif re.search('@[0-9]+', text): + return 0.7 + elif _oc_message.search(text): + return 0.8 + return 0 + + def get_tokens_unprocessed(self, text): + from pygments.lexers._cocoa_builtins import COCOA_INTERFACES, \ + COCOA_PROTOCOLS, COCOA_PRIMITIVES + + for index, token, value in \ + baselexer.get_tokens_unprocessed(self, text): + if token is Name or token is Name.Class: + if value in COCOA_INTERFACES or value in COCOA_PROTOCOLS \ + or value in COCOA_PRIMITIVES: + token = Name.Builtin.Pseudo + + yield index, token, value + + return GeneratedObjectiveCVariant + + +class ObjectiveCLexer(objective(CLexer)): + """ + For Objective-C source code with preprocessor directives. + """ + + name = 'Objective-C' + aliases = ['objective-c', 'objectivec', 'obj-c', 'objc'] + filenames = ['*.m', '*.h'] + mimetypes = ['text/x-objective-c'] + priority = 0.05 # Lower than C + + +class ObjectiveCppLexer(objective(CppLexer)): + """ + For Objective-C++ source code with preprocessor directives. 
+ """ + + name = 'Objective-C++' + aliases = ['objective-c++', 'objectivec++', 'obj-c++', 'objc++'] + filenames = ['*.mm', '*.hh'] + mimetypes = ['text/x-objective-c++'] + priority = 0.05 # Lower than C++ + + +class LogosLexer(ObjectiveCppLexer): + """ + For Logos + Objective-C source code with preprocessor directives. + + .. versionadded:: 1.6 + """ + + name = 'Logos' + aliases = ['logos'] + filenames = ['*.x', '*.xi', '*.xm', '*.xmi'] + mimetypes = ['text/x-logos'] + priority = 0.25 + + tokens = { + 'statements': [ + (r'(%orig|%log)\b', Keyword), + (r'(%c)\b(\()(\s*)([a-zA-Z$_][\w$]*)(\s*)(\))', + bygroups(Keyword, Punctuation, Text, Name.Class, Text, Punctuation)), + (r'(%init)\b(\()', + bygroups(Keyword, Punctuation), 'logos_init_directive'), + (r'(%init)(?=\s*;)', bygroups(Keyword)), + (r'(%hook|%group)(\s+)([a-zA-Z$_][\w$]+)', + bygroups(Keyword, Text, Name.Class), '#pop'), + (r'(%subclass)(\s+)', bygroups(Keyword, Text), + ('#pop', 'logos_classname')), + inherit, + ], + 'logos_init_directive': [ + ('\s+', Text), + (',', Punctuation, ('logos_init_directive', '#pop')), + ('([a-zA-Z$_][\w$]*)(\s*)(=)(\s*)([^);]*)', + bygroups(Name.Class, Text, Punctuation, Text, Text)), + ('([a-zA-Z$_][\w$]*)', Name.Class), + ('\)', Punctuation, '#pop'), + ], + 'logos_classname': [ + ('([a-zA-Z$_][\w$]*)(\s*:\s*)([a-zA-Z$_][\w$]*)?', + bygroups(Name.Class, Text, Name.Class), '#pop'), + ('([a-zA-Z$_][\w$]*)', Name.Class, '#pop') + ], + 'root': [ + (r'(%subclass)(\s+)', bygroups(Keyword, Text), + 'logos_classname'), + (r'(%hook|%group)(\s+)([a-zA-Z$_][\w$]+)', + bygroups(Keyword, Text, Name.Class)), + (r'(%config)(\s*\(\s*)(\w+)(\s*=\s*)(.*?)(\s*\)\s*)', + bygroups(Keyword, Text, Name.Variable, Text, String, Text)), + (r'(%ctor)(\s*)(\{)', bygroups(Keyword, Text, Punctuation), + 'function'), + (r'(%new)(\s*)(\()(\s*.*?\s*)(\))', + bygroups(Keyword, Text, Keyword, String, Keyword)), + (r'(\s*)(%end)(\s*)', bygroups(Text, Keyword, Text)), + inherit, + ], + } + + _logos_keywords = 
re.compile(r'%(?:hook|ctor|init|c\()') + + def analyse_text(text): + if LogosLexer._logos_keywords.search(text): + return 1.0 + return 0 + +class SwiftLexer(RegexLexer): + """ + For `Swift `_ source. + + .. versionadded:: 2.0 + """ + name = 'Swift' + filenames = ['*.swift'] + aliases = ['swift'] + mimetypes = ['text/x-swift'] + + tokens = { + 'root': [ + # Whitespace and Comments + (r'\n', Text), + (r'\s+', Text), + (r'//', Comment.Single, 'comment-single'), + (r'/\*', Comment.Multiline, 'comment-multi'), + (r'#(if|elseif|else|endif)\b', Comment.Preproc, 'preproc'), + + # Keywords + include('keywords'), + + # Global Types + (r'(Array|AutoreleasingUnsafeMutablePointer|BidirectionalReverseView' + r'|Bit|Bool|CFunctionPointer|COpaquePointer|CVaListPointer' + r'|Character|ClosedInterval|CollectionOfOne|ContiguousArray' + r'|Dictionary|DictionaryGenerator|DictionaryIndex|Double' + r'|EmptyCollection|EmptyGenerator|EnumerateGenerator' + r'|EnumerateSequence|FilterCollectionView' + r'|FilterCollectionViewIndex|FilterGenerator|FilterSequenceView' + r'|Float|Float80|FloatingPointClassification|GeneratorOf' + r'|GeneratorOfOne|GeneratorSequence|HalfOpenInterval|HeapBuffer' + r'|HeapBufferStorage|ImplicitlyUnwrappedOptional|IndexingGenerator' + r'|Int|Int16|Int32|Int64|Int8|LazyBidirectionalCollection' + r'|LazyForwardCollection|LazyRandomAccessCollection' + r'|LazySequence|MapCollectionView|MapSequenceGenerator' + r'|MapSequenceView|MirrorDisposition|ObjectIdentifier|OnHeap' + r'|Optional|PermutationGenerator|QuickLookObject' + r'|RandomAccessReverseView|Range|RangeGenerator|RawByte|Repeat' + r'|ReverseBidirectionalIndex|ReverseRandomAccessIndex|SequenceOf' + r'|SinkOf|Slice|StaticString|StrideThrough|StrideThroughGenerator' + r'|StrideTo|StrideToGenerator|String|UInt|UInt16|UInt32|UInt64' + r'|UInt8|UTF16|UTF32|UTF8|UnicodeDecodingResult|UnicodeScalar' + r'|Unmanaged|UnsafeBufferPointer|UnsafeBufferPointerGenerator' + 
r'|UnsafeMutableBufferPointer|UnsafeMutablePointer|UnsafePointer' + r'|Zip2|ZipGenerator2' + # Protocols + r'|AbsoluteValuable|AnyObject|ArrayLiteralConvertible' + r'|BidirectionalIndexType|BitwiseOperationsType' + r'|BooleanLiteralConvertible|BooleanType|CVarArgType' + r'|CollectionType|Comparable|DebugPrintable' + r'|DictionaryLiteralConvertible|Equatable' + r'|ExtendedGraphemeClusterLiteralConvertible' + r'|ExtensibleCollectionType|FloatLiteralConvertible' + r'|FloatingPointType|ForwardIndexType|GeneratorType|Hashable' + r'|IntegerArithmeticType|IntegerLiteralConvertible|IntegerType' + r'|IntervalType|MirrorType|MutableCollectionType|MutableSliceable' + r'|NilLiteralConvertible|OutputStreamType|Printable' + r'|RandomAccessIndexType|RangeReplaceableCollectionType' + r'|RawOptionSetType|RawRepresentable|Reflectable|SequenceType' + r'|SignedIntegerType|SignedNumberType|SinkType|Sliceable' + r'|Streamable|Strideable|StringInterpolationConvertible' + r'|StringLiteralConvertible|UnicodeCodecType' + r'|UnicodeScalarLiteralConvertible|UnsignedIntegerType' + r'|_ArrayBufferType|_BidirectionalIndexType|_CocoaStringType' + r'|_CollectionType|_Comparable|_ExtensibleCollectionType' + r'|_ForwardIndexType|_Incrementable|_IntegerArithmeticType' + r'|_IntegerType|_ObjectiveCBridgeable|_RandomAccessIndexType' + r'|_RawOptionSetType|_SequenceType|_Sequence_Type' + r'|_SignedIntegerType|_SignedNumberType|_Sliceable|_Strideable' + r'|_SwiftNSArrayRequiredOverridesType|_SwiftNSArrayType' + r'|_SwiftNSCopyingType|_SwiftNSDictionaryRequiredOverridesType' + r'|_SwiftNSDictionaryType|_SwiftNSEnumeratorType' + r'|_SwiftNSFastEnumerationType|_SwiftNSStringRequiredOverridesType' + r'|_SwiftNSStringType|_UnsignedIntegerType' + # Variables + r'|C_ARGC|C_ARGV|Process' + # Typealiases + r'|Any|AnyClass|BooleanLiteralType|CBool|CChar|CChar16|CChar32' + r'|CDouble|CFloat|CInt|CLong|CLongLong|CShort|CSignedChar' + r'|CUnsignedInt|CUnsignedLong|CUnsignedShort|CWideChar' + 
r'|ExtendedGraphemeClusterType|Float32|Float64|FloatLiteralType' + r'|IntMax|IntegerLiteralType|StringLiteralType|UIntMax|UWord' + r'|UnicodeScalarType|Void|Word' + # Foundation/Cocoa + r'|NSErrorPointer|NSObjectProtocol|Selector)\b', Name.Builtin), + # Functions + (r'(abs|advance|alignof|alignofValue|assert|assertionFailure' + r'|contains|count|countElements|debugPrint|debugPrintln|distance' + r'|dropFirst|dropLast|dump|enumerate|equal|extend|fatalError' + r'|filter|find|first|getVaList|indices|insert|isEmpty|join|last' + r'|lazy|lexicographicalCompare|map|max|maxElement|min|minElement' + r'|numericCast|overlaps|partition|precondition|preconditionFailure' + r'|prefix|print|println|reduce|reflect|removeAll|removeAtIndex' + r'|removeLast|removeRange|reverse|sizeof|sizeofValue|sort|sorted' + r'|splice|split|startsWith|stride|strideof|strideofValue|suffix' + r'|swap|toDebugString|toString|transcode|underestimateCount' + r'|unsafeAddressOf|unsafeBitCast|unsafeDowncast' + r'|withExtendedLifetime|withUnsafeMutablePointer' + r'|withUnsafeMutablePointers|withUnsafePointer|withUnsafePointers' + r'|withVaList)\b', Name.Builtin.Pseudo), + + # Implicit Block Variables + (r'\$\d+', Name.Variable), + + # Binary Literal + (r'0b[01_]+', Number.Bin), + # Octal Literal + (r'0o[0-7_]+', Number.Oct), + # Hexadecimal Literal + (r'0x[0-9a-fA-F_]+', Number.Hex), + # Decimal Literal + (r'[0-9][0-9_]*(\.[0-9_]+[eE][+\-]?[0-9_]+|' + r'\.[0-9_]*|[eE][+\-]?[0-9_]+)', Number.Float), + (r'[0-9][0-9_]*', Number.Integer), + # String Literal + (r'"', String, 'string'), + + # Operators and Punctuation + (r'[(){}\[\].,:;=@#`?]|->|[<&?](?=\w)|(?<=\w)[>!?]', Punctuation), + (r'[/=\-+!*%<>&|^?~]+', Operator), + + # Identifier + (r'[a-zA-Z_]\w*', Name) + ], + 'keywords': [ + (r'(break|case|continue|default|do|else|fallthrough|for|if|in' + r'|return|switch|where|while)\b', Keyword), + (r'@availability\([^)]+\)', Keyword.Reserved), + (r'(associativity|convenience|dynamic|didSet|final|get|infix|inout' + 
r'|lazy|left|mutating|none|nonmutating|optional|override|postfix' + r'|precedence|prefix|Protocol|required|right|set|Type|unowned|weak' + r'|willSet|@(availability|autoclosure|noreturn' + r'|NSApplicationMain|NSCopying|NSManaged|objc' + r'|UIApplicationMain|IBAction|IBDesignable|IBInspectable' + r'|IBOutlet))\b',Keyword.Reserved), + (r'(as|dynamicType|false|is|nil|self|Self|super|true|__COLUMN__' + r'|__FILE__|__FUNCTION__|__LINE__|_)\b', Keyword.Constant), + (r'import\b', Keyword.Declaration, 'module'), + (r'(class|enum|extension|struct|protocol)(\s+)([a-zA-Z_]\w*)', + bygroups(Keyword.Declaration, Text, Name.Class)), + (r'(func)(\s+)([a-zA-Z_]\w*)', + bygroups(Keyword.Declaration, Text, Name.Function)), + (r'(var|let)(\s+)([a-zA-Z_]\w*)', bygroups(Keyword.Declaration, + Text, Name.Variable)), + (r'(class|deinit|enum|extension|func|import|init|internal|let' + r'|operator|private|protocol|public|static|struct|subscript' + r'|typealias|var)\b', Keyword.Declaration) + ], + 'comment': [ + (r':param: [a-zA-Z_]\w*|:returns?:|(FIXME|MARK|TODO):', + Comment.Special) + ], + + # Nested + 'comment-single': [ + (r'\n', Text, '#pop'), + include('comment'), + (r'[^\n]', Comment.Single) + ], + 'comment-multi': [ + include('comment'), + (r'[^*/]', Comment.Multiline), + (r'/\*', Comment.Multiline, '#push'), + (r'\*/', Comment.Multiline, '#pop'), + (r'[*/]', Comment.Multiline) + ], + 'module': [ + (r'\n', Text, '#pop'), + (r'[a-zA-Z_]\w*', Name.Class), + include('root') + ], + 'preproc': [ + (r'\n', Text, '#pop'), + include('keywords'), + (r'[A-Za-z]\w*', Comment.Preproc), + include('root') + ], + 'string': [ + (r'\\\(', String.Interpol, 'string-intp'), + (r'"', String, '#pop'), + (r"""\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}""" + r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}""", String.Escape), + (r'[^\\"]+', String), + (r'\\', String) + ], + 'string-intp': [ + (r'\(', String.Interpol, '#push'), + (r'\)', String.Interpol, '#pop'), + include('root') + ] + } + + def 
get_tokens_unprocessed(self, text): + from pygments.lexers._cocoa_builtins import COCOA_INTERFACES, \ + COCOA_PROTOCOLS, COCOA_PRIMITIVES + + for index, token, value in \ + RegexLexer.get_tokens_unprocessed(self, text): + if token is Name or token is Name.Class: + if value in COCOA_INTERFACES or value in COCOA_PROTOCOLS \ + or value in COCOA_PRIMITIVES: + token = Name.Builtin.Pseudo + + yield index, token, value diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/ooc.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/ooc.py new file mode 100644 index 0000000..b58d347 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/ooc.py @@ -0,0 +1,85 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.ooc + ~~~~~~~~~~~~~~~~~~~ + + Lexers for the Ooc language. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, bygroups, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['OocLexer'] + + +class OocLexer(RegexLexer): + """ + For `Ooc `_ source code + + .. 
versionadded:: 1.2 + """ + name = 'Ooc' + aliases = ['ooc'] + filenames = ['*.ooc'] + mimetypes = ['text/x-ooc'] + + tokens = { + 'root': [ + (words(( + 'class', 'interface', 'implement', 'abstract', 'extends', 'from', + 'this', 'super', 'new', 'const', 'final', 'static', 'import', + 'use', 'extern', 'inline', 'proto', 'break', 'continue', + 'fallthrough', 'operator', 'if', 'else', 'for', 'while', 'do', + 'switch', 'case', 'as', 'in', 'version', 'return', 'true', + 'false', 'null'), prefix=r'\b', suffix=r'\b'), + Keyword), + (r'include\b', Keyword, 'include'), + (r'(cover)([ \t]+)(from)([ \t]+)(\w+[*@]?)', + bygroups(Keyword, Text, Keyword, Text, Name.Class)), + (r'(func)((?:[ \t]|\\\n)+)(~[a-z_]\w*)', + bygroups(Keyword, Text, Name.Function)), + (r'\bfunc\b', Keyword), + # Note: %= and ^= not listed on http://ooc-lang.org/syntax + (r'//.*', Comment), + (r'(?s)/\*.*?\*/', Comment.Multiline), + (r'(==?|\+=?|-[=>]?|\*=?|/=?|:=|!=?|%=?|\?|>{1,3}=?|<{1,3}=?|\.\.|' + r'&&?|\|\|?|\^=?)', Operator), + (r'(\.)([ \t]*)([a-z]\w*)', bygroups(Operator, Text, + Name.Function)), + (r'[A-Z][A-Z0-9_]+', Name.Constant), + (r'[A-Z]\w*([@*]|\[[ \t]*\])?', Name.Class), + + (r'([a-z]\w*(?:~[a-z]\w*)?)((?:[ \t]|\\\n)*)(?=\()', + bygroups(Name.Function, Text)), + (r'[a-z]\w*', Name.Variable), + + # : introduces types + (r'[:(){}\[\];,]', Punctuation), + + (r'0x[0-9a-fA-F]+', Number.Hex), + (r'0c[0-9]+', Number.Oct), + (r'0b[01]+', Number.Bin), + (r'[0-9_]\.[0-9_]*(?!\.)', Number.Float), + (r'[0-9_]+', Number.Decimal), + + (r'"(?:\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\"])*"', + String.Double), + (r"'(?:\\.|\\[0-9]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", + String.Char), + (r'@', Punctuation), # pointer dereference + (r'\.', Punctuation), # imports or chain operator + + (r'\\[ \t\n]', Text), + (r'[ \t]+', Text), + ], + 'include': [ + (r'[\w/]+', Name), + (r',', Punctuation), + (r'[ \t]', Text), + (r'[;\n]', Text, '#pop'), + ], + } diff --git 
a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/other.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/other.py new file mode 100644 index 0000000..cde764a --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/other.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.other + ~~~~~~~~~~~~~~~~~~~~~ + + Just export lexer classes previously contained in this module. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexers.sql import SqlLexer, MySqlLexer, SqliteConsoleLexer +from pygments.lexers.shell import BashLexer, BashSessionLexer, BatchLexer, \ + TcshLexer +from pygments.lexers.robotframework import RobotFrameworkLexer +from pygments.lexers.testing import GherkinLexer +from pygments.lexers.esoteric import BrainfuckLexer, BefungeLexer, RedcodeLexer +from pygments.lexers.prolog import LogtalkLexer +from pygments.lexers.snobol import SnobolLexer +from pygments.lexers.rebol import RebolLexer +from pygments.lexers.configs import KconfigLexer, Cfengine3Lexer +from pygments.lexers.modeling import ModelicaLexer +from pygments.lexers.scripting import AppleScriptLexer, MOOCodeLexer, \ + HybrisLexer +from pygments.lexers.graphics import PostScriptLexer, GnuplotLexer, \ + AsymptoteLexer, PovrayLexer +from pygments.lexers.business import ABAPLexer, OpenEdgeLexer, \ + GoodDataCLLexer, MaqlLexer +from pygments.lexers.automation import AutoItLexer, AutohotkeyLexer +from pygments.lexers.dsls import ProtoBufLexer, BroLexer, PuppetLexer, \ + MscgenLexer, VGLLexer +from pygments.lexers.basic import CbmBasicV2Lexer +from pygments.lexers.pawn import SourcePawnLexer, PawnLexer +from pygments.lexers.ecl import ECLLexer +from pygments.lexers.urbi import UrbiscriptLexer +from pygments.lexers.smalltalk import SmalltalkLexer, NewspeakLexer +from pygments.lexers.installers import NSISLexer, RPMSpecLexer 
+from pygments.lexers.textedit import AwkLexer + +__all__ = [] diff --git a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/parsers.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/parsers.py similarity index 68% rename from plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/parsers.py rename to plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/parsers.py index c1ad710..91add67 100644 --- a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/parsers.py +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/parsers.py @@ -5,7 +5,7 @@ Lexers for parser generators. - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ @@ -14,22 +14,24 @@ import re from pygments.lexer import RegexLexer, DelegatingLexer, \ include, bygroups, using from pygments.token import Punctuation, Other, Text, Comment, Operator, \ - Keyword, Name, String, Number, Whitespace -from pygments.lexers.compiled import JavaLexer, CLexer, CppLexer, \ - ObjectiveCLexer, DLexer + Keyword, Name, String, Number, Whitespace +from pygments.lexers.jvm import JavaLexer +from pygments.lexers.c_cpp import CLexer, CppLexer +from pygments.lexers.objective import ObjectiveCLexer +from pygments.lexers.d import DLexer from pygments.lexers.dotnet import CSharpLexer -from pygments.lexers.agile import RubyLexer, PythonLexer, PerlLexer -from pygments.lexers.web import ActionScriptLexer - +from pygments.lexers.ruby import RubyLexer +from pygments.lexers.python import PythonLexer +from pygments.lexers.perl import PerlLexer __all__ = ['RagelLexer', 'RagelEmbeddedLexer', 'RagelCLexer', 'RagelDLexer', 'RagelCppLexer', 'RagelObjectiveCLexer', 'RagelRubyLexer', 'RagelJavaLexer', 'AntlrLexer', 'AntlrPythonLexer', 'AntlrPerlLexer', 'AntlrRubyLexer', 'AntlrCppLexer', - 
#'AntlrCLexer', + # 'AntlrCLexer', 'AntlrCSharpLexer', 'AntlrObjectiveCLexer', - 'AntlrJavaLexer', "AntlrActionScriptLexer", - 'TreetopLexer'] + 'AntlrJavaLexer', 'AntlrActionScriptLexer', + 'TreetopLexer', 'EbnfLexer'] class RagelLexer(RegexLexer): @@ -38,7 +40,7 @@ class RagelLexer(RegexLexer): fragments of Ragel. For ``.rl`` files, use RagelEmbeddedLexer instead (or one of the language-specific subclasses). - *New in Pygments 1.1.* + .. versionadded:: 1.1 """ name = 'Ragel' @@ -63,29 +65,29 @@ class RagelLexer(RegexLexer): (r'[+-]?[0-9]+', Number.Integer), ], 'literals': [ - (r'"(\\\\|\\"|[^"])*"', String), # double quote string - (r"'(\\\\|\\'|[^'])*'", String), # single quote string - (r'\[(\\\\|\\\]|[^\]])*\]', String), # square bracket literals - (r'/(?!\*)(\\\\|\\/|[^/])*/', String.Regex), # regular expressions + (r'"(\\\\|\\"|[^"])*"', String), # double quote string + (r"'(\\\\|\\'|[^'])*'", String), # single quote string + (r'\[(\\\\|\\\]|[^\]])*\]', String), # square bracket literals + (r'/(?!\*)(\\\\|\\/|[^/])*/', String.Regex), # regular expressions ], 'identifiers': [ - (r'[a-zA-Z_][a-zA-Z_0-9]*', Name.Variable), + (r'[a-zA-Z_]\w*', Name.Variable), ], 'operators': [ - (r',', Operator), # Join - (r'\||&|--?', Operator), # Union, Intersection and Subtraction - (r'\.|<:|:>>?', Operator), # Concatention - (r':', Operator), # Label - (r'->', Operator), # Epsilon Transition - (r'(>|\$|%|<|@|<>)(/|eof\b)', Operator), # EOF Actions - (r'(>|\$|%|<|@|<>)(!|err\b)', Operator), # Global Error Actions - (r'(>|\$|%|<|@|<>)(\^|lerr\b)', Operator), # Local Error Actions - (r'(>|\$|%|<|@|<>)(~|to\b)', Operator), # To-State Actions - (r'(>|\$|%|<|@|<>)(\*|from\b)', Operator), # From-State Actions - (r'>|@|\$|%', Operator), # Transition Actions and Priorities - (r'\*|\?|\+|{[0-9]*,[0-9]*}', Operator), # Repetition - (r'!|\^', Operator), # Negation - (r'\(|\)', Operator), # Grouping + (r',', Operator), # Join + (r'\||&|--?', Operator), # Union, Intersection and 
Subtraction + (r'\.|<:|:>>?', Operator), # Concatention + (r':', Operator), # Label + (r'->', Operator), # Epsilon Transition + (r'(>|\$|%|<|@|<>)(/|eof\b)', Operator), # EOF Actions + (r'(>|\$|%|<|@|<>)(!|err\b)', Operator), # Global Error Actions + (r'(>|\$|%|<|@|<>)(\^|lerr\b)', Operator), # Local Error Actions + (r'(>|\$|%|<|@|<>)(~|to\b)', Operator), # To-State Actions + (r'(>|\$|%|<|@|<>)(\*|from\b)', Operator), # From-State Actions + (r'>|@|\$|%', Operator), # Transition Actions and Priorities + (r'\*|\?|\+|\{[0-9]*,[0-9]*\}', Operator), # Repetition + (r'!|\^', Operator), # Negation + (r'\(|\)', Operator), # Grouping ], 'root': [ include('literals'), @@ -95,21 +97,21 @@ class RagelLexer(RegexLexer): include('numbers'), include('identifiers'), include('operators'), - (r'{', Punctuation, 'host'), + (r'\{', Punctuation, 'host'), (r'=', Operator), (r';', Punctuation), ], 'host': [ - (r'(' + r'|'.join(( # keep host code in largest possible chunks - r'[^{}\'"/#]+', # exclude unsafe characters - r'[^\\][\\][{}]', # allow escaped { or } + (r'(' + r'|'.join(( # keep host code in largest possible chunks + r'[^{}\'"/#]+', # exclude unsafe characters + r'[^\\]\\[{}]', # allow escaped { or } # strings and comments may safely contain unsafe characters - r'"(\\\\|\\"|[^"])*"', # double quote string - r"'(\\\\|\\'|[^'])*'", # single quote string - r'//.*$\n?', # single line comment - r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment - r'\#.*$\n?', # ruby comment + r'"(\\\\|\\"|[^"])*"', # double quote string + r"'(\\\\|\\'|[^'])*'", # single quote string + r'//.*$\n?', # single line comment + r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment + r'\#.*$\n?', # ruby comment # regular expression: There's no reason for it to start # with a * and this stops confusion with comments. 
@@ -119,8 +121,8 @@ class RagelLexer(RegexLexer): r'/', )) + r')+', Other), - (r'{', Punctuation, '#push'), - (r'}', Punctuation, '#pop'), + (r'\{', Punctuation, '#push'), + (r'\}', Punctuation, '#pop'), ], } @@ -132,7 +134,7 @@ class RagelEmbeddedLexer(RegexLexer): This will only highlight Ragel statements. If you want host language highlighting then call the language-specific Ragel lexer. - *New in Pygments 1.1.* + .. versionadded:: 1.1 """ name = 'Embedded Ragel' @@ -141,17 +143,17 @@ class RagelEmbeddedLexer(RegexLexer): tokens = { 'root': [ - (r'(' + r'|'.join(( # keep host code in largest possible chunks - r'[^%\'"/#]+', # exclude unsafe characters - r'%(?=[^%]|$)', # a single % sign is okay, just not 2 of them + (r'(' + r'|'.join(( # keep host code in largest possible chunks + r'[^%\'"/#]+', # exclude unsafe characters + r'%(?=[^%]|$)', # a single % sign is okay, just not 2 of them # strings and comments may safely contain unsafe characters - r'"(\\\\|\\"|[^"])*"', # double quote string - r"'(\\\\|\\'|[^'])*'", # single quote string - r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment - r'//.*$\n?', # single line comment - r'\#.*$\n?', # ruby/ragel comment - r'/(?!\*)(\\\\|\\/|[^/])*/', # regular expression + r'"(\\\\|\\"|[^"])*"', # double quote string + r"'(\\\\|\\'|[^'])*'", # single quote string + r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment + r'//.*$\n?', # single line comment + r'\#.*$\n?', # ruby/ragel comment + r'/(?!\*)(\\\\|\\/|[^/])*/', # regular expression # / is safe now that we've handled regex and javadoc comments r'/', @@ -165,15 +167,15 @@ class RagelEmbeddedLexer(RegexLexer): Punctuation, Text)), # Multi Line FSM. - (r'(%%%%|%%){', Punctuation, 'multi-line-fsm'), + (r'(%%%%|%%)\{', Punctuation, 'multi-line-fsm'), ], 'multi-line-fsm': [ - (r'(' + r'|'.join(( # keep ragel code in largest possible chunks. + (r'(' + r'|'.join(( # keep ragel code in largest possible chunks. 
r'(' + r'|'.join(( - r'[^}\'"\[/#]', # exclude unsafe characters - r'}(?=[^%]|$)', # } is okay as long as it's not followed by % - r'}%(?=[^%]|$)', # ...well, one %'s okay, just not two... - r'[^\\][\\][{}]', # ...and } is okay if it's escaped + r'[^}\'"\[/#]', # exclude unsafe characters + r'\}(?=[^%]|$)', # } is okay as long as it's not followed by % + r'\}%(?=[^%]|$)', # ...well, one %'s okay, just not two... + r'[^\\]\\[{}]', # ...and } is okay if it's escaped # allow / if it's preceded with one of these symbols # (ragel EOF actions) @@ -184,35 +186,35 @@ class RagelEmbeddedLexer(RegexLexer): r'/(?!\*)(\\\\|\\/|[^/])*/\*', # allow / as long as it's not followed by another / or by a * - r'/(?=[^/\*]|$)', + r'/(?=[^/*]|$)', # We want to match as many of these as we can in one block. # Not sure if we need the + sign here, # does it help performance? - )) + r')+', + )) + r')+', # strings and comments may safely contain unsafe characters - r'"(\\\\|\\"|[^"])*"', # double quote string - r"'(\\\\|\\'|[^'])*'", # single quote string - r"\[(\\\\|\\\]|[^\]])*\]", # square bracket literal - r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment - r'//.*$\n?', # single line comment - r'\#.*$\n?', # ruby/ragel comment + r'"(\\\\|\\"|[^"])*"', # double quote string + r"'(\\\\|\\'|[^'])*'", # single quote string + r"\[(\\\\|\\\]|[^\]])*\]", # square bracket literal + r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment + r'//.*$\n?', # single line comment + r'\#.*$\n?', # ruby/ragel comment )) + r')+', using(RagelLexer)), - (r'}%%', Punctuation, '#pop'), + (r'\}%%', Punctuation, '#pop'), ] } def analyse_text(text): - return '@LANG: indep' in text or 0.1 + return '@LANG: indep' in text class RagelRubyLexer(DelegatingLexer): """ A lexer for `Ragel`_ in a Ruby host file. - *New in Pygments 1.1.* + .. 
versionadded:: 1.1 """ name = 'Ragel in Ruby Host' @@ -221,7 +223,7 @@ class RagelRubyLexer(DelegatingLexer): def __init__(self, **options): super(RagelRubyLexer, self).__init__(RubyLexer, RagelEmbeddedLexer, - **options) + **options) def analyse_text(text): return '@LANG: ruby' in text @@ -231,7 +233,7 @@ class RagelCLexer(DelegatingLexer): """ A lexer for `Ragel`_ in a C host file. - *New in Pygments 1.1.* + .. versionadded:: 1.1 """ name = 'Ragel in C Host' @@ -250,7 +252,7 @@ class RagelDLexer(DelegatingLexer): """ A lexer for `Ragel`_ in a D host file. - *New in Pygments 1.1.* + .. versionadded:: 1.1 """ name = 'Ragel in D Host' @@ -268,7 +270,7 @@ class RagelCppLexer(DelegatingLexer): """ A lexer for `Ragel`_ in a CPP host file. - *New in Pygments 1.1.* + .. versionadded:: 1.1 """ name = 'Ragel in CPP Host' @@ -286,7 +288,7 @@ class RagelObjectiveCLexer(DelegatingLexer): """ A lexer for `Ragel`_ in an Objective C host file. - *New in Pygments 1.1.* + .. versionadded:: 1.1 """ name = 'Ragel in Objective C Host' @@ -306,7 +308,7 @@ class RagelJavaLexer(DelegatingLexer): """ A lexer for `Ragel`_ in a Java host file. - *New in Pygments 1.1.* + .. versionadded:: 1.1 """ name = 'Ragel in Java Host' @@ -327,7 +329,7 @@ class AntlrLexer(RegexLexer): Should not be called directly, instead use DelegatingLexer for your target language. - *New in Pygments 1.1.* + .. versionadded:: 1.1 .. 
_ANTLR: http://www.antlr.org/ """ @@ -336,9 +338,9 @@ class AntlrLexer(RegexLexer): aliases = ['antlr'] filenames = [] - _id = r'[A-Za-z][A-Za-z_0-9]*' - _TOKEN_REF = r'[A-Z][A-Za-z_0-9]*' - _RULE_REF = r'[a-z][A-Za-z_0-9]*' + _id = r'[A-Za-z]\w*' + _TOKEN_REF = r'[A-Z]\w*' + _RULE_REF = r'[a-z]\w*' _STRING_LITERAL = r'\'(?:\\\\|\\\'|[^\']*)\'' _INT = r'[0-9]+' @@ -362,17 +364,17 @@ class AntlrLexer(RegexLexer): # tokensSpec (r'tokens\b', Keyword, 'tokens'), # attrScope - (r'(scope)(\s*)(' + _id + ')(\s*)({)', + (r'(scope)(\s*)(' + _id + ')(\s*)(\{)', bygroups(Keyword, Whitespace, Name.Variable, Whitespace, Punctuation), 'action'), # exception (r'(catch|finally)\b', Keyword, 'exception'), # action - (r'(@' + _id + ')(\s*)(::)?(\s*)(' + _id + ')(\s*)({)', + (r'(@' + _id + ')(\s*)(::)?(\s*)(' + _id + ')(\s*)(\{)', bygroups(Name.Label, Whitespace, Punctuation, Whitespace, Name.Label, Whitespace, Punctuation), 'action'), # rule - (r'((?:protected|private|public|fragment)\b)?(\s*)(' + _id + ')(!)?', \ + (r'((?:protected|private|public|fragment)\b)?(\s*)(' + _id + ')(!)?', bygroups(Keyword, Whitespace, Name.Label, Punctuation), ('rule-alts', 'rule-prelims')), ], @@ -395,18 +397,18 @@ class AntlrLexer(RegexLexer): (r'(throws)(\s+)(' + _id + ')', bygroups(Keyword, Whitespace, Name.Label)), (r'(,)(\s*)(' + _id + ')', - bygroups(Punctuation, Whitespace, Name.Label)), # Additional throws + bygroups(Punctuation, Whitespace, Name.Label)), # Additional throws # optionsSpec (r'options\b', Keyword, 'options'), # ruleScopeSpec - scope followed by target language code or name of action # TODO finish implementing other possibilities for scope # L173 ANTLRv3.g from ANTLR book - (r'(scope)(\s+)({)', bygroups(Keyword, Whitespace, Punctuation), - 'action'), + (r'(scope)(\s+)(\{)', bygroups(Keyword, Whitespace, Punctuation), + 'action'), (r'(scope)(\s+)(' + _id + ')(\s*)(;)', bygroups(Keyword, Whitespace, Name.Label, Whitespace, Punctuation)), # ruleAction - (r'(@' + _id + ')(\s*)({)', + 
(r'(@' + _id + ')(\s*)(\{)', bygroups(Name.Label, Whitespace, Punctuation), 'action'), # finished prelims, go to rule alts! (r':', Punctuation, '#pop') @@ -425,9 +427,9 @@ class AntlrLexer(RegexLexer): (r'<<([^>]|>[^>])>>', String), # identifiers # Tokens start with capital letter. - (r'\$?[A-Z_][A-Za-z_0-9]*', Name.Constant), + (r'\$?[A-Z_]\w*', Name.Constant), # Rules start with small letter. - (r'\$?[a-z_][A-Za-z_0-9]*', Name.Variable), + (r'\$?[a-z_]\w*', Name.Variable), # operators (r'(\+|\||->|=>|=|\(|\)|\.\.|\.|\?|\*|\^|!|\#|~)', Operator), (r',', Punctuation), @@ -438,32 +440,32 @@ class AntlrLexer(RegexLexer): 'tokens': [ include('whitespace'), include('comments'), - (r'{', Punctuation), + (r'\{', Punctuation), (r'(' + _TOKEN_REF + r')(\s*)(=)?(\s*)(' + _STRING_LITERAL + ')?(\s*)(;)', bygroups(Name.Label, Whitespace, Punctuation, Whitespace, String, Whitespace, Punctuation)), - (r'}', Punctuation, '#pop'), + (r'\}', Punctuation, '#pop'), ], 'options': [ include('whitespace'), include('comments'), - (r'{', Punctuation), + (r'\{', Punctuation), (r'(' + _id + r')(\s*)(=)(\s*)(' + - '|'.join((_id, _STRING_LITERAL, _INT, '\*'))+ ')(\s*)(;)', + '|'.join((_id, _STRING_LITERAL, _INT, '\*')) + ')(\s*)(;)', bygroups(Name.Variable, Whitespace, Punctuation, Whitespace, Text, Whitespace, Punctuation)), - (r'}', Punctuation, '#pop'), + (r'\}', Punctuation, '#pop'), ], 'action': [ - (r'(' + r'|'.join(( # keep host code in largest possible chunks - r'[^\${}\'"/\\]+', # exclude unsafe characters + (r'(' + r'|'.join(( # keep host code in largest possible chunks + r'[^${}\'"/\\]+', # exclude unsafe characters # strings and comments may safely contain unsafe characters - r'"(\\\\|\\"|[^"])*"', # double quote string - r"'(\\\\|\\'|[^'])*'", # single quote string - r'//.*$\n?', # single line comment - r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment + r'"(\\\\|\\"|[^"])*"', # double quote string + r"'(\\\\|\\'|[^'])*'", # single quote string + r'//.*$\n?', # single line 
comment + r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment # regular expression: There's no reason for it to start # with a * and this stops confusion with comments. @@ -479,18 +481,18 @@ class AntlrLexer(RegexLexer): (r'(\\)(%)', bygroups(Punctuation, Other)), (r'(\$[a-zA-Z]+)(\.?)(text|value)?', bygroups(Name.Variable, Punctuation, Name.Property)), - (r'{', Punctuation, '#push'), - (r'}', Punctuation, '#pop'), + (r'\{', Punctuation, '#push'), + (r'\}', Punctuation, '#pop'), ], 'nested-arg-action': [ - (r'(' + r'|'.join(( # keep host code in largest possible chunks. - r'[^\$\[\]\'"/]+', # exclude unsafe characters + (r'(' + r'|'.join(( # keep host code in largest possible chunks. + r'[^$\[\]\'"/]+', # exclude unsafe characters # strings and comments may safely contain unsafe characters - r'"(\\\\|\\"|[^"])*"', # double quote string - r"'(\\\\|\\'|[^'])*'", # single quote string - r'//.*$\n?', # single line comment - r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment + r'"(\\\\|\\"|[^"])*"', # double quote string + r"'(\\\\|\\'|[^'])*'", # single quote string + r'//.*$\n?', # single line comment + r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment # regular expression: There's no reason for it to start # with a * and this stops confusion with comments. @@ -520,11 +522,11 @@ class AntlrLexer(RegexLexer): # so just assume they're C++. No idea how to make Objective C work in the # future. -#class AntlrCLexer(DelegatingLexer): +# class AntlrCLexer(DelegatingLexer): # """ # ANTLR with C Target # -# *New in Pygments 1.1* +# .. versionadded:: 1.1 # """ # # name = 'ANTLR With C Target' @@ -537,11 +539,12 @@ class AntlrLexer(RegexLexer): # def analyse_text(text): # return re.match(r'^\s*language\s*=\s*C\s*;', text) + class AntlrCppLexer(DelegatingLexer): """ `ANTLR`_ with CPP Target - *New in Pygments 1.1.* + .. 
versionadded:: 1.1 """ name = 'ANTLR With CPP Target' @@ -553,14 +556,14 @@ class AntlrCppLexer(DelegatingLexer): def analyse_text(text): return AntlrLexer.analyse_text(text) and \ - re.search(r'^\s*language\s*=\s*C\s*;', text, re.M) + re.search(r'^\s*language\s*=\s*C\s*;', text, re.M) class AntlrObjectiveCLexer(DelegatingLexer): """ `ANTLR`_ with Objective-C Target - *New in Pygments 1.1.* + .. versionadded:: 1.1 """ name = 'ANTLR With ObjectiveC Target' @@ -573,14 +576,14 @@ class AntlrObjectiveCLexer(DelegatingLexer): def analyse_text(text): return AntlrLexer.analyse_text(text) and \ - re.search(r'^\s*language\s*=\s*ObjC\s*;', text) + re.search(r'^\s*language\s*=\s*ObjC\s*;', text) class AntlrCSharpLexer(DelegatingLexer): """ `ANTLR`_ with C# Target - *New in Pygments 1.1.* + .. versionadded:: 1.1 """ name = 'ANTLR With C# Target' @@ -593,14 +596,14 @@ class AntlrCSharpLexer(DelegatingLexer): def analyse_text(text): return AntlrLexer.analyse_text(text) and \ - re.search(r'^\s*language\s*=\s*CSharp2\s*;', text, re.M) + re.search(r'^\s*language\s*=\s*CSharp2\s*;', text, re.M) class AntlrPythonLexer(DelegatingLexer): """ `ANTLR`_ with Python Target - *New in Pygments 1.1.* + .. versionadded:: 1.1 """ name = 'ANTLR With Python Target' @@ -613,14 +616,14 @@ class AntlrPythonLexer(DelegatingLexer): def analyse_text(text): return AntlrLexer.analyse_text(text) and \ - re.search(r'^\s*language\s*=\s*Python\s*;', text, re.M) + re.search(r'^\s*language\s*=\s*Python\s*;', text, re.M) class AntlrJavaLexer(DelegatingLexer): """ `ANTLR`_ with Java Target - *New in Pygments 1.1* + .. versionadded:: 1. """ name = 'ANTLR With Java Target' @@ -640,7 +643,7 @@ class AntlrRubyLexer(DelegatingLexer): """ `ANTLR`_ with Ruby Target - *New in Pygments 1.1.* + .. 
versionadded:: 1.1 """ name = 'ANTLR With Ruby Target' @@ -653,14 +656,14 @@ class AntlrRubyLexer(DelegatingLexer): def analyse_text(text): return AntlrLexer.analyse_text(text) and \ - re.search(r'^\s*language\s*=\s*Ruby\s*;', text, re.M) + re.search(r'^\s*language\s*=\s*Ruby\s*;', text, re.M) class AntlrPerlLexer(DelegatingLexer): """ `ANTLR`_ with Perl Target - *New in Pygments 1.1.* + .. versionadded:: 1.1 """ name = 'ANTLR With Perl Target' @@ -673,14 +676,14 @@ class AntlrPerlLexer(DelegatingLexer): def analyse_text(text): return AntlrLexer.analyse_text(text) and \ - re.search(r'^\s*language\s*=\s*Perl5\s*;', text, re.M) + re.search(r'^\s*language\s*=\s*Perl5\s*;', text, re.M) class AntlrActionScriptLexer(DelegatingLexer): """ `ANTLR`_ with ActionScript Target - *New in Pygments 1.1.* + .. versionadded:: 1.1 """ name = 'ANTLR With ActionScript Target' @@ -688,19 +691,21 @@ class AntlrActionScriptLexer(DelegatingLexer): filenames = ['*.G', '*.g'] def __init__(self, **options): + from pygments.lexers.actionscript import ActionScriptLexer super(AntlrActionScriptLexer, self).__init__(ActionScriptLexer, AntlrLexer, **options) def analyse_text(text): return AntlrLexer.analyse_text(text) and \ - re.search(r'^\s*language\s*=\s*ActionScript\s*;', text, re.M) + re.search(r'^\s*language\s*=\s*ActionScript\s*;', text, re.M) + class TreetopBaseLexer(RegexLexer): """ A base lexer for `Treetop `_ grammars. Not for direct use; use TreetopLexer instead. - *New in Pygments 1.6.* + .. 
versionadded:: 1.6 """ tokens = { @@ -715,43 +720,43 @@ class TreetopBaseLexer(RegexLexer): include('end'), (r'module\b', Keyword, '#push'), (r'grammar\b', Keyword, 'grammar'), - (r'[A-Z][A-Za-z_0-9]*(?:::[A-Z][A-Za-z_0-9]*)*', Name.Namespace), + (r'[A-Z]\w*(?:::[A-Z]\w*)*', Name.Namespace), ], 'grammar': [ include('space'), include('end'), (r'rule\b', Keyword, 'rule'), (r'include\b', Keyword, 'include'), - (r'[A-Z][A-Za-z_0-9]*', Name), + (r'[A-Z]\w*', Name), ], 'include': [ include('space'), - (r'[A-Z][A-Za-z_0-9]*(?:::[A-Z][A-Za-z_0-9]*)*', Name.Class, '#pop'), + (r'[A-Z]\w*(?:::[A-Z]\w*)*', Name.Class, '#pop'), ], 'rule': [ include('space'), include('end'), (r'"(\\\\|\\"|[^"])*"', String.Double), (r"'(\\\\|\\'|[^'])*'", String.Single), - (r'([A-Za-z_][A-Za-z_0-9]*)(:)', bygroups(Name.Label, Punctuation)), - (r'[A-Za-z_][A-Za-z_0-9]*', Name), + (r'([A-Za-z_]\w*)(:)', bygroups(Name.Label, Punctuation)), + (r'[A-Za-z_]\w*', Name), (r'[()]', Punctuation), (r'[?+*/&!~]', Operator), (r'\[(?:\\.|\[:\^?[a-z]+:\]|[^\\\]])+\]', String.Regex), (r'([0-9]*)(\.\.)([0-9]*)', bygroups(Number.Integer, Operator, Number.Integer)), (r'(<)([^>]+)(>)', bygroups(Punctuation, Name.Class, Punctuation)), - (r'{', Punctuation, 'inline_module'), + (r'\{', Punctuation, 'inline_module'), (r'\.', String.Regex), ], 'inline_module': [ - (r'{', Other, 'ruby'), - (r'}', Punctuation, '#pop'), + (r'\{', Other, 'ruby'), + (r'\}', Punctuation, '#pop'), (r'[^{}]+', Other), ], 'ruby': [ - (r'{', Other, '#push'), - (r'}', Other, '#pop'), + (r'\{', Other, '#push'), + (r'\}', Other, '#pop'), (r'[^{}]+', Other), ], 'space': [ @@ -763,11 +768,12 @@ class TreetopBaseLexer(RegexLexer): ], } + class TreetopLexer(DelegatingLexer): """ A lexer for `Treetop `_ grammars. - *New in Pygments 1.6.* + .. 
versionadded:: 1.6 """ name = 'Treetop' @@ -776,3 +782,54 @@ class TreetopLexer(DelegatingLexer): def __init__(self, **options): super(TreetopLexer, self).__init__(RubyLexer, TreetopBaseLexer, **options) + + +class EbnfLexer(RegexLexer): + """ + Lexer for `ISO/IEC 14977 EBNF + `_ + grammars. + + .. versionadded:: 2.0 + """ + + name = 'EBNF' + aliases = ['ebnf'] + filenames = ['*.ebnf'] + mimetypes = ['text/x-ebnf'] + + tokens = { + 'root': [ + include('whitespace'), + include('comment_start'), + include('identifier'), + (r'=', Operator, 'production'), + ], + 'production': [ + include('whitespace'), + include('comment_start'), + include('identifier'), + (r'"[^"]*"', String.Double), + (r"'[^']*'", String.Single), + (r'(\?[^?]*\?)', Name.Entity), + (r'[\[\]{}(),|]', Punctuation), + (r'-', Operator), + (r';', Punctuation, '#pop'), + (r'\.', Punctuation, '#pop'), + ], + 'whitespace': [ + (r'\s+', Text), + ], + 'comment_start': [ + (r'\(\*', Comment.Multiline, 'comment'), + ], + 'comment': [ + (r'[^*)]', Comment.Multiline), + include('comment_start'), + (r'\*\)', Comment.Multiline, '#pop'), + (r'[*)]', Comment.Multiline), + ], + 'identifier': [ + (r'([a-zA-Z][\w \-]*)', Keyword), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/pascal.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/pascal.py new file mode 100644 index 0000000..5487748 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/pascal.py @@ -0,0 +1,831 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.pascal + ~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for Pascal family languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import re + +from pygments.lexer import Lexer, RegexLexer, include, bygroups, words, \ + using, this, default +from pygments.util import get_bool_opt, get_list_opt +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Error +from pygments.scanner import Scanner + +__all__ = ['DelphiLexer', 'Modula2Lexer', 'AdaLexer'] + + +class DelphiLexer(Lexer): + """ + For `Delphi `_ (Borland Object Pascal), + Turbo Pascal and Free Pascal source code. + + Additional options accepted: + + `turbopascal` + Highlight Turbo Pascal specific keywords (default: ``True``). + `delphi` + Highlight Borland Delphi specific keywords (default: ``True``). + `freepascal` + Highlight Free Pascal specific keywords (default: ``True``). + `units` + A list of units that should be considered builtin, supported are + ``System``, ``SysUtils``, ``Classes`` and ``Math``. + Default is to consider all of them builtin. + """ + name = 'Delphi' + aliases = ['delphi', 'pas', 'pascal', 'objectpascal'] + filenames = ['*.pas'] + mimetypes = ['text/x-pascal'] + + TURBO_PASCAL_KEYWORDS = ( + 'absolute', 'and', 'array', 'asm', 'begin', 'break', 'case', + 'const', 'constructor', 'continue', 'destructor', 'div', 'do', + 'downto', 'else', 'end', 'file', 'for', 'function', 'goto', + 'if', 'implementation', 'in', 'inherited', 'inline', 'interface', + 'label', 'mod', 'nil', 'not', 'object', 'of', 'on', 'operator', + 'or', 'packed', 'procedure', 'program', 'record', 'reintroduce', + 'repeat', 'self', 'set', 'shl', 'shr', 'string', 'then', 'to', + 'type', 'unit', 'until', 'uses', 'var', 'while', 'with', 'xor' + ) + + DELPHI_KEYWORDS = ( + 'as', 'class', 'except', 'exports', 'finalization', 'finally', + 'initialization', 'is', 'library', 'on', 'property', 'raise', + 'threadvar', 'try' + ) + + FREE_PASCAL_KEYWORDS = ( + 'dispose', 'exit', 'false', 'new', 'true' + ) + + BLOCK_KEYWORDS = set(( + 'begin', 'class', 'const', 'constructor', 'destructor', 'end', + 'finalization', 
'function', 'implementation', 'initialization', + 'label', 'library', 'operator', 'procedure', 'program', 'property', + 'record', 'threadvar', 'type', 'unit', 'uses', 'var' + )) + + FUNCTION_MODIFIERS = set(( + 'alias', 'cdecl', 'export', 'inline', 'interrupt', 'nostackframe', + 'pascal', 'register', 'safecall', 'softfloat', 'stdcall', + 'varargs', 'name', 'dynamic', 'near', 'virtual', 'external', + 'override', 'assembler' + )) + + # XXX: those aren't global. but currently we know no way for defining + # them just for the type context. + DIRECTIVES = set(( + 'absolute', 'abstract', 'assembler', 'cppdecl', 'default', 'far', + 'far16', 'forward', 'index', 'oldfpccall', 'private', 'protected', + 'published', 'public' + )) + + BUILTIN_TYPES = set(( + 'ansichar', 'ansistring', 'bool', 'boolean', 'byte', 'bytebool', + 'cardinal', 'char', 'comp', 'currency', 'double', 'dword', + 'extended', 'int64', 'integer', 'iunknown', 'longbool', 'longint', + 'longword', 'pansichar', 'pansistring', 'pbool', 'pboolean', + 'pbyte', 'pbytearray', 'pcardinal', 'pchar', 'pcomp', 'pcurrency', + 'pdate', 'pdatetime', 'pdouble', 'pdword', 'pextended', 'phandle', + 'pint64', 'pinteger', 'plongint', 'plongword', 'pointer', + 'ppointer', 'pshortint', 'pshortstring', 'psingle', 'psmallint', + 'pstring', 'pvariant', 'pwidechar', 'pwidestring', 'pword', + 'pwordarray', 'pwordbool', 'real', 'real48', 'shortint', + 'shortstring', 'single', 'smallint', 'string', 'tclass', 'tdate', + 'tdatetime', 'textfile', 'thandle', 'tobject', 'ttime', 'variant', + 'widechar', 'widestring', 'word', 'wordbool' + )) + + BUILTIN_UNITS = { + 'System': ( + 'abs', 'acquireexceptionobject', 'addr', 'ansitoutf8', + 'append', 'arctan', 'assert', 'assigned', 'assignfile', + 'beginthread', 'blockread', 'blockwrite', 'break', 'chdir', + 'chr', 'close', 'closefile', 'comptocurrency', 'comptodouble', + 'concat', 'continue', 'copy', 'cos', 'dec', 'delete', + 'dispose', 'doubletocomp', 'endthread', 'enummodules', + 
'enumresourcemodules', 'eof', 'eoln', 'erase', 'exceptaddr', + 'exceptobject', 'exclude', 'exit', 'exp', 'filepos', 'filesize', + 'fillchar', 'finalize', 'findclasshinstance', 'findhinstance', + 'findresourcehinstance', 'flush', 'frac', 'freemem', + 'get8087cw', 'getdir', 'getlasterror', 'getmem', + 'getmemorymanager', 'getmodulefilename', 'getvariantmanager', + 'halt', 'hi', 'high', 'inc', 'include', 'initialize', 'insert', + 'int', 'ioresult', 'ismemorymanagerset', 'isvariantmanagerset', + 'length', 'ln', 'lo', 'low', 'mkdir', 'move', 'new', 'odd', + 'olestrtostring', 'olestrtostrvar', 'ord', 'paramcount', + 'paramstr', 'pi', 'pos', 'pred', 'ptr', 'pucs4chars', 'random', + 'randomize', 'read', 'readln', 'reallocmem', + 'releaseexceptionobject', 'rename', 'reset', 'rewrite', 'rmdir', + 'round', 'runerror', 'seek', 'seekeof', 'seekeoln', + 'set8087cw', 'setlength', 'setlinebreakstyle', + 'setmemorymanager', 'setstring', 'settextbuf', + 'setvariantmanager', 'sin', 'sizeof', 'slice', 'sqr', 'sqrt', + 'str', 'stringofchar', 'stringtoolestr', 'stringtowidechar', + 'succ', 'swap', 'trunc', 'truncate', 'typeinfo', + 'ucs4stringtowidestring', 'unicodetoutf8', 'uniquestring', + 'upcase', 'utf8decode', 'utf8encode', 'utf8toansi', + 'utf8tounicode', 'val', 'vararrayredim', 'varclear', + 'widecharlentostring', 'widecharlentostrvar', + 'widechartostring', 'widechartostrvar', + 'widestringtoucs4string', 'write', 'writeln' + ), + 'SysUtils': ( + 'abort', 'addexitproc', 'addterminateproc', 'adjustlinebreaks', + 'allocmem', 'ansicomparefilename', 'ansicomparestr', + 'ansicomparetext', 'ansidequotedstr', 'ansiextractquotedstr', + 'ansilastchar', 'ansilowercase', 'ansilowercasefilename', + 'ansipos', 'ansiquotedstr', 'ansisamestr', 'ansisametext', + 'ansistrcomp', 'ansistricomp', 'ansistrlastchar', 'ansistrlcomp', + 'ansistrlicomp', 'ansistrlower', 'ansistrpos', 'ansistrrscan', + 'ansistrscan', 'ansistrupper', 'ansiuppercase', + 'ansiuppercasefilename', 'appendstr', 'assignstr', 
'beep', + 'booltostr', 'bytetocharindex', 'bytetocharlen', 'bytetype', + 'callterminateprocs', 'changefileext', 'charlength', + 'chartobyteindex', 'chartobytelen', 'comparemem', 'comparestr', + 'comparetext', 'createdir', 'createguid', 'currentyear', + 'currtostr', 'currtostrf', 'date', 'datetimetofiledate', + 'datetimetostr', 'datetimetostring', 'datetimetosystemtime', + 'datetimetotimestamp', 'datetostr', 'dayofweek', 'decodedate', + 'decodedatefully', 'decodetime', 'deletefile', 'directoryexists', + 'diskfree', 'disksize', 'disposestr', 'encodedate', 'encodetime', + 'exceptionerrormessage', 'excludetrailingbackslash', + 'excludetrailingpathdelimiter', 'expandfilename', + 'expandfilenamecase', 'expanduncfilename', 'extractfiledir', + 'extractfiledrive', 'extractfileext', 'extractfilename', + 'extractfilepath', 'extractrelativepath', 'extractshortpathname', + 'fileage', 'fileclose', 'filecreate', 'filedatetodatetime', + 'fileexists', 'filegetattr', 'filegetdate', 'fileisreadonly', + 'fileopen', 'fileread', 'filesearch', 'fileseek', 'filesetattr', + 'filesetdate', 'filesetreadonly', 'filewrite', 'finalizepackage', + 'findclose', 'findcmdlineswitch', 'findfirst', 'findnext', + 'floattocurr', 'floattodatetime', 'floattodecimal', 'floattostr', + 'floattostrf', 'floattotext', 'floattotextfmt', 'fmtloadstr', + 'fmtstr', 'forcedirectories', 'format', 'formatbuf', 'formatcurr', + 'formatdatetime', 'formatfloat', 'freeandnil', 'getcurrentdir', + 'getenvironmentvariable', 'getfileversion', 'getformatsettings', + 'getlocaleformatsettings', 'getmodulename', 'getpackagedescription', + 'getpackageinfo', 'gettime', 'guidtostring', 'incamonth', + 'includetrailingbackslash', 'includetrailingpathdelimiter', + 'incmonth', 'initializepackage', 'interlockeddecrement', + 'interlockedexchange', 'interlockedexchangeadd', + 'interlockedincrement', 'inttohex', 'inttostr', 'isdelimiter', + 'isequalguid', 'isleapyear', 'ispathdelimiter', 'isvalidident', + 'languages', 'lastdelimiter', 
'loadpackage', 'loadstr', + 'lowercase', 'msecstotimestamp', 'newstr', 'nextcharindex', 'now', + 'outofmemoryerror', 'quotedstr', 'raiselastoserror', + 'raiselastwin32error', 'removedir', 'renamefile', 'replacedate', + 'replacetime', 'safeloadlibrary', 'samefilename', 'sametext', + 'setcurrentdir', 'showexception', 'sleep', 'stralloc', 'strbufsize', + 'strbytetype', 'strcat', 'strcharlength', 'strcomp', 'strcopy', + 'strdispose', 'strecopy', 'strend', 'strfmt', 'stricomp', + 'stringreplace', 'stringtoguid', 'strlcat', 'strlcomp', 'strlcopy', + 'strlen', 'strlfmt', 'strlicomp', 'strlower', 'strmove', 'strnew', + 'strnextchar', 'strpas', 'strpcopy', 'strplcopy', 'strpos', + 'strrscan', 'strscan', 'strtobool', 'strtobooldef', 'strtocurr', + 'strtocurrdef', 'strtodate', 'strtodatedef', 'strtodatetime', + 'strtodatetimedef', 'strtofloat', 'strtofloatdef', 'strtoint', + 'strtoint64', 'strtoint64def', 'strtointdef', 'strtotime', + 'strtotimedef', 'strupper', 'supports', 'syserrormessage', + 'systemtimetodatetime', 'texttofloat', 'time', 'timestamptodatetime', + 'timestamptomsecs', 'timetostr', 'trim', 'trimleft', 'trimright', + 'tryencodedate', 'tryencodetime', 'tryfloattocurr', 'tryfloattodatetime', + 'trystrtobool', 'trystrtocurr', 'trystrtodate', 'trystrtodatetime', + 'trystrtofloat', 'trystrtoint', 'trystrtoint64', 'trystrtotime', + 'unloadpackage', 'uppercase', 'widecomparestr', 'widecomparetext', + 'widefmtstr', 'wideformat', 'wideformatbuf', 'widelowercase', + 'widesamestr', 'widesametext', 'wideuppercase', 'win32check', + 'wraptext' + ), + 'Classes': ( + 'activateclassgroup', 'allocatehwnd', 'bintohex', 'checksynchronize', + 'collectionsequal', 'countgenerations', 'deallocatehwnd', 'equalrect', + 'extractstrings', 'findclass', 'findglobalcomponent', 'getclass', + 'groupdescendantswith', 'hextobin', 'identtoint', + 'initinheritedcomponent', 'inttoident', 'invalidpoint', + 'isuniqueglobalcomponentname', 'linestart', 'objectbinarytotext', + 'objectresourcetotext', 
'objecttexttobinary', 'objecttexttoresource', + 'pointsequal', 'readcomponentres', 'readcomponentresex', + 'readcomponentresfile', 'rect', 'registerclass', 'registerclassalias', + 'registerclasses', 'registercomponents', 'registerintegerconsts', + 'registernoicon', 'registernonactivex', 'smallpoint', 'startclassgroup', + 'teststreamformat', 'unregisterclass', 'unregisterclasses', + 'unregisterintegerconsts', 'unregistermoduleclasses', + 'writecomponentresfile' + ), + 'Math': ( + 'arccos', 'arccosh', 'arccot', 'arccoth', 'arccsc', 'arccsch', 'arcsec', + 'arcsech', 'arcsin', 'arcsinh', 'arctan2', 'arctanh', 'ceil', + 'comparevalue', 'cosecant', 'cosh', 'cot', 'cotan', 'coth', 'csc', + 'csch', 'cycletodeg', 'cycletograd', 'cycletorad', 'degtocycle', + 'degtograd', 'degtorad', 'divmod', 'doubledecliningbalance', + 'ensurerange', 'floor', 'frexp', 'futurevalue', 'getexceptionmask', + 'getprecisionmode', 'getroundmode', 'gradtocycle', 'gradtodeg', + 'gradtorad', 'hypot', 'inrange', 'interestpayment', 'interestrate', + 'internalrateofreturn', 'intpower', 'isinfinite', 'isnan', 'iszero', + 'ldexp', 'lnxp1', 'log10', 'log2', 'logn', 'max', 'maxintvalue', + 'maxvalue', 'mean', 'meanandstddev', 'min', 'minintvalue', 'minvalue', + 'momentskewkurtosis', 'netpresentvalue', 'norm', 'numberofperiods', + 'payment', 'periodpayment', 'poly', 'popnstddev', 'popnvariance', + 'power', 'presentvalue', 'radtocycle', 'radtodeg', 'radtograd', + 'randg', 'randomrange', 'roundto', 'samevalue', 'sec', 'secant', + 'sech', 'setexceptionmask', 'setprecisionmode', 'setroundmode', + 'sign', 'simpleroundto', 'sincos', 'sinh', 'slndepreciation', 'stddev', + 'sum', 'sumint', 'sumofsquares', 'sumsandsquares', 'syddepreciation', + 'tan', 'tanh', 'totalvariance', 'variance' + ) + } + + ASM_REGISTERS = set(( + 'ah', 'al', 'ax', 'bh', 'bl', 'bp', 'bx', 'ch', 'cl', 'cr0', + 'cr1', 'cr2', 'cr3', 'cr4', 'cs', 'cx', 'dh', 'di', 'dl', 'dr0', + 'dr1', 'dr2', 'dr3', 'dr4', 'dr5', 'dr6', 'dr7', 'ds', 'dx', + 
'eax', 'ebp', 'ebx', 'ecx', 'edi', 'edx', 'es', 'esi', 'esp', + 'fs', 'gs', 'mm0', 'mm1', 'mm2', 'mm3', 'mm4', 'mm5', 'mm6', + 'mm7', 'si', 'sp', 'ss', 'st0', 'st1', 'st2', 'st3', 'st4', 'st5', + 'st6', 'st7', 'xmm0', 'xmm1', 'xmm2', 'xmm3', 'xmm4', 'xmm5', + 'xmm6', 'xmm7' + )) + + ASM_INSTRUCTIONS = set(( + 'aaa', 'aad', 'aam', 'aas', 'adc', 'add', 'and', 'arpl', 'bound', + 'bsf', 'bsr', 'bswap', 'bt', 'btc', 'btr', 'bts', 'call', 'cbw', + 'cdq', 'clc', 'cld', 'cli', 'clts', 'cmc', 'cmova', 'cmovae', + 'cmovb', 'cmovbe', 'cmovc', 'cmovcxz', 'cmove', 'cmovg', + 'cmovge', 'cmovl', 'cmovle', 'cmovna', 'cmovnae', 'cmovnb', + 'cmovnbe', 'cmovnc', 'cmovne', 'cmovng', 'cmovnge', 'cmovnl', + 'cmovnle', 'cmovno', 'cmovnp', 'cmovns', 'cmovnz', 'cmovo', + 'cmovp', 'cmovpe', 'cmovpo', 'cmovs', 'cmovz', 'cmp', 'cmpsb', + 'cmpsd', 'cmpsw', 'cmpxchg', 'cmpxchg486', 'cmpxchg8b', 'cpuid', + 'cwd', 'cwde', 'daa', 'das', 'dec', 'div', 'emms', 'enter', 'hlt', + 'ibts', 'icebp', 'idiv', 'imul', 'in', 'inc', 'insb', 'insd', + 'insw', 'int', 'int01', 'int03', 'int1', 'int3', 'into', 'invd', + 'invlpg', 'iret', 'iretd', 'iretw', 'ja', 'jae', 'jb', 'jbe', + 'jc', 'jcxz', 'jcxz', 'je', 'jecxz', 'jg', 'jge', 'jl', 'jle', + 'jmp', 'jna', 'jnae', 'jnb', 'jnbe', 'jnc', 'jne', 'jng', 'jnge', + 'jnl', 'jnle', 'jno', 'jnp', 'jns', 'jnz', 'jo', 'jp', 'jpe', + 'jpo', 'js', 'jz', 'lahf', 'lar', 'lcall', 'lds', 'lea', 'leave', + 'les', 'lfs', 'lgdt', 'lgs', 'lidt', 'ljmp', 'lldt', 'lmsw', + 'loadall', 'loadall286', 'lock', 'lodsb', 'lodsd', 'lodsw', + 'loop', 'loope', 'loopne', 'loopnz', 'loopz', 'lsl', 'lss', 'ltr', + 'mov', 'movd', 'movq', 'movsb', 'movsd', 'movsw', 'movsx', + 'movzx', 'mul', 'neg', 'nop', 'not', 'or', 'out', 'outsb', 'outsd', + 'outsw', 'pop', 'popa', 'popad', 'popaw', 'popf', 'popfd', 'popfw', + 'push', 'pusha', 'pushad', 'pushaw', 'pushf', 'pushfd', 'pushfw', + 'rcl', 'rcr', 'rdmsr', 'rdpmc', 'rdshr', 'rdtsc', 'rep', 'repe', + 'repne', 'repnz', 'repz', 'ret', 'retf', 'retn', 
'rol', 'ror', + 'rsdc', 'rsldt', 'rsm', 'sahf', 'sal', 'salc', 'sar', 'sbb', + 'scasb', 'scasd', 'scasw', 'seta', 'setae', 'setb', 'setbe', + 'setc', 'setcxz', 'sete', 'setg', 'setge', 'setl', 'setle', + 'setna', 'setnae', 'setnb', 'setnbe', 'setnc', 'setne', 'setng', + 'setnge', 'setnl', 'setnle', 'setno', 'setnp', 'setns', 'setnz', + 'seto', 'setp', 'setpe', 'setpo', 'sets', 'setz', 'sgdt', 'shl', + 'shld', 'shr', 'shrd', 'sidt', 'sldt', 'smi', 'smint', 'smintold', + 'smsw', 'stc', 'std', 'sti', 'stosb', 'stosd', 'stosw', 'str', + 'sub', 'svdc', 'svldt', 'svts', 'syscall', 'sysenter', 'sysexit', + 'sysret', 'test', 'ud1', 'ud2', 'umov', 'verr', 'verw', 'wait', + 'wbinvd', 'wrmsr', 'wrshr', 'xadd', 'xbts', 'xchg', 'xlat', + 'xlatb', 'xor' + )) + + def __init__(self, **options): + Lexer.__init__(self, **options) + self.keywords = set() + if get_bool_opt(options, 'turbopascal', True): + self.keywords.update(self.TURBO_PASCAL_KEYWORDS) + if get_bool_opt(options, 'delphi', True): + self.keywords.update(self.DELPHI_KEYWORDS) + if get_bool_opt(options, 'freepascal', True): + self.keywords.update(self.FREE_PASCAL_KEYWORDS) + self.builtins = set() + for unit in get_list_opt(options, 'units', list(self.BUILTIN_UNITS)): + self.builtins.update(self.BUILTIN_UNITS[unit]) + + def get_tokens_unprocessed(self, text): + scanner = Scanner(text, re.DOTALL | re.MULTILINE | re.IGNORECASE) + stack = ['initial'] + in_function_block = False + in_property_block = False + was_dot = False + next_token_is_function = False + next_token_is_property = False + collect_labels = False + block_labels = set() + brace_balance = [0, 0] + + while not scanner.eos: + token = Error + + if stack[-1] == 'initial': + if scanner.scan(r'\s+'): + token = Text + elif scanner.scan(r'\{.*?\}|\(\*.*?\*\)'): + if scanner.match.startswith('$'): + token = Comment.Preproc + else: + token = Comment.Multiline + elif scanner.scan(r'//.*?$'): + token = Comment.Single + elif scanner.scan(r'[-+*\/=<>:;,.@\^]'): + token = 
Operator + # stop label highlighting on next ";" + if collect_labels and scanner.match == ';': + collect_labels = False + elif scanner.scan(r'[\(\)\[\]]+'): + token = Punctuation + # abort function naming ``foo = Function(...)`` + next_token_is_function = False + # if we are in a function block we count the open + # braces because ootherwise it's impossible to + # determine the end of the modifier context + if in_function_block or in_property_block: + if scanner.match == '(': + brace_balance[0] += 1 + elif scanner.match == ')': + brace_balance[0] -= 1 + elif scanner.match == '[': + brace_balance[1] += 1 + elif scanner.match == ']': + brace_balance[1] -= 1 + elif scanner.scan(r'[A-Za-z_][A-Za-z_0-9]*'): + lowercase_name = scanner.match.lower() + if lowercase_name == 'result': + token = Name.Builtin.Pseudo + elif lowercase_name in self.keywords: + token = Keyword + # if we are in a special block and a + # block ending keyword occours (and the parenthesis + # is balanced) we end the current block context + if (in_function_block or in_property_block) and \ + lowercase_name in self.BLOCK_KEYWORDS and \ + brace_balance[0] <= 0 and \ + brace_balance[1] <= 0: + in_function_block = False + in_property_block = False + brace_balance = [0, 0] + block_labels = set() + if lowercase_name in ('label', 'goto'): + collect_labels = True + elif lowercase_name == 'asm': + stack.append('asm') + elif lowercase_name == 'property': + in_property_block = True + next_token_is_property = True + elif lowercase_name in ('procedure', 'operator', + 'function', 'constructor', + 'destructor'): + in_function_block = True + next_token_is_function = True + # we are in a function block and the current name + # is in the set of registered modifiers. 
highlight + # it as pseudo keyword + elif in_function_block and \ + lowercase_name in self.FUNCTION_MODIFIERS: + token = Keyword.Pseudo + # if we are in a property highlight some more + # modifiers + elif in_property_block and \ + lowercase_name in ('read', 'write'): + token = Keyword.Pseudo + next_token_is_function = True + # if the last iteration set next_token_is_function + # to true we now want this name highlighted as + # function. so do that and reset the state + elif next_token_is_function: + # Look if the next token is a dot. If yes it's + # not a function, but a class name and the + # part after the dot a function name + if scanner.test(r'\s*\.\s*'): + token = Name.Class + # it's not a dot, our job is done + else: + token = Name.Function + next_token_is_function = False + # same for properties + elif next_token_is_property: + token = Name.Property + next_token_is_property = False + # Highlight this token as label and add it + # to the list of known labels + elif collect_labels: + token = Name.Label + block_labels.add(scanner.match.lower()) + # name is in list of known labels + elif lowercase_name in block_labels: + token = Name.Label + elif lowercase_name in self.BUILTIN_TYPES: + token = Keyword.Type + elif lowercase_name in self.DIRECTIVES: + token = Keyword.Pseudo + # builtins are just builtins if the token + # before isn't a dot + elif not was_dot and lowercase_name in self.builtins: + token = Name.Builtin + else: + token = Name + elif scanner.scan(r"'"): + token = String + stack.append('string') + elif scanner.scan(r'\#(\d+|\$[0-9A-Fa-f]+)'): + token = String.Char + elif scanner.scan(r'\$[0-9A-Fa-f]+'): + token = Number.Hex + elif scanner.scan(r'\d+(?![eE]|\.[^.])'): + token = Number.Integer + elif scanner.scan(r'\d+(\.\d+([eE][+-]?\d+)?|[eE][+-]?\d+)'): + token = Number.Float + else: + # if the stack depth is deeper than once, pop + if len(stack) > 1: + stack.pop() + scanner.get_char() + + elif stack[-1] == 'string': + if scanner.scan(r"''"): + token 
= String.Escape + elif scanner.scan(r"'"): + token = String + stack.pop() + elif scanner.scan(r"[^']*"): + token = String + else: + scanner.get_char() + stack.pop() + + elif stack[-1] == 'asm': + if scanner.scan(r'\s+'): + token = Text + elif scanner.scan(r'end'): + token = Keyword + stack.pop() + elif scanner.scan(r'\{.*?\}|\(\*.*?\*\)'): + if scanner.match.startswith('$'): + token = Comment.Preproc + else: + token = Comment.Multiline + elif scanner.scan(r'//.*?$'): + token = Comment.Single + elif scanner.scan(r"'"): + token = String + stack.append('string') + elif scanner.scan(r'@@[A-Za-z_][A-Za-z_0-9]*'): + token = Name.Label + elif scanner.scan(r'[A-Za-z_][A-Za-z_0-9]*'): + lowercase_name = scanner.match.lower() + if lowercase_name in self.ASM_INSTRUCTIONS: + token = Keyword + elif lowercase_name in self.ASM_REGISTERS: + token = Name.Builtin + else: + token = Name + elif scanner.scan(r'[-+*\/=<>:;,.@\^]+'): + token = Operator + elif scanner.scan(r'[\(\)\[\]]+'): + token = Punctuation + elif scanner.scan(r'\$[0-9A-Fa-f]+'): + token = Number.Hex + elif scanner.scan(r'\d+(?![eE]|\.[^.])'): + token = Number.Integer + elif scanner.scan(r'\d+(\.\d+([eE][+-]?\d+)?|[eE][+-]?\d+)'): + token = Number.Float + else: + scanner.get_char() + stack.pop() + + # save the dot!!!11 + if scanner.match.strip(): + was_dot = scanner.match == '.' + yield scanner.start_pos, token, scanner.match or '' + + +class Modula2Lexer(RegexLexer): + """ + For `Modula-2 `_ source code. + + Additional options that determine which keywords are highlighted: + + `pim` + Select PIM Modula-2 dialect (default: True). + `iso` + Select ISO Modula-2 dialect (default: False). + `objm2` + Select Objective Modula-2 dialect (default: False). + `gm2ext` + Also highlight GNU extensions (default: False). + + .. 
versionadded:: 1.3 + """ + name = 'Modula-2' + aliases = ['modula2', 'm2'] + filenames = ['*.def', '*.mod'] + mimetypes = ['text/x-modula2'] + + flags = re.MULTILINE | re.DOTALL + + tokens = { + 'whitespace': [ + (r'\n+', Text), # blank lines + (r'\s+', Text), # whitespace + ], + 'identifiers': [ + (r'([a-zA-Z_$][\w$]*)', Name), + ], + 'numliterals': [ + (r'[01]+B', Number.Bin), # binary number (ObjM2) + (r'[0-7]+B', Number.Oct), # octal number (PIM + ISO) + (r'[0-7]+C', Number.Oct), # char code (PIM + ISO) + (r'[0-9A-F]+C', Number.Hex), # char code (ObjM2) + (r'[0-9A-F]+H', Number.Hex), # hexadecimal number + (r'[0-9]+\.[0-9]+E[+-][0-9]+', Number.Float), # real number + (r'[0-9]+\.[0-9]+', Number.Float), # real number + (r'[0-9]+', Number.Integer), # decimal whole number + ], + 'strings': [ + (r"'(\\\\|\\'|[^'])*'", String), # single quoted string + (r'"(\\\\|\\"|[^"])*"', String), # double quoted string + ], + 'operators': [ + (r'[*/+=#~&<>\^-]', Operator), + (r':=', Operator), # assignment + (r'@', Operator), # pointer deref (ISO) + (r'\.\.', Operator), # ellipsis or range + (r'`', Operator), # Smalltalk message (ObjM2) + (r'::', Operator), # type conversion (ObjM2) + ], + 'punctuation': [ + (r'[()\[\]{},.:;|]', Punctuation), + ], + 'comments': [ + (r'//.*?\n', Comment.Single), # ObjM2 + (r'/\*(.*?)\*/', Comment.Multiline), # ObjM2 + (r'\(\*([^$].*?)\*\)', Comment.Multiline), + # TO DO: nesting of (* ... 
*) comments + ], + 'pragmas': [ + (r'\(\*\$(.*?)\*\)', Comment.Preproc), # PIM + (r'<\*(.*?)\*>', Comment.Preproc), # ISO + ObjM2 + ], + 'root': [ + include('whitespace'), + include('comments'), + include('pragmas'), + include('identifiers'), + include('numliterals'), + include('strings'), + include('operators'), + include('punctuation'), + ] + } + + pim_reserved_words = [ + # 40 reserved words + 'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', + 'DIV', 'DO', 'ELSE', 'ELSIF', 'END', 'EXIT', 'EXPORT', 'FOR', + 'FROM', 'IF', 'IMPLEMENTATION', 'IMPORT', 'IN', 'LOOP', 'MOD', + 'MODULE', 'NOT', 'OF', 'OR', 'POINTER', 'PROCEDURE', 'QUALIFIED', + 'RECORD', 'REPEAT', 'RETURN', 'SET', 'THEN', 'TO', 'TYPE', + 'UNTIL', 'VAR', 'WHILE', 'WITH', + ] + + pim_pervasives = [ + # 31 pervasives + 'ABS', 'BITSET', 'BOOLEAN', 'CAP', 'CARDINAL', 'CHAR', 'CHR', 'DEC', + 'DISPOSE', 'EXCL', 'FALSE', 'FLOAT', 'HALT', 'HIGH', 'INC', 'INCL', + 'INTEGER', 'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NEW', 'NIL', 'ODD', + 'ORD', 'PROC', 'REAL', 'SIZE', 'TRUE', 'TRUNC', 'VAL', + ] + + iso_reserved_words = [ + # 46 reserved words + 'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', 'DIV', + 'DO', 'ELSE', 'ELSIF', 'END', 'EXCEPT', 'EXIT', 'EXPORT', 'FINALLY', + 'FOR', 'FORWARD', 'FROM', 'IF', 'IMPLEMENTATION', 'IMPORT', 'IN', + 'LOOP', 'MOD', 'MODULE', 'NOT', 'OF', 'OR', 'PACKEDSET', 'POINTER', + 'PROCEDURE', 'QUALIFIED', 'RECORD', 'REPEAT', 'REM', 'RETRY', + 'RETURN', 'SET', 'THEN', 'TO', 'TYPE', 'UNTIL', 'VAR', 'WHILE', + 'WITH', + ] + + iso_pervasives = [ + # 42 pervasives + 'ABS', 'BITSET', 'BOOLEAN', 'CAP', 'CARDINAL', 'CHAR', 'CHR', 'CMPLX', + 'COMPLEX', 'DEC', 'DISPOSE', 'EXCL', 'FALSE', 'FLOAT', 'HALT', 'HIGH', + 'IM', 'INC', 'INCL', 'INT', 'INTEGER', 'INTERRUPTIBLE', 'LENGTH', + 'LFLOAT', 'LONGCOMPLEX', 'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NEW', + 'NIL', 'ODD', 'ORD', 'PROC', 'PROTECTION', 'RE', 'REAL', 'SIZE', + 'TRUE', 'TRUNC', 'UNINTERRUBTIBLE', 'VAL', + ] + + 
objm2_reserved_words = [ + # base language, 42 reserved words + 'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', 'DIV', + 'DO', 'ELSE', 'ELSIF', 'END', 'ENUM', 'EXIT', 'FOR', 'FROM', 'IF', + 'IMMUTABLE', 'IMPLEMENTATION', 'IMPORT', 'IN', 'IS', 'LOOP', 'MOD', + 'MODULE', 'NOT', 'OF', 'OPAQUE', 'OR', 'POINTER', 'PROCEDURE', + 'RECORD', 'REPEAT', 'RETURN', 'SET', 'THEN', 'TO', 'TYPE', + 'UNTIL', 'VAR', 'VARIADIC', 'WHILE', + # OO extensions, 16 reserved words + 'BYCOPY', 'BYREF', 'CLASS', 'CONTINUE', 'CRITICAL', 'INOUT', 'METHOD', + 'ON', 'OPTIONAL', 'OUT', 'PRIVATE', 'PROTECTED', 'PROTOCOL', 'PUBLIC', + 'SUPER', 'TRY', + ] + + objm2_pervasives = [ + # base language, 38 pervasives + 'ABS', 'BITSET', 'BOOLEAN', 'CARDINAL', 'CHAR', 'CHR', 'DISPOSE', + 'FALSE', 'HALT', 'HIGH', 'INTEGER', 'INRANGE', 'LENGTH', 'LONGCARD', + 'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NEG', 'NEW', 'NEXTV', 'NIL', + 'OCTET', 'ODD', 'ORD', 'PRED', 'PROC', 'READ', 'REAL', 'SUCC', 'TMAX', + 'TMIN', 'TRUE', 'TSIZE', 'UNICHAR', 'VAL', 'WRITE', 'WRITEF', + # OO extensions, 3 pervasives + 'OBJECT', 'NO', 'YES', + ] + + gnu_reserved_words = [ + # 10 additional reserved words + 'ASM', '__ATTRIBUTE__', '__BUILTIN__', '__COLUMN__', '__DATE__', + '__FILE__', '__FUNCTION__', '__LINE__', '__MODULE__', 'VOLATILE', + ] + + gnu_pervasives = [ + # 21 identifiers, actually from pseudo-module SYSTEM + # but we will highlight them as if they were pervasives + 'BITSET8', 'BITSET16', 'BITSET32', 'CARDINAL8', 'CARDINAL16', + 'CARDINAL32', 'CARDINAL64', 'COMPLEX32', 'COMPLEX64', 'COMPLEX96', + 'COMPLEX128', 'INTEGER8', 'INTEGER16', 'INTEGER32', 'INTEGER64', + 'REAL8', 'REAL16', 'REAL32', 'REAL96', 'REAL128', 'THROW', + ] + + def __init__(self, **options): + self.reserved_words = set() + self.pervasives = set() + # ISO Modula-2 + if get_bool_opt(options, 'iso', False): + self.reserved_words.update(self.iso_reserved_words) + self.pervasives.update(self.iso_pervasives) + # Objective Modula-2 + elif 
get_bool_opt(options, 'objm2', False): + self.reserved_words.update(self.objm2_reserved_words) + self.pervasives.update(self.objm2_pervasives) + # PIM Modula-2 (DEFAULT) + else: + self.reserved_words.update(self.pim_reserved_words) + self.pervasives.update(self.pim_pervasives) + # GNU extensions + if get_bool_opt(options, 'gm2ext', False): + self.reserved_words.update(self.gnu_reserved_words) + self.pervasives.update(self.gnu_pervasives) + # initialise + RegexLexer.__init__(self, **options) + + def get_tokens_unprocessed(self, text): + for index, token, value in RegexLexer.get_tokens_unprocessed(self, text): + # check for reserved words and pervasives + if token is Name: + if value in self.reserved_words: + token = Keyword.Reserved + elif value in self.pervasives: + token = Keyword.Pervasive + # return result + yield index, token, value + + +class AdaLexer(RegexLexer): + """ + For Ada source code. + + .. versionadded:: 1.3 + """ + + name = 'Ada' + aliases = ['ada', 'ada95', 'ada2005'] + filenames = ['*.adb', '*.ads', '*.ada'] + mimetypes = ['text/x-ada'] + + flags = re.MULTILINE | re.IGNORECASE + + tokens = { + 'root': [ + (r'[^\S\n]+', Text), + (r'--.*?\n', Comment.Single), + (r'[^\S\n]+', Text), + (r'function|procedure|entry', Keyword.Declaration, 'subprogram'), + (r'(subtype|type)(\s+)(\w+)', + bygroups(Keyword.Declaration, Text, Keyword.Type), 'type_def'), + (r'task|protected', Keyword.Declaration), + (r'(subtype)(\s+)', bygroups(Keyword.Declaration, Text)), + (r'(end)(\s+)', bygroups(Keyword.Reserved, Text), 'end'), + (r'(pragma)(\s+)(\w+)', bygroups(Keyword.Reserved, Text, + Comment.Preproc)), + (r'(true|false|null)\b', Keyword.Constant), + (words(( + 'Address', 'Byte', 'Boolean', 'Character', 'Controlled', 'Count', 'Cursor', + 'Duration', 'File_Mode', 'File_Type', 'Float', 'Generator', 'Integer', 'Long_Float', + 'Long_Integer', 'Long_Long_Float', 'Long_Long_Integer', 'Natural', 'Positive', + 'Reference_Type', 'Short_Float', 'Short_Integer', 
'Short_Short_Float', + 'Short_Short_Integer', 'String', 'Wide_Character', 'Wide_String'), suffix=r'\b'), + Keyword.Type), + (r'(and(\s+then)?|in|mod|not|or(\s+else)|rem)\b', Operator.Word), + (r'generic|private', Keyword.Declaration), + (r'package', Keyword.Declaration, 'package'), + (r'array\b', Keyword.Reserved, 'array_def'), + (r'(with|use)(\s+)', bygroups(Keyword.Namespace, Text), 'import'), + (r'(\w+)(\s*)(:)(\s*)(constant)', + bygroups(Name.Constant, Text, Punctuation, Text, + Keyword.Reserved)), + (r'<<\w+>>', Name.Label), + (r'(\w+)(\s*)(:)(\s*)(declare|begin|loop|for|while)', + bygroups(Name.Label, Text, Punctuation, Text, Keyword.Reserved)), + (words(( + 'abort', 'abs', 'abstract', 'accept', 'access', 'aliased', 'all', + 'array', 'at', 'begin', 'body', 'case', 'constant', 'declare', + 'delay', 'delta', 'digits', 'do', 'else', 'elsif', 'end', 'entry', + 'exception', 'exit', 'interface', 'for', 'goto', 'if', 'is', 'limited', + 'loop', 'new', 'null', 'of', 'or', 'others', 'out', 'overriding', + 'pragma', 'protected', 'raise', 'range', 'record', 'renames', 'requeue', + 'return', 'reverse', 'select', 'separate', 'subtype', 'synchronized', + 'task', 'tagged', 'terminate', 'then', 'type', 'until', 'when', + 'while', 'xor'), prefix=r'\b', suffix=r'\b'), + Keyword.Reserved), + (r'"[^"]*"', String), + include('attribute'), + include('numbers'), + (r"'[^']'", String.Character), + (r'(\w+)(\s*|[(,])', bygroups(Name, using(this))), + (r"(<>|=>|:=|[()|:;,.'])", Punctuation), + (r'[*<>+=/&-]', Operator), + (r'\n+', Text), + ], + 'numbers': [ + (r'[0-9_]+#[0-9a-f]+#', Number.Hex), + (r'[0-9_]+\.[0-9_]*', Number.Float), + (r'[0-9_]+', Number.Integer), + ], + 'attribute': [ + (r"(')(\w+)", bygroups(Punctuation, Name.Attribute)), + ], + 'subprogram': [ + (r'\(', Punctuation, ('#pop', 'formal_part')), + (r';', Punctuation, '#pop'), + (r'is\b', Keyword.Reserved, '#pop'), + (r'"[^"]+"|\w+', Name.Function), + include('root'), + ], + 'end': [ + ('(if|case|record|loop|select)', 
Keyword.Reserved), + ('"[^"]+"|[\w.]+', Name.Function), + ('\s+', Text), + (';', Punctuation, '#pop'), + ], + 'type_def': [ + (r';', Punctuation, '#pop'), + (r'\(', Punctuation, 'formal_part'), + (r'with|and|use', Keyword.Reserved), + (r'array\b', Keyword.Reserved, ('#pop', 'array_def')), + (r'record\b', Keyword.Reserved, ('record_def')), + (r'(null record)(;)', bygroups(Keyword.Reserved, Punctuation), '#pop'), + include('root'), + ], + 'array_def': [ + (r';', Punctuation, '#pop'), + (r'(\w+)(\s+)(range)', bygroups(Keyword.Type, Text, Keyword.Reserved)), + include('root'), + ], + 'record_def': [ + (r'end record', Keyword.Reserved, '#pop'), + include('root'), + ], + 'import': [ + (r'[\w.]+', Name.Namespace, '#pop'), + default('#pop'), + ], + 'formal_part': [ + (r'\)', Punctuation, '#pop'), + (r'\w+', Name.Variable), + (r',|:[^=]', Punctuation), + (r'(in|not|null|out|access)\b', Keyword.Reserved), + include('root'), + ], + 'package': [ + ('body', Keyword.Declaration), + ('is\s+new|renames', Keyword.Reserved), + ('is', Keyword.Reserved, '#pop'), + (';', Punctuation, '#pop'), + ('\(', Punctuation, 'package_instantiation'), + ('([\w.]+)', Name.Class), + include('root'), + ], + 'package_instantiation': [ + (r'("[^"]+"|\w+)(\s+)(=>)', bygroups(Name.Variable, Text, Punctuation)), + (r'[\w.\'"]', Text), + (r'\)', Punctuation, '#pop'), + include('root'), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/pawn.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/pawn.py new file mode 100644 index 0000000..d55e2cc --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/pawn.py @@ -0,0 +1,199 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.pawn + ~~~~~~~~~~~~~~~~~~~~ + + Lexers for the Pawn languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +from pygments.lexer import RegexLexer +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Error +from pygments.util import get_bool_opt + +__all__ = ['SourcePawnLexer', 'PawnLexer'] + + +class SourcePawnLexer(RegexLexer): + """ + For SourcePawn source code with preprocessor directives. + + .. versionadded:: 1.6 + """ + name = 'SourcePawn' + aliases = ['sp'] + filenames = ['*.sp'] + mimetypes = ['text/x-sourcepawn'] + + #: optional Comment or Whitespace + _ws = r'(?:\s|//.*?\n|/\*.*?\*/)+' + #: only one /* */ style comment + _ws1 = r'\s*(?:/[*].*?[*]/\s*)*' + + tokens = { + 'root': [ + # preprocessor directives: without whitespace + ('^#if\s+0', Comment.Preproc, 'if0'), + ('^#', Comment.Preproc, 'macro'), + # or with whitespace + ('^' + _ws1 + r'#if\s+0', Comment.Preproc, 'if0'), + ('^' + _ws1 + '#', Comment.Preproc, 'macro'), + (r'\n', Text), + (r'\s+', Text), + (r'\\\n', Text), # line continuation + (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single), + (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline), + (r'[{}]', Punctuation), + (r'L?"', String, 'string'), + (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char), + (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float), + (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), + (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex), + (r'0[0-7]+[LlUu]*', Number.Oct), + (r'\d+[LlUu]*', Number.Integer), + (r'\*/', Error), + (r'[~!%^&*+=|?:<>/-]', Operator), + (r'[()\[\],.;]', Punctuation), + (r'(case|const|continue|native|' + r'default|else|enum|for|if|new|operator|' + r'public|return|sizeof|static|decl|struct|switch)\b', Keyword), + (r'(bool|Float)\b', Keyword.Type), + (r'(true|false)\b', Keyword.Constant), + ('[a-zA-Z_]\w*', Name), + ], + 'string': [ + (r'"', String, '#pop'), + (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), + (r'[^\\"\n]+', String), # all other characters + (r'\\\n', String), # line continuation + (r'\\', String), # 
stray backslash + ], + 'macro': [ + (r'[^/\n]+', Comment.Preproc), + (r'/\*(.|\n)*?\*/', Comment.Multiline), + (r'//.*?\n', Comment.Single, '#pop'), + (r'/', Comment.Preproc), + (r'(?<=\\)\n', Comment.Preproc), + (r'\n', Comment.Preproc, '#pop'), + ], + 'if0': [ + (r'^\s*#if.*?(?/-]', Operator), + (r'[()\[\],.;]', Punctuation), + (r'(switch|case|default|const|new|static|char|continue|break|' + r'if|else|for|while|do|operator|enum|' + r'public|return|sizeof|tagof|state|goto)\b', Keyword), + (r'(bool|Float)\b', Keyword.Type), + (r'(true|false)\b', Keyword.Constant), + ('[a-zA-Z_]\w*', Name), + ], + 'string': [ + (r'"', String, '#pop'), + (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), + (r'[^\\"\n]+', String), # all other characters + (r'\\\n', String), # line continuation + (r'\\', String), # stray backslash + ], + 'macro': [ + (r'[^/\n]+', Comment.Preproc), + (r'/\*(.|\n)*?\*/', Comment.Multiline), + (r'//.*?\n', Comment.Single, '#pop'), + (r'/', Comment.Preproc), + (r'(?<=\\)\n', Comment.Preproc), + (r'\n', Comment.Preproc, '#pop'), + ], + 'if0': [ + (r'^\s*#if.*?(?`_ source code. + """ + + name = 'Perl' + aliases = ['perl', 'pl'] + filenames = ['*.pl', '*.pm', '*.t'] + mimetypes = ['text/x-perl', 'application/x-perl'] + + flags = re.DOTALL | re.MULTILINE + # TODO: give this to a perl guy who knows how to parse perl... 
+ tokens = { + 'balanced-regex': [ + (r'/(\\\\|\\[^\\]|[^\\/])*/[egimosx]*', String.Regex, '#pop'), + (r'!(\\\\|\\[^\\]|[^\\!])*![egimosx]*', String.Regex, '#pop'), + (r'\\(\\\\|[^\\])*\\[egimosx]*', String.Regex, '#pop'), + (r'\{(\\\\|\\[^\\]|[^\\}])*\}[egimosx]*', String.Regex, '#pop'), + (r'<(\\\\|\\[^\\]|[^\\>])*>[egimosx]*', String.Regex, '#pop'), + (r'\[(\\\\|\\[^\\]|[^\\\]])*\][egimosx]*', String.Regex, '#pop'), + (r'\((\\\\|\\[^\\]|[^\\)])*\)[egimosx]*', String.Regex, '#pop'), + (r'@(\\\\|\\[^\\]|[^\\@])*@[egimosx]*', String.Regex, '#pop'), + (r'%(\\\\|\\[^\\]|[^\\%])*%[egimosx]*', String.Regex, '#pop'), + (r'\$(\\\\|\\[^\\]|[^\\$])*\$[egimosx]*', String.Regex, '#pop'), + ], + 'root': [ + (r'\#.*?$', Comment.Single), + (r'^=[a-zA-Z0-9]+\s+.*?\n=cut', Comment.Multiline), + (words(( + 'case', 'continue', 'do', 'else', 'elsif', 'for', 'foreach', + 'if', 'last', 'my', 'next', 'our', 'redo', 'reset', 'then', + 'unless', 'until', 'while', 'use', 'print', 'new', 'BEGIN', + 'CHECK', 'INIT', 'END', 'return'), suffix=r'\b'), + Keyword), + (r'(format)(\s+)(\w+)(\s*)(=)(\s*\n)', + bygroups(Keyword, Text, Name, Text, Punctuation, Text), 'format'), + (r'(eq|lt|gt|le|ge|ne|not|and|or|cmp)\b', Operator.Word), + # common delimiters + (r's/(\\\\|\\[^\\]|[^\\/])*/(\\\\|\\[^\\]|[^\\/])*/[egimosx]*', + String.Regex), + (r's!(\\\\|\\!|[^!])*!(\\\\|\\!|[^!])*![egimosx]*', String.Regex), + (r's\\(\\\\|[^\\])*\\(\\\\|[^\\])*\\[egimosx]*', String.Regex), + (r's@(\\\\|\\[^\\]|[^\\@])*@(\\\\|\\[^\\]|[^\\@])*@[egimosx]*', + String.Regex), + (r's%(\\\\|\\[^\\]|[^\\%])*%(\\\\|\\[^\\]|[^\\%])*%[egimosx]*', + String.Regex), + # balanced delimiters + (r's\{(\\\\|\\[^\\]|[^\\}])*\}\s*', String.Regex, 'balanced-regex'), + (r's<(\\\\|\\[^\\]|[^\\>])*>\s*', String.Regex, 'balanced-regex'), + (r's\[(\\\\|\\[^\\]|[^\\\]])*\]\s*', String.Regex, + 'balanced-regex'), + (r's\((\\\\|\\[^\\]|[^\\)])*\)\s*', String.Regex, + 'balanced-regex'), + + (r'm?/(\\\\|\\[^\\]|[^\\/\n])*/[gcimosx]*', 
String.Regex), + (r'm(?=[/!\\{<\[(@%$])', String.Regex, 'balanced-regex'), + (r'((?<==~)|(?<=\())\s*/(\\\\|\\[^\\]|[^\\/])*/[gcimosx]*', + String.Regex), + (r'\s+', Text), + (words(( + 'abs', 'accept', 'alarm', 'atan2', 'bind', 'binmode', 'bless', 'caller', 'chdir', + 'chmod', 'chomp', 'chop', 'chown', 'chr', 'chroot', 'close', 'closedir', 'connect', + 'continue', 'cos', 'crypt', 'dbmclose', 'dbmopen', 'defined', 'delete', 'die', + 'dump', 'each', 'endgrent', 'endhostent', 'endnetent', 'endprotoent', + 'endpwent', 'endservent', 'eof', 'eval', 'exec', 'exists', 'exit', 'exp', 'fcntl', + 'fileno', 'flock', 'fork', 'format', 'formline', 'getc', 'getgrent', 'getgrgid', + 'getgrnam', 'gethostbyaddr', 'gethostbyname', 'gethostent', 'getlogin', + 'getnetbyaddr', 'getnetbyname', 'getnetent', 'getpeername', 'getpgrp', + 'getppid', 'getpriority', 'getprotobyname', 'getprotobynumber', + 'getprotoent', 'getpwent', 'getpwnam', 'getpwuid', 'getservbyname', + 'getservbyport', 'getservent', 'getsockname', 'getsockopt', 'glob', 'gmtime', + 'goto', 'grep', 'hex', 'import', 'index', 'int', 'ioctl', 'join', 'keys', 'kill', 'last', + 'lc', 'lcfirst', 'length', 'link', 'listen', 'local', 'localtime', 'log', 'lstat', + 'map', 'mkdir', 'msgctl', 'msgget', 'msgrcv', 'msgsnd', 'my', 'next', 'no', 'oct', 'open', + 'opendir', 'ord', 'our', 'pack', 'package', 'pipe', 'pop', 'pos', 'printf', + 'prototype', 'push', 'quotemeta', 'rand', 'read', 'readdir', + 'readline', 'readlink', 'readpipe', 'recv', 'redo', 'ref', 'rename', 'require', + 'reverse', 'rewinddir', 'rindex', 'rmdir', 'scalar', 'seek', 'seekdir', + 'select', 'semctl', 'semget', 'semop', 'send', 'setgrent', 'sethostent', 'setnetent', + 'setpgrp', 'setpriority', 'setprotoent', 'setpwent', 'setservent', + 'setsockopt', 'shift', 'shmctl', 'shmget', 'shmread', 'shmwrite', 'shutdown', + 'sin', 'sleep', 'socket', 'socketpair', 'sort', 'splice', 'split', 'sprintf', 'sqrt', + 'srand', 'stat', 'study', 'substr', 'symlink', 'syscall', 'sysopen', 
'sysread', + 'sysseek', 'system', 'syswrite', 'tell', 'telldir', 'tie', 'tied', 'time', 'times', 'tr', + 'truncate', 'uc', 'ucfirst', 'umask', 'undef', 'unlink', 'unpack', 'unshift', 'untie', + 'utime', 'values', 'vec', 'wait', 'waitpid', 'wantarray', 'warn', 'write'), suffix=r'\b'), + Name.Builtin), + (r'((__(DATA|DIE|WARN)__)|(STD(IN|OUT|ERR)))\b', Name.Builtin.Pseudo), + (r'<<([\'"]?)([a-zA-Z_]\w*)\1;?\n.*?\n\2\n', String), + (r'__END__', Comment.Preproc, 'end-part'), + (r'\$\^[ADEFHILMOPSTWX]', Name.Variable.Global), + (r"\$[\\\"\[\]'&`+*.,;=%~?@$!<>(^|/-](?!\w)", Name.Variable.Global), + (r'[$@%#]+', Name.Variable, 'varname'), + (r'0_?[0-7]+(_[0-7]+)*', Number.Oct), + (r'0x[0-9A-Fa-f]+(_[0-9A-Fa-f]+)*', Number.Hex), + (r'0b[01]+(_[01]+)*', Number.Bin), + (r'(?i)(\d*(_\d*)*\.\d+(_\d*)*|\d+(_\d*)*\.\d+(_\d*)*)(e[+-]?\d+)?', + Number.Float), + (r'(?i)\d+(_\d*)*e[+-]?\d+(_\d*)*', Number.Float), + (r'\d+(_\d+)*', Number.Integer), + (r"'(\\\\|\\[^\\]|[^'\\])*'", String), + (r'"(\\\\|\\[^\\]|[^"\\])*"', String), + (r'`(\\\\|\\[^\\]|[^`\\])*`', String.Backtick), + (r'<([^\s>]+)>', String.Regex), + (r'(q|qq|qw|qr|qx)\{', String.Other, 'cb-string'), + (r'(q|qq|qw|qr|qx)\(', String.Other, 'rb-string'), + (r'(q|qq|qw|qr|qx)\[', String.Other, 'sb-string'), + (r'(q|qq|qw|qr|qx)\<', String.Other, 'lt-string'), + (r'(q|qq|qw|qr|qx)([\W_])(.|\n)*?\2', String.Other), + (r'package\s+', Keyword, 'modulename'), + (r'sub\s+', Keyword, 'funcname'), + (r'(\[\]|\*\*|::|<<|>>|>=|<=>|<=|={3}|!=|=~|' + r'!~|&&?|\|\||\.{1,3})', Operator), + (r'[-+/*%=<>&^|!\\~]=?', Operator), + (r'[()\[\]:;,<>/?{}]', Punctuation), # yes, there's no shortage + # of punctuation in Perl! + (r'(?=\w)', Name, 'name'), + ], + 'format': [ + (r'\.\n', String.Interpol, '#pop'), + (r'[^\n]*\n', String.Interpol), + ], + 'varname': [ + (r'\s+', Text), + (r'\{', Punctuation, '#pop'), # hash syntax? 
+ (r'\)|,', Punctuation, '#pop'), # argument specifier + (r'\w+::', Name.Namespace), + (r'[\w:]+', Name.Variable, '#pop'), + ], + 'name': [ + (r'\w+::', Name.Namespace), + (r'[\w:]+', Name, '#pop'), + (r'[A-Z_]+(?=\W)', Name.Constant, '#pop'), + (r'(?=\W)', Text, '#pop'), + ], + 'modulename': [ + (r'[a-zA-Z_]\w*', Name.Namespace, '#pop') + ], + 'funcname': [ + (r'[a-zA-Z_]\w*[!?]?', Name.Function), + (r'\s+', Text), + # argument declaration + (r'(\([$@%]*\))(\s*)', bygroups(Punctuation, Text)), + (r';', Punctuation, '#pop'), + (r'.*?\{', Punctuation, '#pop'), + ], + 'cb-string': [ + (r'\\[{}\\]', String.Other), + (r'\\', String.Other), + (r'\{', String.Other, 'cb-string'), + (r'\}', String.Other, '#pop'), + (r'[^{}\\]+', String.Other) + ], + 'rb-string': [ + (r'\\[()\\]', String.Other), + (r'\\', String.Other), + (r'\(', String.Other, 'rb-string'), + (r'\)', String.Other, '#pop'), + (r'[^()]+', String.Other) + ], + 'sb-string': [ + (r'\\[\[\]\\]', String.Other), + (r'\\', String.Other), + (r'\[', String.Other, 'sb-string'), + (r'\]', String.Other, '#pop'), + (r'[^\[\]]+', String.Other) + ], + 'lt-string': [ + (r'\\[<>\\]', String.Other), + (r'\\', String.Other), + (r'\<', String.Other, 'lt-string'), + (r'\>', String.Other, '#pop'), + (r'[^<>]+', String.Other) + ], + 'end-part': [ + (r'.+', Comment.Preproc, '#pop') + ] + } + + def analyse_text(text): + if shebang_matches(text, r'perl'): + return True + if re.search('(?:my|our)\s+[$@%(]', text): + return 0.9 + + +class Perl6Lexer(ExtendedRegexLexer): + """ + For `Perl 6 `_ source code. + + .. 
versionadded:: 2.0 + """ + + name = 'Perl6' + aliases = ['perl6', 'pl6'] + filenames = ['*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', + '*.6pm', '*.p6m', '*.pm6', '*.t'] + mimetypes = ['text/x-perl6', 'application/x-perl6'] + flags = re.MULTILINE | re.DOTALL | re.UNICODE + + PERL6_IDENTIFIER_RANGE = "['\w:-]" + + PERL6_KEYWORDS = ( + 'BEGIN', 'CATCH', 'CHECK', 'CONTROL', 'END', 'ENTER', 'FIRST', 'INIT', + 'KEEP', 'LAST', 'LEAVE', 'NEXT', 'POST', 'PRE', 'START', 'TEMP', + 'UNDO', 'as', 'assoc', 'async', 'augment', 'binary', 'break', 'but', + 'cached', 'category', 'class', 'constant', 'contend', 'continue', + 'copy', 'deep', 'default', 'defequiv', 'defer', 'die', 'do', 'else', + 'elsif', 'enum', 'equiv', 'exit', 'export', 'fail', 'fatal', 'for', + 'gather', 'given', 'goto', 'grammar', 'handles', 'has', 'if', 'inline', + 'irs', 'is', 'last', 'leave', 'let', 'lift', 'loop', 'looser', 'macro', + 'make', 'maybe', 'method', 'module', 'multi', 'my', 'next', 'of', + 'ofs', 'only', 'oo', 'ors', 'our', 'package', 'parsed', 'prec', + 'proto', 'readonly', 'redo', 'ref', 'regex', 'reparsed', 'repeat', + 'require', 'required', 'return', 'returns', 'role', 'rule', 'rw', + 'self', 'slang', 'state', 'sub', 'submethod', 'subset', 'supersede', + 'take', 'temp', 'tighter', 'token', 'trusts', 'try', 'unary', + 'unless', 'until', 'use', 'warn', 'when', 'where', 'while', 'will', + ) + + PERL6_BUILTINS = ( + 'ACCEPTS', 'HOW', 'REJECTS', 'VAR', 'WHAT', 'WHENCE', 'WHERE', 'WHICH', + 'WHO', 'abs', 'acos', 'acosec', 'acosech', 'acosh', 'acotan', 'acotanh', + 'all', 'any', 'approx', 'arity', 'asec', 'asech', 'asin', 'asinh', + 'assuming', 'atan', 'atan2', 'atanh', 'attr', 'bless', 'body', 'by', + 'bytes', 'caller', 'callsame', 'callwith', 'can', 'capitalize', 'cat', + 'ceiling', 'chars', 'chmod', 'chomp', 'chop', 'chr', 'chroot', + 'circumfix', 'cis', 'classify', 'clone', 'close', 'cmp_ok', 'codes', + 'comb', 'connect', 'contains', 'context', 'cos', 'cosec', 'cosech', + 'cosh', 
'cotan', 'cotanh', 'count', 'defined', 'delete', 'diag', + 'dies_ok', 'does', 'e', 'each', 'eager', 'elems', 'end', 'eof', 'eval', + 'eval_dies_ok', 'eval_elsewhere', 'eval_lives_ok', 'evalfile', 'exists', + 'exp', 'first', 'flip', 'floor', 'flunk', 'flush', 'fmt', 'force_todo', + 'fork', 'from', 'getc', 'gethost', 'getlogin', 'getpeername', 'getpw', + 'gmtime', 'graphs', 'grep', 'hints', 'hyper', 'im', 'index', 'infix', + 'invert', 'is_approx', 'is_deeply', 'isa', 'isa_ok', 'isnt', 'iterator', + 'join', 'key', 'keys', 'kill', 'kv', 'lastcall', 'lazy', 'lc', 'lcfirst', + 'like', 'lines', 'link', 'lives_ok', 'localtime', 'log', 'log10', 'map', + 'max', 'min', 'minmax', 'name', 'new', 'nextsame', 'nextwith', 'nfc', + 'nfd', 'nfkc', 'nfkd', 'nok_error', 'nonce', 'none', 'normalize', 'not', + 'nothing', 'ok', 'once', 'one', 'open', 'opendir', 'operator', 'ord', + 'p5chomp', 'p5chop', 'pack', 'pair', 'pairs', 'pass', 'perl', 'pi', + 'pick', 'plan', 'plan_ok', 'polar', 'pop', 'pos', 'postcircumfix', + 'postfix', 'pred', 'prefix', 'print', 'printf', 'push', 'quasi', + 'quotemeta', 'rand', 're', 'read', 'readdir', 'readline', 'reduce', + 'reverse', 'rewind', 'rewinddir', 'rindex', 'roots', 'round', + 'roundrobin', 'run', 'runinstead', 'sameaccent', 'samecase', 'say', + 'sec', 'sech', 'sech', 'seek', 'shape', 'shift', 'sign', 'signature', + 'sin', 'sinh', 'skip', 'skip_rest', 'sleep', 'slurp', 'sort', 'splice', + 'split', 'sprintf', 'sqrt', 'srand', 'strand', 'subst', 'substr', 'succ', + 'sum', 'symlink', 'tan', 'tanh', 'throws_ok', 'time', 'times', 'to', + 'todo', 'trim', 'trim_end', 'trim_start', 'true', 'truncate', 'uc', + 'ucfirst', 'undef', 'undefine', 'uniq', 'unlike', 'unlink', 'unpack', + 'unpolar', 'unshift', 'unwrap', 'use_ok', 'value', 'values', 'vec', + 'version_lt', 'void', 'wait', 'want', 'wrap', 'write', 'zip', + ) + + PERL6_BUILTIN_CLASSES = ( + 'Abstraction', 'Any', 'AnyChar', 'Array', 'Associative', 'Bag', 'Bit', + 'Blob', 'Block', 'Bool', 'Buf', 'Byte', 
'Callable', 'Capture', 'Char', 'Class', + 'Code', 'Codepoint', 'Comparator', 'Complex', 'Decreasing', 'Exception', + 'Failure', 'False', 'Grammar', 'Grapheme', 'Hash', 'IO', 'Increasing', + 'Int', 'Junction', 'KeyBag', 'KeyExtractor', 'KeyHash', 'KeySet', + 'KitchenSink', 'List', 'Macro', 'Mapping', 'Match', 'Matcher', 'Method', + 'Module', 'Num', 'Object', 'Ordered', 'Ordering', 'OrderingPair', + 'Package', 'Pair', 'Positional', 'Proxy', 'Range', 'Rat', 'Regex', + 'Role', 'Routine', 'Scalar', 'Seq', 'Set', 'Signature', 'Str', 'StrLen', + 'StrPos', 'Sub', 'Submethod', 'True', 'UInt', 'Undef', 'Version', 'Void', + 'Whatever', 'bit', 'bool', 'buf', 'buf1', 'buf16', 'buf2', 'buf32', + 'buf4', 'buf64', 'buf8', 'complex', 'int', 'int1', 'int16', 'int2', + 'int32', 'int4', 'int64', 'int8', 'num', 'rat', 'rat1', 'rat16', 'rat2', + 'rat32', 'rat4', 'rat64', 'rat8', 'uint', 'uint1', 'uint16', 'uint2', + 'uint32', 'uint4', 'uint64', 'uint8', 'utf16', 'utf32', 'utf8', + ) + + PERL6_OPERATORS = ( + 'X', 'Z', 'after', 'also', 'and', 'andthen', 'before', 'cmp', 'div', + 'eq', 'eqv', 'extra', 'ff', 'fff', 'ge', 'gt', 'le', 'leg', 'lt', 'm', + 'mm', 'mod', 'ne', 'or', 'orelse', 'rx', 's', 'tr', 'x', 'xor', 'xx', + '++', '--', '**', '!', '+', '-', '~', '?', '|', '||', '+^', '~^', '?^', + '^', '*', '/', '%', '%%', '+&', '+<', '+>', '~&', '~<', '~>', '?&', + 'gcd', 'lcm', '+', '-', '+|', '+^', '~|', '~^', '?|', '?^', + '~', '&', '^', 'but', 'does', '<=>', '..', '..^', '^..', '^..^', + '!=', '==', '<', '<=', '>', '>=', '~~', '===', '!eqv', + '&&', '||', '^^', '//', 'min', 'max', '??', '!!', 'ff', 'fff', 'so', + 'not', '<==', '==>', '<<==', '==>>', + ) + + # Perl 6 has a *lot* of possible bracketing characters + # this list was lifted from STD.pm6 (https://github.com/perl6/std) + PERL6_BRACKETS = { + u'\u0028': u'\u0029', u'\u003c': u'\u003e', u'\u005b': u'\u005d', + u'\u007b': u'\u007d', u'\u00ab': u'\u00bb', u'\u0f3a': u'\u0f3b', + u'\u0f3c': u'\u0f3d', u'\u169b': u'\u169c', 
u'\u2018': u'\u2019', + u'\u201a': u'\u2019', u'\u201b': u'\u2019', u'\u201c': u'\u201d', + u'\u201e': u'\u201d', u'\u201f': u'\u201d', u'\u2039': u'\u203a', + u'\u2045': u'\u2046', u'\u207d': u'\u207e', u'\u208d': u'\u208e', + u'\u2208': u'\u220b', u'\u2209': u'\u220c', u'\u220a': u'\u220d', + u'\u2215': u'\u29f5', u'\u223c': u'\u223d', u'\u2243': u'\u22cd', + u'\u2252': u'\u2253', u'\u2254': u'\u2255', u'\u2264': u'\u2265', + u'\u2266': u'\u2267', u'\u2268': u'\u2269', u'\u226a': u'\u226b', + u'\u226e': u'\u226f', u'\u2270': u'\u2271', u'\u2272': u'\u2273', + u'\u2274': u'\u2275', u'\u2276': u'\u2277', u'\u2278': u'\u2279', + u'\u227a': u'\u227b', u'\u227c': u'\u227d', u'\u227e': u'\u227f', + u'\u2280': u'\u2281', u'\u2282': u'\u2283', u'\u2284': u'\u2285', + u'\u2286': u'\u2287', u'\u2288': u'\u2289', u'\u228a': u'\u228b', + u'\u228f': u'\u2290', u'\u2291': u'\u2292', u'\u2298': u'\u29b8', + u'\u22a2': u'\u22a3', u'\u22a6': u'\u2ade', u'\u22a8': u'\u2ae4', + u'\u22a9': u'\u2ae3', u'\u22ab': u'\u2ae5', u'\u22b0': u'\u22b1', + u'\u22b2': u'\u22b3', u'\u22b4': u'\u22b5', u'\u22b6': u'\u22b7', + u'\u22c9': u'\u22ca', u'\u22cb': u'\u22cc', u'\u22d0': u'\u22d1', + u'\u22d6': u'\u22d7', u'\u22d8': u'\u22d9', u'\u22da': u'\u22db', + u'\u22dc': u'\u22dd', u'\u22de': u'\u22df', u'\u22e0': u'\u22e1', + u'\u22e2': u'\u22e3', u'\u22e4': u'\u22e5', u'\u22e6': u'\u22e7', + u'\u22e8': u'\u22e9', u'\u22ea': u'\u22eb', u'\u22ec': u'\u22ed', + u'\u22f0': u'\u22f1', u'\u22f2': u'\u22fa', u'\u22f3': u'\u22fb', + u'\u22f4': u'\u22fc', u'\u22f6': u'\u22fd', u'\u22f7': u'\u22fe', + u'\u2308': u'\u2309', u'\u230a': u'\u230b', u'\u2329': u'\u232a', + u'\u23b4': u'\u23b5', u'\u2768': u'\u2769', u'\u276a': u'\u276b', + u'\u276c': u'\u276d', u'\u276e': u'\u276f', u'\u2770': u'\u2771', + u'\u2772': u'\u2773', u'\u2774': u'\u2775', u'\u27c3': u'\u27c4', + u'\u27c5': u'\u27c6', u'\u27d5': u'\u27d6', u'\u27dd': u'\u27de', + u'\u27e2': u'\u27e3', u'\u27e4': u'\u27e5', u'\u27e6': u'\u27e7', + 
u'\u27e8': u'\u27e9', u'\u27ea': u'\u27eb', u'\u2983': u'\u2984', + u'\u2985': u'\u2986', u'\u2987': u'\u2988', u'\u2989': u'\u298a', + u'\u298b': u'\u298c', u'\u298d': u'\u298e', u'\u298f': u'\u2990', + u'\u2991': u'\u2992', u'\u2993': u'\u2994', u'\u2995': u'\u2996', + u'\u2997': u'\u2998', u'\u29c0': u'\u29c1', u'\u29c4': u'\u29c5', + u'\u29cf': u'\u29d0', u'\u29d1': u'\u29d2', u'\u29d4': u'\u29d5', + u'\u29d8': u'\u29d9', u'\u29da': u'\u29db', u'\u29f8': u'\u29f9', + u'\u29fc': u'\u29fd', u'\u2a2b': u'\u2a2c', u'\u2a2d': u'\u2a2e', + u'\u2a34': u'\u2a35', u'\u2a3c': u'\u2a3d', u'\u2a64': u'\u2a65', + u'\u2a79': u'\u2a7a', u'\u2a7d': u'\u2a7e', u'\u2a7f': u'\u2a80', + u'\u2a81': u'\u2a82', u'\u2a83': u'\u2a84', u'\u2a8b': u'\u2a8c', + u'\u2a91': u'\u2a92', u'\u2a93': u'\u2a94', u'\u2a95': u'\u2a96', + u'\u2a97': u'\u2a98', u'\u2a99': u'\u2a9a', u'\u2a9b': u'\u2a9c', + u'\u2aa1': u'\u2aa2', u'\u2aa6': u'\u2aa7', u'\u2aa8': u'\u2aa9', + u'\u2aaa': u'\u2aab', u'\u2aac': u'\u2aad', u'\u2aaf': u'\u2ab0', + u'\u2ab3': u'\u2ab4', u'\u2abb': u'\u2abc', u'\u2abd': u'\u2abe', + u'\u2abf': u'\u2ac0', u'\u2ac1': u'\u2ac2', u'\u2ac3': u'\u2ac4', + u'\u2ac5': u'\u2ac6', u'\u2acd': u'\u2ace', u'\u2acf': u'\u2ad0', + u'\u2ad1': u'\u2ad2', u'\u2ad3': u'\u2ad4', u'\u2ad5': u'\u2ad6', + u'\u2aec': u'\u2aed', u'\u2af7': u'\u2af8', u'\u2af9': u'\u2afa', + u'\u2e02': u'\u2e03', u'\u2e04': u'\u2e05', u'\u2e09': u'\u2e0a', + u'\u2e0c': u'\u2e0d', u'\u2e1c': u'\u2e1d', u'\u2e20': u'\u2e21', + u'\u3008': u'\u3009', u'\u300a': u'\u300b', u'\u300c': u'\u300d', + u'\u300e': u'\u300f', u'\u3010': u'\u3011', u'\u3014': u'\u3015', + u'\u3016': u'\u3017', u'\u3018': u'\u3019', u'\u301a': u'\u301b', + u'\u301d': u'\u301e', u'\ufd3e': u'\ufd3f', u'\ufe17': u'\ufe18', + u'\ufe35': u'\ufe36', u'\ufe37': u'\ufe38', u'\ufe39': u'\ufe3a', + u'\ufe3b': u'\ufe3c', u'\ufe3d': u'\ufe3e', u'\ufe3f': u'\ufe40', + u'\ufe41': u'\ufe42', u'\ufe43': u'\ufe44', u'\ufe47': u'\ufe48', + u'\ufe59': u'\ufe5a', 
u'\ufe5b': u'\ufe5c', u'\ufe5d': u'\ufe5e', + u'\uff08': u'\uff09', u'\uff1c': u'\uff1e', u'\uff3b': u'\uff3d', + u'\uff5b': u'\uff5d', u'\uff5f': u'\uff60', u'\uff62': u'\uff63', + } + + def _build_word_match(words, boundary_regex_fragment=None, prefix='', suffix=''): + if boundary_regex_fragment is None: + return r'\b(' + prefix + r'|'.join(re.escape(x) for x in words) + \ + suffix + r')\b' + else: + return r'(? 0: + next_open_pos = text.find(opening_chars, search_pos + n_chars) + next_close_pos = text.find(closing_chars, search_pos + n_chars) + + if next_close_pos == -1: + next_close_pos = len(text) + nesting_level = 0 + elif next_open_pos != -1 and next_open_pos < next_close_pos: + nesting_level += 1 + search_pos = next_open_pos + else: # next_close_pos < next_open_pos + nesting_level -= 1 + search_pos = next_close_pos + + end_pos = next_close_pos + + if end_pos < 0: # if we didn't find a closer, just highlight the + # rest of the text in this class + end_pos = len(text) + + if adverbs is not None and re.search(r':to\b', adverbs): + heredoc_terminator = text[match.start('delimiter') + n_chars:end_pos] + end_heredoc = re.search(r'^\s*' + re.escape(heredoc_terminator) + + r'\s*$', text[end_pos:], re.MULTILINE) + + if end_heredoc: + end_pos += end_heredoc.end() + else: + end_pos = len(text) + + yield match.start(), token_class, text[match.start():end_pos + n_chars] + context.pos = end_pos + n_chars + + return callback + + def opening_brace_callback(lexer, match, context): + stack = context.stack + + yield match.start(), Text, context.text[match.start():match.end()] + context.pos = match.end() + + # if we encounter an opening brace and we're one level + # below a token state, it means we need to increment + # the nesting level for braces so we know later when + # we should return to the token rules. 
+ if len(stack) > 2 and stack[-2] == 'token': + context.perl6_token_nesting_level += 1 + + def closing_brace_callback(lexer, match, context): + stack = context.stack + + yield match.start(), Text, context.text[match.start():match.end()] + context.pos = match.end() + + # if we encounter a free closing brace and we're one level + # below a token state, it means we need to check the nesting + # level to see if we need to return to the token state. + if len(stack) > 2 and stack[-2] == 'token': + context.perl6_token_nesting_level -= 1 + if context.perl6_token_nesting_level == 0: + stack.pop() + + def embedded_perl6_callback(lexer, match, context): + context.perl6_token_nesting_level = 1 + yield match.start(), Text, context.text[match.start():match.end()] + context.pos = match.end() + context.stack.append('root') + + # If you're modifying these rules, be careful if you need to process '{' or '}' + # characters. We have special logic for processing these characters (due to the fact + # that you can nest Perl 6 code in regex blocks), so if you need to process one of + # them, make sure you also process the corresponding one! + tokens = { + 'common': [ + (r'#[`|=](?P(?P[' + ''.join(PERL6_BRACKETS) + r'])(?P=first_char)*)', + brackets_callback(Comment.Multiline)), + (r'#[^\n]*$', Comment.Singleline), + (r'^(\s*)=begin\s+(\w+)\b.*?^\1=end\s+\2', Comment.Multiline), + (r'^(\s*)=for.*?\n\s*?\n', Comment.Multiline), + (r'^=.*?\n\s*?\n', Comment.Multiline), + (r'(regex|token|rule)(\s*' + PERL6_IDENTIFIER_RANGE + '+:sym)', + bygroups(Keyword, Name), 'token-sym-brackets'), + (r'(regex|token|rule)(?!' + PERL6_IDENTIFIER_RANGE + ')(\s*' + PERL6_IDENTIFIER_RANGE + '+)?', + bygroups(Keyword, Name), 'pre-token'), + # deal with a special case in the Perl 6 grammar (role q { ... 
}) + (r'(role)(\s+)(q)(\s*)', bygroups(Keyword, Text, Name, Text)), + (_build_word_match(PERL6_KEYWORDS, PERL6_IDENTIFIER_RANGE), Keyword), + (_build_word_match(PERL6_BUILTIN_CLASSES, PERL6_IDENTIFIER_RANGE, suffix='(?::[UD])?'), + Name.Builtin), + (_build_word_match(PERL6_BUILTINS, PERL6_IDENTIFIER_RANGE), Name.Builtin), + # copied from PerlLexer + (r'[$@%&][.^:?=!~]?' + PERL6_IDENTIFIER_RANGE + u'+(?:<<.*?>>|<.*?>|«.*?»)*', + Name.Variable), + (r'\$[!/](?:<<.*?>>|<.*?>|«.*?»)*', Name.Variable.Global), + (r'::\?\w+', Name.Variable.Global), + (r'[$@%&]\*' + PERL6_IDENTIFIER_RANGE + u'+(?:<<.*?>>|<.*?>|«.*?»)*', + Name.Variable.Global), + (r'\$(?:<.*?>)+', Name.Variable), + (r'(?:q|qq|Q)[a-zA-Z]?\s*(?P:[\w\s:]+)?\s*(?P(?P[^0-9a-zA-Z:\s])' + r'(?P=first_char)*)', brackets_callback(String)), + # copied from PerlLexer + (r'0_?[0-7]+(_[0-7]+)*', Number.Oct), + (r'0x[0-9A-Fa-f]+(_[0-9A-Fa-f]+)*', Number.Hex), + (r'0b[01]+(_[01]+)*', Number.Bin), + (r'(?i)(\d*(_\d*)*\.\d+(_\d*)*|\d+(_\d*)*\.\d+(_\d*)*)(e[+-]?\d+)?', + Number.Float), + (r'(?i)\d+(_\d*)*e[+-]?\d+(_\d*)*', Number.Float), + (r'\d+(_\d+)*', Number.Integer), + (r'(?<=~~)\s*/(?:\\\\|\\/|.)*?/', String.Regex), + (r'(?<=[=(,])\s*/(?:\\\\|\\/|.)*?/', String.Regex), + (r'm\w+(?=\()', Name), + (r'(?:m|ms|rx)\s*(?P:[\w\s:]+)?\s*(?P(?P[^\w:\s])' + r'(?P=first_char)*)', brackets_callback(String.Regex)), + (r'(?:s|ss|tr)\s*(?::[\w\s:]+)?\s*/(?:\\\\|\\/|.)*?/(?:\\\\|\\/|.)*?/', + String.Regex), + (r'<[^\s=].*?\S>', String), + (_build_word_match(PERL6_OPERATORS), Operator), + (r'\w' + PERL6_IDENTIFIER_RANGE + '*', Name), + (r"'(\\\\|\\[^\\]|[^'\\])*'", String), + (r'"(\\\\|\\[^\\]|[^"\\])*"', String), + ], + 'root': [ + include('common'), + (r'\{', opening_brace_callback), + (r'\}', closing_brace_callback), + (r'.+?', Text), + ], + 'pre-token': [ + include('common'), + (r'\{', Text, ('#pop', 'token')), + (r'.+?', Text), + ], + 'token-sym-brackets': [ + (r'(?P(?P[' + ''.join(PERL6_BRACKETS) + '])(?P=first_char)*)', + 
brackets_callback(Name), ('#pop', 'pre-token')), + default(('#pop', 'pre-token')), + ], + 'token': [ + (r'\}', Text, '#pop'), + (r'(?<=:)(?:my|our|state|constant|temp|let).*?;', using(this)), + # make sure that quotes in character classes aren't treated as strings + (r'<(?:[-!?+.]\s*)?\[.*?\]>', String.Regex), + # make sure that '#' characters in quotes aren't treated as comments + (r"(?my|our)\s+)?(?:module|class|role|enum|grammar)', line) + if class_decl: + if saw_perl_decl or class_decl.group('scope') is not None: + return True + rating = 0.05 + continue + break + + return rating + + def __init__(self, **options): + super(Perl6Lexer, self).__init__(**options) + self.encoding = options.get('encoding', 'utf-8') diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/php.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/php.py new file mode 100644 index 0000000..08dfa6a --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/php.py @@ -0,0 +1,245 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.php + ~~~~~~~~~~~~~~~~~~~ + + Lexers for PHP and related languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include, bygroups, default, using, this +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Other +from pygments.util import get_bool_opt, get_list_opt, iteritems + +__all__ = ['ZephirLexer', 'PhpLexer'] + + +class ZephirLexer(RegexLexer): + """ + For `Zephir language `_ source code. + + Zephir is a compiled high level language aimed + to the creation of C-extensions for PHP. + + .. 
versionadded:: 2.0 + """ + + name = 'Zephir' + aliases = ['zephir'] + filenames = ['*.zep'] + + zephir_keywords = ['fetch', 'echo', 'isset', 'empty'] + zephir_type = ['bit', 'bits', 'string'] + + flags = re.DOTALL | re.MULTILINE + + tokens = { + 'commentsandwhitespace': [ + (r'\s+', Text), + (r'//.*?\n', Comment.Single), + (r'/\*.*?\*/', Comment.Multiline) + ], + 'slashstartsregex': [ + include('commentsandwhitespace'), + (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' + r'([gim]+\b|\B)', String.Regex, '#pop'), + default('#pop') + ], + 'badregex': [ + (r'\n', Text, '#pop') + ], + 'root': [ + (r'^(?=\s|/|', Punctuation), + (r'"(?:\\x[0-9a-fA-F]+\\|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|' + r'\\[0-7]+\\|\\["\nabcefnrstv]|[^\\"])*"', String.Double), + (r"'(?:''|[^'])*'", String.Atom), # quoted atom + # Needs to not be followed by an atom. + # (r'=(?=\s|[a-zA-Z\[])', Operator), + (r'is\b', Operator), + (r'(<|>|=<|>=|==|=:=|=|/|//|\*|\+|-)(?=\s|[a-zA-Z0-9\[])', + Operator), + (r'(mod|div|not)\b', Operator), + (r'_', Keyword), # The don't-care variable + (r'([a-z]+)(:)', bygroups(Name.Namespace, Punctuation)), + (u'([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]' + u'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)' + u'(\\s*)(:-|-->)', + bygroups(Name.Function, Text, Operator)), # function defn + (u'([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]' + u'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)' + u'(\\s*)(\\()', + bygroups(Name.Function, Text, Punctuation)), + (u'[a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]' + u'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*', + String.Atom), # atom, characters + # This one includes ! 
+ (u'[#&*+\\-./:<=>?@\\\\^~\u00a1-\u00bf\u2010-\u303f]+', + String.Atom), # atom, graphics + (r'[A-Z_]\w*', Name.Variable), + (u'\\s+|[\u2000-\u200f\ufff0-\ufffe\uffef]', Text), + ], + 'nested-comment': [ + (r'\*/', Comment.Multiline, '#pop'), + (r'/\*', Comment.Multiline, '#push'), + (r'[^*/]+', Comment.Multiline), + (r'[*/]', Comment.Multiline), + ], + } + + def analyse_text(text): + return ':-' in text + + +class LogtalkLexer(RegexLexer): + """ + For `Logtalk `_ source code. + + .. versionadded:: 0.10 + """ + + name = 'Logtalk' + aliases = ['logtalk'] + filenames = ['*.lgt', '*.logtalk'] + mimetypes = ['text/x-logtalk'] + + tokens = { + 'root': [ + # Directives + (r'^\s*:-\s', Punctuation, 'directive'), + # Comments + (r'%.*?\n', Comment), + (r'/\*(.|\n)*?\*/', Comment), + # Whitespace + (r'\n', Text), + (r'\s+', Text), + # Numbers + (r"0'.", Number), + (r'0b[01]+', Number.Bin), + (r'0o[0-7]+', Number.Oct), + (r'0x[0-9a-fA-F]+', Number.Hex), + (r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number), + # Variables + (r'([A-Z_]\w*)', Name.Variable), + # Event handlers + (r'(after|before)(?=[(])', Keyword), + # Message forwarding handler + (r'forward(?=[(])', Keyword), + # Execution-context methods + (r'(parameter|this|se(lf|nder))(?=[(])', Keyword), + # Reflection + (r'(current_predicate|predicate_property)(?=[(])', Keyword), + # DCGs and term expansion + (r'(expand_(goal|term)|(goal|term)_expansion|phrase)(?=[(])', Keyword), + # Entity + (r'(abolish|c(reate|urrent))_(object|protocol|category)(?=[(])', Keyword), + (r'(object|protocol|category)_property(?=[(])', Keyword), + # Entity relations + (r'co(mplements_object|nforms_to_protocol)(?=[(])', Keyword), + (r'extends_(object|protocol|category)(?=[(])', Keyword), + (r'imp(lements_protocol|orts_category)(?=[(])', Keyword), + (r'(instantiat|specializ)es_class(?=[(])', Keyword), + # Events + (r'(current_event|(abolish|define)_events)(?=[(])', Keyword), + # Flags + (r'(current|set)_logtalk_flag(?=[(])', Keyword), + # Compiling, 
loading, and library paths + (r'logtalk_(compile|l(ibrary_path|oad|oad_context)|make)(?=[(])', Keyword), + (r'\blogtalk_make\b', Keyword), + # Database + (r'(clause|retract(all)?)(?=[(])', Keyword), + (r'a(bolish|ssert(a|z))(?=[(])', Keyword), + # Control constructs + (r'(ca(ll|tch)|throw)(?=[(])', Keyword), + (r'(fa(il|lse)|true)\b', Keyword), + # All solutions + (r'((bag|set)of|f(ind|or)all)(?=[(])', Keyword), + # Multi-threading meta-predicates + (r'threaded(_(call|once|ignore|exit|peek|wait|notify))?(?=[(])', Keyword), + # Term unification + (r'(subsumes_term|unify_with_occurs_check)(?=[(])', Keyword), + # Term creation and decomposition + (r'(functor|arg|copy_term|numbervars|term_variables)(?=[(])', Keyword), + # Evaluable functors + (r'(rem|m(ax|in|od)|abs|sign)(?=[(])', Keyword), + (r'float(_(integer|fractional)_part)?(?=[(])', Keyword), + (r'(floor|truncate|round|ceiling)(?=[(])', Keyword), + # Other arithmetic functors + (r'(cos|a(cos|sin|tan)|exp|log|s(in|qrt))(?=[(])', Keyword), + # Term testing + (r'(var|atom(ic)?|integer|float|c(allable|ompound)|n(onvar|umber)|' + r'ground|acyclic_term)(?=[(])', Keyword), + # Term comparison + (r'compare(?=[(])', Keyword), + # Stream selection and control + (r'(curren|se)t_(in|out)put(?=[(])', Keyword), + (r'(open|close)(?=[(])', Keyword), + (r'flush_output(?=[(])', Keyword), + (r'(at_end_of_stream|flush_output)\b', Keyword), + (r'(stream_property|at_end_of_stream|set_stream_position)(?=[(])', Keyword), + # Character and byte input/output + (r'(nl|(get|peek|put)_(byte|c(har|ode)))(?=[(])', Keyword), + (r'\bnl\b', Keyword), + # Term input/output + (r'read(_term)?(?=[(])', Keyword), + (r'write(q|_(canonical|term))?(?=[(])', Keyword), + (r'(current_)?op(?=[(])', Keyword), + (r'(current_)?char_conversion(?=[(])', Keyword), + # Atomic term processing + (r'atom_(length|c(hars|o(ncat|des)))(?=[(])', Keyword), + (r'(char_code|sub_atom)(?=[(])', Keyword), + (r'number_c(har|ode)s(?=[(])', Keyword), + # Implementation defined 
hooks functions + (r'(se|curren)t_prolog_flag(?=[(])', Keyword), + (r'\bhalt\b', Keyword), + (r'halt(?=[(])', Keyword), + # Message sending operators + (r'(::|:|\^\^)', Operator), + # External call + (r'[{}]', Keyword), + # Logic and control + (r'(ignore|once)(?=[(])', Keyword), + (r'\brepeat\b', Keyword), + # Sorting + (r'(key)?sort(?=[(])', Keyword), + # Bitwise functors + (r'(>>|<<|/\\|\\\\|\\)', Operator), + # Predicate aliases + (r'\bas\b', Operator), + # Arithemtic evaluation + (r'\bis\b', Keyword), + # Arithemtic comparison + (r'(=:=|=\\=|<|=<|>=|>)', Operator), + # Term creation and decomposition + (r'=\.\.', Operator), + # Term unification + (r'(=|\\=)', Operator), + # Term comparison + (r'(==|\\==|@=<|@<|@>=|@>)', Operator), + # Evaluable functors + (r'(//|[-+*/])', Operator), + (r'\b(e|pi|mod|rem)\b', Operator), + # Other arithemtic functors + (r'\b\*\*\b', Operator), + # DCG rules + (r'-->', Operator), + # Control constructs + (r'([!;]|->)', Operator), + # Logic and control + (r'\\+', Operator), + # Mode operators + (r'[?@]', Operator), + # Existential quantifier + (r'\^', Operator), + # Strings + (r'"(\\\\|\\"|[^"])*"', String), + # Ponctuation + (r'[()\[\],.|]', Text), + # Atoms + (r"[a-z]\w*", Text), + (r"'", String, 'quoted_atom'), + ], + + 'quoted_atom': [ + (r"''", String), + (r"'", String, '#pop'), + (r'\\([\\abfnrtv"\']|(x[a-fA-F0-9]+|[0-7]+)\\)', String.Escape), + (r"[^\\'\n]+", String), + (r'\\', String), + ], + + 'directive': [ + # Conditional compilation directives + (r'(el)?if(?=[(])', Keyword, 'root'), + (r'(e(lse|ndif))[.]', Keyword, 'root'), + # Entity directives + (r'(category|object|protocol)(?=[(])', Keyword, 'entityrelations'), + (r'(end_(category|object|protocol))[.]', Keyword, 'root'), + # Predicate scope directives + (r'(public|protected|private)(?=[(])', Keyword, 'root'), + # Other directives + (r'e(n(coding|sure_loaded)|xport)(?=[(])', Keyword, 'root'), + (r'in(clude|itialization|fo)(?=[(])', Keyword, 'root'), + 
(r'(built_in|dynamic|synchronized|threaded)[.]', Keyword, 'root'), + (r'(alias|d(ynamic|iscontiguous)|m(eta_(non_terminal|predicate)|ode|ultifile)|' + r's(et_(logtalk|prolog)_flag|ynchronized))(?=[(])', Keyword, 'root'), + (r'op(?=[(])', Keyword, 'root'), + (r'(c(alls|oinductive)|module|reexport|use(s|_module))(?=[(])', Keyword, 'root'), + (r'[a-z]\w*(?=[(])', Text, 'root'), + (r'[a-z]\w*[.]', Text, 'root'), + ], + + 'entityrelations': [ + (r'(complements|extends|i(nstantiates|mp(lements|orts))|specializes)(?=[(])', Keyword), + # Numbers + (r"0'.", Number), + (r'0b[01]+', Number.Bin), + (r'0o[0-7]+', Number.Oct), + (r'0x[0-9a-fA-F]+', Number.Hex), + (r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number), + # Variables + (r'([A-Z_]\w*)', Name.Variable), + # Atoms + (r"[a-z]\w*", Text), + (r"'", String, 'quoted_atom'), + # Strings + (r'"(\\\\|\\"|[^"])*"', String), + # End of entity-opening directive + (r'([)]\.)', Text, 'root'), + # Scope operator + (r'(::)', Operator), + # Ponctuation + (r'[()\[\],.|]', Text), + # Comments + (r'%.*?\n', Comment), + (r'/\*(.|\n)*?\*/', Comment), + # Whitespace + (r'\n', Text), + (r'\s+', Text), + ] + } + + def analyse_text(text): + if ':- object(' in text: + return 1.0 + elif ':- protocol(' in text: + return 1.0 + elif ':- category(' in text: + return 1.0 + elif re.search('^:-\s[a-z]', text, re.M): + return 0.9 + else: + return 0.0 diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/python.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/python.py new file mode 100644 index 0000000..259d1a9 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/python.py @@ -0,0 +1,833 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.python + ~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for Python and related languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import re + +from pygments.lexer import Lexer, RegexLexer, include, bygroups, using, \ + default, words, combined, do_insertions +from pygments.util import get_bool_opt, shebang_matches +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Generic, Other, Error +from pygments import unistring as uni + +__all__ = ['PythonLexer', 'PythonConsoleLexer', 'PythonTracebackLexer', + 'Python3Lexer', 'Python3TracebackLexer', 'CythonLexer', + 'DgLexer', 'NumPyLexer'] + +line_re = re.compile('.*?\n') + + +class PythonLexer(RegexLexer): + """ + For `Python `_ source code. + """ + + name = 'Python' + aliases = ['python', 'py', 'sage'] + filenames = ['*.py', '*.pyw', '*.sc', 'SConstruct', 'SConscript', '*.tac', '*.sage'] + mimetypes = ['text/x-python', 'application/x-python'] + + tokens = { + 'root': [ + (r'\n', Text), + (r'^(\s*)([rRuU]{,2}"""(?:.|\n)*?""")', bygroups(Text, String.Doc)), + (r"^(\s*)([rRuU]{,2}'''(?:.|\n)*?''')", bygroups(Text, String.Doc)), + (r'[^\S\n]+', Text), + (r'#.*$', Comment), + (r'[]{}:(),;[]', Punctuation), + (r'\\\n', Text), + (r'\\', Text), + (r'(in|is|and|or|not)\b', Operator.Word), + (r'!=|==|<<|>>|[-~+/*%=<>&^|.]', Operator), + include('keywords'), + (r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'), + (r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'), + (r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), + 'fromimport'), + (r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), + 'import'), + include('builtins'), + include('backtick'), + ('(?:[rR]|[uU][rR]|[rR][uU])"""', String, 'tdqs'), + ("(?:[rR]|[uU][rR]|[rR][uU])'''", String, 'tsqs'), + ('(?:[rR]|[uU][rR]|[rR][uU])"', String, 'dqs'), + ("(?:[rR]|[uU][rR]|[rR][uU])'", String, 'sqs'), + ('[uU]?"""', String, combined('stringescape', 'tdqs')), + ("[uU]?'''", String, combined('stringescape', 'tsqs')), + ('[uU]?"', String, combined('stringescape', 'dqs')), + ("[uU]?'", String, combined('stringescape', 
'sqs')), + include('name'), + include('numbers'), + ], + 'keywords': [ + (words(( + 'assert', 'break', 'continue', 'del', 'elif', 'else', 'except', + 'exec', 'finally', 'for', 'global', 'if', 'lambda', 'pass', + 'print', 'raise', 'return', 'try', 'while', 'yield', + 'yield from', 'as', 'with'), suffix=r'\b'), + Keyword), + ], + 'builtins': [ + (words(( + '__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin', + 'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr', 'classmethod', + 'cmp', 'coerce', 'compile', 'complex', 'delattr', 'dict', 'dir', 'divmod', + 'enumerate', 'eval', 'execfile', 'exit', 'file', 'filter', 'float', + 'frozenset', 'getattr', 'globals', 'hasattr', 'hash', 'hex', 'id', + 'input', 'int', 'intern', 'isinstance', 'issubclass', 'iter', 'len', + 'list', 'locals', 'long', 'map', 'max', 'min', 'next', 'object', + 'oct', 'open', 'ord', 'pow', 'property', 'range', 'raw_input', 'reduce', + 'reload', 'repr', 'reversed', 'round', 'set', 'setattr', 'slice', + 'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', 'type', + 'unichr', 'unicode', 'vars', 'xrange', 'zip'), + prefix=r'(?`_ source code (version 3.0). + + .. 
versionadded:: 0.10 + """ + + name = 'Python 3' + aliases = ['python3', 'py3'] + filenames = [] # Nothing until Python 3 gets widespread + mimetypes = ['text/x-python3', 'application/x-python3'] + + flags = re.MULTILINE | re.UNICODE + + uni_name = "[%s][%s]*" % (uni.xid_start, uni.xid_continue) + + tokens = PythonLexer.tokens.copy() + tokens['keywords'] = [ + (words(( + 'assert', 'break', 'continue', 'del', 'elif', 'else', 'except', + 'finally', 'for', 'global', 'if', 'lambda', 'pass', 'raise', + 'nonlocal', 'return', 'try', 'while', 'yield', 'yield from', 'as', + 'with', 'True', 'False', 'None'), suffix=r'\b'), + Keyword), + ] + tokens['builtins'] = [ + (words(( + '__import__', 'abs', 'all', 'any', 'bin', 'bool', 'bytearray', 'bytes', + 'chr', 'classmethod', 'cmp', 'compile', 'complex', 'delattr', 'dict', + 'dir', 'divmod', 'enumerate', 'eval', 'filter', 'float', 'format', + 'frozenset', 'getattr', 'globals', 'hasattr', 'hash', 'hex', 'id', + 'input', 'int', 'isinstance', 'issubclass', 'iter', 'len', 'list', + 'locals', 'map', 'max', 'memoryview', 'min', 'next', 'object', 'oct', + 'open', 'ord', 'pow', 'print', 'property', 'range', 'repr', 'reversed', + 'round', 'set', 'setattr', 'slice', 'sorted', 'staticmethod', 'str', + 'sum', 'super', 'tuple', 'type', 'vars', 'zip'), prefix=r'(?>> a = 'foo' + >>> print a + foo + >>> 1 / 0 + Traceback (most recent call last): + File "", line 1, in + ZeroDivisionError: integer division or modulo by zero + + Additional options: + + `python3` + Use Python 3 lexer for code. Default is ``False``. + + .. 
versionadded:: 1.0 + """ + name = 'Python console session' + aliases = ['pycon'] + mimetypes = ['text/x-python-doctest'] + + def __init__(self, **options): + self.python3 = get_bool_opt(options, 'python3', False) + Lexer.__init__(self, **options) + + def get_tokens_unprocessed(self, text): + if self.python3: + pylexer = Python3Lexer(**self.options) + tblexer = Python3TracebackLexer(**self.options) + else: + pylexer = PythonLexer(**self.options) + tblexer = PythonTracebackLexer(**self.options) + + curcode = '' + insertions = [] + curtb = '' + tbindex = 0 + tb = 0 + for match in line_re.finditer(text): + line = match.group() + if line.startswith(u'>>> ') or line.startswith(u'... '): + tb = 0 + insertions.append((len(curcode), + [(0, Generic.Prompt, line[:4])])) + curcode += line[4:] + elif line.rstrip() == u'...' and not tb: + # only a new >>> prompt can end an exception block + # otherwise an ellipsis in place of the traceback frames + # will be mishandled + insertions.append((len(curcode), + [(0, Generic.Prompt, u'...')])) + curcode += line[3:] + else: + if curcode: + for item in do_insertions( + insertions, pylexer.get_tokens_unprocessed(curcode)): + yield item + curcode = '' + insertions = [] + if (line.startswith(u'Traceback (most recent call last):') or + re.match(u' File "[^"]+", line \\d+\\n$', line)): + tb = 1 + curtb = line + tbindex = match.start() + elif line == 'KeyboardInterrupt\n': + yield match.start(), Name.Class, line + elif tb: + curtb += line + if not (line.startswith(' ') or line.strip() == u'...'): + tb = 0 + for i, t, v in tblexer.get_tokens_unprocessed(curtb): + yield tbindex+i, t, v + else: + yield match.start(), Generic.Output, line + if curcode: + for item in do_insertions(insertions, + pylexer.get_tokens_unprocessed(curcode)): + yield item + if curtb: + for i, t, v in tblexer.get_tokens_unprocessed(curtb): + yield tbindex+i, t, v + + +class PythonTracebackLexer(RegexLexer): + """ + For Python tracebacks. + + .. 
versionadded:: 0.7 + """ + + name = 'Python Traceback' + aliases = ['pytb'] + filenames = ['*.pytb'] + mimetypes = ['text/x-python-traceback'] + + tokens = { + 'root': [ + (r'^Traceback \(most recent call last\):\n', + Generic.Traceback, 'intb'), + # SyntaxError starts with this. + (r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'), + (r'^.*\n', Other), + ], + 'intb': [ + (r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)', + bygroups(Text, Name.Builtin, Text, Number, Text, Name, Text)), + (r'^( File )("[^"]+")(, line )(\d+)(\n)', + bygroups(Text, Name.Builtin, Text, Number, Text)), + (r'^( )(.+)(\n)', + bygroups(Text, using(PythonLexer), Text)), + (r'^([ \t]*)(\.\.\.)(\n)', + bygroups(Text, Comment, Text)), # for doctests... + (r'^([^:]+)(: )(.+)(\n)', + bygroups(Generic.Error, Text, Name, Text), '#pop'), + (r'^([a-zA-Z_]\w*)(:?\n)', + bygroups(Generic.Error, Text), '#pop') + ], + } + + +class Python3TracebackLexer(RegexLexer): + """ + For Python 3.0 tracebacks, with support for chained exceptions. + + .. versionadded:: 1.0 + """ + + name = 'Python 3.0 Traceback' + aliases = ['py3tb'] + filenames = ['*.py3tb'] + mimetypes = ['text/x-python3-traceback'] + + tokens = { + 'root': [ + (r'\n', Text), + (r'^Traceback \(most recent call last\):\n', Generic.Traceback, 'intb'), + (r'^During handling of the above exception, another ' + r'exception occurred:\n\n', Generic.Traceback), + (r'^The above exception was the direct cause of the ' + r'following exception:\n\n', Generic.Traceback), + (r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'), + ], + 'intb': [ + (r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)', + bygroups(Text, Name.Builtin, Text, Number, Text, Name, Text)), + (r'^( File )("[^"]+")(, line )(\d+)(\n)', + bygroups(Text, Name.Builtin, Text, Number, Text)), + (r'^( )(.+)(\n)', + bygroups(Text, using(Python3Lexer), Text)), + (r'^([ \t]*)(\.\.\.)(\n)', + bygroups(Text, Comment, Text)), # for doctests... 
+ (r'^([^:]+)(: )(.+)(\n)', + bygroups(Generic.Error, Text, Name, Text), '#pop'), + (r'^([a-zA-Z_]\w*)(:?\n)', + bygroups(Generic.Error, Text), '#pop') + ], + } + + +class CythonLexer(RegexLexer): + """ + For Pyrex and `Cython `_ source code. + + .. versionadded:: 1.1 + """ + + name = 'Cython' + aliases = ['cython', 'pyx', 'pyrex'] + filenames = ['*.pyx', '*.pxd', '*.pxi'] + mimetypes = ['text/x-cython', 'application/x-cython'] + + tokens = { + 'root': [ + (r'\n', Text), + (r'^(\s*)("""(?:.|\n)*?""")', bygroups(Text, String.Doc)), + (r"^(\s*)('''(?:.|\n)*?''')", bygroups(Text, String.Doc)), + (r'[^\S\n]+', Text), + (r'#.*$', Comment), + (r'[]{}:(),;[]', Punctuation), + (r'\\\n', Text), + (r'\\', Text), + (r'(in|is|and|or|not)\b', Operator.Word), + (r'(<)([a-zA-Z0-9.?]+)(>)', + bygroups(Punctuation, Keyword.Type, Punctuation)), + (r'!=|==|<<|>>|[-~+/*%=<>&^|.?]', Operator), + (r'(from)(\d+)(<=)(\s+)(<)(\d+)(:)', + bygroups(Keyword, Number.Integer, Operator, Name, Operator, + Name, Punctuation)), + include('keywords'), + (r'(def|property)(\s+)', bygroups(Keyword, Text), 'funcname'), + (r'(cp?def)(\s+)', bygroups(Keyword, Text), 'cdef'), + (r'(class|struct)(\s+)', bygroups(Keyword, Text), 'classname'), + (r'(from)(\s+)', bygroups(Keyword, Text), 'fromimport'), + (r'(c?import)(\s+)', bygroups(Keyword, Text), 'import'), + include('builtins'), + include('backtick'), + ('(?:[rR]|[uU][rR]|[rR][uU])"""', String, 'tdqs'), + ("(?:[rR]|[uU][rR]|[rR][uU])'''", String, 'tsqs'), + ('(?:[rR]|[uU][rR]|[rR][uU])"', String, 'dqs'), + ("(?:[rR]|[uU][rR]|[rR][uU])'", String, 'sqs'), + ('[uU]?"""', String, combined('stringescape', 'tdqs')), + ("[uU]?'''", String, combined('stringescape', 'tsqs')), + ('[uU]?"', String, combined('stringescape', 'dqs')), + ("[uU]?'", String, combined('stringescape', 'sqs')), + include('name'), + include('numbers'), + ], + 'keywords': [ + (words(( + 'assert', 'break', 'by', 'continue', 'ctypedef', 'del', 'elif', + 'else', 'except', 'except?', 'exec', 
'finally', 'for', 'gil', + 'global', 'if', 'include', 'lambda', 'nogil', 'pass', 'print', + 'raise', 'return', 'try', 'while', 'yield', 'as', 'with'), suffix=r'\b'), + Keyword), + (r'(DEF|IF|ELIF|ELSE)\b', Comment.Preproc), + ], + 'builtins': [ + (words(( + '__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin', + 'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr', + 'classmethod', 'cmp', 'coerce', 'compile', 'complex', 'delattr', + 'dict', 'dir', 'divmod', 'enumerate', 'eval', 'execfile', 'exit', + 'file', 'filter', 'float', 'frozenset', 'getattr', 'globals', + 'hasattr', 'hash', 'hex', 'id', 'input', 'int', 'intern', 'isinstance', + 'issubclass', 'iter', 'len', 'list', 'locals', 'long', 'map', 'max', + 'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', 'property', + 'range', 'raw_input', 'reduce', 'reload', 'repr', 'reversed', + 'round', 'set', 'setattr', 'slice', 'sorted', 'staticmethod', + 'str', 'sum', 'super', 'tuple', 'type', 'unichr', 'unicode', + 'vars', 'xrange', 'zip'), prefix=r'(?`_, + a functional and object-oriented programming language + running on the CPython 3 VM. + + .. 
versionadded:: 1.6 + """ + name = 'dg' + aliases = ['dg'] + filenames = ['*.dg'] + mimetypes = ['text/x-dg'] + + tokens = { + 'root': [ + (r'\s+', Text), + (r'#.*?$', Comment.Single), + + (r'(?i)0b[01]+', Number.Bin), + (r'(?i)0o[0-7]+', Number.Oct), + (r'(?i)0x[0-9a-f]+', Number.Hex), + (r'(?i)[+-]?[0-9]+\.[0-9]+(e[+-]?[0-9]+)?j?', Number.Float), + (r'(?i)[+-]?[0-9]+e[+-]?\d+j?', Number.Float), + (r'(?i)[+-]?[0-9]+j?', Number.Integer), + + (r"(?i)(br|r?b?)'''", String, combined('stringescape', 'tsqs', 'string')), + (r'(?i)(br|r?b?)"""', String, combined('stringescape', 'tdqs', 'string')), + (r"(?i)(br|r?b?)'", String, combined('stringescape', 'sqs', 'string')), + (r'(?i)(br|r?b?)"', String, combined('stringescape', 'dqs', 'string')), + + (r"`\w+'*`", Operator), + (r'\b(and|in|is|or|where)\b', Operator.Word), + (r'[!$%&*+\-./:<-@\\^|~;,]+', Operator), + + (words(( + 'bool', 'bytearray', 'bytes', 'classmethod', 'complex', 'dict', 'dict\'', + 'float', 'frozenset', 'int', 'list', 'list\'', 'memoryview', 'object', + 'property', 'range', 'set', 'set\'', 'slice', 'staticmethod', 'str', 'super', + 'tuple', 'tuple\'', 'type'), prefix=r'(?') or line.startswith('+'): + # Colorize the prompt as such, + # then put rest of line into current_code_block + insertions.append((len(current_code_block), + [(0, Generic.Prompt, line[:2])])) + current_code_block += line[2:] + else: + # We have reached a non-prompt line! + # If we have stored prompt lines, need to process them first. + if current_code_block: + # Weave together the prompts and highlight code. + for item in do_insertions( + insertions, slexer.get_tokens_unprocessed(current_code_block)): + yield item + # Reset vars for next code block. + current_code_block = '' + insertions = [] + # Now process the actual line itself, this is output from R. + yield match.start(), Generic.Output, line + + # If we happen to end on a code block with nothing after it, need to + # process the last code block. 
This is neither elegant nor DRY so + # should be changed. + if current_code_block: + for item in do_insertions( + insertions, slexer.get_tokens_unprocessed(current_code_block)): + yield item + + +class SLexer(RegexLexer): + """ + For S, S-plus, and R source code. + + .. versionadded:: 0.10 + """ + + name = 'S' + aliases = ['splus', 's', 'r'] + filenames = ['*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron'] + mimetypes = ['text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', + 'text/x-R', 'text/x-r-history', 'text/x-r-profile'] + + builtins_base = ( + 'Arg', 'Conj', 'Cstack_info', 'Encoding', 'FALSE', + 'Filter', 'Find', 'I', 'ISOdate', 'ISOdatetime', 'Im', 'Inf', + 'La.svd', 'Map', 'Math.Date', 'Math.POSIXt', 'Math.data.frame', + 'Math.difftime', 'Math.factor', 'Mod', 'NA_character_', + 'NA_complex_', 'NA_real_', 'NCOL', 'NROW', 'NULLNA_integer_', 'NaN', + 'Negate', 'NextMethod', 'Ops.Date', 'Ops.POSIXt', 'Ops.data.frame', + 'Ops.difftime', 'Ops.factor', 'Ops.numeric_version', 'Ops.ordered', + 'Position', 'R.Version', 'R.home', 'R.version', 'R.version.string', + 'RNGkind', 'RNGversion', 'R_system_version', 'Re', 'Recall', + 'Reduce', 'Summary.Date', 'Summary.POSIXct', 'Summary.POSIXlt', + 'Summary.data.frame', 'Summary.difftime', 'Summary.factor', + 'Summary.numeric_version', 'Summary.ordered', 'Sys.Date', + 'Sys.chmod', 'Sys.getenv', 'Sys.getlocale', 'Sys.getpid', + 'Sys.glob', 'Sys.info', 'Sys.localeconv', 'Sys.readlink', + 'Sys.setFileTime', 'Sys.setenv', 'Sys.setlocale', 'Sys.sleep', + 'Sys.time', 'Sys.timezone', 'Sys.umask', 'Sys.unsetenv', + 'Sys.which', 'TRUE', 'UseMethod', 'Vectorize', 'abbreviate', 'abs', + 'acos', 'acosh', 'addNA', 'addTaskCallback', 'agrep', 'alist', + 'all', 'all.equal', 'all.equal.POSIXct', 'all.equal.character', + 'all.equal.default', 'all.equal.factor', 'all.equal.formula', + 'all.equal.language', 'all.equal.list', 'all.equal.numeric', + 'all.equal.raw', 'all.names', 'all.vars', 'any', 'anyDuplicated', + 'anyDuplicated.array', 
'anyDuplicated.data.frame', + 'anyDuplicated.default', 'anyDuplicated.matrix', 'aperm', + 'aperm.default', 'aperm.table', 'append', 'apply', 'args', + 'arrayInd', 'as.Date', 'as.Date.POSIXct', 'as.Date.POSIXlt', + 'as.Date.character', 'as.Date.date', 'as.Date.dates', + 'as.Date.default', 'as.Date.factor', 'as.Date.numeric', + 'as.POSIXct', 'as.POSIXct.Date', 'as.POSIXct.POSIXlt', + 'as.POSIXct.date', 'as.POSIXct.dates', 'as.POSIXct.default', + 'as.POSIXct.numeric', 'as.POSIXlt', 'as.POSIXlt.Date', + 'as.POSIXlt.POSIXct', 'as.POSIXlt.character', 'as.POSIXlt.date', + 'as.POSIXlt.dates', 'as.POSIXlt.default', 'as.POSIXlt.factor', + 'as.POSIXlt.numeric', 'as.array', 'as.array.default', 'as.call', + 'as.character', 'as.character.Date', 'as.character.POSIXt', + 'as.character.condition', 'as.character.default', + 'as.character.error', 'as.character.factor', 'as.character.hexmode', + 'as.character.numeric_version', 'as.character.octmode', + 'as.character.srcref', 'as.complex', 'as.data.frame', + 'as.data.frame.AsIs', 'as.data.frame.Date', 'as.data.frame.POSIXct', + 'as.data.frame.POSIXlt', 'as.data.frame.array', + 'as.data.frame.character', 'as.data.frame.complex', + 'as.data.frame.data.frame', 'as.data.frame.default', + 'as.data.frame.difftime', 'as.data.frame.factor', + 'as.data.frame.integer', 'as.data.frame.list', + 'as.data.frame.logical', 'as.data.frame.matrix', + 'as.data.frame.model.matrix', 'as.data.frame.numeric', + 'as.data.frame.numeric_version', 'as.data.frame.ordered', + 'as.data.frame.raw', 'as.data.frame.table', 'as.data.frame.ts', + 'as.data.frame.vector', 'as.difftime', 'as.double', + 'as.double.POSIXlt', 'as.double.difftime', 'as.environment', + 'as.expression', 'as.expression.default', 'as.factor', + 'as.function', 'as.function.default', 'as.hexmode', 'as.integer', + 'as.list', 'as.list.Date', 'as.list.POSIXct', 'as.list.data.frame', + 'as.list.default', 'as.list.environment', 'as.list.factor', + 'as.list.function', 'as.list.numeric_version', 
'as.logical', + 'as.logical.factor', 'as.matrix', 'as.matrix.POSIXlt', + 'as.matrix.data.frame', 'as.matrix.default', 'as.matrix.noquote', + 'as.name', 'as.null', 'as.null.default', 'as.numeric', + 'as.numeric_version', 'as.octmode', 'as.ordered', + 'as.package_version', 'as.pairlist', 'as.qr', 'as.raw', 'as.single', + 'as.single.default', 'as.symbol', 'as.table', 'as.table.default', + 'as.vector', 'as.vector.factor', 'asNamespace', 'asS3', 'asS4', + 'asin', 'asinh', 'assign', 'atan', 'atan2', 'atanh', + 'attachNamespace', 'attr', 'attr.all.equal', 'attributes', + 'autoload', 'autoloader', 'backsolve', 'baseenv', 'basename', + 'besselI', 'besselJ', 'besselK', 'besselY', 'beta', + 'bindingIsActive', 'bindingIsLocked', 'bindtextdomain', 'bitwAnd', + 'bitwNot', 'bitwOr', 'bitwShiftL', 'bitwShiftR', 'bitwXor', 'body', + 'bquote', 'browser', 'browserCondition', 'browserSetDebug', + 'browserText', 'builtins', 'by', 'by.data.frame', 'by.default', + 'bzfile', 'c.Date', 'c.POSIXct', 'c.POSIXlt', 'c.noquote', + 'c.numeric_version', 'call', 'callCC', 'capabilities', 'casefold', + 'cat', 'category', 'cbind', 'cbind.data.frame', 'ceiling', + 'char.expand', 'charToRaw', 'charmatch', 'chartr', 'check_tzones', + 'chol', 'chol.default', 'chol2inv', 'choose', 'class', + 'clearPushBack', 'close', 'close.connection', 'close.srcfile', + 'close.srcfilealias', 'closeAllConnections', 'col', 'colMeans', + 'colSums', 'colnames', 'commandArgs', 'comment', 'computeRestarts', + 'conditionCall', 'conditionCall.condition', 'conditionMessage', + 'conditionMessage.condition', 'conflicts', 'contributors', 'cos', + 'cosh', 'crossprod', 'cummax', 'cummin', 'cumprod', 'cumsum', 'cut', + 'cut.Date', 'cut.POSIXt', 'cut.default', 'dQuote', 'data.class', + 'data.matrix', 'date', 'debug', 'debugonce', + 'default.stringsAsFactors', 'delayedAssign', 'deparse', 'det', + 'determinant', 'determinant.matrix', 'dget', 'diag', 'diff', + 'diff.Date', 'diff.POSIXt', 'diff.default', 'difftime', 'digamma', + 'dim', 
'dim.data.frame', 'dimnames', 'dimnames.data.frame', 'dir', + 'dir.create', 'dirname', 'do.call', 'dput', 'drop', 'droplevels', + 'droplevels.data.frame', 'droplevels.factor', 'dump', 'duplicated', + 'duplicated.POSIXlt', 'duplicated.array', 'duplicated.data.frame', + 'duplicated.default', 'duplicated.matrix', + 'duplicated.numeric_version', 'dyn.load', 'dyn.unload', 'eapply', + 'eigen', 'else', 'emptyenv', 'enc2native', 'enc2utf8', + 'encodeString', 'enquote', 'env.profile', 'environment', + 'environmentIsLocked', 'environmentName', 'eval', 'eval.parent', + 'evalq', 'exists', 'exp', 'expand.grid', 'expm1', 'expression', + 'factor', 'factorial', 'fifo', 'file', 'file.access', 'file.append', + 'file.choose', 'file.copy', 'file.create', 'file.exists', + 'file.info', 'file.link', 'file.path', 'file.remove', 'file.rename', + 'file.show', 'file.symlink', 'find.package', 'findInterval', + 'findPackageEnv', 'findRestart', 'floor', 'flush', + 'flush.connection', 'force', 'formals', 'format', + 'format.AsIs', 'format.Date', 'format.POSIXct', 'format.POSIXlt', + 'format.data.frame', 'format.default', 'format.difftime', + 'format.factor', 'format.hexmode', 'format.info', + 'format.libraryIQR', 'format.numeric_version', 'format.octmode', + 'format.packageInfo', 'format.pval', 'format.summaryDefault', + 'formatC', 'formatDL', 'forwardsolve', 'gamma', 'gc', 'gc.time', + 'gcinfo', 'gctorture', 'gctorture2', 'get', 'getAllConnections', + 'getCallingDLL', 'getCallingDLLe', 'getConnection', + 'getDLLRegisteredRoutines', 'getDLLRegisteredRoutines.DLLInfo', + 'getDLLRegisteredRoutines.character', 'getElement', + 'getExportedValue', 'getHook', 'getLoadedDLLs', 'getNamespace', + 'getNamespaceExports', 'getNamespaceImports', 'getNamespaceInfo', + 'getNamespaceName', 'getNamespaceUsers', 'getNamespaceVersion', + 'getNativeSymbolInfo', 'getOption', 'getRversion', 'getSrcLines', + 'getTaskCallbackNames', 'geterrmessage', 'gettext', 'gettextf', + 'getwd', 'gl', 'globalenv', 'gregexpr', 
'grep', 'grepRaw', 'grepl', + 'gsub', 'gzcon', 'gzfile', 'head', 'iconv', 'iconvlist', + 'icuSetCollate', 'identical', 'identity', 'ifelse', 'importIntoEnv', + 'in', 'inherits', 'intToBits', 'intToUtf8', 'interaction', 'interactive', + 'intersect', 'inverse.rle', 'invisible', 'invokeRestart', + 'invokeRestartInteractively', 'is.R', 'is.array', 'is.atomic', + 'is.call', 'is.character', 'is.complex', 'is.data.frame', + 'is.double', 'is.element', 'is.environment', 'is.expression', + 'is.factor', 'is.finite', 'is.function', 'is.infinite', + 'is.integer', 'is.language', 'is.list', 'is.loaded', 'is.logical', + 'is.matrix', 'is.na', 'is.na.POSIXlt', 'is.na.data.frame', + 'is.na.numeric_version', 'is.name', 'is.nan', 'is.null', + 'is.numeric', 'is.numeric.Date', 'is.numeric.POSIXt', + 'is.numeric.difftime', 'is.numeric_version', 'is.object', + 'is.ordered', 'is.package_version', 'is.pairlist', 'is.primitive', + 'is.qr', 'is.raw', 'is.recursive', 'is.single', 'is.symbol', + 'is.table', 'is.unsorted', 'is.vector', 'isBaseNamespace', + 'isIncomplete', 'isNamespace', 'isOpen', 'isRestart', 'isS4', + 'isSeekable', 'isSymmetric', 'isSymmetric.matrix', 'isTRUE', + 'isatty', 'isdebugged', 'jitter', 'julian', 'julian.Date', + 'julian.POSIXt', 'kappa', 'kappa.default', 'kappa.lm', 'kappa.qr', + 'kronecker', 'l10n_info', 'labels', 'labels.default', 'lapply', + 'lazyLoad', 'lazyLoadDBexec', 'lazyLoadDBfetch', 'lbeta', 'lchoose', + 'length', 'length.POSIXlt', 'letters', 'levels', 'levels.default', + 'lfactorial', 'lgamma', 'library.dynam', 'library.dynam.unload', + 'licence', 'license', 'list.dirs', 'list.files', 'list2env', 'load', + 'loadNamespace', 'loadedNamespaces', 'loadingNamespaceInfo', + 'local', 'lockBinding', 'lockEnvironment', 'log', 'log10', 'log1p', + 'log2', 'logb', 'lower.tri', 'ls', 'make.names', 'make.unique', + 'makeActiveBinding', 'mapply', 'margin.table', 'mat.or.vec', + 'match', 'match.arg', 'match.call', 'match.fun', 'max', 'max.col', + 'mean', 'mean.Date', 
'mean.POSIXct', 'mean.POSIXlt', 'mean.default', + 'mean.difftime', 'mem.limits', 'memCompress', 'memDecompress', + 'memory.profile', 'merge', 'merge.data.frame', 'merge.default', + 'message', 'mget', 'min', 'missing', 'mode', 'month.abb', + 'month.name', 'months', 'months.Date', 'months.POSIXt', + 'months.abb', 'months.nameletters', 'names', 'names.POSIXlt', + 'namespaceExport', 'namespaceImport', 'namespaceImportClasses', + 'namespaceImportFrom', 'namespaceImportMethods', 'nargs', 'nchar', + 'ncol', 'new.env', 'ngettext', 'nlevels', 'noquote', 'norm', + 'normalizePath', 'nrow', 'numeric_version', 'nzchar', 'objects', + 'oldClass', 'on.exit', 'open', 'open.connection', 'open.srcfile', + 'open.srcfilealias', 'open.srcfilecopy', 'options', 'order', + 'ordered', 'outer', 'packBits', 'packageEvent', + 'packageHasNamespace', 'packageStartupMessage', 'package_version', + 'pairlist', 'parent.env', 'parent.frame', 'parse', + 'parseNamespaceFile', 'paste', 'paste0', 'path.expand', + 'path.package', 'pipe', 'pmatch', 'pmax', 'pmax.int', 'pmin', + 'pmin.int', 'polyroot', 'pos.to.env', 'pretty', 'pretty.default', + 'prettyNum', 'print', 'print.AsIs', 'print.DLLInfo', + 'print.DLLInfoList', 'print.DLLRegisteredRoutines', 'print.Date', + 'print.NativeRoutineList', 'print.POSIXct', 'print.POSIXlt', + 'print.by', 'print.condition', 'print.connection', + 'print.data.frame', 'print.default', 'print.difftime', + 'print.factor', 'print.function', 'print.hexmode', + 'print.libraryIQR', 'print.listof', 'print.noquote', + 'print.numeric_version', 'print.octmode', 'print.packageInfo', + 'print.proc_time', 'print.restart', 'print.rle', + 'print.simple.list', 'print.srcfile', 'print.srcref', + 'print.summary.table', 'print.summaryDefault', 'print.table', + 'print.warnings', 'prmatrix', 'proc.time', 'prod', 'prop.table', + 'provideDimnames', 'psigamma', 'pushBack', 'pushBackLength', 'q', + 'qr', 'qr.Q', 'qr.R', 'qr.X', 'qr.coef', 'qr.default', 'qr.fitted', + 'qr.qty', 'qr.qy', 'qr.resid', 
'qr.solve', 'quarters', + 'quarters.Date', 'quarters.POSIXt', 'quit', 'quote', 'range', + 'range.default', 'rank', 'rapply', 'raw', 'rawConnection', + 'rawConnectionValue', 'rawShift', 'rawToBits', 'rawToChar', 'rbind', + 'rbind.data.frame', 'rcond', 'read.dcf', 'readBin', 'readChar', + 'readLines', 'readRDS', 'readRenviron', 'readline', 'reg.finalizer', + 'regexec', 'regexpr', 'registerS3method', 'registerS3methods', + 'regmatches', 'remove', 'removeTaskCallback', 'rep', 'rep.Date', + 'rep.POSIXct', 'rep.POSIXlt', 'rep.factor', 'rep.int', + 'rep.numeric_version', 'rep_len', 'replace', 'replicate', + 'requireNamespace', 'restartDescription', 'restartFormals', + 'retracemem', 'rev', 'rev.default', 'rle', 'rm', 'round', + 'round.Date', 'round.POSIXt', 'row', 'row.names', + 'row.names.data.frame', 'row.names.default', 'rowMeans', 'rowSums', + 'rownames', 'rowsum', 'rowsum.data.frame', 'rowsum.default', + 'sQuote', 'sample', 'sample.int', 'sapply', 'save', 'save.image', + 'saveRDS', 'scale', 'scale.default', 'scan', 'search', + 'searchpaths', 'seek', 'seek.connection', 'seq', 'seq.Date', + 'seq.POSIXt', 'seq.default', 'seq.int', 'seq_along', 'seq_len', + 'sequence', 'serialize', 'set.seed', 'setHook', 'setNamespaceInfo', + 'setSessionTimeLimit', 'setTimeLimit', 'setdiff', 'setequal', + 'setwd', 'shQuote', 'showConnections', 'sign', 'signalCondition', + 'signif', 'simpleCondition', 'simpleError', 'simpleMessage', + 'simpleWarning', 'simplify2array', 'sin', 'single', + 'sinh', 'sink', 'sink.number', 'slice.index', 'socketConnection', + 'socketSelect', 'solve', 'solve.default', 'solve.qr', 'sort', + 'sort.POSIXlt', 'sort.default', 'sort.int', 'sort.list', 'split', + 'split.Date', 'split.POSIXct', 'split.data.frame', 'split.default', + 'sprintf', 'sqrt', 'srcfile', 'srcfilealias', 'srcfilecopy', + 'srcref', 'standardGeneric', 'stderr', 'stdin', 'stdout', 'stop', + 'stopifnot', 'storage.mode', 'strftime', 'strptime', 'strsplit', + 'strtoi', 'strtrim', 'structure', 
'strwrap', 'sub', 'subset', + 'subset.data.frame', 'subset.default', 'subset.matrix', + 'substitute', 'substr', 'substring', 'sum', 'summary', + 'summary.Date', 'summary.POSIXct', 'summary.POSIXlt', + 'summary.connection', 'summary.data.frame', 'summary.default', + 'summary.factor', 'summary.matrix', 'summary.proc_time', + 'summary.srcfile', 'summary.srcref', 'summary.table', + 'suppressMessages', 'suppressPackageStartupMessages', + 'suppressWarnings', 'svd', 'sweep', 'sys.call', 'sys.calls', + 'sys.frame', 'sys.frames', 'sys.function', 'sys.load.image', + 'sys.nframe', 'sys.on.exit', 'sys.parent', 'sys.parents', + 'sys.save.image', 'sys.source', 'sys.status', 'system', + 'system.file', 'system.time', 'system2', 't', 't.data.frame', + 't.default', 'table', 'tabulate', 'tail', 'tan', 'tanh', 'tapply', + 'taskCallbackManager', 'tcrossprod', 'tempdir', 'tempfile', + 'testPlatformEquivalence', 'textConnection', 'textConnectionValue', + 'toString', 'toString.default', 'tolower', 'topenv', 'toupper', + 'trace', 'traceback', 'tracemem', 'tracingState', 'transform', + 'transform.data.frame', 'transform.default', 'trigamma', 'trunc', + 'trunc.Date', 'trunc.POSIXt', 'truncate', 'truncate.connection', + 'try', 'tryCatch', 'typeof', 'unclass', 'undebug', 'union', + 'unique', 'unique.POSIXlt', 'unique.array', 'unique.data.frame', + 'unique.default', 'unique.matrix', 'unique.numeric_version', + 'units', 'units.difftime', 'unix.time', 'unlink', 'unlist', + 'unloadNamespace', 'unlockBinding', 'unname', 'unserialize', + 'unsplit', 'untrace', 'untracemem', 'unz', 'upper.tri', 'url', + 'utf8ToInt', 'vapply', 'version', 'warning', 'warnings', 'weekdays', + 'weekdays.Date', 'weekdays.POSIXt', 'which', 'which.max', + 'which.min', 'with', 'with.default', 'withCallingHandlers', + 'withRestarts', 'withVisible', 'within', 'within.data.frame', + 'within.list', 'write', 'write.dcf', 'writeBin', 'writeChar', + 'writeLines', 'xor', 'xor.hexmode', 'xor.octmode', + 'xpdrows.data.frame', 'xtfrm', 
'xtfrm.AsIs', 'xtfrm.Date', + 'xtfrm.POSIXct', 'xtfrm.POSIXlt', 'xtfrm.Surv', 'xtfrm.default', + 'xtfrm.difftime', 'xtfrm.factor', 'xtfrm.numeric_version', 'xzfile', + 'zapsmall' + ) + + tokens = { + 'comments': [ + (r'#.*$', Comment.Single), + ], + 'valid_name': [ + (r'[a-zA-Z][\w.]*', Text), + # can begin with ., but not if that is followed by a digit + (r'\.[a-zA-Z_][\w.]*', Text), + ], + 'punctuation': [ + (r'\[{1,2}|\]{1,2}|\(|\)|;|,', Punctuation), + ], + 'keywords': [ + (words(builtins_base, suffix=r'(?![\w. =])'), + Keyword.Pseudo), + (r'(if|else|for|while|repeat|in|next|break|return|switch|function)' + r'(?![\w.])', + Keyword.Reserved), + (r'(array|category|character|complex|double|function|integer|list|' + r'logical|matrix|numeric|vector|data.frame|c)' + r'(?![\w.])', + Keyword.Type), + (r'(library|require|attach|detach|source)' + r'(?![\w.])', + Keyword.Namespace) + ], + 'operators': [ + (r'<>?|-|==|<=|>=|<|>|&&?|!=|\|\|?|\?', Operator), + (r'\*|\+|\^|/|!|%[^%]*%|=|~|\$|@|:{1,3}', Operator) + ], + 'builtin_symbols': [ + (r'(NULL|NA(_(integer|real|complex|character)_)?|' + r'letters|LETTERS|Inf|TRUE|FALSE|NaN|pi|\.\.(\.|[0-9]+))' + r'(?![\w.])', + Keyword.Constant), + (r'(T|F)\b', Name.Builtin.Pseudo), + ], + 'numbers': [ + # hex number + (r'0[xX][a-fA-F0-9]+([pP][0-9]+)?[Li]?', Number.Hex), + # decimal number + (r'[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+|\.)([eE][+-]?[0-9]+)?[Li]?', + Number), + ], + 'statements': [ + include('comments'), + # whitespaces + (r'\s+', Text), + (r'`.*?`', String.Backtick), + (r'\'', String, 'string_squote'), + (r'\"', String, 'string_dquote'), + include('builtin_symbols'), + include('numbers'), + include('keywords'), + include('punctuation'), + include('operators'), + include('valid_name'), + ], + 'root': [ + include('statements'), + # blocks: + (r'\{|\}', Punctuation), + # (r'\{', Punctuation, 'block'), + (r'.', Text), + ], + # 'block': [ + # include('statements'), + # ('\{', Punctuation, '#push'), + # ('\}', Punctuation, '#pop') + 
# ], + 'string_squote': [ + (r'([^\'\\]|\\.)*\'', String, '#pop'), + ], + 'string_dquote': [ + (r'([^"\\]|\\.)*"', String, '#pop'), + ], + } + + def analyse_text(text): + if re.search(r'[a-z0-9_\])\s]<-(?!-)', text): + return 0.11 + + +class RdLexer(RegexLexer): + """ + Pygments Lexer for R documentation (Rd) files + + This is a very minimal implementation, highlighting little more + than the macros. A description of Rd syntax is found in `Writing R + Extensions `_ + and `Parsing Rd files `_. + + .. versionadded:: 1.6 + """ + name = 'Rd' + aliases = ['rd'] + filenames = ['*.Rd'] + mimetypes = ['text/x-r-doc'] + + # To account for verbatim / LaTeX-like / and R-like areas + # would require parsing. + tokens = { + 'root': [ + # catch escaped brackets and percent sign + (r'\\[\\{}%]', String.Escape), + # comments + (r'%.*$', Comment), + # special macros with no arguments + (r'\\(?:cr|l?dots|R|tab)\b', Keyword.Constant), + # macros + (r'\\[a-zA-Z]+\b', Keyword), + # special preprocessor macros + (r'^\s*#(?:ifn?def|endif).*\b', Comment.Preproc), + # non-escaped brackets + (r'[{}]', Name.Builtin), + # everything else + (r'[^\\%\n{}]+', Text), + (r'.', Text), + ] + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/rdf.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/rdf.py new file mode 100644 index 0000000..292b1ae --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/rdf.py @@ -0,0 +1,99 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.rdf + ~~~~~~~~~~~~~~~~~~~ + + Lexers for semantic web and RDF query languages and markup. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import re + +from pygments.lexer import RegexLexer, bygroups, default +from pygments.token import Keyword, Punctuation, String, Number, Operator, \ + Whitespace, Name, Literal, Comment, Text + +__all__ = ['SparqlLexer'] + + +class SparqlLexer(RegexLexer): + """ + Lexer for `SPARQL `_ query language. + + .. versionadded:: 2.0 + """ + name = 'SPARQL' + aliases = ['sparql'] + filenames = ['*.rq', '*.sparql'] + mimetypes = ['application/sparql-query'] + + flags = re.IGNORECASE + + tokens = { + 'root': [ + (r'\s+', Whitespace), + (r'(select|construct|describe|ask|where|filter|group\s+by|minus|' + r'distinct|reduced|from named|from|order\s+by|limit|' + r'offset|bindings|load|clear|drop|create|add|move|copy|' + r'insert\s+data|delete\s+data|delete\s+where|delete|insert|' + r'using named|using|graph|default|named|all|optional|service|' + r'silent|bind|union|not in|in|as|a)', Keyword), + (r'(prefix|base)(\s+)([a-z][\w-]*)(\s*)(\:)', + bygroups(Keyword, Whitespace, Name.Namespace, Whitespace, + Punctuation)), + (r'\?[a-z_]\w*', Name.Variable), + (r'<[^>]+>', Name.Label), + (r'([a-z][\w-]*)(\:)([a-z][\w-]*)', + bygroups(Name.Namespace, Punctuation, Name.Tag)), + (r'(str|lang|langmatches|datatype|bound|iri|uri|bnode|rand|abs|' + r'ceil|floor|round|concat|strlen|ucase|lcase|encode_for_uri|' + r'contains|strstarts|strends|strbefore|strafter|year|month|day|' + r'hours|minutes|seconds|timezone|tz|now|md5|sha1|sha256|sha384|' + r'sha512|coalesce|if|strlang|strdt|sameterm|isiri|isuri|isblank|' + r'isliteral|isnumeric|regex|substr|replace|exists|not exists|' + r'count|sum|min|max|avg|sample|group_concat|separator)\b', + Name.Function), + (r'(true|false)', Literal), + (r'[+\-]?\d*\.\d+', Number.Float), + (r'[+\-]?\d*(:?\.\d+)?E[+\-]?\d+', Number.Float), + (r'[+\-]?\d+', Number.Integer), + (r'(\|\||&&|=|\*|\-|\+|/)', Operator), + (r'[(){}.;,:^]', Punctuation), + (r'#[^\n]+', Comment), + (r'"""', String, 'triple-double-quoted-string'), + (r'"', String, 
'single-double-quoted-string'), + (r"'''", String, 'triple-single-quoted-string'), + (r"'", String, 'single-single-quoted-string'), + ], + 'triple-double-quoted-string': [ + (r'"""', String, 'end-of-string'), + (r'[^\\]+', String), + (r'\\', String, 'string-escape'), + ], + 'single-double-quoted-string': [ + (r'"', String, 'end-of-string'), + (r'[^"\\\n]+', String), + (r'\\', String, 'string-escape'), + ], + 'triple-single-quoted-string': [ + (r"'''", String, 'end-of-string'), + (r'[^\\]+', String), + (r'\\', String, 'string-escape'), + ], + 'single-single-quoted-string': [ + (r"'", String, 'end-of-string'), + (r"[^'\\\n]+", String), + (r'\\', String, 'string-escape'), + ], + 'string-escape': [ + (r'.', String, '#pop'), + ], + 'end-of-string': [ + (r'(@)([a-z]+(:?-[a-z0-9]+)*)', + bygroups(Operator, Name.Function), '#pop:2'), + (r'\^\^', Operator, '#pop:2'), + default('#pop:2'), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/rebol.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/rebol.py new file mode 100644 index 0000000..8ff97ee --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/rebol.py @@ -0,0 +1,431 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.rebol + ~~~~~~~~~~~~~~~~~~~~~ + + Lexers for the REBOL and related languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, bygroups +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Generic, Whitespace + +__all__ = ['RebolLexer', 'RedLexer'] + + +class RebolLexer(RegexLexer): + """ + A `REBOL `_ lexer. + + .. 
versionadded:: 1.1 + """ + name = 'REBOL' + aliases = ['rebol'] + filenames = ['*.r', '*.r3', '*.reb'] + mimetypes = ['text/x-rebol'] + + flags = re.IGNORECASE | re.MULTILINE + + escape_re = r'(?:\^\([0-9a-f]{1,4}\)*)' + + def word_callback(lexer, match): + word = match.group() + + if re.match(".*:$", word): + yield match.start(), Generic.Subheading, word + elif re.match( + r'(native|alias|all|any|as-string|as-binary|bind|bound\?|case|' + r'catch|checksum|comment|debase|dehex|exclude|difference|disarm|' + r'either|else|enbase|foreach|remove-each|form|free|get|get-env|if|' + r'in|intersect|loop|minimum-of|maximum-of|mold|new-line|' + r'new-line\?|not|now|prin|print|reduce|compose|construct|repeat|' + r'reverse|save|script\?|set|shift|switch|throw|to-hex|trace|try|' + r'type\?|union|unique|unless|unprotect|unset|until|use|value\?|' + r'while|compress|decompress|secure|open|close|read|read-io|' + r'write-io|write|update|query|wait|input\?|exp|log-10|log-2|' + r'log-e|square-root|cosine|sine|tangent|arccosine|arcsine|' + r'arctangent|protect|lowercase|uppercase|entab|detab|connected\?|' + r'browse|launch|stats|get-modes|set-modes|to-local-file|' + r'to-rebol-file|encloak|decloak|create-link|do-browser|bind\?|' + r'hide|draw|show|size-text|textinfo|offset-to-caret|' + r'caret-to-offset|local-request-file|rgb-to-hsv|hsv-to-rgb|' + r'crypt-strength\?|dh-make-key|dh-generate-key|dh-compute-key|' + r'dsa-make-key|dsa-generate-key|dsa-make-signature|' + r'dsa-verify-signature|rsa-make-key|rsa-generate-key|' + r'rsa-encrypt)$', word): + yield match.start(), Name.Builtin, word + elif re.match( + r'(add|subtract|multiply|divide|remainder|power|and~|or~|xor~|' + r'minimum|maximum|negate|complement|absolute|random|head|tail|' + r'next|back|skip|at|pick|first|second|third|fourth|fifth|sixth|' + r'seventh|eighth|ninth|tenth|last|path|find|select|make|to|copy\*|' + r'insert|remove|change|poke|clear|trim|sort|min|max|abs|cp|' + r'copy)$', word): + yield match.start(), Name.Function, 
word + elif re.match( + r'(error|source|input|license|help|install|echo|Usage|with|func|' + r'throw-on-error|function|does|has|context|probe|\?\?|as-pair|' + r'mod|modulo|round|repend|about|set-net|append|join|rejoin|reform|' + r'remold|charset|array|replace|move|extract|forskip|forall|alter|' + r'first+|also|take|for|forever|dispatch|attempt|what-dir|' + r'change-dir|clean-path|list-dir|dirize|rename|split-path|delete|' + r'make-dir|delete-dir|in-dir|confirm|dump-obj|upgrade|what|' + r'build-tag|process-source|build-markup|decode-cgi|read-cgi|' + r'write-user|save-user|set-user-name|protect-system|parse-xml|' + r'cvs-date|cvs-version|do-boot|get-net-info|desktop|layout|' + r'scroll-para|get-face|alert|set-face|uninstall|unfocus|' + r'request-dir|center-face|do-events|net-error|decode-url|' + r'parse-header|parse-header-date|parse-email-addrs|import-email|' + r'send|build-attach-body|resend|show-popup|hide-popup|open-events|' + r'find-key-face|do-face|viewtop|confine|find-window|' + r'insert-event-func|remove-event-func|inform|dump-pane|dump-face|' + r'flag-face|deflag-face|clear-fields|read-net|vbug|path-thru|' + r'read-thru|load-thru|do-thru|launch-thru|load-image|' + r'request-download|do-face-alt|set-font|set-para|get-style|' + r'set-style|make-face|stylize|choose|hilight-text|hilight-all|' + r'unlight-text|focus|scroll-drag|clear-face|reset-face|scroll-face|' + r'resize-face|load-stock|load-stock-block|notify|request|flash|' + r'request-color|request-pass|request-text|request-list|' + r'request-date|request-file|dbug|editor|link-relative-path|' + r'emailer|parse-error)$', word): + yield match.start(), Keyword.Namespace, word + elif re.match( + r'(halt|quit|do|load|q|recycle|call|run|ask|parse|view|unview|' + r'return|exit|break)$', word): + yield match.start(), Name.Exception, word + elif re.match('REBOL$', word): + yield match.start(), Generic.Heading, word + elif re.match("to-.*", word): + yield match.start(), Keyword, word + elif 
re.match('(\+|-|\*|/|//|\*\*|and|or|xor|=\?|=|==|<>|<|>|<=|>=)$', + word): + yield match.start(), Operator, word + elif re.match(".*\?$", word): + yield match.start(), Keyword, word + elif re.match(".*\!$", word): + yield match.start(), Keyword.Type, word + elif re.match("'.*", word): + yield match.start(), Name.Variable.Instance, word # lit-word + elif re.match("#.*", word): + yield match.start(), Name.Label, word # issue + elif re.match("%.*", word): + yield match.start(), Name.Decorator, word # file + else: + yield match.start(), Name.Variable, word + + tokens = { + 'root': [ + (r'[^R]+', Comment), + (r'REBOL\s+\[', Generic.Strong, 'script'), + (r'R', Comment) + ], + 'script': [ + (r'\s+', Text), + (r'#"', String.Char, 'char'), + (r'#\{[0-9a-f]*\}', Number.Hex), + (r'2#\{', Number.Hex, 'bin2'), + (r'64#\{[0-9a-z+/=\s]*\}', Number.Hex), + (r'"', String, 'string'), + (r'\{', String, 'string2'), + (r';#+.*\n', Comment.Special), + (r';\*+.*\n', Comment.Preproc), + (r';.*\n', Comment), + (r'%"', Name.Decorator, 'stringFile'), + (r'%[^(^{")\s\[\]]+', Name.Decorator), + (r'[+-]?([a-z]{1,3})?\$\d+(\.\d+)?', Number.Float), # money + (r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other), # time + (r'\d+[\-/][0-9a-z]+[\-/]\d+(\/\d+\:\d+((\:\d+)?' 
+ r'([.\d+]?([+-]?\d+:\d+)?)?)?)?', String.Other), # date + (r'\d+(\.\d+)+\.\d+', Keyword.Constant), # tuple + (r'\d+X\d+', Keyword.Constant), # pair + (r'[+-]?\d+(\'\d+)?([.,]\d*)?E[+-]?\d+', Number.Float), + (r'[+-]?\d+(\'\d+)?[.,]\d*', Number.Float), + (r'[+-]?\d+(\'\d+)?', Number), + (r'[\[\]()]', Generic.Strong), + (r'[a-z]+[^(^{"\s:)]*://[^(^{"\s)]*', Name.Decorator), # url + (r'mailto:[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # url + (r'[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # email + (r'comment\s"', Comment, 'commentString1'), + (r'comment\s\{', Comment, 'commentString2'), + (r'comment\s\[', Comment, 'commentBlock'), + (r'comment\s[^(\s{"\[]+', Comment), + (r'/[^(^{")\s/[\]]*', Name.Attribute), + (r'([^(^{")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback), + (r'<[\w:.-]*>', Name.Tag), + (r'<[^(<>\s")]+', Name.Tag, 'tag'), + (r'([^(^{")\s]+)', Text), + ], + 'string': [ + (r'[^(^")]+', String), + (escape_re, String.Escape), + (r'[(|)]+', String), + (r'\^.', String.Escape), + (r'"', String, '#pop'), + ], + 'string2': [ + (r'[^(^{})]+', String), + (escape_re, String.Escape), + (r'[(|)]+', String), + (r'\^.', String.Escape), + (r'\{', String, '#push'), + (r'\}', String, '#pop'), + ], + 'stringFile': [ + (r'[^(^")]+', Name.Decorator), + (escape_re, Name.Decorator), + (r'\^.', Name.Decorator), + (r'"', Name.Decorator, '#pop'), + ], + 'char': [ + (escape_re + '"', String.Char, '#pop'), + (r'\^."', String.Char, '#pop'), + (r'."', String.Char, '#pop'), + ], + 'tag': [ + (escape_re, Name.Tag), + (r'"', Name.Tag, 'tagString'), + (r'[^(<>\r\n")]+', Name.Tag), + (r'>', Name.Tag, '#pop'), + ], + 'tagString': [ + (r'[^(^")]+', Name.Tag), + (escape_re, Name.Tag), + (r'[(|)]+', Name.Tag), + (r'\^.', Name.Tag), + (r'"', Name.Tag, '#pop'), + ], + 'tuple': [ + (r'(\d+\.)+', Keyword.Constant), + (r'\d+', Keyword.Constant, '#pop'), + ], + 'bin2': [ + (r'\s+', Number.Hex), + (r'([01]\s*){8}', Number.Hex), + (r'\}', Number.Hex, '#pop'), + ], + 'commentString1': [ + 
(r'[^(^")]+', Comment), + (escape_re, Comment), + (r'[(|)]+', Comment), + (r'\^.', Comment), + (r'"', Comment, '#pop'), + ], + 'commentString2': [ + (r'[^(^{})]+', Comment), + (escape_re, Comment), + (r'[(|)]+', Comment), + (r'\^.', Comment), + (r'\{', Comment, '#push'), + (r'\}', Comment, '#pop'), + ], + 'commentBlock': [ + (r'\[', Comment, '#push'), + (r'\]', Comment, '#pop'), + (r'"', Comment, "commentString1"), + (r'\{', Comment, "commentString2"), + (r'[^(\[\]"{)]+', Comment), + ], + } + + def analyse_text(text): + """ + Check if code contains REBOL header and so it is probably not R code + """ + if re.match(r'^\s*REBOL\s*\[', text, re.IGNORECASE): + # The code starts with REBOL header + return 1.0 + elif re.search(r'\s*REBOL\s*\[', text, re.IGNORECASE): + # The code contains REBOL header but also some text before it + return 0.5 + + +class RedLexer(RegexLexer): + """ + A `Red-language `_ lexer. + + .. versionadded:: 2.0 + """ + name = 'Red' + aliases = ['red', 'red/system'] + filenames = ['*.red', '*.reds'] + mimetypes = ['text/x-red', 'text/x-red-system'] + + flags = re.IGNORECASE | re.MULTILINE + + escape_re = r'(?:\^\([0-9a-f]{1,4}\)*)' + + def word_callback(lexer, match): + word = match.group() + + if re.match(".*:$", word): + yield match.start(), Generic.Subheading, word + elif re.match(r'(if|unless|either|any|all|while|until|loop|repeat|' + r'foreach|forall|func|function|does|has|switch|' + r'case|reduce|compose|get|set|print|prin|equal\?|' + r'not-equal\?|strict-equal\?|lesser\?|greater\?|lesser-or-equal\?|' + r'greater-or-equal\?|same\?|not|type\?|stats|' + r'bind|union|replace|charset|routine)$', word): + yield match.start(), Name.Builtin, word + elif re.match(r'(make|random|reflect|to|form|mold|absolute|add|divide|multiply|negate|' + r'power|remainder|round|subtract|even\?|odd\?|and~|complement|or~|xor~|' + r'append|at|back|change|clear|copy|find|head|head\?|index\?|insert|' + r'length\?|next|pick|poke|remove|reverse|select|sort|skip|swap|tail|tail\?|'
+ r'take|trim|create|close|delete|modify|open|open\?|query|read|rename|' + r'update|write)$', word): + yield match.start(), Name.Function, word + elif re.match(r'(yes|on|no|off|true|false|tab|cr|lf|newline|escape|slash|sp|space|null|' + r'none|crlf|dot|null-byte)$', word): + yield match.start(), Name.Builtin.Pseudo, word + elif re.match(r'(#system-global|#include|#enum|#define|#either|#if|#import|#export|' + r'#switch|#default|#get-definition)$', word): + yield match.start(), Keyword.Namespace, word + elif re.match(r'(system|halt|quit|quit-return|do|load|q|recycle|call|run|ask|parse|' + r'raise-error|return|exit|break|alias|push|pop|probe|\?\?|spec-of|body-of|' + r'quote|forever)$', word): + yield match.start(), Name.Exception, word + elif re.match(r'(action\?|block\?|char\?|datatype\?|file\?|function\?|get-path\?|zero\?|' + r'get-word\?|integer\?|issue\?|lit-path\?|lit-word\?|logic\?|native\?|' + r'op\?|paren\?|path\?|refinement\?|set-path\?|set-word\?|string\?|unset\?|' + r'any-struct\?|none\?|word\?|any-series\?)$', word): + yield match.start(), Keyword, word + elif re.match(r'(JNICALL|stdcall|cdecl|infix)$', word): + yield match.start(), Keyword.Namespace, word + elif re.match("to-.*", word): + yield match.start(), Keyword, word + elif re.match('(\+|-\*\*|-|\*\*|//|/|\*|and|or|xor|=\?|===|==|=|<>|<=|>=|' + '<<<|>>>|<<|>>|<|>%)$', word): + yield match.start(), Operator, word + elif re.match(".*\!$", word): + yield match.start(), Keyword.Type, word + elif re.match("'.*", word): + yield match.start(), Name.Variable.Instance, word # lit-word + elif re.match("#.*", word): + yield match.start(), Name.Label, word # issue + elif re.match("%.*", word): + yield match.start(), Name.Decorator, word # file + elif re.match(":.*", word): + yield match.start(), Generic.Subheading, word # get-word + else: + yield match.start(), Name.Variable, word + + tokens = { + 'root': [ + (r'[^R]+', Comment), + (r'Red/System\s+\[', Generic.Strong, 'script'), + (r'Red\s+\[', Generic.Strong, 
'script'), + (r'R', Comment) + ], + 'script': [ + (r'\s+', Text), + (r'#"', String.Char, 'char'), + (r'#\{[0-9a-f\s]*\}', Number.Hex), + (r'2#\{', Number.Hex, 'bin2'), + (r'64#\{[0-9a-z+/=\s]*\}', Number.Hex), + (r'([0-9a-f]+)(h)((\s)|(?=[\[\]{}"()]))', + bygroups(Number.Hex, Name.Variable, Whitespace)), + (r'"', String, 'string'), + (r'\{', String, 'string2'), + (r';#+.*\n', Comment.Special), + (r';\*+.*\n', Comment.Preproc), + (r';.*\n', Comment), + (r'%"', Name.Decorator, 'stringFile'), + (r'%[^(^{")\s\[\]]+', Name.Decorator), + (r'[+-]?([a-z]{1,3})?\$\d+(\.\d+)?', Number.Float), # money + (r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other), # time + (r'\d+[\-/][0-9a-z]+[\-/]\d+(/\d+:\d+((:\d+)?' + r'([\.\d+]?([+-]?\d+:\d+)?)?)?)?', String.Other), # date + (r'\d+(\.\d+)+\.\d+', Keyword.Constant), # tuple + (r'\d+X\d+', Keyword.Constant), # pair + (r'[+-]?\d+(\'\d+)?([.,]\d*)?E[+-]?\d+', Number.Float), + (r'[+-]?\d+(\'\d+)?[.,]\d*', Number.Float), + (r'[+-]?\d+(\'\d+)?', Number), + (r'[\[\]()]', Generic.Strong), + (r'[a-z]+[^(^{"\s:)]*://[^(^{"\s)]*', Name.Decorator), # url + (r'mailto:[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # url + (r'[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # email + (r'comment\s"', Comment, 'commentString1'), + (r'comment\s\{', Comment, 'commentString2'), + (r'comment\s\[', Comment, 'commentBlock'), + (r'comment\s[^(\s{"\[]+', Comment), + (r'/[^(^{^")\s/[\]]*', Name.Attribute), + (r'([^(^{^")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback), + (r'<[\w:.-]*>', Name.Tag), + (r'<[^(<>\s")]+', Name.Tag, 'tag'), + (r'([^(^{")\s]+)', Text), + ], + 'string': [ + (r'[^(^")]+', String), + (escape_re, String.Escape), + (r'[(|)]+', String), + (r'\^.', String.Escape), + (r'"', String, '#pop'), + ], + 'string2': [ + (r'[^(^{})]+', String), + (escape_re, String.Escape), + (r'[(|)]+', String), + (r'\^.', String.Escape), + (r'\{', String, '#push'), + (r'\}', String, '#pop'), + ], + 'stringFile': [ + (r'[^(^")]+', Name.Decorator), + (escape_re, 
Name.Decorator), + (r'\^.', Name.Decorator), + (r'"', Name.Decorator, '#pop'), + ], + 'char': [ + (escape_re + '"', String.Char, '#pop'), + (r'\^."', String.Char, '#pop'), + (r'."', String.Char, '#pop'), + ], + 'tag': [ + (escape_re, Name.Tag), + (r'"', Name.Tag, 'tagString'), + (r'[^(<>\r\n")]+', Name.Tag), + (r'>', Name.Tag, '#pop'), + ], + 'tagString': [ + (r'[^(^")]+', Name.Tag), + (escape_re, Name.Tag), + (r'[(|)]+', Name.Tag), + (r'\^.', Name.Tag), + (r'"', Name.Tag, '#pop'), + ], + 'tuple': [ + (r'(\d+\.)+', Keyword.Constant), + (r'\d+', Keyword.Constant, '#pop'), + ], + 'bin2': [ + (r'\s+', Number.Hex), + (r'([01]\s*){8}', Number.Hex), + (r'\}', Number.Hex, '#pop'), + ], + 'commentString1': [ + (r'[^(^")]+', Comment), + (escape_re, Comment), + (r'[(|)]+', Comment), + (r'\^.', Comment), + (r'"', Comment, '#pop'), + ], + 'commentString2': [ + (r'[^(^{})]+', Comment), + (escape_re, Comment), + (r'[(|)]+', Comment), + (r'\^.', Comment), + (r'\{', Comment, '#push'), + (r'\}', Comment, '#pop'), + ], + 'commentBlock': [ + (r'\[', Comment, '#push'), + (r'\]', Comment, '#pop'), + (r'"', Comment, "commentString1"), + (r'\{', Comment, "commentString2"), + (r'[^(\[\]"{)]+', Comment), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/resource.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/resource.py new file mode 100644 index 0000000..6a0da2f --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/resource.py @@ -0,0 +1,84 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.resource + ~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexer for resource definition files. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import re + +from pygments.lexer import RegexLexer, bygroups, words +from pygments.token import Comment, String, Number, Operator, Text, \ + Keyword, Name + +__all__ = ['ResourceLexer'] + + +class ResourceLexer(RegexLexer): + """Lexer for `ICU Resource bundles + `_. + + .. versionadded:: 2.0 + """ + name = 'ResourceBundle' + aliases = ['resource', 'resourcebundle'] + filenames = ['*.txt'] + + _types = (':table', ':array', ':string', ':bin', ':import', ':intvector', + ':int', ':alias') + + flags = re.MULTILINE | re.IGNORECASE + tokens = { + 'root': [ + (r'//.*?$', Comment), + (r'"', String, 'string'), + (r'-?\d+', Number.Integer), + (r'[,{}]', Operator), + (r'([^\s{:]+)(\s*)(%s?)' % '|'.join(_types), + bygroups(Name, Text, Keyword)), + (r'\s+', Text), + (words(_types), Keyword), + ], + 'string': [ + (r'(\\x[0-9a-f]{2}|\\u[0-9a-f]{4}|\\U00[0-9a-f]{6}|' + r'\\[0-7]{1,3}|\\c.|\\[abtnvfre\'"?\\]|\\\{|[^"{\\])+', String), + (r'\{', String.Escape, 'msgname'), + (r'"', String, '#pop') + ], + 'msgname': [ + (r'([^{},]+)(\s*)', bygroups(Name, String.Escape), ('#pop', 'message')) + ], + 'message': [ + (r'\{', String.Escape, 'msgname'), + (r'\}', String.Escape, '#pop'), + (r'(,)(\s*)([a-z]+)(\s*\})', + bygroups(Operator, String.Escape, Keyword, String.Escape), '#pop'), + (r'(,)(\s*)([a-z]+)(\s*)(,)(\s*)(offset)(\s*)(:)(\s*)(-?\d+)(\s*)', + bygroups(Operator, String.Escape, Keyword, String.Escape, Operator, + String.Escape, Operator.Word, String.Escape, Operator, + String.Escape, Number.Integer, String.Escape), 'choice'), + (r'(,)(\s*)([a-z]+)(\s*)(,)(\s*)', + bygroups(Operator, String.Escape, Keyword, String.Escape, Operator, + String.Escape), 'choice'), + (r'\s+', String.Escape) + ], + 'choice': [ + (r'(=|<|>|<=|>=|!=)(-?\d+)(\s*\{)', + bygroups(Operator, Number.Integer, String.Escape), 'message'), + (r'([a-z]+)(\s*\{)', bygroups(Keyword.Type, String.Escape), 'str'), + (r'\}', String.Escape, ('#pop', '#pop')), + (r'\s+', String.Escape) + ], + 'str': [ + (r'\}', 
String.Escape, '#pop'), + (r'\{', String.Escape, 'msgname'), + (r'[^{}]+', String) + ] + } + + def analyse_text(text): + return text.startswith('root:table') diff --git a/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/_robotframeworklexer.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/robotframework.py similarity index 98% rename from plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/_robotframeworklexer.py rename to plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/robotframework.py index dbf5f5f..7b6f556 100644 --- a/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/_robotframeworklexer.py +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/robotframework.py @@ -1,11 +1,11 @@ # -*- coding: utf-8 -*- """ - pygments.lexers._robotframeworklexer - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + pygments.lexers.robotframework + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Lexer for Robot Framework. - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ @@ -27,6 +27,9 @@ import re from pygments.lexer import Lexer from pygments.token import Token +from pygments.util import text_type + +__all__ = ['RobotFrameworkLexer'] HEADING = Token.Generic.Heading @@ -57,10 +60,10 @@ class RobotFrameworkLexer(Lexer): Supports both space and pipe separated plain text formats. - *New in Pygments 1.6.* + .. 
versionadded:: 1.6 """ name = 'RobotFramework' - aliases = ['RobotFramework', 'robotframework'] + aliases = ['robotframework'] filenames = ['*.txt', '*.robot'] mimetypes = ['text/x-robotframework'] @@ -77,7 +80,7 @@ class RobotFrameworkLexer(Lexer): for value, token in row_tokenizer.tokenize(row): for value, token in var_tokenizer.tokenize(value, token): if value: - yield index, token, str(value) + yield index, token, text_type(value) index += len(value) diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/ruby.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/ruby.py new file mode 100644 index 0000000..d346df9 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/ruby.py @@ -0,0 +1,518 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.ruby + ~~~~~~~~~~~~~~~~~~~~ + + Lexers for Ruby and related languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import Lexer, RegexLexer, ExtendedRegexLexer, include, \ + bygroups, default, LexerContext, do_insertions, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Error, Generic +from pygments.util import shebang_matches + +__all__ = ['RubyLexer', 'RubyConsoleLexer', 'FancyLexer'] + +line_re = re.compile('.*?\n') + + +RUBY_OPERATORS = ( + '*', '**', '-', '+', '-@', '+@', '/', '%', '&', '|', '^', '`', '~', + '[]', '[]=', '<<', '>>', '<', '<>', '<=>', '>', '>=', '==', '===' +) + + +class RubyLexer(ExtendedRegexLexer): + """ + For `Ruby `_ source code. 
+ """ + + name = 'Ruby' + aliases = ['rb', 'ruby', 'duby'] + filenames = ['*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', + '*.rbx', '*.duby'] + mimetypes = ['text/x-ruby', 'application/x-ruby'] + + flags = re.DOTALL | re.MULTILINE + + def heredoc_callback(self, match, ctx): + # okay, this is the hardest part of parsing Ruby... + # match: 1 = <<-?, 2 = quote? 3 = name 4 = quote? 5 = rest of line + + start = match.start(1) + yield start, Operator, match.group(1) # <<-? + yield match.start(2), String.Heredoc, match.group(2) # quote ", ', ` + yield match.start(3), Name.Constant, match.group(3) # heredoc name + yield match.start(4), String.Heredoc, match.group(4) # quote again + + heredocstack = ctx.__dict__.setdefault('heredocstack', []) + outermost = not bool(heredocstack) + heredocstack.append((match.group(1) == '<<-', match.group(3))) + + ctx.pos = match.start(5) + ctx.end = match.end(5) + # this may find other heredocs + for i, t, v in self.get_tokens_unprocessed(context=ctx): + yield i, t, v + ctx.pos = match.end() + + if outermost: + # this is the outer heredoc again, now we can process them all + for tolerant, hdname in heredocstack: + lines = [] + for match in line_re.finditer(ctx.text, ctx.pos): + if tolerant: + check = match.group().strip() + else: + check = match.group().rstrip() + if check == hdname: + for amatch in lines: + yield amatch.start(), String.Heredoc, amatch.group() + yield match.start(), Name.Constant, match.group() + ctx.pos = match.end() + break + else: + lines.append(match) + else: + # end of heredoc not found -- error! 
+ for amatch in lines: + yield amatch.start(), Error, amatch.group() + ctx.end = len(ctx.text) + del heredocstack[:] + + def gen_rubystrings_rules(): + def intp_regex_callback(self, match, ctx): + yield match.start(1), String.Regex, match.group(1) # begin + nctx = LexerContext(match.group(3), 0, ['interpolated-regex']) + for i, t, v in self.get_tokens_unprocessed(context=nctx): + yield match.start(3)+i, t, v + yield match.start(4), String.Regex, match.group(4) # end[mixounse]* + ctx.pos = match.end() + + def intp_string_callback(self, match, ctx): + yield match.start(1), String.Other, match.group(1) + nctx = LexerContext(match.group(3), 0, ['interpolated-string']) + for i, t, v in self.get_tokens_unprocessed(context=nctx): + yield match.start(3)+i, t, v + yield match.start(4), String.Other, match.group(4) # end + ctx.pos = match.end() + + states = {} + states['strings'] = [ + # easy ones + (r'\:@{0,2}[a-zA-Z_]\w*[!?]?', String.Symbol), + (words(RUBY_OPERATORS, prefix=r'\:@{0,2}'), String.Symbol), + (r":'(\\\\|\\'|[^'])*'", String.Symbol), + (r"'(\\\\|\\'|[^'])*'", String.Single), + (r':"', String.Symbol, 'simple-sym'), + (r'([a-zA-Z_]\w*)(:)(?!:)', + bygroups(String.Symbol, Punctuation)), # Since Ruby 1.9 + (r'"', String.Double, 'simple-string'), + (r'(?', '<>', 'ab'): + states[name+'-intp-string'] = [ + (r'\\[\\' + bracecc + ']', String.Other), + (lbrace, String.Other, '#push'), + (rbrace, String.Other, '#pop'), + include('string-intp-escaped'), + (r'[\\#' + bracecc + ']', String.Other), + (r'[^\\#' + bracecc + ']+', String.Other), + ] + states['strings'].append((r'%[QWx]?' 
+ lbrace, String.Other, + name+'-intp-string')) + states[name+'-string'] = [ + (r'\\[\\' + bracecc + ']', String.Other), + (lbrace, String.Other, '#push'), + (rbrace, String.Other, '#pop'), + (r'[\\#' + bracecc + ']', String.Other), + (r'[^\\#' + bracecc + ']+', String.Other), + ] + states['strings'].append((r'%[qsw]' + lbrace, String.Other, + name+'-string')) + states[name+'-regex'] = [ + (r'\\[\\' + bracecc + ']', String.Regex), + (lbrace, String.Regex, '#push'), + (rbrace + '[mixounse]*', String.Regex, '#pop'), + include('string-intp'), + (r'[\\#' + bracecc + ']', String.Regex), + (r'[^\\#' + bracecc + ']+', String.Regex), + ] + states['strings'].append((r'%r' + lbrace, String.Regex, + name+'-regex')) + + # these must come after %! + states['strings'] += [ + # %r regex + (r'(%r([\W_]))((?:\\\2|(?!\2).)*)(\2[mixounse]*)', + intp_regex_callback), + # regular fancy strings with qsw + (r'%[qsw]([\W_])((?:\\\1|(?!\1).)*)\1', String.Other), + (r'(%[QWx]([\W_]))((?:\\\2|(?!\2).)*)(\2)', + intp_string_callback), + # special forms of fancy strings after operators or + # in method calls with braces + (r'(?<=[-+/*%=<>&!^|~,(])(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)', + bygroups(Text, String.Other, None)), + # and because of fixed width lookbehinds the whole thing a + # second time for line startings... 
+ (r'^(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)', + bygroups(Text, String.Other, None)), + # all regular fancy strings without qsw + (r'(%([^a-zA-Z0-9\s]))((?:\\\2|(?!\2).)*)(\2)', + intp_string_callback), + ] + + return states + + tokens = { + 'root': [ + (r'#.*?$', Comment.Single), + (r'=begin\s.*?\n=end.*?$', Comment.Multiline), + # keywords + (words(( + 'BEGIN', 'END', 'alias', 'begin', 'break', 'case', 'defined?', + 'do', 'else', 'elsif', 'end', 'ensure', 'for', 'if', 'in', 'next', 'redo', + 'rescue', 'raise', 'retry', 'return', 'super', 'then', 'undef', + 'unless', 'until', 'when', 'while', 'yield'), suffix=r'\b'), + Keyword), + # start of function, class and module names + (r'(module)(\s+)([a-zA-Z_]\w*' + r'(?:::[a-zA-Z_]\w*)*)', + bygroups(Keyword, Text, Name.Namespace)), + (r'(def)(\s+)', bygroups(Keyword, Text), 'funcname'), + (r'def(?=[*%&^`~+-/\[<>=])', Keyword, 'funcname'), + (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'), + # special methods + (words(( + 'initialize', 'new', 'loop', 'include', 'extend', 'raise', 'attr_reader', + 'attr_writer', 'attr_accessor', 'attr', 'catch', 'throw', 'private', + 'module_function', 'public', 'protected', 'true', 'false', 'nil'), + suffix=r'\b'), + Keyword.Pseudo), + (r'(not|and|or)\b', Operator.Word), + (words(( + 'autoload', 'block_given', 'const_defined', 'eql', 'equal', 'frozen', 'include', + 'instance_of', 'is_a', 'iterator', 'kind_of', 'method_defined', 'nil', + 'private_method_defined', 'protected_method_defined', + 'public_method_defined', 'respond_to', 'tainted'), suffix=r'\?'), + Name.Builtin), + (r'(chomp|chop|exit|gsub|sub)!', Name.Builtin), + (words(( + 'Array', 'Float', 'Integer', 'String', '__id__', '__send__', 'abort', + 'ancestors', 'at_exit', 'autoload', 'binding', 'callcc', 'caller', + 'catch', 'chomp', 'chop', 'class_eval', 'class_variables', + 'clone', 'const_defined?', 'const_get', 'const_missing', 'const_set', + 'constants', 'display', 'dup', 'eval', 'exec', 'exit', 'extend', 'fail', 
'fork', + 'format', 'freeze', 'getc', 'gets', 'global_variables', 'gsub', + 'hash', 'id', 'included_modules', 'inspect', 'instance_eval', + 'instance_method', 'instance_methods', + 'instance_variable_get', 'instance_variable_set', 'instance_variables', + 'lambda', 'load', 'local_variables', 'loop', + 'method', 'method_missing', 'methods', 'module_eval', 'name', + 'object_id', 'open', 'p', 'print', 'printf', 'private_class_method', + 'private_instance_methods', + 'private_methods', 'proc', 'protected_instance_methods', + 'protected_methods', 'public_class_method', + 'public_instance_methods', 'public_methods', + 'putc', 'puts', 'raise', 'rand', 'readline', 'readlines', 'require', + 'scan', 'select', 'self', 'send', 'set_trace_func', 'singleton_methods', 'sleep', + 'split', 'sprintf', 'srand', 'sub', 'syscall', 'system', 'taint', + 'test', 'throw', 'to_a', 'to_s', 'trace_var', 'trap', 'untaint', + 'untrace_var', 'warn'), prefix=r'(?~!:])|' + r'(?<=(?:\s|;)when\s)|' + r'(?<=(?:\s|;)or\s)|' + r'(?<=(?:\s|;)and\s)|' + r'(?<=(?:\s|;|\.)index\s)|' + r'(?<=(?:\s|;|\.)scan\s)|' + r'(?<=(?:\s|;|\.)sub\s)|' + r'(?<=(?:\s|;|\.)sub!\s)|' + r'(?<=(?:\s|;|\.)gsub\s)|' + r'(?<=(?:\s|;|\.)gsub!\s)|' + r'(?<=(?:\s|;|\.)match\s)|' + r'(?<=(?:\s|;)if\s)|' + r'(?<=(?:\s|;)elsif\s)|' + r'(?<=^when\s)|' + r'(?<=^index\s)|' + r'(?<=^scan\s)|' + r'(?<=^sub\s)|' + r'(?<=^gsub\s)|' + r'(?<=^sub!\s)|' + r'(?<=^gsub!\s)|' + r'(?<=^match\s)|' + r'(?<=^if\s)|' + r'(?<=^elsif\s)' + r')(\s*)(/)', bygroups(Text, String.Regex), 'multiline-regex'), + # multiline regex (in method calls or subscripts) + (r'(?<=\(|,|\[)/', String.Regex, 'multiline-regex'), + # multiline regex (this time the funny no whitespace rule) + (r'(\s+)(/)(?![\s=])', bygroups(Text, String.Regex), + 'multiline-regex'), + # lex numbers and ignore following regular expressions which + # are division operators in fact (grrrr. i hate that. any + # better ideas?) + # since pygments 0.7 we also eat a "?" 
operator after numbers + # so that the char operator does not work. Chars are not allowed + # there so that you can use the ternary operator. + # stupid example: + # x>=0?n[x]:"" + (r'(0_?[0-7]+(?:_[0-7]+)*)(\s*)([/?])?', + bygroups(Number.Oct, Text, Operator)), + (r'(0x[0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*)(\s*)([/?])?', + bygroups(Number.Hex, Text, Operator)), + (r'(0b[01]+(?:_[01]+)*)(\s*)([/?])?', + bygroups(Number.Bin, Text, Operator)), + (r'([\d]+(?:_\d+)*)(\s*)([/?])?', + bygroups(Number.Integer, Text, Operator)), + # Names + (r'@@[a-zA-Z_]\w*', Name.Variable.Class), + (r'@[a-zA-Z_]\w*', Name.Variable.Instance), + (r'\$\w+', Name.Variable.Global), + (r'\$[!@&`\'+~=/\\,;.<>_*$?:"^-]', Name.Variable.Global), + (r'\$-[0adFiIlpvw]', Name.Variable.Global), + (r'::', Operator), + include('strings'), + # chars + (r'\?(\\[MC]-)*' # modifiers + r'(\\([\\abefnrstv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})|\S)' + r'(?!\w)', + String.Char), + (r'[A-Z]\w+', Name.Constant), + # this is needed because ruby attributes can look + # like keywords (class) or like this: ` ?!? + (words(RUBY_OPERATORS, prefix=r'(\.|::)'), + bygroups(Operator, Name.Operator)), + (r'(\.|::)([a-zA-Z_]\w*[!?]?|[*%&^`~+\-/\[<>=])', + bygroups(Operator, Name)), + (r'[a-zA-Z_]\w*[!?]?', Name), + (r'(\[|\]|\*\*|<>?|>=|<=|<=>|=~|={3}|' + r'!~|&&?|\|\||\.{1,3})', Operator), + (r'[-+/*%=<>&!^|~]=?', Operator), + (r'[(){};,/?:\\]', Punctuation), + (r'\s+', Text) + ], + 'funcname': [ + (r'\(', Punctuation, 'defexpr'), + (r'(?:([a-zA-Z_]\w*)(\.))?' 
+ r'([a-zA-Z_]\w*[!?]?|\*\*?|[-+]@?|' + r'[/%&|^`~]|\[\]=?|<<|>>|<=?>|>=?|===?)', + bygroups(Name.Class, Operator, Name.Function), '#pop'), + default('#pop') + ], + 'classname': [ + (r'\(', Punctuation, 'defexpr'), + (r'<<', Operator, '#pop'), + (r'[A-Z_]\w*', Name.Class, '#pop'), + default('#pop') + ], + 'defexpr': [ + (r'(\))(\.|::)?', bygroups(Punctuation, Operator), '#pop'), + (r'\(', Operator, '#push'), + include('root') + ], + 'in-intp': [ + (r'\{', String.Interpol, '#push'), + (r'\}', String.Interpol, '#pop'), + include('root'), + ], + 'string-intp': [ + (r'#\{', String.Interpol, 'in-intp'), + (r'#@@?[a-zA-Z_]\w*', String.Interpol), + (r'#\$[a-zA-Z_]\w*', String.Interpol) + ], + 'string-intp-escaped': [ + include('string-intp'), + (r'\\([\\abefnrstv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})', + String.Escape) + ], + 'interpolated-regex': [ + include('string-intp'), + (r'[\\#]', String.Regex), + (r'[^\\#]+', String.Regex), + ], + 'interpolated-string': [ + include('string-intp'), + (r'[\\#]', String.Other), + (r'[^\\#]+', String.Other), + ], + 'multiline-regex': [ + include('string-intp'), + (r'\\\\', String.Regex), + (r'\\/', String.Regex), + (r'[\\#]', String.Regex), + (r'[^\\/#]+', String.Regex), + (r'/[mixounse]*', String.Regex, '#pop'), + ], + 'end-part': [ + (r'.+', Comment.Preproc, '#pop') + ] + } + tokens.update(gen_rubystrings_rules()) + + def analyse_text(text): + return shebang_matches(text, r'ruby(1\.\d)?') + + +class RubyConsoleLexer(Lexer): + """ + For Ruby interactive console (**irb**) output like: + + .. 
sourcecode:: rbcon + + irb(main):001:0> a = 1 + => 1 + irb(main):002:0> puts a + 1 + => nil + """ + name = 'Ruby irb session' + aliases = ['rbcon', 'irb'] + mimetypes = ['text/x-ruby-shellsession'] + + _prompt_re = re.compile('irb\([a-zA-Z_]\w*\):\d{3}:\d+[>*"\'] ' + '|>> |\?> ') + + def get_tokens_unprocessed(self, text): + rblexer = RubyLexer(**self.options) + + curcode = '' + insertions = [] + for match in line_re.finditer(text): + line = match.group() + m = self._prompt_re.match(line) + if m is not None: + end = m.end() + insertions.append((len(curcode), + [(0, Generic.Prompt, line[:end])])) + curcode += line[end:] + else: + if curcode: + for item in do_insertions( + insertions, rblexer.get_tokens_unprocessed(curcode)): + yield item + curcode = '' + insertions = [] + yield match.start(), Generic.Output, line + if curcode: + for item in do_insertions( + insertions, rblexer.get_tokens_unprocessed(curcode)): + yield item + + +class FancyLexer(RegexLexer): + """ + Pygments Lexer For `Fancy `_. + + Fancy is a self-hosted, pure object-oriented, dynamic, + class-based, concurrent general-purpose programming language + running on Rubinius, the Ruby VM. + + .. 
versionadded:: 1.5 + """ + name = 'Fancy' + filenames = ['*.fy', '*.fancypack'] + aliases = ['fancy', 'fy'] + mimetypes = ['text/x-fancysrc'] + + tokens = { + # copied from PerlLexer: + 'balanced-regex': [ + (r'/(\\\\|\\/|[^/])*/[egimosx]*', String.Regex, '#pop'), + (r'!(\\\\|\\!|[^!])*![egimosx]*', String.Regex, '#pop'), + (r'\\(\\\\|[^\\])*\\[egimosx]*', String.Regex, '#pop'), + (r'\{(\\\\|\\\}|[^}])*\}[egimosx]*', String.Regex, '#pop'), + (r'<(\\\\|\\>|[^>])*>[egimosx]*', String.Regex, '#pop'), + (r'\[(\\\\|\\\]|[^\]])*\][egimosx]*', String.Regex, '#pop'), + (r'\((\\\\|\\\)|[^)])*\)[egimosx]*', String.Regex, '#pop'), + (r'@(\\\\|\\@|[^@])*@[egimosx]*', String.Regex, '#pop'), + (r'%(\\\\|\\%|[^%])*%[egimosx]*', String.Regex, '#pop'), + (r'\$(\\\\|\\\$|[^$])*\$[egimosx]*', String.Regex, '#pop'), + ], + 'root': [ + (r'\s+', Text), + + # balanced delimiters (copied from PerlLexer): + (r's\{(\\\\|\\\}|[^}])*\}\s*', String.Regex, 'balanced-regex'), + (r's<(\\\\|\\>|[^>])*>\s*', String.Regex, 'balanced-regex'), + (r's\[(\\\\|\\\]|[^\]])*\]\s*', String.Regex, 'balanced-regex'), + (r's\((\\\\|\\\)|[^)])*\)\s*', String.Regex, 'balanced-regex'), + (r'm?/(\\\\|\\/|[^/\n])*/[gcimosx]*', String.Regex), + (r'm(?=[/!\\{<\[(@%$])', String.Regex, 'balanced-regex'), + + # Comments + (r'#(.*?)\n', Comment.Single), + # Symbols + (r'\'([^\'\s\[\](){}]+|\[\])', String.Symbol), + # Multi-line DoubleQuotedString + (r'"""(\\\\|\\"|[^"])*"""', String), + # DoubleQuotedString + (r'"(\\\\|\\"|[^"])*"', String), + # keywords + (r'(def|class|try|catch|finally|retry|return|return_local|match|' + r'case|->|=>)\b', Keyword), + # constants + (r'(self|super|nil|false|true)\b', Name.Constant), + (r'[(){};,/?|:\\]', Punctuation), + # names + (words(( + 'Object', 'Array', 'Hash', 'Directory', 'File', 'Class', 'String', + 'Number', 'Enumerable', 'FancyEnumerable', 'Block', 'TrueClass', + 'NilClass', 'FalseClass', 'Tuple', 'Symbol', 'Stack', 'Set', + 'FancySpec', 'Method', 'Package', 'Range'), 
suffix=r'\b'), + Name.Builtin), + # functions + (r'[a-zA-Z](\w|[-+?!=*/^><%])*:', Name.Function), + # operators, must be below functions + (r'[-+*/~,<>=&!?%^\[\].$]+', Operator), + ('[A-Z]\w*', Name.Constant), + ('@[a-zA-Z_]\w*', Name.Variable.Instance), + ('@@[a-zA-Z_]\w*', Name.Variable.Class), + ('@@?', Operator), + ('[a-zA-Z_]\w*', Name), + # numbers - / checks are necessary to avoid mismarking regexes, + # see comment in RubyLexer + (r'(0[oO]?[0-7]+(?:_[0-7]+)*)(\s*)([/?])?', + bygroups(Number.Oct, Text, Operator)), + (r'(0[xX][0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*)(\s*)([/?])?', + bygroups(Number.Hex, Text, Operator)), + (r'(0[bB][01]+(?:_[01]+)*)(\s*)([/?])?', + bygroups(Number.Bin, Text, Operator)), + (r'([\d]+(?:_\d+)*)(\s*)([/?])?', + bygroups(Number.Integer, Text, Operator)), + (r'\d+([eE][+-]?[0-9]+)|\d+\.\d+([eE][+-]?[0-9]+)?', Number.Float), + (r'\d+', Number.Integer) + ] + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/rust.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/rust.py new file mode 100644 index 0000000..4447e1d --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/rust.py @@ -0,0 +1,167 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.rust + ~~~~~~~~~~~~~~~~~~~~ + + Lexers for the Rust language. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, include, bygroups, words, default +from pygments.token import Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Whitespace + +__all__ = ['RustLexer'] + + +class RustLexer(RegexLexer): + """ + Lexer for the Rust programming language (version 0.9). + + .. 
versionadded:: 1.6 + """ + name = 'Rust' + filenames = ['*.rs'] + aliases = ['rust'] + mimetypes = ['text/x-rustsrc'] + + tokens = { + 'root': [ + # Whitespace and Comments + (r'\n', Whitespace), + (r'\s+', Whitespace), + (r'//[/!](.*?)\n', Comment.Doc), + (r'//(.*?)\n', Comment.Single), + (r'/\*', Comment.Multiline, 'comment'), + + # Lifetime + (r"""'[a-zA-Z_]\w*""", Name.Label), + # Macro parameters + (r"""\$([a-zA-Z_]\w*|\(,?|\),?|,?)""", Comment.Preproc), + # Keywords + (words(( + 'as', 'box', 'break', 'continue', 'do', 'else', 'enum', 'extern', + 'fn', 'for', 'if', 'impl', 'in', 'loop', 'match', 'mut', 'priv', + 'proc', 'pub', 'ref', 'return', 'static', '\'static', 'struct', + 'trait', 'true', 'type', 'unsafe', 'while'), suffix=r'\b'), + Keyword), + (words(('alignof', 'be', 'const', 'offsetof', 'pure', 'sizeof', + 'typeof', 'once', 'unsized', 'yield'), suffix=r'\b'), + Keyword.Reserved), + (r'(mod|use)\b', Keyword.Namespace), + (r'(true|false)\b', Keyword.Constant), + (r'let\b', Keyword.Declaration), + (words(('u8', 'u16', 'u32', 'u64', 'i8', 'i16', 'i32', 'i64', 'uint', + 'int', 'f32', 'f64', 'str', 'bool'), suffix=r'\b'), + Keyword.Type), + (r'self\b', Name.Builtin.Pseudo), + # Prelude + (words(( + 'Freeze', 'Pod', 'Send', 'Sized', 'Add', 'Sub', 'Mul', 'Div', 'Rem', 'Neg', 'Not', 'BitAnd', + 'BitOr', 'BitXor', 'Drop', 'Shl', 'Shr', 'Index', 'Option', 'Some', 'None', 'Result', + 'Ok', 'Err', 'from_str', 'range', 'print', 'println', 'Any', 'AnyOwnExt', 'AnyRefExt', + 'AnyMutRefExt', 'Ascii', 'AsciiCast', 'OnwedAsciiCast', 'AsciiStr', + 'IntoBytes', 'Bool', 'ToCStr', 'Char', 'Clone', 'DeepClone', 'Eq', 'ApproxEq', + 'Ord', 'TotalEq', 'Ordering', 'Less', 'Equal', 'Greater', 'Equiv', 'Container', + 'Mutable', 'Map', 'MutableMap', 'Set', 'MutableSet', 'Default', 'FromStr', + 'Hash', 'FromIterator', 'Extendable', 'Iterator', 'DoubleEndedIterator', + 'RandomAccessIterator', 'CloneableIterator', 'OrdIterator', + 'MutableDoubleEndedIterator', 'ExactSize', 'Times', 
'Algebraic', + 'Trigonometric', 'Exponential', 'Hyperbolic', 'Bitwise', 'BitCount', + 'Bounded', 'Integer', 'Fractional', 'Real', 'RealExt', 'Num', 'NumCast', + 'CheckedAdd', 'CheckedSub', 'CheckedMul', 'Orderable', 'Signed', + 'Unsigned', 'Round', 'Primitive', 'Int', 'Float', 'ToStrRadix', + 'ToPrimitive', 'FromPrimitive', 'GenericPath', 'Path', 'PosixPath', + 'WindowsPath', 'RawPtr', 'Buffer', 'Writer', 'Reader', 'Seek', + 'SendStr', 'SendStrOwned', 'SendStrStatic', 'IntoSendStr', 'Str', + 'StrVector', 'StrSlice', 'OwnedStr', 'IterBytes', 'ToStr', 'IntoStr', + 'CopyableTuple', 'ImmutableTuple', 'ImmutableEqVector', 'ImmutableTotalOrdVector', + 'ImmutableCopyableVector', 'OwnedVector', 'OwnedCopyableVector', + 'OwnedEqVector', 'MutableVector', 'MutableTotalOrdVector', + 'Vector', 'VectorVector', 'CopyableVector', 'ImmutableVector', + 'Port', 'Chan', 'SharedChan', 'spawn', 'drop'), suffix=r'\b'), + Name.Builtin), + (r'(ImmutableTuple\d+|Tuple\d+)\b', Name.Builtin), + # Borrowed pointer + (r'(&)(\'[A-Za-z_]\w*)?', bygroups(Operator, Name)), + # Labels + (r'\'[A-Za-z_]\w*:', Name.Label), + # Character Literal + (r"""'(\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}""" + r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|.)'""", + String.Char), + # Binary Literal + (r'0b[01_]+', Number.Bin, 'number_lit'), + # Octal Literal + (r'0o[0-7_]+', Number.Oct, 'number_lit'), + # Hexadecimal Literal + (r'0[xX][0-9a-fA-F_]+', Number.Hex, 'number_lit'), + # Decimal Literal + (r'[0-9][0-9_]*(\.[0-9_]+[eE][+\-]?[0-9_]+|' + r'\.[0-9_]*|[eE][+\-]?[0-9_]+)', Number.Float, 'number_lit'), + (r'[0-9][0-9_]*', Number.Integer, 'number_lit'), + # String Literal + (r'"', String, 'string'), + (r'r(#*)".*?"\1', String.Raw), + + # Operators and Punctuation + (r'[{}()\[\],.;]', Punctuation), + (r'[+\-*/%&|<>^!~@=:?]', Operator), + + # Identifier + (r'[a-zA-Z_]\w*', Name), + + # Attributes + (r'#!?\[', Comment.Preproc, 'attribute['), + # Macros + (r'([A-Za-z_]\w*)(!)(\s*)([A-Za-z_]\w*)?(\s*)(\{)', + 
bygroups(Comment.Preproc, Punctuation, Whitespace, Name, + Whitespace, Punctuation), 'macro{'), + (r'([A-Za-z_]\w*)(!)(\s*)([A-Za-z_]\w*)?(\()', + bygroups(Comment.Preproc, Punctuation, Whitespace, Name, + Punctuation), 'macro('), + ], + 'comment': [ + (r'[^*/]+', Comment.Multiline), + (r'/\*', Comment.Multiline, '#push'), + (r'\*/', Comment.Multiline, '#pop'), + (r'[*/]', Comment.Multiline), + ], + 'number_lit': [ + (r'[ui](8|16|32|64)', Keyword, '#pop'), + (r'f(32|64)', Keyword, '#pop'), + default('#pop'), + ], + 'string': [ + (r'"', String, '#pop'), + (r"""\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}""" + r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}""", String.Escape), + (r'[^\\"]+', String), + (r'\\', String), + ], + 'macro{': [ + (r'\{', Operator, '#push'), + (r'\}', Operator, '#pop'), + ], + 'macro(': [ + (r'\(', Operator, '#push'), + (r'\)', Operator, '#pop'), + ], + 'attribute_common': [ + (r'"', String, 'string'), + (r'\[', Comment.Preproc, 'attribute['), + (r'\(', Comment.Preproc, 'attribute('), + ], + 'attribute[': [ + include('attribute_common'), + (r'\];?', Comment.Preproc, '#pop'), + (r'[^"\]]+', Comment.Preproc), + ], + 'attribute(': [ + include('attribute_common'), + (r'\);?', Comment.Preproc, '#pop'), + (r'[^")]+', Comment.Preproc), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/scripting.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/scripting.py new file mode 100644 index 0000000..678cab2 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/scripting.py @@ -0,0 +1,923 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.scripting + ~~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexer for scripting and embedded languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import re + +from pygments.lexer import RegexLexer, include, bygroups, default, combined, \ + words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Error, Whitespace +from pygments.util import get_bool_opt, get_list_opt, iteritems + +__all__ = ['LuaLexer', 'MoonScriptLexer', 'ChaiscriptLexer', 'LSLLexer', + 'AppleScriptLexer', 'RexxLexer', 'MOOCodeLexer', 'HybrisLexer'] + + +class LuaLexer(RegexLexer): + """ + For `Lua `_ source code. + + Additional options accepted: + + `func_name_highlighting` + If given and ``True``, highlight builtin function names + (default: ``True``). + `disabled_modules` + If given, must be a list of module names whose function names + should not be highlighted. By default all modules are highlighted. + + To get a list of allowed modules have a look into the + `_lua_builtins` module: + + .. sourcecode:: pycon + + >>> from pygments.lexers._lua_builtins import MODULES + >>> MODULES.keys() + ['string', 'coroutine', 'modules', 'io', 'basic', ...] 
+ """ + + name = 'Lua' + aliases = ['lua'] + filenames = ['*.lua', '*.wlua'] + mimetypes = ['text/x-lua', 'application/x-lua'] + + tokens = { + 'root': [ + # lua allows a file to start with a shebang + (r'#!(.*?)$', Comment.Preproc), + default('base'), + ], + 'base': [ + (r'(?s)--\[(=*)\[.*?\]\1\]', Comment.Multiline), + ('--.*$', Comment.Single), + + (r'(?i)(\d*\.\d+|\d+\.\d*)(e[+-]?\d+)?', Number.Float), + (r'(?i)\d+e[+-]?\d+', Number.Float), + ('(?i)0x[0-9a-f]*', Number.Hex), + (r'\d+', Number.Integer), + + (r'\n', Text), + (r'[^\S\n]', Text), + # multiline strings + (r'(?s)\[(=*)\[.*?\]\1\]', String), + + (r'(==|~=|<=|>=|\.\.\.|\.\.|[=+\-*/%^<>#])', Operator), + (r'[\[\]{}().,:;]', Punctuation), + (r'(and|or|not)\b', Operator.Word), + + ('(break|do|else|elseif|end|for|if|in|repeat|return|then|until|' + r'while)\b', Keyword), + (r'(local)\b', Keyword.Declaration), + (r'(true|false|nil)\b', Keyword.Constant), + + (r'(function)\b', Keyword, 'funcname'), + + (r'[A-Za-z_]\w*(\.[A-Za-z_]\w*)?', Name), + + ("'", String.Single, combined('stringescape', 'sqs')), + ('"', String.Double, combined('stringescape', 'dqs')) + ], + + 'funcname': [ + (r'\s+', Text), + ('(?:([A-Za-z_]\w*)(\.))?([A-Za-z_]\w*)', + bygroups(Name.Class, Punctuation, Name.Function), '#pop'), + # inline function + ('\(', Punctuation, '#pop'), + ], + + # if I understand correctly, every character is valid in a lua string, + # so this state is only for later corrections + 'string': [ + ('.', String) + ], + + 'stringescape': [ + (r'''\\([abfnrtv\\"']|\d{1,3})''', String.Escape) + ], + + 'sqs': [ + ("'", String, '#pop'), + include('string') + ], + + 'dqs': [ + ('"', String, '#pop'), + include('string') + ] + } + + def __init__(self, **options): + self.func_name_highlighting = get_bool_opt( + options, 'func_name_highlighting', True) + self.disabled_modules = get_list_opt(options, 'disabled_modules', []) + + self._functions = set() + if self.func_name_highlighting: + from pygments.lexers._lua_builtins import 
MODULES + for mod, func in iteritems(MODULES): + if mod not in self.disabled_modules: + self._functions.update(func) + RegexLexer.__init__(self, **options) + + def get_tokens_unprocessed(self, text): + for index, token, value in \ + RegexLexer.get_tokens_unprocessed(self, text): + if token is Name: + if value in self._functions: + yield index, Name.Builtin, value + continue + elif '.' in value: + a, b = value.split('.') + yield index, Name, a + yield index + len(a), Punctuation, u'.' + yield index + len(a) + 1, Name, b + continue + yield index, token, value + + +class MoonScriptLexer(LuaLexer): + """ + For `MoonScript `_ source code. + + .. versionadded:: 1.5 + """ + + name = "MoonScript" + aliases = ["moon", "moonscript"] + filenames = ["*.moon"] + mimetypes = ['text/x-moonscript', 'application/x-moonscript'] + + tokens = { + 'root': [ + (r'#!(.*?)$', Comment.Preproc), + default('base'), + ], + 'base': [ + ('--.*$', Comment.Single), + (r'(?i)(\d*\.\d+|\d+\.\d*)(e[+-]?\d+)?', Number.Float), + (r'(?i)\d+e[+-]?\d+', Number.Float), + (r'(?i)0x[0-9a-f]*', Number.Hex), + (r'\d+', Number.Integer), + (r'\n', Text), + (r'[^\S\n]+', Text), + (r'(?s)\[(=*)\[.*?\]\1\]', String), + (r'(->|=>)', Name.Function), + (r':[a-zA-Z_]\w*', Name.Variable), + (r'(==|!=|~=|<=|>=|\.\.\.|\.\.|[=+\-*/%^<>#!.\\:])', Operator), + (r'[;,]', Punctuation), + (r'[\[\]{}()]', Keyword.Type), + (r'[a-zA-Z_]\w*:', Name.Variable), + (words(( + 'class', 'extends', 'if', 'then', 'super', 'do', 'with', + 'import', 'export', 'while', 'elseif', 'return', 'for', 'in', + 'from', 'when', 'using', 'else', 'and', 'or', 'not', 'switch', + 'break'), suffix=r'\b'), + Keyword), + (r'(true|false|nil)\b', Keyword.Constant), + (r'(and|or|not)\b', Operator.Word), + (r'(self)\b', Name.Builtin.Pseudo), + (r'@@?([a-zA-Z_]\w*)?', Name.Variable.Class), + (r'[A-Z]\w*', Name.Class), # proper name + (r'[A-Za-z_]\w*(\.[A-Za-z_]\w*)?', Name), + ("'", String.Single, combined('stringescape', 'sqs')), + ('"', String.Double, 
combined('stringescape', 'dqs')) + ], + 'stringescape': [ + (r'''\\([abfnrtv\\"']|\d{1,3})''', String.Escape) + ], + 'sqs': [ + ("'", String.Single, '#pop'), + (".", String) + ], + 'dqs': [ + ('"', String.Double, '#pop'), + (".", String) + ] + } + + def get_tokens_unprocessed(self, text): + # set . as Operator instead of Punctuation + for index, token, value in LuaLexer.get_tokens_unprocessed(self, text): + if token == Punctuation and value == ".": + token = Operator + yield index, token, value + + +class ChaiscriptLexer(RegexLexer): + """ + For `ChaiScript `_ source code. + + .. versionadded:: 2.0 + """ + + name = 'ChaiScript' + aliases = ['chai', 'chaiscript'] + filenames = ['*.chai'] + mimetypes = ['text/x-chaiscript', 'application/x-chaiscript'] + + flags = re.DOTALL | re.MULTILINE + + tokens = { + 'commentsandwhitespace': [ + (r'\s+', Text), + (r'//.*?\n', Comment.Single), + (r'/\*.*?\*/', Comment.Multiline), + (r'^\#.*?\n', Comment.Single) + ], + 'slashstartsregex': [ + include('commentsandwhitespace'), + (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' + r'([gim]+\b|\B)', String.Regex, '#pop'), + (r'(?=/)', Text, ('#pop', 'badregex')), + default('#pop') + ], + 'badregex': [ + (r'\n', Text, '#pop') + ], + 'root': [ + include('commentsandwhitespace'), + (r'\n', Text), + (r'[^\S\n]+', Text), + (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|\.\.' 
+ r'(<<|>>>?|==?|!=?|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'), + (r'[{(\[;,]', Punctuation, 'slashstartsregex'), + (r'[})\].]', Punctuation), + (r'[=+\-*/]', Operator), + (r'(for|in|while|do|break|return|continue|if|else|' + r'throw|try|catch' + r')\b', Keyword, 'slashstartsregex'), + (r'(var)\b', Keyword.Declaration, 'slashstartsregex'), + (r'(attr|def|fun)\b', Keyword.Reserved), + (r'(true|false)\b', Keyword.Constant), + (r'(eval|throw)\b', Name.Builtin), + (r'`\S+`', Name.Builtin), + (r'[$a-zA-Z_]\w*', Name.Other), + (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), + (r'0x[0-9a-fA-F]+', Number.Hex), + (r'[0-9]+', Number.Integer), + (r'"', String.Double, 'dqstring'), + (r"'(\\\\|\\'|[^'])*'", String.Single), + ], + 'dqstring': [ + (r'\$\{[^"}]+?\}', String.Interpol), + (r'\$', String.Double), + (r'\\\\', String.Double), + (r'\\"', String.Double), + (r'[^\\"$]+', String.Double), + (r'"', String.Double, '#pop'), + ], + } + + +class LSLLexer(RegexLexer): + """ + For Second Life's Linden Scripting Language source code. + + .. 
versionadded:: 2.0 + """ + + name = 'LSL' + aliases = ['lsl'] + filenames = ['*.lsl'] + mimetypes = ['text/x-lsl'] + + flags = re.MULTILINE + + lsl_keywords = r'\b(?:do|else|for|if|jump|return|while)\b' + lsl_types = r'\b(?:float|integer|key|list|quaternion|rotation|string|vector)\b' + lsl_states = r'\b(?:(?:state)\s+\w+|default)\b' + lsl_events = r'\b(?:state_(?:entry|exit)|touch(?:_(?:start|end))?|(?:land_)?collision(?:_(?:start|end))?|timer|listen|(?:no_)?sensor|control|(?:not_)?at_(?:rot_)?target|money|email|run_time_permissions|changed|attach|dataserver|moving_(?:start|end)|link_message|(?:on|object)_rez|remote_data|http_re(?:sponse|quest)|path_update|transaction_result)\b' + lsl_functions_builtin = r'\b(?:ll(?:ReturnObjectsBy(?:ID|Owner)|Json(?:2List|[GS]etValue|ValueType)|Sin|Cos|Tan|Atan2|Sqrt|Pow|Abs|Fabs|Frand|Floor|Ceil|Round|Vec(?:Mag|Norm|Dist)|Rot(?:Between|2(?:Euler|Fwd|Left|Up))|(?:Euler|Axes)2Rot|Whisper|(?:Region|Owner)?Say|Shout|Listen(?:Control|Remove)?|Sensor(?:Repeat|Remove)?|Detected(?:Name|Key|Owner|Type|Pos|Vel|Grab|Rot|Group|LinkNumber)|Die|Ground|Wind|(?:[GS]et)(?:AnimationOverride|MemoryLimit|PrimMediaParams|ParcelMusicURL|Object(?:Desc|Name)|PhysicsMaterial|Status|Scale|Color|Alpha|Texture|Pos|Rot|Force|Torque)|ResetAnimationOverride|(?:Scale|Offset|Rotate)Texture|(?:Rot)?Target(?:Remove)?|(?:Stop)?MoveToTarget|Apply(?:Rotational)?Impulse|Set(?:KeyframedMotion|ContentType|RegionPos|(?:Angular)?Velocity|Buoyancy|HoverHeight|ForceAndTorque|TimerEvent|ScriptState|Damage|TextureAnim|Sound(?:Queueing|Radius)|Vehicle(?:Type|(?:Float|Vector|Rotation)Param)|(?:Touch|Sit)?Text|Camera(?:Eye|At)Offset|PrimitiveParams|ClickAction|Link(?:Alpha|Color|PrimitiveParams(?:Fast)?|Texture(?:Anim)?|Camera|Media)|RemoteScriptAccessPin|PayPrice|LocalRot)|ScaleByFactor|Get(?:(?:Max|Min)ScaleFactor|ClosestNavPoint|StaticPath|SimStats|Env|PrimitiveParams|Link(?:PrimitiveParams|Number(?:OfSides)?|Key|Name|Media)|HTTPHeader|FreeURLs|Object(?:Details|PermMask|PrimCo
unt)|Parcel(?:MaxPrims|Details|Prim(?:Count|Owners))|Attached|(?:SPMax|Free|Used)Memory|Region(?:Name|TimeDilation|FPS|Corner|AgentCount)|Root(?:Position|Rotation)|UnixTime|(?:Parcel|Region)Flags|(?:Wall|GMT)clock|SimulatorHostname|BoundingBox|GeometricCenter|Creator|NumberOf(?:Prims|NotecardLines|Sides)|Animation(?:List)?|(?:Camera|Local)(?:Pos|Rot)|Vel|Accel|Omega|Time(?:stamp|OfDay)|(?:Object|CenterOf)?Mass|MassMKS|Energy|Owner|(?:Owner)?Key|SunDirection|Texture(?:Offset|Scale|Rot)|Inventory(?:Number|Name|Key|Type|Creator|PermMask)|Permissions(?:Key)?|StartParameter|List(?:Length|EntryType)|Date|Agent(?:Size|Info|Language|List)|LandOwnerAt|NotecardLine|Script(?:Name|State))|(?:Get|Reset|GetAndReset)Time|PlaySound(?:Slave)?|LoopSound(?:Master|Slave)?|(?:Trigger|Stop|Preload)Sound|(?:(?:Get|Delete)Sub|Insert)String|To(?:Upper|Lower)|Give(?:InventoryList|Money)|RezObject|(?:Stop)?LookAt|Sleep|CollisionFilter|(?:Take|Release)Controls|DetachFromAvatar|AttachToAvatar(?:Temp)?|InstantMessage|(?:GetNext)?Email|StopHover|MinEventDelay|RotLookAt|String(?:Length|Trim)|(?:Start|Stop)Animation|TargetOmega|RequestPermissions|(?:Create|Break)Link|BreakAllLinks|(?:Give|Remove)Inventory|Water|PassTouches|Request(?:Agent|Inventory)Data|TeleportAgent(?:Home|GlobalCoords)?|ModifyLand|CollisionSound|ResetScript|MessageLinked|PushObject|PassCollisions|AxisAngle2Rot|Rot2(?:Axis|Angle)|A(?:cos|sin)|AngleBetween|AllowInventoryDrop|SubStringIndex|List2(?:CSV|Integer|Json|Float|String|Key|Vector|Rot|List(?:Strided)?)|DeleteSubList|List(?:Statistics|Sort|Randomize|(?:Insert|Find|Replace)List)|EdgeOfWorld|AdjustSoundVolume|Key2Name|TriggerSoundLimited|EjectFromLand|(?:CSV|ParseString)2List|OverMyLand|SameGroup|UnSit|Ground(?:Slope|Normal|Contour)|GroundRepel|(?:Set|Remove)VehicleFlags|(?:AvatarOn)?(?:Link)?SitTarget|Script(?:Danger|Profiler)|Dialog|VolumeDetect|ResetOtherScript|RemoteLoadScriptPin|(?:Open|Close)RemoteDataChannel|SendRemoteData|RemoteDataReply|(?:Integer|String)ToBase64|XorBa
se64|Log(?:10)?|Base64To(?:String|Integer)|ParseStringKeepNulls|RezAtRoot|RequestSimulatorData|ForceMouselook|(?:Load|Release|(?:E|Une)scape)URL|ParcelMedia(?:CommandList|Query)|ModPow|MapDestination|(?:RemoveFrom|AddTo|Reset)Land(?:Pass|Ban)List|(?:Set|Clear)CameraParams|HTTP(?:Request|Response)|TextBox|DetectedTouch(?:UV|Face|Pos|(?:N|Bin)ormal|ST)|(?:MD5|SHA1|DumpList2)String|Request(?:Secure)?URL|Clear(?:Prim|Link)Media|(?:Link)?ParticleSystem|(?:Get|Request)(?:Username|DisplayName)|RegionSayTo|CastRay|GenerateKey|TransferLindenDollars|ManageEstateAccess|(?:Create|Delete)Character|ExecCharacterCmd|Evade|FleeFrom|NavigateTo|PatrolPoints|Pursue|UpdateCharacter|WanderWithin))\b' + lsl_constants_float = r'\b(?:DEG_TO_RAD|PI(?:_BY_TWO)?|RAD_TO_DEG|SQRT2|TWO_PI)\b' + lsl_constants_integer = r'\b(?:JSON_APPEND|STATUS_(?:PHYSICS|ROTATE_[XYZ]|PHANTOM|SANDBOX|BLOCK_GRAB(?:_OBJECT)?|(?:DIE|RETURN)_AT_EDGE|CAST_SHADOWS|OK|MALFORMED_PARAMS|TYPE_MISMATCH|BOUNDS_ERROR|NOT_(?:FOUND|SUPPORTED)|INTERNAL_ERROR|WHITELIST_FAILED)|AGENT(?:_(?:BY_(?:LEGACY_|USER)NAME|FLYING|ATTACHMENTS|SCRIPTED|MOUSELOOK|SITTING|ON_OBJECT|AWAY|WALKING|IN_AIR|TYPING|CROUCHING|BUSY|ALWAYS_RUN|AUTOPILOT|LIST_(?:PARCEL(?:_OWNER)?|REGION)))?|CAMERA_(?:PITCH|DISTANCE|BEHINDNESS_(?:ANGLE|LAG)|(?:FOCUS|POSITION)(?:_(?:THRESHOLD|LOCKED|LAG))?|FOCUS_OFFSET|ACTIVE)|ANIM_ON|LOOP|REVERSE|PING_PONG|SMOOTH|ROTATE|SCALE|ALL_SIDES|LINK_(?:ROOT|SET|ALL_(?:OTHERS|CHILDREN)|THIS)|ACTIVE|PASSIVE|SCRIPTED|CONTROL_(?:FWD|BACK|(?:ROT_)?(?:LEFT|RIGHT)|UP|DOWN|(?:ML_)?LBUTTON)|PERMISSION_(?:RETURN_OBJECTS|DEBIT|OVERRIDE_ANIMATIONS|SILENT_ESTATE_MANAGEMENT|TAKE_CONTROLS|TRIGGER_ANIMATION|ATTACH|CHANGE_LINKS|(?:CONTROL|TRACK)_CAMERA|TELEPORT)|INVENTORY_(?:TEXTURE|SOUND|OBJECT|SCRIPT|LANDMARK|CLOTHING|NOTECARD|BODYPART|ANIMATION|GESTURE|ALL|NONE)|CHANGED_(?:INVENTORY|COLOR|SHAPE|SCALE|TEXTURE|LINK|ALLOWED_DROP|OWNER|REGION(?:_START)?|TELEPORT|MEDIA)|OBJECT_(?:(?:PHYSICS|SERVER|STREAMING)_COST|UNKNOWN_DETAIL|CHARACTER_TIME|PHANTOM
|PHYSICS|TEMP_ON_REZ|NAME|DESC|POS|PRIM_EQUIVALENCE|RETURN_(?:PARCEL(?:_OWNER)?|REGION)|ROO?T|VELOCITY|OWNER|GROUP|CREATOR|ATTACHED_POINT|RENDER_WEIGHT|PATHFINDING_TYPE|(?:RUNNING|TOTAL)_SCRIPT_COUNT|SCRIPT_(?:MEMORY|TIME))|TYPE_(?:INTEGER|FLOAT|STRING|KEY|VECTOR|ROTATION|INVALID)|(?:DEBUG|PUBLIC)_CHANNEL|ATTACH_(?:AVATAR_CENTER|CHEST|HEAD|BACK|PELVIS|MOUTH|CHIN|NECK|NOSE|BELLY|[LR](?:SHOULDER|HAND|FOOT|EAR|EYE|[UL](?:ARM|LEG)|HIP)|(?:LEFT|RIGHT)_PEC|HUD_(?:CENTER_[12]|TOP_(?:RIGHT|CENTER|LEFT)|BOTTOM(?:_(?:RIGHT|LEFT))?))|LAND_(?:LEVEL|RAISE|LOWER|SMOOTH|NOISE|REVERT)|DATA_(?:ONLINE|NAME|BORN|SIM_(?:POS|STATUS|RATING)|PAYINFO)|PAYMENT_INFO_(?:ON_FILE|USED)|REMOTE_DATA_(?:CHANNEL|REQUEST|REPLY)|PSYS_(?:PART_(?:BF_(?:ZERO|ONE(?:_MINUS_(?:DEST_COLOR|SOURCE_(ALPHA|COLOR)))?|DEST_COLOR|SOURCE_(ALPHA|COLOR))|BLEND_FUNC_(DEST|SOURCE)|FLAGS|(?:START|END)_(?:COLOR|ALPHA|SCALE|GLOW)|MAX_AGE|(?:RIBBON|WIND|INTERP_(?:COLOR|SCALE)|BOUNCE|FOLLOW_(?:SRC|VELOCITY)|TARGET_(?:POS|LINEAR)|EMISSIVE)_MASK)|SRC_(?:MAX_AGE|PATTERN|ANGLE_(?:BEGIN|END)|BURST_(?:RATE|PART_COUNT|RADIUS|SPEED_(?:MIN|MAX))|ACCEL|TEXTURE|TARGET_KEY|OMEGA|PATTERN_(?:DROP|EXPLODE|ANGLE(?:_CONE(?:_EMPTY)?)?)))|VEHICLE_(?:REFERENCE_FRAME|TYPE_(?:NONE|SLED|CAR|BOAT|AIRPLANE|BALLOON)|(?:LINEAR|ANGULAR)_(?:FRICTION_TIMESCALE|MOTOR_DIRECTION)|LINEAR_MOTOR_OFFSET|HOVER_(?:HEIGHT|EFFICIENCY|TIMESCALE)|BUOYANCY|(?:LINEAR|ANGULAR)_(?:DEFLECTION_(?:EFFICIENCY|TIMESCALE)|MOTOR_(?:DECAY_)?TIMESCALE)|VERTICAL_ATTRACTION_(?:EFFICIENCY|TIMESCALE)|BANKING_(?:EFFICIENCY|MIX|TIMESCALE)|FLAG_(?:NO_DEFLECTION_UP|LIMIT_(?:ROLL_ONLY|MOTOR_UP)|HOVER_(?:(?:WATER|TERRAIN|UP)_ONLY|GLOBAL_HEIGHT)|MOUSELOOK_(?:STEER|BANK)|CAMERA_DECOUPLED))|PRIM_(?:TYPE(?:_(?:BOX|CYLINDER|PRISM|SPHERE|TORUS|TUBE|RING|SCULPT))?|HOLE_(?:DEFAULT|CIRCLE|SQUARE|TRIANGLE)|MATERIAL(?:_(?:STONE|METAL|GLASS|WOOD|FLESH|PLASTIC|RUBBER))?|SHINY_(?:NONE|LOW|MEDIUM|HIGH)|BUMP_(?:NONE|BRIGHT|DARK|WOOD|BARK|BRICKS|CHECKER|CONCRETE|TILE|STONE|DISKS|GRAVEL|BLOBS|SIDING|LARGET
ILE|STUCCO|SUCTION|WEAVE)|TEXGEN_(?:DEFAULT|PLANAR)|SCULPT_(?:TYPE_(?:SPHERE|TORUS|PLANE|CYLINDER|MASK)|FLAG_(?:MIRROR|INVERT))|PHYSICS(?:_(?:SHAPE_(?:CONVEX|NONE|PRIM|TYPE)))?|(?:POS|ROT)_LOCAL|SLICE|TEXT|FLEXIBLE|POINT_LIGHT|TEMP_ON_REZ|PHANTOM|POSITION|SIZE|ROTATION|TEXTURE|NAME|OMEGA|DESC|LINK_TARGET|COLOR|BUMP_SHINY|FULLBRIGHT|TEXGEN|GLOW|MEDIA_(?:ALT_IMAGE_ENABLE|CONTROLS|(?:CURRENT|HOME)_URL|AUTO_(?:LOOP|PLAY|SCALE|ZOOM)|FIRST_CLICK_INTERACT|(?:WIDTH|HEIGHT)_PIXELS|WHITELIST(?:_ENABLE)?|PERMS_(?:INTERACT|CONTROL)|PARAM_MAX|CONTROLS_(?:STANDARD|MINI)|PERM_(?:NONE|OWNER|GROUP|ANYONE)|MAX_(?:URL_LENGTH|WHITELIST_(?:SIZE|COUNT)|(?:WIDTH|HEIGHT)_PIXELS)))|MASK_(?:BASE|OWNER|GROUP|EVERYONE|NEXT)|PERM_(?:TRANSFER|MODIFY|COPY|MOVE|ALL)|PARCEL_(?:MEDIA_COMMAND_(?:STOP|PAUSE|PLAY|LOOP|TEXTURE|URL|TIME|AGENT|UNLOAD|AUTO_ALIGN|TYPE|SIZE|DESC|LOOP_SET)|FLAG_(?:ALLOW_(?:FLY|(?:GROUP_)?SCRIPTS|LANDMARK|TERRAFORM|DAMAGE|CREATE_(?:GROUP_)?OBJECTS)|USE_(?:ACCESS_(?:GROUP|LIST)|BAN_LIST|LAND_PASS_LIST)|LOCAL_SOUND_ONLY|RESTRICT_PUSHOBJECT|ALLOW_(?:GROUP|ALL)_OBJECT_ENTRY)|COUNT_(?:TOTAL|OWNER|GROUP|OTHER|SELECTED|TEMP)|DETAILS_(?:NAME|DESC|OWNER|GROUP|AREA|ID|SEE_AVATARS))|LIST_STAT_(?:MAX|MIN|MEAN|MEDIAN|STD_DEV|SUM(?:_SQUARES)?|NUM_COUNT|GEOMETRIC_MEAN|RANGE)|PAY_(?:HIDE|DEFAULT)|REGION_FLAG_(?:ALLOW_DAMAGE|FIXED_SUN|BLOCK_TERRAFORM|SANDBOX|DISABLE_(?:COLLISIONS|PHYSICS)|BLOCK_FLY|ALLOW_DIRECT_TELEPORT|RESTRICT_PUSHOBJECT)|HTTP_(?:METHOD|MIMETYPE|BODY_(?:MAXLENGTH|TRUNCATED)|CUSTOM_HEADER|PRAGMA_NO_CACHE|VERBOSE_THROTTLE|VERIFY_CERT)|STRING_(?:TRIM(?:_(?:HEAD|TAIL))?)|CLICK_ACTION_(?:NONE|TOUCH|SIT|BUY|PAY|OPEN(?:_MEDIA)?|PLAY|ZOOM)|TOUCH_INVALID_FACE|PROFILE_(?:NONE|SCRIPT_MEMORY)|RC_(?:DATA_FLAGS|DETECT_PHANTOM|GET_(?:LINK_NUM|NORMAL|ROOT_KEY)|MAX_HITS|REJECT_(?:TYPES|AGENTS|(?:NON)?PHYSICAL|LAND))|RCERR_(?:CAST_TIME_EXCEEDED|SIM_PERF_LOW|UNKNOWN)|ESTATE_ACCESS_(?:ALLOWED_(?:AGENT|GROUP)_(?:ADD|REMOVE)|BANNED_AGENT_(?:ADD|REMOVE))|DENSITY|FRICTION|RESTITUTION|GRAVITY_MULTIP
LIER|KFM_(?:COMMAND|CMD_(?:PLAY|STOP|PAUSE|SET_MODE)|MODE|FORWARD|LOOP|PING_PONG|REVERSE|DATA|ROTATION|TRANSLATION)|ERR_(?:GENERIC|PARCEL_PERMISSIONS|MALFORMED_PARAMS|RUNTIME_PERMISSIONS|THROTTLED)|CHARACTER_(?:CMD_(?:(?:SMOOTH_)?STOP|JUMP)|DESIRED_(?:TURN_)?SPEED|RADIUS|STAY_WITHIN_PARCEL|LENGTH|ORIENTATION|ACCOUNT_FOR_SKIPPED_FRAMES|AVOIDANCE_MODE|TYPE(?:_(?:[A-D]|NONE))?|MAX_(?:DECEL|TURN_RADIUS|(?:ACCEL|SPEED)))|PURSUIT_(?:OFFSET|FUZZ_FACTOR|GOAL_TOLERANCE|INTERCEPT)|REQUIRE_LINE_OF_SIGHT|FORCE_DIRECT_PATH|VERTICAL|HORIZONTAL|AVOID_(?:CHARACTERS|DYNAMIC_OBSTACLES|NONE)|PU_(?:EVADE_(?:HIDDEN|SPOTTED)|FAILURE_(?:DYNAMIC_PATHFINDING_DISABLED|INVALID_(?:GOAL|START)|NO_(?:NAVMESH|VALID_DESTINATION)|OTHER|TARGET_GONE|(?:PARCEL_)?UNREACHABLE)|(?:GOAL|SLOWDOWN_DISTANCE)_REACHED)|TRAVERSAL_TYPE(?:_(?:FAST|NONE|SLOW))?|CONTENT_TYPE_(?:ATOM|FORM|HTML|JSON|LLSD|RSS|TEXT|XHTML|XML)|GCNP_(?:RADIUS|STATIC)|(?:PATROL|WANDER)_PAUSE_AT_WAYPOINTS|OPT_(?:AVATAR|CHARACTER|EXCLUSION_VOLUME|LEGACY_LINKSET|MATERIAL_VOLUME|OTHER|STATIC_OBSTACLE|WALKABLE)|SIM_STAT_PCT_CHARS_STEPPED)\b' + lsl_constants_integer_boolean = r'\b(?:FALSE|TRUE)\b' + lsl_constants_rotation = r'\b(?:ZERO_ROTATION)\b' + lsl_constants_string = r'\b(?:EOF|JSON_(?:ARRAY|DELETE|FALSE|INVALID|NULL|NUMBER|OBJECT|STRING|TRUE)|NULL_KEY|TEXTURE_(?:BLANK|DEFAULT|MEDIA|PLYWOOD|TRANSPARENT)|URL_REQUEST_(?:GRANTED|DENIED))\b' + lsl_constants_vector = r'\b(?:TOUCH_INVALID_(?:TEXCOORD|VECTOR)|ZERO_VECTOR)\b' + lsl_invalid_broken = r'\b(?:LAND_(?:LARGE|MEDIUM|SMALL)_BRUSH)\b' + lsl_invalid_deprecated = r'\b(?:ATTACH_[LR]PEC|DATA_RATING|OBJECT_ATTACHMENT_(?:GEOMETRY_BYTES|SURFACE_AREA)|PRIM_(?:CAST_SHADOWS|MATERIAL_LIGHT|TYPE_LEGACY)|PSYS_SRC_(?:INNER|OUTER)ANGLE|VEHICLE_FLAG_NO_FLY_UP|ll(?:Cloud|Make(?:Explosion|Fountain|Smoke|Fire)|RemoteDataSetRegion|Sound(?:Preload)?|XorBase64Strings(?:Correct)?))\b' + lsl_invalid_illegal = r'\b(?:event)\b' + lsl_invalid_unimplemented = 
r'\b(?:CHARACTER_(?:MAX_ANGULAR_(?:ACCEL|SPEED)|TURN_SPEED_MULTIPLIER)|PERMISSION_(?:CHANGE_(?:JOINTS|PERMISSIONS)|RELEASE_OWNERSHIP|REMAP_CONTROLS)|PRIM_PHYSICS_MATERIAL|PSYS_SRC_OBJ_REL_MASK|ll(?:CollisionSprite|(?:Stop)?PointAt|(?:(?:Refresh|Set)Prim)URL|(?:Take|Release)Camera|RemoteLoadScript))\b' + lsl_reserved_godmode = r'\b(?:ll(?:GodLikeRezObject|Set(?:Inventory|Object)PermMask))\b' + lsl_reserved_log = r'\b(?:print)\b' + lsl_operators = r'\+\+|\-\-|<<|>>|&&?|\|\|?|\^|~|[!%<>=*+\-/]=?' + + tokens = { + 'root': + [ + (r'//.*?\n', Comment.Single), + (r'/\*', Comment.Multiline, 'comment'), + (r'"', String.Double, 'string'), + (lsl_keywords, Keyword), + (lsl_types, Keyword.Type), + (lsl_states, Name.Class), + (lsl_events, Name.Builtin), + (lsl_functions_builtin, Name.Function), + (lsl_constants_float, Keyword.Constant), + (lsl_constants_integer, Keyword.Constant), + (lsl_constants_integer_boolean, Keyword.Constant), + (lsl_constants_rotation, Keyword.Constant), + (lsl_constants_string, Keyword.Constant), + (lsl_constants_vector, Keyword.Constant), + (lsl_invalid_broken, Error), + (lsl_invalid_deprecated, Error), + (lsl_invalid_illegal, Error), + (lsl_invalid_unimplemented, Error), + (lsl_reserved_godmode, Keyword.Reserved), + (lsl_reserved_log, Keyword.Reserved), + (r'\b([a-zA-Z_]\w*)\b', Name.Variable), + (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d*', Number.Float), + (r'(\d+\.\d*|\.\d+)', Number.Float), + (r'0[xX][0-9a-fA-F]+', Number.Hex), + (r'\d+', Number.Integer), + (lsl_operators, Operator), + (r':=?', Error), + (r'[,;{}()\[\]]', Punctuation), + (r'\n+', Whitespace), + (r'\s+', Whitespace) + ], + 'comment': + [ + (r'[^*/]+', Comment.Multiline), + (r'/\*', Comment.Multiline, '#push'), + (r'\*/', Comment.Multiline, '#pop'), + (r'[*/]', Comment.Multiline) + ], + 'string': + [ + (r'\\([nt"\\])', String.Escape), + (r'"', String.Double, '#pop'), + (r'\\.', Error), + (r'[^"\\]+', String.Double), + ] + } + + +class AppleScriptLexer(RegexLexer): + """ + For `AppleScript 
source code + `_, + including `AppleScript Studio + `_. + Contributed by Andreas Amann . + + .. versionadded:: 1.0 + """ + + name = 'AppleScript' + aliases = ['applescript'] + filenames = ['*.applescript'] + + flags = re.MULTILINE | re.DOTALL + + Identifiers = r'[a-zA-Z]\w*' + + # XXX: use words() for all of these + Literals = ('AppleScript', 'current application', 'false', 'linefeed', + 'missing value', 'pi', 'quote', 'result', 'return', 'space', + 'tab', 'text item delimiters', 'true', 'version') + Classes = ('alias ', 'application ', 'boolean ', 'class ', 'constant ', + 'date ', 'file ', 'integer ', 'list ', 'number ', 'POSIX file ', + 'real ', 'record ', 'reference ', 'RGB color ', 'script ', + 'text ', 'unit types', '(?:Unicode )?text', 'string') + BuiltIn = ('attachment', 'attribute run', 'character', 'day', 'month', + 'paragraph', 'word', 'year') + HandlerParams = ('about', 'above', 'against', 'apart from', 'around', + 'aside from', 'at', 'below', 'beneath', 'beside', + 'between', 'for', 'given', 'instead of', 'on', 'onto', + 'out of', 'over', 'since') + Commands = ('ASCII (character|number)', 'activate', 'beep', 'choose URL', + 'choose application', 'choose color', 'choose file( name)?', + 'choose folder', 'choose from list', + 'choose remote application', 'clipboard info', + 'close( access)?', 'copy', 'count', 'current date', 'delay', + 'delete', 'display (alert|dialog)', 'do shell script', + 'duplicate', 'exists', 'get eof', 'get volume settings', + 'info for', 'launch', 'list (disks|folder)', 'load script', + 'log', 'make', 'mount volume', 'new', 'offset', + 'open( (for access|location))?', 'path to', 'print', 'quit', + 'random number', 'read', 'round', 'run( script)?', + 'say', 'scripting components', + 'set (eof|the clipboard to|volume)', 'store script', + 'summarize', 'system attribute', 'system info', + 'the clipboard', 'time to GMT', 'write', 'quoted form') + References = ('(in )?back of', '(in )?front of', '[0-9]+(st|nd|rd|th)', + 'first', 
'second', 'third', 'fourth', 'fifth', 'sixth', + 'seventh', 'eighth', 'ninth', 'tenth', 'after', 'back', + 'before', 'behind', 'every', 'front', 'index', 'last', + 'middle', 'some', 'that', 'through', 'thru', 'where', 'whose') + Operators = ("and", "or", "is equal", "equals", "(is )?equal to", "is not", + "isn't", "isn't equal( to)?", "is not equal( to)?", + "doesn't equal", "does not equal", "(is )?greater than", + "comes after", "is not less than or equal( to)?", + "isn't less than or equal( to)?", "(is )?less than", + "comes before", "is not greater than or equal( to)?", + "isn't greater than or equal( to)?", + "(is )?greater than or equal( to)?", "is not less than", + "isn't less than", "does not come before", + "doesn't come before", "(is )?less than or equal( to)?", + "is not greater than", "isn't greater than", + "does not come after", "doesn't come after", "starts? with", + "begins? with", "ends? with", "contains?", "does not contain", + "doesn't contain", "is in", "is contained by", "is not in", + "is not contained by", "isn't contained by", "div", "mod", + "not", "(a )?(ref( to)?|reference to)", "is", "does") + Control = ('considering', 'else', 'error', 'exit', 'from', 'if', + 'ignoring', 'in', 'repeat', 'tell', 'then', 'times', 'to', + 'try', 'until', 'using terms from', 'while', 'whith', + 'with timeout( of)?', 'with transaction', 'by', 'continue', + 'end', 'its?', 'me', 'my', 'return', 'of', 'as') + Declarations = ('global', 'local', 'prop(erty)?', 'set', 'get') + Reserved = ('but', 'put', 'returning', 'the') + StudioClasses = ('action cell', 'alert reply', 'application', 'box', + 'browser( cell)?', 'bundle', 'button( cell)?', 'cell', + 'clip view', 'color well', 'color-panel', + 'combo box( item)?', 'control', + 'data( (cell|column|item|row|source))?', 'default entry', + 'dialog reply', 'document', 'drag info', 'drawer', + 'event', 'font(-panel)?', 'formatter', + 'image( (cell|view))?', 'matrix', 'menu( item)?', 'item', + 'movie( view)?', 
'open-panel', 'outline view', 'panel', + 'pasteboard', 'plugin', 'popup button', + 'progress indicator', 'responder', 'save-panel', + 'scroll view', 'secure text field( cell)?', 'slider', + 'sound', 'split view', 'stepper', 'tab view( item)?', + 'table( (column|header cell|header view|view))', + 'text( (field( cell)?|view))?', 'toolbar( item)?', + 'user-defaults', 'view', 'window') + StudioEvents = ('accept outline drop', 'accept table drop', 'action', + 'activated', 'alert ended', 'awake from nib', 'became key', + 'became main', 'begin editing', 'bounds changed', + 'cell value', 'cell value changed', 'change cell value', + 'change item value', 'changed', 'child of item', + 'choose menu item', 'clicked', 'clicked toolbar item', + 'closed', 'column clicked', 'column moved', + 'column resized', 'conclude drop', 'data representation', + 'deminiaturized', 'dialog ended', 'document nib name', + 'double clicked', 'drag( (entered|exited|updated))?', + 'drop', 'end editing', 'exposed', 'idle', 'item expandable', + 'item value', 'item value changed', 'items changed', + 'keyboard down', 'keyboard up', 'launched', + 'load data representation', 'miniaturized', 'mouse down', + 'mouse dragged', 'mouse entered', 'mouse exited', + 'mouse moved', 'mouse up', 'moved', + 'number of browser rows', 'number of items', + 'number of rows', 'open untitled', 'opened', 'panel ended', + 'parameters updated', 'plugin loaded', 'prepare drop', + 'prepare outline drag', 'prepare outline drop', + 'prepare table drag', 'prepare table drop', + 'read from file', 'resigned active', 'resigned key', + 'resigned main', 'resized( sub views)?', + 'right mouse down', 'right mouse dragged', + 'right mouse up', 'rows changed', 'scroll wheel', + 'selected tab view item', 'selection changed', + 'selection changing', 'should begin editing', + 'should close', 'should collapse item', + 'should end editing', 'should expand item', + 'should open( untitled)?', + 'should quit( after last window closed)?', + 'should 
select column', 'should select item', + 'should select row', 'should select tab view item', + 'should selection change', 'should zoom', 'shown', + 'update menu item', 'update parameters', + 'update toolbar item', 'was hidden', 'was miniaturized', + 'will become active', 'will close', 'will dismiss', + 'will display browser cell', 'will display cell', + 'will display item cell', 'will display outline cell', + 'will finish launching', 'will hide', 'will miniaturize', + 'will move', 'will open', 'will pop up', 'will quit', + 'will resign active', 'will resize( sub views)?', + 'will select tab view item', 'will show', 'will zoom', + 'write to file', 'zoomed') + StudioCommands = ('animate', 'append', 'call method', 'center', + 'close drawer', 'close panel', 'display', + 'display alert', 'display dialog', 'display panel', 'go', + 'hide', 'highlight', 'increment', 'item for', + 'load image', 'load movie', 'load nib', 'load panel', + 'load sound', 'localized string', 'lock focus', 'log', + 'open drawer', 'path for', 'pause', 'perform action', + 'play', 'register', 'resume', 'scroll', 'select( all)?', + 'show', 'size to fit', 'start', 'step back', + 'step forward', 'stop', 'synchronize', 'unlock focus', + 'update') + StudioProperties = ('accepts arrow key', 'action method', 'active', + 'alignment', 'allowed identifiers', + 'allows branch selection', 'allows column reordering', + 'allows column resizing', 'allows column selection', + 'allows customization', + 'allows editing text attributes', + 'allows empty selection', 'allows mixed state', + 'allows multiple selection', 'allows reordering', + 'allows undo', 'alpha( value)?', 'alternate image', + 'alternate increment value', 'alternate title', + 'animation delay', 'associated file name', + 'associated object', 'auto completes', 'auto display', + 'auto enables items', 'auto repeat', + 'auto resizes( outline column)?', + 'auto save expanded items', 'auto save name', + 'auto save table columns', 'auto saves configuration', + 
'auto scroll', 'auto sizes all columns to fit', + 'auto sizes cells', 'background color', 'bezel state', + 'bezel style', 'bezeled', 'border rect', 'border type', + 'bordered', 'bounds( rotation)?', 'box type', + 'button returned', 'button type', + 'can choose directories', 'can choose files', + 'can draw', 'can hide', + 'cell( (background color|size|type))?', 'characters', + 'class', 'click count', 'clicked( data)? column', + 'clicked data item', 'clicked( data)? row', + 'closeable', 'collating', 'color( (mode|panel))', + 'command key down', 'configuration', + 'content(s| (size|view( margins)?))?', 'context', + 'continuous', 'control key down', 'control size', + 'control tint', 'control view', + 'controller visible', 'coordinate system', + 'copies( on scroll)?', 'corner view', 'current cell', + 'current column', 'current( field)? editor', + 'current( menu)? item', 'current row', + 'current tab view item', 'data source', + 'default identifiers', 'delta (x|y|z)', + 'destination window', 'directory', 'display mode', + 'displayed cell', 'document( (edited|rect|view))?', + 'double value', 'dragged column', 'dragged distance', + 'dragged items', 'draws( cell)? background', + 'draws grid', 'dynamically scrolls', 'echos bullets', + 'edge', 'editable', 'edited( data)? column', + 'edited data item', 'edited( data)? 
row', 'enabled', + 'enclosing scroll view', 'ending page', + 'error handling', 'event number', 'event type', + 'excluded from windows menu', 'executable path', + 'expanded', 'fax number', 'field editor', 'file kind', + 'file name', 'file type', 'first responder', + 'first visible column', 'flipped', 'floating', + 'font( panel)?', 'formatter', 'frameworks path', + 'frontmost', 'gave up', 'grid color', 'has data items', + 'has horizontal ruler', 'has horizontal scroller', + 'has parent data item', 'has resize indicator', + 'has shadow', 'has sub menu', 'has vertical ruler', + 'has vertical scroller', 'header cell', 'header view', + 'hidden', 'hides when deactivated', 'highlights by', + 'horizontal line scroll', 'horizontal page scroll', + 'horizontal ruler view', 'horizontally resizable', + 'icon image', 'id', 'identifier', + 'ignores multiple clicks', + 'image( (alignment|dims when disabled|frame style|scaling))?', + 'imports graphics', 'increment value', + 'indentation per level', 'indeterminate', 'index', + 'integer value', 'intercell spacing', 'item height', + 'key( (code|equivalent( modifier)?|window))?', + 'knob thickness', 'label', 'last( visible)? column', + 'leading offset', 'leaf', 'level', 'line scroll', + 'loaded', 'localized sort', 'location', 'loop mode', + 'main( (bunde|menu|window))?', 'marker follows cell', + 'matrix mode', 'maximum( content)? size', + 'maximum visible columns', + 'menu( form representation)?', 'miniaturizable', + 'miniaturized', 'minimized image', 'minimized title', + 'minimum column width', 'minimum( content)? 
size', + 'modal', 'modified', 'mouse down state', + 'movie( (controller|file|rect))?', 'muted', 'name', + 'needs display', 'next state', 'next text', + 'number of tick marks', 'only tick mark values', + 'opaque', 'open panel', 'option key down', + 'outline table column', 'page scroll', 'pages across', + 'pages down', 'palette label', 'pane splitter', + 'parent data item', 'parent window', 'pasteboard', + 'path( (names|separator))?', 'playing', + 'plays every frame', 'plays selection only', 'position', + 'preferred edge', 'preferred type', 'pressure', + 'previous text', 'prompt', 'properties', + 'prototype cell', 'pulls down', 'rate', + 'released when closed', 'repeated', + 'requested print time', 'required file type', + 'resizable', 'resized column', 'resource path', + 'returns records', 'reuses columns', 'rich text', + 'roll over', 'row height', 'rulers visible', + 'save panel', 'scripts path', 'scrollable', + 'selectable( identifiers)?', 'selected cell', + 'selected( data)? columns?', 'selected data items?', + 'selected( data)? 
rows?', 'selected item identifier', + 'selection by rect', 'send action on arrow key', + 'sends action when done editing', 'separates columns', + 'separator item', 'sequence number', 'services menu', + 'shared frameworks path', 'shared support path', + 'sheet', 'shift key down', 'shows alpha', + 'shows state by', 'size( mode)?', + 'smart insert delete enabled', 'sort case sensitivity', + 'sort column', 'sort order', 'sort type', + 'sorted( data rows)?', 'sound', 'source( mask)?', + 'spell checking enabled', 'starting page', 'state', + 'string value', 'sub menu', 'super menu', 'super view', + 'tab key traverses cells', 'tab state', 'tab type', + 'tab view', 'table view', 'tag', 'target( printer)?', + 'text color', 'text container insert', + 'text container origin', 'text returned', + 'tick mark position', 'time stamp', + 'title(d| (cell|font|height|position|rect))?', + 'tool tip', 'toolbar', 'trailing offset', 'transparent', + 'treat packages as directories', 'truncated labels', + 'types', 'unmodified characters', 'update views', + 'use sort indicator', 'user defaults', + 'uses data source', 'uses ruler', + 'uses threaded animation', + 'uses title from previous column', 'value wraps', + 'version', + 'vertical( (line scroll|page scroll|ruler view))?', + 'vertically resizable', 'view', + 'visible( document rect)?', 'volume', 'width', 'window', + 'windows menu', 'wraps', 'zoomable', 'zoomed') + + tokens = { + 'root': [ + (r'\s+', Text), + (u'¬\\n', String.Escape), + (r"'s\s+", Text), # This is a possessive, consider moving + (r'(--|#).*?$', Comment), + (r'\(\*', Comment.Multiline, 'comment'), + (r'[(){}!,.:]', Punctuation), + (u'(«)([^»]+)(»)', + bygroups(Text, Name.Builtin, Text)), + (r'\b((?:considering|ignoring)\s*)' + r'(application responses|case|diacriticals|hyphens|' + r'numeric strings|punctuation|white space)', + bygroups(Keyword, Name.Builtin)), + (u'(-|\\*|\\+|&|≠|>=?|<=?|=|≥|≤|/|÷|\\^)', Operator), + (r"\b(%s)\b" % '|'.join(Operators), Operator.Word), + 
(r'^(\s*(?:on|end)\s+)' + r'(%s)' % '|'.join(StudioEvents[::-1]), + bygroups(Keyword, Name.Function)), + (r'^(\s*)(in|on|script|to)(\s+)', bygroups(Text, Keyword, Text)), + (r'\b(as )(%s)\b' % '|'.join(Classes), + bygroups(Keyword, Name.Class)), + (r'\b(%s)\b' % '|'.join(Literals), Name.Constant), + (r'\b(%s)\b' % '|'.join(Commands), Name.Builtin), + (r'\b(%s)\b' % '|'.join(Control), Keyword), + (r'\b(%s)\b' % '|'.join(Declarations), Keyword), + (r'\b(%s)\b' % '|'.join(Reserved), Name.Builtin), + (r'\b(%s)s?\b' % '|'.join(BuiltIn), Name.Builtin), + (r'\b(%s)\b' % '|'.join(HandlerParams), Name.Builtin), + (r'\b(%s)\b' % '|'.join(StudioProperties), Name.Attribute), + (r'\b(%s)s?\b' % '|'.join(StudioClasses), Name.Builtin), + (r'\b(%s)\b' % '|'.join(StudioCommands), Name.Builtin), + (r'\b(%s)\b' % '|'.join(References), Name.Builtin), + (r'"(\\\\|\\"|[^"])*"', String.Double), + (r'\b(%s)\b' % Identifiers, Name.Variable), + (r'[-+]?(\d+\.\d*|\d*\.\d+)(E[-+][0-9]+)?', Number.Float), + (r'[-+]?\d+', Number.Integer), + ], + 'comment': [ + ('\(\*', Comment.Multiline, '#push'), + ('\*\)', Comment.Multiline, '#pop'), + ('[^*(]+', Comment.Multiline), + ('[*(]', Comment.Multiline), + ], + } + + +class RexxLexer(RegexLexer): + """ + `Rexx `_ is a scripting language available for + a wide range of different platforms with its roots found on mainframe + systems. It is popular for I/O- and data based tasks and can act as glue + language to bind different applications together. + + .. 
versionadded:: 2.0 + """ + name = 'Rexx' + aliases = ['rexx', 'arexx'] + filenames = ['*.rexx', '*.rex', '*.rx', '*.arexx'] + mimetypes = ['text/x-rexx'] + flags = re.IGNORECASE + + tokens = { + 'root': [ + (r'\s', Whitespace), + (r'/\*', Comment.Multiline, 'comment'), + (r'"', String, 'string_double'), + (r"'", String, 'string_single'), + (r'[0-9]+(\.[0-9]+)?(e[+-]?[0-9])?', Number), + (r'([a-z_]\w*)(\s*)(:)(\s*)(procedure)\b', + bygroups(Name.Function, Whitespace, Operator, Whitespace, + Keyword.Declaration)), + (r'([a-z_]\w*)(\s*)(:)', + bygroups(Name.Label, Whitespace, Operator)), + include('function'), + include('keyword'), + include('operator'), + (r'[a-z_]\w*', Text), + ], + 'function': [ + (words(( + 'abbrev', 'abs', 'address', 'arg', 'b2x', 'bitand', 'bitor', 'bitxor', + 'c2d', 'c2x', 'center', 'charin', 'charout', 'chars', 'compare', + 'condition', 'copies', 'd2c', 'd2x', 'datatype', 'date', 'delstr', + 'delword', 'digits', 'errortext', 'form', 'format', 'fuzz', 'insert', + 'lastpos', 'left', 'length', 'linein', 'lineout', 'lines', 'max', + 'min', 'overlay', 'pos', 'queued', 'random', 'reverse', 'right', 'sign', + 'sourceline', 'space', 'stream', 'strip', 'substr', 'subword', 'symbol', + 'time', 'trace', 'translate', 'trunc', 'value', 'verify', 'word', + 'wordindex', 'wordlength', 'wordpos', 'words', 'x2b', 'x2c', 'x2d', + 'xrange'), suffix=r'(\s*)(\()'), + bygroups(Name.Builtin, Whitespace, Operator)), + ], + 'keyword': [ + (r'(address|arg|by|call|do|drop|else|end|exit|for|forever|if|' + r'interpret|iterate|leave|nop|numeric|off|on|options|parse|' + r'pull|push|queue|return|say|select|signal|to|then|trace|until|' + r'while)\b', Keyword.Reserved), + ], + 'operator': [ + (r'(-|//|/|\(|\)|\*\*|\*|\\<<|\\<|\\==|\\=|\\>>|\\>|\\|\|\||\||' + r'&&|&|%|\+|<<=|<<|<=|<>|<|==|=|><|>=|>>=|>>|>|¬<<|¬<|¬==|¬=|' + r'¬>>|¬>|¬|\.|,)', Operator), + ], + 'string_double': [ + (r'[^"\n]+', String), + (r'""', String), + (r'"', String, '#pop'), + (r'\n', Text, '#pop'), # Stray 
linefeed also terminates strings. + ], + 'string_single': [ + (r'[^\'\n]', String), + (r'\'\'', String), + (r'\'', String, '#pop'), + (r'\n', Text, '#pop'), # Stray linefeed also terminates strings. + ], + 'comment': [ + (r'[^*]+', Comment.Multiline), + (r'\*/', Comment.Multiline, '#pop'), + (r'\*', Comment.Multiline), + ] + } + + _c = lambda s: re.compile(s, re.MULTILINE) + _ADDRESS_COMMAND_PATTERN = _c(r'^\s*address\s+command\b') + _ADDRESS_PATTERN = _c(r'^\s*address\s+') + _DO_WHILE_PATTERN = _c(r'^\s*do\s+while\b') + _IF_THEN_DO_PATTERN = _c(r'^\s*if\b.+\bthen\s+do\s*$') + _PROCEDURE_PATTERN = _c(r'^\s*([a-z_]\w*)(\s*)(:)(\s*)(procedure)\b') + _ELSE_DO_PATTERN = _c(r'\belse\s+do\s*$') + _PARSE_ARG_PATTERN = _c(r'^\s*parse\s+(upper\s+)?(arg|value)\b') + PATTERNS_AND_WEIGHTS = ( + (_ADDRESS_COMMAND_PATTERN, 0.2), + (_ADDRESS_PATTERN, 0.05), + (_DO_WHILE_PATTERN, 0.1), + (_ELSE_DO_PATTERN, 0.1), + (_IF_THEN_DO_PATTERN, 0.1), + (_PROCEDURE_PATTERN, 0.5), + (_PARSE_ARG_PATTERN, 0.2), + ) + + def analyse_text(text): + """ + Check for inital comment and patterns that distinguish Rexx from other + C-like languages. + """ + if re.search(r'/\*\**\s*rexx', text, re.IGNORECASE): + # Header matches MVS Rexx requirements, this is certainly a Rexx + # script. + return 1.0 + elif text.startswith('/*'): + # Header matches general Rexx requirements; the source code might + # still be any language using C comments such as C++, C# or Java. + lowerText = text.lower() + result = sum(weight + for (pattern, weight) in RexxLexer.PATTERNS_AND_WEIGHTS + if pattern.search(lowerText)) + 0.01 + return min(result, 1.0) + + +class MOOCodeLexer(RegexLexer): + """ + For `MOOCode `_ (the MOO scripting + language). + + .. 
versionadded:: 0.9 + """ + name = 'MOOCode' + filenames = ['*.moo'] + aliases = ['moocode', 'moo'] + mimetypes = ['text/x-moocode'] + + tokens = { + 'root': [ + # Numbers + (r'(0|[1-9][0-9_]*)', Number.Integer), + # Strings + (r'"(\\\\|\\"|[^"])*"', String), + # exceptions + (r'(E_PERM|E_DIV)', Name.Exception), + # db-refs + (r'((#[-0-9]+)|(\$\w+))', Name.Entity), + # Keywords + (r'\b(if|else|elseif|endif|for|endfor|fork|endfork|while' + r'|endwhile|break|continue|return|try' + r'|except|endtry|finally|in)\b', Keyword), + # builtins + (r'(random|length)', Name.Builtin), + # special variables + (r'(player|caller|this|args)', Name.Variable.Instance), + # skip whitespace + (r'\s+', Text), + (r'\n', Text), + # other operators + (r'([!;=,{}&|:.\[\]@()<>?]+)', Operator), + # function call + (r'(\w+)(\()', bygroups(Name.Function, Operator)), + # variables + (r'(\w+)', Text), + ] + } + + +class HybrisLexer(RegexLexer): + """ + For `Hybris `_ source code. + + .. versionadded:: 1.4 + """ + + name = 'Hybris' + aliases = ['hybris', 'hy'] + filenames = ['*.hy', '*.hyb'] + mimetypes = ['text/x-hybris', 'application/x-hybris'] + + flags = re.MULTILINE | re.DOTALL + + tokens = { + 'root': [ + # method names + (r'^(\s*(?:function|method|operator\s+)+?)' + r'([a-zA-Z_]\w*)' + r'(\s*)(\()', bygroups(Keyword, Name.Function, Text, Operator)), + (r'[^\S\n]+', Text), + (r'//.*?\n', Comment.Single), + (r'/\*.*?\*/', Comment.Multiline), + (r'@[a-zA-Z_][\w.]*', Name.Decorator), + (r'(break|case|catch|next|default|do|else|finally|for|foreach|of|' + r'unless|if|new|return|switch|me|throw|try|while)\b', Keyword), + (r'(extends|private|protected|public|static|throws|function|method|' + r'operator)\b', Keyword.Declaration), + (r'(true|false|null|__FILE__|__LINE__|__VERSION__|__LIB_PATH__|' + r'__INC_PATH__)\b', Keyword.Constant), + (r'(class|struct)(\s+)', + bygroups(Keyword.Declaration, Text), 'class'), + (r'(import|include)(\s+)', + bygroups(Keyword.Namespace, Text), 'import'), + (words(( + 
'gc_collect', 'gc_mm_items', 'gc_mm_usage', 'gc_collect_threshold', + 'urlencode', 'urldecode', 'base64encode', 'base64decode', 'sha1', 'crc32', 'sha2', + 'md5', 'md5_file', 'acos', 'asin', 'atan', 'atan2', 'ceil', 'cos', 'cosh', 'exp', + 'fabs', 'floor', 'fmod', 'log', 'log10', 'pow', 'sin', 'sinh', 'sqrt', 'tan', 'tanh', + 'isint', 'isfloat', 'ischar', 'isstring', 'isarray', 'ismap', 'isalias', 'typeof', + 'sizeof', 'toint', 'tostring', 'fromxml', 'toxml', 'binary', 'pack', 'load', 'eval', + 'var_names', 'var_values', 'user_functions', 'dyn_functions', 'methods', 'call', + 'call_method', 'mknod', 'mkfifo', 'mount', 'umount2', 'umount', 'ticks', 'usleep', + 'sleep', 'time', 'strtime', 'strdate', 'dllopen', 'dlllink', 'dllcall', 'dllcall_argv', + 'dllclose', 'env', 'exec', 'fork', 'getpid', 'wait', 'popen', 'pclose', 'exit', 'kill', + 'pthread_create', 'pthread_create_argv', 'pthread_exit', 'pthread_join', 'pthread_kill', + 'smtp_send', 'http_get', 'http_post', 'http_download', 'socket', 'bind', 'listen', + 'accept', 'getsockname', 'getpeername', 'settimeout', 'connect', 'server', 'recv', + 'send', 'close', 'print', 'println', 'printf', 'input', 'readline', 'serial_open', + 'serial_fcntl', 'serial_get_attr', 'serial_get_ispeed', 'serial_get_ospeed', + 'serial_set_attr', 'serial_set_ispeed', 'serial_set_ospeed', 'serial_write', + 'serial_read', 'serial_close', 'xml_load', 'xml_parse', 'fopen', 'fseek', 'ftell', + 'fsize', 'fread', 'fwrite', 'fgets', 'fclose', 'file', 'readdir', 'pcre_replace', 'size', + 'pop', 'unmap', 'has', 'keys', 'values', 'length', 'find', 'substr', 'replace', 'split', + 'trim', 'remove', 'contains', 'join'), suffix=r'\b'), + Name.Builtin), + (words(( + 'MethodReference', 'Runner', 'Dll', 'Thread', 'Pipe', 'Process', + 'Runnable', 'CGI', 'ClientSocket', 'Socket', 'ServerSocket', + 'File', 'Console', 'Directory', 'Exception'), suffix=r'\b'), + Keyword.Type), + (r'"(\\\\|\\"|[^"])*"', String), + (r"'\\.'|'[^\\]'|'\\u[0-9a-f]{4}'", String.Char), + 
(r'(\.)([a-zA-Z_]\w*)', + bygroups(Operator, Name.Attribute)), + (r'[a-zA-Z_]\w*:', Name.Label), + (r'[a-zA-Z_$]\w*', Name), + (r'[~^*!%&\[\](){}<>|+=:;,./?\-@]+', Operator), + (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), + (r'0x[0-9a-f]+', Number.Hex), + (r'[0-9]+L?', Number.Integer), + (r'\n', Text), + ], + 'class': [ + (r'[a-zA-Z_]\w*', Name.Class, '#pop') + ], + 'import': [ + (r'[\w.]+\*?', Name.Namespace, '#pop') + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/shell.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/shell.py similarity index 87% rename from plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/shell.py rename to plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/shell.py index 5ec9dea..1bbfd7a 100644 --- a/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/shell.py +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/shell.py @@ -5,7 +5,7 @@ Lexers for various shells. - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ @@ -27,28 +27,32 @@ class BashLexer(RegexLexer): """ Lexer for (ba|k|)sh shell scripts. - *New in Pygments 0.6.* + .. 
versionadded:: 0.6 """ name = 'Bash' - aliases = ['bash', 'sh', 'ksh'] + aliases = ['bash', 'sh', 'ksh', 'shell'] filenames = ['*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', - '.bashrc', 'bashrc', '.bash_*', 'bash_*'] + '.bashrc', 'bashrc', '.bash_*', 'bash_*', 'PKGBUILD'] mimetypes = ['application/x-sh', 'application/x-shellscript'] tokens = { 'root': [ include('basic'), - (r'\$\(\(', Keyword, 'math'), - (r'\$\(', Keyword, 'paren'), - (r'\${#?', Keyword, 'curly'), (r'`', String.Backtick, 'backticks'), include('data'), + include('interp'), + ], + 'interp': [ + (r'\$\(\(', Keyword, 'math'), + (r'\$\(', Keyword, 'paren'), + (r'\$\{#?', String.Interpol, 'curly'), + (r'\$#?(\w+|.)', Name.Variable), ], 'basic': [ (r'\b(if|fi|else|while|do|done|for|then|return|function|case|' - r'select|continue|until|esac|elif)\s*\b', - Keyword), + r'select|continue|until|esac|elif)(\s*)\b', + bygroups(Keyword, Text)), (r'\b(alias|bg|bind|break|builtin|caller|cd|command|compgen|' r'complete|declare|dirs|disown|echo|enable|eval|exec|exit|' r'export|false|fc|fg|getopts|hash|help|history|jobs|kill|let|' @@ -65,22 +69,28 @@ class BashLexer(RegexLexer): (r'&&|\|\|', Operator), ], 'data': [ - (r'(?s)\$?"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double), - (r"(?s)\$?'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single), + (r'(?s)\$?"(\\\\|\\[0-7]+|\\.|[^"\\$])*"', String.Double), + (r'"', String.Double, 'string'), + (r"(?s)\$'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single), + (r"(?s)'.*?'", String.Single), (r';', Punctuation), (r'&', Punctuation), (r'\|', Punctuation), (r'\s+', Text), - (r'[^=\s\[\]{}()$"\'`\\<&|;]+', Text), (r'\d+(?= |\Z)', Number), - (r'\$#?(\w+|.)', Name.Variable), + (r'[^=\s\[\]{}()$"\'`\\<&|;]+', Text), (r'<', Text), ], + 'string': [ + (r'"', String.Double, '#pop'), + (r'(?s)(\\\\|\\[0-7]+|\\.|[^"\\$])+', String.Double), + include('interp'), + ], 'curly': [ - (r'}', Keyword, '#pop'), + (r'\}', String.Interpol, '#pop'), (r':-', Keyword), - (r'[a-zA-Z0-9_]+', Name.Variable), - 
(r'[^}:"\'`$]+', Punctuation), + (r'\w+', Name.Variable), + (r'[^}:"\'`$\\]+', Punctuation), (r':', Punctuation), include('root'), ], @@ -91,6 +101,8 @@ class BashLexer(RegexLexer): 'math': [ (r'\)\)', Keyword, '#pop'), (r'[-+*/%^|&]|\*\*|\|\|', Operator), + (r'\d+#\d+', Number), + (r'\d+#(?! )', Number), (r'\d+', Number), include('root'), ], @@ -111,7 +123,7 @@ class BashSessionLexer(Lexer): """ Lexer for simplistic shell sessions. - *New in Pygments 1.1.* + .. versionadded:: 1.1 """ name = 'Bash Session' @@ -162,7 +174,7 @@ class ShellSessionLexer(Lexer): """ Lexer for shell sessions that works with different command prompts - *New in Pygments 1.6.* + .. versionadded:: 1.6 """ name = 'Shell Session' @@ -179,7 +191,7 @@ class ShellSessionLexer(Lexer): for match in line_re.finditer(text): line = match.group() - m = re.match(r'^((?:\[?\S+@[^$#%]+)[$#%])(.*\n?)', line) + m = re.match(r'^((?:\[?\S+@[^$#%]+\]?\s*)[$#%])(.*\n?)', line) if m: # To support output lexers (say diff output), the output # needs to be broken by prompts whenever the output lexer @@ -208,10 +220,10 @@ class BatchLexer(RegexLexer): """ Lexer for the DOS/Windows Batch file format. - *New in Pygments 0.7.* + .. 
versionadded:: 0.7 """ name = 'Batchfile' - aliases = ['bat', 'dosbatch', 'winbatch'] + aliases = ['bat', 'batch', 'dosbatch', 'winbatch'] filenames = ['*.bat', '*.cmd'] mimetypes = ['application/x-dos-batch'] @@ -228,9 +240,9 @@ class BatchLexer(RegexLexer): # like %~$VAR:zlt (r'%%?[~$:\w]+%?', Name.Variable), (r'::.*', Comment), # Technically :: only works at BOL - (r'(set)(\s+)(\w+)', bygroups(Keyword, Text, Name.Variable)), - (r'(call)(\s+)(:\w+)', bygroups(Keyword, Text, Name.Label)), - (r'(goto)(\s+)(\w+)', bygroups(Keyword, Text, Name.Label)), + (r'\b(set)(\s+)(\w+)', bygroups(Keyword, Text, Name.Variable)), + (r'\b(call)(\s+)(:\w+)', bygroups(Keyword, Text, Name.Label)), + (r'\b(goto)(\s+)(\w+)', bygroups(Keyword, Text, Name.Label)), (r'\b(set|call|echo|on|off|endlocal|for|do|goto|if|pause|' r'setlocal|shift|errorlevel|exist|defined|cmdextversion|' r'errorlevel|else|cd|md|del|deltree|cls|choice)\b', Keyword), @@ -264,7 +276,7 @@ class TcshLexer(RegexLexer): """ Lexer for tcsh scripts. - *New in Pygments 0.10.* + .. 
versionadded:: 0.10 """ name = 'Tcsh' @@ -276,7 +288,7 @@ class TcshLexer(RegexLexer): 'root': [ include('basic'), (r'\$\(', Keyword, 'paren'), - (r'\${#?', Keyword, 'curly'), + (r'\$\{#?', Keyword, 'curly'), (r'`', String.Backtick, 'backticks'), include('data'), ], @@ -294,24 +306,25 @@ class TcshLexer(RegexLexer): r'umask|unalias|uncomplete|unhash|universe|unlimit|unset|unsetenv|' r'ver|wait|warp|watchlog|where|which)\s*\b', Name.Builtin), - (r'#.*\n', Comment), + (r'#.*', Comment), (r'\\[\w\W]', String.Escape), (r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)), (r'[\[\]{}()=]+', Operator), (r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String), + (r';', Punctuation), ], 'data': [ (r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double), (r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single), (r'\s+', Text), - (r'[^=\s\[\]{}()$"\'`\\]+', Text), + (r'[^=\s\[\]{}()$"\'`\\;#]+', Text), (r'\d+(?= |\Z)', Number), (r'\$#?(\w+|.)', Name.Variable), ], 'curly': [ - (r'}', Keyword, '#pop'), + (r'\}', Keyword, '#pop'), (r':-', Keyword), - (r'[a-zA-Z0-9_]+', Name.Variable), + (r'\w+', Name.Variable), (r'[^}:"\'`$]+', Punctuation), (r':', Punctuation), include('root'), @@ -331,7 +344,7 @@ class PowerShellLexer(RegexLexer): """ For Windows PowerShell code. - *New in Pygments 1.5.* + .. versionadded:: 1.5 """ name = 'PowerShell' aliases = ['powershell', 'posh', 'ps1', 'psm1'] @@ -387,13 +400,13 @@ class PowerShellLexer(RegexLexer): (r'`[\'"$@-]', Punctuation), (r'"', String.Double, 'string'), (r"'([^']|'')*'", String.Single), - (r'(\$|@@|@)((global|script|private|env):)?[a-z0-9_]+', + (r'(\$|@@|@)((global|script|private|env):)?\w+', Name.Variable), (r'(%s)\b' % '|'.join(keywords), Keyword), (r'-(%s)\b' % '|'.join(operators), Operator), - (r'(%s)-[a-z_][a-z0-9_]*\b' % '|'.join(verbs), Name.Builtin), - (r'\[[a-z_\[][a-z0-9_. `,\[\]]*\]', Name.Constant), # .net [type]s - (r'-[a-z_][a-z0-9_]*', Name), + (r'(%s)-[a-z_]\w*\b' % '|'.join(verbs), Name.Builtin), + (r'\[[a-z_\[][\w. 
`,\[\]]*\]', Name.Constant), # .net [type]s + (r'-[a-z_]\w*', Name), (r'\w+', Name), (r'[.,;@{}\[\]$()=+*/\\&%!~?^`|<>-]|::', Punctuation), ], @@ -408,7 +421,7 @@ class PowerShellLexer(RegexLexer): (r'[#&.]', Comment.Multiline), ], 'string': [ - (r"`[0abfnrtv'\"\$]", String.Escape), + (r"`[0abfnrtv'\"$`]", String.Escape), (r'[^$`"]+', String.Double), (r'\$\(', Punctuation, 'child'), (r'""', String.Double), diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/smalltalk.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/smalltalk.py new file mode 100644 index 0000000..4e78ac0 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/smalltalk.py @@ -0,0 +1,195 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.smalltalk + ~~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for Smalltalk and related languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, include, bygroups, default +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['SmalltalkLexer', 'NewspeakLexer'] + + +class SmalltalkLexer(RegexLexer): + """ + For `Smalltalk `_ syntax. + Contributed by Stefan Matthias Aust. + Rewritten by Nils Winter. + + .. 
versionadded:: 0.10 + """ + name = 'Smalltalk' + filenames = ['*.st'] + aliases = ['smalltalk', 'squeak', 'st'] + mimetypes = ['text/x-smalltalk'] + + tokens = { + 'root': [ + (r'(<)(\w+:)(.*?)(>)', bygroups(Text, Keyword, Text, Text)), + include('squeak fileout'), + include('whitespaces'), + include('method definition'), + (r'(\|)([\w\s]*)(\|)', bygroups(Operator, Name.Variable, Operator)), + include('objects'), + (r'\^|\:=|\_', Operator), + # temporaries + (r'[\]({}.;!]', Text), + ], + 'method definition': [ + # Not perfect can't allow whitespaces at the beginning and the + # without breaking everything + (r'([a-zA-Z]+\w*:)(\s*)(\w+)', + bygroups(Name.Function, Text, Name.Variable)), + (r'^(\b[a-zA-Z]+\w*\b)(\s*)$', bygroups(Name.Function, Text)), + (r'^([-+*/\\~<>=|&!?,@%]+)(\s*)(\w+)(\s*)$', + bygroups(Name.Function, Text, Name.Variable, Text)), + ], + 'blockvariables': [ + include('whitespaces'), + (r'(:)(\s*)(\w+)', + bygroups(Operator, Text, Name.Variable)), + (r'\|', Operator, '#pop'), + default('#pop'), # else pop + ], + 'literals': [ + (r"'(''|[^'])*'", String, 'afterobject'), + (r'\$.', String.Char, 'afterobject'), + (r'#\(', String.Symbol, 'parenth'), + (r'\)', Text, 'afterobject'), + (r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number, 'afterobject'), + ], + '_parenth_helper': [ + include('whitespaces'), + (r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number), + (r'[-+*/\\~<>=|&#!?,@%\w:]+', String.Symbol), + # literals + (r"'(''|[^'])*'", String), + (r'\$.', String.Char), + (r'#*\(', String.Symbol, 'inner_parenth'), + ], + 'parenth': [ + # This state is a bit tricky since + # we can't just pop this state + (r'\)', String.Symbol, ('root', 'afterobject')), + include('_parenth_helper'), + ], + 'inner_parenth': [ + (r'\)', String.Symbol, '#pop'), + include('_parenth_helper'), + ], + 'whitespaces': [ + # skip whitespace and comments + (r'\s+', Text), + (r'"(""|[^"])*"', Comment), + ], + 'objects': [ + (r'\[', Text, 'blockvariables'), + (r'\]', Text, 'afterobject'), + 
(r'\b(self|super|true|false|nil|thisContext)\b', + Name.Builtin.Pseudo, 'afterobject'), + (r'\b[A-Z]\w*(?!:)\b', Name.Class, 'afterobject'), + (r'\b[a-z]\w*(?!:)\b', Name.Variable, 'afterobject'), + (r'#("(""|[^"])*"|[-+*/\\~<>=|&!?,@%]+|[\w:]+)', + String.Symbol, 'afterobject'), + include('literals'), + ], + 'afterobject': [ + (r'! !$', Keyword, '#pop'), # squeak chunk delimiter + include('whitespaces'), + (r'\b(ifTrue:|ifFalse:|whileTrue:|whileFalse:|timesRepeat:)', + Name.Builtin, '#pop'), + (r'\b(new\b(?!:))', Name.Builtin), + (r'\:=|\_', Operator, '#pop'), + (r'\b[a-zA-Z]+\w*:', Name.Function, '#pop'), + (r'\b[a-zA-Z]+\w*', Name.Function), + (r'\w+:?|[-+*/\\~<>=|&!?,@%]+', Name.Function, '#pop'), + (r'\.', Punctuation, '#pop'), + (r';', Punctuation), + (r'[\])}]', Text), + (r'[\[({]', Text, '#pop'), + ], + 'squeak fileout': [ + # Squeak fileout format (optional) + (r'^"(""|[^"])*"!', Keyword), + (r"^'(''|[^'])*'!", Keyword), + (r'^(!)(\w+)( commentStamp: )(.*?)( prior: .*?!\n)(.*?)(!)', + bygroups(Keyword, Name.Class, Keyword, String, Keyword, Text, Keyword)), + (r"^(!)(\w+(?: class)?)( methodsFor: )('(?:''|[^'])*')(.*?!)", + bygroups(Keyword, Name.Class, Keyword, String, Keyword)), + (r'^(\w+)( subclass: )(#\w+)' + r'(\s+instanceVariableNames: )(.*?)' + r'(\s+classVariableNames: )(.*?)' + r'(\s+poolDictionaries: )(.*?)' + r'(\s+category: )(.*?)(!)', + bygroups(Name.Class, Keyword, String.Symbol, Keyword, String, Keyword, + String, Keyword, String, Keyword, String, Keyword)), + (r'^(\w+(?: class)?)(\s+instanceVariableNames: )(.*?)(!)', + bygroups(Name.Class, Keyword, String, Keyword)), + (r'(!\n)(\].*)(! !)$', bygroups(Keyword, Text, Keyword)), + (r'! !$', Keyword), + ], + } + + +class NewspeakLexer(RegexLexer): + """ + For `Newspeak ` syntax. + + .. 
versionadded:: 1.1 + """ + name = 'Newspeak' + filenames = ['*.ns2'] + aliases = ['newspeak', ] + mimetypes = ['text/x-newspeak'] + + tokens = { + 'root': [ + (r'\b(Newsqueak2)\b', Keyword.Declaration), + (r"'[^']*'", String), + (r'\b(class)(\s+)(\w+)(\s*)', + bygroups(Keyword.Declaration, Text, Name.Class, Text)), + (r'\b(mixin|self|super|private|public|protected|nil|true|false)\b', + Keyword), + (r'(\w+\:)(\s*)([a-zA-Z_]\w+)', + bygroups(Name.Function, Text, Name.Variable)), + (r'(\w+)(\s*)(=)', + bygroups(Name.Attribute, Text, Operator)), + (r'<\w+>', Comment.Special), + include('expressionstat'), + include('whitespace') + ], + + 'expressionstat': [ + (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), + (r'\d+', Number.Integer), + (r':\w+', Name.Variable), + (r'(\w+)(::)', bygroups(Name.Variable, Operator)), + (r'\w+:', Name.Function), + (r'\w+', Name.Variable), + (r'\(|\)', Punctuation), + (r'\[|\]', Punctuation), + (r'\{|\}', Punctuation), + + (r'(\^|\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-|:)', Operator), + (r'\.|;', Punctuation), + include('whitespace'), + include('literals'), + ], + 'literals': [ + (r'\$.', String), + (r"'[^']*'", String), + (r"#'[^']*'", String.Symbol), + (r"#\w+:?", String.Symbol), + (r"#(\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-)+", String.Symbol) + ], + 'whitespace': [ + (r'\s+', Text), + (r'"[^"]*"', Comment) + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/snobol.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/snobol.py new file mode 100644 index 0000000..88455f9 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/snobol.py @@ -0,0 +1,83 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.snobol + ~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for the SNOBOL language. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +from pygments.lexer import RegexLexer, bygroups +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['SnobolLexer'] + + +class SnobolLexer(RegexLexer): + """ + Lexer for the SNOBOL4 programming language. + + Recognizes the common ASCII equivalents of the original SNOBOL4 operators. + Does not require spaces around binary operators. + + .. versionadded:: 1.5 + """ + + name = "Snobol" + aliases = ["snobol"] + filenames = ['*.snobol'] + mimetypes = ['text/x-snobol'] + + tokens = { + # root state, start of line + # comments, continuation lines, and directives start in column 1 + # as do labels + 'root': [ + (r'\*.*\n', Comment), + (r'[+.] ', Punctuation, 'statement'), + (r'-.*\n', Comment), + (r'END\s*\n', Name.Label, 'heredoc'), + (r'[A-Za-z$][\w$]*', Name.Label, 'statement'), + (r'\s+', Text, 'statement'), + ], + # statement state, line after continuation or label + 'statement': [ + (r'\s*\n', Text, '#pop'), + (r'\s+', Text), + (r'(?<=[^\w.])(LT|LE|EQ|NE|GE|GT|INTEGER|IDENT|DIFFER|LGT|SIZE|' + r'REPLACE|TRIM|DUPL|REMDR|DATE|TIME|EVAL|APPLY|OPSYN|LOAD|UNLOAD|' + r'LEN|SPAN|BREAK|ANY|NOTANY|TAB|RTAB|REM|POS|RPOS|FAIL|FENCE|' + r'ABORT|ARB|ARBNO|BAL|SUCCEED|INPUT|OUTPUT|TERMINAL)(?=[^\w.])', + Name.Builtin), + (r'[A-Za-z][\w.]*', Name), + # ASCII equivalents of original operators + # | for the EBCDIC equivalent, ! 
likewise + # \ for EBCDIC negation + (r'\*\*|[?$.!%*/#+\-@|&\\=]', Operator), + (r'"[^"]*"', String), + (r"'[^']*'", String), + # Accept SPITBOL syntax for real numbers + # as well as Macro SNOBOL4 + (r'[0-9]+(?=[^.EeDd])', Number.Integer), + (r'[0-9]+(\.[0-9]*)?([EDed][-+]?[0-9]+)?', Number.Float), + # Goto + (r':', Punctuation, 'goto'), + (r'[()<>,;]', Punctuation), + ], + # Goto block + 'goto': [ + (r'\s*\n', Text, "#pop:2"), + (r'\s+', Text), + (r'F|S', Keyword), + (r'(\()([A-Za-z][\w.]*)(\))', + bygroups(Punctuation, Name.Label, Punctuation)) + ], + # everything after the END statement is basically one + # big heredoc. + 'heredoc': [ + (r'.*\n', String.Heredoc) + ] + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/special.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/special.py similarity index 85% rename from plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/special.py rename to plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/special.py index 9b3cd50..bd86904 100644 --- a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/special.py +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/special.py @@ -5,16 +5,15 @@ Special lexers. - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. 
""" import re -import cStringIO from pygments.lexer import Lexer from pygments.token import Token, Error, Text -from pygments.util import get_choice_opt, b +from pygments.util import get_choice_opt, text_type, BytesIO __all__ = ['TextLexer', 'RawTokenLexer'] @@ -35,7 +34,8 @@ class TextLexer(Lexer): _ttype_cache = {} -line_re = re.compile(b('.*?\n')) +line_re = re.compile(b'.*?\n') + class RawTokenLexer(Lexer): """ @@ -60,12 +60,12 @@ class RawTokenLexer(Lexer): Lexer.__init__(self, **options) def get_tokens(self, text): - if isinstance(text, unicode): + if isinstance(text, text_type): # raw token stream never has any non-ASCII characters text = text.encode('ascii') if self.compress == 'gz': import gzip - gzipfile = gzip.GzipFile('', 'rb', 9, cStringIO.StringIO(text)) + gzipfile = gzip.GzipFile('', 'rb', 9, BytesIO(text)) text = gzipfile.read() elif self.compress == 'bz2': import bz2 @@ -73,7 +73,7 @@ class RawTokenLexer(Lexer): # do not call Lexer.get_tokens() because we do not want Unicode # decoding to occur, and stripping is not optional. 
- text = text.strip(b('\n')) + b('\n') + text = text.strip(b'\n') + b'\n' for i, t, v in self.get_tokens_unprocessed(text): yield t, v @@ -81,9 +81,9 @@ class RawTokenLexer(Lexer): length = 0 for match in line_re.finditer(text): try: - ttypestr, val = match.group().split(b('\t'), 1) + ttypestr, val = match.group().split(b'\t', 1) except ValueError: - val = match.group().decode(self.encoding) + val = match.group().decode('ascii', 'replace') ttype = Error else: ttype = _ttype_cache.get(ttypestr) diff --git a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/sql.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/sql.py similarity index 59% rename from plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/sql.py rename to plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/sql.py index 546d1f8..98425cd 100644 --- a/plugin/packages/wakatime/wakatime/packages/pygments2/pygments/lexers/sql.py +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/sql.py @@ -34,28 +34,30 @@ The ``tests/examplefiles`` contains a few test files with data to be parsed by these lexers. - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. 
""" import re -from pygments.lexer import Lexer, RegexLexer, do_insertions, bygroups +from pygments.lexer import Lexer, RegexLexer, do_insertions, bygroups, words from pygments.token import Punctuation, \ - Text, Comment, Operator, Keyword, Name, String, Number, Generic + Text, Comment, Operator, Keyword, Name, String, Number, Generic from pygments.lexers import get_lexer_by_name, ClassNotFound +from pygments.util import iteritems from pygments.lexers._postgres_builtins import KEYWORDS, DATATYPES, \ - PSEUDO_TYPES, PLPGSQL_KEYWORDS + PSEUDO_TYPES, PLPGSQL_KEYWORDS __all__ = ['PostgresLexer', 'PlPgsqlLexer', 'PostgresConsoleLexer', - 'SqlLexer', 'MySqlLexer', 'SqliteConsoleLexer'] + 'SqlLexer', 'MySqlLexer', 'SqliteConsoleLexer', 'RqlLexer'] line_re = re.compile('.*?\n') language_re = re.compile(r"\s+LANGUAGE\s+'?(\w+)'?", re.IGNORECASE) + def language_callback(lexer, match): """Parse the content of a $-string using a lexer @@ -101,7 +103,7 @@ class PostgresBase(object): if lang.lower() == 'sql': return get_lexer_by_name('postgresql', **self.options) - tries = [ lang ] + tries = [lang] if lang.startswith('pl'): tries.append(lang[2:]) if lang.endswith('u'): @@ -124,7 +126,7 @@ class PostgresLexer(PostgresBase, RegexLexer): """ Lexer for the PostgreSQL dialect of SQL. - *New in Pygments 1.5.* + .. 
versionadded:: 1.5 """ name = 'PostgreSQL SQL dialect' @@ -137,29 +139,29 @@ class PostgresLexer(PostgresBase, RegexLexer): (r'\s+', Text), (r'--.*?\n', Comment.Single), (r'/\*', Comment.Multiline, 'multiline-comments'), - (r'(' + '|'.join([s.replace(" ", "\s+") - for s in DATATYPES + PSEUDO_TYPES]) - + r')\b', Name.Builtin), - (r'(' + '|'.join(KEYWORDS) + r')\b', Keyword), + (r'(' + '|'.join(s.replace(" ", "\s+") + for s in DATATYPES + PSEUDO_TYPES) + + r')\b', Name.Builtin), + (words(KEYWORDS, suffix=r'\b'), Keyword), (r'[+*/<>=~!@#%^&|`?-]+', Operator), (r'::', Operator), # cast (r'\$\d+', Name.Variable), (r'([0-9]*\.[0-9]*|[0-9]+)(e[+-]?[0-9]+)?', Number.Float), (r'[0-9]+', Number.Integer), (r"(E|U&)?'(''|[^'])*'", String.Single), - (r'(U&)?"(""|[^"])*"', String.Name), # quoted identifier - (r'(?s)(\$[^\$]*\$)(.*?)(\1)', language_callback), - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name), + (r'(U&)?"(""|[^"])*"', String.Name), # quoted identifier + (r'(?s)(\$[^$]*\$)(.*?)(\1)', language_callback), + (r'[a-z_]\w*', Name), # psql variable in SQL - (r""":(['"]?)[a-z][a-z0-9_]*\b\1""", Name.Variable), + (r""":(['"]?)[a-z]\w*\b\1""", Name.Variable), - (r'[;:()\[\]\{\},\.]', Punctuation), + (r'[;:()\[\]{},.]', Punctuation), ], 'multiline-comments': [ (r'/\*', Comment.Multiline, 'multiline-comments'), (r'\*/', Comment.Multiline, '#pop'), - (r'[^/\*]+', Comment.Multiline), + (r'[^/*]+', Comment.Multiline), (r'[/*]', Comment.Multiline) ], } @@ -169,20 +171,20 @@ class PlPgsqlLexer(PostgresBase, RegexLexer): """ Handle the extra syntax in Pl/pgSQL language. - *New in Pygments 1.5.* + .. 
versionadded:: 1.5 """ name = 'PL/pgSQL' aliases = ['plpgsql'] mimetypes = ['text/x-plpgsql'] flags = re.IGNORECASE - tokens = dict((k, l[:]) for (k, l) in PostgresLexer.tokens.iteritems()) + tokens = dict((k, l[:]) for (k, l) in iteritems(PostgresLexer.tokens)) # extend the keywords list for i, pattern in enumerate(tokens['root']): if pattern[1] == Keyword: tokens['root'][i] = ( - r'(' + '|'.join(KEYWORDS + PLPGSQL_KEYWORDS) + r')\b', + words(KEYWORDS + PLPGSQL_KEYWORDS, suffix=r'\b'), Keyword) del i break @@ -191,10 +193,10 @@ class PlPgsqlLexer(PostgresBase, RegexLexer): # Add specific PL/pgSQL rules (before the SQL ones) tokens['root'][:0] = [ - (r'\%[a-z][a-z0-9_]*\b', Name.Builtin), # actually, a datatype + (r'\%[a-z]\w*\b', Name.Builtin), # actually, a datatype (r':=', Operator), - (r'\<\<[a-z][a-z0-9_]*\>\>', Name.Label), - (r'\#[a-z][a-z0-9_]*\b', Keyword.Pseudo), # #variable_conflict + (r'\<\<[a-z]\w*\>\>', Name.Label), + (r'\#[a-z]\w*\b', Keyword.Pseudo), # #variable_conflict ] @@ -210,7 +212,7 @@ class PsqlRegexLexer(PostgresBase, RegexLexer): aliases = [] # not public flags = re.IGNORECASE - tokens = dict((k, l[:]) for (k, l) in PostgresLexer.tokens.iteritems()) + tokens = dict((k, l[:]) for (k, l) in iteritems(PostgresLexer.tokens)) tokens['root'].append( (r'\\[^\s]+', Keyword.Pseudo, 'psql-command')) @@ -218,7 +220,7 @@ class PsqlRegexLexer(PostgresBase, RegexLexer): (r'\n', Text, 'root'), (r'\s+', Text), (r'\\[^\s]+', Keyword.Pseudo), - (r""":(['"]?)[a-z][a-z0-9_]*\b\1""", Name.Variable), + (r""":(['"]?)[a-z]\w*\b\1""", Name.Variable), (r"'(''|[^'])*'", String.Single), (r"`([^`])*`", String.Backtick), (r"[^\s]+", String.Symbol), @@ -239,24 +241,28 @@ class lookahead(object): def __init__(self, x): self.iter = iter(x) self._nextitem = None + def __iter__(self): return self + def send(self, i): self._nextitem = i return i - def next(self): + + def __next__(self): if self._nextitem is not None: ni = self._nextitem self._nextitem = None return ni - 
return self.iter.next() + return next(self.iter) + next = __next__ class PostgresConsoleLexer(Lexer): """ Lexer for psql sessions. - *New in Pygments 1.5.* + .. versionadded:: 1.5 """ name = 'PostgreSQL console (psql)' @@ -277,7 +283,7 @@ class PostgresConsoleLexer(Lexer): insertions = [] while 1: try: - line = lines.next() + line = next(lines) except StopIteration: # allow the emission of partially collected items # the repl loop will be broken below @@ -303,18 +309,18 @@ class PostgresConsoleLexer(Lexer): # TODO: better handle multiline comments at the end with # a lexer with an external state? if re_psql_command.match(curcode) \ - or re_end_command.search(curcode): + or re_end_command.search(curcode): break # Emit the combined stream of command and prompt(s) for item in do_insertions(insertions, - sql.get_tokens_unprocessed(curcode)): + sql.get_tokens_unprocessed(curcode)): yield item # Emit the output lines out_token = Generic.Output while 1: - line = lines.next() + line = next(lines) mprompt = re_prompt.match(line) if mprompt is not None: # push the line back to have it processed by the prompt @@ -324,7 +330,7 @@ class PostgresConsoleLexer(Lexer): mmsg = re_message.match(line) if mmsg is not None: if mmsg.group(1).startswith("ERROR") \ - or mmsg.group(1).startswith("FATAL"): + or mmsg.group(1).startswith("FATAL"): out_token = Generic.Error yield (mmsg.start(1), Generic.Strong, mmsg.group(1)) yield (mmsg.start(2), out_token, mmsg.group(2)) @@ -349,97 +355,100 @@ class SqlLexer(RegexLexer): (r'\s+', Text), (r'--.*?\n', Comment.Single), (r'/\*', Comment.Multiline, 'multiline-comments'), - (r'(ABORT|ABS|ABSOLUTE|ACCESS|ADA|ADD|ADMIN|AFTER|AGGREGATE|' - r'ALIAS|ALL|ALLOCATE|ALTER|ANALYSE|ANALYZE|AND|ANY|ARE|AS|' - r'ASC|ASENSITIVE|ASSERTION|ASSIGNMENT|ASYMMETRIC|AT|ATOMIC|' - r'AUTHORIZATION|AVG|BACKWARD|BEFORE|BEGIN|BETWEEN|BITVAR|' - r'BIT_LENGTH|BOTH|BREADTH|BY|C|CACHE|CALL|CALLED|CARDINALITY|' - r'CASCADE|CASCADED|CASE|CAST|CATALOG|CATALOG_NAME|CHAIN|' - 
r'CHARACTERISTICS|CHARACTER_LENGTH|CHARACTER_SET_CATALOG|' - r'CHARACTER_SET_NAME|CHARACTER_SET_SCHEMA|CHAR_LENGTH|CHECK|' - r'CHECKED|CHECKPOINT|CLASS|CLASS_ORIGIN|CLOB|CLOSE|CLUSTER|' - r'COALSECE|COBOL|COLLATE|COLLATION|COLLATION_CATALOG|' - r'COLLATION_NAME|COLLATION_SCHEMA|COLUMN|COLUMN_NAME|' - r'COMMAND_FUNCTION|COMMAND_FUNCTION_CODE|COMMENT|COMMIT|' - r'COMMITTED|COMPLETION|CONDITION_NUMBER|CONNECT|CONNECTION|' - r'CONNECTION_NAME|CONSTRAINT|CONSTRAINTS|CONSTRAINT_CATALOG|' - r'CONSTRAINT_NAME|CONSTRAINT_SCHEMA|CONSTRUCTOR|CONTAINS|' - r'CONTINUE|CONVERSION|CONVERT|COPY|CORRESPONTING|COUNT|' - r'CREATE|CREATEDB|CREATEUSER|CROSS|CUBE|CURRENT|CURRENT_DATE|' - r'CURRENT_PATH|CURRENT_ROLE|CURRENT_TIME|CURRENT_TIMESTAMP|' - r'CURRENT_USER|CURSOR|CURSOR_NAME|CYCLE|DATA|DATABASE|' - r'DATETIME_INTERVAL_CODE|DATETIME_INTERVAL_PRECISION|DAY|' - r'DEALLOCATE|DECLARE|DEFAULT|DEFAULTS|DEFERRABLE|DEFERRED|' - r'DEFINED|DEFINER|DELETE|DELIMITER|DELIMITERS|DEREF|DESC|' - r'DESCRIBE|DESCRIPTOR|DESTROY|DESTRUCTOR|DETERMINISTIC|' - r'DIAGNOSTICS|DICTIONARY|DISCONNECT|DISPATCH|DISTINCT|DO|' - r'DOMAIN|DROP|DYNAMIC|DYNAMIC_FUNCTION|DYNAMIC_FUNCTION_CODE|' - r'EACH|ELSE|ENCODING|ENCRYPTED|END|END-EXEC|EQUALS|ESCAPE|EVERY|' - r'EXCEPTION|EXCEPT|EXCLUDING|EXCLUSIVE|EXEC|EXECUTE|EXISTING|' - r'EXISTS|EXPLAIN|EXTERNAL|EXTRACT|FALSE|FETCH|FINAL|FIRST|FOR|' - r'FORCE|FOREIGN|FORTRAN|FORWARD|FOUND|FREE|FREEZE|FROM|FULL|' - r'FUNCTION|G|GENERAL|GENERATED|GET|GLOBAL|GO|GOTO|GRANT|GRANTED|' - r'GROUP|GROUPING|HANDLER|HAVING|HIERARCHY|HOLD|HOST|IDENTITY|' - r'IGNORE|ILIKE|IMMEDIATE|IMMUTABLE|IMPLEMENTATION|IMPLICIT|IN|' - r'INCLUDING|INCREMENT|INDEX|INDITCATOR|INFIX|INHERITS|INITIALIZE|' - r'INITIALLY|INNER|INOUT|INPUT|INSENSITIVE|INSERT|INSTANTIABLE|' - r'INSTEAD|INTERSECT|INTO|INVOKER|IS|ISNULL|ISOLATION|ITERATE|JOIN|' - r'KEY|KEY_MEMBER|KEY_TYPE|LANCOMPILER|LANGUAGE|LARGE|LAST|' - r'LATERAL|LEADING|LEFT|LENGTH|LESS|LEVEL|LIKE|LIMIT|LISTEN|LOAD|' - 
r'LOCAL|LOCALTIME|LOCALTIMESTAMP|LOCATION|LOCATOR|LOCK|LOWER|' - r'MAP|MATCH|MAX|MAXVALUE|MESSAGE_LENGTH|MESSAGE_OCTET_LENGTH|' - r'MESSAGE_TEXT|METHOD|MIN|MINUTE|MINVALUE|MOD|MODE|MODIFIES|' - r'MODIFY|MONTH|MORE|MOVE|MUMPS|NAMES|NATIONAL|NATURAL|NCHAR|' - r'NCLOB|NEW|NEXT|NO|NOCREATEDB|NOCREATEUSER|NONE|NOT|NOTHING|' - r'NOTIFY|NOTNULL|NULL|NULLABLE|NULLIF|OBJECT|OCTET_LENGTH|OF|OFF|' - r'OFFSET|OIDS|OLD|ON|ONLY|OPEN|OPERATION|OPERATOR|OPTION|OPTIONS|' - r'OR|ORDER|ORDINALITY|OUT|OUTER|OUTPUT|OVERLAPS|OVERLAY|OVERRIDING|' - r'OWNER|PAD|PARAMETER|PARAMETERS|PARAMETER_MODE|PARAMATER_NAME|' - r'PARAMATER_ORDINAL_POSITION|PARAMETER_SPECIFIC_CATALOG|' - r'PARAMETER_SPECIFIC_NAME|PARAMATER_SPECIFIC_SCHEMA|PARTIAL|' - r'PASCAL|PENDANT|PLACING|PLI|POSITION|POSTFIX|PRECISION|PREFIX|' - r'PREORDER|PREPARE|PRESERVE|PRIMARY|PRIOR|PRIVILEGES|PROCEDURAL|' - r'PROCEDURE|PUBLIC|READ|READS|RECHECK|RECURSIVE|REF|REFERENCES|' - r'REFERENCING|REINDEX|RELATIVE|RENAME|REPEATABLE|REPLACE|RESET|' - r'RESTART|RESTRICT|RESULT|RETURN|RETURNED_LENGTH|' - r'RETURNED_OCTET_LENGTH|RETURNED_SQLSTATE|RETURNS|REVOKE|RIGHT|' - r'ROLE|ROLLBACK|ROLLUP|ROUTINE|ROUTINE_CATALOG|ROUTINE_NAME|' - r'ROUTINE_SCHEMA|ROW|ROWS|ROW_COUNT|RULE|SAVE_POINT|SCALE|SCHEMA|' - r'SCHEMA_NAME|SCOPE|SCROLL|SEARCH|SECOND|SECURITY|SELECT|SELF|' - r'SENSITIVE|SERIALIZABLE|SERVER_NAME|SESSION|SESSION_USER|SET|' - r'SETOF|SETS|SHARE|SHOW|SIMILAR|SIMPLE|SIZE|SOME|SOURCE|SPACE|' - r'SPECIFIC|SPECIFICTYPE|SPECIFIC_NAME|SQL|SQLCODE|SQLERROR|' - r'SQLEXCEPTION|SQLSTATE|SQLWARNINIG|STABLE|START|STATE|STATEMENT|' - r'STATIC|STATISTICS|STDIN|STDOUT|STORAGE|STRICT|STRUCTURE|STYPE|' - r'SUBCLASS_ORIGIN|SUBLIST|SUBSTRING|SUM|SYMMETRIC|SYSID|SYSTEM|' - r'SYSTEM_USER|TABLE|TABLE_NAME| TEMP|TEMPLATE|TEMPORARY|TERMINATE|' - r'THAN|THEN|TIMESTAMP|TIMEZONE_HOUR|TIMEZONE_MINUTE|TO|TOAST|' - r'TRAILING|TRANSATION|TRANSACTIONS_COMMITTED|' - r'TRANSACTIONS_ROLLED_BACK|TRANSATION_ACTIVE|TRANSFORM|' - 
r'TRANSFORMS|TRANSLATE|TRANSLATION|TREAT|TRIGGER|TRIGGER_CATALOG|' - r'TRIGGER_NAME|TRIGGER_SCHEMA|TRIM|TRUE|TRUNCATE|TRUSTED|TYPE|' - r'UNCOMMITTED|UNDER|UNENCRYPTED|UNION|UNIQUE|UNKNOWN|UNLISTEN|' - r'UNNAMED|UNNEST|UNTIL|UPDATE|UPPER|USAGE|USER|' - r'USER_DEFINED_TYPE_CATALOG|USER_DEFINED_TYPE_NAME|' - r'USER_DEFINED_TYPE_SCHEMA|USING|VACUUM|VALID|VALIDATOR|VALUES|' - r'VARIABLE|VERBOSE|VERSION|VIEW|VOLATILE|WHEN|WHENEVER|WHERE|' - r'WITH|WITHOUT|WORK|WRITE|YEAR|ZONE)\b', Keyword), - (r'(ARRAY|BIGINT|BINARY|BIT|BLOB|BOOLEAN|CHAR|CHARACTER|DATE|' - r'DEC|DECIMAL|FLOAT|INT|INTEGER|INTERVAL|NUMBER|NUMERIC|REAL|' - r'SERIAL|SMALLINT|VARCHAR|VARYING|INT8|SERIAL8|TEXT)\b', + (words(( + 'ABORT', 'ABS', 'ABSOLUTE', 'ACCESS', 'ADA', 'ADD', 'ADMIN', 'AFTER', 'AGGREGATE', + 'ALIAS', 'ALL', 'ALLOCATE', 'ALTER', 'ANALYSE', 'ANALYZE', 'AND', 'ANY', 'ARE', 'AS', + 'ASC', 'ASENSITIVE', 'ASSERTION', 'ASSIGNMENT', 'ASYMMETRIC', 'AT', 'ATOMIC', + 'AUTHORIZATION', 'AVG', 'BACKWARD', 'BEFORE', 'BEGIN', 'BETWEEN', 'BITVAR', + 'BIT_LENGTH', 'BOTH', 'BREADTH', 'BY', 'C', 'CACHE', 'CALL', 'CALLED', 'CARDINALITY', + 'CASCADE', 'CASCADED', 'CASE', 'CAST', 'CATALOG', 'CATALOG_NAME', 'CHAIN', + 'CHARACTERISTICS', 'CHARACTER_LENGTH', 'CHARACTER_SET_CATALOG', + 'CHARACTER_SET_NAME', 'CHARACTER_SET_SCHEMA', 'CHAR_LENGTH', 'CHECK', + 'CHECKED', 'CHECKPOINT', 'CLASS', 'CLASS_ORIGIN', 'CLOB', 'CLOSE', 'CLUSTER', + 'COALSECE', 'COBOL', 'COLLATE', 'COLLATION', 'COLLATION_CATALOG', + 'COLLATION_NAME', 'COLLATION_SCHEMA', 'COLUMN', 'COLUMN_NAME', + 'COMMAND_FUNCTION', 'COMMAND_FUNCTION_CODE', 'COMMENT', 'COMMIT', + 'COMMITTED', 'COMPLETION', 'CONDITION_NUMBER', 'CONNECT', 'CONNECTION', + 'CONNECTION_NAME', 'CONSTRAINT', 'CONSTRAINTS', 'CONSTRAINT_CATALOG', + 'CONSTRAINT_NAME', 'CONSTRAINT_SCHEMA', 'CONSTRUCTOR', 'CONTAINS', + 'CONTINUE', 'CONVERSION', 'CONVERT', 'COPY', 'CORRESPONTING', 'COUNT', + 'CREATE', 'CREATEDB', 'CREATEUSER', 'CROSS', 'CUBE', 'CURRENT', 'CURRENT_DATE', + 'CURRENT_PATH', 
'CURRENT_ROLE', 'CURRENT_TIME', 'CURRENT_TIMESTAMP', + 'CURRENT_USER', 'CURSOR', 'CURSOR_NAME', 'CYCLE', 'DATA', 'DATABASE', + 'DATETIME_INTERVAL_CODE', 'DATETIME_INTERVAL_PRECISION', 'DAY', + 'DEALLOCATE', 'DECLARE', 'DEFAULT', 'DEFAULTS', 'DEFERRABLE', 'DEFERRED', + 'DEFINED', 'DEFINER', 'DELETE', 'DELIMITER', 'DELIMITERS', 'DEREF', 'DESC', + 'DESCRIBE', 'DESCRIPTOR', 'DESTROY', 'DESTRUCTOR', 'DETERMINISTIC', + 'DIAGNOSTICS', 'DICTIONARY', 'DISCONNECT', 'DISPATCH', 'DISTINCT', 'DO', + 'DOMAIN', 'DROP', 'DYNAMIC', 'DYNAMIC_FUNCTION', 'DYNAMIC_FUNCTION_CODE', + 'EACH', 'ELSE', 'ENCODING', 'ENCRYPTED', 'END', 'END-EXEC', 'EQUALS', 'ESCAPE', 'EVERY', + 'EXCEPTION', 'EXCEPT', 'EXCLUDING', 'EXCLUSIVE', 'EXEC', 'EXECUTE', 'EXISTING', + 'EXISTS', 'EXPLAIN', 'EXTERNAL', 'EXTRACT', 'FALSE', 'FETCH', 'FINAL', 'FIRST', 'FOR', + 'FORCE', 'FOREIGN', 'FORTRAN', 'FORWARD', 'FOUND', 'FREE', 'FREEZE', 'FROM', 'FULL', + 'FUNCTION', 'G', 'GENERAL', 'GENERATED', 'GET', 'GLOBAL', 'GO', 'GOTO', 'GRANT', 'GRANTED', + 'GROUP', 'GROUPING', 'HANDLER', 'HAVING', 'HIERARCHY', 'HOLD', 'HOST', 'IDENTITY', + 'IGNORE', 'ILIKE', 'IMMEDIATE', 'IMMUTABLE', 'IMPLEMENTATION', 'IMPLICIT', 'IN', + 'INCLUDING', 'INCREMENT', 'INDEX', 'INDITCATOR', 'INFIX', 'INHERITS', 'INITIALIZE', + 'INITIALLY', 'INNER', 'INOUT', 'INPUT', 'INSENSITIVE', 'INSERT', 'INSTANTIABLE', + 'INSTEAD', 'INTERSECT', 'INTO', 'INVOKER', 'IS', 'ISNULL', 'ISOLATION', 'ITERATE', 'JOIN', + 'KEY', 'KEY_MEMBER', 'KEY_TYPE', 'LANCOMPILER', 'LANGUAGE', 'LARGE', 'LAST', + 'LATERAL', 'LEADING', 'LEFT', 'LENGTH', 'LESS', 'LEVEL', 'LIKE', 'LIMIT', 'LISTEN', 'LOAD', + 'LOCAL', 'LOCALTIME', 'LOCALTIMESTAMP', 'LOCATION', 'LOCATOR', 'LOCK', 'LOWER', + 'MAP', 'MATCH', 'MAX', 'MAXVALUE', 'MESSAGE_LENGTH', 'MESSAGE_OCTET_LENGTH', + 'MESSAGE_TEXT', 'METHOD', 'MIN', 'MINUTE', 'MINVALUE', 'MOD', 'MODE', 'MODIFIES', + 'MODIFY', 'MONTH', 'MORE', 'MOVE', 'MUMPS', 'NAMES', 'NATIONAL', 'NATURAL', 'NCHAR', + 'NCLOB', 'NEW', 'NEXT', 'NO', 'NOCREATEDB', 
'NOCREATEUSER', 'NONE', 'NOT', 'NOTHING', + 'NOTIFY', 'NOTNULL', 'NULL', 'NULLABLE', 'NULLIF', 'OBJECT', 'OCTET_LENGTH', 'OF', 'OFF', + 'OFFSET', 'OIDS', 'OLD', 'ON', 'ONLY', 'OPEN', 'OPERATION', 'OPERATOR', 'OPTION', 'OPTIONS', + 'OR', 'ORDER', 'ORDINALITY', 'OUT', 'OUTER', 'OUTPUT', 'OVERLAPS', 'OVERLAY', 'OVERRIDING', + 'OWNER', 'PAD', 'PARAMETER', 'PARAMETERS', 'PARAMETER_MODE', 'PARAMATER_NAME', + 'PARAMATER_ORDINAL_POSITION', 'PARAMETER_SPECIFIC_CATALOG', + 'PARAMETER_SPECIFIC_NAME', 'PARAMATER_SPECIFIC_SCHEMA', 'PARTIAL', + 'PASCAL', 'PENDANT', 'PLACING', 'PLI', 'POSITION', 'POSTFIX', 'PRECISION', 'PREFIX', + 'PREORDER', 'PREPARE', 'PRESERVE', 'PRIMARY', 'PRIOR', 'PRIVILEGES', 'PROCEDURAL', + 'PROCEDURE', 'PUBLIC', 'READ', 'READS', 'RECHECK', 'RECURSIVE', 'REF', 'REFERENCES', + 'REFERENCING', 'REINDEX', 'RELATIVE', 'RENAME', 'REPEATABLE', 'REPLACE', 'RESET', + 'RESTART', 'RESTRICT', 'RESULT', 'RETURN', 'RETURNED_LENGTH', + 'RETURNED_OCTET_LENGTH', 'RETURNED_SQLSTATE', 'RETURNS', 'REVOKE', 'RIGHT', + 'ROLE', 'ROLLBACK', 'ROLLUP', 'ROUTINE', 'ROUTINE_CATALOG', 'ROUTINE_NAME', + 'ROUTINE_SCHEMA', 'ROW', 'ROWS', 'ROW_COUNT', 'RULE', 'SAVE_POINT', 'SCALE', 'SCHEMA', + 'SCHEMA_NAME', 'SCOPE', 'SCROLL', 'SEARCH', 'SECOND', 'SECURITY', 'SELECT', 'SELF', + 'SENSITIVE', 'SERIALIZABLE', 'SERVER_NAME', 'SESSION', 'SESSION_USER', 'SET', + 'SETOF', 'SETS', 'SHARE', 'SHOW', 'SIMILAR', 'SIMPLE', 'SIZE', 'SOME', 'SOURCE', 'SPACE', + 'SPECIFIC', 'SPECIFICTYPE', 'SPECIFIC_NAME', 'SQL', 'SQLCODE', 'SQLERROR', + 'SQLEXCEPTION', 'SQLSTATE', 'SQLWARNINIG', 'STABLE', 'START', 'STATE', 'STATEMENT', + 'STATIC', 'STATISTICS', 'STDIN', 'STDOUT', 'STORAGE', 'STRICT', 'STRUCTURE', 'STYPE', + 'SUBCLASS_ORIGIN', 'SUBLIST', 'SUBSTRING', 'SUM', 'SYMMETRIC', 'SYSID', 'SYSTEM', + 'SYSTEM_USER', 'TABLE', 'TABLE_NAME', ' TEMP', 'TEMPLATE', 'TEMPORARY', 'TERMINATE', + 'THAN', 'THEN', 'TIMESTAMP', 'TIMEZONE_HOUR', 'TIMEZONE_MINUTE', 'TO', 'TOAST', + 'TRAILING', 'TRANSATION', 
'TRANSACTIONS_COMMITTED', + 'TRANSACTIONS_ROLLED_BACK', 'TRANSATION_ACTIVE', 'TRANSFORM', + 'TRANSFORMS', 'TRANSLATE', 'TRANSLATION', 'TREAT', 'TRIGGER', 'TRIGGER_CATALOG', + 'TRIGGER_NAME', 'TRIGGER_SCHEMA', 'TRIM', 'TRUE', 'TRUNCATE', 'TRUSTED', 'TYPE', + 'UNCOMMITTED', 'UNDER', 'UNENCRYPTED', 'UNION', 'UNIQUE', 'UNKNOWN', 'UNLISTEN', + 'UNNAMED', 'UNNEST', 'UNTIL', 'UPDATE', 'UPPER', 'USAGE', 'USER', + 'USER_DEFINED_TYPE_CATALOG', 'USER_DEFINED_TYPE_NAME', + 'USER_DEFINED_TYPE_SCHEMA', 'USING', 'VACUUM', 'VALID', 'VALIDATOR', 'VALUES', + 'VARIABLE', 'VERBOSE', 'VERSION', 'VIEW', 'VOLATILE', 'WHEN', 'WHENEVER', 'WHERE', + 'WITH', 'WITHOUT', 'WORK', 'WRITE', 'YEAR', 'ZONE'), suffix=r'\b'), + Keyword), + (words(( + 'ARRAY', 'BIGINT', 'BINARY', 'BIT', 'BLOB', 'BOOLEAN', 'CHAR', 'CHARACTER', 'DATE', + 'DEC', 'DECIMAL', 'FLOAT', 'INT', 'INTEGER', 'INTERVAL', 'NUMBER', 'NUMERIC', 'REAL', + 'SERIAL', 'SMALLINT', 'VARCHAR', 'VARYING', 'INT8', 'SERIAL8', 'TEXT'), suffix=r'\b'), Name.Builtin), (r'[+*/<>=~!@#%^&|`?-]', Operator), (r'[0-9]+', Number.Integer), # TODO: Backslash escapes? 
(r"'(''|[^'])*'", String.Single), - (r'"(""|[^"])*"', String.Symbol), # not a real string literal in ANSI SQL - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name), - (r'[;:()\[\],\.]', Punctuation) + (r'"(""|[^"])*"', String.Symbol), # not a real string literal in ANSI SQL + (r'[a-z_][\w$]*', Name), # allow $s in strings for Oracle + (r'[;:()\[\],.]', Punctuation) ], 'multiline-comments': [ (r'/\*', Comment.Multiline, 'multiline-comments'), (r'\*/', Comment.Multiline, '#pop'), - (r'[^/\*]+', Comment.Multiline), + (r'[^/*]+', Comment.Multiline), (r'[/*]', Comment.Multiline) ] } @@ -462,10 +471,9 @@ class MySqlLexer(RegexLexer): (r'/\*', Comment.Multiline, 'multiline-comments'), (r'[0-9]+', Number.Integer), (r'[0-9]*\.[0-9]+(e[+-][0-9]+)', Number.Float), - # TODO: add backslash escapes - (r"'(''|[^'])*'", String.Single), - (r'"(""|[^"])*"', String.Double), - (r"`(``|[^`])*`", String.Symbol), + (r"'(\\\\|\\'|''|[^'])*'", String.Single), + (r'"(\\\\|\\"|""|[^"])*"', String.Double), + (r"`(\\\\|\\`|``|[^`])*`", String.Symbol), (r'[+*/<>=~!@#%^&|`?-]', Operator), (r'\b(tinyint|smallint|mediumint|int|integer|bigint|date|' r'datetime|time|bit|bool|tinytext|mediumtext|longtext|text|' @@ -504,16 +512,16 @@ class MySqlLexer(RegexLexer): # TODO: this list is not complete (r'\b(auto_increment|engine|charset|tables)\b', Keyword.Pseudo), (r'(true|false|null)', Name.Constant), - (r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*)(\()', + (r'([a-z_]\w*)(\s*)(\()', bygroups(Name.Function, Text, Punctuation)), - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name), - (r'@[A-Za-z0-9]*[._]*[A-Za-z0-9]*', Name.Variable), - (r'[;:()\[\],\.]', Punctuation) + (r'[a-z_]\w*', Name), + (r'@[a-z0-9]*[._]*[a-z0-9]*', Name.Variable), + (r'[;:()\[\],.]', Punctuation) ], 'multiline-comments': [ (r'/\*', Comment.Multiline, 'multiline-comments'), (r'\*/', Comment.Multiline, '#pop'), - (r'[^/\*]+', Comment.Multiline), + (r'[^/*]+', Comment.Multiline), (r'[/*]', Comment.Multiline) ] } @@ -523,7 +531,7 @@ class SqliteConsoleLexer(Lexer): """ Lexer for 
example sessions using sqlite3. - *New in Pygments 0.11.* + .. versionadded:: 0.11 """ name = 'sqlite3con' @@ -557,3 +565,34 @@ class SqliteConsoleLexer(Lexer): for item in do_insertions(insertions, sql.get_tokens_unprocessed(curcode)): yield item + + +class RqlLexer(RegexLexer): + """ + Lexer for Relation Query Language. + + `RQL `_ + + .. versionadded:: 2.0 + """ + name = 'RQL' + aliases = ['rql'] + filenames = ['*.rql'] + mimetypes = ['text/x-rql'] + + flags = re.IGNORECASE + tokens = { + 'root': [ + (r'\s+', Text), + (r'(DELETE|SET|INSERT|UNION|DISTINCT|WITH|WHERE|BEING|OR' + r'|AND|NOT|GROUPBY|HAVING|ORDERBY|ASC|DESC|LIMIT|OFFSET' + r'|TODAY|NOW|TRUE|FALSE|NULL|EXISTS)\b', Keyword), + (r'[+*/<>=%-]', Operator), + (r'(Any|is|instance_of|CWEType|CWRelation)\b', Name.Builtin), + (r'[0-9]+', Number.Integer), + (r'[A-Z_]\w*\??', Name), + (r"'(''|[^'])*'", String.Single), + (r'"(""|[^"])*"', String.Single), + (r'[;:()\[\],.]', Punctuation) + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/tcl.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/tcl.py new file mode 100644 index 0000000..966dc24 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/tcl.py @@ -0,0 +1,145 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.tcl + ~~~~~~~~~~~~~~~~~~~ + + Lexers for Tcl and related languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, include, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number +from pygments.util import shebang_matches + +__all__ = ['TclLexer'] + + +class TclLexer(RegexLexer): + """ + For Tcl source code. + + .. 
versionadded:: 0.10 + """ + + keyword_cmds_re = words(( + 'after', 'apply', 'array', 'break', 'catch', 'continue', 'elseif', 'else', 'error', + 'eval', 'expr', 'for', 'foreach', 'global', 'if', 'namespace', 'proc', 'rename', 'return', + 'set', 'switch', 'then', 'trace', 'unset', 'update', 'uplevel', 'upvar', 'variable', + 'vwait', 'while'), prefix=r'\b', suffix=r'\b') + + builtin_cmds_re = words(( + 'append', 'bgerror', 'binary', 'cd', 'chan', 'clock', 'close', 'concat', 'dde', 'dict', + 'encoding', 'eof', 'exec', 'exit', 'fblocked', 'fconfigure', 'fcopy', 'file', + 'fileevent', 'flush', 'format', 'gets', 'glob', 'history', 'http', 'incr', 'info', 'interp', + 'join', 'lappend', 'lassign', 'lindex', 'linsert', 'list', 'llength', 'load', 'loadTk', + 'lrange', 'lrepeat', 'lreplace', 'lreverse', 'lsearch', 'lset', 'lsort', 'mathfunc', + 'mathop', 'memory', 'msgcat', 'open', 'package', 'pid', 'pkg::create', 'pkg_mkIndex', + 'platform', 'platform::shell', 'puts', 'pwd', 're_syntax', 'read', 'refchan', + 'regexp', 'registry', 'regsub', 'scan', 'seek', 'socket', 'source', 'split', 'string', + 'subst', 'tell', 'time', 'tm', 'unknown', 'unload'), prefix=r'\b', suffix=r'\b') + + name = 'Tcl' + aliases = ['tcl'] + filenames = ['*.tcl', '*.rvt'] + mimetypes = ['text/x-tcl', 'text/x-script.tcl', 'application/x-tcl'] + + def _gen_command_rules(keyword_cmds_re, builtin_cmds_re, context=""): + return [ + (keyword_cmds_re, Keyword, 'params' + context), + (builtin_cmds_re, Name.Builtin, 'params' + context), + (r'([\w.-]+)', Name.Variable, 'params' + context), + (r'#', Comment, 'comment'), + ] + + tokens = { + 'root': [ + include('command'), + include('basic'), + include('data'), + (r'\}', Keyword), # HACK: somehow we miscounted our braces + ], + 'command': _gen_command_rules(keyword_cmds_re, builtin_cmds_re), + 'command-in-brace': _gen_command_rules(keyword_cmds_re, + builtin_cmds_re, + "-in-brace"), + 'command-in-bracket': _gen_command_rules(keyword_cmds_re, + builtin_cmds_re, + 
"-in-bracket"), + 'command-in-paren': _gen_command_rules(keyword_cmds_re, + builtin_cmds_re, + "-in-paren"), + 'basic': [ + (r'\(', Keyword, 'paren'), + (r'\[', Keyword, 'bracket'), + (r'\{', Keyword, 'brace'), + (r'"', String.Double, 'string'), + (r'(eq|ne|in|ni)\b', Operator.Word), + (r'!=|==|<<|>>|<=|>=|&&|\|\||\*\*|[-+~!*/%<>&^|?:]', Operator), + ], + 'data': [ + (r'\s+', Text), + (r'0x[a-fA-F0-9]+', Number.Hex), + (r'0[0-7]+', Number.Oct), + (r'\d+\.\d+', Number.Float), + (r'\d+', Number.Integer), + (r'\$([\w.:-]+)', Name.Variable), + (r'([\w.:-]+)', Text), + ], + 'params': [ + (r';', Keyword, '#pop'), + (r'\n', Text, '#pop'), + (r'(else|elseif|then)\b', Keyword), + include('basic'), + include('data'), + ], + 'params-in-brace': [ + (r'\}', Keyword, ('#pop', '#pop')), + include('params') + ], + 'params-in-paren': [ + (r'\)', Keyword, ('#pop', '#pop')), + include('params') + ], + 'params-in-bracket': [ + (r'\]', Keyword, ('#pop', '#pop')), + include('params') + ], + 'string': [ + (r'\[', String.Double, 'string-square'), + (r'(?s)(\\\\|\\[0-7]+|\\.|[^"\\])', String.Double), + (r'"', String.Double, '#pop') + ], + 'string-square': [ + (r'\[', String.Double, 'string-square'), + (r'(?s)(\\\\|\\[0-7]+|\\.|\\\n|[^\]\\])', String.Double), + (r'\]', String.Double, '#pop') + ], + 'brace': [ + (r'\}', Keyword, '#pop'), + include('command-in-brace'), + include('basic'), + include('data'), + ], + 'paren': [ + (r'\)', Keyword, '#pop'), + include('command-in-paren'), + include('basic'), + include('data'), + ], + 'bracket': [ + (r'\]', Keyword, '#pop'), + include('command-in-bracket'), + include('basic'), + include('data'), + ], + 'comment': [ + (r'.*[^\\]\n', Comment, '#pop'), + (r'.*\\\n', Comment), + ], + } + + def analyse_text(text): + return shebang_matches(text, r'(tcl)') diff --git a/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/templates.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/templates.py similarity index 73% 
rename from plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/templates.py rename to plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/templates.py index 63fc5f3..c153f51 100644 --- a/plugin/packages/wakatime/wakatime/packages/pygments3/pygments/lexers/templates.py +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/templates.py @@ -5,21 +5,24 @@ Lexers for various template engines' markup. - :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re -from pygments.lexers.web import \ - PhpLexer, HtmlLexer, XmlLexer, JavascriptLexer, CssLexer, LassoLexer -from pygments.lexers.agile import PythonLexer, PerlLexer -from pygments.lexers.compiled import JavaLexer -from pygments.lexers.jvm import TeaLangLexer +from pygments.lexers.html import HtmlLexer, XmlLexer +from pygments.lexers.javascript import JavascriptLexer, LassoLexer +from pygments.lexers.css import CssLexer +from pygments.lexers.php import PhpLexer +from pygments.lexers.python import PythonLexer +from pygments.lexers.perl import PerlLexer +from pygments.lexers.jvm import JavaLexer, TeaLangLexer +from pygments.lexers.data import YamlLexer from pygments.lexer import Lexer, DelegatingLexer, RegexLexer, bygroups, \ - include, using, this -from pygments.token import Error, Punctuation, \ - Text, Comment, Operator, Keyword, Name, String, Number, Other, Token + include, using, this, default, combined +from pygments.token import Error, Punctuation, Whitespace, \ + Text, Comment, Operator, Keyword, Name, String, Number, Other, Token from pygments.util import html_doctype_matches, looks_like_xml __all__ = ['HtmlPhpLexer', 'XmlPhpLexer', 'CssPhpLexer', @@ -36,9 +39,12 @@ __all__ = ['HtmlPhpLexer', 'XmlPhpLexer', 'CssPhpLexer', 'MakoCssLexer', 'JspLexer', 'CheetahLexer', 'CheetahHtmlLexer', 'CheetahXmlLexer', 
'CheetahJavascriptLexer', 'EvoqueLexer', 'EvoqueHtmlLexer', 'EvoqueXmlLexer', 'ColdfusionLexer', - 'ColdfusionHtmlLexer', 'VelocityLexer', 'VelocityHtmlLexer', - 'VelocityXmlLexer', 'SspLexer', 'TeaTemplateLexer', 'LassoHtmlLexer', - 'LassoXmlLexer', 'LassoCssLexer', 'LassoJavascriptLexer'] + 'ColdfusionHtmlLexer', 'ColdfusionCFCLexer', 'VelocityLexer', + 'VelocityHtmlLexer', 'VelocityXmlLexer', 'SspLexer', + 'TeaTemplateLexer', 'LassoHtmlLexer', 'LassoXmlLexer', + 'LassoCssLexer', 'LassoJavascriptLexer', 'HandlebarsLexer', + 'HandlebarsHtmlLexer', 'YamlJinjaLexer', 'LiquidLexer', + 'TwigLexer', 'TwigHtmlLexer'] class ErbLexer(Lexer): @@ -59,7 +65,7 @@ class ErbLexer(Lexer): _block_re = re.compile(r'(<%%|%%>|<%=|<%#|<%-|<%|-%>|%>|^%[^%].*?$)', re.M) def __init__(self, **options): - from pygments.lexers.agile import RubyLexer + from pygments.lexers.ruby import RubyLexer self.ruby_lexer = RubyLexer(**options) Lexer.__init__(self, **options) @@ -102,7 +108,7 @@ class ErbLexer(Lexer): data = tokens.pop() r_idx = 0 for r_idx, r_token, r_value in \ - self.ruby_lexer.get_tokens_unprocessed(data): + self.ruby_lexer.get_tokens_unprocessed(data): yield r_idx + idx, r_token, r_value idx += len(data) state = 2 @@ -115,7 +121,7 @@ class ErbLexer(Lexer): yield idx, Comment.Preproc, tag[0] r_idx = 0 for r_idx, r_token, r_value in \ - self.ruby_lexer.get_tokens_unprocessed(tag[1:]): + self.ruby_lexer.get_tokens_unprocessed(tag[1:]): yield idx + 1 + r_idx, r_token, r_value idx += len(tag) state = 0 @@ -159,22 +165,23 @@ class SmartyLexer(RegexLexer): (r'(\{php\})(.*?)(\{/php\})', bygroups(Comment.Preproc, using(PhpLexer, startinline=True), Comment.Preproc)), - (r'(\{)(/?[a-zA-Z_][a-zA-Z0-9_]*)(\s*)', + (r'(\{)(/?[a-zA-Z_]\w*)(\s*)', bygroups(Comment.Preproc, Name.Function, Text), 'smarty'), (r'\{', Comment.Preproc, 'smarty') ], 'smarty': [ (r'\s+', Text), + (r'\{', Comment.Preproc, '#push'), (r'\}', Comment.Preproc, '#pop'), - (r'#[a-zA-Z_][a-zA-Z0-9_]*#', Name.Variable), - 
(r'\$[a-zA-Z_][a-zA-Z0-9_]*(\.[a-zA-Z0-9_]+)*', Name.Variable), - (r'[~!%^&*()+=|\[\]:;,.<>/?{}@-]', Operator), + (r'#[a-zA-Z_]\w*#', Name.Variable), + (r'\$[a-zA-Z_]\w*(\.\w+)*', Name.Variable), + (r'[~!%^&*()+=|\[\]:;,.<>/?@-]', Operator), (r'(true|false|null)\b', Keyword.Constant), (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|" r"0[xX][0-9a-fA-F]+[Ll]?", Number), (r'"(\\\\|\\"|[^"])*"', String.Double), (r"'(\\\\|\\'|[^'])*'", String.Single), - (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Attribute) + (r'[a-zA-Z_]\w*', Name.Attribute) ] } @@ -201,11 +208,11 @@ class VelocityLexer(RegexLexer): name = 'Velocity' aliases = ['velocity'] - filenames = ['*.vm','*.fhtml'] + filenames = ['*.vm', '*.fhtml'] flags = re.MULTILINE | re.DOTALL - identifier = r'[a-zA-Z_][a-zA-Z0-9_]*' + identifier = r'[a-zA-Z_]\w*' tokens = { 'root': [ @@ -227,10 +234,10 @@ class VelocityLexer(RegexLexer): (r'(\.)(' + identifier + r')', bygroups(Punctuation, Name.Variable), '#push'), (r'\}', Punctuation, '#pop'), - (r'', Other, '#pop') + default('#pop') ], 'directiveparams': [ - (r'(&&|\|\||==?|!=?|[-<>+*%&\|\^/])|\b(eq|ne|gt|lt|ge|le|not|in)\b', + (r'(&&|\|\||==?|!=?|[-<>+*%&|^/])|\b(eq|ne|gt|lt|ge|le|not|in)\b', Operator), (r'\[', Operator, 'rangeoperator'), (r'\b' + identifier + r'\b', Name.Function), @@ -251,7 +258,9 @@ class VelocityLexer(RegexLexer): (r"\b[0-9]+\b", Number), (r'(true|false|null)\b', Keyword.Constant), (r'\(', Punctuation, '#push'), - (r'\)', Punctuation, '#pop') + (r'\)', Punctuation, '#pop'), + (r'\[', Punctuation, '#push'), + (r'\]', Punctuation, '#pop'), ] } @@ -263,39 +272,39 @@ class VelocityLexer(RegexLexer): rv += 0.15 if re.search(r'#\{?foreach\}?\(.+?\).*?#\{?end\}?', text): rv += 0.15 - if re.search(r'\$\{?[a-zA-Z_][a-zA-Z0-9_]*(\([^)]*\))?' - r'(\.[a-zA-Z0-9_]+(\([^)]*\))?)*\}?', text): + if re.search(r'\$\{?[a-zA-Z_]\w*(\([^)]*\))?' 
+ r'(\.\w+(\([^)]*\))?)*\}?', text): rv += 0.01 return rv class VelocityHtmlLexer(DelegatingLexer): """ - Subclass of the `VelocityLexer` that highlights unlexer data + Subclass of the `VelocityLexer` that highlights unlexed data with the `HtmlLexer`. """ name = 'HTML+Velocity' aliases = ['html+velocity'] - alias_filenames = ['*.html','*.fhtml'] + alias_filenames = ['*.html', '*.fhtml'] mimetypes = ['text/html+velocity'] def __init__(self, **options): super(VelocityHtmlLexer, self).__init__(HtmlLexer, VelocityLexer, - **options) + **options) class VelocityXmlLexer(DelegatingLexer): """ - Subclass of the `VelocityLexer` that highlights unlexer data + Subclass of the `VelocityLexer` that highlights unlexed data with the `XmlLexer`. """ name = 'XML+Velocity' aliases = ['xml+velocity'] - alias_filenames = ['*.xml','*.vm'] + alias_filenames = ['*.xml', '*.vm'] mimetypes = ['application/xml+velocity'] def __init__(self, **options): @@ -305,7 +314,7 @@ class VelocityXmlLexer(DelegatingLexer): def analyse_text(text): rv = VelocityLexer.analyse_text(text) - 0.01 if looks_like_xml(text): - rv += 0.5 + rv += 0.4 return rv @@ -343,25 +352,25 @@ class DjangoLexer(RegexLexer): Text, Comment.Preproc, Text, Keyword, Text, Comment.Preproc)), # filter blocks - (r'(\{%)(-?\s*)(filter)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)', + (r'(\{%)(-?\s*)(filter)(\s+)([a-zA-Z_]\w*)', bygroups(Comment.Preproc, Text, Keyword, Text, Name.Function), 'block'), - (r'(\{%)(-?\s*)([a-zA-Z_][a-zA-Z0-9_]*)', + (r'(\{%)(-?\s*)([a-zA-Z_]\w*)', bygroups(Comment.Preproc, Text, Keyword), 'block'), (r'\{', Other) ], 'varnames': [ - (r'(\|)(\s*)([a-zA-Z_][a-zA-Z0-9_]*)', + (r'(\|)(\s*)([a-zA-Z_]\w*)', bygroups(Operator, Text, Name.Function)), - (r'(is)(\s+)(not)?(\s+)?([a-zA-Z_][a-zA-Z0-9_]*)', + (r'(is)(\s+)(not)?(\s+)?([a-zA-Z_]\w*)', bygroups(Keyword, Text, Keyword, Text, Name.Function)), (r'(_|true|false|none|True|False|None)\b', Keyword.Pseudo), (r'(in|as|reversed|recursive|not|and|or|is|if|else|import|' 
r'with(?:(?:out)?\s*context)?|scoped|ignore\s+missing)\b', Keyword), (r'(loop|block|super|forloop)\b', Name.Builtin), - (r'[a-zA-Z][a-zA-Z0-9_-]*', Name.Variable), - (r'\.[a-zA-Z0-9_]+', Name.Variable), + (r'[a-zA-Z][\w-]*', Name.Variable), + (r'\.\w+', Name.Variable), (r':?"(\\\\|\\"|[^"])*"', String.Double), (r":?'(\\\\|\\'|[^'])*'", String.Single), (r'([{}()\[\]+\-*/,:~]|[><=]=?)', Operator), @@ -397,7 +406,7 @@ class MyghtyLexer(RegexLexer): Generic `myghty templates`_ lexer. Code that isn't Myghty markup is yielded as `Token.Other`. - *New in Pygments 0.6.* + .. versionadded:: 0.6 .. _myghty templates: http://www.myghty.org/ """ @@ -442,10 +451,10 @@ class MyghtyLexer(RegexLexer): class MyghtyHtmlLexer(DelegatingLexer): """ - Subclass of the `MyghtyLexer` that highlights unlexer data + Subclass of the `MyghtyLexer` that highlights unlexed data with the `HtmlLexer`. - *New in Pygments 0.6.* + .. versionadded:: 0.6 """ name = 'HTML+Myghty' @@ -459,10 +468,10 @@ class MyghtyHtmlLexer(DelegatingLexer): class MyghtyXmlLexer(DelegatingLexer): """ - Subclass of the `MyghtyLexer` that highlights unlexer data + Subclass of the `MyghtyLexer` that highlights unlexed data with the `XmlLexer`. - *New in Pygments 0.6.* + .. versionadded:: 0.6 """ name = 'XML+Myghty' @@ -476,10 +485,10 @@ class MyghtyXmlLexer(DelegatingLexer): class MyghtyJavascriptLexer(DelegatingLexer): """ - Subclass of the `MyghtyLexer` that highlights unlexer data + Subclass of the `MyghtyLexer` that highlights unlexed data with the `JavascriptLexer`. - *New in Pygments 0.6.* + .. versionadded:: 0.6 """ name = 'JavaScript+Myghty' @@ -495,10 +504,10 @@ class MyghtyJavascriptLexer(DelegatingLexer): class MyghtyCssLexer(DelegatingLexer): """ - Subclass of the `MyghtyLexer` that highlights unlexer data + Subclass of the `MyghtyLexer` that highlights unlexed data with the `CssLexer`. - *New in Pygments 0.6.* + .. versionadded:: 0.6 """ name = 'CSS+Myghty' @@ -517,7 +526,7 @@ class MasonLexer(RegexLexer): .. 
_mason templates: http://www.masonhq.com/ - *New in Pygments 1.4.* + .. versionadded:: 1.4 """ name = 'Mason' aliases = ['mason'] @@ -570,7 +579,7 @@ class MakoLexer(RegexLexer): Generic `mako templates`_ lexer. Code that isn't Mako markup is yielded as `Token.Other`. - *New in Pygments 0.7.* + .. versionadded:: 0.7 .. _mako templates: http://www.makotemplates.org/ """ @@ -589,11 +598,11 @@ class MakoLexer(RegexLexer): (r'(\s*)(##[^\n]*)(\n|\Z)', bygroups(Text, Comment.Preproc, Other)), (r'(?s)<%doc>.*?', Comment.Preproc), - (r'(<%)([\w\.\:]+)', + (r'(<%)([\w.:]+)', bygroups(Comment.Preproc, Name.Builtin), 'tag'), - (r'()', + (r'()', bygroups(Comment.Preproc, Name.Builtin, Comment.Preproc)), - (r'<%(?=([\w\.\:]+))', Comment.Preproc, 'ondeftags'), + (r'<%(?=([\w.:]+))', Comment.Preproc, 'ondeftags'), (r'(<%(?:!?))(.*?)(%>)(?s)', bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)), (r'(\$\{)(.*?)(\})', @@ -638,7 +647,7 @@ class MakoHtmlLexer(DelegatingLexer): Subclass of the `MakoLexer` that highlights unlexed data with the `HtmlLexer`. - *New in Pygments 0.7.* + .. versionadded:: 0.7 """ name = 'HTML+Mako' @@ -647,14 +656,15 @@ class MakoHtmlLexer(DelegatingLexer): def __init__(self, **options): super(MakoHtmlLexer, self).__init__(HtmlLexer, MakoLexer, - **options) + **options) + class MakoXmlLexer(DelegatingLexer): """ - Subclass of the `MakoLexer` that highlights unlexer data + Subclass of the `MakoLexer` that highlights unlexed data with the `XmlLexer`. - *New in Pygments 0.7.* + .. versionadded:: 0.7 """ name = 'XML+Mako' @@ -663,14 +673,15 @@ class MakoXmlLexer(DelegatingLexer): def __init__(self, **options): super(MakoXmlLexer, self).__init__(XmlLexer, MakoLexer, - **options) + **options) + class MakoJavascriptLexer(DelegatingLexer): """ - Subclass of the `MakoLexer` that highlights unlexer data + Subclass of the `MakoLexer` that highlights unlexed data with the `JavascriptLexer`. - *New in Pygments 0.7.* + .. 
versionadded:: 0.7 """ name = 'JavaScript+Mako' @@ -681,14 +692,15 @@ class MakoJavascriptLexer(DelegatingLexer): def __init__(self, **options): super(MakoJavascriptLexer, self).__init__(JavascriptLexer, - MakoLexer, **options) + MakoLexer, **options) + class MakoCssLexer(DelegatingLexer): """ - Subclass of the `MakoLexer` that highlights unlexer data + Subclass of the `MakoLexer` that highlights unlexed data with the `CssLexer`. - *New in Pygments 0.7.* + .. versionadded:: 0.7 """ name = 'CSS+Mako' @@ -697,7 +709,7 @@ class MakoCssLexer(DelegatingLexer): def __init__(self, **options): super(MakoCssLexer, self).__init__(CssLexer, MakoLexer, - **options) + **options) # Genshi and Cheetah lexers courtesy of Matt Good. @@ -741,7 +753,7 @@ class CheetahLexer(RegexLexer): (bygroups(Comment.Preproc, using(CheetahPythonLexer), Comment.Preproc))), # TODO support other Python syntax like $foo['bar'] - (r'(\$)([a-zA-Z_][a-zA-Z0-9_\.]*[a-zA-Z0-9_])', + (r'(\$)([a-zA-Z_][\w.]*\w)', bygroups(Comment.Preproc, using(CheetahPythonLexer))), (r'(\$\{!?)(.*?)(\})(?s)', bygroups(Comment.Preproc, using(CheetahPythonLexer), @@ -749,7 +761,7 @@ class CheetahLexer(RegexLexer): (r'''(?sx) (.+?) # anything, followed by: (?: - (?=[#][#a-zA-Z]*) | # an eval comment + (?=\#[#a-zA-Z]*) | # an eval comment (?=\$[a-zA-Z_{]) | # a substitution \Z # end of string ) @@ -761,7 +773,7 @@ class CheetahLexer(RegexLexer): class CheetahHtmlLexer(DelegatingLexer): """ - Subclass of the `CheetahLexer` that highlights unlexer data + Subclass of the `CheetahLexer` that highlights unlexed data with the `HtmlLexer`. """ @@ -776,7 +788,7 @@ class CheetahHtmlLexer(DelegatingLexer): class CheetahXmlLexer(DelegatingLexer): """ - Subclass of the `CheetahLexer` that highlights unlexer data + Subclass of the `CheetahLexer` that highlights unlexed data with the `XmlLexer`. 
""" @@ -791,7 +803,7 @@ class CheetahXmlLexer(DelegatingLexer): class CheetahJavascriptLexer(DelegatingLexer): """ - Subclass of the `CheetahLexer` that highlights unlexer data + Subclass of the `CheetahLexer` that highlights unlexed data with the `JavascriptLexer`. """ @@ -822,11 +834,11 @@ class GenshiTextLexer(RegexLexer): tokens = { 'root': [ - (r'[^#\$\s]+', Other), + (r'[^#$\s]+', Other), (r'^(\s*)(##.*)$', bygroups(Text, Comment)), (r'^(\s*)(#)', bygroups(Text, Comment.Preproc), 'directive'), include('variable'), - (r'[#\$\s]', Other), + (r'[#$\s]', Other), ], 'directive': [ (r'\n', Text, '#pop'), @@ -839,7 +851,7 @@ class GenshiTextLexer(RegexLexer): 'variable': [ (r'(?)', bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)), # yield style and script blocks as Other @@ -863,11 +875,11 @@ class GenshiMarkupLexer(RegexLexer): (r'<\s*py:[a-zA-Z0-9]+', Name.Tag, 'pytag'), (r'<\s*[a-zA-Z0-9:]+', Name.Tag, 'tag'), include('variable'), - (r'[<\$]', Other), + (r'[<$]', Other), ], 'pytag': [ (r'\s+', Text), - (r'[a-zA-Z0-9_:-]+\s*=', Name.Attribute, 'pyattr'), + (r'[\w:-]+\s*=', Name.Attribute, 'pyattr'), (r'/?\s*>', Name.Tag, '#pop'), ], 'pyattr': [ @@ -877,8 +889,8 @@ class GenshiMarkupLexer(RegexLexer): ], 'tag': [ (r'\s+', Text), - (r'py:[a-zA-Z0-9_-]+\s*=', Name.Attribute, 'pyattr'), - (r'[a-zA-Z0-9_:-]+\s*=', Name.Attribute, 'attr'), + (r'py:[\w-]+\s*=', Name.Attribute, 'pyattr'), + (r'[\w:-]+\s*=', Name.Attribute, 'attr'), (r'/?\s*>', Name.Tag, '#pop'), ], 'attr': [ @@ -903,7 +915,7 @@ class GenshiMarkupLexer(RegexLexer): 'variable': [ (r'(?=|<|>', Operator), + (r'<=|>=|<|>|==', Operator), (r'mod\b', Operator), (r'(eq|lt|gt|lte|gte|not|is|and|or)\b', Operator), (r'\|\||&&', Operator), + (r'\?', Operator), (r'"', String.Double, 'string'), # There is a special rule for allowing html in single quoted # strings, evidently. 
(r"'.*?'", String.Single), (r'\d+', Number), - (r'(if|else|len|var|case|default|break|switch)\b', Keyword), - (r'([A-Za-z_$][A-Za-z0-9_.]*)(\s*)(\()', + (r'(if|else|len|var|xml|default|break|switch|component|property|function|do|' + r'try|catch|in|continue|for|return|while|required|any|array|binary|boolean|' + r'component|date|guid|numeric|query|string|struct|uuid|case)\b', Keyword), + (r'(true|false|null)\b', Keyword.Constant), + (r'(application|session|client|cookie|super|this|variables|arguments)\b', + Name.Constant), + (r'([a-z_$][\w.]*)(\s*)(\()', bygroups(Name.Function, Text, Punctuation)), - (r'[A-Za-z_$][A-Za-z0-9_.]*', Name.Variable), + (r'[a-z_$][\w.]*', Name.Variable), (r'[()\[\]{};:,.\\]', Punctuation), (r'\s+', Text), ], @@ -1525,7 +1547,7 @@ class ColdfusionMarkupLexer(RegexLexer): (r'<[^<>]*', Other), ], 'tags': [ - (r'(?s)', Comment.Multiline), + (r'', Comment), (r'', Name.Builtin, 'cfoutput'), (r'(?s)()(.+?)()', @@ -1541,12 +1563,17 @@ class ColdfusionMarkupLexer(RegexLexer): (r'[^#<]+', Other), (r'(#)(.*?)(#)', bygroups(Punctuation, using(ColdfusionLexer), Punctuation)), - #(r'', Name.Builtin, '#push'), + # (r'', Name.Builtin, '#push'), (r'', Name.Builtin, '#pop'), include('tags'), (r'(?s)<[^<>]*', Other), (r'#', Other), ], + 'cfcomment': [ + (r'', Comment.Multiline, '#pop'), + (r'([^<-]|<(?!!---)|-(?!-->))+', Comment.Multiline), + ], } @@ -1556,7 +1583,7 @@ class ColdfusionHtmlLexer(DelegatingLexer): """ name = 'Coldfusion HTML' aliases = ['cfm'] - filenames = ['*.cfm', '*.cfml', '*.cfc'] + filenames = ['*.cfm', '*.cfml'] mimetypes = ['application/x-coldfusion'] def __init__(self, **options): @@ -1564,11 +1591,27 @@ class ColdfusionHtmlLexer(DelegatingLexer): **options) +class ColdfusionCFCLexer(DelegatingLexer): + """ + Coldfusion markup/script components + + .. 
versionadded:: 2.0 + """ + name = 'Coldfusion CFC' + aliases = ['cfc'] + filenames = ['*.cfc'] + mimetypes = [] + + def __init__(self, **options): + super(ColdfusionCFCLexer, self).__init__(ColdfusionHtmlLexer, ColdfusionLexer, + **options) + + class SspLexer(DelegatingLexer): """ Lexer for Scalate Server Pages. - *New in Pygments 1.4.* + .. versionadded:: 1.4 """ name = 'Scalate Server Page' aliases = ['ssp'] @@ -1594,7 +1637,7 @@ class TeaTemplateRootLexer(RegexLexer): Base for the `TeaTemplateLexer`. Yields `Token.Other` for area outside of code blocks. - *New in Pygments 1.5.* + .. versionadded:: 1.5 """ tokens = { @@ -1602,20 +1645,20 @@ class TeaTemplateRootLexer(RegexLexer): (r'<%\S?', Keyword, 'sec'), (r'[^<]+', Other), (r'<', Other), - ], + ], 'sec': [ (r'%>', Keyword, '#pop'), # note: '\w\W' != '.' without DOTALL. (r'[\w\W]+?(?=%>|\Z)', using(TeaLangLexer)), - ], - } + ], + } class TeaTemplateLexer(DelegatingLexer): """ Lexer for `Tea Templates `_. - *New in Pygments 1.5.* + .. versionadded:: 1.5 """ name = 'Tea' aliases = ['tea'] @@ -1642,7 +1685,7 @@ class LassoHtmlLexer(DelegatingLexer): Nested JavaScript and CSS is also highlighted. - *New in Pygments 1.6.* + .. versionadded:: 1.6 """ name = 'HTML+Lasso' @@ -1658,9 +1701,7 @@ class LassoHtmlLexer(DelegatingLexer): def analyse_text(text): rv = LassoLexer.analyse_text(text) - 0.01 - if re.search(r'<\w+>', text, re.I): - rv += 0.2 - if html_doctype_matches(text): + if html_doctype_matches(text): # same as HTML lexer rv += 0.5 return rv @@ -1670,7 +1711,7 @@ class LassoXmlLexer(DelegatingLexer): Subclass of the `LassoLexer` which highlights unhandled data with the `XmlLexer`. - *New in Pygments 1.6.* + .. versionadded:: 1.6 """ name = 'XML+Lasso' @@ -1694,7 +1735,7 @@ class LassoCssLexer(DelegatingLexer): Subclass of the `LassoLexer` which highlights unhandled data with the `CssLexer`. - *New in Pygments 1.6.* + .. 
versionadded:: 1.6 """ name = 'CSS+Lasso' @@ -1720,7 +1761,7 @@ class LassoJavascriptLexer(DelegatingLexer): Subclass of the `LassoLexer` which highlights unhandled data with the `JavascriptLexer`. - *New in Pygments 1.6.* + .. versionadded:: 1.6 """ name = 'JavaScript+Lasso' @@ -1740,3 +1781,394 @@ class LassoJavascriptLexer(DelegatingLexer): if 'function' in text: rv += 0.2 return rv + + +class HandlebarsLexer(RegexLexer): + """ + Generic `handlebars ` template lexer. + + Highlights only the Handlebars template tags (stuff between `{{` and `}}`). + Everything else is left for a delegating lexer. + + .. versionadded:: 2.0 + """ + + name = "Handlebars" + aliases = ['handlebars'] + + tokens = { + 'root': [ + (r'[^{]+', Other), + + (r'\{\{!.*\}\}', Comment), + + (r'(\{\{\{)(\s*)', bygroups(Comment.Special, Text), 'tag'), + (r'(\{\{)(\s*)', bygroups(Comment.Preproc, Text), 'tag'), + ], + + 'tag': [ + (r'\s+', Text), + (r'\}\}\}', Comment.Special, '#pop'), + (r'\}\}', Comment.Preproc, '#pop'), + + # Handlebars + (r'([#/]*)(each|if|unless|else|with|log|in)', bygroups(Keyword, + Keyword)), + + # General {{#block}} + (r'([#/])([\w-]+)', bygroups(Name.Function, Name.Function)), + + # {{opt=something}} + (r'([\w-]+)(=)', bygroups(Name.Attribute, Operator)), + + # borrowed from DjangoLexer + (r':?"(\\\\|\\"|[^"])*"', String.Double), + (r":?'(\\\\|\\'|[^'])*'", String.Single), + (r'[a-zA-Z][\w-]*', Name.Variable), + (r'\.[\w-]+', Name.Variable), + (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|" + r"0[xX][0-9a-fA-F]+[Ll]?", Number), + ] + } + + +class HandlebarsHtmlLexer(DelegatingLexer): + """ + Subclass of the `HandlebarsLexer` that highlights unlexed data with the + `HtmlLexer`. + + .. 
versionadded:: 2.0 + """ + + name = "HTML+Handlebars" + aliases = ["html+handlebars"] + filenames = ['*.handlebars', '*.hbs'] + mimetypes = ['text/html+handlebars', 'text/x-handlebars-template'] + + def __init__(self, **options): + super(HandlebarsHtmlLexer, self).__init__(HtmlLexer, HandlebarsLexer, **options) + + +class YamlJinjaLexer(DelegatingLexer): + """ + Subclass of the `DjangoLexer` that highighlights unlexed data with the + `YamlLexer`. + + Commonly used in Saltstack salt states. + + .. versionadded:: 2.0 + """ + + name = 'YAML+Jinja' + aliases = ['yaml+jinja', 'salt', 'sls'] + filenames = ['*.sls'] + mimetypes = ['text/x-yaml+jinja', 'text/x-sls'] + + def __init__(self, **options): + super(YamlJinjaLexer, self).__init__(YamlLexer, DjangoLexer, **options) + + +class LiquidLexer(RegexLexer): + """ + Lexer for `Liquid templates + `_. + + .. versionadded:: 2.0 + """ + name = 'liquid' + aliases = ['liquid'] + filenames = ['*.liquid'] + + tokens = { + 'root': [ + (r'[^{]+', Text), + # tags and block tags + (r'(\{%)(\s*)', bygroups(Punctuation, Whitespace), 'tag-or-block'), + # output tags + (r'(\{\{)(\s*)([^\s}]+)', + bygroups(Punctuation, Whitespace, using(this, state = 'generic')), + 'output'), + (r'\{', Text) + ], + + 'tag-or-block': [ + # builtin logic blocks + (r'(if|unless|elsif|case)(?=\s+)', Keyword.Reserved, 'condition'), + (r'(when)(\s+)', bygroups(Keyword.Reserved, Whitespace), + combined('end-of-block', 'whitespace', 'generic')), + (r'(else)(\s*)(%\})', + bygroups(Keyword.Reserved, Whitespace, Punctuation), '#pop'), + + # other builtin blocks + (r'(capture)(\s+)([^\s%]+)(\s*)(%\})', + bygroups(Name.Tag, Whitespace, using(this, state = 'variable'), + Whitespace, Punctuation), '#pop'), + (r'(comment)(\s*)(%\})', + bygroups(Name.Tag, Whitespace, Punctuation), 'comment'), + (r'(raw)(\s*)(%\})', + bygroups(Name.Tag, Whitespace, Punctuation), 'raw'), + + # end of block + (r'(end(case|unless|if))(\s*)(%\})', + bygroups(Keyword.Reserved, None, Whitespace, 
Punctuation), '#pop'), + (r'(end([^\s%]+))(\s*)(%\})', + bygroups(Name.Tag, None, Whitespace, Punctuation), '#pop'), + + # builtin tags (assign and include are handled together with usual tags) + (r'(cycle)(\s+)(?:([^\s:]*)(:))?(\s*)', + bygroups(Name.Tag, Whitespace, + using(this, state='generic'), Punctuation, Whitespace), + 'variable-tag-markup'), + + # other tags or blocks + (r'([^\s%]+)(\s*)', bygroups(Name.Tag, Whitespace), 'tag-markup') + ], + + 'output': [ + include('whitespace'), + ('\}\}', Punctuation, '#pop'), # end of output + + (r'\|', Punctuation, 'filters') + ], + + 'filters': [ + include('whitespace'), + (r'\}\}', Punctuation, ('#pop', '#pop')), # end of filters and output + + (r'([^\s|:]+)(:?)(\s*)', + bygroups(Name.Function, Punctuation, Whitespace), 'filter-markup') + ], + + 'filter-markup': [ + (r'\|', Punctuation, '#pop'), + include('end-of-tag'), + include('default-param-markup') + ], + + 'condition': [ + include('end-of-block'), + include('whitespace'), + + (r'([^\s=!><]+)(\s*)([=!><]=?)(\s*)(\S+)(\s*)(%\})', + bygroups(using(this, state = 'generic'), Whitespace, Operator, + Whitespace, using(this, state = 'generic'), Whitespace, + Punctuation)), + (r'\b!', Operator), + (r'\bnot\b', Operator.Word), + (r'([\w.\'"]+)(\s+)(contains)(\s+)([\w.\'"]+)', + bygroups(using(this, state = 'generic'), Whitespace, Operator.Word, + Whitespace, using(this, state = 'generic'))), + + include('generic'), + include('whitespace') + ], + + 'generic-value': [ + include('generic'), + include('end-at-whitespace') + ], + + 'operator': [ + (r'(\s*)((=|!|>|<)=?)(\s*)', + bygroups(Whitespace, Operator, None, Whitespace), '#pop'), + (r'(\s*)(\bcontains\b)(\s*)', + bygroups(Whitespace, Operator.Word, Whitespace), '#pop'), + ], + + 'end-of-tag': [ + (r'\}\}', Punctuation, '#pop') + ], + + 'end-of-block': [ + (r'%\}', Punctuation, ('#pop', '#pop')) + ], + + 'end-at-whitespace': [ + (r'\s+', Whitespace, '#pop') + ], + + # states for unknown markup + 'param-markup': [ + 
include('whitespace'), + # params with colons or equals + (r'([^\s=:]+)(\s*)(=|:)', + bygroups(Name.Attribute, Whitespace, Operator)), + # explicit variables + (r'(\{\{)(\s*)([^\s}])(\s*)(\}\})', + bygroups(Punctuation, Whitespace, using(this, state = 'variable'), + Whitespace, Punctuation)), + + include('string'), + include('number'), + include('keyword'), + (r',', Punctuation) + ], + + 'default-param-markup': [ + include('param-markup'), + (r'.', Text) # fallback for switches / variables / un-quoted strings / ... + ], + + 'variable-param-markup': [ + include('param-markup'), + include('variable'), + (r'.', Text) # fallback + ], + + 'tag-markup': [ + (r'%\}', Punctuation, ('#pop', '#pop')), # end of tag + include('default-param-markup') + ], + + 'variable-tag-markup': [ + (r'%\}', Punctuation, ('#pop', '#pop')), # end of tag + include('variable-param-markup') + ], + + # states for different values types + 'keyword': [ + (r'\b(false|true)\b', Keyword.Constant) + ], + + 'variable': [ + (r'[a-zA-Z_]\w*', Name.Variable), + (r'(?<=\w)\.(?=\w)', Punctuation) + ], + + 'string': [ + (r"'[^']*'", String.Single), + (r'"[^"]*"', String.Double) + ], + + 'number': [ + (r'\d+\.\d+', Number.Float), + (r'\d+', Number.Integer) + ], + + 'generic': [ # decides for variable, string, keyword or number + include('keyword'), + include('string'), + include('number'), + include('variable') + ], + + 'whitespace': [ + (r'[ \t]+', Whitespace) + ], + + # states for builtin blocks + 'comment': [ + (r'(\{%)(\s*)(endcomment)(\s*)(%\})', + bygroups(Punctuation, Whitespace, Name.Tag, Whitespace, + Punctuation), ('#pop', '#pop')), + (r'.', Comment) + ], + + 'raw': [ + (r'[^{]+', Text), + (r'(\{%)(\s*)(endraw)(\s*)(%\})', + bygroups(Punctuation, Whitespace, Name.Tag, Whitespace, + Punctuation), '#pop'), + (r'\{', Text) + ], + } + + +class TwigLexer(RegexLexer): + """ + `Twig `_ template lexer. 
+ + It just highlights Twig code between the preprocessor directives, + other data is left untouched by the lexer. + + .. versionadded:: 2.0 + """ + + name = 'Twig' + aliases = ['twig'] + mimetypes = ['application/x-twig'] + + flags = re.M | re.S + + # Note that a backslash is included in the following two patterns + # PHP uses a backslash as a namespace separator + _ident_char = r'[\\\w-]|[^\x00-\x7f]' + _ident_begin = r'(?:[\\_a-z]|[^\x00-\x7f])' + _ident_end = r'(?:' + _ident_char + ')*' + _ident_inner = _ident_begin + _ident_end + + tokens = { + 'root': [ + (r'[^{]+', Other), + (r'\{\{', Comment.Preproc, 'var'), + # twig comments + (r'\{\#.*?\#\}', Comment), + # raw twig blocks + (r'(\{%)(-?\s*)(raw)(\s*-?)(%\})(.*?)' + r'(\{%)(-?\s*)(endraw)(\s*-?)(%\})', + bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc, + Other, Comment.Preproc, Text, Keyword, Text, + Comment.Preproc)), + (r'(\{%)(-?\s*)(verbatim)(\s*-?)(%\})(.*?)' + r'(\{%)(-?\s*)(endverbatim)(\s*-?)(%\})', + bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc, + Other, Comment.Preproc, Text, Keyword, Text, + Comment.Preproc)), + # filter blocks + (r'(\{%%)(-?\s*)(filter)(\s+)(%s)' % _ident_inner, + bygroups(Comment.Preproc, Text, Keyword, Text, Name.Function), + 'tag'), + (r'(\{%)(-?\s*)([a-zA-Z_]\w*)', + bygroups(Comment.Preproc, Text, Keyword), 'tag'), + (r'\{', Other), + ], + 'varnames': [ + (r'(\|)(\s*)(%s)' % _ident_inner, + bygroups(Operator, Text, Name.Function)), + (r'(is)(\s+)(not)?(\s*)(%s)' % _ident_inner, + bygroups(Keyword, Text, Keyword, Text, Name.Function)), + (r'(?i)(true|false|none|null)\b', Keyword.Pseudo), + (r'(in|not|and|b-and|or|b-or|b-xor|is' + r'if|elseif|else|import' + r'constant|defined|divisibleby|empty|even|iterable|odd|sameas' + r'matches|starts\s+with|ends\s+with)\b', + Keyword), + (r'(loop|block|parent)\b', Name.Builtin), + (_ident_inner, Name.Variable), + (r'\.' 
+ _ident_inner, Name.Variable), + (r'\.[0-9]+', Number), + (r':?"(\\\\|\\"|[^"])*"', String.Double), + (r":?'(\\\\|\\'|[^'])*'", String.Single), + (r'([{}()\[\]+\-*/,:~%]|\.\.|\?|:|\*\*|\/\/|!=|[><=]=?)', Operator), + (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|" + r"0[xX][0-9a-fA-F]+[Ll]?", Number), + ], + 'var': [ + (r'\s+', Text), + (r'(-?)(\}\})', bygroups(Text, Comment.Preproc), '#pop'), + include('varnames') + ], + 'tag': [ + (r'\s+', Text), + (r'(-?)(%\})', bygroups(Text, Comment.Preproc), '#pop'), + include('varnames'), + (r'.', Punctuation), + ], + } + + +class TwigHtmlLexer(DelegatingLexer): + """ + Subclass of the `TwigLexer` that highlights unlexed data with the + `HtmlLexer`. + + .. versionadded:: 2.0 + """ + + name = "HTML+Twig" + aliases = ["html+twig"] + filenames = ['*.twig'] + mimetypes = ['text/html+twig'] + + def __init__(self, **options): + super(TwigHtmlLexer, self).__init__(HtmlLexer, TwigLexer, **options) diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/testing.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/testing.py new file mode 100644 index 0000000..07e3cb3 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/testing.py @@ -0,0 +1,131 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.testing + ~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for testing languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, include, bygroups +from pygments.token import Comment, Keyword, Name, String + +__all__ = ['GherkinLexer'] + + +class GherkinLexer(RegexLexer): + """ + For `Gherkin ` syntax. + + .. 
versionadded:: 1.2 + """ + name = 'Gherkin' + aliases = ['cucumber', 'gherkin'] + filenames = ['*.feature'] + mimetypes = ['text/x-gherkin'] + + feature_keywords = u'^(기능|機能|功能|フィーチャ|خاصية|תכונה|Функціонал|Функционалност|Функционал|Фича|Особина|Могућност|Özellik|Właściwość|Tính năng|Trajto|Savybė|Požiadavka|Požadavek|Osobina|Ominaisuus|Omadus|OH HAI|Mogućnost|Mogucnost|Jellemző|Fīča|Funzionalità|Funktionalität|Funkcionalnost|Funkcionalitāte|Funcționalitate|Functionaliteit|Functionalitate|Funcionalitat|Funcionalidade|Fonctionnalité|Fitur|Feature|Egenskap|Egenskab|Crikey|Característica|Arwedd)(:)(.*)$' + feature_element_keywords = u'^(\\s*)(시나리오 개요|시나리오|배경|背景|場景大綱|場景|场景大纲|场景|劇本大綱|劇本|テンプレ|シナリオテンプレート|シナリオテンプレ|シナリオアウトライン|シナリオ|سيناريو مخطط|سيناريو|الخلفية|תרחיש|תבנית תרחיש|רקע|Тарих|Сценарій|Сценарио|Сценарий структураси|Сценарий|Структура сценарію|Структура сценарија|Структура сценария|Скица|Рамка на сценарий|Пример|Предыстория|Предистория|Позадина|Передумова|Основа|Концепт|Контекст|Założenia|Wharrimean is|Tình huống|The thing of it is|Tausta|Taust|Tapausaihio|Tapaus|Szenariogrundriss|Szenario|Szablon scenariusza|Stsenaarium|Struktura scenarija|Skica|Skenario konsep|Skenario|Situācija|Senaryo taslağı|Senaryo|Scénář|Scénario|Schema dello scenario|Scenārijs pēc parauga|Scenārijs|Scenár|Scenaro|Scenariusz|Scenariul de şablon|Scenariul de sablon|Scenariu|Scenario Outline|Scenario Amlinellol|Scenario|Scenarijus|Scenarijaus šablonas|Scenarij|Scenarie|Rerefons|Raamstsenaarium|Primer|Pozadí|Pozadina|Pozadie|Plan du scénario|Plan du Scénario|Osnova scénáře|Osnova|Náčrt Scénáře|Náčrt Scenáru|Mate|MISHUN SRSLY|MISHUN|Kịch bản|Konturo de la scenaro|Kontext|Konteksts|Kontekstas|Kontekst|Koncept|Khung tình huống|Khung kịch bản|Háttér|Grundlage|Geçmiş|Forgatókönyv vázlat|Forgatókönyv|Fono|Esquema do Cenário|Esquema do Cenario|Esquema del escenario|Esquema de l\'escenari|Escenario|Escenari|Dis is what went 
down|Dasar|Contexto|Contexte|Contesto|Condiţii|Conditii|Cenário|Cenario|Cefndir|Bối cảnh|Blokes|Bakgrunn|Bakgrund|Baggrund|Background|B4|Antecedents|Antecedentes|All y\'all|Achtergrond|Abstrakt Scenario|Abstract Scenario)(:)(.*)$' + examples_keywords = u'^(\\s*)(예|例子|例|サンプル|امثلة|דוגמאות|Сценарији|Примери|Приклади|Мисоллар|Значения|Örnekler|Voorbeelden|Variantai|Tapaukset|Scenarios|Scenariji|Scenarijai|Příklady|Példák|Príklady|Przykłady|Primjeri|Primeri|Piemēri|Pavyzdžiai|Paraugs|Juhtumid|Exemplos|Exemples|Exemplele|Exempel|Examples|Esempi|Enghreifftiau|Ekzemploj|Eksempler|Ejemplos|EXAMPLZ|Dữ liệu|Contoh|Cobber|Beispiele)(:)(.*)$' + step_keywords = u'^(\\s*)(하지만|조건|먼저|만일|만약|단|그리고|그러면|那麼|那么|而且|當|当|前提|假設|假如|但是|但し|並且|もし|ならば|ただし|しかし|かつ|و |متى |لكن |عندما |ثم |بفرض |اذاً |כאשר |וגם |בהינתן |אזי |אז |אבל |Якщо |Унда |То |Припустимо, що |Припустимо |Онда |Но |Нехай |Лекин |Когато |Када |Кад |К тому же |И |Задато |Задати |Задате |Если |Допустим |Дадено |Ва |Бирок |Аммо |Али |Але |Агар |А |І |Și |És |Zatati |Zakładając |Zadato |Zadate |Zadano |Zadani |Zadan |Youse know when youse got |Youse know like when |Yna |Ya know how |Ya gotta |Y |Wun |Wtedy |When y\'all |When |Wenn |WEN |Và |Ve |Und |Un |Thì |Then y\'all |Then |Tapi |Tak |Tada |Tad |Så |Stel |Soit |Siis |Si |Sed |Se |Quando |Quand |Quan |Pryd |Pokud |Pokiaľ |Però |Pero |Pak |Oraz |Onda |Ond |Oletetaan |Og |Och |O zaman |Når |När |Niin |Nhưng |N |Mutta |Men |Mas |Maka |Majd |Mais |Maar |Ma |Lorsque |Lorsqu\'|Kun |Kuid |Kui |Khi |Keď |Ketika |Když |Kaj |Kai |Kada |Kad |Jeżeli |Ja |Ir |I CAN HAZ |I |Ha |Givun |Givet |Given y\'all |Given |Gitt |Gegeven |Gegeben sei |Fakat |Eğer ki |Etant donné |Et |Então |Entonces |Entao |En |Eeldades |E |Duota |Dun |Donitaĵo |Donat |Donada |Do |Diyelim ki |Dengan |Den youse gotta |De |Dato |Dar |Dann |Dan |Dado |Dacă |Daca |DEN |Când |Cuando |Cho |Cept |Cand |Cal |But y\'all |But |Buh |Biết |Bet |BUT |Atès |Atunci |Atesa |Anrhegedig a |Angenommen |And y\'all |And |An |Ama |Als |Alors 
|Allora |Ali |Aleshores |Ale |Akkor |Aber |AN |A také |A |\* )' + + tokens = { + 'comments': [ + (r'^\s*#.*$', Comment), + ], + 'feature_elements': [ + (step_keywords, Keyword, "step_content_stack"), + include('comments'), + (r"(\s|.)", Name.Function), + ], + 'feature_elements_on_stack': [ + (step_keywords, Keyword, "#pop:2"), + include('comments'), + (r"(\s|.)", Name.Function), + ], + 'examples_table': [ + (r"\s+\|", Keyword, 'examples_table_header'), + include('comments'), + (r"(\s|.)", Name.Function), + ], + 'examples_table_header': [ + (r"\s+\|\s*$", Keyword, "#pop:2"), + include('comments'), + (r"\\\|", Name.Variable), + (r"\s*\|", Keyword), + (r"[^|]", Name.Variable), + ], + 'scenario_sections_on_stack': [ + (feature_element_keywords, + bygroups(Name.Function, Keyword, Keyword, Name.Function), + "feature_elements_on_stack"), + ], + 'narrative': [ + include('scenario_sections_on_stack'), + include('comments'), + (r"(\s|.)", Name.Function), + ], + 'table_vars': [ + (r'(<[^>]+>)', Name.Variable), + ], + 'numbers': [ + (r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', String), + ], + 'string': [ + include('table_vars'), + (r'(\s|.)', String), + ], + 'py_string': [ + (r'"""', Keyword, "#pop"), + include('string'), + ], + 'step_content_root': [ + (r"$", Keyword, "#pop"), + include('step_content'), + ], + 'step_content_stack': [ + (r"$", Keyword, "#pop:2"), + include('step_content'), + ], + 'step_content': [ + (r'"', Name.Function, "double_string"), + include('table_vars'), + include('numbers'), + include('comments'), + (r'(\s|.)', Name.Function), + ], + 'table_content': [ + (r"\s+\|\s*$", Keyword, "#pop"), + include('comments'), + (r"\\\|", String), + (r"\s*\|", Keyword), + include('string'), + ], + 'double_string': [ + (r'"', Name.Function, "#pop"), + include('string'), + ], + 'root': [ + (r'\n', Name.Function), + include('comments'), + (r'"""', Keyword, "py_string"), + (r'\s+\|', Keyword, 'table_content'), + (r'"', Name.Function, "double_string"), + 
include('table_vars'), + include('numbers'), + (r'(\s*)(@[^@\r\n\t ]+)', bygroups(Name.Function, Name.Tag)), + (step_keywords, bygroups(Name.Function, Keyword), + 'step_content_root'), + (feature_keywords, bygroups(Keyword, Keyword, Name.Function), + 'narrative'), + (feature_element_keywords, + bygroups(Name.Function, Keyword, Keyword, Name.Function), + 'feature_elements'), + (examples_keywords, + bygroups(Name.Function, Keyword, Keyword, Name.Function), + 'examples_table'), + (r'(\s|.)', Name.Function), + ] + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/text.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/text.py new file mode 100644 index 0000000..3e543af --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/text.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.text + ~~~~~~~~~~~~~~~~~~~~ + + Lexers for non-source code file types. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +from pygments.lexers.configs import ApacheConfLexer, NginxConfLexer, \ + SquidConfLexer, LighttpdConfLexer, IniLexer, RegeditLexer, PropertiesLexer +from pygments.lexers.console import PyPyLogLexer +from pygments.lexers.textedit import VimLexer +from pygments.lexers.markup import BBCodeLexer, MoinWikiLexer, RstLexer, \ + TexLexer, GroffLexer +from pygments.lexers.installers import DebianControlLexer, SourcesListLexer +from pygments.lexers.make import MakefileLexer, BaseMakefileLexer, CMakeLexer +from pygments.lexers.haxe import HxmlLexer +from pygments.lexers.diff import DiffLexer, DarcsPatchLexer +from pygments.lexers.data import YamlLexer +from pygments.lexers.textfmts import IrcLogsLexer, GettextLexer, HttpLexer + +__all__ = [] diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/textedit.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/textedit.py new file mode 100644 index 0000000..af6f02b --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/textedit.py @@ -0,0 +1,169 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.textedit + ~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for languages related to text processing. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re +from bisect import bisect + +from pygments.lexer import RegexLexer, include, default, bygroups, using, this +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +from pygments.lexers.python import PythonLexer + +__all__ = ['AwkLexer', 'VimLexer'] + + +class AwkLexer(RegexLexer): + """ + For Awk scripts. + + .. 
versionadded:: 1.5 + """ + + name = 'Awk' + aliases = ['awk', 'gawk', 'mawk', 'nawk'] + filenames = ['*.awk'] + mimetypes = ['application/x-awk'] + + tokens = { + 'commentsandwhitespace': [ + (r'\s+', Text), + (r'#.*$', Comment.Single) + ], + 'slashstartsregex': [ + include('commentsandwhitespace'), + (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' + r'\B', String.Regex, '#pop'), + (r'(?=/)', Text, ('#pop', 'badregex')), + default('#pop') + ], + 'badregex': [ + (r'\n', Text, '#pop') + ], + 'root': [ + (r'^(?=\s|/)', Text, 'slashstartsregex'), + include('commentsandwhitespace'), + (r'\+\+|--|\|\||&&|in\b|\$|!?~|' + r'(\*\*|[-<>+*%\^/!=|])=?', Operator, 'slashstartsregex'), + (r'[{(\[;,]', Punctuation, 'slashstartsregex'), + (r'[})\].]', Punctuation), + (r'(break|continue|do|while|exit|for|if|else|' + r'return)\b', Keyword, 'slashstartsregex'), + (r'function\b', Keyword.Declaration, 'slashstartsregex'), + (r'(atan2|cos|exp|int|log|rand|sin|sqrt|srand|gensub|gsub|index|' + r'length|match|split|sprintf|sub|substr|tolower|toupper|close|' + r'fflush|getline|next|nextfile|print|printf|strftime|systime|' + r'delete|system)\b', Keyword.Reserved), + (r'(ARGC|ARGIND|ARGV|BEGIN|CONVFMT|ENVIRON|END|ERRNO|FIELDWIDTHS|' + r'FILENAME|FNR|FS|IGNORECASE|NF|NR|OFMT|OFS|ORFS|RLENGTH|RS|' + r'RSTART|RT|SUBSEP)\b', Name.Builtin), + (r'[$a-zA-Z_]\w*', Name.Other), + (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), + (r'0x[0-9a-fA-F]+', Number.Hex), + (r'[0-9]+', Number.Integer), + (r'"(\\\\|\\"|[^"])*"', String.Double), + (r"'(\\\\|\\'|[^'])*'", String.Single), + ] + } + + +class VimLexer(RegexLexer): + """ + Lexer for VimL script files. + + .. versionadded:: 0.8 + """ + name = 'VimL' + aliases = ['vim'] + filenames = ['*.vim', '.vimrc', '.exrc', '.gvimrc', + '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'] + mimetypes = ['text/x-vim'] + flags = re.MULTILINE + + _python = r'py(?:t(?:h(?:o(?:n)?)?)?)?' 
+ + tokens = { + 'root': [ + (r'^([ \t:]*)(' + _python + r')([ \t]*)(<<)([ \t]*)(.*)((?:\n|.)*)(\6)', + bygroups(using(this), Keyword, Text, Operator, Text, Text, + using(PythonLexer), Text)), + (r'^([ \t:]*)(' + _python + r')([ \t])(.*)', + bygroups(using(this), Keyword, Text, using(PythonLexer))), + + (r'^\s*".*', Comment), + + (r'[ \t]+', Text), + # TODO: regexes can have other delims + (r'/(\\\\|\\/|[^\n/])*/', String.Regex), + (r'"(\\\\|\\"|[^\n"])*"', String.Double), + (r"'(''|[^\n'])*'", String.Single), + + # Who decided that doublequote was a good comment character?? + (r'(?<=\s)"[^\-:.%#=*].*', Comment), + (r'-?\d+', Number), + (r'#[0-9a-f]{6}', Number.Hex), + (r'^:', Punctuation), + (r'[()<>+=!|,~-]', Punctuation), # Inexact list. Looks decent. + (r'\b(let|if|else|endif|elseif|fun|function|endfunction)\b', + Keyword), + (r'\b(NONE|bold|italic|underline|dark|light)\b', Name.Builtin), + (r'\b\w+\b', Name.Other), # These are postprocessed below + (r'.', Text), + ], + } + + def __init__(self, **options): + from pygments.lexers._vim_builtins import command, option, auto + self._cmd = command + self._opt = option + self._aut = auto + + RegexLexer.__init__(self, **options) + + def is_in(self, w, mapping): + r""" + It's kind of difficult to decide if something might be a keyword + in VimL because it allows you to abbreviate them. In fact, + 'ab[breviate]' is a good example. :ab, :abbre, or :abbreviate are + valid ways to call it so rather than making really awful regexps + like:: + + \bab(?:b(?:r(?:e(?:v(?:i(?:a(?:t(?:e)?)?)?)?)?)?)?)?\b + + we match `\b\w+\b` and then call is_in() on those tokens. See + `scripts/get_vimkw.py` for how the lists are extracted. 
+ """ + p = bisect(mapping, (w,)) + if p > 0: + if mapping[p-1][0] == w[:len(mapping[p-1][0])] and \ + mapping[p-1][1][:len(w)] == w: + return True + if p < len(mapping): + return mapping[p][0] == w[:len(mapping[p][0])] and \ + mapping[p][1][:len(w)] == w + return False + + def get_tokens_unprocessed(self, text): + # TODO: builtins are only subsequent tokens on lines + # and 'keywords' only happen at the beginning except + # for :au ones + for index, token, value in \ + RegexLexer.get_tokens_unprocessed(self, text): + if token is Name.Other: + if self.is_in(value, self._cmd): + yield index, Keyword, value + elif self.is_in(value, self._opt) or \ + self.is_in(value, self._aut): + yield index, Name.Builtin, value + else: + yield index, Text, value + else: + yield index, token, value diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/textfmts.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/textfmts.py new file mode 100644 index 0000000..189d334 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/textfmts.py @@ -0,0 +1,292 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.textfmts + ~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for various text formats. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, bygroups +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Generic, Literal +from pygments.util import ClassNotFound + +__all__ = ['IrcLogsLexer', 'TodotxtLexer', 'HttpLexer', 'GettextLexer'] + + +class IrcLogsLexer(RegexLexer): + """ + Lexer for IRC logs in *irssi*, *xchat* or *weechat* style. + """ + + name = 'IRC logs' + aliases = ['irc'] + filenames = ['*.weechatlog'] + mimetypes = ['text/x-irclog'] + + flags = re.VERBOSE | re.MULTILINE + timestamp = r""" + ( + # irssi / xchat and others + (?: \[|\()? 
# Opening bracket or paren for the timestamp + (?: # Timestamp + (?: (?:\d{1,4} [-/])* # Date as - or /-separated groups of digits + (?:\d{1,4}) + [T ])? # Date/time separator: T or space + (?: \d?\d [:.])* # Time as :/.-separated groups of 1 or 2 digits + (?: \d?\d [:.]) + ) + (?: \]|\))?\s+ # Closing bracket or paren for the timestamp + | + # weechat + \d{4}\s\w{3}\s\d{2}\s # Date + \d{2}:\d{2}:\d{2}\s+ # Time + Whitespace + | + # xchat + \w{3}\s\d{2}\s # Date + \d{2}:\d{2}:\d{2}\s+ # Time + Whitespace + )? + """ + tokens = { + 'root': [ + # log start/end + (r'^\*\*\*\*(.*)\*\*\*\*$', Comment), + # hack + ("^" + timestamp + r'(\s*<[^>]*>\s*)$', bygroups(Comment.Preproc, Name.Tag)), + # normal msgs + ("^" + timestamp + r""" + (\s*<.*?>\s*) # Nick """, + bygroups(Comment.Preproc, Name.Tag), 'msg'), + # /me msgs + ("^" + timestamp + r""" + (\s*[*]\s+) # Star + (\S+\s+.*?\n) # Nick + rest of message """, + bygroups(Comment.Preproc, Keyword, Generic.Inserted)), + # join/part msgs + ("^" + timestamp + r""" + (\s*(?:\*{3}|?)\s*) # Star(s) or symbols + (\S+\s+) # Nick + Space + (.*?\n) # Rest of message """, + bygroups(Comment.Preproc, Keyword, String, Comment)), + (r"^.*?\n", Text), + ], + 'msg': [ + (r"\S+:(?!//)", Name.Attribute), # Prefix + (r".*\n", Text, '#pop'), + ], + } + + +class GettextLexer(RegexLexer): + """ + Lexer for Gettext catalog files. + + .. 
versionadded:: 0.9 + """ + name = 'Gettext Catalog' + aliases = ['pot', 'po'] + filenames = ['*.pot', '*.po'] + mimetypes = ['application/x-gettext', 'text/x-gettext', 'text/gettext'] + + tokens = { + 'root': [ + (r'^#,\s.*?$', Keyword.Type), + (r'^#:\s.*?$', Keyword.Declaration), + # (r'^#$', Comment), + (r'^(#|#\.\s|#\|\s|#~\s|#\s).*$', Comment.Single), + (r'^(")([A-Za-z-]+:)(.*")$', + bygroups(String, Name.Property, String)), + (r'^".*"$', String), + (r'^(msgid|msgid_plural|msgstr)(\s+)(".*")$', + bygroups(Name.Variable, Text, String)), + (r'^(msgstr\[)(\d)(\])(\s+)(".*")$', + bygroups(Name.Variable, Number.Integer, Name.Variable, Text, String)), + ] + } + + +class HttpLexer(RegexLexer): + """ + Lexer for HTTP sessions. + + .. versionadded:: 1.5 + """ + + name = 'HTTP' + aliases = ['http'] + + flags = re.DOTALL + + def header_callback(self, match): + if match.group(1).lower() == 'content-type': + content_type = match.group(5).strip() + if ';' in content_type: + content_type = content_type[:content_type.find(';')].strip() + self.content_type = content_type + yield match.start(1), Name.Attribute, match.group(1) + yield match.start(2), Text, match.group(2) + yield match.start(3), Operator, match.group(3) + yield match.start(4), Text, match.group(4) + yield match.start(5), Literal, match.group(5) + yield match.start(6), Text, match.group(6) + + def continuous_header_callback(self, match): + yield match.start(1), Text, match.group(1) + yield match.start(2), Literal, match.group(2) + yield match.start(3), Text, match.group(3) + + def content_callback(self, match): + content_type = getattr(self, 'content_type', None) + content = match.group() + offset = match.start() + if content_type: + from pygments.lexers import get_lexer_for_mimetype + possible_lexer_mimetypes = [content_type] + if '+' in content_type: + # application/calendar+xml can be treated as application/xml + # if there's not a better match. 
+ general_type = re.sub(r'^(.*)/.*\+(.*)$', r'\1/\2', + content_type) + possible_lexer_mimetypes.append(general_type) + + for i in possible_lexer_mimetypes: + try: + lexer = get_lexer_for_mimetype(i) + except ClassNotFound: + pass + else: + for idx, token, value in lexer.get_tokens_unprocessed(content): + yield offset + idx, token, value + return + yield offset, Text, content + + tokens = { + 'root': [ + (r'(GET|POST|PUT|DELETE|HEAD|OPTIONS|TRACE|PATCH)( +)([^ ]+)( +)' + r'(HTTP)(/)(1\.[01])(\r?\n|\Z)', + bygroups(Name.Function, Text, Name.Namespace, Text, + Keyword.Reserved, Operator, Number, Text), + 'headers'), + (r'(HTTP)(/)(1\.[01])( +)(\d{3})( +)([^\r\n]+)(\r?\n|\Z)', + bygroups(Keyword.Reserved, Operator, Number, Text, Number, + Text, Name.Exception, Text), + 'headers'), + ], + 'headers': [ + (r'([^\s:]+)( *)(:)( *)([^\r\n]+)(\r?\n|\Z)', header_callback), + (r'([\t ]+)([^\r\n]+)(\r?\n|\Z)', continuous_header_callback), + (r'\r?\n', Text, 'content') + ], + 'content': [ + (r'.+', content_callback) + ] + } + + def analyse_text(text): + return text.startswith(('GET /', 'POST /', 'PUT /', 'DELETE /', 'HEAD /', + 'OPTIONS /', 'TRACE /', 'PATCH /')) + + +class TodotxtLexer(RegexLexer): + """ + Lexer for `Todo.txt `_ todo list format. + + .. versionadded:: 2.0 + """ + + name = 'Todotxt' + aliases = ['todotxt'] + # *.todotxt is not a standard extension for Todo.txt files; including it + # makes testing easier, and also makes autodetecting file type easier. 
+ filenames = ['todo.txt', '*.todotxt'] + mimetypes = ['text/x-todo'] + + # Aliases mapping standard token types of Todo.txt format concepts + CompleteTaskText = Operator # Chosen to de-emphasize complete tasks + IncompleteTaskText = Text # Incomplete tasks should look like plain text + + # Priority should have most emphasis to indicate importance of tasks + Priority = Generic.Heading + # Dates should have next most emphasis because time is important + Date = Generic.Subheading + + # Project and context should have equal weight, and be in different colors + Project = Generic.Error + Context = String + + # If tag functionality is added, it should have the same weight as Project + # and Context, and a different color. Generic.Traceback would work well. + + # Regex patterns for building up rules; dates, priorities, projects, and + # contexts are all atomic + # TODO: Make date regex more ISO 8601 compliant + date_regex = r'\d{4,}-\d{2}-\d{2}' + priority_regex = r'\([A-Z]\)' + project_regex = r'\+\S+' + context_regex = r'@\S+' + + # Compound regex expressions + complete_one_date_regex = r'(x )(' + date_regex + r')' + complete_two_date_regex = (complete_one_date_regex + r'( )(' + + date_regex + r')') + priority_date_regex = r'(' + priority_regex + r')( )(' + date_regex + r')' + + tokens = { + # Should parse starting at beginning of line; each line is a task + 'root': [ + # Complete task entry points: two total: + # 1. Complete task with two dates + (complete_two_date_regex, bygroups(CompleteTaskText, Date, + CompleteTaskText, Date), + 'complete'), + # 2. Complete task with one date + (complete_one_date_regex, bygroups(CompleteTaskText, Date), + 'complete'), + + # Incomplete task entry points: six total: + # 1. Priority plus date + (priority_date_regex, bygroups(Priority, IncompleteTaskText, Date), + 'incomplete'), + # 2. Priority only + (priority_regex, Priority, 'incomplete'), + # 3. Leading date + (date_regex, Date, 'incomplete'), + # 4. 
Leading context + (context_regex, Context, 'incomplete'), + # 5. Leading project + (project_regex, Project, 'incomplete'), + # 6. Non-whitespace catch-all + ('\S+', IncompleteTaskText, 'incomplete'), + ], + + # Parse a complete task + 'complete': [ + # Newline indicates end of task, should return to root + (r'\s*\n', CompleteTaskText, '#pop'), + # Tokenize contexts and projects + (context_regex, Context), + (project_regex, Project), + # Tokenize non-whitespace text + ('\S+', CompleteTaskText), + # Tokenize whitespace not containing a newline + ('\s+', CompleteTaskText), + ], + + # Parse an incomplete task + 'incomplete': [ + # Newline indicates end of task, should return to root + (r'\s*\n', IncompleteTaskText, '#pop'), + # Tokenize contexts and projects + (context_regex, Context), + (project_regex, Project), + # Tokenize non-whitespace text + ('\S+', IncompleteTaskText), + # Tokenize whitespace not containing a newline + ('\s+', IncompleteTaskText), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/theorem.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/theorem.py new file mode 100644 index 0000000..585c6df --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/theorem.py @@ -0,0 +1,466 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.theorem + ~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for theorem-proving languages. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, default, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Generic + +__all__ = ['CoqLexer', 'IsabelleLexer', 'LeanLexer'] + + +class CoqLexer(RegexLexer): + """ + For the `Coq `_ theorem prover. + + .. 
versionadded:: 1.5 + """ + + name = 'Coq' + aliases = ['coq'] + filenames = ['*.v'] + mimetypes = ['text/x-coq'] + + keywords1 = ( + # Vernacular commands + 'Section', 'Module', 'End', 'Require', 'Import', 'Export', 'Variable', + 'Variables', 'Parameter', 'Parameters', 'Axiom', 'Hypothesis', + 'Hypotheses', 'Notation', 'Local', 'Tactic', 'Reserved', 'Scope', + 'Open', 'Close', 'Bind', 'Delimit', 'Definition', 'Let', 'Ltac', + 'Fixpoint', 'CoFixpoint', 'Morphism', 'Relation', 'Implicit', + 'Arguments', 'Set', 'Unset', 'Contextual', 'Strict', 'Prenex', + 'Implicits', 'Inductive', 'CoInductive', 'Record', 'Structure', + 'Canonical', 'Coercion', 'Theorem', 'Lemma', 'Corollary', + 'Proposition', 'Fact', 'Remark', 'Example', 'Proof', 'Goal', 'Save', + 'Qed', 'Defined', 'Hint', 'Resolve', 'Rewrite', 'View', 'Search', + 'Show', 'Print', 'Printing', 'All', 'Graph', 'Projections', 'inside', + 'outside', 'Check', + ) + keywords2 = ( + # Gallina + 'forall', 'exists', 'exists2', 'fun', 'fix', 'cofix', 'struct', + 'match', 'end', 'in', 'return', 'let', 'if', 'is', 'then', 'else', + 'for', 'of', 'nosimpl', 'with', 'as', + ) + keywords3 = ( + # Sorts + 'Type', 'Prop', + ) + keywords4 = ( + # Tactics + 'pose', 'set', 'move', 'case', 'elim', 'apply', 'clear', 'hnf', 'intro', + 'intros', 'generalize', 'rename', 'pattern', 'after', 'destruct', + 'induction', 'using', 'refine', 'inversion', 'injection', 'rewrite', + 'congr', 'unlock', 'compute', 'ring', 'field', 'replace', 'fold', + 'unfold', 'change', 'cutrewrite', 'simpl', 'have', 'suff', 'wlog', + 'suffices', 'without', 'loss', 'nat_norm', 'assert', 'cut', 'trivial', + 'revert', 'bool_congr', 'nat_congr', 'symmetry', 'transitivity', 'auto', + 'split', 'left', 'right', 'autorewrite', 'tauto', + ) + keywords5 = ( + # Terminators + 'by', 'done', 'exact', 'reflexivity', 'tauto', 'romega', 'omega', + 'assumption', 'solve', 'contradiction', 'discriminate', + ) + keywords6 = ( + # Control + 'do', 'last', 'first', 'try', 'idtac', 'repeat', 
+ ) + # 'as', 'assert', 'begin', 'class', 'constraint', 'do', 'done', + # 'downto', 'else', 'end', 'exception', 'external', 'false', + # 'for', 'fun', 'function', 'functor', 'if', 'in', 'include', + # 'inherit', 'initializer', 'lazy', 'let', 'match', 'method', + # 'module', 'mutable', 'new', 'object', 'of', 'open', 'private', + # 'raise', 'rec', 'sig', 'struct', 'then', 'to', 'true', 'try', + # 'type', 'val', 'virtual', 'when', 'while', 'with' + keyopts = ( + '!=', '#', '&', '&&', r'\(', r'\)', r'\*', r'\+', ',', '-', r'-\.', + '->', r'\.', r'\.\.', ':', '::', ':=', ':>', ';', ';;', '<', '<-', + '<->', '=', '>', '>]', r'>\}', r'\?', r'\?\?', r'\[', r'\[<', r'\[>', + r'\[\|', ']', '_', '`', r'\{', r'\{<', r'\|', r'\|]', r'\}', '~', '=>', + r'/\\', r'\\/', + u'Π', u'λ', + ) + operators = r'[!$%&*+\./:<=>?@^|~-]' + word_operators = ('and', 'asr', 'land', 'lor', 'lsl', 'lxor', 'mod', 'or') + prefix_syms = r'[!?~]' + infix_syms = r'[=<>@^|&+\*/$%-]' + primitives = ('unit', 'int', 'float', 'bool', 'string', 'char', 'list', + 'array') + + tokens = { + 'root': [ + (r'\s+', Text), + (r'false|true|\(\)|\[\]', Name.Builtin.Pseudo), + (r'\(\*', Comment, 'comment'), + (words(keywords1, prefix=r'\b', suffix=r'\b'), Keyword.Namespace), + (words(keywords2, prefix=r'\b', suffix=r'\b'), Keyword), + (words(keywords3, prefix=r'\b', suffix=r'\b'), Keyword.Type), + (words(keywords4, prefix=r'\b', suffix=r'\b'), Keyword), + (words(keywords5, prefix=r'\b', suffix=r'\b'), Keyword.Pseudo), + (words(keywords6, prefix=r'\b', suffix=r'\b'), Keyword.Reserved), + (r'\b([A-Z][\w\']*)(?=\s*\.)', Name.Namespace, 'dotted'), + (r'\b([A-Z][\w\']*)', Name.Class), + (r'(%s)' % '|'.join(keyopts[::-1]), Operator), + (r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator), + (r'\b(%s)\b' % '|'.join(word_operators), Operator.Word), + (r'\b(%s)\b' % '|'.join(primitives), Keyword.Type), + + (r"[^\W\d][\w']*", Name), + + (r'\d[\d_]*', Number.Integer), + (r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex), + 
(r'0[oO][0-7][0-7_]*', Number.Oct), + (r'0[bB][01][01_]*', Number.Bin), + (r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float), + + (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'", + String.Char), + (r"'.'", String.Char), + (r"'", Keyword), # a stray quote is another syntax element + + (r'"', String.Double, 'string'), + + (r'[~?][a-z][\w\']*:', Name.Variable), + ], + 'comment': [ + (r'[^(*)]+', Comment), + (r'\(\*', Comment, '#push'), + (r'\*\)', Comment, '#pop'), + (r'[(*)]', Comment), + ], + 'string': [ + (r'[^"]+', String.Double), + (r'""', String.Double), + (r'"', String.Double, '#pop'), + ], + 'dotted': [ + (r'\s+', Text), + (r'\.', Punctuation), + (r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace), + (r'[A-Z][\w\']*', Name.Class, '#pop'), + (r'[a-z][a-z0-9_\']*', Name, '#pop'), + default('#pop') + ], + } + + def analyse_text(text): + if text.startswith('(*'): + return True + + +class IsabelleLexer(RegexLexer): + """ + For the `Isabelle `_ proof assistant. + + .. versionadded:: 2.0 + """ + + name = 'Isabelle' + aliases = ['isabelle'] + filenames = ['*.thy'] + mimetypes = ['text/x-isabelle'] + + keyword_minor = ( + 'and', 'assumes', 'attach', 'avoids', 'binder', 'checking', + 'class_instance', 'class_relation', 'code_module', 'congs', + 'constant', 'constrains', 'datatypes', 'defines', 'file', 'fixes', + 'for', 'functions', 'hints', 'identifier', 'if', 'imports', 'in', + 'includes', 'infix', 'infixl', 'infixr', 'is', 'keywords', 'lazy', + 'module_name', 'monos', 'morphisms', 'no_discs_sels', 'notes', + 'obtains', 'open', 'output', 'overloaded', 'parametric', 'permissive', + 'pervasive', 'rep_compat', 'shows', 'structure', 'type_class', + 'type_constructor', 'unchecked', 'unsafe', 'where', + ) + + keyword_diag = ( + 'ML_command', 'ML_val', 'class_deps', 'code_deps', 'code_thms', + 'display_drafts', 'find_consts', 'find_theorems', 'find_unused_assms', + 'full_prf', 'help', 'locale_deps', 'nitpick', 'pr', 'prf', + 'print_abbrevs', 
'print_antiquotations', 'print_attributes', + 'print_binds', 'print_bnfs', 'print_bundles', + 'print_case_translations', 'print_cases', 'print_claset', + 'print_classes', 'print_codeproc', 'print_codesetup', + 'print_coercions', 'print_commands', 'print_context', + 'print_defn_rules', 'print_dependencies', 'print_facts', + 'print_induct_rules', 'print_inductives', 'print_interps', + 'print_locale', 'print_locales', 'print_methods', 'print_options', + 'print_orders', 'print_quot_maps', 'print_quotconsts', + 'print_quotients', 'print_quotientsQ3', 'print_quotmapsQ3', + 'print_rules', 'print_simpset', 'print_state', 'print_statement', + 'print_syntax', 'print_theorems', 'print_theory', 'print_trans_rules', + 'prop', 'pwd', 'quickcheck', 'refute', 'sledgehammer', 'smt_status', + 'solve_direct', 'spark_status', 'term', 'thm', 'thm_deps', 'thy_deps', + 'try', 'try0', 'typ', 'unused_thms', 'value', 'values', 'welcome', + 'print_ML_antiquotations', 'print_term_bindings', 'values_prolog', + ) + + keyword_thy = ('theory', 'begin', 'end') + + keyword_section = ('header', 'chapter') + + keyword_subsection = ( + 'section', 'subsection', 'subsubsection', 'sect', 'subsect', + 'subsubsect', + ) + + keyword_theory_decl = ( + 'ML', 'ML_file', 'abbreviation', 'adhoc_overloading', 'arities', + 'atom_decl', 'attribute_setup', 'axiomatization', 'bundle', + 'case_of_simps', 'class', 'classes', 'classrel', 'codatatype', + 'code_abort', 'code_class', 'code_const', 'code_datatype', + 'code_identifier', 'code_include', 'code_instance', 'code_modulename', + 'code_monad', 'code_printing', 'code_reflect', 'code_reserved', + 'code_type', 'coinductive', 'coinductive_set', 'consts', 'context', + 'datatype', 'datatype_new', 'datatype_new_compat', 'declaration', + 'declare', 'default_sort', 'defer_recdef', 'definition', 'defs', + 'domain', 'domain_isomorphism', 'domaindef', 'equivariance', + 'export_code', 'extract', 'extract_type', 'fixrec', 'fun', + 'fun_cases', 'hide_class', 'hide_const', 
'hide_fact', 'hide_type', + 'import_const_map', 'import_file', 'import_tptp', 'import_type_map', + 'inductive', 'inductive_set', 'instantiation', 'judgment', 'lemmas', + 'lifting_forget', 'lifting_update', 'local_setup', 'locale', + 'method_setup', 'nitpick_params', 'no_adhoc_overloading', + 'no_notation', 'no_syntax', 'no_translations', 'no_type_notation', + 'nominal_datatype', 'nonterminal', 'notation', 'notepad', 'oracle', + 'overloading', 'parse_ast_translation', 'parse_translation', + 'partial_function', 'primcorec', 'primrec', 'primrec_new', + 'print_ast_translation', 'print_translation', 'quickcheck_generator', + 'quickcheck_params', 'realizability', 'realizers', 'recdef', 'record', + 'refute_params', 'setup', 'setup_lifting', 'simproc_setup', + 'simps_of_case', 'sledgehammer_params', 'spark_end', 'spark_open', + 'spark_open_siv', 'spark_open_vcg', 'spark_proof_functions', + 'spark_types', 'statespace', 'syntax', 'syntax_declaration', 'text', + 'text_raw', 'theorems', 'translations', 'type_notation', + 'type_synonym', 'typed_print_translation', 'typedecl', 'hoarestate', + 'install_C_file', 'install_C_types', 'wpc_setup', 'c_defs', 'c_types', + 'memsafe', 'SML_export', 'SML_file', 'SML_import', 'approximate', + 'bnf_axiomatization', 'cartouche', 'datatype_compat', + 'free_constructors', 'functor', 'nominal_function', + 'nominal_termination', 'permanent_interpretation', + 'binds', 'defining', 'smt2_status', 'term_cartouche', + 'boogie_file', 'text_cartouche', + ) + + keyword_theory_script = ('inductive_cases', 'inductive_simps') + + keyword_theory_goal = ( + 'ax_specification', 'bnf', 'code_pred', 'corollary', 'cpodef', + 'crunch', 'crunch_ignore', + 'enriched_type', 'function', 'instance', 'interpretation', 'lemma', + 'lift_definition', 'nominal_inductive', 'nominal_inductive2', + 'nominal_primrec', 'pcpodef', 'primcorecursive', + 'quotient_definition', 'quotient_type', 'recdef_tc', 'rep_datatype', + 'schematic_corollary', 'schematic_lemma', 
'schematic_theorem', + 'spark_vc', 'specification', 'subclass', 'sublocale', 'termination', + 'theorem', 'typedef', 'wrap_free_constructors', + ) + + keyword_qed = ('by', 'done', 'qed') + keyword_abandon_proof = ('sorry', 'oops') + + keyword_proof_goal = ('have', 'hence', 'interpret') + + keyword_proof_block = ('next', 'proof') + + keyword_proof_chain = ( + 'finally', 'from', 'then', 'ultimately', 'with', + ) + + keyword_proof_decl = ( + 'ML_prf', 'also', 'include', 'including', 'let', 'moreover', 'note', + 'txt', 'txt_raw', 'unfolding', 'using', 'write', + ) + + keyword_proof_asm = ('assume', 'case', 'def', 'fix', 'presume') + + keyword_proof_asm_goal = ('guess', 'obtain', 'show', 'thus') + + keyword_proof_script = ( + 'apply', 'apply_end', 'apply_trace', 'back', 'defer', 'prefer', + ) + + operators = ( + '::', ':', '(', ')', '[', ']', '_', '=', ',', '|', + '+', '-', '!', '?', + ) + + proof_operators = ('{', '}', '.', '..') + + tokens = { + 'root': [ + (r'\s+', Text), + (r'\(\*', Comment, 'comment'), + (r'\{\*', Comment, 'text'), + + (words(operators), Operator), + (words(proof_operators), Operator.Word), + + (words(keyword_minor, prefix=r'\b', suffix=r'\b'), Keyword.Pseudo), + + (words(keyword_diag, prefix=r'\b', suffix=r'\b'), Keyword.Type), + + (words(keyword_thy, prefix=r'\b', suffix=r'\b'), Keyword), + (words(keyword_theory_decl, prefix=r'\b', suffix=r'\b'), Keyword), + + (words(keyword_section, prefix=r'\b', suffix=r'\b'), Generic.Heading), + (words(keyword_subsection, prefix=r'\b', suffix=r'\b'), Generic.Subheading), + + (words(keyword_theory_goal, prefix=r'\b', suffix=r'\b'), Keyword.Namespace), + (words(keyword_theory_script, prefix=r'\b', suffix=r'\b'), Keyword.Namespace), + + (words(keyword_abandon_proof, prefix=r'\b', suffix=r'\b'), Generic.Error), + + (words(keyword_qed, prefix=r'\b', suffix=r'\b'), Keyword), + (words(keyword_proof_goal, prefix=r'\b', suffix=r'\b'), Keyword), + (words(keyword_proof_block, prefix=r'\b', suffix=r'\b'), Keyword), + 
(words(keyword_proof_decl, prefix=r'\b', suffix=r'\b'), Keyword), + + (words(keyword_proof_chain, prefix=r'\b', suffix=r'\b'), Keyword), + (words(keyword_proof_asm, prefix=r'\b', suffix=r'\b'), Keyword), + (words(keyword_proof_asm_goal, prefix=r'\b', suffix=r'\b'), Keyword), + + (words(keyword_proof_script, prefix=r'\b', suffix=r'\b'), Keyword.Pseudo), + + (r'\\<\w*>', Text.Symbol), + + (r"[^\W\d][.\w']*", Name), + (r"\?[^\W\d][.\w']*", Name), + (r"'[^\W\d][.\w']*", Name.Type), + + (r'\d[\d_]*', Name), # display numbers as name + (r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex), + (r'0[oO][0-7][0-7_]*', Number.Oct), + (r'0[bB][01][01_]*', Number.Bin), + + (r'"', String, 'string'), + (r'`', String.Other, 'fact'), + ], + 'comment': [ + (r'[^(*)]+', Comment), + (r'\(\*', Comment, '#push'), + (r'\*\)', Comment, '#pop'), + (r'[(*)]', Comment), + ], + 'text': [ + (r'[^*}]+', Comment), + (r'\*\}', Comment, '#pop'), + (r'\*', Comment), + (r'\}', Comment), + ], + 'string': [ + (r'[^"\\]+', String), + (r'\\<\w*>', String.Symbol), + (r'\\"', String), + (r'\\', String), + (r'"', String, '#pop'), + ], + 'fact': [ + (r'[^`\\]+', String.Other), + (r'\\<\w*>', String.Symbol), + (r'\\`', String.Other), + (r'\\', String.Other), + (r'`', String.Other, '#pop'), + ], + } + + +class LeanLexer(RegexLexer): + """ + For the `Lean `_ + theorem prover. + + .. 
versionadded:: 2.0 + """ + name = 'Lean' + aliases = ['lean'] + filenames = ['*.lean'] + mimetypes = ['text/x-lean'] + + flags = re.MULTILINE | re.UNICODE + + keywords1 = ('import', 'abbreviation', 'opaque_hint', 'tactic_hint', 'definition', 'renaming', + 'inline', 'hiding', 'exposing', 'parameter', 'parameters', 'conjecture', + 'hypothesis', 'lemma', 'corollary', 'variable', 'variables', 'print', 'theorem', + 'axiom', 'inductive', 'structure', 'universe', 'alias', 'help', + 'options', 'precedence', 'postfix', 'prefix', 'calc_trans', 'calc_subst', 'calc_refl', + 'infix', 'infixl', 'infixr', 'notation', 'eval', 'check', 'exit', 'coercion', 'end', + 'private', 'using', 'namespace', 'including', 'instance', 'section', 'context', + 'protected', 'expose', 'export', 'set_option', 'add_rewrite', 'extends') + + keywords2 = ( + 'forall', 'exists', 'fun', 'Pi', 'obtain', 'from', 'have', 'show', 'assume', 'take', + 'let', 'if', 'else', 'then', 'by', 'in', 'with', 'begin', 'proof', 'qed', 'calc' + ) + + keywords3 = ( + # Sorts + 'Type', 'Prop', + ) + + keywords4 = ( + # Tactics + 'apply', 'and_then', 'or_else', 'append', 'interleave', 'par', 'fixpoint', 'repeat', + 'at_most', 'discard', 'focus_at', 'rotate', 'try_for', 'now', 'assumption', 'eassumption', + 'state', 'intro', 'generalize', 'exact', 'unfold', 'beta', 'trace', 'focus', 'repeat1', + 'determ', 'destruct', 'try', 'auto', 'intros' + ) + + operators = ( + '!=', '#', '&', '&&', '*', '+', '-', '/', '@', + '-.', '->', '.', '..', '...', '::', ':>', ';', ';;', '<', + '<-', '=', '==', '>', '_', '`', '|', '||', '~', '=>', '<=', '>=', + '/\\', '\\/', u'∀', u'Π', u'λ', u'↔', u'∧', u'∨', u'≠', u'≤', u'≥', + u'¬', u'⁻¹', u'⬝', u'▸', u'→', u'∃', u'ℕ', u'ℤ', u'≈' + ) + + word_operators = ('and', 'or', 'not', 'iff', 'eq') + + punctuation = ('(', ')', ':', '{', '}', '[', ']', u'⦃', u'⦄', ':=', ',') + + primitives = ('unit', 'int', 'bool', 'string', 'char', 'list', + 'array', 'prod', 'sum', 'pair', 'real', 'nat', 'num', 'path') + + 
tokens = { + 'root': [ + (r'\s+', Text), + (r'\b(false|true)\b|\(\)|\[\]', Name.Builtin.Pseudo), + (r'/-', Comment, 'comment'), + (r'--.*?$', Comment.Single), + (words(keywords1, prefix=r'\b', suffix=r'\b'), Keyword.Namespace), + (words(keywords2, prefix=r'\b', suffix=r'\b'), Keyword), + (words(keywords3, prefix=r'\b', suffix=r'\b'), Keyword.Type), + (words(keywords4, prefix=r'\b', suffix=r'\b'), Keyword), + (words(operators), Name.Builtin.Pseudo), + (words(word_operators, prefix=r'\b', suffix=r'\b'), Name.Builtin.Pseudo), + (words(punctuation), Operator), + (words(primitives, prefix=r'\b', suffix=r'\b'), Keyword.Type), + (u"[A-Za-z_\u03b1-\u03ba\u03bc-\u03fb\u1f00-\u1ffe\u2100-\u214f]" + u"[A-Za-z_'\u03b1-\u03ba\u03bc-\u03fb\u1f00-\u1ffe\u2070-\u2079" + u"\u207f-\u2089\u2090-\u209c\u2100-\u214f]*", Name), + (r'\d+', Number.Integer), + (r'"', String.Double, 'string'), + (r'[~?][a-z][\w\']*:', Name.Variable) + ], + 'comment': [ + # Multiline Comments + (r'[^/-]', Comment.Multiline), + (r'/-', Comment.Multiline, '#push'), + (r'-/', Comment.Multiline, '#pop'), + (r'[/-]', Comment.Multiline) + ], + 'string': [ + (r'[^\\"]+', String.Double), + (r'\\[n"\\]', String.Escape), + ('"', String.Double, '#pop'), + ], + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/urbi.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/urbi.py new file mode 100644 index 0000000..b8ac151 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/urbi.py @@ -0,0 +1,133 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.urbi + ~~~~~~~~~~~~~~~~~~~~ + + Lexers for UrbiScript language. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import re + +from pygments.lexer import ExtendedRegexLexer, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['UrbiscriptLexer'] + + +class UrbiscriptLexer(ExtendedRegexLexer): + """ + For UrbiScript source code. + + .. versionadded:: 1.5 + """ + + name = 'UrbiScript' + aliases = ['urbiscript'] + filenames = ['*.u'] + mimetypes = ['application/x-urbiscript'] + + flags = re.DOTALL + + # TODO + # - handle Experimental and deprecated tags with specific tokens + # - handle Angles and Durations with specific tokens + + def blob_callback(lexer, match, ctx): + text_before_blob = match.group(1) + blob_start = match.group(2) + blob_size_str = match.group(3) + blob_size = int(blob_size_str) + yield match.start(), String, text_before_blob + ctx.pos += len(text_before_blob) + + # if blob size doesn't match blob format (example : "\B(2)(aaa)") + # yield blob as a string + if ctx.text[match.end() + blob_size] != ")": + result = "\\B(" + blob_size_str + ")(" + yield match.start(), String, result + ctx.pos += len(result) + return + + # if blob is well formated, yield as Escape + blob_text = blob_start + ctx.text[match.end():match.end()+blob_size] + ")" + yield match.start(), String.Escape, blob_text + ctx.pos = match.end() + blob_size + 1 # +1 is the ending ")" + + tokens = { + 'root': [ + (r'\s+', Text), + # comments + (r'//.*?\n', Comment), + (r'/\*', Comment.Multiline, 'comment'), + (r'(every|for|loop|while)(?:;|&|\||,)', Keyword), + (words(( + 'assert', 'at', 'break', 'case', 'catch', 'closure', 'compl', + 'continue', 'default', 'else', 'enum', 'every', 'external', + 'finally', 'for', 'freezeif', 'if', 'new', 'onleave', 'return', + 'stopif', 'switch', 'this', 'throw', 'timeout', 'try', + 'waituntil', 'whenever', 'while'), suffix=r'\b'), + Keyword), + (words(( + 'asm', 'auto', 'bool', 'char', 'const_cast', 'delete', 'double', + 'dynamic_cast', 'explicit', 'export', 'extern', 'float', 'friend', + 
'goto', 'inline', 'int', 'long', 'mutable', 'namespace', 'register', + 'reinterpret_cast', 'short', 'signed', 'sizeof', 'static_cast', + 'struct', 'template', 'typedef', 'typeid', 'typename', 'union', + 'unsigned', 'using', 'virtual', 'volatile', 'wchar_t'), suffix=r'\b'), + Keyword.Reserved), + # deprecated keywords, use a meaningfull token when available + (r'(emit|foreach|internal|loopn|static)\b', Keyword), + # ignored keywords, use a meaningfull token when available + (r'(private|protected|public)\b', Keyword), + (r'(var|do|const|function|class)\b', Keyword.Declaration), + (r'(true|false|nil|void)\b', Keyword.Constant), + (words(( + 'Barrier', 'Binary', 'Boolean', 'CallMessage', 'Channel', 'Code', + 'Comparable', 'Container', 'Control', 'Date', 'Dictionary', 'Directory', + 'Duration', 'Enumeration', 'Event', 'Exception', 'Executable', 'File', + 'Finalizable', 'Float', 'FormatInfo', 'Formatter', 'Global', 'Group', + 'Hash', 'InputStream', 'IoService', 'Job', 'Kernel', 'Lazy', 'List', + 'Loadable', 'Lobby', 'Location', 'Logger', 'Math', 'Mutex', 'nil', + 'Object', 'Orderable', 'OutputStream', 'Pair', 'Path', 'Pattern', + 'Position', 'Primitive', 'Process', 'Profile', 'PseudoLazy', 'PubSub', + 'RangeIterable', 'Regexp', 'Semaphore', 'Server', 'Singleton', 'Socket', + 'StackFrame', 'Stream', 'String', 'System', 'Tag', 'Timeout', + 'Traceable', 'TrajectoryGenerator', 'Triplet', 'Tuple', 'UObject', + 'UValue', 'UVar'), suffix=r'\b'), + Name.Builtin), + (r'(?:this)\b', Name.Builtin.Pseudo), + # don't match single | and & + (r'(?:[-=+*%/<>~^:]+|\.&?|\|\||&&)', Operator), + (r'(?:and_eq|and|bitand|bitor|in|not|not_eq|or_eq|or|xor_eq|xor)\b', + Operator.Word), + (r'[{}\[\]()]+', Punctuation), + (r'(?:;|\||,|&|\?|!)+', Punctuation), + (r'[$a-zA-Z_]\w*', Name.Other), + (r'0x[0-9a-fA-F]+', Number.Hex), + # Float, Integer, Angle and Duration + (r'(?:[0-9]+(?:(?:\.[0-9]+)?(?:[eE][+-]?[0-9]+)?)?' 
+ r'((?:rad|deg|grad)|(?:ms|s|min|h|d))?)\b', Number.Float), + # handle binary blob in strings + (r'"', String.Double, "string.double"), + (r"'", String.Single, "string.single"), + ], + 'string.double': [ + (r'((?:\\\\|\\"|[^"])*?)(\\B\((\d+)\)\()', blob_callback), + (r'(\\\\|\\"|[^"])*?"', String.Double, '#pop'), + ], + 'string.single': [ + (r"((?:\\\\|\\'|[^'])*?)(\\B\((\d+)\)\()", blob_callback), + (r"(\\\\|\\'|[^'])*?'", String.Single, '#pop'), + ], + # from http://pygments.org/docs/lexerdevelopment/#changing-states + 'comment': [ + (r'[^*/]', Comment.Multiline), + (r'/\*', Comment.Multiline, '#push'), + (r'\*/', Comment.Multiline, '#pop'), + (r'[*/]', Comment.Multiline), + ] + } diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/web.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/web.py new file mode 100644 index 0000000..8db0da5 --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/web.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.web + ~~~~~~~~~~~~~~~~~~~ + + Just export previously exported lexers. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +from pygments.lexers.html import HtmlLexer, DtdLexer, XmlLexer, XsltLexer, \ + HamlLexer, ScamlLexer, JadeLexer +from pygments.lexers.css import CssLexer, SassLexer, ScssLexer +from pygments.lexers.javascript import JavascriptLexer, LiveScriptLexer, \ + DartLexer, TypeScriptLexer, LassoLexer, ObjectiveJLexer, CoffeeScriptLexer +from pygments.lexers.actionscript import ActionScriptLexer, \ + ActionScript3Lexer, MxmlLexer +from pygments.lexers.php import PhpLexer +from pygments.lexers.webmisc import DuelLexer, XQueryLexer, SlimLexer, QmlLexer +from pygments.lexers.data import JsonLexer +JSONLexer = JsonLexer # for backwards compatibility with Pygments 1.5 + +__all__ = [] diff --git a/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/webmisc.py b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/webmisc.py new file mode 100644 index 0000000..331d78d --- /dev/null +++ b/plugin/packages/wakatime/wakatime/packages/pygments_py3/pygments/lexers/webmisc.py @@ -0,0 +1,920 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.webmisc + ~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for misc. web stuff. + + :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, bygroups, \ + default, using +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Literal +from pygments.util import unirange + +from pygments.lexers.css import _indentation, _starts_block +from pygments.lexers.html import HtmlLexer +from pygments.lexers.javascript import JavascriptLexer +from pygments.lexers.ruby import RubyLexer + +__all__ = ['DuelLexer', 'SlimLexer', 'XQueryLexer', 'QmlLexer', 'CirruLexer'] + + +class DuelLexer(RegexLexer): + """ + Lexer for Duel Views Engine (formerly JBST) markup with JavaScript code blocks. + See http://duelengine.org/. + See http://jsonml.org/jbst/. + + .. 
versionadded:: 1.4 + """ + + name = 'Duel' + aliases = ['duel', 'jbst', 'jsonml+bst'] + filenames = ['*.duel', '*.jbst'] + mimetypes = ['text/x-duel', 'text/x-jbst'] + + flags = re.DOTALL + + tokens = { + 'root': [ + (r'(<%[@=#!:]?)(.*?)(%>)', + bygroups(Name.Tag, using(JavascriptLexer), Name.Tag)), + (r'(<%\$)(.*?)(:)(.*?)(%>)', + bygroups(Name.Tag, Name.Function, Punctuation, String, Name.Tag)), + (r'(<%--)(.*?)(--%>)', + bygroups(Name.Tag, Comment.Multiline, Name.Tag)), + (r'()(.*?)()', + bygroups(using(HtmlLexer), + using(JavascriptLexer), using(HtmlLexer))), + (r'(.+?)(?=<)', using(HtmlLexer)), + (r'.+', using(HtmlLexer)), + ], + } + + +class XQueryLexer(ExtendedRegexLexer): + """ + An XQuery lexer, parsing a stream and outputting the tokens needed to + highlight xquery code. + + .. versionadded:: 1.4 + """ + name = 'XQuery' + aliases = ['xquery', 'xqy', 'xq', 'xql', 'xqm'] + filenames = ['*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'] + mimetypes = ['text/xquery', 'application/xquery'] + + xquery_parse_state = [] + + # FIX UNICODE LATER + # ncnamestartchar = ( + # ur"[A-Z]|_|[a-z]|[\u00C0-\u00D6]|[\u00D8-\u00F6]|[\u00F8-\u02FF]|" + # ur"[\u0370-\u037D]|[\u037F-\u1FFF]|[\u200C-\u200D]|[\u2070-\u218F]|" + # ur"[\u2C00-\u2FEF]|[\u3001-\uD7FF]|[\uF900-\uFDCF]|[\uFDF0-\uFFFD]|" + # ur"[\u10000-\uEFFFF]" + # ) + ncnamestartchar = r"(?:[A-Z]|_|[a-z])" + # FIX UNICODE LATER + # ncnamechar = ncnamestartchar + (ur"|-|\.|[0-9]|\u00B7|[\u0300-\u036F]|" + # ur"[\u203F-\u2040]") + ncnamechar = r"(?:" + ncnamestartchar + r"|-|\.|[0-9])" + ncname = "(?:%s+%s*)" % (ncnamestartchar, ncnamechar) + pitarget_namestartchar = r"(?:[A-KN-WYZ]|_|:|[a-kn-wyz])" + pitarget_namechar = r"(?:" + pitarget_namestartchar + r"|-|\.|[0-9])" + pitarget = "%s+%s*" % (pitarget_namestartchar, pitarget_namechar) + prefixedname = "%s:%s" % (ncname, ncname) + unprefixedname = ncname + qname = "(?:%s|%s)" % (prefixedname, unprefixedname) + + entityref = r'(?:&(?:lt|gt|amp|quot|apos|nbsp);)' + charref = 
r'(?:&#[0-9]+;|&#x[0-9a-fA-F]+;)' + + stringdouble = r'(?:"(?:' + entityref + r'|' + charref + r'|""|[^&"])*")' + stringsingle = r"(?:'(?:" + entityref + r"|" + charref + r"|''|[^&'])*')" + + # FIX UNICODE LATER + # elementcontentchar = (ur'\t|\r|\n|[\u0020-\u0025]|[\u0028-\u003b]|' + # ur'[\u003d-\u007a]|\u007c|[\u007e-\u007F]') + elementcontentchar = r'[A-Za-z]|\s|\d|[!"#$%()*+,\-./:;=?@\[\\\]^_\'`|~]' + # quotattrcontentchar = (ur'\t|\r|\n|[\u0020-\u0021]|[\u0023-\u0025]|' + # ur'[\u0027-\u003b]|[\u003d-\u007a]|\u007c|[\u007e-\u007F]') + quotattrcontentchar = r'[A-Za-z]|\s|\d|[!#$%()*+,\-./:;=?@\[\\\]^_\'`|~]' + # aposattrcontentchar = (ur'\t|\r|\n|[\u0020-\u0025]|[\u0028-\u003b]|' + # ur'[\u003d-\u007a]|\u007c|[\u007e-\u007F]') + aposattrcontentchar = r'[A-Za-z]|\s|\d|[!"#$%()*+,\-./:;=?@\[\\\]^_`|~]' + + # CHAR elements - fix the above elementcontentchar, quotattrcontentchar, + # aposattrcontentchar + # x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF] + + flags = re.DOTALL | re.MULTILINE | re.UNICODE + + def punctuation_root_callback(lexer, match, ctx): + yield match.start(), Punctuation, match.group(1) + # transition to root always - don't pop off stack + ctx.stack = ['root'] + ctx.pos = match.end() + + def operator_root_callback(lexer, match, ctx): + yield match.start(), Operator, match.group(1) + # transition to root always - don't pop off stack + ctx.stack = ['root'] + ctx.pos = match.end() + + def popstate_tag_callback(lexer, match, ctx): + yield match.start(), Name.Tag, match.group(1) + ctx.stack.append(lexer.xquery_parse_state.pop()) + ctx.pos = match.end() + + def popstate_xmlcomment_callback(lexer, match, ctx): + yield match.start(), String.Doc, match.group(1) + ctx.stack.append(lexer.xquery_parse_state.pop()) + ctx.pos = match.end() + + def popstate_kindtest_callback(lexer, match, ctx): + yield match.start(), Punctuation, match.group(1) + next_state = lexer.xquery_parse_state.pop() + if next_state == 'occurrenceindicator': + if 
re.match("[?*+]+", match.group(2)): + yield match.start(), Punctuation, match.group(2) + ctx.stack.append('operator') + ctx.pos = match.end() + else: + ctx.stack.append('operator') + ctx.pos = match.end(1) + else: + ctx.stack.append(next_state) + ctx.pos = match.end(1) + + def popstate_callback(lexer, match, ctx): + yield match.start(), Punctuation, match.group(1) + # if we have run out of our state stack, pop whatever is on the pygments + # state stack + if len(lexer.xquery_parse_state) == 0: + ctx.stack.pop() + elif len(ctx.stack) > 1: + ctx.stack.append(lexer.xquery_parse_state.pop()) + else: + # i don't know if i'll need this, but in case, default back to root + ctx.stack = ['root'] + ctx.pos = match.end() + + def pushstate_element_content_starttag_callback(lexer, match, ctx): + yield match.start(), Name.Tag, match.group(1) + lexer.xquery_parse_state.append('element_content') + ctx.stack.append('start_tag') + ctx.pos = match.end() + + def pushstate_cdata_section_callback(lexer, match, ctx): + yield match.start(), String.Doc, match.group(1) + ctx.stack.append('cdata_section') + lexer.xquery_parse_state.append(ctx.state.pop) + ctx.pos = match.end() + + def pushstate_starttag_callback(lexer, match, ctx): + yield match.start(), Name.Tag, match.group(1) + lexer.xquery_parse_state.append(ctx.state.pop) + ctx.stack.append('start_tag') + ctx.pos = match.end() + + def pushstate_operator_order_callback(lexer, match, ctx): + yield match.start(), Keyword, match.group(1) + yield match.start(), Text, match.group(2) + yield match.start(), Punctuation, match.group(3) + ctx.stack = ['root'] + lexer.xquery_parse_state.append('operator') + ctx.pos = match.end() + + def pushstate_operator_root_validate(lexer, match, ctx): + yield match.start(), Keyword, match.group(1) + yield match.start(), Text, match.group(2) + yield match.start(), Punctuation, match.group(3) + ctx.stack = ['root'] + lexer.xquery_parse_state.append('operator') + ctx.pos = match.end() + + def 
pushstate_operator_root_validate_withmode(lexer, match, ctx): + yield match.start(), Keyword, match.group(1) + yield match.start(), Text, match.group(2) + yield match.start(), Keyword, match.group(3) + ctx.stack = ['root'] + lexer.xquery_parse_state.append('operator') + ctx.pos = match.end() + + def pushstate_operator_processing_instruction_callback(lexer, match, ctx): + yield match.start(), String.Doc, match.group(1) + ctx.stack.append('processing_instruction') + lexer.xquery_parse_state.append('operator') + ctx.pos = match.end() + + def pushstate_element_content_processing_instruction_callback(lexer, match, ctx): + yield match.start(), String.Doc, match.group(1) + ctx.stack.append('processing_instruction') + lexer.xquery_parse_state.append('element_content') + ctx.pos = match.end() + + def pushstate_element_content_cdata_section_callback(lexer, match, ctx): + yield match.start(), String.Doc, match.group(1) + ctx.stack.append('cdata_section') + lexer.xquery_parse_state.append('element_content') + ctx.pos = match.end() + + def pushstate_operator_cdata_section_callback(lexer, match, ctx): + yield match.start(), String.Doc, match.group(1) + ctx.stack.append('cdata_section') + lexer.xquery_parse_state.append('operator') + ctx.pos = match.end() + + def pushstate_element_content_xmlcomment_callback(lexer, match, ctx): + yield match.start(), String.Doc, match.group(1) + ctx.stack.append('xml_comment') + lexer.xquery_parse_state.append('element_content') + ctx.pos = match.end() + + def pushstate_operator_xmlcomment_callback(lexer, match, ctx): + yield match.start(), String.Doc, match.group(1) + ctx.stack.append('xml_comment') + lexer.xquery_parse_state.append('operator') + ctx.pos = match.end() + + def pushstate_kindtest_callback(lexer, match, ctx): + yield match.start(), Keyword, match.group(1) + yield match.start(), Text, match.group(2) + yield match.start(), Punctuation, match.group(3) + lexer.xquery_parse_state.append('kindtest') + ctx.stack.append('kindtest') + 
ctx.pos = match.end() + + def pushstate_operator_kindtestforpi_callback(lexer, match, ctx): + yield match.start(), Keyword, match.group(1) + yield match.start(), Text, match.group(2) + yield match.start(), Punctuation, match.group(3) + lexer.xquery_parse_state.append('operator') + ctx.stack.append('kindtestforpi') + ctx.pos = match.end() + + def pushstate_operator_kindtest_callback(lexer, match, ctx): + yield match.start(), Keyword, match.group(1) + yield match.start(), Text, match.group(2) + yield match.start(), Punctuation, match.group(3) + lexer.xquery_parse_state.append('operator') + ctx.stack.append('kindtest') + ctx.pos = match.end() + + def pushstate_occurrenceindicator_kindtest_callback(lexer, match, ctx): + yield match.start(), Name.Tag, match.group(1) + yield match.start(), Text, match.group(2) + yield match.start(), Punctuation, match.group(3) + lexer.xquery_parse_state.append('occurrenceindicator') + ctx.stack.append('kindtest') + ctx.pos = match.end() + + def pushstate_operator_starttag_callback(lexer, match, ctx): + yield match.start(), Name.Tag, match.group(1) + lexer.xquery_parse_state.append('operator') + ctx.stack.append('start_tag') + ctx.pos = match.end() + + def pushstate_operator_root_callback(lexer, match, ctx): + yield match.start(), Punctuation, match.group(1) + lexer.xquery_parse_state.append('operator') + ctx.stack = ['root'] + ctx.pos = match.end() + + def pushstate_operator_root_construct_callback(lexer, match, ctx): + yield match.start(), Keyword, match.group(1) + yield match.start(), Text, match.group(2) + yield match.start(), Punctuation, match.group(3) + lexer.xquery_parse_state.append('operator') + ctx.stack = ['root'] + ctx.pos = match.end() + + def pushstate_root_callback(lexer, match, ctx): + yield match.start(), Punctuation, match.group(1) + cur_state = ctx.stack.pop() + lexer.xquery_parse_state.append(cur_state) + ctx.stack = ['root'] + ctx.pos = match.end() + + def pushstate_operator_attribute_callback(lexer, match, ctx): + 
yield match.start(), Name.Attribute, match.group(1) + ctx.stack.append('operator') + ctx.pos = match.end() + + def pushstate_operator_callback(lexer, match, ctx): + yield match.start(), Keyword, match.group(1) + yield match.start(), Text, match.group(2) + yield match.start(), Punctuation, match.group(3) + lexer.xquery_parse_state.append('operator') + ctx.pos = match.end() + + tokens = { + 'comment': [ + # xquery comments + (r'(:\))', Comment, '#pop'), + (r'(\(:)', Comment, '#push'), + (r'[^:)]', Comment), + (r'([^:)]|:|\))', Comment), + ], + 'whitespace': [ + (r'\s+', Text), + ], + 'operator': [ + include('whitespace'), + (r'(\})', popstate_callback), + (r'\(:', Comment, 'comment'), + + (r'(\{)', pushstate_root_callback), + (r'then|else|external|at|div|except', Keyword, 'root'), + (r'order by', Keyword, 'root'), + (r'is|mod|order\s+by|stable\s+order\s+by', Keyword, 'root'), + (r'and|or', Operator.Word, 'root'), + (r'(eq|ge|gt|le|lt|ne|idiv|intersect|in)(?=\b)', + Operator.Word, 'root'), + (r'return|satisfies|to|union|where|preserve\s+strip', + Keyword, 'root'), + (r'(>=|>>|>|<=|<<|<|-|\*|!=|\+|\|\||\||:=|=)', + operator_root_callback), + (r'(::|;|\[|//|/|,)', + punctuation_root_callback), + (r'(castable|cast)(\s+)(as)\b', + bygroups(Keyword, Text, Keyword), 'singletype'), + (r'(instance)(\s+)(of)\b', + bygroups(Keyword, Text, Keyword), 'itemtype'), + (r'(treat)(\s+)(as)\b', + bygroups(Keyword, Text, Keyword), 'itemtype'), + (r'(case|as)\b', Keyword, 'itemtype'), + (r'(\))(\s*)(as)', + bygroups(Punctuation, Text, Keyword), 'itemtype'), + (r'\$', Name.Variable, 'varname'), + (r'(for|let)(\s+)(\$)', + bygroups(Keyword, Text, Name.Variable), 'varname'), + # (r'\)|\?|\]', Punctuation, '#push'), + (r'\)|\?|\]', Punctuation), + (r'(empty)(\s+)(greatest|least)', bygroups(Keyword, Text, Keyword)), + (r'ascending|descending|default', Keyword, '#push'), + (r'external', Keyword), + (r'collation', Keyword, 'uritooperator'), + # finally catch all string literals and stay in 
operator state + (stringdouble, String.Double), + (stringsingle, String.Single), + + (r'(catch)(\s*)', bygroups(Keyword, Text), 'root'), + ], + 'uritooperator': [ + (stringdouble, String.Double, '#pop'), + (stringsingle, String.Single, '#pop'), + ], + 'namespacedecl': [ + include('whitespace'), + (r'\(:', Comment, 'comment'), + (r'(at)(\s+)('+stringdouble+')', bygroups(Keyword, Text, String.Double)), + (r"(at)(\s+)("+stringsingle+')', bygroups(Keyword, Text, String.Single)), + (stringdouble, String.Double), + (stringsingle, String.Single), + (r',', Punctuation), + (r'=', Operator), + (r';', Punctuation, 'root'), + (ncname, Name.Namespace), + ], + 'namespacekeyword': [ + include('whitespace'), + (r'\(:', Comment, 'comment'), + (stringdouble, String.Double, 'namespacedecl'), + (stringsingle, String.Single, 'namespacedecl'), + (r'inherit|no-inherit', Keyword, 'root'), + (r'namespace', Keyword, 'namespacedecl'), + (r'(default)(\s+)(element)', bygroups(Keyword, Text, Keyword)), + (r'preserve|no-preserve', Keyword), + (r',', Punctuation), + ], + 'varname': [ + (r'\(:', Comment, 'comment'), + (qname, Name.Variable, 'operator'), + ], + 'singletype': [ + (r'\(:', Comment, 'comment'), + (ncname + r'(:\*)', Name.Variable, 'operator'), + (qname, Name.Variable, 'operator'), + ], + 'itemtype': [ + include('whitespace'), + (r'\(:', Comment, 'comment'), + (r'\$', Punctuation, 'varname'), + (r'(void)(\s*)(\()(\s*)(\))', + bygroups(Keyword, Text, Punctuation, Text, Punctuation), 'operator'), + (r'(element|attribute|schema-element|schema-attribute|comment|text|' + r'node|binary|document-node|empty-sequence)(\s*)(\()', + pushstate_occurrenceindicator_kindtest_callback), + # Marklogic specific type? 
+ (r'(processing-instruction)(\s*)(\()', + bygroups(Keyword, Text, Punctuation), + ('occurrenceindicator', 'kindtestforpi')), + (r'(item)(\s*)(\()(\s*)(\))(?=[*+?])', + bygroups(Keyword, Text, Punctuation, Text, Punctuation), + 'occurrenceindicator'), + (r'\(\#', Punctuation, 'pragma'), + (r';', Punctuation, '#pop'), + (r'then|else', Keyword, '#pop'), + (r'(at)(\s+)(' + stringdouble + ')', + bygroups(Keyword, Text, String.Double), 'namespacedecl'), + (r'(at)(\s+)(' + stringsingle + ')', + bygroups(Keyword, Text, String.Single), 'namespacedecl'), + (r'except|intersect|in|is|return|satisfies|to|union|where', + Keyword, 'root'), + (r'and|div|eq|ge|gt|le|lt|ne|idiv|mod|or', Operator.Word, 'root'), + (r':=|=|,|>=|>>|>|\[|\(|<=|<<|<|-|!=|\|\||\|', Operator, 'root'), + (r'external|at', Keyword, 'root'), + (r'(stable)(\s+)(order)(\s+)(by)', + bygroups(Keyword, Text, Keyword, Text, Keyword), 'root'), + (r'(castable|cast)(\s+)(as)', + bygroups(Keyword, Text, Keyword), 'singletype'), + (r'(treat)(\s+)(as)', bygroups(Keyword, Text, Keyword)), + (r'(instance)(\s+)(of)', bygroups(Keyword, Text, Keyword)), + (r'case|as', Keyword, 'itemtype'), + (r'(\))(\s*)(as)', bygroups(Operator, Text, Keyword), 'itemtype'), + (ncname + r':\*', Keyword.Type, 'operator'), + (qname, Keyword.Type, 'occurrenceindicator'), + ], + 'kindtest': [ + (r'\(:', Comment, 'comment'), + (r'\{', Punctuation, 'root'), + (r'(\))([*+?]?)', popstate_kindtest_callback), + (r'\*', Name, 'closekindtest'), + (qname, Name, 'closekindtest'), + (r'(element|schema-element)(\s*)(\()', pushstate_kindtest_callback), + ], + 'kindtestforpi': [ + (r'\(:', Comment, 'comment'), + (r'\)', Punctuation, '#pop'), + (ncname, Name.Variable), + (stringdouble, String.Double), + (stringsingle, String.Single), + ], + 'closekindtest': [ + (r'\(:', Comment, 'comment'), + (r'(\))', popstate_callback), + (r',', Punctuation), + (r'(\{)', pushstate_operator_root_callback), + (r'\?', Punctuation), + ], + 'xml_comment': [ + (r'(-->)', 
popstate_xmlcomment_callback), + (r'[^-]{1,2}', Literal), + (u'\\t|\\r|\\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' + + unirange(0x10000, 0x10ffff), Literal), + ], + 'processing_instruction': [ + (r'\s+', Text, 'processing_instruction_content'), + (r'\?>', String.Doc, '#pop'), + (pitarget, Name), + ], + 'processing_instruction_content': [ + (r'\?>', String.Doc, '#pop'), + (u'\\t|\\r|\\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' + + unirange(0x10000, 0x10ffff), Literal), + ], + 'cdata_section': [ + (r']]>', String.Doc, '#pop'), + (u'\\t|\\r|\\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' + + unirange(0x10000, 0x10ffff), Literal), + ], + 'start_tag': [ + include('whitespace'), + (r'(/>)', popstate_tag_callback), + (r'>', Name.Tag, 'element_content'), + (r'"', Punctuation, 'quot_attribute_content'), + (r"'", Punctuation, 'apos_attribute_content'), + (r'=', Operator), + (qname, Name.Tag), + ], + 'quot_attribute_content': [ + (r'"', Punctuation, 'start_tag'), + (r'(\{)', pushstate_root_callback), + (r'""', Name.Attribute), + (quotattrcontentchar, Name.Attribute), + (entityref, Name.Attribute), + (charref, Name.Attribute), + (r'\{\{|\}\}', Name.Attribute), + ], + 'apos_attribute_content': [ + (r"'", Punctuation, 'start_tag'), + (r'\{', Punctuation, 'root'), + (r"''", Name.Attribute), + (aposattrcontentchar, Name.Attribute), + (entityref, Name.Attribute), + (charref, Name.Attribute), + (r'\{\{|\}\}', Name.Attribute), + ], + 'element_content': [ + (r')', popstate_tag_callback), + (qname, Name.Tag), + ], + 'xmlspace_decl': [ + (r'\(:', Comment, 'comment'), + (r'preserve|strip', Keyword, '#pop'), + ], + 'declareordering': [ + (r'\(:', Comment, 'comment'), + include('whitespace'), + (r'ordered|unordered', Keyword, '#pop'), + ], + 'xqueryversion': [ + include('whitespace'), + (r'\(:', Comment, 'comment'), + (stringdouble, String.Double), + (stringsingle, String.Single), + (r'encoding', Keyword), + (r';', Punctuation, '#pop'), + ], + 'pragma': [ + (qname, Name.Variable, 'pragmacontents'), + ], + 
'pragmacontents': [ + (r'#\)', Punctuation, 'operator'), + (u'\\t|\\r|\\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' + + unirange(0x10000, 0x10ffff), Literal), + (r'(\s+)', Text), + ], + 'occurrenceindicator': [ + include('whitespace'), + (r'\(:', Comment, 'comment'), + (r'\*|\?|\+', Operator, 'operator'), + (r':=', Operator, 'root'), + default('operator'), + ], + 'option': [ + include('whitespace'), + (qname, Name.Variable, '#pop'), + ], + 'qname_braren': [ + include('whitespace'), + (r'(\{)', pushstate_operator_root_callback), + (r'(\()', Punctuation, 'root'), + ], + 'element_qname': [ + (qname, Name.Variable, 'root'), + ], + 'attribute_qname': [ + (qname, Name.Variable, 'root'), + ], + 'root': [ + include('whitespace'), + (r'\(:', Comment, 'comment'), + + # handle operator state + # order on numbers matters - handle most complex first + (r'\d+(\.\d*)?[eE][+-]?\d+', Number.Float, 'operator'), + (r'(\.\d+)[eE][+-]?\d+', Number.Float, 'operator'), + (r'(\.\d+|\d+\.\d*)', Number.Float, 'operator'), + (r'(\d+)', Number.Integer, 'operator'), + (r'(\.\.|\.|\))', Punctuation, 'operator'), + (r'(declare)(\s+)(construction)', + bygroups(Keyword, Text, Keyword), 'operator'), + (r'(declare)(\s+)(default)(\s+)(order)', + bygroups(Keyword, Text, Keyword, Text, Keyword), 'operator'), + (ncname + ':\*', Name, 'operator'), + ('\*:'+ncname, Name.Tag, 'operator'), + ('\*', Name.Tag, 'operator'), + (stringdouble, String.Double, 'operator'), + (stringsingle, String.Single, 'operator'), + + (r'(\})', popstate_callback), + + # NAMESPACE DECL + (r'(declare)(\s+)(default)(\s+)(collation)', + bygroups(Keyword, Text, Keyword, Text, Keyword)), + (r'(module|declare)(\s+)(namespace)', + bygroups(Keyword, Text, Keyword), 'namespacedecl'), + (r'(declare)(\s+)(base-uri)', + bygroups(Keyword, Text, Keyword), 'namespacedecl'), + + # NAMESPACE KEYWORD + (r'(declare)(\s+)(default)(\s+)(element|function)', + bygroups(Keyword, Text, Keyword, Text, Keyword), 'namespacekeyword'), + 
(r'(import)(\s+)(schema|module)', + bygroups(Keyword.Pseudo, Text, Keyword.Pseudo), 'namespacekeyword'), + (r'(declare)(\s+)(copy-namespaces)', + bygroups(Keyword, Text, Keyword), 'namespacekeyword'), + + # VARNAMEs + (r'(for|let|some|every)(\s+)(\$)', + bygroups(Keyword, Text, Name.Variable), 'varname'), + (r'\$', Name.Variable, 'varname'), + (r'(declare)(\s+)(variable)(\s+)(\$)', + bygroups(Keyword, Text, Keyword, Text, Name.Variable), 'varname'), + + # ITEMTYPE + (r'(\))(\s+)(as)', bygroups(Operator, Text, Keyword), 'itemtype'), + + (r'(element|attribute|schema-element|schema-attribute|comment|' + r'text|node|document-node|empty-sequence)(\s+)(\()', + pushstate_operator_kindtest_callback), + + (r'(processing-instruction)(\s+)(\()', + pushstate_operator_kindtestforpi_callback), + + (r'(