upgraded wakatime package

parent c3e08623c1
commit b836f26226

155 changed files with 48554 additions and 2 deletions
91  packages/wakatime/wakatime/packages/pygments2/__init__.py  Normal file

@@ -0,0 +1,91 @@
# -*- coding: utf-8 -*-
"""
    Pygments
    ~~~~~~~~

    Pygments is a syntax highlighting package written in Python.

    It is a generic syntax highlighter for general use in all kinds of software
    such as forum systems, wikis or other applications that need to prettify
    source code. Highlights are:

    * a wide range of common languages and markup formats is supported
    * special attention is paid to details, increasing quality by a fair amount
    * support for new languages and formats is added easily
    * a number of output formats, presently HTML, LaTeX, RTF, SVG, all image
      formats that PIL supports, and ANSI sequences
    * it is usable as a command-line tool and as a library
    * ... and it highlights even Brainfuck!

    The `Pygments tip`_ is installable with ``easy_install Pygments==dev``.

    .. _Pygments tip:
       http://bitbucket.org/birkenfeld/pygments-main/get/tip.zip#egg=Pygments-dev

    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

__version__ = '1.6'
__docformat__ = 'restructuredtext'

__all__ = ['lex', 'format', 'highlight']


import sys

from pygments.util import StringIO, BytesIO


def lex(code, lexer):
    """
    Lex ``code`` with ``lexer`` and return an iterable of tokens.
    """
    try:
        return lexer.get_tokens(code)
    except TypeError, err:
        if isinstance(err.args[0], str) and \
           'unbound method get_tokens' in err.args[0]:
            raise TypeError('lex() argument must be a lexer instance, '
                            'not a class')
        raise


def format(tokens, formatter, outfile=None):
    """
    Format a tokenlist ``tokens`` with the formatter ``formatter``.

    If ``outfile`` is given and a valid file object (an object
    with a ``write`` method), the result will be written to it, otherwise
    it is returned as a string.
    """
    try:
        if not outfile:
            #print formatter, 'using', formatter.encoding
            realoutfile = formatter.encoding and BytesIO() or StringIO()
            formatter.format(tokens, realoutfile)
            return realoutfile.getvalue()
        else:
            formatter.format(tokens, outfile)
    except TypeError, err:
        if isinstance(err.args[0], str) and \
           'unbound method format' in err.args[0]:
            raise TypeError('format() argument must be a formatter instance, '
                            'not a class')
        raise


def highlight(code, lexer, formatter, outfile=None):
    """
    Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``.

    If ``outfile`` is given and a valid file object (an object
    with a ``write`` method), the result will be written to it, otherwise
    it is returned as a string.
    """
    return format(lex(code, lexer), formatter, outfile)


if __name__ == '__main__':
    from pygments.cmdline import main
    sys.exit(main(sys.argv))
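
The three helpers above compose: highlight() is just format(lex(code, lexer), formatter). A minimal usage sketch, not part of the diff, assuming the vendored package is importable as `pygments` (Python 2 syntax to match the code above):

# Editorial usage sketch, not part of the vendored diff.
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

# Lexer and formatter must be *instances*; passing a class trips the
# TypeError guards in lex() and format() above.
html = highlight('print "hi"', PythonLexer(), HtmlFormatter())
print html  # emits a <div class="highlight">...</div> block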
441  packages/wakatime/wakatime/packages/pygments2/cmdline.py  Normal file

@@ -0,0 +1,441 @@
# -*- coding: utf-8 -*-
"""
    pygments.cmdline
    ~~~~~~~~~~~~~~~~

    Command line interface.

    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""
import sys
import getopt
from textwrap import dedent

from pygments import __version__, highlight
from pygments.util import ClassNotFound, OptionError, docstring_headline
from pygments.lexers import get_all_lexers, get_lexer_by_name, get_lexer_for_filename, \
     find_lexer_class, guess_lexer, TextLexer
from pygments.formatters import get_all_formatters, get_formatter_by_name, \
     get_formatter_for_filename, find_formatter_class, \
     TerminalFormatter  # pylint:disable-msg=E0611
from pygments.filters import get_all_filters, find_filter_class
from pygments.styles import get_all_styles, get_style_by_name


USAGE = """\
Usage: %s [-l <lexer> | -g] [-F <filter>[:<options>]] [-f <formatter>]
          [-O <options>] [-P <option=value>] [-o <outfile>] [<infile>]

       %s -S <style> -f <formatter> [-a <arg>] [-O <options>] [-P <option=value>]
       %s -L [<which> ...]
       %s -N <filename>
       %s -H <type> <name>
       %s -h | -V

Highlight the input file and write the result to <outfile>.

If no input file is given, use stdin, if -o is not given, use stdout.

<lexer> is a lexer name (query all lexer names with -L). If -l is not
given, the lexer is guessed from the extension of the input file name
(this obviously doesn't work if the input is stdin). If -g is passed,
attempt to guess the lexer from the file contents, or pass through as
plain text if this fails (this can work for stdin).

Likewise, <formatter> is a formatter name, and will be guessed from
the extension of the output file name. If no output file is given,
the terminal formatter will be used by default.

With the -O option, you can give the lexer and formatter a comma-
separated list of options, e.g. ``-O bg=light,python=cool``.

The -P option adds lexer and formatter options like the -O option, but
you can only give one option per -P. That way, the option value may
contain commas and equals signs, which it can't with -O, e.g.
``-P "heading=Pygments, the Python highlighter"``.

With the -F option, you can add filters to the token stream, you can
give options in the same way as for -O after a colon (note: there must
not be spaces around the colon).

The -O, -P and -F options can be given multiple times.

With the -S option, print out style definitions for style <style>
for formatter <formatter>. The argument given by -a is formatter
dependent.

The -L option lists lexers, formatters, styles or filters -- set
`which` to the thing you want to list (e.g. "styles"), or omit it to
list everything.

The -N option guesses and prints out a lexer name based solely on
the given filename. It does not take input or highlight anything.
If no specific lexer can be determined, "text" is returned.

The -H option prints detailed help for the object <name> of type <type>,
where <type> is one of "lexer", "formatter" or "filter".

The -h option prints this help.
The -V option prints the package version.
"""


def _parse_options(o_strs):
    opts = {}
    if not o_strs:
        return opts
    for o_str in o_strs:
        if not o_str:
            continue
        o_args = o_str.split(',')
        for o_arg in o_args:
            o_arg = o_arg.strip()
            try:
                o_key, o_val = o_arg.split('=')
                o_key = o_key.strip()
                o_val = o_val.strip()
            except ValueError:
                opts[o_arg] = True
            else:
                opts[o_key] = o_val
    return opts


def _parse_filters(f_strs):
    filters = []
    if not f_strs:
        return filters
    for f_str in f_strs:
        if ':' in f_str:
            fname, fopts = f_str.split(':', 1)
            filters.append((fname, _parse_options([fopts])))
        else:
            filters.append((f_str, {}))
    return filters


def _print_help(what, name):
    try:
        if what == 'lexer':
            cls = find_lexer_class(name)
            print "Help on the %s lexer:" % cls.name
            print dedent(cls.__doc__)
        elif what == 'formatter':
            cls = find_formatter_class(name)
            print "Help on the %s formatter:" % cls.name
            print dedent(cls.__doc__)
        elif what == 'filter':
            cls = find_filter_class(name)
            print "Help on the %s filter:" % name
            print dedent(cls.__doc__)
    except AttributeError:
        print >>sys.stderr, "%s not found!" % what


def _print_list(what):
    if what == 'lexer':
        print
        print "Lexers:"
        print "~~~~~~~"

        info = []
        for fullname, names, exts, _ in get_all_lexers():
            tup = (', '.join(names)+':', fullname,
                   exts and '(filenames ' + ', '.join(exts) + ')' or '')
            info.append(tup)
        info.sort()
        for i in info:
            print ('* %s\n    %s %s') % i

    elif what == 'formatter':
        print
        print "Formatters:"
        print "~~~~~~~~~~~"

        info = []
        for cls in get_all_formatters():
            doc = docstring_headline(cls)
            tup = (', '.join(cls.aliases) + ':', doc, cls.filenames and
                   '(filenames ' + ', '.join(cls.filenames) + ')' or '')
            info.append(tup)
        info.sort()
        for i in info:
            print ('* %s\n    %s %s') % i

    elif what == 'filter':
        print
        print "Filters:"
        print "~~~~~~~~"

        for name in get_all_filters():
            cls = find_filter_class(name)
            print "* " + name + ':'
            print "    %s" % docstring_headline(cls)

    elif what == 'style':
        print
        print "Styles:"
        print "~~~~~~~"

        for name in get_all_styles():
            cls = get_style_by_name(name)
            print "* " + name + ':'
            print "    %s" % docstring_headline(cls)


def main(args=sys.argv):
    """
    Main command line entry point.
    """
    # pylint: disable-msg=R0911,R0912,R0915

    usage = USAGE % ((args[0],) * 6)

    if sys.platform in ['win32', 'cygwin']:
        try:
            # Provide coloring under Windows, if possible
            import colorama
            colorama.init()
        except ImportError:
            pass

    try:
        popts, args = getopt.getopt(args[1:], "l:f:F:o:O:P:LS:a:N:hVHg")
    except getopt.GetoptError, err:
        print >>sys.stderr, usage
        return 2
    opts = {}
    O_opts = []
    P_opts = []
    F_opts = []
    for opt, arg in popts:
        if opt == '-O':
            O_opts.append(arg)
        elif opt == '-P':
            P_opts.append(arg)
        elif opt == '-F':
            F_opts.append(arg)
        opts[opt] = arg

    if not opts and not args:
        print usage
        return 0

    if opts.pop('-h', None) is not None:
        print usage
        return 0

    if opts.pop('-V', None) is not None:
        print 'Pygments version %s, (c) 2006-2013 by Georg Brandl.' % __version__
        return 0

    # handle ``pygmentize -L``
    L_opt = opts.pop('-L', None)
    if L_opt is not None:
        if opts:
            print >>sys.stderr, usage
            return 2

        # print version
        main(['', '-V'])
        if not args:
            args = ['lexer', 'formatter', 'filter', 'style']
        for arg in args:
            _print_list(arg.rstrip('s'))
        return 0

    # handle ``pygmentize -H``
    H_opt = opts.pop('-H', None)
    if H_opt is not None:
        if opts or len(args) != 2:
            print >>sys.stderr, usage
            return 2

        what, name = args
        if what not in ('lexer', 'formatter', 'filter'):
            print >>sys.stderr, usage
            return 2

        _print_help(what, name)
        return 0

    # parse -O options
    parsed_opts = _parse_options(O_opts)
    opts.pop('-O', None)

    # parse -P options
    for p_opt in P_opts:
        try:
            name, value = p_opt.split('=', 1)
        except ValueError:
            parsed_opts[p_opt] = True
        else:
            parsed_opts[name] = value
    opts.pop('-P', None)

    # handle ``pygmentize -N``
    infn = opts.pop('-N', None)
    if infn is not None:
        try:
            lexer = get_lexer_for_filename(infn, **parsed_opts)
        except ClassNotFound, err:
            lexer = TextLexer()
        except OptionError, err:
            print >>sys.stderr, 'Error:', err
            return 1

        print lexer.aliases[0]
        return 0

    # handle ``pygmentize -S``
    S_opt = opts.pop('-S', None)
    a_opt = opts.pop('-a', None)
    if S_opt is not None:
        f_opt = opts.pop('-f', None)
        if not f_opt:
            print >>sys.stderr, usage
            return 2
        if opts or args:
            print >>sys.stderr, usage
            return 2

        try:
            parsed_opts['style'] = S_opt
            fmter = get_formatter_by_name(f_opt, **parsed_opts)
        except ClassNotFound, err:
            print >>sys.stderr, err
            return 1

        arg = a_opt or ''
        try:
            print fmter.get_style_defs(arg)
        except Exception, err:
            print >>sys.stderr, 'Error:', err
            return 1
        return 0

    # if no -S is given, -a is not allowed
    if a_opt is not None:
        print >>sys.stderr, usage
        return 2

    # parse -F options
    F_opts = _parse_filters(F_opts)
    opts.pop('-F', None)

    # select formatter
    outfn = opts.pop('-o', None)
    fmter = opts.pop('-f', None)
    if fmter:
        try:
            fmter = get_formatter_by_name(fmter, **parsed_opts)
        except (OptionError, ClassNotFound), err:
            print >>sys.stderr, 'Error:', err
            return 1

    if outfn:
        if not fmter:
            try:
                fmter = get_formatter_for_filename(outfn, **parsed_opts)
            except (OptionError, ClassNotFound), err:
                print >>sys.stderr, 'Error:', err
                return 1
        try:
            outfile = open(outfn, 'wb')
        except Exception, err:
            print >>sys.stderr, 'Error: cannot open outfile:', err
            return 1
    else:
        if not fmter:
            fmter = TerminalFormatter(**parsed_opts)
        outfile = sys.stdout

    # select lexer
    lexer = opts.pop('-l', None)
    if lexer:
        try:
            lexer = get_lexer_by_name(lexer, **parsed_opts)
        except (OptionError, ClassNotFound), err:
            print >>sys.stderr, 'Error:', err
            return 1

    if args:
        if len(args) > 1:
            print >>sys.stderr, usage
            return 2

        infn = args[0]
        try:
            code = open(infn, 'rb').read()
        except Exception, err:
            print >>sys.stderr, 'Error: cannot read infile:', err
            return 1

        if not lexer:
            try:
                lexer = get_lexer_for_filename(infn, code, **parsed_opts)
            except ClassNotFound, err:
                if '-g' in opts:
                    try:
                        lexer = guess_lexer(code, **parsed_opts)
                    except ClassNotFound:
                        lexer = TextLexer(**parsed_opts)
                else:
                    print >>sys.stderr, 'Error:', err
                    return 1
            except OptionError, err:
                print >>sys.stderr, 'Error:', err
                return 1

    else:
        if '-g' in opts:
            code = sys.stdin.read()
            try:
                lexer = guess_lexer(code, **parsed_opts)
            except ClassNotFound:
                lexer = TextLexer(**parsed_opts)
        elif not lexer:
            print >>sys.stderr, 'Error: no lexer name given and reading ' + \
                                'from stdin (try using -g or -l <lexer>)'
            return 2
        else:
            code = sys.stdin.read()

    # No encoding given? Use latin1 if output file given,
    # stdin/stdout encoding otherwise.
    # (This is a compromise, I'm not too happy with it...)
    if 'encoding' not in parsed_opts and 'outencoding' not in parsed_opts:
        if outfn:
            # encoding pass-through
            fmter.encoding = 'latin1'
        else:
            if sys.version_info < (3,):
                # use terminal encoding; Python 3's terminals already do that
                lexer.encoding = getattr(sys.stdin, 'encoding',
                                         None) or 'ascii'
                fmter.encoding = getattr(sys.stdout, 'encoding',
                                         None) or 'ascii'
    elif not outfn and sys.version_info > (3,):
        # output to terminal with encoding -> use .buffer
        outfile = sys.stdout.buffer

    # ... and do it!
    try:
        # process filters
        for fname, fopts in F_opts:
            lexer.add_filter(fname, **fopts)
        highlight(code, lexer, fmter, outfile)
    except Exception, err:
        import traceback
        info = traceback.format_exception(*sys.exc_info())
        msg = info[-1].strip()
        if len(info) >= 3:
            # extract relevant file and position info
            msg += '\n   (f%s)' % info[-2].split('\n')[0].strip()[1:]
        print >>sys.stderr
        print >>sys.stderr, '*** Error while highlighting:'
        print >>sys.stderr, msg
        return 1

    return 0
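
Since main() takes an argv-style list and returns an exit code, the pygmentize command line can also be driven programmatically. A hedged sketch, not part of the diff, assuming the vendored package is importable as `pygments`:

# Editorial usage sketch, not part of the vendored diff.
from pygments.cmdline import main

# Equivalent to:
#   pygmentize -l python -f html -O full,style=colorful -o out.html example.py
rc = main(['pygmentize', '-l', 'python', '-f', 'html',
           '-O', 'full,style=colorful', '-o', 'out.html', 'example.py'])
print 'exit code:', rc  # 0 on success, 1 on error, 2 on usage error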
356  packages/wakatime/wakatime/packages/pygments2/filters.py  Normal file

@@ -0,0 +1,356 @@
# -*- coding: utf-8 -*-
"""
    pygments.filters
    ~~~~~~~~~~~~~~~~

    Module containing filter lookup functions and default
    filters.

    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import re

from pygments.token import String, Comment, Keyword, Name, Error, Whitespace, \
    string_to_tokentype
from pygments.filter import Filter
from pygments.util import get_list_opt, get_int_opt, get_bool_opt, \
     get_choice_opt, ClassNotFound, OptionError
from pygments.plugin import find_plugin_filters


def find_filter_class(filtername):
    """
    Lookup a filter by name. Return None if not found.
    """
    if filtername in FILTERS:
        return FILTERS[filtername]
    for name, cls in find_plugin_filters():
        if name == filtername:
            return cls
    return None


def get_filter_by_name(filtername, **options):
    """
    Return an instantiated filter. Options are passed to the filter
    initializer if wanted. Raise a ClassNotFound if not found.
    """
    cls = find_filter_class(filtername)
    if cls:
        return cls(**options)
    else:
        raise ClassNotFound('filter %r not found' % filtername)


def get_all_filters():
    """
    Return a generator of all filter names.
    """
    for name in FILTERS:
        yield name
    for name, _ in find_plugin_filters():
        yield name


def _replace_special(ttype, value, regex, specialttype,
                     replacefunc=lambda x: x):
    last = 0
    for match in regex.finditer(value):
        start, end = match.start(), match.end()
        if start != last:
            yield ttype, value[last:start]
        yield specialttype, replacefunc(value[start:end])
        last = end
    if last != len(value):
        yield ttype, value[last:]


class CodeTagFilter(Filter):
    """
    Highlight special code tags in comments and docstrings.

    Options accepted:

    `codetags` : list of strings
       A list of strings that are flagged as code tags. The default is to
       highlight ``XXX``, ``TODO``, ``BUG`` and ``NOTE``.
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        tags = get_list_opt(options, 'codetags',
                            ['XXX', 'TODO', 'BUG', 'NOTE'])
        self.tag_re = re.compile(r'\b(%s)\b' % '|'.join([
            re.escape(tag) for tag in tags if tag
        ]))

    def filter(self, lexer, stream):
        regex = self.tag_re
        for ttype, value in stream:
            if ttype in String.Doc or \
               ttype in Comment and \
               ttype not in Comment.Preproc:
                for sttype, svalue in _replace_special(ttype, value, regex,
                                                       Comment.Special):
                    yield sttype, svalue
            else:
                yield ttype, value


class KeywordCaseFilter(Filter):
    """
    Convert keywords to lowercase or uppercase or capitalize them, which
    means first letter uppercase, rest lowercase.

    This can be useful e.g. if you highlight Pascal code and want to adapt the
    code to your styleguide.

    Options accepted:

    `case` : string
       The casing to convert keywords to. Must be one of ``'lower'``,
       ``'upper'`` or ``'capitalize'``. The default is ``'lower'``.
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        case = get_choice_opt(options, 'case', ['lower', 'upper', 'capitalize'], 'lower')
        self.convert = getattr(unicode, case)

    def filter(self, lexer, stream):
        for ttype, value in stream:
            if ttype in Keyword:
                yield ttype, self.convert(value)
            else:
                yield ttype, value


class NameHighlightFilter(Filter):
    """
    Highlight a normal Name (and Name.*) token with a different token type.

    Example::

        filter = NameHighlightFilter(
            names=['foo', 'bar', 'baz'],
            tokentype=Name.Function,
        )

    This would highlight the names "foo", "bar" and "baz"
    as functions. `Name.Function` is the default token type.

    Options accepted:

    `names` : list of strings
      A list of names that should be given the different token type.
      There is no default.
    `tokentype` : TokenType or string
      A token type or a string containing a token type name that is
      used for highlighting the strings in `names`. The default is
      `Name.Function`.
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        self.names = set(get_list_opt(options, 'names', []))
        tokentype = options.get('tokentype')
        if tokentype:
            self.tokentype = string_to_tokentype(tokentype)
        else:
            self.tokentype = Name.Function

    def filter(self, lexer, stream):
        for ttype, value in stream:
            if ttype in Name and value in self.names:
                yield self.tokentype, value
            else:
                yield ttype, value


class ErrorToken(Exception):
    pass

class RaiseOnErrorTokenFilter(Filter):
    """
    Raise an exception when the lexer generates an error token.

    Options accepted:

    `excclass` : Exception class
       The exception class to raise.
       The default is `pygments.filters.ErrorToken`.

    *New in Pygments 0.8.*
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        self.exception = options.get('excclass', ErrorToken)
        try:
            # issubclass() will raise TypeError if first argument is not a class
            if not issubclass(self.exception, Exception):
                raise TypeError
        except TypeError:
            raise OptionError('excclass option is not an exception class')

    def filter(self, lexer, stream):
        for ttype, value in stream:
            if ttype is Error:
                raise self.exception(value)
            yield ttype, value


class VisibleWhitespaceFilter(Filter):
    """
    Convert tabs, newlines and/or spaces to visible characters.

    Options accepted:

    `spaces` : string or bool
      If this is a one-character string, spaces will be replaced by this string.
      If it is another true value, spaces will be replaced by ``·`` (unicode
      MIDDLE DOT). If it is a false value, spaces will not be replaced. The
      default is ``False``.
    `tabs` : string or bool
      The same as for `spaces`, but the default replacement character is ``»``
      (unicode RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK). The default value
      is ``False``. Note: this will not work if the `tabsize` option for the
      lexer is nonzero, as tabs will already have been expanded then.
    `tabsize` : int
      If tabs are to be replaced by this filter (see the `tabs` option), this
      is the total number of characters that a tab should be expanded to.
      The default is ``8``.
    `newlines` : string or bool
      The same as for `spaces`, but the default replacement character is ``¶``
      (unicode PILCROW SIGN). The default value is ``False``.
    `wstokentype` : bool
      If true, give whitespace the special `Whitespace` token type. This allows
      styling the visible whitespace differently (e.g. greyed out), but it can
      disrupt background colors. The default is ``True``.

    *New in Pygments 0.8.*
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        for name, default in {'spaces': u'·', 'tabs': u'»', 'newlines': u'¶'}.items():
            opt = options.get(name, False)
            if isinstance(opt, basestring) and len(opt) == 1:
                setattr(self, name, opt)
            else:
                setattr(self, name, (opt and default or ''))
        tabsize = get_int_opt(options, 'tabsize', 8)
        if self.tabs:
            self.tabs += ' '*(tabsize-1)
        if self.newlines:
            self.newlines += '\n'
        self.wstt = get_bool_opt(options, 'wstokentype', True)

    def filter(self, lexer, stream):
        if self.wstt:
            spaces = self.spaces or ' '
            tabs = self.tabs or '\t'
            newlines = self.newlines or '\n'
            regex = re.compile(r'\s')
            def replacefunc(wschar):
                if wschar == ' ':
                    return spaces
                elif wschar == '\t':
                    return tabs
                elif wschar == '\n':
                    return newlines
                return wschar

            for ttype, value in stream:
                for sttype, svalue in _replace_special(ttype, value, regex,
                                                       Whitespace, replacefunc):
                    yield sttype, svalue
        else:
            spaces, tabs, newlines = self.spaces, self.tabs, self.newlines
            # simpler processing
            for ttype, value in stream:
                if spaces:
                    value = value.replace(' ', spaces)
                if tabs:
                    value = value.replace('\t', tabs)
                if newlines:
                    value = value.replace('\n', newlines)
                yield ttype, value


class GobbleFilter(Filter):
    """
    Gobbles source code lines (eats initial characters).

    This filter drops the first ``n`` characters off every line of code. This
    may be useful when the source code fed to the lexer is indented by a fixed
    amount of space that isn't desired in the output.

    Options accepted:

    `n` : int
       The number of characters to gobble.

    *New in Pygments 1.2.*
    """
    def __init__(self, **options):
        Filter.__init__(self, **options)
        self.n = get_int_opt(options, 'n', 0)

    def gobble(self, value, left):
        if left < len(value):
            return value[left:], 0
        else:
            return '', left - len(value)

    def filter(self, lexer, stream):
        n = self.n
        left = n # How many characters left to gobble.
        for ttype, value in stream:
            # Remove ``left`` tokens from first line, ``n`` from all others.
            parts = value.split('\n')
            (parts[0], left) = self.gobble(parts[0], left)
            for i in range(1, len(parts)):
                (parts[i], left) = self.gobble(parts[i], n)
            value = '\n'.join(parts)

            if value != '':
                yield ttype, value


class TokenMergeFilter(Filter):
    """
    Merges consecutive tokens with the same token type in the output stream of a
    lexer.

    *New in Pygments 1.2.*
    """
    def __init__(self, **options):
        Filter.__init__(self, **options)

    def filter(self, lexer, stream):
        current_type = None
        current_value = None
        for ttype, value in stream:
            if ttype is current_type:
                current_value += value
            else:
                if current_type is not None:
                    yield current_type, current_value
                current_type = ttype
                current_value = value
        if current_type is not None:
            yield current_type, current_value


FILTERS = {
    'codetagify':     CodeTagFilter,
    'keywordcase':    KeywordCaseFilter,
    'highlight':      NameHighlightFilter,
    'raiseonerror':   RaiseOnErrorTokenFilter,
    'whitespace':     VisibleWhitespaceFilter,
    'gobble':         GobbleFilter,
    'tokenmerge':     TokenMergeFilter,
}
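
Filters registered in FILTERS are attached to a lexer by name, and options after the name go to the filter initializer, mirroring the -F <filter>:<options> syntax handled in cmdline.py above. A small sketch, not part of the diff, assuming the vendored package is importable as `pygments`:

# Editorial usage sketch, not part of the vendored diff.
from pygments.lexers import PythonLexer

lexer = PythonLexer()
# 'keywordcase' resolves through FILTERS to KeywordCaseFilter;
# case='upper' is passed on to its initializer.
lexer.add_filter('keywordcase', case='upper')
for ttype, value in lexer.get_tokens('def foo(): pass'):
    print ttype, repr(value)  # keywords are yielded as 'DEF', 'PASS'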
95  packages/wakatime/wakatime/packages/pygments2/formatter.py  Normal file

@@ -0,0 +1,95 @@
# -*- coding: utf-8 -*-
"""
    pygments.formatter
    ~~~~~~~~~~~~~~~~~~

    Base formatter class.

    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import codecs

from pygments.util import get_bool_opt
from pygments.styles import get_style_by_name

__all__ = ['Formatter']


def _lookup_style(style):
    if isinstance(style, basestring):
        return get_style_by_name(style)
    return style


class Formatter(object):
    """
    Converts a token stream to text.

    Options accepted:

    ``style``
        The style to use, can be a string or a Style subclass
        (default: "default"). Not used by e.g. the
        TerminalFormatter.
    ``full``
        Tells the formatter to output a "full" document, i.e.
        a complete self-contained document. This doesn't have
        any effect for some formatters (default: false).
    ``title``
        If ``full`` is true, the title that should be used to
        caption the document (default: '').
    ``encoding``
        If given, must be an encoding name. This will be used to
        convert the Unicode token strings to byte strings in the
        output. If it is "" or None, Unicode strings will be written
        to the output file, which most file-like objects do not
        support (default: None).
    ``outencoding``
        Overrides ``encoding`` if given.
    """

    #: Name of the formatter
    name = None

    #: Shortcuts for the formatter
    aliases = []

    #: fn match rules
    filenames = []

    #: If True, this formatter outputs Unicode strings when no encoding
    #: option is given.
    unicodeoutput = True

    def __init__(self, **options):
        self.style = _lookup_style(options.get('style', 'default'))
        self.full = get_bool_opt(options, 'full', False)
        self.title = options.get('title', '')
        self.encoding = options.get('encoding', None) or None
        if self.encoding == 'guess':
            # can happen for pygmentize -O encoding=guess
            self.encoding = 'utf-8'
        self.encoding = options.get('outencoding', None) or self.encoding
        self.options = options

    def get_style_defs(self, arg=''):
        """
        Return the style definitions for the current style as a string.

        ``arg`` is an additional argument whose meaning depends on the
        formatter used. Note that ``arg`` can also be a list or tuple
        for some formatters like the html formatter.
        """
        return ''

    def format(self, tokensource, outfile):
        """
        Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
        tuples and write it into ``outfile``.
        """
        if self.encoding:
            # wrap the outfile in a StreamWriter
            outfile = codecs.lookup(self.encoding)[3](outfile)
        return self.format_unencoded(tokensource, outfile)
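
A concrete formatter only needs to implement format_unencoded(); the base class handles option parsing and, when an encoding is set, wraps the output file in a codecs StreamWriter. A hypothetical minimal subclass as a sketch, not part of the diff:

# Editorial sketch, not part of the vendored diff; the class is hypothetical.
from pygments.formatter import Formatter

class TokenCountFormatter(Formatter):
    name = 'Token count'
    aliases = ['tokencount']

    def format_unencoded(self, tokensource, outfile):
        # tokensource is an iterable of (tokentype, value) pairs
        outfile.write(u'%d tokens\n' % sum(1 for _ in tokensource))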
92  packages/wakatime/wakatime/packages/pygments2/formatters/_mapping.py  Executable file

@@ -0,0 +1,92 @@
# -*- coding: utf-8 -*-
"""
    pygments.formatters._mapping
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Formatter mapping definitions. This file is generated by itself. Every time
    you change something on a builtin formatter definition, run this script from
    the formatters folder to update it.

    Do not alter the FORMATTERS dictionary by hand.

    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

# start
from pygments.formatters.bbcode import BBCodeFormatter
from pygments.formatters.html import HtmlFormatter
from pygments.formatters.img import BmpImageFormatter
from pygments.formatters.img import GifImageFormatter
from pygments.formatters.img import ImageFormatter
from pygments.formatters.img import JpgImageFormatter
from pygments.formatters.latex import LatexFormatter
from pygments.formatters.other import NullFormatter
from pygments.formatters.other import RawTokenFormatter
from pygments.formatters.rtf import RtfFormatter
from pygments.formatters.svg import SvgFormatter
from pygments.formatters.terminal import TerminalFormatter
from pygments.formatters.terminal256 import Terminal256Formatter

FORMATTERS = {
    BBCodeFormatter: ('BBCode', ('bbcode', 'bb'), (), 'Format tokens with BBcodes. These formatting codes are used by many bulletin boards, so you can highlight your sourcecode with pygments before posting it there.'),
    BmpImageFormatter: ('img_bmp', ('bmp', 'bitmap'), ('*.bmp',), 'Create a bitmap image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
    GifImageFormatter: ('img_gif', ('gif',), ('*.gif',), 'Create a GIF image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
    HtmlFormatter: ('HTML', ('html',), ('*.html', '*.htm'), "Format tokens as HTML 4 ``<span>`` tags within a ``<pre>`` tag, wrapped in a ``<div>`` tag. The ``<div>``'s CSS class can be set by the `cssclass` option."),
    ImageFormatter: ('img', ('img', 'IMG', 'png'), ('*.png',), 'Create a PNG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
    JpgImageFormatter: ('img_jpg', ('jpg', 'jpeg'), ('*.jpg',), 'Create a JPEG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
    LatexFormatter: ('LaTeX', ('latex', 'tex'), ('*.tex',), 'Format tokens as LaTeX code. This needs the `fancyvrb` and `color` standard packages.'),
    NullFormatter: ('Text only', ('text', 'null'), ('*.txt',), 'Output the text unchanged without any formatting.'),
    RawTokenFormatter: ('Raw tokens', ('raw', 'tokens'), ('*.raw',), 'Format tokens as a raw representation for storing token streams.'),
    RtfFormatter: ('RTF', ('rtf',), ('*.rtf',), 'Format tokens as RTF markup. This formatter automatically outputs full RTF documents with color information and other useful stuff. Perfect for Copy and Paste into Microsoft\xc2\xae Word\xc2\xae documents.'),
    SvgFormatter: ('SVG', ('svg',), ('*.svg',), 'Format tokens as an SVG graphics file. This formatter is still experimental. Each line of code is a ``<text>`` element with explicit ``x`` and ``y`` coordinates containing ``<tspan>`` elements with the individual token styles.'),
    Terminal256Formatter: ('Terminal256', ('terminal256', 'console256', '256'), (), 'Format tokens with ANSI color sequences, for output in a 256-color terminal or console. Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'),
    TerminalFormatter: ('Terminal', ('terminal', 'console'), (), 'Format tokens with ANSI color sequences, for output in a text console. Color sequences are terminated at newlines, so that paging the output works correctly.')
}

if __name__ == '__main__':
    import sys
    import os

    # lookup formatters
    found_formatters = []
    imports = []
    sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
    from pygments.util import docstring_headline

    for filename in os.listdir('.'):
        if filename.endswith('.py') and not filename.startswith('_'):
            module_name = 'pygments.formatters.%s' % filename[:-3]
            print module_name
            module = __import__(module_name, None, None, [''])
            for formatter_name in module.__all__:
                imports.append((module_name, formatter_name))
                formatter = getattr(module, formatter_name)
                found_formatters.append(
                    '%s: %r' % (formatter_name,
                                (formatter.name,
                                 tuple(formatter.aliases),
                                 tuple(formatter.filenames),
                                 docstring_headline(formatter))))
    # sort them, that should make the diff files for svn smaller
    found_formatters.sort()
    imports.sort()

    # extract useful sourcecode from this file
    f = open(__file__)
    try:
        content = f.read()
    finally:
        f.close()
    header = content[:content.find('# start')]
    footer = content[content.find("if __name__ == '__main__':"):]

    # write new file
    f = open(__file__, 'w')
    f.write(header)
    f.write('# start\n')
    f.write('\n'.join(['from %s import %s' % imp for imp in imports]))
    f.write('\n\n')
    f.write('FORMATTERS = {\n    %s\n}\n\n' % ',\n    '.join(found_formatters))
    f.write(footer)
    f.close()
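
Each FORMATTERS value is a (name, aliases, filename patterns, docstring headline) tuple keyed by the formatter class; pygments.formatters builds its name and filename lookups from this table. A read-only sketch, not part of the diff, assuming the vendored package is importable as `pygments`:

# Editorial usage sketch, not part of the vendored diff.
from pygments.formatters._mapping import FORMATTERS

for cls, (name, aliases, patterns, docline) in FORMATTERS.items():
    print '%-12s aliases=%s patterns=%s' % (name, list(aliases), list(patterns))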
821  packages/wakatime/wakatime/packages/pygments2/formatters/html.py  Normal file

@@ -0,0 +1,821 @@
# -*- coding: utf-8 -*-
|
||||||
|
"""
|
||||||
|
pygments.formatters.html
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Formatter for HTML output.
|
||||||
|
|
||||||
|
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
|
||||||
|
:license: BSD, see LICENSE for details.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import os.path
|
||||||
|
import StringIO
|
||||||
|
|
||||||
|
from pygments.formatter import Formatter
|
||||||
|
from pygments.token import Token, Text, STANDARD_TYPES
|
||||||
|
from pygments.util import get_bool_opt, get_int_opt, get_list_opt, bytes
|
||||||
|
|
||||||
|
try:
|
||||||
|
import ctags
|
||||||
|
except ImportError:
|
||||||
|
ctags = None
|
||||||
|
|
||||||
|
__all__ = ['HtmlFormatter']
|
||||||
|
|
||||||
|
|
||||||
|
_escape_html_table = {
|
||||||
|
ord('&'): u'&',
|
||||||
|
ord('<'): u'<',
|
||||||
|
ord('>'): u'>',
|
||||||
|
ord('"'): u'"',
|
||||||
|
ord("'"): u''',
|
||||||
|
}
|
||||||
|
|
||||||
|
def escape_html(text, table=_escape_html_table):
|
||||||
|
"""Escape &, <, > as well as single and double quotes for HTML."""
|
||||||
|
return text.translate(table)
|
||||||
|
|
||||||
|
def get_random_id():
|
||||||
|
"""Return a random id for javascript fields."""
|
||||||
|
from random import random
|
||||||
|
from time import time
|
||||||
|
try:
|
||||||
|
from hashlib import sha1 as sha
|
||||||
|
except ImportError:
|
||||||
|
import sha
|
||||||
|
sha = sha.new
|
||||||
|
return sha('%s|%s' % (random(), time())).hexdigest()
|
||||||
|
|
||||||
|
|
||||||
|
def _get_ttype_class(ttype):
|
||||||
|
fname = STANDARD_TYPES.get(ttype)
|
||||||
|
if fname:
|
||||||
|
return fname
|
||||||
|
aname = ''
|
||||||
|
while fname is None:
|
||||||
|
aname = '-' + ttype[-1] + aname
|
||||||
|
ttype = ttype.parent
|
||||||
|
fname = STANDARD_TYPES.get(ttype)
|
||||||
|
return fname + aname
|
||||||
|
|
||||||
|
|
||||||
|
CSSFILE_TEMPLATE = '''\
|
||||||
|
td.linenos { background-color: #f0f0f0; padding-right: 10px; }
|
||||||
|
span.lineno { background-color: #f0f0f0; padding: 0 5px 0 5px; }
|
||||||
|
pre { line-height: 125%%; }
|
||||||
|
%(styledefs)s
|
||||||
|
'''
|
||||||
|
|
||||||
|
DOC_HEADER = '''\
|
||||||
|
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
|
||||||
|
"http://www.w3.org/TR/html4/strict.dtd">
|
||||||
|
|
||||||
|
<html>
|
||||||
|
<head>
|
||||||
|
<title>%(title)s</title>
|
||||||
|
<meta http-equiv="content-type" content="text/html; charset=%(encoding)s">
|
||||||
|
<style type="text/css">
|
||||||
|
''' + CSSFILE_TEMPLATE + '''
|
||||||
|
</style>
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<h2>%(title)s</h2>
|
||||||
|
|
||||||
|
'''
|
||||||
|
|
||||||
|
DOC_HEADER_EXTERNALCSS = '''\
|
||||||
|
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
|
||||||
|
"http://www.w3.org/TR/html4/strict.dtd">
|
||||||
|
|
||||||
|
<html>
|
||||||
|
<head>
|
||||||
|
<title>%(title)s</title>
|
||||||
|
<meta http-equiv="content-type" content="text/html; charset=%(encoding)s">
|
||||||
|
<link rel="stylesheet" href="%(cssfile)s" type="text/css">
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<h2>%(title)s</h2>
|
||||||
|
|
||||||
|
'''
|
||||||
|
|
||||||
|
DOC_FOOTER = '''\
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
|
'''
|
||||||
|
|
||||||
|
|
||||||
|
class HtmlFormatter(Formatter):
|
||||||
|
r"""
|
||||||
|
Format tokens as HTML 4 ``<span>`` tags within a ``<pre>`` tag, wrapped
|
||||||
|
in a ``<div>`` tag. The ``<div>``'s CSS class can be set by the `cssclass`
|
||||||
|
option.
|
||||||
|
|
||||||
|
If the `linenos` option is set to ``"table"``, the ``<pre>`` is
|
||||||
|
additionally wrapped inside a ``<table>`` which has one row and two
|
||||||
|
cells: one containing the line numbers and one containing the code.
|
||||||
|
Example:
|
||||||
|
|
||||||
|
.. sourcecode:: html
|
||||||
|
|
||||||
|
<div class="highlight" >
|
||||||
|
<table><tr>
|
||||||
|
<td class="linenos" title="click to toggle"
|
||||||
|
onclick="with (this.firstChild.style)
|
||||||
|
{ display = (display == '') ? 'none' : '' }">
|
||||||
|
<pre>1
|
||||||
|
2</pre>
|
||||||
|
</td>
|
||||||
|
<td class="code">
|
||||||
|
<pre><span class="Ke">def </span><span class="NaFu">foo</span>(bar):
|
||||||
|
<span class="Ke">pass</span>
|
||||||
|
</pre>
|
||||||
|
</td>
|
||||||
|
</tr></table></div>
|
||||||
|
|
||||||
|
(whitespace added to improve clarity).
|
||||||
|
|
||||||
|
Wrapping can be disabled using the `nowrap` option.
|
||||||
|
|
||||||
|
A list of lines can be specified using the `hl_lines` option to make these
|
||||||
|
lines highlighted (as of Pygments 0.11).
|
||||||
|
|
||||||
|
With the `full` option, a complete HTML 4 document is output, including
|
||||||
|
the style definitions inside a ``<style>`` tag, or in a separate file if
|
||||||
|
the `cssfile` option is given.
|
||||||
|
|
||||||
|
When `tagsfile` is set to the path of a ctags index file, it is used to
|
||||||
|
generate hyperlinks from names to their definition. You must enable
|
||||||
|
`anchorlines` and run ctags with the `-n` option for this to work. The
|
||||||
|
`python-ctags` module from PyPI must be installed to use this feature;
|
||||||
|
otherwise a `RuntimeError` will be raised.
|
||||||
|
|
||||||
|
The `get_style_defs(arg='')` method of a `HtmlFormatter` returns a string
|
||||||
|
containing CSS rules for the CSS classes used by the formatter. The
|
||||||
|
argument `arg` can be used to specify additional CSS selectors that
|
||||||
|
are prepended to the classes. A call `fmter.get_style_defs('td .code')`
|
||||||
|
would result in the following CSS classes:
|
||||||
|
|
||||||
|
.. sourcecode:: css
|
||||||
|
|
||||||
|
td .code .kw { font-weight: bold; color: #00FF00 }
|
||||||
|
td .code .cm { color: #999999 }
|
||||||
|
...
|
||||||
|
|
||||||
|
If you have Pygments 0.6 or higher, you can also pass a list or tuple to the
|
||||||
|
`get_style_defs()` method to request multiple prefixes for the tokens:
|
||||||
|
|
||||||
|
.. sourcecode:: python
|
||||||
|
|
||||||
|
formatter.get_style_defs(['div.syntax pre', 'pre.syntax'])
|
||||||
|
|
||||||
|
The output would then look like this:
|
||||||
|
|
||||||
|
.. sourcecode:: css
|
||||||
|
|
||||||
|
div.syntax pre .kw,
|
||||||
|
pre.syntax .kw { font-weight: bold; color: #00FF00 }
|
||||||
|
div.syntax pre .cm,
|
||||||
|
pre.syntax .cm { color: #999999 }
|
||||||
|
...
|
||||||
|
|
||||||
|
Additional options accepted:
|
||||||
|
|
||||||
|
`nowrap`
|
||||||
|
If set to ``True``, don't wrap the tokens at all, not even inside a ``<pre>``
|
||||||
|
tag. This disables most other options (default: ``False``).
|
||||||
|
|
||||||
|
`full`
|
||||||
|
Tells the formatter to output a "full" document, i.e. a complete
|
||||||
|
self-contained document (default: ``False``).
|
||||||
|
|
||||||
|
`title`
|
||||||
|
If `full` is true, the title that should be used to caption the
|
||||||
|
document (default: ``''``).
|
||||||
|
|
||||||
|
`style`
|
||||||
|
The style to use, can be a string or a Style subclass (default:
|
||||||
|
``'default'``). This option has no effect if the `cssfile`
|
||||||
|
and `noclobber_cssfile` option are given and the file specified in
|
||||||
|
`cssfile` exists.
|
||||||
|
|
||||||
|
`noclasses`
|
||||||
|
If set to true, token ``<span>`` tags will not use CSS classes, but
|
||||||
|
inline styles. This is not recommended for larger pieces of code since
|
||||||
|
it increases output size by quite a bit (default: ``False``).
|
||||||
|
|
||||||
|
`classprefix`
|
||||||
|
Since the token types use relatively short class names, they may clash
|
||||||
|
with some of your own class names. In this case you can use the
|
||||||
|
`classprefix` option to give a string to prepend to all Pygments-generated
|
||||||
|
CSS class names for token types.
|
||||||
|
Note that this option also affects the output of `get_style_defs()`.
|
||||||
|
|
||||||
|
`cssclass`
|
||||||
|
CSS class for the wrapping ``<div>`` tag (default: ``'highlight'``).
|
||||||
|
If you set this option, the default selector for `get_style_defs()`
|
||||||
|
will be this class.
|
||||||
|
|
||||||
|
*New in Pygments 0.9:* If you select the ``'table'`` line numbers, the
|
||||||
|
wrapping table will have a CSS class of this string plus ``'table'``,
|
||||||
|
the default is accordingly ``'highlighttable'``.
|
||||||
|
|
||||||
|
`cssstyles`
|
||||||
|
Inline CSS styles for the wrapping ``<div>`` tag (default: ``''``).
|
||||||
|
|
||||||
|
`prestyles`
|
||||||
|
Inline CSS styles for the ``<pre>`` tag (default: ``''``). *New in
|
||||||
|
Pygments 0.11.*
|
||||||
|
|
||||||
|
`cssfile`
|
||||||
|
If the `full` option is true and this option is given, it must be the
|
||||||
|
name of an external file. If the filename does not include an absolute
|
||||||
|
path, the file's path will be assumed to be relative to the main output
|
||||||
|
file's path, if the latter can be found. The stylesheet is then written
|
||||||
|
to this file instead of the HTML file. *New in Pygments 0.6.*
|
||||||
|
|
||||||
|
`noclobber_cssfile`
|
||||||
|
If `cssfile` is given and the specified file exists, the css file will
|
||||||
|
not be overwritten. This allows the use of the `full` option in
|
||||||
|
combination with a user specified css file. Default is ``False``.
|
||||||
|
*New in Pygments 1.1.*
|
||||||
|
|
||||||
|
`linenos`
|
||||||
|
If set to ``'table'``, output line numbers as a table with two cells,
|
||||||
|
one containing the line numbers, the other the whole code. This is
|
||||||
|
copy-and-paste-friendly, but may cause alignment problems with some
|
||||||
|
browsers or fonts. If set to ``'inline'``, the line numbers will be
|
||||||
|
integrated in the ``<pre>`` tag that contains the code (that setting
|
||||||
|
is *new in Pygments 0.8*).
|
||||||
|
|
||||||
|
For compatibility with Pygments 0.7 and earlier, every true value
|
||||||
|
except ``'inline'`` means the same as ``'table'`` (in particular, that
|
||||||
|
means also ``True``).
|
||||||
|
|
||||||
|
The default value is ``False``, which means no line numbers at all.
|
||||||
|
|
||||||
|
**Note:** with the default ("table") line number mechanism, the line
|
||||||
|
numbers and code can have different line heights in Internet Explorer
|
||||||
|
unless you give the enclosing ``<pre>`` tags an explicit ``line-height``
|
||||||
|
CSS property (you get the default line spacing with ``line-height:
|
||||||
|
125%``).
|
||||||
|
|
||||||
|
`hl_lines`
|
||||||
|
Specify a list of lines to be highlighted. *New in Pygments 0.11.*
|
||||||
|
|
||||||
|
`linenostart`
|
||||||
|
The line number for the first line (default: ``1``).
|
||||||
|
|
||||||
|
`linenostep`
|
||||||
|
If set to a number n > 1, only every nth line number is printed.
|
||||||
|
|
||||||
|
`linenospecial`
|
||||||
|
        If set to a number n > 0, every nth line number is given the CSS
        class ``"special"`` (default: ``0``).

    `nobackground`
        If set to ``True``, the formatter won't output the background color
        for the wrapping element (this automatically defaults to ``False``
        when there is no wrapping element [eg: no argument for the
        `get_style_defs` method given]) (default: ``False``). *New in
        Pygments 0.6.*

    `lineseparator`
        This string is output between lines of code. It defaults to ``"\n"``,
        which is enough to break a line inside ``<pre>`` tags, but you can
        e.g. set it to ``"<br>"`` to get HTML line breaks. *New in Pygments
        0.7.*

    `lineanchors`
        If set to a nonempty string, e.g. ``foo``, the formatter will wrap each
        output line in an anchor tag with a ``name`` of ``foo-linenumber``.
        This allows easy linking to certain lines. *New in Pygments 0.9.*

    `linespans`
        If set to a nonempty string, e.g. ``foo``, the formatter will wrap each
        output line in a span tag with an ``id`` of ``foo-linenumber``.
        This allows easy access to lines via javascript. *New in Pygments 1.6.*

    `anchorlinenos`
        If set to `True`, will wrap line numbers in <a> tags. Used in
        combination with `linenos` and `lineanchors`.

    `tagsfile`
        If set to the path of a ctags file, wrap names in anchor tags that
        link to their definitions. `lineanchors` should be used, and the
        tags file should specify line numbers (see the `-n` option to ctags).
        *New in Pygments 1.6.*

    `tagurlformat`
        A string formatting pattern used to generate links to ctags definitions.
        Available variables are `%(path)s`, `%(fname)s` and `%(fext)s`.
        Defaults to an empty string, resulting in just `#prefix-number` links.
        *New in Pygments 1.6.*


    **Subclassing the HTML formatter**

    *New in Pygments 0.7.*

    The HTML formatter is now built in a way that allows easy subclassing, thus
    customizing the output HTML code. The `format()` method calls
    `self._format_lines()` which returns a generator that yields tuples of ``(1,
    line)``, where the ``1`` indicates that the ``line`` is a line of the
    formatted source code.

    If the `nowrap` option is set, the generator is simply iterated over and the
    resulting HTML is output.

    Otherwise, `format()` calls `self.wrap()`, which wraps the generator with
    other generators. These may add some HTML code to the one generated by
    `_format_lines()`, either by modifying the lines generated by the latter,
    then yielding them again with ``(1, line)``, and/or by yielding other HTML
    code before or after the lines, with ``(0, html)``. The distinction between
    source lines and other code makes it possible to wrap the generator multiple
    times.

    The default `wrap()` implementation adds a ``<div>`` and a ``<pre>`` tag.

    A custom `HtmlFormatter` subclass could look like this:

    .. sourcecode:: python

        class CodeHtmlFormatter(HtmlFormatter):

            def wrap(self, source, outfile):
                return self._wrap_code(source)

            def _wrap_code(self, source):
                yield 0, '<code>'
                for i, t in source:
                    if i == 1:
                        # it's a line of formatted code
                        t += '<br>'
                    yield i, t
                yield 0, '</code>'

    This results in wrapping the formatted lines with a ``<code>`` tag, where the
    source lines are broken using ``<br>`` tags.

    After calling `wrap()`, the `format()` method also adds the "line numbers"
    and/or "full document" wrappers if the respective options are set. Then, all
    HTML yielded by the wrapped generator is output.
    """
    name = 'HTML'
    aliases = ['html']
    filenames = ['*.html', '*.htm']

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        self.title = self._decodeifneeded(self.title)
        self.nowrap = get_bool_opt(options, 'nowrap', False)
        self.noclasses = get_bool_opt(options, 'noclasses', False)
        self.classprefix = options.get('classprefix', '')
        self.cssclass = self._decodeifneeded(options.get('cssclass', 'highlight'))
        self.cssstyles = self._decodeifneeded(options.get('cssstyles', ''))
        self.prestyles = self._decodeifneeded(options.get('prestyles', ''))
        self.cssfile = self._decodeifneeded(options.get('cssfile', ''))
        self.noclobber_cssfile = get_bool_opt(options, 'noclobber_cssfile', False)
        self.tagsfile = self._decodeifneeded(options.get('tagsfile', ''))
        self.tagurlformat = self._decodeifneeded(options.get('tagurlformat', ''))

        if self.tagsfile:
            if not ctags:
                raise RuntimeError('The "ctags" package must be installed '
                                   'to be able to use the "tagsfile" feature.')
            self._ctags = ctags.CTags(self.tagsfile)

        linenos = options.get('linenos', False)
        if linenos == 'inline':
            self.linenos = 2
        elif linenos:
            # compatibility with <= 0.7
            self.linenos = 1
        else:
            self.linenos = 0
        self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
        self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
        self.linenospecial = abs(get_int_opt(options, 'linenospecial', 0))
        self.nobackground = get_bool_opt(options, 'nobackground', False)
        self.lineseparator = options.get('lineseparator', '\n')
        self.lineanchors = options.get('lineanchors', '')
        self.linespans = options.get('linespans', '')
        self.anchorlinenos = options.get('anchorlinenos', False)
        self.hl_lines = set()
        for lineno in get_list_opt(options, 'hl_lines', []):
            try:
                self.hl_lines.add(int(lineno))
            except ValueError:
                pass

        self._create_stylesheet()

    def _get_css_class(self, ttype):
        """Return the css class of this token type prefixed with
        the classprefix option."""
        ttypeclass = _get_ttype_class(ttype)
        if ttypeclass:
            return self.classprefix + ttypeclass
        return ''

    def _create_stylesheet(self):
        t2c = self.ttype2class = {Token: ''}
        c2s = self.class2style = {}
        for ttype, ndef in self.style:
            name = self._get_css_class(ttype)
            style = ''
            if ndef['color']:
                style += 'color: #%s; ' % ndef['color']
            if ndef['bold']:
                style += 'font-weight: bold; '
            if ndef['italic']:
                style += 'font-style: italic; '
            if ndef['underline']:
                style += 'text-decoration: underline; '
            if ndef['bgcolor']:
                style += 'background-color: #%s; ' % ndef['bgcolor']
            if ndef['border']:
                style += 'border: 1px solid #%s; ' % ndef['border']
            if style:
                t2c[ttype] = name
                # save len(ttype) to enable ordering the styles by
                # hierarchy (necessary for CSS cascading rules!)
                c2s[name] = (style[:-2], ttype, len(ttype))

    def get_style_defs(self, arg=None):
        """
        Return CSS style definitions for the classes produced by the current
        highlighting style. ``arg`` can be a string or list of selectors to
        insert before the token type classes.
        """
        if arg is None:
            arg = ('cssclass' in self.options and '.'+self.cssclass or '')
        if isinstance(arg, basestring):
            args = [arg]
        else:
            args = list(arg)

        def prefix(cls):
            if cls:
                cls = '.' + cls
            tmp = []
            for arg in args:
                tmp.append((arg and arg + ' ' or '') + cls)
            return ', '.join(tmp)

        styles = [(level, ttype, cls, style)
                  for cls, (style, ttype, level) in self.class2style.iteritems()
                  if cls and style]
        styles.sort()
        lines = ['%s { %s } /* %s */' % (prefix(cls), style, repr(ttype)[6:])
                 for (level, ttype, cls, style) in styles]
        if arg and not self.nobackground and \
           self.style.background_color is not None:
            text_style = ''
            if Text in self.ttype2class:
                text_style = ' ' + self.class2style[self.ttype2class[Text]][0]
            lines.insert(0, '%s { background: %s;%s }' %
                         (prefix(''), self.style.background_color, text_style))
        if self.style.highlight_color is not None:
            lines.insert(0, '%s.hll { background-color: %s }' %
                         (prefix(''), self.style.highlight_color))
        return '\n'.join(lines)
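
    # Editor's sketch of how get_style_defs() is typically used to emit a
    # stylesheet scoped to the ``cssclass`` wrapper (assumes the default
    # 'highlight' class; not part of the original source):
    #
    # >>> css = HtmlFormatter(style='default').get_style_defs('.highlight')
    #
    # This returns rules of the form '.highlight .k { ... }', one per token
    # class, which can be written to a file or inlined in a <style> element.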
    def _decodeifneeded(self, value):
        if isinstance(value, bytes):
            if self.encoding:
                return value.decode(self.encoding)
            return value.decode()
        return value

    def _wrap_full(self, inner, outfile):
        if self.cssfile:
            if os.path.isabs(self.cssfile):
                # it's an absolute filename
                cssfilename = self.cssfile
            else:
                try:
                    filename = outfile.name
                    if not filename or filename[0] == '<':
                        # pseudo files, e.g. name == '<fdopen>'
                        raise AttributeError
                    cssfilename = os.path.join(os.path.dirname(filename),
                                               self.cssfile)
                except AttributeError:
                    print >>sys.stderr, 'Note: Cannot determine output file name, ' \
                          'using current directory as base for the CSS file name'
                    cssfilename = self.cssfile
            # write CSS file only if noclobber_cssfile isn't given as an option.
            try:
                if not os.path.exists(cssfilename) or not self.noclobber_cssfile:
                    cf = open(cssfilename, "w")
                    cf.write(CSSFILE_TEMPLATE %
                             {'styledefs': self.get_style_defs('body')})
                    cf.close()
            except IOError, err:
                err.strerror = 'Error writing CSS file: ' + err.strerror
                raise

            yield 0, (DOC_HEADER_EXTERNALCSS %
                      dict(title = self.title,
                           cssfile = self.cssfile,
                           encoding = self.encoding))
        else:
            yield 0, (DOC_HEADER %
                      dict(title = self.title,
                           styledefs = self.get_style_defs('body'),
                           encoding = self.encoding))

        for t, line in inner:
            yield t, line
        yield 0, DOC_FOOTER

    def _wrap_tablelinenos(self, inner):
        dummyoutfile = StringIO.StringIO()
        lncount = 0
        for t, line in inner:
            if t:
                lncount += 1
            dummyoutfile.write(line)

        fl = self.linenostart
        mw = len(str(lncount + fl - 1))
        sp = self.linenospecial
        st = self.linenostep
        la = self.lineanchors
        aln = self.anchorlinenos
        nocls = self.noclasses
        if sp:
            lines = []

            for i in range(fl, fl+lncount):
                if i % st == 0:
                    if i % sp == 0:
                        if aln:
                            lines.append('<a href="#%s-%d" class="special">%*d</a>' %
                                         (la, i, mw, i))
                        else:
                            lines.append('<span class="special">%*d</span>' % (mw, i))
                    else:
                        if aln:
                            lines.append('<a href="#%s-%d">%*d</a>' % (la, i, mw, i))
                        else:
                            lines.append('%*d' % (mw, i))
                else:
                    lines.append('')
            ls = '\n'.join(lines)
        else:
            lines = []
            for i in range(fl, fl+lncount):
                if i % st == 0:
                    if aln:
                        lines.append('<a href="#%s-%d">%*d</a>' % (la, i, mw, i))
                    else:
                        lines.append('%*d' % (mw, i))
                else:
                    lines.append('')
            ls = '\n'.join(lines)

        # in case you wonder about the seemingly redundant <div> here: since the
        # content in the other cell also is wrapped in a div, some browsers in
        # some configurations seem to mess up the formatting...
        if nocls:
            yield 0, ('<table class="%stable">' % self.cssclass +
                      '<tr><td><div class="linenodiv" '
                      'style="background-color: #f0f0f0; padding-right: 10px">'
                      '<pre style="line-height: 125%">' +
                      ls + '</pre></div></td><td class="code">')
        else:
            yield 0, ('<table class="%stable">' % self.cssclass +
                      '<tr><td class="linenos"><div class="linenodiv"><pre>' +
                      ls + '</pre></div></td><td class="code">')
        yield 0, dummyoutfile.getvalue()
        yield 0, '</td></tr></table>'

    def _wrap_inlinelinenos(self, inner):
        # need a list of lines since we need the width of a single number :(
        lines = list(inner)
        sp = self.linenospecial
        st = self.linenostep
        num = self.linenostart
        mw = len(str(len(lines) + num - 1))

        if self.noclasses:
            if sp:
                for t, line in lines:
                    if num%sp == 0:
                        style = 'background-color: #ffffc0; padding: 0 5px 0 5px'
                    else:
                        style = 'background-color: #f0f0f0; padding: 0 5px 0 5px'
                    yield 1, '<span style="%s">%*s</span> ' % (
                        style, mw, (num%st and ' ' or num)) + line
                    num += 1
            else:
                for t, line in lines:
                    yield 1, ('<span style="background-color: #f0f0f0; '
                              'padding: 0 5px 0 5px">%*s</span> ' % (
                              mw, (num%st and ' ' or num)) + line)
                    num += 1
        elif sp:
            for t, line in lines:
                yield 1, '<span class="lineno%s">%*s</span> ' % (
                    num%sp == 0 and ' special' or '', mw,
                    (num%st and ' ' or num)) + line
                num += 1
        else:
            for t, line in lines:
                yield 1, '<span class="lineno">%*s</span> ' % (
                    mw, (num%st and ' ' or num)) + line
                num += 1

    def _wrap_lineanchors(self, inner):
        s = self.lineanchors
        i = self.linenostart - 1 # subtract 1 since we have to increment i
                                 # *before* yielding
        for t, line in inner:
            if t:
                i += 1
                yield 1, '<a name="%s-%d"></a>' % (s, i) + line
            else:
                yield 0, line

    def _wrap_linespans(self, inner):
        s = self.linespans
        i = self.linenostart - 1
        for t, line in inner:
            if t:
                i += 1
                yield 1, '<span id="%s-%d">%s</span>' % (s, i, line)
            else:
                yield 0, line

    def _wrap_div(self, inner):
        style = []
        if (self.noclasses and not self.nobackground and
            self.style.background_color is not None):
            style.append('background: %s' % (self.style.background_color,))
        if self.cssstyles:
            style.append(self.cssstyles)
        style = '; '.join(style)

        yield 0, ('<div' + (self.cssclass and ' class="%s"' % self.cssclass)
                  + (style and (' style="%s"' % style)) + '>')
        for tup in inner:
            yield tup
        yield 0, '</div>\n'

    def _wrap_pre(self, inner):
        style = []
        if self.prestyles:
            style.append(self.prestyles)
        if self.noclasses:
            style.append('line-height: 125%')
        style = '; '.join(style)

        yield 0, ('<pre' + (style and ' style="%s"' % style) + '>')
        for tup in inner:
            yield tup
        yield 0, '</pre>'

    def _format_lines(self, tokensource):
        """
        Just format the tokens, without any wrapping tags.
        Yield individual lines.
        """
        nocls = self.noclasses
        lsep = self.lineseparator
        # for <span style=""> lookup only
        getcls = self.ttype2class.get
        c2s = self.class2style
        escape_table = _escape_html_table
        tagsfile = self.tagsfile

        lspan = ''
        line = ''
        for ttype, value in tokensource:
            if nocls:
                cclass = getcls(ttype)
                while cclass is None:
                    ttype = ttype.parent
                    cclass = getcls(ttype)
                cspan = cclass and '<span style="%s">' % c2s[cclass][0] or ''
            else:
                cls = self._get_css_class(ttype)
                cspan = cls and '<span class="%s">' % cls or ''

            parts = value.translate(escape_table).split('\n')

            if tagsfile and ttype in Token.Name:
                filename, linenumber = self._lookup_ctag(value)
                if linenumber:
                    base, filename = os.path.split(filename)
                    if base:
                        base += '/'
                    filename, extension = os.path.splitext(filename)
                    url = self.tagurlformat % {'path': base, 'fname': filename,
                                               'fext': extension}
                    parts[0] = "<a href=\"%s#%s-%d\">%s" % \
                        (url, self.lineanchors, linenumber, parts[0])
                    parts[-1] = parts[-1] + "</a>"

            # for all but the last line
            for part in parts[:-1]:
                if line:
                    if lspan != cspan:
                        line += (lspan and '</span>') + cspan + part + \
                                (cspan and '</span>') + lsep
                    else: # both are the same
                        line += part + (lspan and '</span>') + lsep
                    yield 1, line
                    line = ''
                elif part:
                    yield 1, cspan + part + (cspan and '</span>') + lsep
                else:
                    yield 1, lsep
            # for the last line
            if line and parts[-1]:
                if lspan != cspan:
                    line += (lspan and '</span>') + cspan + parts[-1]
                    lspan = cspan
                else:
                    line += parts[-1]
            elif parts[-1]:
                line = cspan + parts[-1]
                lspan = cspan
            # else we neither have to open a new span nor set lspan

        if line:
            yield 1, line + (lspan and '</span>') + lsep

    def _lookup_ctag(self, token):
        entry = ctags.TagEntry()
        if self._ctags.find(entry, token, 0):
            return entry['file'], entry['lineNumber']
        else:
            return None, None

    def _highlight_lines(self, tokensource):
        """
        Highlight the lines specified in the `hl_lines` option by
        post-processing the token stream coming from `_format_lines`.
        """
        hls = self.hl_lines

        for i, (t, value) in enumerate(tokensource):
            if t != 1:
                yield t, value
            if i + 1 in hls: # i + 1 because Python indexes start at 0
                if self.noclasses:
                    style = ''
                    if self.style.highlight_color is not None:
                        style = (' style="background-color: %s"' %
                                 (self.style.highlight_color,))
                    yield 1, '<span%s>%s</span>' % (style, value)
                else:
                    yield 1, '<span class="hll">%s</span>' % value
            else:
                yield 1, value

    def wrap(self, source, outfile):
        """
        Wrap the ``source``, which is a generator yielding
        individual lines, in custom generators. See docstring
        for `format`. Can be overridden.
        """
        return self._wrap_div(self._wrap_pre(source))

    def format_unencoded(self, tokensource, outfile):
        """
        The formatting process uses several nested generators; which of
        them are used is determined by the user's options.

        Each generator should take at least one argument, ``inner``,
        and wrap the pieces of text generated by this.

        Always yield 2-tuples: (code, text). If "code" is 1, the text
        is part of the original tokensource being highlighted; if it's
        0, the text is some piece of wrapping. This makes it possible to
        use several different wrappers that process the original source
        linewise, e.g. line number generators.
        """
        source = self._format_lines(tokensource)
        if self.hl_lines:
            source = self._highlight_lines(source)
        if not self.nowrap:
            if self.linenos == 2:
                source = self._wrap_inlinelinenos(source)
            if self.lineanchors:
                source = self._wrap_lineanchors(source)
            if self.linespans:
                source = self._wrap_linespans(source)
            source = self.wrap(source, outfile)
            if self.linenos == 1:
                source = self._wrap_tablelinenos(source)
            if self.full:
                source = self._wrap_full(source, outfile)

        for t, piece in source:
            outfile.write(piece)
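
# Editor's sketch of the (code, text) wrapper protocol described above: a
# custom wrapper is just another generator that re-yields the stream
# (an addition illustrating the ``wrap()`` override pattern from the class
# docstring; not part of the original source):
#
# >>> def _wrap_ol(inner):
# ...     yield 0, '<ol>'                        # non-source wrapping HTML
# ...     for t, line in inner:
# ...         if t == 1:
# ...             yield 1, '<li>%s</li>' % line  # a formatted source line
# ...         else:
# ...             yield 0, line                  # pass other wrapping through
# ...     yield 0, '</ol>'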
553
packages/wakatime/wakatime/packages/pygments2/formatters/img.py
Normal file
@@ -0,0 +1,553 @@
# -*- coding: utf-8 -*-
"""
    pygments.formatters.img
    ~~~~~~~~~~~~~~~~~~~~~~~

    Formatter for Pixmap output.

    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import sys

from pygments.formatter import Formatter
from pygments.util import get_bool_opt, get_int_opt, \
    get_list_opt, get_choice_opt

# Import this carefully
try:
    from PIL import Image, ImageDraw, ImageFont
    pil_available = True
except ImportError:
    pil_available = False

try:
    import _winreg
except ImportError:
    _winreg = None

__all__ = ['ImageFormatter', 'GifImageFormatter', 'JpgImageFormatter',
           'BmpImageFormatter']


# For some unknown reason every font calls it something different
STYLES = {
    'NORMAL':     ['', 'Roman', 'Book', 'Normal', 'Regular', 'Medium'],
    'ITALIC':     ['Oblique', 'Italic'],
    'BOLD':       ['Bold'],
    'BOLDITALIC': ['Bold Oblique', 'Bold Italic'],
}

# A sane default for modern systems
DEFAULT_FONT_NAME_NIX = 'Bitstream Vera Sans Mono'
DEFAULT_FONT_NAME_WIN = 'Courier New'


class PilNotAvailable(ImportError):
    """When Python imaging library is not available"""


class FontNotFound(Exception):
    """When there are no usable fonts specified"""


class FontManager(object):
    """
    Manages a set of fonts: normal, italic, bold, etc...
    """

    def __init__(self, font_name, font_size=14):
        self.font_name = font_name
        self.font_size = font_size
        self.fonts = {}
        self.encoding = None
        if sys.platform.startswith('win'):
            if not font_name:
                self.font_name = DEFAULT_FONT_NAME_WIN
            self._create_win()
        else:
            if not font_name:
                self.font_name = DEFAULT_FONT_NAME_NIX
            self._create_nix()

    def _get_nix_font_path(self, name, style):
        from commands import getstatusoutput
        exit, out = getstatusoutput('fc-list "%s:style=%s" file' %
                                    (name, style))
        if not exit:
            lines = out.splitlines()
            if lines:
                path = lines[0].strip().strip(':')
                return path

    def _create_nix(self):
        for name in STYLES['NORMAL']:
            path = self._get_nix_font_path(self.font_name, name)
            if path is not None:
                self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
                break
        else:
            raise FontNotFound('No usable fonts named: "%s"' %
                               self.font_name)
        for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
            for stylename in STYLES[style]:
                path = self._get_nix_font_path(self.font_name, stylename)
                if path is not None:
                    self.fonts[style] = ImageFont.truetype(path, self.font_size)
                    break
            else:
                if style == 'BOLDITALIC':
                    self.fonts[style] = self.fonts['BOLD']
                else:
                    self.fonts[style] = self.fonts['NORMAL']

    def _lookup_win(self, key, basename, styles, fail=False):
        for suffix in ('', ' (TrueType)'):
            for style in styles:
                try:
                    valname = '%s%s%s' % (basename, style and ' '+style, suffix)
                    val, _ = _winreg.QueryValueEx(key, valname)
                    return val
                except EnvironmentError:
                    continue
        else:
            if fail:
                raise FontNotFound('Font %s (%s) not found in registry' %
                                   (basename, styles[0]))
            return None

    def _create_win(self):
        try:
            key = _winreg.OpenKey(
                _winreg.HKEY_LOCAL_MACHINE,
                r'Software\Microsoft\Windows NT\CurrentVersion\Fonts')
        except EnvironmentError:
            try:
                key = _winreg.OpenKey(
                    _winreg.HKEY_LOCAL_MACHINE,
                    r'Software\Microsoft\Windows\CurrentVersion\Fonts')
            except EnvironmentError:
                raise FontNotFound('Can\'t open Windows font registry key')
        try:
            path = self._lookup_win(key, self.font_name, STYLES['NORMAL'], True)
            self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
            for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
                path = self._lookup_win(key, self.font_name, STYLES[style])
                if path:
                    self.fonts[style] = ImageFont.truetype(path, self.font_size)
                else:
                    if style == 'BOLDITALIC':
                        self.fonts[style] = self.fonts['BOLD']
                    else:
                        self.fonts[style] = self.fonts['NORMAL']
        finally:
            _winreg.CloseKey(key)

    def get_char_size(self):
        """
        Get the character size.
        """
        return self.fonts['NORMAL'].getsize('M')

    def get_font(self, bold, oblique):
        """
        Get the font based on bold and italic flags.
        """
        if bold and oblique:
            return self.fonts['BOLDITALIC']
        elif bold:
            return self.fonts['BOLD']
        elif oblique:
            return self.fonts['ITALIC']
        else:
            return self.fonts['NORMAL']


class ImageFormatter(Formatter):
    """
    Create a PNG image from source code. This uses the Python Imaging Library to
    generate a pixmap from the source code.

    *New in Pygments 0.10.*

    Additional options accepted:

    `image_format`
        An image format to output to that is recognised by PIL; these include:

        * "PNG" (default)
        * "JPEG"
        * "BMP"
        * "GIF"

    `line_pad`
        The extra spacing (in pixels) between each line of text.

        Default: 2

    `font_name`
        The font name to be used as the base font from which others, such as
        bold and italic fonts, will be generated. This really should be a
        monospace font to look sane.

        Default: "Bitstream Vera Sans Mono"

    `font_size`
        The font size in points to be used.

        Default: 14

    `image_pad`
        The padding, in pixels, to be used at each edge of the resulting image.

        Default: 10

    `line_numbers`
        Whether line numbers should be shown: True/False

        Default: True

    `line_number_start`
        The line number of the first line.

        Default: 1

    `line_number_step`
        The step used when printing line numbers.

        Default: 1

    `line_number_bg`
        The background colour (in "#123456" format) of the line number bar, or
        None to use the style background color.

        Default: "#eed"

    `line_number_fg`
        The text color of the line numbers (in "#123456"-like format).

        Default: "#886"

    `line_number_chars`
        The number of columns of line numbers allowable in the line number
        margin.

        Default: 2

    `line_number_bold`
        Whether line numbers will be bold: True/False

        Default: False

    `line_number_italic`
        Whether line numbers will be italicized: True/False

        Default: False

    `line_number_separator`
        Whether a line will be drawn between the line number area and the
        source code area: True/False

        Default: True

    `line_number_pad`
        The horizontal padding (in pixels) between the line number margin, and
        the source code area.

        Default: 6

    `hl_lines`
        Specify a list of lines to be highlighted. *New in Pygments 1.2.*

        Default: empty list

    `hl_color`
        Specify the color for highlighting lines. *New in Pygments 1.2.*

        Default: highlight color of the selected style
    """
    # Required by the pygments mapper
    name = 'img'
    aliases = ['img', 'IMG', 'png']
    filenames = ['*.png']

    unicodeoutput = False

    default_image_format = 'png'

    def __init__(self, **options):
        """
        See the class docstring for explanation of options.
        """
        if not pil_available:
            raise PilNotAvailable(
                'Python Imaging Library is required for this formatter')
        Formatter.__init__(self, **options)
        # Read the style
        self.styles = dict(self.style)
        if self.style.background_color is None:
            self.background_color = '#fff'
        else:
            self.background_color = self.style.background_color
        # Image options
        self.image_format = get_choice_opt(
            options, 'image_format', ['png', 'jpeg', 'gif', 'bmp'],
            self.default_image_format, normcase=True)
        self.image_pad = get_int_opt(options, 'image_pad', 10)
        self.line_pad = get_int_opt(options, 'line_pad', 2)
        # The fonts
        fontsize = get_int_opt(options, 'font_size', 14)
        self.fonts = FontManager(options.get('font_name', ''), fontsize)
        self.fontw, self.fonth = self.fonts.get_char_size()
        # Line number options
        self.line_number_fg = options.get('line_number_fg', '#886')
        self.line_number_bg = options.get('line_number_bg', '#eed')
        self.line_number_chars = get_int_opt(options,
                                             'line_number_chars', 2)
        self.line_number_bold = get_bool_opt(options,
                                             'line_number_bold', False)
        self.line_number_italic = get_bool_opt(options,
                                               'line_number_italic', False)
        self.line_number_pad = get_int_opt(options, 'line_number_pad', 6)
        self.line_numbers = get_bool_opt(options, 'line_numbers', True)
        self.line_number_separator = get_bool_opt(options,
                                                  'line_number_separator', True)
        self.line_number_step = get_int_opt(options, 'line_number_step', 1)
        self.line_number_start = get_int_opt(options, 'line_number_start', 1)
        if self.line_numbers:
            self.line_number_width = (self.fontw * self.line_number_chars +
                                      self.line_number_pad * 2)
        else:
            self.line_number_width = 0
        self.hl_lines = []
        hl_lines_str = get_list_opt(options, 'hl_lines', [])
        for line in hl_lines_str:
            try:
                self.hl_lines.append(int(line))
            except ValueError:
                pass
        self.hl_color = options.get('hl_color',
                                    self.style.highlight_color) or '#f90'
        self.drawables = []

    def get_style_defs(self, arg=''):
        raise NotImplementedError('The -S option is meaningless for the image '
                                  'formatter. Use -O style=<stylename> instead.')

    def _get_line_height(self):
        """
        Get the height of a line.
        """
        return self.fonth + self.line_pad

    def _get_line_y(self, lineno):
        """
        Get the Y coordinate of a line number.
        """
        return lineno * self._get_line_height() + self.image_pad

    def _get_char_width(self):
        """
        Get the width of a character.
        """
        return self.fontw

    def _get_char_x(self, charno):
        """
        Get the X coordinate of a character position.
        """
        return charno * self.fontw + self.image_pad + self.line_number_width

    def _get_text_pos(self, charno, lineno):
        """
        Get the actual position for a character and line position.
        """
        return self._get_char_x(charno), self._get_line_y(lineno)

    def _get_linenumber_pos(self, lineno):
        """
        Get the actual position for the start of a line number.
        """
        return (self.image_pad, self._get_line_y(lineno))

    def _get_text_color(self, style):
        """
        Get the correct color for the token from the style.
        """
        if style['color'] is not None:
            fill = '#' + style['color']
        else:
            fill = '#000'
        return fill

    def _get_style_font(self, style):
        """
        Get the correct font for the style.
        """
        return self.fonts.get_font(style['bold'], style['italic'])

    def _get_image_size(self, maxcharno, maxlineno):
        """
        Get the required image size.
        """
        return (self._get_char_x(maxcharno) + self.image_pad,
                self._get_line_y(maxlineno + 0) + self.image_pad)

    def _draw_linenumber(self, posno, lineno):
        """
        Remember a line number drawable to paint later.
        """
        self._draw_text(
            self._get_linenumber_pos(posno),
            str(lineno).rjust(self.line_number_chars),
            font=self.fonts.get_font(self.line_number_bold,
                                     self.line_number_italic),
            fill=self.line_number_fg,
        )

    def _draw_text(self, pos, text, font, **kw):
        """
        Remember a single drawable tuple to paint later.
        """
        self.drawables.append((pos, text, font, kw))

    def _create_drawables(self, tokensource):
        """
        Create drawables for the token content.
        """
        lineno = charno = maxcharno = 0
        for ttype, value in tokensource:
            while ttype not in self.styles:
                ttype = ttype.parent
            style = self.styles[ttype]
            # TODO: make sure tab expansion happens earlier in the chain. It
            # really ought to be done on the input, as to do it right here is
            # quite complex.
            value = value.expandtabs(4)
            lines = value.splitlines(True)
            #print lines
            for i, line in enumerate(lines):
                temp = line.rstrip('\n')
                if temp:
                    self._draw_text(
                        self._get_text_pos(charno, lineno),
                        temp,
                        font = self._get_style_font(style),
                        fill = self._get_text_color(style)
                    )
                    charno += len(temp)
                    maxcharno = max(maxcharno, charno)
                if line.endswith('\n'):
                    # add a line for each extra line in the value
                    charno = 0
                    lineno += 1
        self.maxcharno = maxcharno
        self.maxlineno = lineno

    def _draw_line_numbers(self):
        """
        Create drawables for the line numbers.
        """
        if not self.line_numbers:
            return
        for p in xrange(self.maxlineno):
            n = p + self.line_number_start
            if (n % self.line_number_step) == 0:
                self._draw_linenumber(p, n)

    def _paint_line_number_bg(self, im):
        """
        Paint the line number background on the image.
        """
        if not self.line_numbers:
            return
        if self.line_number_fg is None:
            return
        draw = ImageDraw.Draw(im)
        recth = im.size[-1]
        rectw = self.image_pad + self.line_number_width - self.line_number_pad
        draw.rectangle([(0, 0),
                        (rectw, recth)],
                       fill=self.line_number_bg)
        draw.line([(rectw, 0), (rectw, recth)], fill=self.line_number_fg)
        del draw

    def format(self, tokensource, outfile):
        """
        Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
        tuples and write it into ``outfile``.

        This implementation calculates where it should draw each token on the
        pixmap, then calculates the required pixmap size and draws the items.
        """
        self._create_drawables(tokensource)
        self._draw_line_numbers()
        im = Image.new(
            'RGB',
            self._get_image_size(self.maxcharno, self.maxlineno),
            self.background_color
        )
        self._paint_line_number_bg(im)
        draw = ImageDraw.Draw(im)
        # Highlight
        if self.hl_lines:
            x = self.image_pad + self.line_number_width - self.line_number_pad + 1
            recth = self._get_line_height()
            rectw = im.size[0] - x
            for linenumber in self.hl_lines:
                y = self._get_line_y(linenumber - 1)
                draw.rectangle([(x, y), (x + rectw, y + recth)],
                               fill=self.hl_color)
        for pos, value, font, kw in self.drawables:
            draw.text(pos, value, font=font, **kw)
        im.save(outfile, self.image_format.upper())


# Add one formatter per format, so that the "-f gif" option gives the correct result
# when used in pygmentize.

class GifImageFormatter(ImageFormatter):
    """
    Create a GIF image from source code. This uses the Python Imaging Library to
    generate a pixmap from the source code.

    *New in Pygments 1.0.* (You could create GIF images before by passing a
    suitable `image_format` option to the `ImageFormatter`.)
    """

    name = 'img_gif'
    aliases = ['gif']
    filenames = ['*.gif']
    default_image_format = 'gif'


class JpgImageFormatter(ImageFormatter):
    """
    Create a JPEG image from source code. This uses the Python Imaging Library to
    generate a pixmap from the source code.

    *New in Pygments 1.0.* (You could create JPEG images before by passing a
    suitable `image_format` option to the `ImageFormatter`.)
    """

    name = 'img_jpg'
    aliases = ['jpg', 'jpeg']
    filenames = ['*.jpg']
    default_image_format = 'jpeg'


class BmpImageFormatter(ImageFormatter):
    """
    Create a bitmap image from source code. This uses the Python Imaging Library to
    generate a pixmap from the source code.

    *New in Pygments 1.0.* (You could create bitmap images before by passing a
    suitable `image_format` option to the `ImageFormatter`.)
    """

    name = 'img_bmp'
    aliases = ['bmp', 'bitmap']
    filenames = ['*.bmp']
    default_image_format = 'bmp'
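
# Editor's note: with the aliases above registered, the same image output is
# reachable from the pygmentize command line, e.g. (a hedged sketch using
# standard pygmentize flags; not part of the original source):
#
#   pygmentize -f img -O font_size=12,line_numbers=True -o out.png input.py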
378
packages/wakatime/wakatime/packages/pygments2/formatters/latex.py
Normal file
@@ -0,0 +1,378 @@
# -*- coding: utf-8 -*-
"""
    pygments.formatters.latex
    ~~~~~~~~~~~~~~~~~~~~~~~~~

    Formatter for LaTeX fancyvrb output.

    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

from pygments.formatter import Formatter
from pygments.token import Token, STANDARD_TYPES
from pygments.util import get_bool_opt, get_int_opt, StringIO


__all__ = ['LatexFormatter']


def escape_tex(text, commandprefix):
    return text.replace('\\', '\x00'). \
                replace('{', '\x01'). \
                replace('}', '\x02'). \
                replace('\x00', r'\%sZbs{}' % commandprefix). \
                replace('\x01', r'\%sZob{}' % commandprefix). \
                replace('\x02', r'\%sZcb{}' % commandprefix). \
                replace('^', r'\%sZca{}' % commandprefix). \
                replace('_', r'\%sZus{}' % commandprefix). \
                replace('&', r'\%sZam{}' % commandprefix). \
                replace('<', r'\%sZlt{}' % commandprefix). \
                replace('>', r'\%sZgt{}' % commandprefix). \
                replace('#', r'\%sZsh{}' % commandprefix). \
                replace('%', r'\%sZpc{}' % commandprefix). \
                replace('$', r'\%sZdl{}' % commandprefix). \
                replace('-', r'\%sZhy{}' % commandprefix). \
                replace("'", r'\%sZsq{}' % commandprefix). \
                replace('"', r'\%sZdq{}' % commandprefix). \
                replace('~', r'\%sZti{}' % commandprefix)
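
# Editor's worked example of escape_tex() (follows directly from the chained
# replaces above; not part of the original source). Backslashes and braces are
# shuttled through \x00-\x02 placeholders so their replacements don't get
# re-escaped by the later steps:
#
# >>> escape_tex(r'50% of $x_i$', 'PY')
# '50\\PYZpc{} of \\PYZdl{}x\\PYZus{}i\\PYZdl{}'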
DOC_TEMPLATE = r'''
\documentclass{%(docclass)s}
\usepackage{fancyvrb}
\usepackage{color}
\usepackage[%(encoding)s]{inputenc}
%(preamble)s

%(styledefs)s

\begin{document}

\section*{%(title)s}

%(code)s
\end{document}
'''

## Small explanation of the mess below :)
#
# The previous version of the LaTeX formatter just assigned a command to
# each token type defined in the current style. That obviously is
# problematic if the highlighted code is produced for a different style
# than the style commands themselves.
#
# This version works much like the HTML formatter which assigns multiple
# CSS classes to each <span> tag, from the most specific to the least
# specific token type, thus falling back to the parent token type if one
# is not defined. Here, the classes are there too and use the same short
# forms given in token.STANDARD_TYPES.
#
# Highlighted code now only uses one custom command, which by default is
# \PY and selectable by the commandprefix option (and in addition the
# escapes \PYZat, \PYZlb and \PYZrb which haven't been renamed for
# backwards compatibility purposes).
#
# \PY has two arguments: the classes, separated by +, and the text to
# render in that style. The classes are resolved into the respective
# style commands by magic, which serves to ignore unknown classes.
#
# The magic macros are:
# * \PY@it, \PY@bf, etc. are unconditionally wrapped around the text
#   to render in \PY@do. Their definition determines the style.
# * \PY@reset resets \PY@it etc. to do nothing.
# * \PY@toks parses the list of classes, using magic inspired by the
#   keyval package (but modified to use plusses instead of commas
#   because fancyvrb redefines commas inside its environments).
# * \PY@tok processes one class, calling the \PY@tok@classname command
#   if it exists.
# * \PY@tok@classname sets the \PY@it etc. to reflect the chosen style
#   for its class.
# * \PY resets the style, parses the classnames and then calls \PY@do.
#
# Tip: to read this code, print it out in substituted form using e.g.
# >>> print STYLE_TEMPLATE % {'cp': 'PY'}

STYLE_TEMPLATE = r'''
\makeatletter
\def\%(cp)s@reset{\let\%(cp)s@it=\relax \let\%(cp)s@bf=\relax%%
    \let\%(cp)s@ul=\relax \let\%(cp)s@tc=\relax%%
    \let\%(cp)s@bc=\relax \let\%(cp)s@ff=\relax}
\def\%(cp)s@tok#1{\csname %(cp)s@tok@#1\endcsname}
\def\%(cp)s@toks#1+{\ifx\relax#1\empty\else%%
    \%(cp)s@tok{#1}\expandafter\%(cp)s@toks\fi}
\def\%(cp)s@do#1{\%(cp)s@bc{\%(cp)s@tc{\%(cp)s@ul{%%
    \%(cp)s@it{\%(cp)s@bf{\%(cp)s@ff{#1}}}}}}}
\def\%(cp)s#1#2{\%(cp)s@reset\%(cp)s@toks#1+\relax+\%(cp)s@do{#2}}

%(styles)s

\def\%(cp)sZbs{\char`\\}
\def\%(cp)sZus{\char`\_}
\def\%(cp)sZob{\char`\{}
\def\%(cp)sZcb{\char`\}}
\def\%(cp)sZca{\char`\^}
\def\%(cp)sZam{\char`\&}
\def\%(cp)sZlt{\char`\<}
\def\%(cp)sZgt{\char`\>}
\def\%(cp)sZsh{\char`\#}
\def\%(cp)sZpc{\char`\%%}
\def\%(cp)sZdl{\char`\$}
\def\%(cp)sZhy{\char`\-}
\def\%(cp)sZsq{\char`\'}
\def\%(cp)sZdq{\char`\"}
\def\%(cp)sZti{\char`\~}
%% for compatibility with earlier versions
\def\%(cp)sZat{@}
\def\%(cp)sZlb{[}
\def\%(cp)sZrb{]}
\makeatother
'''


def _get_ttype_name(ttype):
    fname = STANDARD_TYPES.get(ttype)
    if fname:
        return fname
    aname = ''
    while fname is None:
        aname = ttype[-1] + aname
        ttype = ttype.parent
        fname = STANDARD_TYPES.get(ttype)
    return fname + aname


class LatexFormatter(Formatter):
    r"""
    Format tokens as LaTeX code. This needs the `fancyvrb` and `color`
    standard packages.

    Without the `full` option, code is formatted as one ``Verbatim``
    environment, like this:

    .. sourcecode:: latex

        \begin{Verbatim}[commandchars=\\{\}]
        \PY{k}{def }\PY{n+nf}{foo}(\PY{n}{bar}):
            \PY{k}{pass}
        \end{Verbatim}

    The special command used here (``\PY``) and all the other macros it needs
    are output by the `get_style_defs` method.

    With the `full` option, a complete LaTeX document is output, including
    the command definitions in the preamble.

    The `get_style_defs()` method of a `LatexFormatter` returns a string
    containing ``\def`` commands defining the macros needed inside the
    ``Verbatim`` environments.

    Additional options accepted:

    `style`
        The style to use, can be a string or a Style subclass (default:
        ``'default'``).

    `full`
        Tells the formatter to output a "full" document, i.e. a complete
        self-contained document (default: ``False``).

    `title`
        If `full` is true, the title that should be used to caption the
        document (default: ``''``).

    `docclass`
        If the `full` option is enabled, this is the document class to use
        (default: ``'article'``).

    `preamble`
        If the `full` option is enabled, this can be further preamble commands,
        e.g. ``\usepackage`` (default: ``''``).

    `linenos`
        If set to ``True``, output line numbers (default: ``False``).

    `linenostart`
        The line number for the first line (default: ``1``).

    `linenostep`
        If set to a number n > 1, only every nth line number is printed.

    `verboptions`
        Additional options given to the Verbatim environment (see the *fancyvrb*
        docs for possible values) (default: ``''``).

    `commandprefix`
        The LaTeX commands used to produce colored output are constructed
        using this prefix and some letters (default: ``'PY'``).
        *New in Pygments 0.7.*

        *New in Pygments 0.10:* the default is now ``'PY'`` instead of ``'C'``.

    `texcomments`
        If set to ``True``, enables LaTeX comment lines. That is, LaTeX markup
        in comment tokens is not escaped so that LaTeX can render it (default:
        ``False``). *New in Pygments 1.2.*

    `mathescape`
        If set to ``True``, enables LaTeX math mode escape in comments. That
        is, ``'$...$'`` inside a comment will trigger math mode (default:
        ``False``). *New in Pygments 1.2.*
    """
    name = 'LaTeX'
    aliases = ['latex', 'tex']
    filenames = ['*.tex']

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        self.docclass = options.get('docclass', 'article')
        self.preamble = options.get('preamble', '')
        self.linenos = get_bool_opt(options, 'linenos', False)
        self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
        self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
        self.verboptions = options.get('verboptions', '')
        self.nobackground = get_bool_opt(options, 'nobackground', False)
        self.commandprefix = options.get('commandprefix', 'PY')
        self.texcomments = get_bool_opt(options, 'texcomments', False)
        self.mathescape = get_bool_opt(options, 'mathescape', False)

        self._create_stylesheet()


    def _create_stylesheet(self):
        t2n = self.ttype2name = {Token: ''}
        c2d = self.cmd2def = {}
        cp = self.commandprefix

        def rgbcolor(col):
            if col:
                return ','.join(['%.2f' %(int(col[i] + col[i + 1], 16) / 255.0)
                                 for i in (0, 2, 4)])
            else:
                return '1,1,1'

        for ttype, ndef in self.style:
            name = _get_ttype_name(ttype)
            cmndef = ''
            if ndef['bold']:
                cmndef += r'\let\$$@bf=\textbf'
            if ndef['italic']:
                cmndef += r'\let\$$@it=\textit'
            if ndef['underline']:
                cmndef += r'\let\$$@ul=\underline'
            if ndef['roman']:
                cmndef += r'\let\$$@ff=\textrm'
            if ndef['sans']:
                cmndef += r'\let\$$@ff=\textsf'
            if ndef['mono']:
                cmndef += r'\let\$$@ff=\texttt'
            if ndef['color']:
                cmndef += (r'\def\$$@tc##1{\textcolor[rgb]{%s}{##1}}' %
                           rgbcolor(ndef['color']))
            if ndef['border']:
                cmndef += (r'\def\$$@bc##1{\setlength{\fboxsep}{0pt}'
                           r'\fcolorbox[rgb]{%s}{%s}{\strut ##1}}' %
                           (rgbcolor(ndef['border']),
                            rgbcolor(ndef['bgcolor'])))
            elif ndef['bgcolor']:
                cmndef += (r'\def\$$@bc##1{\setlength{\fboxsep}{0pt}'
                           r'\colorbox[rgb]{%s}{\strut ##1}}' %
                           rgbcolor(ndef['bgcolor']))
            if cmndef == '':
                continue
            cmndef = cmndef.replace('$$', cp)
            t2n[ttype] = name
            c2d[name] = cmndef

    def get_style_defs(self, arg=''):
        """
        Return the command sequences needed to define the commands
        used to format text in the verbatim environment. ``arg`` is ignored.
        """
        cp = self.commandprefix
        styles = []
        for name, definition in self.cmd2def.iteritems():
            styles.append(r'\expandafter\def\csname %s@tok@%s\endcsname{%s}' %
                          (cp, name, definition))
        return STYLE_TEMPLATE % {'cp': self.commandprefix,
                                 'styles': '\n'.join(styles)}

    def format_unencoded(self, tokensource, outfile):
        # TODO: add support for background colors
        t2n = self.ttype2name
        cp = self.commandprefix

        if self.full:
            realoutfile = outfile
            outfile = StringIO()

        outfile.write(ur'\begin{Verbatim}[commandchars=\\\{\}')
        if self.linenos:
            start, step = self.linenostart, self.linenostep
            outfile.write(u',numbers=left' +
                          (start and u',firstnumber=%d' % start or u'') +
                          (step and u',stepnumber=%d' % step or u''))
        if self.mathescape or self.texcomments:
            outfile.write(ur',codes={\catcode`\$=3\catcode`\^=7\catcode`\_=8}')
        if self.verboptions:
            outfile.write(u',' + self.verboptions)
        outfile.write(u']\n')

        for ttype, value in tokensource:
            if ttype in Token.Comment:
                if self.texcomments:
                    # Try to guess comment starting lexeme and escape it ...
                    start = value[0:1]
                    for i in xrange(1, len(value)):
                        if start[0] != value[i]:
                            break
                        start += value[i]

                    value = value[len(start):]
                    start = escape_tex(start, self.commandprefix)

                    # ... but do not escape inside comment.
                    value = start + value
                elif self.mathescape:
                    # Only escape parts not inside a math environment.
                    parts = value.split('$')
                    in_math = False
                    for i, part in enumerate(parts):
                        if not in_math:
                            parts[i] = escape_tex(part, self.commandprefix)
                        in_math = not in_math
                    value = '$'.join(parts)
                else:
                    value = escape_tex(value, self.commandprefix)
            else:
                value = escape_tex(value, self.commandprefix)
            styles = []
            while ttype is not Token:
                try:
                    styles.append(t2n[ttype])
                except KeyError:
                    # not in current style
                    styles.append(_get_ttype_name(ttype))
                ttype = ttype.parent
            styleval = '+'.join(reversed(styles))
            if styleval:
                spl = value.split('\n')
                for line in spl[:-1]:
                    if line:
                        outfile.write("\\%s{%s}{%s}" % (cp, styleval, line))
                    outfile.write('\n')
                if spl[-1]:
                    outfile.write("\\%s{%s}{%s}" % (cp, styleval, spl[-1]))
            else:
                outfile.write(value)

        outfile.write(u'\\end{Verbatim}\n')

        if self.full:
            realoutfile.write(DOC_TEMPLATE %
                dict(docclass = self.docclass,
                     preamble = self.preamble,
                     title = self.title,
                     encoding = self.encoding or 'latin1',
                     styledefs = self.get_style_defs(),
                     code = outfile.getvalue()))
765
packages/wakatime/wakatime/packages/pygments2/lexer.py
Normal file
@@ -0,0 +1,765 @@
# -*- coding: utf-8 -*-
"""
    pygments.lexer
    ~~~~~~~~~~~~~~

    Base lexer classes.

    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""
import re, itertools

from pygments.filter import apply_filters, Filter
from pygments.filters import get_filter_by_name
from pygments.token import Error, Text, Other, _TokenType
from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
     make_analysator


__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
           'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this']


_encoding_map = [('\xef\xbb\xbf', 'utf-8'),
                 ('\xff\xfe\0\0', 'utf-32'),
                 ('\0\0\xfe\xff', 'utf-32be'),
                 ('\xff\xfe', 'utf-16'),
                 ('\xfe\xff', 'utf-16be')]

_default_analyse = staticmethod(lambda x: 0.0)


class LexerMeta(type):
    """
    This metaclass automagically converts ``analyse_text`` methods into
    static methods which always return float values.
    """

    def __new__(cls, name, bases, d):
        if 'analyse_text' in d:
            d['analyse_text'] = make_analysator(d['analyse_text'])
        return type.__new__(cls, name, bases, d)


class Lexer(object):
    """
    Lexer for a specific language.

    Basic options recognized:
    ``stripnl``
        Strip leading and trailing newlines from the input (default: True).
    ``stripall``
        Strip all leading and trailing whitespace from the input
        (default: False).
    ``ensurenl``
        Make sure that the input ends with a newline (default: True). This
        is required for some lexers that consume input linewise.
        *New in Pygments 1.3.*
    ``tabsize``
        If given and greater than 0, expand tabs in the input (default: 0).
    ``encoding``
        If given, must be an encoding name. This encoding will be used to
        convert the input string to Unicode, if it is not already a Unicode
        string (default: ``'latin1'``).
        Can also be ``'guess'`` to use a simple UTF-8 / Latin1 detection, or
        ``'chardet'`` to use the chardet library, if it is installed.
    """
    #: Name of the lexer
    name = None

    #: Shortcuts for the lexer
    aliases = []

    #: File name globs
    filenames = []

    #: Secondary file name globs
    alias_filenames = []

    #: MIME types
    mimetypes = []

    #: Priority, should multiple lexers match and no content is provided
    priority = 0

    __metaclass__ = LexerMeta

    def __init__(self, **options):
        self.options = options
        self.stripnl = get_bool_opt(options, 'stripnl', True)
        self.stripall = get_bool_opt(options, 'stripall', False)
        self.ensurenl = get_bool_opt(options, 'ensurenl', True)
        self.tabsize = get_int_opt(options, 'tabsize', 0)
        self.encoding = options.get('encoding', 'latin1')
        # self.encoding = options.get('inencoding', None) or self.encoding
        self.filters = []
        for filter_ in get_list_opt(options, 'filters', ()):
            self.add_filter(filter_)

    def __repr__(self):
        if self.options:
            return '<pygments.lexers.%s with %r>' % (self.__class__.__name__,
                                                     self.options)
        else:
            return '<pygments.lexers.%s>' % self.__class__.__name__

    def add_filter(self, filter_, **options):
        """
        Add a new stream filter to this lexer.
        """
        if not isinstance(filter_, Filter):
            filter_ = get_filter_by_name(filter_, **options)
        self.filters.append(filter_)
|
||||||
|
|
||||||
|
def analyse_text(text):
|
||||||
|
"""
|
||||||
|
Has to return a float between ``0`` and ``1`` that indicates
|
||||||
|
if a lexer wants to highlight this text. Used by ``guess_lexer``.
|
||||||
|
If this method returns ``0`` it won't highlight it in any case, if
|
||||||
|
it returns ``1`` highlighting with this lexer is guaranteed.
|
||||||
|
|
||||||
|
The `LexerMeta` metaclass automatically wraps this function so
|
||||||
|
that it works like a static method (no ``self`` or ``cls``
|
||||||
|
parameter) and the return value is automatically converted to
|
||||||
|
`float`. If the return value is an object that is boolean `False`
|
||||||
|
it's the same as if the return values was ``0.0``.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def get_tokens(self, text, unfiltered=False):
|
||||||
|
"""
|
||||||
|
Return an iterable of (tokentype, value) pairs generated from
|
||||||
|
`text`. If `unfiltered` is set to `True`, the filtering mechanism
|
||||||
|
is bypassed even if filters are defined.
|
||||||
|
|
||||||
|
Also preprocess the text, i.e. expand tabs and strip it if
|
||||||
|
wanted and applies registered filters.
|
||||||
|
"""
|
||||||
|
if not isinstance(text, unicode):
|
||||||
|
if self.encoding == 'guess':
|
||||||
|
try:
|
||||||
|
text = text.decode('utf-8')
|
||||||
|
if text.startswith(u'\ufeff'):
|
||||||
|
text = text[len(u'\ufeff'):]
|
||||||
|
except UnicodeDecodeError:
|
||||||
|
text = text.decode('latin1')
|
||||||
|
elif self.encoding == 'chardet':
|
||||||
|
try:
|
||||||
|
import chardet
|
||||||
|
except ImportError:
|
||||||
|
raise ImportError('To enable chardet encoding guessing, '
|
||||||
|
'please install the chardet library '
|
||||||
|
'from http://chardet.feedparser.org/')
|
||||||
|
# check for BOM first
|
||||||
|
decoded = None
|
||||||
|
for bom, encoding in _encoding_map:
|
||||||
|
if text.startswith(bom):
|
||||||
|
decoded = unicode(text[len(bom):], encoding,
|
||||||
|
errors='replace')
|
||||||
|
break
|
||||||
|
# no BOM found, so use chardet
|
||||||
|
if decoded is None:
|
||||||
|
enc = chardet.detect(text[:1024]) # Guess using first 1KB
|
||||||
|
decoded = unicode(text, enc.get('encoding') or 'utf-8',
|
||||||
|
errors='replace')
|
||||||
|
text = decoded
|
||||||
|
else:
|
||||||
|
text = text.decode(self.encoding)
|
||||||
|
else:
|
||||||
|
if text.startswith(u'\ufeff'):
|
||||||
|
text = text[len(u'\ufeff'):]
|
||||||
|
|
||||||
|
# text now *is* a unicode string
|
||||||
|
text = text.replace('\r\n', '\n')
|
||||||
|
text = text.replace('\r', '\n')
|
||||||
|
if self.stripall:
|
||||||
|
text = text.strip()
|
||||||
|
elif self.stripnl:
|
||||||
|
text = text.strip('\n')
|
||||||
|
if self.tabsize > 0:
|
||||||
|
text = text.expandtabs(self.tabsize)
|
||||||
|
if self.ensurenl and not text.endswith('\n'):
|
||||||
|
text += '\n'
|
||||||
|
|
||||||
|
def streamer():
|
||||||
|
for i, t, v in self.get_tokens_unprocessed(text):
|
||||||
|
yield t, v
|
||||||
|
stream = streamer()
|
||||||
|
if not unfiltered:
|
||||||
|
stream = apply_filters(stream, self.filters, self)
|
||||||
|
return stream
|
||||||
|
|
||||||
|
def get_tokens_unprocessed(self, text):
|
||||||
|
"""
|
||||||
|
Return an iterable of (tokentype, value) pairs.
|
||||||
|
In subclasses, implement this method as a generator to
|
||||||
|
maximize effectiveness.
|
||||||
|
"""
|
||||||
|
raise NotImplementedError
|


class DelegatingLexer(Lexer):
    """
    This lexer takes two lexers as arguments: a root lexer and
    a language lexer. First everything is scanned using the language
    lexer, afterwards all ``Other`` tokens are lexed using the root
    lexer.

    The lexers from the ``template`` lexer package use this base lexer.
    """

    def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options):
        self.root_lexer = _root_lexer(**options)
        self.language_lexer = _language_lexer(**options)
        self.needle = _needle
        Lexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        buffered = ''
        insertions = []
        lng_buffer = []
        for i, t, v in self.language_lexer.get_tokens_unprocessed(text):
            if t is self.needle:
                if lng_buffer:
                    insertions.append((len(buffered), lng_buffer))
                    lng_buffer = []
                buffered += v
            else:
                lng_buffer.append((i, t, v))
        if lng_buffer:
            insertions.append((len(buffered), lng_buffer))
        return do_insertions(insertions,
                             self.root_lexer.get_tokens_unprocessed(buffered))


#-------------------------------------------------------------------------------
# RegexLexer and ExtendedRegexLexer
#


class include(str):
    """
    Indicates that a state should include rules from another state.
    """
    pass


class _inherit(object):
    """
    Indicates that a state should inherit from its superclass.
    """
    def __repr__(self):
        return 'inherit'

inherit = _inherit()


class combined(tuple):
    """
    Indicates a state combined from multiple states.
    """

    def __new__(cls, *args):
        return tuple.__new__(cls, args)

    def __init__(self, *args):
        # tuple.__init__ doesn't do anything
        pass


class _PseudoMatch(object):
    """
    A pseudo match object constructed from a string.
    """

    def __init__(self, start, text):
        self._text = text
        self._start = start

    def start(self, arg=None):
        return self._start

    def end(self, arg=None):
        return self._start + len(self._text)

    def group(self, arg=None):
        if arg:
            raise IndexError('No such group')
        return self._text

    def groups(self):
        return (self._text,)

    def groupdict(self):
        return {}


def bygroups(*args):
    """
    Callback that yields multiple actions for each group in the match.
    """
    def callback(lexer, match, ctx=None):
        for i, action in enumerate(args):
            if action is None:
                continue
            elif type(action) is _TokenType:
                data = match.group(i + 1)
                if data:
                    yield match.start(i + 1), action, data
            else:
                data = match.group(i + 1)
                if data is not None:
                    if ctx:
                        ctx.pos = match.start(i + 1)
                    for item in action(lexer, _PseudoMatch(match.start(i + 1),
                                       data), ctx):
                        if item:
                            yield item
        if ctx:
            ctx.pos = match.end()
    return callback


class _This(object):
    """
    Special singleton used for indicating the caller class.
    Used by ``using``.
    """
this = _This()


def using(_other, **kwargs):
    """
    Callback that processes the match with a different lexer.

    The keyword arguments are forwarded to the lexer, except `state` which
    is handled separately.

    `state` specifies the state that the new lexer will start in, and can
    be an enumerable such as ('root', 'inline', 'string') or a simple
    string which is assumed to be on top of the root state.

    Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
    """
    gt_kwargs = {}
    if 'state' in kwargs:
        s = kwargs.pop('state')
        if isinstance(s, (list, tuple)):
            gt_kwargs['stack'] = s
        else:
            gt_kwargs['stack'] = ('root', s)

    if _other is this:
        def callback(lexer, match, ctx=None):
            # if keyword arguments are given the callback
            # function has to create a new lexer instance
            if kwargs:
                # XXX: cache that somehow
                kwargs.update(lexer.options)
                lx = lexer.__class__(**kwargs)
            else:
                lx = lexer
            s = match.start()
            for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
                yield i + s, t, v
            if ctx:
                ctx.pos = match.end()
    else:
        def callback(lexer, match, ctx=None):
            # XXX: cache that somehow
            kwargs.update(lexer.options)
            lx = _other(**kwargs)

            s = match.start()
            for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
                yield i + s, t, v
            if ctx:
                ctx.pos = match.end()
    return callback

class RegexLexerMeta(LexerMeta):
    """
    Metaclass for RegexLexer, creates the self._tokens attribute from
    self.tokens on the first instantiation.
    """

    def _process_regex(cls, regex, rflags):
        """Preprocess the regular expression component of a token definition."""
        return re.compile(regex, rflags).match

    def _process_token(cls, token):
        """Preprocess the token component of a token definition."""
        assert type(token) is _TokenType or callable(token), \
               'token type must be simple type or callable, not %r' % (token,)
        return token

    def _process_new_state(cls, new_state, unprocessed, processed):
        """Preprocess the state transition action of a token definition."""
        if isinstance(new_state, str):
            # an existing state
            if new_state == '#pop':
                return -1
            elif new_state in unprocessed:
                return (new_state,)
            elif new_state == '#push':
                return new_state
            elif new_state[:5] == '#pop:':
                return -int(new_state[5:])
            else:
                assert False, 'unknown new state %r' % new_state
        elif isinstance(new_state, combined):
            # combine a new state from existing ones
            tmp_state = '_tmp_%d' % cls._tmpname
            cls._tmpname += 1
            itokens = []
            for istate in new_state:
                assert istate != new_state, 'circular state ref %r' % istate
                itokens.extend(cls._process_state(unprocessed,
                                                  processed, istate))
            processed[tmp_state] = itokens
            return (tmp_state,)
        elif isinstance(new_state, tuple):
            # push more than one state
            for istate in new_state:
                assert (istate in unprocessed or
                        istate in ('#pop', '#push')), \
                       'unknown new state ' + istate
            return new_state
        else:
            assert False, 'unknown new state def %r' % new_state

    def _process_state(cls, unprocessed, processed, state):
        """Preprocess a single state definition."""
        assert type(state) is str, "wrong state name %r" % state
        assert state[0] != '#', "invalid state name %r" % state
        if state in processed:
            return processed[state]
        tokens = processed[state] = []
        rflags = cls.flags
        for tdef in unprocessed[state]:
            if isinstance(tdef, include):
                # it's a state reference
                assert tdef != state, "circular state reference %r" % state
                tokens.extend(cls._process_state(unprocessed, processed,
                                                 str(tdef)))
                continue
            if isinstance(tdef, _inherit):
                # processed already
                continue

            assert type(tdef) is tuple, "wrong rule def %r" % tdef

            try:
                rex = cls._process_regex(tdef[0], rflags)
            except Exception, err:
                raise ValueError("uncompilable regex %r in state %r of %r: %s" %
                                 (tdef[0], state, cls, err))

            token = cls._process_token(tdef[1])

            if len(tdef) == 2:
                new_state = None
            else:
                new_state = cls._process_new_state(tdef[2],
                                                   unprocessed, processed)

            tokens.append((rex, token, new_state))
        return tokens

    def process_tokendef(cls, name, tokendefs=None):
        """Preprocess a dictionary of token definitions."""
        processed = cls._all_tokens[name] = {}
        tokendefs = tokendefs or cls.tokens[name]
        for state in tokendefs.keys():
            cls._process_state(tokendefs, processed, state)
        return processed

    def get_tokendefs(cls):
        """
        Merge tokens from superclasses in MRO order, returning a single tokendef
        dictionary.

        Any state that is not defined by a subclass will be inherited
        automatically. States that *are* defined by subclasses will, by
        default, override that state in the superclass. If a subclass wishes to
        inherit definitions from a superclass, it can use the special value
        "inherit", which will cause the superclass' state definition to be
        included at that point in the state.
        """
        tokens = {}
        inheritable = {}
        for c in itertools.chain((cls,), cls.__mro__):
            toks = c.__dict__.get('tokens', {})

            for state, items in toks.iteritems():
                curitems = tokens.get(state)
                if curitems is None:
                    tokens[state] = items
                    try:
                        inherit_ndx = items.index(inherit)
                    except ValueError:
                        continue
                    inheritable[state] = inherit_ndx
                    continue

                inherit_ndx = inheritable.pop(state, None)
                if inherit_ndx is None:
                    continue

                # Replace the "inherit" value with the items
                curitems[inherit_ndx:inherit_ndx+1] = items
                try:
                    new_inh_ndx = items.index(inherit)
                except ValueError:
                    pass
                else:
                    inheritable[state] = inherit_ndx + new_inh_ndx

        return tokens

    def __call__(cls, *args, **kwds):
        """Instantiate cls after preprocessing its token definitions."""
        if '_tokens' not in cls.__dict__:
            cls._all_tokens = {}
            cls._tmpname = 0
            if hasattr(cls, 'token_variants') and cls.token_variants:
                # don't process yet
                pass
            else:
                cls._tokens = cls.process_tokendef('', cls.get_tokendefs())

        return type.__call__(cls, *args, **kwds)

class RegexLexer(Lexer):
    """
    Base for simple stateful regular expression-based lexers.
    Simplifies the lexing process so that you need only
    provide a list of states and regular expressions.
    """
    __metaclass__ = RegexLexerMeta

    #: Flags for compiling the regular expressions.
    #: Defaults to MULTILINE.
    flags = re.MULTILINE

    #: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}``
    #:
    #: The initial state is 'root'.
    #: ``new_state`` can be omitted to signify no state transition.
    #: If it is a string, the state is pushed on the stack and changed.
    #: If it is a tuple of strings, all states are pushed on the stack and
    #: the current state will be the topmost.
    #: It can also be ``combined('state1', 'state2', ...)``
    #: to signify a new, anonymous state combined from the rules of two
    #: or more existing ones.
    #: Furthermore, it can be '#pop' to signify going back one step in
    #: the state stack, or '#push' to push the current state on the stack
    #: again.
    #:
    #: The tuple can also be replaced with ``include('state')``, in which
    #: case the rules from the state named by the string are included in the
    #: current one.
    tokens = {}

    def get_tokens_unprocessed(self, text, stack=('root',)):
        """
        Split ``text`` into (tokentype, text) pairs.

        ``stack`` is the initial stack (default: ``['root']``)
        """
        pos = 0
        tokendefs = self._tokens
        statestack = list(stack)
        statetokens = tokendefs[statestack[-1]]
        while 1:
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, pos)
                if m:
                    if type(action) is _TokenType:
                        yield pos, action, m.group()
                    else:
                        for item in action(self, m):
                            yield item
                    pos = m.end()
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    statestack.pop()
                                elif state == '#push':
                                    statestack.append(statestack[-1])
                                else:
                                    statestack.append(state)
                        elif isinstance(new_state, int):
                            # pop
                            del statestack[new_state:]
                        elif new_state == '#push':
                            statestack.append(statestack[-1])
                        else:
                            assert False, "wrong state def: %r" % new_state
                        statetokens = tokendefs[statestack[-1]]
                    break
            else:
                try:
                    if text[pos] == '\n':
                        # at EOL, reset state to "root"
                        statestack = ['root']
                        statetokens = tokendefs['root']
                        yield pos, Text, u'\n'
                        pos += 1
                        continue
                    yield pos, Error, text[pos]
                    pos += 1
                except IndexError:
                    break

class LexerContext(object):
    """
    A helper object that holds lexer position data.
    """

    def __init__(self, text, pos, stack=None, end=None):
        self.text = text
        self.pos = pos
        self.end = end or len(text)  # end=0 not supported ;-)
        self.stack = stack or ['root']

    def __repr__(self):
        return 'LexerContext(%r, %r, %r)' % (
            self.text, self.pos, self.stack)


class ExtendedRegexLexer(RegexLexer):
    """
    A RegexLexer that uses a context object to store its state.
    """

    def get_tokens_unprocessed(self, text=None, context=None):
        """
        Split ``text`` into (tokentype, text) pairs.
        If ``context`` is given, use this lexer context instead.
        """
        tokendefs = self._tokens
        if not context:
            ctx = LexerContext(text, 0)
            statetokens = tokendefs['root']
        else:
            ctx = context
            statetokens = tokendefs[ctx.stack[-1]]
            text = ctx.text
        while 1:
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, ctx.pos, ctx.end)
                if m:
                    if type(action) is _TokenType:
                        yield ctx.pos, action, m.group()
                        ctx.pos = m.end()
                    else:
                        for item in action(self, m, ctx):
                            yield item
                        if not new_state:
                            # altered the state stack?
                            statetokens = tokendefs[ctx.stack[-1]]
                    # CAUTION: callback must set ctx.pos!
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    ctx.stack.pop()
                                elif state == '#push':
                                    # upstream appended statestack[-1] here,
                                    # an unbound name in this method; the
                                    # current state is what was meant
                                    ctx.stack.append(ctx.stack[-1])
                                else:
                                    ctx.stack.append(state)
                        elif isinstance(new_state, int):
                            # pop
                            del ctx.stack[new_state:]
                        elif new_state == '#push':
                            ctx.stack.append(ctx.stack[-1])
                        else:
                            assert False, "wrong state def: %r" % new_state
                        statetokens = tokendefs[ctx.stack[-1]]
                    break
            else:
                try:
                    if ctx.pos >= ctx.end:
                        break
                    if text[ctx.pos] == '\n':
                        # at EOL, reset state to "root"
                        ctx.stack = ['root']
                        statetokens = tokendefs['root']
                        yield ctx.pos, Text, u'\n'
                        ctx.pos += 1
                        continue
                    yield ctx.pos, Error, text[ctx.pos]
                    ctx.pos += 1
                except IndexError:
                    break

def do_insertions(insertions, tokens):
    """
    Helper for lexers which must combine the results of several
    sublexers.

    ``insertions`` is a list of ``(index, itokens)`` pairs.
    Each ``itokens`` iterable should be inserted at position
    ``index`` into the token stream given by the ``tokens``
    argument.

    The result is a combined token stream.

    TODO: clean up the code here.
    """
    insertions = iter(insertions)
    try:
        index, itokens = insertions.next()
    except StopIteration:
        # no insertions
        for item in tokens:
            yield item
        return

    realpos = None
    insleft = True

    # iterate over the token stream where we want to insert
    # the tokens from the insertion list.
    for i, t, v in tokens:
        # first iteration. store the position of first item
        if realpos is None:
            realpos = i
        oldi = 0
        while insleft and i + len(v) >= index:
            tmpval = v[oldi:index - i]
            yield realpos, t, tmpval
            realpos += len(tmpval)
            for it_index, it_token, it_value in itokens:
                yield realpos, it_token, it_value
                realpos += len(it_value)
            oldi = index - i
            try:
                index, itokens = insertions.next()
            except StopIteration:
                insleft = False
                break  # not strictly necessary
        yield realpos, t, v[oldi:]
        realpos += len(v) - oldi

    # leftover tokens
    while insleft:
        # no normal tokens, set realpos to zero
        realpos = realpos or 0
        for p, t, v in itokens:
            yield realpos, t, v
            realpos += len(v)
        try:
            index, itokens = insertions.next()
        except StopIteration:
            insleft = False
            break  # not strictly necessary
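That completes lexer.py. The state-machine format documented on RegexLexer.tokens is easiest to see with a toy subclass; the following is a hypothetical sketch (not part of this commit, and it assumes a regular pygments install so the imports resolve):

# Hypothetical demo lexer, not part of the vendored package.
import re
from pygments.lexer import RegexLexer, bygroups
from pygments.token import Comment, Keyword, Name, Text

class IniishLexer(RegexLexer):
    """Toy lexer for an INI-like format."""
    name = 'Iniish'
    aliases = ['iniish']
    flags = re.MULTILINE

    tokens = {
        'root': [
            (r';.*?$', Comment),
            (r'\[.*?\]$', Keyword),
            # bygroups() splits one match into several tokens
            (r'(\w+)(\s*=\s*)(.*?)$', bygroups(Name.Attribute, Text, Text)),
            (r'\s+', Text),
        ],
    }

for tok, val in IniishLexer().get_tokens('[core]\nuser = me\n'):
    print tok, repr(val)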
240
packages/wakatime/wakatime/packages/pygments2/lexers/__init__.py
Normal file

@@ -0,0 +1,240 @@
# -*- coding: utf-8 -*-
"""
    pygments.lexers
    ~~~~~~~~~~~~~~~

    Pygments lexers.

    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import sys
import types
import fnmatch
from os.path import basename

from pygments.lexers._mapping import LEXERS
from pygments.modeline import get_filetype_from_buffer
from pygments.plugin import find_plugin_lexers
from pygments.util import ClassNotFound, bytes


__all__ = ['get_lexer_by_name', 'get_lexer_for_filename', 'find_lexer_class',
           'guess_lexer'] + LEXERS.keys()

_lexer_cache = {}


def _load_lexers(module_name):
    """
    Load a lexer (and all others in the module too).
    """
    mod = __import__(module_name, None, None, ['__all__'])
    for lexer_name in mod.__all__:
        cls = getattr(mod, lexer_name)
        _lexer_cache[cls.name] = cls


def get_all_lexers():
    """
    Return a generator of tuples in the form ``(name, aliases,
    filenames, mimetypes)`` of all known lexers.
    """
    for item in LEXERS.itervalues():
        yield item[1:]
    for lexer in find_plugin_lexers():
        yield lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes


def find_lexer_class(name):
    """
    Look up a lexer class by name. Return None if not found.
    """
    if name in _lexer_cache:
        return _lexer_cache[name]
    # lookup builtin lexers
    for module_name, lname, aliases, _, _ in LEXERS.itervalues():
        if name == lname:
            _load_lexers(module_name)
            return _lexer_cache[name]
    # continue with lexers from setuptools entrypoints
    for cls in find_plugin_lexers():
        if cls.name == name:
            return cls

def get_lexer_by_name(_alias, **options):
    """
    Get a lexer by an alias.
    """
    # lookup builtin lexers
    for module_name, name, aliases, _, _ in LEXERS.itervalues():
        if _alias in aliases:
            if name not in _lexer_cache:
                _load_lexers(module_name)
            return _lexer_cache[name](**options)
    # continue with lexers from setuptools entrypoints
    for cls in find_plugin_lexers():
        if _alias in cls.aliases:
            return cls(**options)
    raise ClassNotFound('no lexer for alias %r found' % _alias)


def get_lexer_for_filename(_fn, code=None, **options):
    """
    Get a lexer for a filename. If multiple lexers match the filename
    pattern, use ``analyse_text()`` to figure out which one is more
    appropriate.
    """
    matches = []
    fn = basename(_fn)
    for modname, name, _, filenames, _ in LEXERS.itervalues():
        for filename in filenames:
            if fnmatch.fnmatch(fn, filename):
                if name not in _lexer_cache:
                    _load_lexers(modname)
                matches.append((_lexer_cache[name], filename))
    for cls in find_plugin_lexers():
        for filename in cls.filenames:
            if fnmatch.fnmatch(fn, filename):
                matches.append((cls, filename))

    if sys.version_info > (3,) and isinstance(code, bytes):
        # decode it, since all analyse_text functions expect unicode
        code = code.decode('latin1')

    def get_rating(info):
        cls, filename = info
        # explicit patterns get a bonus
        bonus = '*' not in filename and 0.5 or 0
        # The class _always_ defines analyse_text because it's included in
        # the Lexer class. The default implementation returns None which
        # gets turned into 0.0. Run scripts/detect_missing_analyse_text.py
        # to find lexers which need it overridden.
        if code:
            return cls.analyse_text(code) + bonus
        return cls.priority + bonus

    if matches:
        matches.sort(key=get_rating)
        #print "Possible lexers, after sort:", matches
        return matches[-1][0](**options)
    raise ClassNotFound('no lexer for filename %r found' % _fn)


def get_lexer_for_mimetype(_mime, **options):
    """
    Get a lexer for a mimetype.
    """
    for modname, name, _, _, mimetypes in LEXERS.itervalues():
        if _mime in mimetypes:
            if name not in _lexer_cache:
                _load_lexers(modname)
            return _lexer_cache[name](**options)
    for cls in find_plugin_lexers():
        if _mime in cls.mimetypes:
            return cls(**options)
    raise ClassNotFound('no lexer for mimetype %r found' % _mime)

def _iter_lexerclasses():
    """
    Return an iterator over all lexer classes.
    """
    for key in sorted(LEXERS):
        module_name, name = LEXERS[key][:2]
        if name not in _lexer_cache:
            _load_lexers(module_name)
        yield _lexer_cache[name]
    for lexer in find_plugin_lexers():
        yield lexer


def guess_lexer_for_filename(_fn, _text, **options):
    """
    Look up all lexers that handle the given filename as a primary
    (``filenames``) or secondary (``alias_filenames``) match. Then run a
    text analysis for those lexers and choose the best result.

    usage::

        >>> from pygments.lexers import guess_lexer_for_filename
        >>> guess_lexer_for_filename('hello.html', '<%= @foo %>')
        <pygments.lexers.templates.RhtmlLexer object at 0xb7d2f32c>
        >>> guess_lexer_for_filename('hello.html', '<h1>{{ title|e }}</h1>')
        <pygments.lexers.templates.HtmlDjangoLexer object at 0xb7d2f2ac>
        >>> guess_lexer_for_filename('style.css', 'a { color: <?= $link ?> }')
        <pygments.lexers.templates.CssPhpLexer object at 0xb7ba518c>
    """
    fn = basename(_fn)
    primary = None
    matching_lexers = set()
    for lexer in _iter_lexerclasses():
        for filename in lexer.filenames:
            if fnmatch.fnmatch(fn, filename):
                matching_lexers.add(lexer)
                primary = lexer
        for filename in lexer.alias_filenames:
            if fnmatch.fnmatch(fn, filename):
                matching_lexers.add(lexer)
    if not matching_lexers:
        raise ClassNotFound('no lexer for filename %r found' % fn)
    if len(matching_lexers) == 1:
        return matching_lexers.pop()(**options)
    result = []
    for lexer in matching_lexers:
        rv = lexer.analyse_text(_text)
        if rv == 1.0:
            return lexer(**options)
        result.append((rv, lexer))
    result.sort()
    if not result[-1][0] and primary is not None:
        return primary(**options)
    return result[-1][1](**options)


def guess_lexer(_text, **options):
    """
    Guess a lexer by strong distinctions in the text (eg, shebang).
    """

    # try to get a vim modeline first
    ft = get_filetype_from_buffer(_text)

    if ft is not None:
        try:
            return get_lexer_by_name(ft, **options)
        except ClassNotFound:
            pass

    best_lexer = [0.0, None]
    for lexer in _iter_lexerclasses():
        rv = lexer.analyse_text(_text)
        if rv == 1.0:
            return lexer(**options)
        if rv > best_lexer[0]:
            best_lexer[:] = (rv, lexer)
    if not best_lexer[0] or best_lexer[1] is None:
        raise ClassNotFound('no lexer matching the text found')
    return best_lexer[1](**options)


class _automodule(types.ModuleType):
    """Automatically import lexers."""

    def __getattr__(self, name):
        info = LEXERS.get(name)
        if info:
            _load_lexers(info[0])
            cls = _lexer_cache[info[1]]
            setattr(self, name, cls)
            return cls
        raise AttributeError(name)


oldmod = sys.modules['pygments.lexers']
newmod = _automodule('pygments.lexers')
newmod.__dict__.update(oldmod.__dict__)
sys.modules['pygments.lexers'] = newmod
del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
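The functions above are the public lookup entry points. A quick sketch of the three lookup styles (hypothetical usage, assuming a regular pygments install rather than this vendored copy):

# Hypothetical usage, not part of the vendored file.
from pygments.lexers import (get_lexer_by_name, get_lexer_for_filename,
                             guess_lexer)

lx = get_lexer_by_name('python', stripall=True)      # by alias
lx = get_lexer_for_filename('setup.py')              # by filename glob
lx = guess_lexer('#!/usr/bin/env python\nprint 1')   # by content analysis
print lx.name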
249
packages/wakatime/wakatime/packages/pygments2/lexers/_luabuiltins.py
Normal file

@@ -0,0 +1,249 @@
# -*- coding: utf-8 -*-
"""
    pygments.lexers._luabuiltins
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    This file contains the names and modules of lua functions.
    It is able to re-generate itself, but for adding new functions you
    probably have to add some callbacks (see function module_callbacks).

    Do not edit the MODULES dict by hand.

    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

MODULES = {'basic': ['_G',
                     '_VERSION',
                     'assert',
                     'collectgarbage',
                     'dofile',
                     'error',
                     'getfenv',
                     'getmetatable',
                     'ipairs',
                     'load',
                     'loadfile',
                     'loadstring',
                     'next',
                     'pairs',
                     'pcall',
                     'print',
                     'rawequal',
                     'rawget',
                     'rawset',
                     'select',
                     'setfenv',
                     'setmetatable',
                     'tonumber',
                     'tostring',
                     'type',
                     'unpack',
                     'xpcall'],
 'coroutine': ['coroutine.create',
               'coroutine.resume',
               'coroutine.running',
               'coroutine.status',
               'coroutine.wrap',
               'coroutine.yield'],
 'debug': ['debug.debug',
           'debug.getfenv',
           'debug.gethook',
           'debug.getinfo',
           'debug.getlocal',
           'debug.getmetatable',
           'debug.getregistry',
           'debug.getupvalue',
           'debug.setfenv',
           'debug.sethook',
           'debug.setlocal',
           'debug.setmetatable',
           'debug.setupvalue',
           'debug.traceback'],
 'io': ['io.close',
        'io.flush',
        'io.input',
        'io.lines',
        'io.open',
        'io.output',
        'io.popen',
        'io.read',
        'io.tmpfile',
        'io.type',
        'io.write'],
 'math': ['math.abs',
          'math.acos',
          'math.asin',
          'math.atan2',
          'math.atan',
          'math.ceil',
          'math.cosh',
          'math.cos',
          'math.deg',
          'math.exp',
          'math.floor',
          'math.fmod',
          'math.frexp',
          'math.huge',
          'math.ldexp',
          'math.log10',
          'math.log',
          'math.max',
          'math.min',
          'math.modf',
          'math.pi',
          'math.pow',
          'math.rad',
          'math.random',
          'math.randomseed',
          'math.sinh',
          'math.sin',
          'math.sqrt',
          'math.tanh',
          'math.tan'],
 'modules': ['module',
             'require',
             'package.cpath',
             'package.loaded',
             'package.loadlib',
             'package.path',
             'package.preload',
             'package.seeall'],
 'os': ['os.clock',
        'os.date',
        'os.difftime',
        'os.execute',
        'os.exit',
        'os.getenv',
        'os.remove',
        'os.rename',
        'os.setlocale',
        'os.time',
        'os.tmpname'],
 'string': ['string.byte',
            'string.char',
            'string.dump',
            'string.find',
            'string.format',
            'string.gmatch',
            'string.gsub',
            'string.len',
            'string.lower',
            'string.match',
            'string.rep',
            'string.reverse',
            'string.sub',
            'string.upper'],
 'table': ['table.concat',
           'table.insert',
           'table.maxn',
           'table.remove',
           'table.sort']}

if __name__ == '__main__':
    import re
    import urllib
    import pprint

    # you can't generally find out what module a function belongs to if you
    # have only its name. Because of this, here are some callback functions
    # that recognize if a given function belongs to a specific module
    def module_callbacks():
        def is_in_coroutine_module(name):
            return name.startswith('coroutine.')

        def is_in_modules_module(name):
            if name in ['require', 'module'] or name.startswith('package'):
                return True
            else:
                return False

        def is_in_string_module(name):
            return name.startswith('string.')

        def is_in_table_module(name):
            return name.startswith('table.')

        def is_in_math_module(name):
            return name.startswith('math')

        def is_in_io_module(name):
            return name.startswith('io.')

        def is_in_os_module(name):
            return name.startswith('os.')

        def is_in_debug_module(name):
            return name.startswith('debug.')

        return {'coroutine': is_in_coroutine_module,
                'modules': is_in_modules_module,
                'string': is_in_string_module,
                'table': is_in_table_module,
                'math': is_in_math_module,
                'io': is_in_io_module,
                'os': is_in_os_module,
                'debug': is_in_debug_module}


    def get_newest_version():
        f = urllib.urlopen('http://www.lua.org/manual/')
        r = re.compile(r'^<A HREF="(\d\.\d)/">Lua \1</A>')
        for line in f:
            m = r.match(line)
            if m is not None:
                return m.groups()[0]

    def get_lua_functions(version):
        f = urllib.urlopen('http://www.lua.org/manual/%s/' % version)
        r = re.compile(r'^<A HREF="manual.html#pdf-(.+)">\1</A>')
        functions = []
        for line in f:
            m = r.match(line)
            if m is not None:
                functions.append(m.groups()[0])
        return functions

    def get_function_module(name):
        for mod, cb in module_callbacks().iteritems():
            if cb(name):
                return mod
        if '.' in name:
            return name.split('.')[0]
        else:
            return 'basic'

    def regenerate(filename, modules):
        f = open(filename)
        try:
            content = f.read()
        finally:
            f.close()

        header = content[:content.find('MODULES = {')]
        footer = content[content.find("if __name__ == '__main__':"):]

        f = open(filename, 'w')
        f.write(header)
        f.write('MODULES = %s\n\n' % pprint.pformat(modules))
        f.write(footer)
        f.close()

    def run():
        version = get_newest_version()
        print '> Downloading function index for Lua %s' % version
        functions = get_lua_functions(version)
        print '> %d functions found:' % len(functions)

        modules = {}
        for full_function_name in functions:
            print '>> %s' % full_function_name
            m = get_function_module(full_function_name)
            modules.setdefault(m, []).append(full_function_name)

        regenerate(__file__, modules)


    run()
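Name lists like MODULES exist so a lexer can re-tag plain identifiers as builtins. A hedged sketch of that pattern (illustrative only; this is not the vendored LuaLexer's actual code):

# Illustrative sketch, not the vendored LuaLexer's actual code.
from pygments.lexers._luabuiltins import MODULES
from pygments.token import Name

# flatten {'module': [names, ...]} into one lookup set
BUILTINS = set()
for names in MODULES.itervalues():
    BUILTINS.update(names)

def retag_builtins(token_stream):
    # re-tag plain Name tokens whose text is a known Lua builtin
    for index, token, value in token_stream:
        if token is Name and value in BUILTINS:
            token = Name.Builtin
        yield index, token, value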
350
packages/wakatime/wakatime/packages/pygments2/lexers/_mapping.py
Normal file

@@ -0,0 +1,350 @@
# -*- coding: utf-8 -*-
"""
    pygments.lexers._mapping
    ~~~~~~~~~~~~~~~~~~~~~~~~

    Lexer mapping definitions. This file is generated by itself. Every time
    you change something on a builtin lexer definition, run this script from
    the lexers folder to update it.

    Do not alter the LEXERS dictionary by hand.

    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

LEXERS = {
    'ABAPLexer': ('pygments.lexers.other', 'ABAP', ('abap',), ('*.abap',), ('text/x-abap',)),
    'ActionScript3Lexer': ('pygments.lexers.web', 'ActionScript 3', ('as3', 'actionscript3'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')),
    'ActionScriptLexer': ('pygments.lexers.web', 'ActionScript', ('as', 'actionscript'), ('*.as',), ('application/x-actionscript3', 'text/x-actionscript3', 'text/actionscript3')),
    'AdaLexer': ('pygments.lexers.compiled', 'Ada', ('ada', 'ada95ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)),
    'AgdaLexer': ('pygments.lexers.functional', 'Agda', ('agda',), ('*.agda',), ('text/x-agda',)),
    'AntlrActionScriptLexer': ('pygments.lexers.parsers', 'ANTLR With ActionScript Target', ('antlr-as', 'antlr-actionscript'), ('*.G', '*.g'), ()),
    'AntlrCSharpLexer': ('pygments.lexers.parsers', 'ANTLR With C# Target', ('antlr-csharp', 'antlr-c#'), ('*.G', '*.g'), ()),
    'AntlrCppLexer': ('pygments.lexers.parsers', 'ANTLR With CPP Target', ('antlr-cpp',), ('*.G', '*.g'), ()),
    'AntlrJavaLexer': ('pygments.lexers.parsers', 'ANTLR With Java Target', ('antlr-java',), ('*.G', '*.g'), ()),
    'AntlrLexer': ('pygments.lexers.parsers', 'ANTLR', ('antlr',), (), ()),
    'AntlrObjectiveCLexer': ('pygments.lexers.parsers', 'ANTLR With ObjectiveC Target', ('antlr-objc',), ('*.G', '*.g'), ()),
    'AntlrPerlLexer': ('pygments.lexers.parsers', 'ANTLR With Perl Target', ('antlr-perl',), ('*.G', '*.g'), ()),
    'AntlrPythonLexer': ('pygments.lexers.parsers', 'ANTLR With Python Target', ('antlr-python',), ('*.G', '*.g'), ()),
    'AntlrRubyLexer': ('pygments.lexers.parsers', 'ANTLR With Ruby Target', ('antlr-ruby', 'antlr-rb'), ('*.G', '*.g'), ()),
    'ApacheConfLexer': ('pygments.lexers.text', 'ApacheConf', ('apacheconf', 'aconf', 'apache'), ('.htaccess', 'apache.conf', 'apache2.conf'), ('text/x-apacheconf',)),
    'AppleScriptLexer': ('pygments.lexers.other', 'AppleScript', ('applescript',), ('*.applescript',), ()),
    'AspectJLexer': ('pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)),
    'AsymptoteLexer': ('pygments.lexers.other', 'Asymptote', ('asy', 'asymptote'), ('*.asy',), ('text/x-asymptote',)),
    'AutoItLexer': ('pygments.lexers.other', 'AutoIt', ('autoit', 'Autoit'), ('*.au3',), ('text/x-autoit',)),
    'AutohotkeyLexer': ('pygments.lexers.other', 'autohotkey', ('ahk', 'autohotkey'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)),
    'AwkLexer': ('pygments.lexers.other', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)),
    'BBCodeLexer': ('pygments.lexers.text', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)),
    'BaseMakefileLexer': ('pygments.lexers.text', 'Base Makefile', ('basemake',), (), ()),
    'BashLexer': ('pygments.lexers.shell', 'Bash', ('bash', 'sh', 'ksh'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', '.bashrc', 'bashrc', '.bash_*', 'bash_*'), ('application/x-sh', 'application/x-shellscript')),
    'BashSessionLexer': ('pygments.lexers.shell', 'Bash Session', ('console',), ('*.sh-session',), ('application/x-shell-session',)),
    'BatchLexer': ('pygments.lexers.shell', 'Batchfile', ('bat', 'dosbatch', 'winbatch'), ('*.bat', '*.cmd'), ('application/x-dos-batch',)),
    'BefungeLexer': ('pygments.lexers.other', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)),
    'BlitzBasicLexer': ('pygments.lexers.compiled', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)),
    'BlitzMaxLexer': ('pygments.lexers.compiled', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)),
    'BooLexer': ('pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)),
    'BrainfuckLexer': ('pygments.lexers.other', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)),
    'BroLexer': ('pygments.lexers.other', 'Bro', ('bro',), ('*.bro',), ()),
    'BugsLexer': ('pygments.lexers.math', 'BUGS', ('bugs', 'winbugs', 'openbugs'), ('*.bug',), ()),
    'CLexer': ('pygments.lexers.compiled', 'C', ('c',), ('*.c', '*.h', '*.idc'), ('text/x-chdr', 'text/x-csrc')),
    'CMakeLexer': ('pygments.lexers.text', 'CMake', ('cmake',), ('*.cmake', 'CMakeLists.txt'), ('text/x-cmake',)),
    'CObjdumpLexer': ('pygments.lexers.asm', 'c-objdump', ('c-objdump',), ('*.c-objdump',), ('text/x-c-objdump',)),
    'CSharpAspxLexer': ('pygments.lexers.dotnet', 'aspx-cs', ('aspx-cs',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
    'CSharpLexer': ('pygments.lexers.dotnet', 'C#', ('csharp', 'c#'), ('*.cs',), ('text/x-csharp',)),
    'Ca65Lexer': ('pygments.lexers.asm', 'ca65', ('ca65',), ('*.s',), ()),
    'CbmBasicV2Lexer': ('pygments.lexers.other', 'CBM BASIC V2', ('cbmbas',), ('*.bas',), ()),
    'CeylonLexer': ('pygments.lexers.jvm', 'Ceylon', ('ceylon',), ('*.ceylon',), ('text/x-ceylon',)),
    'Cfengine3Lexer': ('pygments.lexers.other', 'CFEngine3', ('cfengine3', 'cf3'), ('*.cf',), ()),
    'CheetahHtmlLexer': ('pygments.lexers.templates', 'HTML+Cheetah', ('html+cheetah', 'html+spitfire', 'htmlcheetah'), (), ('text/html+cheetah', 'text/html+spitfire')),
    'CheetahJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Cheetah', ('js+cheetah', 'javascript+cheetah', 'js+spitfire', 'javascript+spitfire'), (), ('application/x-javascript+cheetah', 'text/x-javascript+cheetah', 'text/javascript+cheetah', 'application/x-javascript+spitfire', 'text/x-javascript+spitfire', 'text/javascript+spitfire')),
    'CheetahLexer': ('pygments.lexers.templates', 'Cheetah', ('cheetah', 'spitfire'), ('*.tmpl', '*.spt'), ('application/x-cheetah', 'application/x-spitfire')),
    'CheetahXmlLexer': ('pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')),
    'ClayLexer': ('pygments.lexers.compiled', 'Clay', ('clay',), ('*.clay',), ('text/x-clay',)),
    'ClojureLexer': ('pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj',), ('text/x-clojure', 'application/x-clojure')),
    'CobolFreeformatLexer': ('pygments.lexers.compiled', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()),
    'CobolLexer': ('pygments.lexers.compiled', 'COBOL', ('cobol',), ('*.cob', '*.COB', '*.cpy', '*.CPY'), ('text/x-cobol',)),
    'CoffeeScriptLexer': ('pygments.lexers.web', 'CoffeeScript', ('coffee-script', 'coffeescript', 'coffee'), ('*.coffee',), ('text/coffeescript',)),
    'ColdfusionHtmlLexer': ('pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml', '*.cfc'), ('application/x-coldfusion',)),
    'ColdfusionLexer': ('pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()),
    'CommonLispLexer': ('pygments.lexers.functional', 'Common Lisp', ('common-lisp', 'cl', 'lisp'), ('*.cl', '*.lisp', '*.el'), ('text/x-common-lisp',)),
    'CoqLexer': ('pygments.lexers.functional', 'Coq', ('coq',), ('*.v',), ('text/x-coq',)),
    'CppLexer': ('pygments.lexers.compiled', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP'), ('text/x-c++hdr', 'text/x-c++src')),
    'CppObjdumpLexer': ('pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'), ('text/x-cpp-objdump',)),
    'CrocLexer': ('pygments.lexers.agile', 'Croc', ('croc',), ('*.croc',), ('text/x-crocsrc',)),
    'CssDjangoLexer': ('pygments.lexers.templates', 'CSS+Django/Jinja', ('css+django', 'css+jinja'), (), ('text/css+django', 'text/css+jinja')),
    'CssErbLexer': ('pygments.lexers.templates', 'CSS+Ruby', ('css+erb', 'css+ruby'), (), ('text/css+ruby',)),
    'CssGenshiLexer': ('pygments.lexers.templates', 'CSS+Genshi Text', ('css+genshitext', 'css+genshi'), (), ('text/css+genshi',)),
    'CssLexer': ('pygments.lexers.web', 'CSS', ('css',), ('*.css',), ('text/css',)),
    'CssPhpLexer': ('pygments.lexers.templates', 'CSS+PHP', ('css+php',), (), ('text/css+php',)),
    'CssSmartyLexer': ('pygments.lexers.templates', 'CSS+Smarty', ('css+smarty',), (), ('text/css+smarty',)),
    'CudaLexer': ('pygments.lexers.compiled', 'CUDA', ('cuda', 'cu'), ('*.cu', '*.cuh'), ('text/x-cuda',)),
    'CythonLexer': ('pygments.lexers.compiled', 'Cython', ('cython', 'pyx', 'pyrex'), ('*.pyx', '*.pxd', '*.pxi'), ('text/x-cython', 'application/x-cython')),
    'DLexer': ('pygments.lexers.compiled', 'D', ('d',), ('*.d', '*.di'), ('text/x-dsrc',)),
    'DObjdumpLexer': ('pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)),
    'DarcsPatchLexer': ('pygments.lexers.text', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()),
    'DartLexer': ('pygments.lexers.web', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)),
    'DebianControlLexer': ('pygments.lexers.text', 'Debian Control file', ('control', 'debcontrol'), ('control',), ()),
    'DelphiLexer': ('pygments.lexers.compiled', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas',), ('text/x-pascal',)),
    'DgLexer': ('pygments.lexers.agile', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)),
    'DiffLexer': ('pygments.lexers.text', 'Diff', ('diff', 'udiff'), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')),
    'DjangoLexer': ('pygments.lexers.templates', 'Django/Jinja', ('django', 'jinja'), (), ('application/x-django-templating', 'application/x-jinja')),
    'DtdLexer': ('pygments.lexers.web', 'DTD', ('dtd',), ('*.dtd',), ('application/xml-dtd',)),
    'DuelLexer': ('pygments.lexers.web', 'Duel', ('duel', 'Duel Engine', 'Duel View', 'JBST', 'jbst', 'JsonML+BST'), ('*.duel', '*.jbst'), ('text/x-duel', 'text/x-jbst')),
    'DylanConsoleLexer': ('pygments.lexers.compiled', 'Dylan session', ('dylan-console', 'dylan-repl'), ('*.dylan-console',), ('text/x-dylan-console',)),
    'DylanLexer': ('pygments.lexers.compiled', 'Dylan', ('dylan',), ('*.dylan', '*.dyl', '*.intr'), ('text/x-dylan',)),
    'DylanLidLexer': ('pygments.lexers.compiled', 'DylanLID', ('dylan-lid', 'lid'), ('*.lid', '*.hdp'), ('text/x-dylan-lid',)),
    'ECLLexer': ('pygments.lexers.other', 'ECL', ('ecl',), ('*.ecl',), ('application/x-ecl',)),
    'ECLexer': ('pygments.lexers.compiled', 'eC', ('ec',), ('*.ec', '*.eh'), ('text/x-echdr', 'text/x-ecsrc')),
    'EbnfLexer': ('pygments.lexers.text', 'EBNF', ('ebnf',), ('*.ebnf',), ('text/x-ebnf',)),
    'ElixirConsoleLexer': ('pygments.lexers.functional', 'Elixir iex session', ('iex',), (), ('text/x-elixir-shellsession',)),
    'ElixirLexer': ('pygments.lexers.functional', 'Elixir', ('elixir', 'ex', 'exs'), ('*.ex', '*.exs'), ('text/x-elixir',)),
    'ErbLexer': ('pygments.lexers.templates', 'ERB', ('erb',), (), ('application/x-ruby-templating',)),
    'ErlangLexer': ('pygments.lexers.functional', 'Erlang', ('erlang',), ('*.erl', '*.hrl', '*.es', '*.escript'), ('text/x-erlang',)),
    'ErlangShellLexer': ('pygments.lexers.functional', 'Erlang erl session', ('erl',), ('*.erl-sh',), ('text/x-erl-shellsession',)),
    'EvoqueHtmlLexer': ('pygments.lexers.templates', 'HTML+Evoque', ('html+evoque',), ('*.html',), ('text/html+evoque',)),
    'EvoqueLexer': ('pygments.lexers.templates', 'Evoque', ('evoque',), ('*.evoque',), ('application/x-evoque',)),
    'EvoqueXmlLexer': ('pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), ('*.xml',), ('application/xml+evoque',)),
    'FSharpLexer': ('pygments.lexers.dotnet', 'FSharp', ('fsharp',), ('*.fs', '*.fsi'), ('text/x-fsharp',)),
    'FactorLexer': ('pygments.lexers.agile', 'Factor', ('factor',), ('*.factor',), ('text/x-factor',)),
    'FancyLexer': ('pygments.lexers.agile', 'Fancy', ('fancy', 'fy'), ('*.fy', '*.fancypack'), ('text/x-fancysrc',)),
    'FantomLexer': ('pygments.lexers.compiled', 'Fantom', ('fan',), ('*.fan',), ('application/x-fantom',)),
    'FelixLexer': ('pygments.lexers.compiled', 'Felix', ('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)),
    'FortranLexer': ('pygments.lexers.compiled', 'Fortran', ('fortran',), ('*.f', '*.f90', '*.F', '*.F90'), ('text/x-fortran',)),
    'FoxProLexer': ('pygments.lexers.foxpro', 'FoxPro', ('Clipper', 'XBase'), ('*.PRG', '*.prg'), ()),
    'GLShaderLexer': ('pygments.lexers.compiled', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)),
    'GasLexer': ('pygments.lexers.asm', 'GAS', ('gas', 'asm'), ('*.s', '*.S'), ('text/x-gas',)),
    'GenshiLexer': ('pygments.lexers.templates', 'Genshi', ('genshi', 'kid', 'xml+genshi', 'xml+kid'), ('*.kid',), ('application/x-genshi', 'application/x-kid')),
    'GenshiTextLexer': ('pygments.lexers.templates', 'Genshi Text', ('genshitext',), (), ('application/x-genshi-text', 'text/x-genshi')),
    'GettextLexer': ('pygments.lexers.text', 'Gettext Catalog', ('pot', 'po'), ('*.pot', '*.po'), ('application/x-gettext', 'text/x-gettext', 'text/gettext')),
    'GherkinLexer': ('pygments.lexers.other', 'Gherkin', ('Cucumber', 'cucumber', 'Gherkin', 'gherkin'), ('*.feature',), ('text/x-gherkin',)),
    'GnuplotLexer': ('pygments.lexers.other', 'Gnuplot', ('gnuplot',), ('*.plot', '*.plt'), ('text/x-gnuplot',)),
    'GoLexer': ('pygments.lexers.compiled', 'Go', ('go',), ('*.go',), ('text/x-gosrc',)),
    'GoodDataCLLexer': ('pygments.lexers.other', 'GoodData-CL', ('gooddata-cl',), ('*.gdc',), ('text/x-gooddata-cl',)),
    'GosuLexer': ('pygments.lexers.jvm', 'Gosu', ('gosu',), ('*.gs', '*.gsx', '*.gsp', '*.vark'), ('text/x-gosu',)),
    'GosuTemplateLexer': ('pygments.lexers.jvm', 'Gosu Template', ('gst',), ('*.gst',), ('text/x-gosu-template',)),
    'GroffLexer': ('pygments.lexers.text', 'Groff', ('groff', 'nroff', 'man'), ('*.[1234567]', '*.man'), ('application/x-troff', 'text/troff')),
    'GroovyLexer': ('pygments.lexers.jvm', 'Groovy', ('groovy',), ('*.groovy',), ('text/x-groovy',)),
    'HamlLexer': ('pygments.lexers.web', 'Haml', ('haml', 'HAML'), ('*.haml',), ('text/x-haml',)),
    'HaskellLexer': ('pygments.lexers.functional', 'Haskell', ('haskell', 'hs'), ('*.hs',), ('text/x-haskell',)),
    'HaxeLexer': ('pygments.lexers.web', 'Haxe', ('hx', 'Haxe', 'haxe', 'haXe', 'hxsl'), ('*.hx', '*.hxsl'), ('text/haxe', 'text/x-haxe', 'text/x-hx')),
    'HtmlDjangoLexer': ('pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), (), ('text/html+django', 'text/html+jinja')),
    'HtmlGenshiLexer': ('pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)),
    'HtmlLexer': ('pygments.lexers.web', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')),
    'HtmlPhpLexer': ('pygments.lexers.templates', 'HTML+PHP', ('html+php',), ('*.phtml',), ('application/x-php', 'application/x-httpd-php', 'application/x-httpd-php3', 'application/x-httpd-php4', 'application/x-httpd-php5')),
    'HtmlSmartyLexer': ('pygments.lexers.templates', 'HTML+Smarty', ('html+smarty',), (), ('text/html+smarty',)),
    'HttpLexer': ('pygments.lexers.text', 'HTTP', ('http',), (), ()),
    'HxmlLexer': ('pygments.lexers.text', 'Hxml', ('haxeml', 'hxml'), ('*.hxml',), ()),
    'HybrisLexer': ('pygments.lexers.other', 'Hybris', ('hybris', 'hy'), ('*.hy', '*.hyb'), ('text/x-hybris', 'application/x-hybris')),
    'IDLLexer': ('pygments.lexers.math', 'IDL', ('idl',), ('*.pro',), ('text/idl',)),
    'IgorLexer': ('pygments.lexers.math', 'Igor', ('igor', 'igorpro'), ('*.ipf',), ('text/ipf',)),
    'IniLexer': ('pygments.lexers.text', 'INI', ('ini', 'cfg', 'dosini'), ('*.ini', '*.cfg'), ('text/x-ini',)),
    'IoLexer': ('pygments.lexers.agile', 'Io', ('io',), ('*.io',), ('text/x-iosrc',)),
    'IokeLexer': ('pygments.lexers.jvm', 'Ioke', ('ioke', 'ik'), ('*.ik',), ('text/x-iokesrc',)),
    'IrcLogsLexer': ('pygments.lexers.text', 'IRC logs', ('irc',), ('*.weechatlog',), ('text/x-irclog',)),
    'JadeLexer': ('pygments.lexers.web', 'Jade', ('jade', 'JADE'), ('*.jade',), ('text/x-jade',)),
    'JagsLexer': ('pygments.lexers.math', 'JAGS', ('jags',), ('*.jag', '*.bug'), ()),
    'JavaLexer': ('pygments.lexers.jvm', 'Java', ('java',), ('*.java',), ('text/x-java',)),
    'JavascriptDjangoLexer': ('pygments.lexers.templates', 'JavaScript+Django/Jinja', ('js+django', 'javascript+django', 'js+jinja', 'javascript+jinja'), (), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')),
    'JavascriptErbLexer': ('pygments.lexers.templates', 'JavaScript+Ruby', ('js+erb', 'javascript+erb', 'js+ruby', 'javascript+ruby'), (), ('application/x-javascript+ruby', 'text/x-javascript+ruby', 'text/javascript+ruby')),
    'JavascriptGenshiLexer': ('pygments.lexers.templates', 'JavaScript+Genshi Text', ('js+genshitext', 'js+genshi', 'javascript+genshitext', 'javascript+genshi'), (), ('application/x-javascript+genshi', 'text/x-javascript+genshi', 'text/javascript+genshi')),
    'JavascriptLexer': ('pygments.lexers.web', 'JavaScript', ('js', 'javascript'), ('*.js',), ('application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript')),
    'JavascriptPhpLexer': ('pygments.lexers.templates', 'JavaScript+PHP', ('js+php', 'javascript+php'), (), ('application/x-javascript+php', 'text/x-javascript+php', 'text/javascript+php')),
    'JavascriptSmartyLexer': ('pygments.lexers.templates', 'JavaScript+Smarty', ('js+smarty', 'javascript+smarty'), (), ('application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty')),
|
||||||
|
'JsonLexer': ('pygments.lexers.web', 'JSON', ('json',), ('*.json',), ('application/json',)),
|
||||||
|
'JspLexer': ('pygments.lexers.templates', 'Java Server Page', ('jsp',), ('*.jsp',), ('application/x-jsp',)),
|
||||||
|
'JuliaConsoleLexer': ('pygments.lexers.math', 'Julia console', ('jlcon',), (), ()),
|
||||||
|
'JuliaLexer': ('pygments.lexers.math', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')),
|
||||||
|
'KconfigLexer': ('pygments.lexers.other', 'Kconfig', ('kconfig', 'menuconfig', 'linux-config', 'kernel-config'), ('Kconfig', '*Config.in*', 'external.in*', 'standard-modules.in'), ('text/x-kconfig',)),
|
||||||
|
'KokaLexer': ('pygments.lexers.functional', 'Koka', ('koka',), ('*.kk', '*.kki'), ('text/x-koka',)),
|
||||||
|
'KotlinLexer': ('pygments.lexers.jvm', 'Kotlin', ('kotlin',), ('*.kt',), ('text/x-kotlin',)),
|
||||||
|
'LassoCssLexer': ('pygments.lexers.templates', 'CSS+Lasso', ('css+lasso',), (), ('text/css+lasso',)),
|
||||||
|
'LassoHtmlLexer': ('pygments.lexers.templates', 'HTML+Lasso', ('html+lasso',), (), ('text/html+lasso', 'application/x-httpd-lasso', 'application/x-httpd-lasso[89]')),
|
||||||
|
'LassoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Lasso', ('js+lasso', 'javascript+lasso'), (), ('application/x-javascript+lasso', 'text/x-javascript+lasso', 'text/javascript+lasso')),
|
||||||
|
'LassoLexer': ('pygments.lexers.web', 'Lasso', ('lasso', 'lassoscript'), ('*.lasso', '*.lasso[89]'), ('text/x-lasso',)),
|
||||||
|
'LassoXmlLexer': ('pygments.lexers.templates', 'XML+Lasso', ('xml+lasso',), (), ('application/xml+lasso',)),
|
||||||
|
'LighttpdConfLexer': ('pygments.lexers.text', 'Lighttpd configuration file', ('lighty', 'lighttpd'), (), ('text/x-lighttpd-conf',)),
|
||||||
|
'LiterateAgdaLexer': ('pygments.lexers.functional', 'Literate Agda', ('lagda', 'literate-agda'), ('*.lagda',), ('text/x-literate-agda',)),
|
||||||
|
'LiterateHaskellLexer': ('pygments.lexers.functional', 'Literate Haskell', ('lhs', 'literate-haskell', 'lhaskell'), ('*.lhs',), ('text/x-literate-haskell',)),
|
||||||
|
'LiveScriptLexer': ('pygments.lexers.web', 'LiveScript', ('live-script', 'livescript'), ('*.ls',), ('text/livescript',)),
|
||||||
|
'LlvmLexer': ('pygments.lexers.asm', 'LLVM', ('llvm',), ('*.ll',), ('text/x-llvm',)),
|
||||||
|
'LogosLexer': ('pygments.lexers.compiled', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)),
|
||||||
|
'LogtalkLexer': ('pygments.lexers.other', 'Logtalk', ('logtalk',), ('*.lgt',), ('text/x-logtalk',)),
|
||||||
|
'LuaLexer': ('pygments.lexers.agile', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')),
|
||||||
|
'MOOCodeLexer': ('pygments.lexers.other', 'MOOCode', ('moocode', 'moo'), ('*.moo',), ('text/x-moocode',)),
|
||||||
|
'MakefileLexer': ('pygments.lexers.text', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)),
|
||||||
|
'MakoCssLexer': ('pygments.lexers.templates', 'CSS+Mako', ('css+mako',), (), ('text/css+mako',)),
|
||||||
|
'MakoHtmlLexer': ('pygments.lexers.templates', 'HTML+Mako', ('html+mako',), (), ('text/html+mako',)),
|
||||||
|
'MakoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Mako', ('js+mako', 'javascript+mako'), (), ('application/x-javascript+mako', 'text/x-javascript+mako', 'text/javascript+mako')),
|
||||||
|
'MakoLexer': ('pygments.lexers.templates', 'Mako', ('mako',), ('*.mao',), ('application/x-mako',)),
|
||||||
|
'MakoXmlLexer': ('pygments.lexers.templates', 'XML+Mako', ('xml+mako',), (), ('application/xml+mako',)),
|
||||||
|
'MaqlLexer': ('pygments.lexers.other', 'MAQL', ('maql',), ('*.maql',), ('text/x-gooddata-maql', 'application/x-gooddata-maql')),
|
||||||
|
'MasonLexer': ('pygments.lexers.templates', 'Mason', ('mason',), ('*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler'), ('application/x-mason',)),
|
||||||
|
'MatlabLexer': ('pygments.lexers.math', 'Matlab', ('matlab',), ('*.m',), ('text/matlab',)),
|
||||||
|
'MatlabSessionLexer': ('pygments.lexers.math', 'Matlab session', ('matlabsession',), (), ()),
|
||||||
|
'MiniDLexer': ('pygments.lexers.agile', 'MiniD', ('minid',), ('*.md',), ('text/x-minidsrc',)),
|
||||||
|
'ModelicaLexer': ('pygments.lexers.other', 'Modelica', ('modelica',), ('*.mo',), ('text/x-modelica',)),
|
||||||
|
'Modula2Lexer': ('pygments.lexers.compiled', 'Modula-2', ('modula2', 'm2'), ('*.def', '*.mod'), ('text/x-modula2',)),
|
||||||
|
'MoinWikiLexer': ('pygments.lexers.text', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)),
|
||||||
|
'MonkeyLexer': ('pygments.lexers.compiled', 'Monkey', ('monkey',), ('*.monkey',), ('text/x-monkey',)),
|
||||||
|
'MoonScriptLexer': ('pygments.lexers.agile', 'MoonScript', ('moon', 'moonscript'), ('*.moon',), ('text/x-moonscript', 'application/x-moonscript')),
|
||||||
|
'MscgenLexer': ('pygments.lexers.other', 'Mscgen', ('mscgen', 'msc'), ('*.msc',), ()),
|
||||||
|
'MuPADLexer': ('pygments.lexers.math', 'MuPAD', ('mupad',), ('*.mu',), ()),
|
||||||
|
'MxmlLexer': ('pygments.lexers.web', 'MXML', ('mxml',), ('*.mxml',), ()),
|
||||||
|
'MySqlLexer': ('pygments.lexers.sql', 'MySQL', ('mysql',), (), ('text/x-mysql',)),
|
||||||
|
'MyghtyCssLexer': ('pygments.lexers.templates', 'CSS+Myghty', ('css+myghty',), (), ('text/css+myghty',)),
|
||||||
|
'MyghtyHtmlLexer': ('pygments.lexers.templates', 'HTML+Myghty', ('html+myghty',), (), ('text/html+myghty',)),
|
||||||
|
'MyghtyJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Myghty', ('js+myghty', 'javascript+myghty'), (), ('application/x-javascript+myghty', 'text/x-javascript+myghty', 'text/javascript+mygthy')),
|
||||||
|
'MyghtyLexer': ('pygments.lexers.templates', 'Myghty', ('myghty',), ('*.myt', 'autodelegate'), ('application/x-myghty',)),
|
||||||
|
'MyghtyXmlLexer': ('pygments.lexers.templates', 'XML+Myghty', ('xml+myghty',), (), ('application/xml+myghty',)),
|
||||||
|
'NSISLexer': ('pygments.lexers.other', 'NSIS', ('nsis', 'nsi', 'nsh'), ('*.nsi', '*.nsh'), ('text/x-nsis',)),
|
||||||
|
'NasmLexer': ('pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM'), ('text/x-nasm',)),
|
||||||
|
'NemerleLexer': ('pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), ('text/x-nemerle',)),
|
||||||
|
'NesCLexer': ('pygments.lexers.compiled', 'nesC', ('nesc',), ('*.nc',), ('text/x-nescsrc',)),
|
||||||
|
'NewLispLexer': ('pygments.lexers.functional', 'NewLisp', ('newlisp',), ('*.lsp', '*.nl'), ('text/x-newlisp', 'application/x-newlisp')),
|
||||||
|
'NewspeakLexer': ('pygments.lexers.other', 'Newspeak', ('newspeak',), ('*.ns2',), ('text/x-newspeak',)),
|
||||||
|
'NginxConfLexer': ('pygments.lexers.text', 'Nginx configuration file', ('nginx',), (), ('text/x-nginx-conf',)),
|
||||||
|
'NimrodLexer': ('pygments.lexers.compiled', 'Nimrod', ('nimrod', 'nim'), ('*.nim', '*.nimrod'), ('text/x-nimrod',)),
|
||||||
|
'NumPyLexer': ('pygments.lexers.math', 'NumPy', ('numpy',), (), ()),
|
||||||
|
'ObjdumpLexer': ('pygments.lexers.asm', 'objdump', ('objdump',), ('*.objdump',), ('text/x-objdump',)),
|
||||||
|
'ObjectiveCLexer': ('pygments.lexers.compiled', 'Objective-C', ('objective-c', 'objectivec', 'obj-c', 'objc'), ('*.m', '*.h'), ('text/x-objective-c',)),
|
||||||
|
'ObjectiveCppLexer': ('pygments.lexers.compiled', 'Objective-C++', ('objective-c++', 'objectivec++', 'obj-c++', 'objc++'), ('*.mm', '*.hh'), ('text/x-objective-c++',)),
|
||||||
|
'ObjectiveJLexer': ('pygments.lexers.web', 'Objective-J', ('objective-j', 'objectivej', 'obj-j', 'objj'), ('*.j',), ('text/x-objective-j',)),
|
||||||
|
'OcamlLexer': ('pygments.lexers.functional', 'OCaml', ('ocaml',), ('*.ml', '*.mli', '*.mll', '*.mly'), ('text/x-ocaml',)),
|
||||||
|
'OctaveLexer': ('pygments.lexers.math', 'Octave', ('octave',), ('*.m',), ('text/octave',)),
|
||||||
|
'OocLexer': ('pygments.lexers.compiled', 'Ooc', ('ooc',), ('*.ooc',), ('text/x-ooc',)),
|
||||||
|
'OpaLexer': ('pygments.lexers.functional', 'Opa', ('opa',), ('*.opa',), ('text/x-opa',)),
|
||||||
|
'OpenEdgeLexer': ('pygments.lexers.other', 'OpenEdge ABL', ('openedge', 'abl', 'progress'), ('*.p', '*.cls'), ('text/x-openedge', 'application/x-openedge')),
|
||||||
|
'Perl6Lexer': ('pygments.lexers.agile', 'Perl6', ('perl6', 'pl6'), ('*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', '*.6pm', '*.p6m', '*.pm6'), ('text/x-perl6', 'application/x-perl6')),
|
||||||
|
'PerlLexer': ('pygments.lexers.agile', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm'), ('text/x-perl', 'application/x-perl')),
|
||||||
|
'PhpLexer': ('pygments.lexers.web', 'PHP', ('php', 'php3', 'php4', 'php5'), ('*.php', '*.php[345]', '*.inc'), ('text/x-php',)),
|
||||||
|
'PlPgsqlLexer': ('pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)),
|
||||||
|
'PostScriptLexer': ('pygments.lexers.other', 'PostScript', ('postscript', 'postscr'), ('*.ps', '*.eps'), ('application/postscript',)),
|
||||||
|
'PostgresConsoleLexer': ('pygments.lexers.sql', 'PostgreSQL console (psql)', ('psql', 'postgresql-console', 'postgres-console'), (), ('text/x-postgresql-psql',)),
|
||||||
|
'PostgresLexer': ('pygments.lexers.sql', 'PostgreSQL SQL dialect', ('postgresql', 'postgres'), (), ('text/x-postgresql',)),
|
||||||
|
'PovrayLexer': ('pygments.lexers.other', 'POVRay', ('pov',), ('*.pov', '*.inc'), ('text/x-povray',)),
|
||||||
|
'PowerShellLexer': ('pygments.lexers.shell', 'PowerShell', ('powershell', 'posh', 'ps1', 'psm1'), ('*.ps1', '*.psm1'), ('text/x-powershell',)),
|
||||||
|
'PrologLexer': ('pygments.lexers.compiled', 'Prolog', ('prolog',), ('*.prolog', '*.pro', '*.pl'), ('text/x-prolog',)),
|
||||||
|
'PropertiesLexer': ('pygments.lexers.text', 'Properties', ('properties', 'jproperties'), ('*.properties',), ('text/x-java-properties',)),
|
||||||
|
'ProtoBufLexer': ('pygments.lexers.other', 'Protocol Buffer', ('protobuf', 'proto'), ('*.proto',), ()),
|
||||||
|
'PuppetLexer': ('pygments.lexers.other', 'Puppet', ('puppet',), ('*.pp',), ()),
|
||||||
|
'PyPyLogLexer': ('pygments.lexers.text', 'PyPy Log', ('pypylog', 'pypy'), ('*.pypylog',), ('application/x-pypylog',)),
|
||||||
|
'Python3Lexer': ('pygments.lexers.agile', 'Python 3', ('python3', 'py3'), (), ('text/x-python3', 'application/x-python3')),
|
||||||
|
'Python3TracebackLexer': ('pygments.lexers.agile', 'Python 3.0 Traceback', ('py3tb',), ('*.py3tb',), ('text/x-python3-traceback',)),
|
||||||
|
'PythonConsoleLexer': ('pygments.lexers.agile', 'Python console session', ('pycon',), (), ('text/x-python-doctest',)),
|
||||||
|
'PythonLexer': ('pygments.lexers.agile', 'Python', ('python', 'py', 'sage'), ('*.py', '*.pyw', '*.sc', 'SConstruct', 'SConscript', '*.tac', '*.sage'), ('text/x-python', 'application/x-python')),
|
||||||
|
'PythonTracebackLexer': ('pygments.lexers.agile', 'Python Traceback', ('pytb',), ('*.pytb',), ('text/x-python-traceback',)),
|
||||||
|
'QmlLexer': ('pygments.lexers.web', 'QML', ('qml', 'Qt Meta Language', 'Qt modeling Language'), ('*.qml',), ('application/x-qml',)),
|
||||||
|
'RConsoleLexer': ('pygments.lexers.math', 'RConsole', ('rconsole', 'rout'), ('*.Rout',), ()),
|
||||||
|
'RPMSpecLexer': ('pygments.lexers.other', 'RPMSpec', ('spec',), ('*.spec',), ('text/x-rpm-spec',)),
|
||||||
|
'RacketLexer': ('pygments.lexers.functional', 'Racket', ('racket', 'rkt'), ('*.rkt', '*.rktl'), ('text/x-racket', 'application/x-racket')),
|
||||||
|
'RagelCLexer': ('pygments.lexers.parsers', 'Ragel in C Host', ('ragel-c',), ('*.rl',), ()),
|
||||||
|
'RagelCppLexer': ('pygments.lexers.parsers', 'Ragel in CPP Host', ('ragel-cpp',), ('*.rl',), ()),
|
||||||
|
'RagelDLexer': ('pygments.lexers.parsers', 'Ragel in D Host', ('ragel-d',), ('*.rl',), ()),
|
||||||
|
'RagelEmbeddedLexer': ('pygments.lexers.parsers', 'Embedded Ragel', ('ragel-em',), ('*.rl',), ()),
|
||||||
|
'RagelJavaLexer': ('pygments.lexers.parsers', 'Ragel in Java Host', ('ragel-java',), ('*.rl',), ()),
|
||||||
|
'RagelLexer': ('pygments.lexers.parsers', 'Ragel', ('ragel',), (), ()),
|
||||||
|
'RagelObjectiveCLexer': ('pygments.lexers.parsers', 'Ragel in Objective C Host', ('ragel-objc',), ('*.rl',), ()),
|
||||||
|
'RagelRubyLexer': ('pygments.lexers.parsers', 'Ragel in Ruby Host', ('ragel-ruby', 'ragel-rb'), ('*.rl',), ()),
|
||||||
|
'RawTokenLexer': ('pygments.lexers.special', 'Raw token data', ('raw',), (), ('application/x-pygments-tokens',)),
|
||||||
|
'RdLexer': ('pygments.lexers.math', 'Rd', ('rd',), ('*.Rd',), ('text/x-r-doc',)),
|
||||||
|
'RebolLexer': ('pygments.lexers.other', 'REBOL', ('rebol',), ('*.r', '*.r3'), ('text/x-rebol',)),
|
||||||
|
'RedcodeLexer': ('pygments.lexers.other', 'Redcode', ('redcode',), ('*.cw',), ()),
|
||||||
|
'RegeditLexer': ('pygments.lexers.text', 'reg', ('registry',), ('*.reg',), ('text/x-windows-registry',)),
|
||||||
|
'RexxLexer': ('pygments.lexers.other', 'Rexx', ('rexx', 'ARexx', 'arexx'), ('*.rexx', '*.rex', '*.rx', '*.arexx'), ('text/x-rexx',)),
|
||||||
|
'RhtmlLexer': ('pygments.lexers.templates', 'RHTML', ('rhtml', 'html+erb', 'html+ruby'), ('*.rhtml',), ('text/html+ruby',)),
|
||||||
|
'RobotFrameworkLexer': ('pygments.lexers.other', 'RobotFramework', ('RobotFramework', 'robotframework'), ('*.txt', '*.robot'), ('text/x-robotframework',)),
|
||||||
|
'RstLexer': ('pygments.lexers.text', 'reStructuredText', ('rst', 'rest', 'restructuredtext'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')),
|
||||||
|
'RubyConsoleLexer': ('pygments.lexers.agile', 'Ruby irb session', ('rbcon', 'irb'), (), ('text/x-ruby-shellsession',)),
|
||||||
|
'RubyLexer': ('pygments.lexers.agile', 'Ruby', ('rb', 'ruby', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby'), ('text/x-ruby', 'application/x-ruby')),
|
||||||
|
'RustLexer': ('pygments.lexers.compiled', 'Rust', ('rust',), ('*.rs', '*.rc'), ('text/x-rustsrc',)),
|
||||||
|
'SLexer': ('pygments.lexers.math', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')),
|
||||||
|
'SMLLexer': ('pygments.lexers.functional', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')),
|
||||||
|
'SassLexer': ('pygments.lexers.web', 'Sass', ('sass', 'SASS'), ('*.sass',), ('text/x-sass',)),
|
||||||
|
'ScalaLexer': ('pygments.lexers.jvm', 'Scala', ('scala',), ('*.scala',), ('text/x-scala',)),
|
||||||
|
'ScamlLexer': ('pygments.lexers.web', 'Scaml', ('scaml', 'SCAML'), ('*.scaml',), ('text/x-scaml',)),
|
||||||
|
'SchemeLexer': ('pygments.lexers.functional', 'Scheme', ('scheme', 'scm'), ('*.scm', '*.ss'), ('text/x-scheme', 'application/x-scheme')),
|
||||||
|
'ScilabLexer': ('pygments.lexers.math', 'Scilab', ('scilab',), ('*.sci', '*.sce', '*.tst'), ('text/scilab',)),
|
||||||
|
'ScssLexer': ('pygments.lexers.web', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)),
|
||||||
|
'ShellSessionLexer': ('pygments.lexers.shell', 'Shell Session', ('shell-session',), ('*.shell-session',), ('application/x-sh-session',)),
|
||||||
|
'SmaliLexer': ('pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)),
|
||||||
|
'SmalltalkLexer': ('pygments.lexers.other', 'Smalltalk', ('smalltalk', 'squeak', 'st'), ('*.st',), ('text/x-smalltalk',)),
|
||||||
|
'SmartyLexer': ('pygments.lexers.templates', 'Smarty', ('smarty',), ('*.tpl',), ('application/x-smarty',)),
|
||||||
|
'SnobolLexer': ('pygments.lexers.other', 'Snobol', ('snobol',), ('*.snobol',), ('text/x-snobol',)),
|
||||||
|
'SourcePawnLexer': ('pygments.lexers.other', 'SourcePawn', ('sp',), ('*.sp',), ('text/x-sourcepawn',)),
|
||||||
|
'SourcesListLexer': ('pygments.lexers.text', 'Debian Sourcelist', ('sourceslist', 'sources.list', 'debsources'), ('sources.list',), ()),
|
||||||
|
'SqlLexer': ('pygments.lexers.sql', 'SQL', ('sql',), ('*.sql',), ('text/x-sql',)),
|
||||||
|
'SqliteConsoleLexer': ('pygments.lexers.sql', 'sqlite3con', ('sqlite3',), ('*.sqlite3-console',), ('text/x-sqlite3-console',)),
|
||||||
|
'SquidConfLexer': ('pygments.lexers.text', 'SquidConf', ('squidconf', 'squid.conf', 'squid'), ('squid.conf',), ('text/x-squidconf',)),
|
||||||
|
'SspLexer': ('pygments.lexers.templates', 'Scalate Server Page', ('ssp',), ('*.ssp',), ('application/x-ssp',)),
|
||||||
|
'StanLexer': ('pygments.lexers.math', 'Stan', ('stan',), ('*.stan',), ()),
|
||||||
|
'SwigLexer': ('pygments.lexers.compiled', 'SWIG', ('Swig', 'swig'), ('*.swg', '*.i'), ('text/swig',)),
|
||||||
|
'SystemVerilogLexer': ('pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)),
|
||||||
|
'TclLexer': ('pygments.lexers.agile', 'Tcl', ('tcl',), ('*.tcl',), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')),
|
||||||
|
'TcshLexer': ('pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)),
|
||||||
|
'TeaTemplateLexer': ('pygments.lexers.templates', 'Tea', ('tea',), ('*.tea',), ('text/x-tea',)),
|
||||||
|
'TexLexer': ('pygments.lexers.text', 'TeX', ('tex', 'latex'), ('*.tex', '*.aux', '*.toc'), ('text/x-tex', 'text/x-latex')),
|
||||||
|
'TextLexer': ('pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)),
|
||||||
|
'TreetopLexer': ('pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()),
|
||||||
|
'TypeScriptLexer': ('pygments.lexers.web', 'TypeScript', ('ts',), ('*.ts',), ('text/x-typescript',)),
|
||||||
|
'UrbiscriptLexer': ('pygments.lexers.other', 'UrbiScript', ('urbiscript',), ('*.u',), ('application/x-urbiscript',)),
|
||||||
|
'VGLLexer': ('pygments.lexers.other', 'VGL', ('vgl',), ('*.rpf',), ()),
|
||||||
|
'ValaLexer': ('pygments.lexers.compiled', 'Vala', ('vala', 'vapi'), ('*.vala', '*.vapi'), ('text/x-vala',)),
|
||||||
|
'VbNetAspxLexer': ('pygments.lexers.dotnet', 'aspx-vb', ('aspx-vb',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
|
||||||
|
'VbNetLexer': ('pygments.lexers.dotnet', 'VB.net', ('vb.net', 'vbnet'), ('*.vb', '*.bas'), ('text/x-vbnet', 'text/x-vba')),
|
||||||
|
'VelocityHtmlLexer': ('pygments.lexers.templates', 'HTML+Velocity', ('html+velocity',), (), ('text/html+velocity',)),
|
||||||
|
'VelocityLexer': ('pygments.lexers.templates', 'Velocity', ('velocity',), ('*.vm', '*.fhtml'), ()),
|
||||||
|
'VelocityXmlLexer': ('pygments.lexers.templates', 'XML+Velocity', ('xml+velocity',), (), ('application/xml+velocity',)),
|
||||||
|
'VerilogLexer': ('pygments.lexers.hdl', 'verilog', ('verilog', 'v'), ('*.v',), ('text/x-verilog',)),
|
||||||
|
'VhdlLexer': ('pygments.lexers.hdl', 'vhdl', ('vhdl',), ('*.vhdl', '*.vhd'), ('text/x-vhdl',)),
|
||||||
|
'VimLexer': ('pygments.lexers.text', 'VimL', ('vim',), ('*.vim', '.vimrc', '.exrc', '.gvimrc', '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'), ('text/x-vim',)),
|
||||||
|
'XQueryLexer': ('pygments.lexers.web', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')),
|
||||||
|
'XmlDjangoLexer': ('pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), (), ('application/xml+django', 'application/xml+jinja')),
|
||||||
|
'XmlErbLexer': ('pygments.lexers.templates', 'XML+Ruby', ('xml+erb', 'xml+ruby'), (), ('application/xml+ruby',)),
|
||||||
|
'XmlLexer': ('pygments.lexers.web', 'XML', ('xml',), ('*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl', '*.wsf'), ('text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml')),
|
||||||
|
'XmlPhpLexer': ('pygments.lexers.templates', 'XML+PHP', ('xml+php',), (), ('application/xml+php',)),
|
||||||
|
'XmlSmartyLexer': ('pygments.lexers.templates', 'XML+Smarty', ('xml+smarty',), (), ('application/xml+smarty',)),
|
||||||
|
'XsltLexer': ('pygments.lexers.web', 'XSLT', ('xslt',), ('*.xsl', '*.xslt', '*.xpl'), ('application/xsl+xml', 'application/xslt+xml')),
|
||||||
|
'XtendLexer': ('pygments.lexers.jvm', 'Xtend', ('xtend',), ('*.xtend',), ('text/x-xtend',)),
|
||||||
|
'YamlLexer': ('pygments.lexers.text', 'YAML', ('yaml',), ('*.yaml', '*.yml'), ('text/x-yaml',)),
|
||||||
|
}
|
||||||
|
|
if __name__ == '__main__':
    import sys
    import os

    # lookup lexers
    found_lexers = []
    sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
    for filename in os.listdir('.'):
        if filename.endswith('.py') and not filename.startswith('_'):
            module_name = 'pygments.lexers.%s' % filename[:-3]
            print module_name
            module = __import__(module_name, None, None, [''])
            for lexer_name in module.__all__:
                lexer = getattr(module, lexer_name)
                found_lexers.append(
                    '%r: %r' % (lexer_name,
                                (module_name,
                                 lexer.name,
                                 tuple(lexer.aliases),
                                 tuple(lexer.filenames),
                                 tuple(lexer.mimetypes))))
    # sort them, that should make the diff files for svn smaller
    found_lexers.sort()

    # extract useful sourcecode from this file
    f = open(__file__)
    try:
        content = f.read()
    finally:
        f.close()
    header = content[:content.find('LEXERS = {')]
    footer = content[content.find("if __name__ == '__main__':"):]

    # write new file
    f = open(__file__, 'wb')
    f.write(header)
    f.write('LEXERS = {\n    %s,\n}\n\n' % ',\n    '.join(found_lexers))
    f.write(footer)
    f.close()
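For orientation: the footer above is run as a script (python _mapping.py from the lexers directory) to rebuild the LEXERS table after a lexer is added; consumers never import lexer modules directly but resolve an alias through the table. A minimal usage sketch, assuming a regular Pygments install rather than this vendored copy:

# Sketch only: get_lexer_by_name() is the public entry point that looks the
# alias up in the LEXERS table and lazily imports the owning module.
from pygments.lexers import get_lexer_by_name

lexer = get_lexer_by_name('erlang')  # matches the aliases column above
print(lexer.name)                    # -> 'Erlang'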
3787
packages/wakatime/wakatime/packages/pygments2/lexers/_phpbuiltins.py
Normal file
File diff suppressed because it is too large
@@ -0,0 +1,233 @@
# -*- coding: utf-8 -*-
"""
    pygments.lexers._postgres_builtins
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Self-updating data files for PostgreSQL lexer.

    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import re
import urllib

# One man's constant is another man's variable.
SOURCE_URL = 'https://github.com/postgres/postgres/raw/master'
KEYWORDS_URL = SOURCE_URL + '/doc/src/sgml/keywords.sgml'
DATATYPES_URL = SOURCE_URL + '/doc/src/sgml/datatype.sgml'

def update_myself():
    data_file = list(fetch(DATATYPES_URL))
    datatypes = parse_datatypes(data_file)
    pseudos = parse_pseudos(data_file)

    keywords = parse_keywords(fetch(KEYWORDS_URL))
    update_consts(__file__, 'DATATYPES', datatypes)
    update_consts(__file__, 'PSEUDO_TYPES', pseudos)
    update_consts(__file__, 'KEYWORDS', keywords)

def parse_keywords(f):
    kw = []
    for m in re.finditer(
            r'\s*<entry><token>([^<]+)</token></entry>\s*'
            r'<entry>([^<]+)</entry>', f.read()):
        kw.append(m.group(1))

    if not kw:
        raise ValueError('no keyword found')

    kw.sort()
    return kw

def parse_datatypes(f):
    dt = set()
    for line in f:
        if '<sect1' in line:
            break
        if '<entry><type>' not in line:
            continue

        # Parse a string such as
        # time [ (<replaceable>p</replaceable>) ] [ without time zone ]
        # into types "time" and "without time zone"

        # remove all the tags
        line = re.sub("<replaceable>[^<]+</replaceable>", "", line)
        line = re.sub("<[^>]+>", "", line)

        # Drop the parts containing braces
        for tmp in [t for tmp in line.split('[')
                for t in tmp.split(']') if "(" not in t]:
            for t in tmp.split(','):
                t = t.strip()
                if not t: continue
                dt.add(" ".join(t.split()))

    dt = list(dt)
    dt.sort()
    return dt

def parse_pseudos(f):
    dt = []
    re_start = re.compile(r'\s*<table id="datatype-pseudotypes-table">')
    re_entry = re.compile(r'\s*<entry><type>([^<]+)</></entry>')
    re_end = re.compile(r'\s*</table>')

    f = iter(f)
    for line in f:
        if re_start.match(line) is not None:
            break
    else:
        raise ValueError('pseudo datatypes table not found')

    for line in f:
        m = re_entry.match(line)
        if m is not None:
            dt.append(m.group(1))

        if re_end.match(line) is not None:
            break
    else:
        raise ValueError('end of pseudo datatypes table not found')

    if not dt:
        raise ValueError('pseudo datatypes not found')

    return dt

def fetch(url):
    return urllib.urlopen(url)

def update_consts(filename, constname, content):
    f = open(filename)
    lines = f.readlines()
    f.close()

    # Line to start/end inserting
    re_start = re.compile(r'^%s\s*=\s*\[\s*$' % constname)
    re_end = re.compile(r'^\s*\]\s*$')
    start = [ n for n, l in enumerate(lines) if re_start.match(l) ]
    if not start:
        raise ValueError("couldn't find line containing '%s = ['" % constname)
    if len(start) > 1:
        raise ValueError("too many lines containing '%s = ['" % constname)
    start = start[0] + 1

    end = [ n for n, l in enumerate(lines) if n >= start and re_end.match(l) ]
    if not end:
        raise ValueError("couldn't find line containing ']' after %s " % constname)
    end = end[0]

    # Pack the new content in lines not too long
    content = [repr(item) for item in content ]
    new_lines = [[]]
    for item in content:
        if sum(map(len, new_lines[-1])) + 2 * len(new_lines[-1]) + len(item) + 4 > 75:
            new_lines.append([])
        new_lines[-1].append(item)

    lines[start:end] = [ "    %s,\n" % ", ".join(items) for items in new_lines ]

    f = open(filename, 'w')
    f.write(''.join(lines))
    f.close()


# Autogenerated: please edit them if you like wasting your time.

KEYWORDS = [
    'ABORT', 'ABSOLUTE', 'ACCESS', 'ACTION', 'ADD', 'ADMIN', 'AFTER',
    'AGGREGATE', 'ALL', 'ALSO', 'ALTER', 'ALWAYS', 'ANALYSE', 'ANALYZE',
    'AND', 'ANY', 'ARRAY', 'AS', 'ASC', 'ASSERTION', 'ASSIGNMENT',
    'ASYMMETRIC', 'AT', 'ATTRIBUTE', 'AUTHORIZATION', 'BACKWARD', 'BEFORE',
    'BEGIN', 'BETWEEN', 'BIGINT', 'BINARY', 'BIT', 'BOOLEAN', 'BOTH', 'BY',
    'CACHE', 'CALLED', 'CASCADE', 'CASCADED', 'CASE', 'CAST', 'CATALOG',
    'CHAIN', 'CHAR', 'CHARACTER', 'CHARACTERISTICS', 'CHECK', 'CHECKPOINT',
    'CLASS', 'CLOSE', 'CLUSTER', 'COALESCE', 'COLLATE', 'COLLATION',
    'COLUMN', 'COMMENT', 'COMMENTS', 'COMMIT', 'COMMITTED', 'CONCURRENTLY',
    'CONFIGURATION', 'CONNECTION', 'CONSTRAINT', 'CONSTRAINTS', 'CONTENT',
    'CONTINUE', 'CONVERSION', 'COPY', 'COST', 'CREATE', 'CROSS', 'CSV',
    'CURRENT', 'CURRENT_CATALOG', 'CURRENT_DATE', 'CURRENT_ROLE',
    'CURRENT_SCHEMA', 'CURRENT_TIME', 'CURRENT_TIMESTAMP', 'CURRENT_USER',
    'CURSOR', 'CYCLE', 'DATA', 'DATABASE', 'DAY', 'DEALLOCATE', 'DEC',
    'DECIMAL', 'DECLARE', 'DEFAULT', 'DEFAULTS', 'DEFERRABLE', 'DEFERRED',
    'DEFINER', 'DELETE', 'DELIMITER', 'DELIMITERS', 'DESC', 'DICTIONARY',
    'DISABLE', 'DISCARD', 'DISTINCT', 'DO', 'DOCUMENT', 'DOMAIN', 'DOUBLE',
    'DROP', 'EACH', 'ELSE', 'ENABLE', 'ENCODING', 'ENCRYPTED', 'END',
    'ENUM', 'ESCAPE', 'EXCEPT', 'EXCLUDE', 'EXCLUDING', 'EXCLUSIVE',
    'EXECUTE', 'EXISTS', 'EXPLAIN', 'EXTENSION', 'EXTERNAL', 'EXTRACT',
    'FALSE', 'FAMILY', 'FETCH', 'FIRST', 'FLOAT', 'FOLLOWING', 'FOR',
    'FORCE', 'FOREIGN', 'FORWARD', 'FREEZE', 'FROM', 'FULL', 'FUNCTION',
    'FUNCTIONS', 'GLOBAL', 'GRANT', 'GRANTED', 'GREATEST', 'GROUP',
    'HANDLER', 'HAVING', 'HEADER', 'HOLD', 'HOUR', 'IDENTITY', 'IF',
    'ILIKE', 'IMMEDIATE', 'IMMUTABLE', 'IMPLICIT', 'IN', 'INCLUDING',
    'INCREMENT', 'INDEX', 'INDEXES', 'INHERIT', 'INHERITS', 'INITIALLY',
    'INLINE', 'INNER', 'INOUT', 'INPUT', 'INSENSITIVE', 'INSERT', 'INSTEAD',
    'INT', 'INTEGER', 'INTERSECT', 'INTERVAL', 'INTO', 'INVOKER', 'IS',
    'ISNULL', 'ISOLATION', 'JOIN', 'KEY', 'LABEL', 'LANGUAGE', 'LARGE',
    'LAST', 'LC_COLLATE', 'LC_CTYPE', 'LEADING', 'LEAST', 'LEFT', 'LEVEL',
    'LIKE', 'LIMIT', 'LISTEN', 'LOAD', 'LOCAL', 'LOCALTIME',
    'LOCALTIMESTAMP', 'LOCATION', 'LOCK', 'MAPPING', 'MATCH', 'MAXVALUE',
    'MINUTE', 'MINVALUE', 'MODE', 'MONTH', 'MOVE', 'NAME', 'NAMES',
    'NATIONAL', 'NATURAL', 'NCHAR', 'NEXT', 'NO', 'NONE', 'NOT', 'NOTHING',
    'NOTIFY', 'NOTNULL', 'NOWAIT', 'NULL', 'NULLIF', 'NULLS', 'NUMERIC',
    'OBJECT', 'OF', 'OFF', 'OFFSET', 'OIDS', 'ON', 'ONLY', 'OPERATOR',
    'OPTION', 'OPTIONS', 'OR', 'ORDER', 'OUT', 'OUTER', 'OVER', 'OVERLAPS',
    'OVERLAY', 'OWNED', 'OWNER', 'PARSER', 'PARTIAL', 'PARTITION',
    'PASSING', 'PASSWORD', 'PLACING', 'PLANS', 'POSITION', 'PRECEDING',
    'PRECISION', 'PREPARE', 'PREPARED', 'PRESERVE', 'PRIMARY', 'PRIOR',
    'PRIVILEGES', 'PROCEDURAL', 'PROCEDURE', 'QUOTE', 'RANGE', 'READ',
    'REAL', 'REASSIGN', 'RECHECK', 'RECURSIVE', 'REF', 'REFERENCES',
    'REINDEX', 'RELATIVE', 'RELEASE', 'RENAME', 'REPEATABLE', 'REPLACE',
    'REPLICA', 'RESET', 'RESTART', 'RESTRICT', 'RETURNING', 'RETURNS',
    'REVOKE', 'RIGHT', 'ROLE', 'ROLLBACK', 'ROW', 'ROWS', 'RULE',
    'SAVEPOINT', 'SCHEMA', 'SCROLL', 'SEARCH', 'SECOND', 'SECURITY',
    'SELECT', 'SEQUENCE', 'SEQUENCES', 'SERIALIZABLE', 'SERVER', 'SESSION',
    'SESSION_USER', 'SET', 'SETOF', 'SHARE', 'SHOW', 'SIMILAR', 'SIMPLE',
    'SMALLINT', 'SOME', 'STABLE', 'STANDALONE', 'START', 'STATEMENT',
    'STATISTICS', 'STDIN', 'STDOUT', 'STORAGE', 'STRICT', 'STRIP',
    'SUBSTRING', 'SYMMETRIC', 'SYSID', 'SYSTEM', 'TABLE', 'TABLES',
    'TABLESPACE', 'TEMP', 'TEMPLATE', 'TEMPORARY', 'TEXT', 'THEN', 'TIME',
    'TIMESTAMP', 'TO', 'TRAILING', 'TRANSACTION', 'TREAT', 'TRIGGER',
    'TRIM', 'TRUE', 'TRUNCATE', 'TRUSTED', 'TYPE', 'UNBOUNDED',
    'UNCOMMITTED', 'UNENCRYPTED', 'UNION', 'UNIQUE', 'UNKNOWN', 'UNLISTEN',
    'UNLOGGED', 'UNTIL', 'UPDATE', 'USER', 'USING', 'VACUUM', 'VALID',
    'VALIDATE', 'VALIDATOR', 'VALUE', 'VALUES', 'VARCHAR', 'VARIADIC',
    'VARYING', 'VERBOSE', 'VERSION', 'VIEW', 'VOLATILE', 'WHEN', 'WHERE',
    'WHITESPACE', 'WINDOW', 'WITH', 'WITHOUT', 'WORK', 'WRAPPER', 'WRITE',
    'XML', 'XMLATTRIBUTES', 'XMLCONCAT', 'XMLELEMENT', 'XMLEXISTS',
    'XMLFOREST', 'XMLPARSE', 'XMLPI', 'XMLROOT', 'XMLSERIALIZE', 'YEAR',
    'YES', 'ZONE',
]

DATATYPES = [
    'bigint', 'bigserial', 'bit', 'bit varying', 'bool', 'boolean', 'box',
    'bytea', 'char', 'character', 'character varying', 'cidr', 'circle',
    'date', 'decimal', 'double precision', 'float4', 'float8', 'inet',
    'int', 'int2', 'int4', 'int8', 'integer', 'interval', 'json', 'line',
    'lseg', 'macaddr', 'money', 'numeric', 'path', 'point', 'polygon',
    'real', 'serial', 'serial2', 'serial4', 'serial8', 'smallint',
    'smallserial', 'text', 'time', 'timestamp', 'timestamptz', 'timetz',
    'tsquery', 'tsvector', 'txid_snapshot', 'uuid', 'varbit', 'varchar',
    'with time zone', 'without time zone', 'xml',
]

PSEUDO_TYPES = [
    'any', 'anyelement', 'anyarray', 'anynonarray', 'anyenum', 'anyrange',
    'cstring', 'internal', 'language_handler', 'fdw_handler', 'record',
    'trigger', 'void', 'opaque',
]

# Remove 'trigger' from types
PSEUDO_TYPES = sorted(set(PSEUDO_TYPES) - set(map(str.lower, KEYWORDS)))

PLPGSQL_KEYWORDS = [
    'ALIAS', 'CONSTANT', 'DIAGNOSTICS', 'ELSIF', 'EXCEPTION', 'EXIT',
    'FOREACH', 'GET', 'LOOP', 'NOTICE', 'OPEN', 'PERFORM', 'QUERY', 'RAISE',
    'RETURN', 'REVERSE', 'SQLSTATE', 'WHILE',
]

if __name__ == '__main__':
    update_myself()
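The module above is both the data file and the tool that refreshes it: run as a script (python _postgres_builtins.py), it re-downloads the SGML sources and rewrites the KEYWORDS, DATATYPES, and PSEUDO_TYPES blocks in place via update_consts(). A small sketch of how the generated lists can be consumed; classify() is a hypothetical helper written for illustration, while the real consumer is the PostgreSQL lexer in pygments.lexers.sql, which builds token regexes from these lists:

# Sketch, not part of the commit; only the imported lists come from above.
from pygments.lexers._postgres_builtins import KEYWORDS, DATATYPES

def classify(word):
    # KEYWORDS are stored uppercase, DATATYPES lowercase (see the lists).
    if word.upper() in KEYWORDS:
        return 'keyword'
    if word.lower() in DATATYPES:
        return 'datatype'
    return 'name'

print(classify('select'))   # -> 'keyword'
print(classify('bigint'))   # -> 'datatype'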
@@ -0,0 +1,557 @@
# -*- coding: utf-8 -*-
"""
    pygments.lexers._robotframeworklexer
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Lexer for Robot Framework.

    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

# Copyright 2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import re

from pygments.lexer import Lexer
from pygments.token import Token


HEADING = Token.Generic.Heading
SETTING = Token.Keyword.Namespace
IMPORT = Token.Name.Namespace
TC_KW_NAME = Token.Generic.Subheading
KEYWORD = Token.Name.Function
ARGUMENT = Token.String
VARIABLE = Token.Name.Variable
COMMENT = Token.Comment
SEPARATOR = Token.Punctuation
SYNTAX = Token.Punctuation
GHERKIN = Token.Generic.Emph
ERROR = Token.Error


def normalize(string, remove=''):
    string = string.lower()
    for char in remove + ' ':
        if char in string:
            string = string.replace(char, '')
    return string


class RobotFrameworkLexer(Lexer):
    """
    For `Robot Framework <http://robotframework.org>`_ test data.

    Supports both space and pipe separated plain text formats.

    *New in Pygments 1.6.*
    """
    name = 'RobotFramework'
    aliases = ['RobotFramework', 'robotframework']
    filenames = ['*.txt', '*.robot']
    mimetypes = ['text/x-robotframework']

    def __init__(self, **options):
        options['tabsize'] = 2
        options['encoding'] = 'UTF-8'
        Lexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        row_tokenizer = RowTokenizer()
        var_tokenizer = VariableTokenizer()
        index = 0
        for row in text.splitlines():
            for value, token in row_tokenizer.tokenize(row):
                for value, token in var_tokenizer.tokenize(value, token):
                    if value:
                        yield index, token, unicode(value)
                        index += len(value)


class VariableTokenizer(object):

    def tokenize(self, string, token):
        var = VariableSplitter(string, identifiers='$@%')
        if var.start < 0 or token in (COMMENT, ERROR):
            yield string, token
            return
        for value, token in self._tokenize(var, string, token):
            if value:
                yield value, token

    def _tokenize(self, var, string, orig_token):
        before = string[:var.start]
        yield before, orig_token
        yield var.identifier + '{', SYNTAX
        for value, token in self.tokenize(var.base, VARIABLE):
            yield value, token
        yield '}', SYNTAX
        if var.index:
            yield '[', SYNTAX
            for value, token in self.tokenize(var.index, VARIABLE):
                yield value, token
            yield ']', SYNTAX
        for value, token in self.tokenize(string[var.end:], orig_token):
            yield value, token


class RowTokenizer(object):

    def __init__(self):
        self._table = UnknownTable()
        self._splitter = RowSplitter()
        testcases = TestCaseTable()
        settings = SettingTable(testcases.set_default_template)
        variables = VariableTable()
        keywords = KeywordTable()
        self._tables = {'settings': settings, 'setting': settings,
                        'metadata': settings,
                        'variables': variables, 'variable': variables,
                        'testcases': testcases, 'testcase': testcases,
                        'keywords': keywords, 'keyword': keywords,
                        'userkeywords': keywords, 'userkeyword': keywords}

    def tokenize(self, row):
        commented = False
        heading = False
        for index, value in enumerate(self._splitter.split(row)):
            # First value, and every second after that, is a separator.
            index, separator = divmod(index-1, 2)
            if value.startswith('#'):
                commented = True
            elif index == 0 and value.startswith('*'):
                self._table = self._start_table(value)
                heading = True
            for value, token in self._tokenize(value, index, commented,
                                               separator, heading):
                yield value, token
        self._table.end_row()

    def _start_table(self, header):
        name = normalize(header, remove='*')
        return self._tables.get(name, UnknownTable())

    def _tokenize(self, value, index, commented, separator, heading):
        if commented:
            yield value, COMMENT
        elif separator:
            yield value, SEPARATOR
        elif heading:
            yield value, HEADING
        else:
            for value, token in self._table.tokenize(value, index):
                yield value, token


class RowSplitter(object):
    _space_splitter = re.compile('( {2,})')
    _pipe_splitter = re.compile('((?:^| +)\|(?: +|$))')

    def split(self, row):
        splitter = (row.startswith('| ') and self._split_from_pipes
                    or self._split_from_spaces)
        for value in splitter(row):
            yield value
        yield '\n'

    def _split_from_spaces(self, row):
        yield ''  # Start with (pseudo)separator similarly as with pipes
        for value in self._space_splitter.split(row):
            yield value

    def _split_from_pipes(self, row):
        _, separator, rest = self._pipe_splitter.split(row, 1)
        yield separator
        while self._pipe_splitter.search(rest):
            cell, separator, rest = self._pipe_splitter.split(rest, 1)
            yield cell
            yield separator
        yield rest


class Tokenizer(object):
    _tokens = None

    def __init__(self):
        self._index = 0

    def tokenize(self, value):
        values_and_tokens = self._tokenize(value, self._index)
        self._index += 1
        if isinstance(values_and_tokens, type(Token)):
            values_and_tokens = [(value, values_and_tokens)]
        return values_and_tokens

    def _tokenize(self, value, index):
        index = min(index, len(self._tokens) - 1)
        return self._tokens[index]

    def _is_assign(self, value):
        if value.endswith('='):
            value = value[:-1].strip()
        var = VariableSplitter(value, identifiers='$@')
        return var.start == 0 and var.end == len(value)


class Comment(Tokenizer):
    _tokens = (COMMENT,)


class Setting(Tokenizer):
    _tokens = (SETTING, ARGUMENT)
    _keyword_settings = ('suitesetup', 'suiteprecondition', 'suiteteardown',
                         'suitepostcondition', 'testsetup', 'testprecondition',
                         'testteardown', 'testpostcondition', 'testtemplate')
    _import_settings = ('library', 'resource', 'variables')
    _other_settings = ('documentation', 'metadata', 'forcetags', 'defaulttags',
                       'testtimeout')
    _custom_tokenizer = None

    def __init__(self, template_setter=None):
        Tokenizer.__init__(self)
        self._template_setter = template_setter

    def _tokenize(self, value, index):
        if index == 1 and self._template_setter:
            self._template_setter(value)
        if index == 0:
            normalized = normalize(value)
            if normalized in self._keyword_settings:
                self._custom_tokenizer = KeywordCall(support_assign=False)
            elif normalized in self._import_settings:
                self._custom_tokenizer = ImportSetting()
            elif normalized not in self._other_settings:
                return ERROR
        elif self._custom_tokenizer:
            return self._custom_tokenizer.tokenize(value)
        return Tokenizer._tokenize(self, value, index)


class ImportSetting(Tokenizer):
    _tokens = (IMPORT, ARGUMENT)


class TestCaseSetting(Setting):
    _keyword_settings = ('setup', 'precondition', 'teardown', 'postcondition',
                         'template')
    _import_settings = ()
    _other_settings = ('documentation', 'tags', 'timeout')

    def _tokenize(self, value, index):
        if index == 0:
            type = Setting._tokenize(self, value[1:-1], index)
            return [('[', SYNTAX), (value[1:-1], type), (']', SYNTAX)]
        return Setting._tokenize(self, value, index)


class KeywordSetting(TestCaseSetting):
    _keyword_settings = ('teardown',)
    _other_settings = ('documentation', 'arguments', 'return', 'timeout')


class Variable(Tokenizer):
    _tokens = (SYNTAX, ARGUMENT)

    def _tokenize(self, value, index):
        if index == 0 and not self._is_assign(value):
            return ERROR
        return Tokenizer._tokenize(self, value, index)


class KeywordCall(Tokenizer):
    _tokens = (KEYWORD, ARGUMENT)

    def __init__(self, support_assign=True):
        Tokenizer.__init__(self)
        self._keyword_found = not support_assign
        self._assigns = 0

    def _tokenize(self, value, index):
        if not self._keyword_found and self._is_assign(value):
            self._assigns += 1
            return SYNTAX  # VariableTokenizer tokenizes this later.
        if self._keyword_found:
            return Tokenizer._tokenize(self, value, index - self._assigns)
        self._keyword_found = True
        return GherkinTokenizer().tokenize(value, KEYWORD)


class GherkinTokenizer(object):
    _gherkin_prefix = re.compile('^(Given|When|Then|And) ', re.IGNORECASE)

    def tokenize(self, value, token):
        match = self._gherkin_prefix.match(value)
        if not match:
            return [(value, token)]
        end = match.end()
        return [(value[:end], GHERKIN), (value[end:], token)]


class TemplatedKeywordCall(Tokenizer):
    _tokens = (ARGUMENT,)


class ForLoop(Tokenizer):

    def __init__(self):
        Tokenizer.__init__(self)
        self._in_arguments = False

    def _tokenize(self, value, index):
        token = self._in_arguments and ARGUMENT or SYNTAX
        if value.upper() in ('IN', 'IN RANGE'):
            self._in_arguments = True
        return token


class _Table(object):
    _tokenizer_class = None

    def __init__(self, prev_tokenizer=None):
        self._tokenizer = self._tokenizer_class()
        self._prev_tokenizer = prev_tokenizer
        self._prev_values_on_row = []

    def tokenize(self, value, index):
        if self._continues(value, index):
            self._tokenizer = self._prev_tokenizer
            yield value, SYNTAX
        else:
            for value_and_token in self._tokenize(value, index):
                yield value_and_token
        self._prev_values_on_row.append(value)

    def _continues(self, value, index):
        return value == '...' and all(self._is_empty(t)
                                      for t in self._prev_values_on_row)

    def _is_empty(self, value):
        return value in ('', '\\')

    def _tokenize(self, value, index):
        return self._tokenizer.tokenize(value)

    def end_row(self):
        self.__init__(prev_tokenizer=self._tokenizer)


class UnknownTable(_Table):
    _tokenizer_class = Comment

    def _continues(self, value, index):
        return False


class VariableTable(_Table):
    _tokenizer_class = Variable


class SettingTable(_Table):
    _tokenizer_class = Setting

    def __init__(self, template_setter, prev_tokenizer=None):
        _Table.__init__(self, prev_tokenizer)
        self._template_setter = template_setter

    def _tokenize(self, value, index):
        if index == 0 and normalize(value) == 'testtemplate':
            self._tokenizer = Setting(self._template_setter)
        return _Table._tokenize(self, value, index)

    def end_row(self):
        self.__init__(self._template_setter, prev_tokenizer=self._tokenizer)


class TestCaseTable(_Table):
    _setting_class = TestCaseSetting
    _test_template = None
    _default_template = None

    @property
    def _tokenizer_class(self):
        if self._test_template or (self._default_template and
                                   self._test_template is not False):
            return TemplatedKeywordCall
        return KeywordCall

    def _continues(self, value, index):
        return index > 0 and _Table._continues(self, value, index)

    def _tokenize(self, value, index):
        if index == 0:
            if value:
                self._test_template = None
            return GherkinTokenizer().tokenize(value, TC_KW_NAME)
        if index == 1 and self._is_setting(value):
            if self._is_template(value):
                self._test_template = False
                self._tokenizer = self._setting_class(self.set_test_template)
            else:
                self._tokenizer = self._setting_class()
        if index == 1 and self._is_for_loop(value):
            self._tokenizer = ForLoop()
        if index == 1 and self._is_empty(value):
            return [(value, SYNTAX)]
        return _Table._tokenize(self, value, index)

    def _is_setting(self, value):
        return value.startswith('[') and value.endswith(']')

    def _is_template(self, value):
        return normalize(value) == '[template]'

    def _is_for_loop(self, value):
        return value.startswith(':') and normalize(value, remove=':') == 'for'

    def set_test_template(self, template):
        self._test_template = self._is_template_set(template)

    def set_default_template(self, template):
        self._default_template = self._is_template_set(template)

    def _is_template_set(self, template):
        return normalize(template) not in ('', '\\', 'none', '${empty}')


class KeywordTable(TestCaseTable):
    _tokenizer_class = KeywordCall
    _setting_class = KeywordSetting

    def _is_template(self, value):
        return False


# Following code copied directly from Robot Framework 2.7.5.

class VariableSplitter:

    def __init__(self, string, identifiers):
        self.identifier = None
        self.base = None
        self.index = None
        self.start = -1
        self.end = -1
        self._identifiers = identifiers
        self._may_have_internal_variables = False
        try:
            self._split(string)
        except ValueError:
            pass
        else:
            self._finalize()

    def get_replaced_base(self, variables):
        if self._may_have_internal_variables:
            return variables.replace_string(self.base)
        return self.base

    def _finalize(self):
        self.identifier = self._variable_chars[0]
        self.base = ''.join(self._variable_chars[2:-1])
        self.end = self.start + len(self._variable_chars)
        if self._has_list_variable_index():
            self.index = ''.join(self._list_variable_index_chars[1:-1])
            self.end += len(self._list_variable_index_chars)

    def _has_list_variable_index(self):
        return self._list_variable_index_chars\
        and self._list_variable_index_chars[-1] == ']'

    def _split(self, string):
        start_index, max_index = self._find_variable(string)
        self.start = start_index
        self._open_curly = 1
        self._state = self._variable_state
        self._variable_chars = [string[start_index], '{']
        self._list_variable_index_chars = []
        self._string = string
        start_index += 2
        for index, char in enumerate(string[start_index:]):
            index += start_index  # Giving start to enumerate only in Py 2.6+
            try:
                self._state(char, index)
            except StopIteration:
                return
            if index == max_index and not self._scanning_list_variable_index():
                return

    def _scanning_list_variable_index(self):
        return self._state in [self._waiting_list_variable_index_state,
                               self._list_variable_index_state]

    def _find_variable(self, string):
        max_end_index = string.rfind('}')
        if max_end_index == -1:
            raise ValueError('No variable end found')
        if self._is_escaped(string, max_end_index):
            return self._find_variable(string[:max_end_index])
        start_index = self._find_start_index(string, 1, max_end_index)
        if start_index == -1:
            raise ValueError('No variable start found')
        return start_index, max_end_index

    def _find_start_index(self, string, start, end):
        index = string.find('{', start, end) - 1
        if index < 0:
            return -1
        if self._start_index_is_ok(string, index):
            return index
        return self._find_start_index(string, index+2, end)

    def _start_index_is_ok(self, string, index):
        return string[index] in self._identifiers\
        and not self._is_escaped(string, index)

    def _is_escaped(self, string, index):
        escaped = False
        while index > 0 and string[index-1] == '\\':
            index -= 1
            escaped = not escaped
        return escaped

    def _variable_state(self, char, index):
        self._variable_chars.append(char)
        if char == '}' and not self._is_escaped(self._string, index):
            self._open_curly -= 1
            if self._open_curly == 0:
                if not self._is_list_variable():
                    raise StopIteration
                self._state = self._waiting_list_variable_index_state
        elif char in self._identifiers:
            self._state = self._internal_variable_start_state

    def _is_list_variable(self):
        return self._variable_chars[0] == '@'

    def _internal_variable_start_state(self, char, index):
        self._state = self._variable_state
        if char == '{':
            self._variable_chars.append(char)
            self._open_curly += 1
            self._may_have_internal_variables = True
        else:
            self._variable_state(char, index)

    def _waiting_list_variable_index_state(self, char, index):
        if char != '[':
            raise StopIteration
        self._list_variable_index_chars.append(char)
        self._state = self._list_variable_index_state

    def _list_variable_index_state(self, char, index):
        self._list_variable_index_chars.append(char)
        if char == ']':
            raise StopIteration
|
packages/wakatime/wakatime/packages/pygments2/lexers/agile.py (new file, 2290 lines; diff suppressed because it is too large)
packages/wakatime/wakatime/packages/pygments2/lexers/compiled.py (new file, 3723 lines; diff suppressed because it is too large)
packages/wakatime/wakatime/packages/pygments2/lexers/dotnet.py (new file, 671 lines)
@@ -0,0 +1,671 @@
# -*- coding: utf-8 -*-
"""
    pygments.lexers.dotnet
    ~~~~~~~~~~~~~~~~~~~~~~

    Lexers for .net languages.

    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""
import re

from pygments.lexer import RegexLexer, DelegatingLexer, bygroups, include, \
    using, this
from pygments.token import Punctuation, \
    Text, Comment, Operator, Keyword, Name, String, Number, Literal, Other
from pygments.util import get_choice_opt
from pygments import unistring as uni

from pygments.lexers.web import XmlLexer

__all__ = ['CSharpLexer', 'NemerleLexer', 'BooLexer', 'VbNetLexer',
           'CSharpAspxLexer', 'VbNetAspxLexer', 'FSharpLexer']


class CSharpLexer(RegexLexer):
    """
    For `C# <http://msdn2.microsoft.com/en-us/vcsharp/default.aspx>`_
    source code.

    Additional options accepted:

    `unicodelevel`
      Determines which Unicode characters this lexer allows for identifiers.
      The possible values are:

      * ``none`` -- only the ASCII letters and numbers are allowed. This
        is the fastest selection.
      * ``basic`` -- all Unicode characters from the specification except
        category ``Lo`` are allowed.
      * ``full`` -- all Unicode characters as specified in the C# specs
        are allowed. Note that this means a considerable slowdown since the
        ``Lo`` category has more than 40,000 characters in it!

      The default value is ``basic``.

      *New in Pygments 0.8.*
    """

    name = 'C#'
    aliases = ['csharp', 'c#']
    filenames = ['*.cs']
    mimetypes = ['text/x-csharp']  # inferred

    flags = re.MULTILINE | re.DOTALL | re.UNICODE

    # for the range of allowed unicode characters in identifiers,
    # see http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf

    levels = {
        'none': '@?[_a-zA-Z][a-zA-Z0-9_]*',
        'basic': ('@?[_' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl + ']' +
                  '[' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl +
                  uni.Nd + uni.Pc + uni.Cf + uni.Mn + uni.Mc + ']*'),
        'full': ('@?(?:_|[^' +
                 uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])'
                 + '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
                                        'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'),
    }

    tokens = {}
    token_variants = True

    for levelname, cs_ident in levels.items():
        tokens[levelname] = {
            'root': [
                # method names
                (r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)'  # return type
                 r'(' + cs_ident + ')'                            # method name
                 r'(\s*)(\()',                                    # signature start
                 bygroups(using(this), Name.Function, Text, Punctuation)),
                (r'^\s*\[.*?\]', Name.Attribute),
                (r'[^\S\n]+', Text),
                (r'\\\n', Text),  # line continuation
                (r'//.*?\n', Comment.Single),
                (r'/[*].*?[*]/', Comment.Multiline),
                (r'\n', Text),
                (r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
                (r'[{}]', Punctuation),
                (r'@"(""|[^"])*"', String),
                (r'"(\\\\|\\"|[^"\n])*["\n]', String),
                (r"'\\.'|'[^\\]'", String.Char),
                (r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?"
                 r"[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?", Number),
                (r'#[ \t]*(if|endif|else|elif|define|undef|'
                 r'line|error|warning|region|endregion|pragma)\b.*?\n',
                 Comment.Preproc),
                (r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Text,
                                                       Keyword)),
                (r'(abstract|as|async|await|base|break|case|catch|'
                 r'checked|const|continue|default|delegate|'
                 r'do|else|enum|event|explicit|extern|false|finally|'
                 r'fixed|for|foreach|goto|if|implicit|in|interface|'
                 r'internal|is|lock|new|null|operator|'
                 r'out|override|params|private|protected|public|readonly|'
                 r'ref|return|sealed|sizeof|stackalloc|static|'
                 r'switch|this|throw|true|try|typeof|'
                 r'unchecked|unsafe|virtual|void|while|'
                 r'get|set|new|partial|yield|add|remove|value|alias|ascending|'
                 r'descending|from|group|into|orderby|select|where|'
                 r'join|equals)\b', Keyword),
                (r'(global)(::)', bygroups(Keyword, Punctuation)),
                (r'(bool|byte|char|decimal|double|dynamic|float|int|long|object|'
                 r'sbyte|short|string|uint|ulong|ushort|var)\b\??', Keyword.Type),
                (r'(class|struct)(\s+)', bygroups(Keyword, Text), 'class'),
                (r'(namespace|using)(\s+)', bygroups(Keyword, Text), 'namespace'),
                (cs_ident, Name),
            ],
            'class': [
                (cs_ident, Name.Class, '#pop')
            ],
            'namespace': [
                (r'(?=\()', Text, '#pop'),  # using (resource)
                ('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop')
            ]
        }

    def __init__(self, **options):
        level = get_choice_opt(options, 'unicodelevel', self.tokens.keys(), 'basic')
        if level not in self._all_tokens:
            # compile the regexes now
            self._tokens = self.__class__.process_tokendef(level)
        else:
            self._tokens = self._all_tokens[level]

        RegexLexer.__init__(self, **options)


class NemerleLexer(RegexLexer):
    """
    For `Nemerle <http://nemerle.org>`_ source code.

    Additional options accepted:

    `unicodelevel`
      Determines which Unicode characters this lexer allows for identifiers.
      The possible values are:

      * ``none`` -- only the ASCII letters and numbers are allowed. This
        is the fastest selection.
      * ``basic`` -- all Unicode characters from the specification except
        category ``Lo`` are allowed.
      * ``full`` -- all Unicode characters as specified in the C# specs
        are allowed. Note that this means a considerable slowdown since the
        ``Lo`` category has more than 40,000 characters in it!

      The default value is ``basic``.

      *New in Pygments 1.5.*
    """

    name = 'Nemerle'
    aliases = ['nemerle']
    filenames = ['*.n']
    mimetypes = ['text/x-nemerle']  # inferred

    flags = re.MULTILINE | re.DOTALL | re.UNICODE

    # for the range of allowed unicode characters in identifiers, see
    # http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf

    levels = dict(
        none = '@?[_a-zA-Z][a-zA-Z0-9_]*',
        basic = ('@?[_' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl + ']' +
                 '[' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl +
                 uni.Nd + uni.Pc + uni.Cf + uni.Mn + uni.Mc + ']*'),
        full = ('@?(?:_|[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo',
                                            'Nl') + '])'
                + '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
                                       'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'),
    )

    tokens = {}
    token_variants = True

    for levelname, cs_ident in levels.items():
        tokens[levelname] = {
            'root': [
                # method names
                (r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)'  # return type
                 r'(' + cs_ident + ')'                            # method name
                 r'(\s*)(\()',                                    # signature start
                 bygroups(using(this), Name.Function, Text, Punctuation)),
                (r'^\s*\[.*?\]', Name.Attribute),
                (r'[^\S\n]+', Text),
                (r'\\\n', Text),  # line continuation
                (r'//.*?\n', Comment.Single),
                (r'/[*].*?[*]/', Comment.Multiline),
                (r'\n', Text),
                (r'\$\s*"', String, 'splice-string'),
                (r'\$\s*<#', String, 'splice-string2'),
                (r'<#', String, 'recursive-string'),

                (r'(<\[)\s*(' + cs_ident + ':)?', Keyword),
                (r'\]\>', Keyword),

                # quasiquotation only
                (r'\$' + cs_ident, Name),
                (r'(\$)(\()', bygroups(Name, Punctuation),
                 'splice-string-content'),

                (r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
                (r'[{}]', Punctuation),
                (r'@"(""|[^"])*"', String),
                (r'"(\\\\|\\"|[^"\n])*["\n]', String),
                (r"'\\.'|'[^\\]'", String.Char),
                (r"0[xX][0-9a-fA-F]+[Ll]?", Number),
                (r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?[flFLdD]?", Number),
                (r'#[ \t]*(if|endif|else|elif|define|undef|'
                 r'line|error|warning|region|endregion|pragma)\b.*?\n',
                 Comment.Preproc),
                (r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Text,
                                                       Keyword)),
                (r'(abstract|and|as|base|catch|def|delegate|'
                 r'enum|event|extern|false|finally|'
                 r'fun|implements|interface|internal|'
                 r'is|macro|match|matches|module|mutable|new|'
                 r'null|out|override|params|partial|private|'
                 r'protected|public|ref|sealed|static|'
                 r'syntax|this|throw|true|try|type|typeof|'
                 r'virtual|volatile|when|where|with|'
                 r'assert|assert2|async|break|checked|continue|do|else|'
                 r'ensures|for|foreach|if|late|lock|new|nolate|'
                 r'otherwise|regexp|repeat|requires|return|surroundwith|'
                 r'unchecked|unless|using|while|yield)\b', Keyword),
                (r'(global)(::)', bygroups(Keyword, Punctuation)),
                (r'(bool|byte|char|decimal|double|float|int|long|object|sbyte|'
                 r'short|string|uint|ulong|ushort|void|array|list)\b\??',
                 Keyword.Type),
                (r'(:>?)\s*(' + cs_ident + r'\??)',
                 bygroups(Punctuation, Keyword.Type)),
                (r'(class|struct|variant|module)(\s+)',
                 bygroups(Keyword, Text), 'class'),
                (r'(namespace|using)(\s+)', bygroups(Keyword, Text),
                 'namespace'),
                (cs_ident, Name),
            ],
            'class': [
                (cs_ident, Name.Class, '#pop')
            ],
            'namespace': [
                (r'(?=\()', Text, '#pop'),  # using (resource)
                ('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop')
            ],
            'splice-string': [
                (r'[^"$]', String),
                (r'\$' + cs_ident, Name),
                (r'(\$)(\()', bygroups(Name, Punctuation),
                 'splice-string-content'),
                (r'\\"', String),
                (r'"', String, '#pop')
            ],
            'splice-string2': [
                (r'[^#<>$]', String),
                (r'\$' + cs_ident, Name),
                (r'(\$)(\()', bygroups(Name, Punctuation),
                 'splice-string-content'),
                (r'<#', String, '#push'),
                (r'#>', String, '#pop')
            ],
            'recursive-string': [
                (r'[^#<>]', String),
                (r'<#', String, '#push'),
                (r'#>', String, '#pop')
            ],
            'splice-string-content': [
                (r'if|match', Keyword),
                (r'[~!%^&*+=|\[\]:;,.<>/?-\\"$ ]', Punctuation),
                (cs_ident, Name),
                (r'\d+', Number),
                (r'\(', Punctuation, '#push'),
                (r'\)', Punctuation, '#pop')
            ]
        }

    def __init__(self, **options):
        level = get_choice_opt(options, 'unicodelevel', self.tokens.keys(),
                               'basic')
        if level not in self._all_tokens:
            # compile the regexes now
            self._tokens = self.__class__.process_tokendef(level)
        else:
            self._tokens = self._all_tokens[level]

        RegexLexer.__init__(self, **options)


class BooLexer(RegexLexer):
    """
    For `Boo <http://boo.codehaus.org/>`_ source code.
    """

    name = 'Boo'
    aliases = ['boo']
    filenames = ['*.boo']
    mimetypes = ['text/x-boo']

    tokens = {
        'root': [
            (r'\s+', Text),
            (r'(#|//).*$', Comment.Single),
            (r'/[*]', Comment.Multiline, 'comment'),
            (r'[]{}:(),.;[]', Punctuation),
            (r'\\\n', Text),
            (r'\\', Text),
            (r'(in|is|and|or|not)\b', Operator.Word),
            (r'/(\\\\|\\/|[^/\s])/', String.Regex),
            (r'@/(\\\\|\\/|[^/])*/', String.Regex),
            (r'=~|!=|==|<<|>>|[-+/*%=<>&^|]', Operator),
            (r'(as|abstract|callable|constructor|destructor|do|import|'
             r'enum|event|final|get|interface|internal|of|override|'
             r'partial|private|protected|public|return|set|static|'
             r'struct|transient|virtual|yield|super|and|break|cast|'
             r'continue|elif|else|ensure|except|for|given|goto|if|in|'
             r'is|isa|not|or|otherwise|pass|raise|ref|try|unless|when|'
             r'while|from|as)\b', Keyword),
            (r'def(?=\s+\(.*?\))', Keyword),
            (r'(def)(\s+)', bygroups(Keyword, Text), 'funcname'),
            (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
            (r'(namespace)(\s+)', bygroups(Keyword, Text), 'namespace'),
            (r'(?<!\.)(true|false|null|self|__eval__|__switch__|array|'
             r'assert|checked|enumerate|filter|getter|len|lock|map|'
             r'matrix|max|min|normalArrayIndexing|print|property|range|'
             r'rawArrayIndexing|required|typeof|unchecked|using|'
             r'yieldAll|zip)\b', Name.Builtin),
            (r'"""(\\\\|\\"|.*?)"""', String.Double),
            (r'"(\\\\|\\"|[^"]*?)"', String.Double),
            (r"'(\\\\|\\'|[^']*?)'", String.Single),
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
            (r'(\d+\.\d*|\d*\.\d+)([fF][+-]?[0-9]+)?', Number.Float),
            (r'[0-9][0-9\.]*(ms?|d|h|s)', Number),
            (r'0\d+', Number.Oct),
            (r'0x[a-fA-F0-9]+', Number.Hex),
            (r'\d+L', Number.Integer.Long),
            (r'\d+', Number.Integer),
        ],
        'comment': [
            ('/[*]', Comment.Multiline, '#push'),
            ('[*]/', Comment.Multiline, '#pop'),
            ('[^/*]', Comment.Multiline),
            ('[*/]', Comment.Multiline)
        ],
        'funcname': [
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Function, '#pop')
        ],
        'classname': [
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop')
        ],
        'namespace': [
            ('[a-zA-Z_][a-zA-Z0-9_.]*', Name.Namespace, '#pop')
        ]
    }


class VbNetLexer(RegexLexer):
    """
    For
    `Visual Basic.NET <http://msdn2.microsoft.com/en-us/vbasic/default.aspx>`_
    source code.
    """

    name = 'VB.net'
    aliases = ['vb.net', 'vbnet']
    filenames = ['*.vb', '*.bas']
    mimetypes = ['text/x-vbnet', 'text/x-vba']  # (?)

    flags = re.MULTILINE | re.IGNORECASE
    tokens = {
        'root': [
            (r'^\s*<.*?>', Name.Attribute),
            (r'\s+', Text),
            (r'\n', Text),
            (r'rem\b.*?\n', Comment),
            (r"'.*?\n", Comment),
            (r'#If\s.*?\sThen|#ElseIf\s.*?\sThen|#End\s+If|#Const|'
             r'#ExternalSource.*?\n|#End\s+ExternalSource|'
             r'#Region.*?\n|#End\s+Region|#ExternalChecksum',
             Comment.Preproc),
            (r'[\(\){}!#,.:]', Punctuation),
            (r'Option\s+(Strict|Explicit|Compare)\s+'
             r'(On|Off|Binary|Text)', Keyword.Declaration),
            (r'(?<!\.)(AddHandler|Alias|'
             r'ByRef|ByVal|Call|Case|Catch|CBool|CByte|CChar|CDate|'
             r'CDec|CDbl|CInt|CLng|CObj|Continue|CSByte|CShort|'
             r'CSng|CStr|CType|CUInt|CULng|CUShort|Declare|'
             r'Default|Delegate|DirectCast|Do|Each|Else|ElseIf|'
             r'EndIf|Erase|Error|Event|Exit|False|Finally|For|'
             r'Friend|Get|Global|GoSub|GoTo|Handles|If|'
             r'Implements|Inherits|Interface|'
             r'Let|Lib|Loop|Me|MustInherit|'
             r'MustOverride|MyBase|MyClass|Narrowing|New|Next|'
             r'Not|Nothing|NotInheritable|NotOverridable|Of|On|'
             r'Operator|Option|Optional|Overloads|Overridable|'
             r'Overrides|ParamArray|Partial|Private|Protected|'
             r'Public|RaiseEvent|ReadOnly|ReDim|RemoveHandler|Resume|'
             r'Return|Select|Set|Shadows|Shared|Single|'
             r'Static|Step|Stop|SyncLock|Then|'
             r'Throw|To|True|Try|TryCast|Wend|'
             r'Using|When|While|Widening|With|WithEvents|'
             r'WriteOnly)\b', Keyword),
            (r'(?<!\.)End\b', Keyword, 'end'),
            (r'(?<!\.)(Dim|Const)\b', Keyword, 'dim'),
            (r'(?<!\.)(Function|Sub|Property)(\s+)',
             bygroups(Keyword, Text), 'funcname'),
            (r'(?<!\.)(Class|Structure|Enum)(\s+)',
             bygroups(Keyword, Text), 'classname'),
            (r'(?<!\.)(Module|Namespace|Imports)(\s+)',
             bygroups(Keyword, Text), 'namespace'),
            (r'(?<!\.)(Boolean|Byte|Char|Date|Decimal|Double|Integer|Long|'
             r'Object|SByte|Short|Single|String|Variant|UInteger|ULong|'
             r'UShort)\b', Keyword.Type),
            (r'(?<!\.)(AddressOf|And|AndAlso|As|GetType|In|Is|IsNot|Like|Mod|'
             r'Or|OrElse|TypeOf|Xor)\b', Operator.Word),
            (r'&=|[*]=|/=|\\=|\^=|\+=|-=|<<=|>>=|<<|>>|:=|'
             r'<=|>=|<>|[-&*/\\^+=<>]',
             Operator),
            ('"', String, 'string'),
            ('[a-zA-Z_][a-zA-Z0-9_]*[%&@!#$]?', Name),
            ('#.*?#', Literal.Date),
            (r'(\d+\.\d*|\d*\.\d+)([fF][+-]?[0-9]+)?', Number.Float),
            (r'\d+([SILDFR]|US|UI|UL)?', Number.Integer),
            (r'&H[0-9a-f]+([SILDFR]|US|UI|UL)?', Number.Integer),
            (r'&O[0-7]+([SILDFR]|US|UI|UL)?', Number.Integer),
            (r'_\n', Text),  # Line continuation
        ],
        'string': [
            (r'""', String),
            (r'"C?', String, '#pop'),
            (r'[^"]+', String),
        ],
        'dim': [
            (r'[a-z_][a-z0-9_]*', Name.Variable, '#pop'),
            (r'', Text, '#pop'),  # any other syntax
        ],
        'funcname': [
            (r'[a-z_][a-z0-9_]*', Name.Function, '#pop'),
        ],
        'classname': [
            (r'[a-z_][a-z0-9_]*', Name.Class, '#pop'),
        ],
        'namespace': [
            (r'[a-z_][a-z0-9_.]*', Name.Namespace, '#pop'),
        ],
        'end': [
            (r'\s+', Text),
            (r'(Function|Sub|Property|Class|Structure|Enum|Module|Namespace)\b',
             Keyword, '#pop'),
            (r'', Text, '#pop'),
        ]
    }


class GenericAspxLexer(RegexLexer):
    """
    Lexer for ASP.NET pages.
    """

    name = 'aspx-gen'
    filenames = []
    mimetypes = []

    flags = re.DOTALL

    tokens = {
        'root': [
            (r'(<%[@=#]?)(.*?)(%>)', bygroups(Name.Tag, Other, Name.Tag)),
            (r'(<script.*?>)(.*?)(</script>)', bygroups(using(XmlLexer),
                                                        Other,
                                                        using(XmlLexer))),
            (r'(.+?)(?=<)', using(XmlLexer)),
            (r'.+', using(XmlLexer)),
        ],
    }


#TODO support multiple languages within the same source file
class CSharpAspxLexer(DelegatingLexer):
    """
    Lexer for highlighting C# within ASP.NET pages.
    """

    name = 'aspx-cs'
    aliases = ['aspx-cs']
    filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd']
    mimetypes = []

    def __init__(self, **options):
        super(CSharpAspxLexer, self).__init__(CSharpLexer, GenericAspxLexer,
                                              **options)

    def analyse_text(text):
        if re.search(r'Page\s*Language="C#"', text, re.I) is not None:
            return 0.2
        elif re.search(r'script[^>]+language=["\']C#', text, re.I) is not None:
            return 0.15


class VbNetAspxLexer(DelegatingLexer):
    """
    Lexer for highlighting Visual Basic.net within ASP.NET pages.
    """

    name = 'aspx-vb'
    aliases = ['aspx-vb']
    filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd']
    mimetypes = []

    def __init__(self, **options):
        super(VbNetAspxLexer, self).__init__(VbNetLexer, GenericAspxLexer,
                                             **options)

    def analyse_text(text):
        if re.search(r'Page\s*Language="Vb"', text, re.I) is not None:
            return 0.2
        elif re.search(r'script[^>]+language=["\']vb', text, re.I) is not None:
            return 0.15


# Very close to functional.OcamlLexer
class FSharpLexer(RegexLexer):
    """
    For the F# language (version 3.0).

    *New in Pygments 1.5.*
    """

    name = 'FSharp'
    aliases = ['fsharp']
    filenames = ['*.fs', '*.fsi']
    mimetypes = ['text/x-fsharp']

    keywords = [
        'abstract', 'as', 'assert', 'base', 'begin', 'class', 'default',
        'delegate', 'do!', 'do', 'done', 'downcast', 'downto', 'elif', 'else',
        'end', 'exception', 'extern', 'false', 'finally', 'for', 'function',
        'fun', 'global', 'if', 'inherit', 'inline', 'interface', 'internal',
        'in', 'lazy', 'let!', 'let', 'match', 'member', 'module', 'mutable',
        'namespace', 'new', 'null', 'of', 'open', 'override', 'private', 'public',
        'rec', 'return!', 'return', 'select', 'static', 'struct', 'then', 'to',
        'true', 'try', 'type', 'upcast', 'use!', 'use', 'val', 'void', 'when',
        'while', 'with', 'yield!', 'yield',
    ]
    # Reserved words; cannot hurt to color them as keywords too.
    keywords += [
        'atomic', 'break', 'checked', 'component', 'const', 'constraint',
        'constructor', 'continue', 'eager', 'event', 'external', 'fixed',
        'functor', 'include', 'method', 'mixin', 'object', 'parallel',
        'process', 'protected', 'pure', 'sealed', 'tailcall', 'trait',
        'virtual', 'volatile',
    ]
    keyopts = [
        '!=', '#', '&&', '&', '\(', '\)', '\*', '\+', ',', '-\.',
        '->', '-', '\.\.', '\.', '::', ':=', ':>', ':', ';;', ';', '<-',
        '<\]', '<', '>\]', '>', '\?\?', '\?', '\[<', '\[\|', '\[', '\]',
        '_', '`', '{', '\|\]', '\|', '}', '~', '<@@', '<@', '=', '@>', '@@>',
    ]

    operators = r'[!$%&*+\./:<=>?@^|~-]'
    word_operators = ['and', 'or', 'not']
    prefix_syms = r'[!?~]'
    infix_syms = r'[=<>@^|&+\*/$%-]'
    primitives = [
        'sbyte', 'byte', 'char', 'nativeint', 'unativeint', 'float32', 'single',
        'float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32',
        'uint32', 'int64', 'uint64', 'decimal', 'unit', 'bool', 'string',
        'list', 'exn', 'obj', 'enum',
    ]

    # See http://msdn.microsoft.com/en-us/library/dd233181.aspx and/or
    # http://fsharp.org/about/files/spec.pdf for reference.  Good luck.

    tokens = {
        'escape-sequence': [
            (r'\\[\\\"\'ntbrafv]', String.Escape),
            (r'\\[0-9]{3}', String.Escape),
            (r'\\u[0-9a-fA-F]{4}', String.Escape),
            (r'\\U[0-9a-fA-F]{8}', String.Escape),
        ],
        'root': [
            (r'\s+', Text),
            (r'\(\)|\[\]', Name.Builtin.Pseudo),
            (r'\b(?<!\.)([A-Z][A-Za-z0-9_\']*)(?=\s*\.)',
             Name.Namespace, 'dotted'),
            (r'\b([A-Z][A-Za-z0-9_\']*)', Name),
            (r'///.*?\n', String.Doc),
            (r'//.*?\n', Comment.Single),
            (r'\(\*(?!\))', Comment, 'comment'),

            (r'@"', String, 'lstring'),
            (r'"""', String, 'tqs'),
            (r'"', String, 'string'),

            (r'\b(open|module)(\s+)([a-zA-Z0-9_.]+)',
             bygroups(Keyword, Text, Name.Namespace)),
            (r'\b(let!?)(\s+)([a-zA-Z0-9_]+)',
             bygroups(Keyword, Text, Name.Variable)),
            (r'\b(type)(\s+)([a-zA-Z0-9_]+)',
             bygroups(Keyword, Text, Name.Class)),
            (r'\b(member|override)(\s+)([a-zA-Z0-9_]+)(\.)([a-zA-Z0-9_]+)',
             bygroups(Keyword, Text, Name, Punctuation, Name.Function)),
            (r'\b(%s)\b' % '|'.join(keywords), Keyword),
            (r'(%s)' % '|'.join(keyopts), Operator),
            (r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
            (r'\b(%s)\b' % '|'.join(word_operators), Operator.Word),
            (r'\b(%s)\b' % '|'.join(primitives), Keyword.Type),
            (r'#[ \t]*(if|endif|else|line|nowarn|light|\d+)\b.*?\n',
             Comment.Preproc),

            (r"[^\W\d][\w']*", Name),

            (r'\d[\d_]*[uU]?[yslLnQRZINGmM]?', Number.Integer),
            (r'0[xX][\da-fA-F][\da-fA-F_]*[uU]?[yslLn]?[fF]?', Number.Hex),
            (r'0[oO][0-7][0-7_]*[uU]?[yslLn]?', Number.Oct),
            (r'0[bB][01][01_]*[uU]?[yslLn]?', Number.Binary),
            (r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)[fFmM]?',
             Number.Float),

            (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'B?",
             String.Char),
            (r"'.'", String.Char),
            (r"'", Keyword),  # a stray quote is another syntax element

            (r'[~?][a-z][\w\']*:', Name.Variable),
        ],
        'dotted': [
            (r'\s+', Text),
            (r'\.', Punctuation),
            (r'[A-Z][A-Za-z0-9_\']*(?=\s*\.)', Name.Namespace),
            (r'[A-Z][A-Za-z0-9_\']*', Name, '#pop'),
            (r'[a-z_][A-Za-z0-9_\']*', Name, '#pop'),
        ],
        'comment': [
            (r'[^(*)@"]+', Comment),
            (r'\(\*', Comment, '#push'),
            (r'\*\)', Comment, '#pop'),
            # comments cannot be closed within strings in comments
            (r'@"', String, 'lstring'),
            (r'"""', String, 'tqs'),
            (r'"', String, 'string'),
            (r'[(*)@]', Comment),
        ],
        'string': [
            (r'[^\\"]+', String),
            include('escape-sequence'),
            (r'\\\n', String),
            (r'\n', String),  # newlines are allowed in any string
            (r'"B?', String, '#pop'),
        ],
        'lstring': [
            (r'[^"]+', String),
            (r'\n', String),
            (r'""', String),
            (r'"B?', String, '#pop'),
        ],
        'tqs': [
            (r'[^"]+', String),
            (r'\n', String),
            (r'"""B?', String, '#pop'),
            (r'"', String),
        ],
    }
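As the CSharpLexer docstring above explains, the identifier regex is selected by the unicodelevel option. A minimal usage sketch follows, assuming the stock Pygments package layout; this vendored copy lives under wakatime/packages/pygments2, so the real import path may differ.

# Usage sketch; import paths assume a normally installed Pygments.
from pygments import highlight
from pygments.lexers.dotnet import CSharpLexer
from pygments.formatters import TerminalFormatter

code = u'class Greeter { void Say() { System.Console.WriteLine("hi"); } }'
# 'none' allows only ASCII identifiers and is the fastest of the three
# levels; 'basic' (the default) and 'full' trade speed for Unicode coverage.
print highlight(code, CSharpLexer(unicodelevel='none'), TerminalFormatter())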
packages/wakatime/wakatime/packages/pygments2/lexers/functional.py (new file, 2731 lines; diff suppressed because it is too large)
packages/wakatime/wakatime/packages/pygments2/lexers/jvm.py (new file, 1112 lines; diff suppressed because it is too large)
packages/wakatime/wakatime/packages/pygments2/lexers/other.py (new file, 3778 lines; diff suppressed because it is too large)
packages/wakatime/wakatime/packages/pygments2/lexers/special.py (new file, 100 lines)
@@ -0,0 +1,100 @@
# -*- coding: utf-8 -*-
"""
    pygments.lexers.special
    ~~~~~~~~~~~~~~~~~~~~~~~

    Special lexers.

    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import re
import cStringIO

from pygments.lexer import Lexer
from pygments.token import Token, Error, Text
from pygments.util import get_choice_opt, b


__all__ = ['TextLexer', 'RawTokenLexer']


class TextLexer(Lexer):
    """
    "Null" lexer, doesn't highlight anything.
    """
    name = 'Text only'
    aliases = ['text']
    filenames = ['*.txt']
    mimetypes = ['text/plain']

    def get_tokens_unprocessed(self, text):
        yield 0, Text, text


_ttype_cache = {}

line_re = re.compile(b('.*?\n'))


class RawTokenLexer(Lexer):
    """
    Recreate a token stream formatted with the `RawTokenFormatter`. This
    lexer raises exceptions during parsing if the token stream in the
    file is malformed.

    Additional options accepted:

    `compress`
        If set to ``"gz"`` or ``"bz2"``, decompress the token stream with
        the given compression algorithm before lexing (default: ``""``).
    """
    name = 'Raw token data'
    aliases = ['raw']
    filenames = []
    mimetypes = ['application/x-pygments-tokens']

    def __init__(self, **options):
        self.compress = get_choice_opt(options, 'compress',
                                       ['', 'none', 'gz', 'bz2'], '')
        Lexer.__init__(self, **options)

    def get_tokens(self, text):
        if isinstance(text, unicode):
            # raw token stream never has any non-ASCII characters
            text = text.encode('ascii')
        if self.compress == 'gz':
            import gzip
            gzipfile = gzip.GzipFile('', 'rb', 9, cStringIO.StringIO(text))
            text = gzipfile.read()
        elif self.compress == 'bz2':
            import bz2
            text = bz2.decompress(text)

        # do not call Lexer.get_tokens() because we do not want Unicode
        # decoding to occur, and stripping is not optional.
        text = text.strip(b('\n')) + b('\n')
        for i, t, v in self.get_tokens_unprocessed(text):
            yield t, v

    def get_tokens_unprocessed(self, text):
        length = 0
        for match in line_re.finditer(text):
            try:
                ttypestr, val = match.group().split(b('\t'), 1)
            except ValueError:
                val = match.group().decode(self.encoding)
                ttype = Error
            else:
                ttype = _ttype_cache.get(ttypestr)
                if not ttype:
                    ttype = Token
                    ttypes = ttypestr.split('.')[1:]
                    for ttype_ in ttypes:
                        if not ttype_ or not ttype_[0].isupper():
                            raise ValueError('malformed token name')
                        ttype = getattr(ttype, ttype_)
                    _ttype_cache[ttypestr] = ttype
                val = val[2:-2].decode('unicode-escape')
            yield length, ttype, val
            length += len(val)
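RawTokenLexer is the reading half of a round trip: RawTokenFormatter (assumed available from pygments.formatters, as in stock Pygments 1.x) serializes each token as a "Token.Type<TAB>repr(value)" line, and this lexer parses such lines back into tokens. A short round-trip sketch:

# Round-trip sketch; RawTokenFormatter is assumed from stock Pygments.
from pygments import highlight, lex
from pygments.lexers import PythonLexer
from pygments.lexers.special import RawTokenLexer
from pygments.formatters import RawTokenFormatter

raw = highlight(u'print 42', PythonLexer(), RawTokenFormatter())
# raw is a bytestring of tab-separated token-name/value lines; re-lex it.
for ttype, value in lex(raw, RawTokenLexer()):
    print ttype, repr(value)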
packages/wakatime/wakatime/packages/pygments2/lexers/sql.py (new file, 559 lines)
|
@ -0,0 +1,559 @@
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
"""
|
||||||
|
pygments.lexers.sql
|
||||||
|
~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Lexers for various SQL dialects and related interactive sessions.
|
||||||
|
|
||||||
|
Postgres specific lexers:
|
||||||
|
|
||||||
|
`PostgresLexer`
|
||||||
|
A SQL lexer for the PostgreSQL dialect. Differences w.r.t. the SQL
|
||||||
|
lexer are:
|
||||||
|
|
||||||
|
- keywords and data types list parsed from the PG docs (run the
|
||||||
|
`_postgres_builtins` module to update them);
|
||||||
|
- Content of $-strings parsed using a specific lexer, e.g. the content
|
||||||
|
of a PL/Python function is parsed using the Python lexer;
|
||||||
|
- parse PG specific constructs: E-strings, $-strings, U&-strings,
|
||||||
|
different operators and punctuation.
|
||||||
|
|
||||||
|
`PlPgsqlLexer`
|
||||||
|
A lexer for the PL/pgSQL language. Adds a few specific construct on
|
||||||
|
top of the PG SQL lexer (such as <<label>>).
|
||||||
|
|
||||||
|
`PostgresConsoleLexer`
|
||||||
|
A lexer to highlight an interactive psql session:
|
||||||
|
|
||||||
|
- identifies the prompt and does its best to detect the end of command
|
||||||
|
in multiline statement where not all the lines are prefixed by a
|
||||||
|
prompt, telling them apart from the output;
|
||||||
|
- highlights errors in the output and notification levels;
|
||||||
|
- handles psql backslash commands.
|
||||||
|
|
||||||
|
The ``tests/examplefiles`` contains a few test files with data to be
|
||||||
|
parsed by these lexers.
|
||||||
|
|
||||||
|
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
|
||||||
|
:license: BSD, see LICENSE for details.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import re
|
||||||
|
|
||||||
|
from pygments.lexer import Lexer, RegexLexer, do_insertions, bygroups
|
||||||
|
from pygments.token import Punctuation, \
|
||||||
|
Text, Comment, Operator, Keyword, Name, String, Number, Generic
|
||||||
|
from pygments.lexers import get_lexer_by_name, ClassNotFound
|
||||||
|
|
||||||
|
from pygments.lexers._postgres_builtins import KEYWORDS, DATATYPES, \
|
||||||
|
PSEUDO_TYPES, PLPGSQL_KEYWORDS
|
||||||
|
|
||||||
|
|
||||||
|
__all__ = ['PostgresLexer', 'PlPgsqlLexer', 'PostgresConsoleLexer',
|
||||||
|
'SqlLexer', 'MySqlLexer', 'SqliteConsoleLexer']
|
||||||
|
|
||||||
|
line_re = re.compile('.*?\n')
|
||||||
|
|
||||||
|
language_re = re.compile(r"\s+LANGUAGE\s+'?(\w+)'?", re.IGNORECASE)
|
||||||
|
|
||||||
|
def language_callback(lexer, match):
|
||||||
|
"""Parse the content of a $-string using a lexer
|
||||||
|
|
||||||
|
The lexer is chosen looking for a nearby LANGUAGE.
|
||||||
|
"""
|
||||||
|
l = None
|
||||||
|
m = language_re.match(lexer.text[match.end():match.end()+100])
|
||||||
|
if m is not None:
|
||||||
|
l = lexer._get_lexer(m.group(1))
|
||||||
|
else:
|
||||||
|
m = list(language_re.finditer(
|
||||||
|
lexer.text[max(0, match.start()-100):match.start()]))
|
||||||
|
if m:
|
||||||
|
l = lexer._get_lexer(m[-1].group(1))
|
||||||
|
|
||||||
|
if l:
|
||||||
|
yield (match.start(1), String, match.group(1))
|
||||||
|
for x in l.get_tokens_unprocessed(match.group(2)):
|
||||||
|
yield x
|
||||||
|
yield (match.start(3), String, match.group(3))
|
||||||
|
|
||||||
|
else:
|
||||||
|
yield (match.start(), String, match.group())
|
||||||
|
|
||||||
|
|
||||||
|
class PostgresBase(object):
|
||||||
|
"""Base class for Postgres-related lexers.
|
||||||
|
|
||||||
|
This is implemented as a mixin to avoid the Lexer metaclass kicking in.
|
||||||
|
this way the different lexer don't have a common Lexer ancestor. If they
|
||||||
|
had, _tokens could be created on this ancestor and not updated for the
|
||||||
|
other classes, resulting e.g. in PL/pgSQL parsed as SQL. This shortcoming
|
||||||
|
seem to suggest that regexp lexers are not really subclassable.
|
||||||
|
"""
|
||||||
|
def get_tokens_unprocessed(self, text, *args):
|
||||||
|
# Have a copy of the entire text to be used by `language_callback`.
|
||||||
|
self.text = text
|
||||||
|
for x in super(PostgresBase, self).get_tokens_unprocessed(
|
||||||
|
text, *args):
|
||||||
|
yield x
|
||||||
|
|
||||||
|
def _get_lexer(self, lang):
|
||||||
|
if lang.lower() == 'sql':
|
||||||
|
return get_lexer_by_name('postgresql', **self.options)
|
||||||
|
|
||||||
|
tries = [ lang ]
|
||||||
|
if lang.startswith('pl'):
|
||||||
|
tries.append(lang[2:])
|
||||||
|
if lang.endswith('u'):
|
||||||
|
tries.append(lang[:-1])
|
||||||
|
if lang.startswith('pl') and lang.endswith('u'):
|
||||||
|
tries.append(lang[2:-1])
|
||||||
|
|
||||||
|
for l in tries:
|
||||||
|
try:
|
||||||
|
return get_lexer_by_name(l, **self.options)
|
||||||
|
except ClassNotFound:
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
# TODO: better logging
|
||||||
|
# print >>sys.stderr, "language not found:", lang
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
class PostgresLexer(PostgresBase, RegexLexer):
|
||||||
|
"""
|
||||||
|
Lexer for the PostgreSQL dialect of SQL.
|
||||||
|
|
||||||
|
*New in Pygments 1.5.*
|
||||||
|
"""
|
||||||
|
|
||||||
|
name = 'PostgreSQL SQL dialect'
|
||||||
|
aliases = ['postgresql', 'postgres']
|
||||||
|
mimetypes = ['text/x-postgresql']
|
||||||
|
|
||||||
|
flags = re.IGNORECASE
|
||||||
|
tokens = {
|
||||||
|
'root': [
|
||||||
|
(r'\s+', Text),
|
||||||
|
(r'--.*?\n', Comment.Single),
|
||||||
|
(r'/\*', Comment.Multiline, 'multiline-comments'),
|
||||||
|
(r'(' + '|'.join([s.replace(" ", "\s+")
|
||||||
|
for s in DATATYPES + PSEUDO_TYPES])
|
||||||
|
+ r')\b', Name.Builtin),
|
||||||
|
(r'(' + '|'.join(KEYWORDS) + r')\b', Keyword),
|
||||||
|
(r'[+*/<>=~!@#%^&|`?-]+', Operator),
|
||||||
|
(r'::', Operator), # cast
|
||||||
|
(r'\$\d+', Name.Variable),
|
||||||
|
(r'([0-9]*\.[0-9]*|[0-9]+)(e[+-]?[0-9]+)?', Number.Float),
|
||||||
|
(r'[0-9]+', Number.Integer),
|
||||||
|
(r"(E|U&)?'(''|[^'])*'", String.Single),
|
||||||
|
(r'(U&)?"(""|[^"])*"', String.Name), # quoted identifier
|
||||||
|
(r'(?s)(\$[^\$]*\$)(.*?)(\1)', language_callback),
|
||||||
|
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
|
||||||
|
|
||||||
|
# psql variable in SQL
|
||||||
|
(r""":(['"]?)[a-z][a-z0-9_]*\b\1""", Name.Variable),
|
||||||
|
|
||||||
|
(r'[;:()\[\]\{\},\.]', Punctuation),
|
||||||
|
],
|
||||||
|
'multiline-comments': [
|
||||||
|
(r'/\*', Comment.Multiline, 'multiline-comments'),
|
||||||
|
(r'\*/', Comment.Multiline, '#pop'),
|
||||||
|
(r'[^/\*]+', Comment.Multiline),
|
||||||
|
(r'[/*]', Comment.Multiline)
|
||||||
|
],
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class PlPgsqlLexer(PostgresBase, RegexLexer):
|
||||||
|
"""
|
||||||
|
Handle the extra syntax in Pl/pgSQL language.
|
||||||
|
|
||||||
|
*New in Pygments 1.5.*
|
||||||
|
"""
|
||||||
|
name = 'PL/pgSQL'
|
||||||
|
aliases = ['plpgsql']
|
||||||
|
mimetypes = ['text/x-plpgsql']
|
||||||
|
|
||||||
|
flags = re.IGNORECASE
|
||||||
|
tokens = dict((k, l[:]) for (k, l) in PostgresLexer.tokens.iteritems())
|
||||||
|
|
||||||
|
# extend the keywords list
|
||||||
|
for i, pattern in enumerate(tokens['root']):
|
||||||
|
if pattern[1] == Keyword:
|
||||||
|
tokens['root'][i] = (
|
||||||
|
r'(' + '|'.join(KEYWORDS + PLPGSQL_KEYWORDS) + r')\b',
|
||||||
|
Keyword)
|
||||||
|
del i
|
||||||
|
break
|
||||||
|
else:
|
||||||
|
assert 0, "SQL keywords not found"
|
||||||
|
|
||||||
|
# Add specific PL/pgSQL rules (before the SQL ones)
|
||||||
|
tokens['root'][:0] = [
|
||||||
|
(r'\%[a-z][a-z0-9_]*\b', Name.Builtin), # actually, a datatype
|
||||||
|
(r':=', Operator),
|
||||||
|
(r'\<\<[a-z][a-z0-9_]*\>\>', Name.Label),
|
||||||
|
(r'\#[a-z][a-z0-9_]*\b', Keyword.Pseudo), # #variable_conflict
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
class PsqlRegexLexer(PostgresBase, RegexLexer):
|
||||||
|
"""
|
||||||
|
Extend the PostgresLexer adding support specific for psql commands.
|
||||||
|
|
||||||
|
This is not a complete psql lexer yet as it lacks prompt support
|
||||||
|
and output rendering.
|
||||||
|
"""
|
||||||
|
|
||||||
|
name = 'PostgreSQL console - regexp based lexer'
|
||||||
|
aliases = [] # not public
|
||||||
|
|
||||||
|
flags = re.IGNORECASE
|
||||||
|
tokens = dict((k, l[:]) for (k, l) in PostgresLexer.tokens.iteritems())
|
||||||
|
|
||||||
|
tokens['root'].append(
|
||||||
|
(r'\\[^\s]+', Keyword.Pseudo, 'psql-command'))
|
||||||
|
tokens['psql-command'] = [
|
||||||
|
(r'\n', Text, 'root'),
|
||||||
|
(r'\s+', Text),
|
||||||
|
(r'\\[^\s]+', Keyword.Pseudo),
|
||||||
|
(r""":(['"]?)[a-z][a-z0-9_]*\b\1""", Name.Variable),
|
||||||
|
(r"'(''|[^'])*'", String.Single),
|
||||||
|
(r"`([^`])*`", String.Backtick),
|
||||||
|
(r"[^\s]+", String.Symbol),
|
||||||
|
]
|
||||||
|
|
||||||
|
re_prompt = re.compile(r'^(\S.*?)??[=\-\(\$\'\"][#>]')
|
||||||
|
re_psql_command = re.compile(r'\s*\\')
|
||||||
|
re_end_command = re.compile(r';\s*(--.*?)?$')
|
||||||
|
re_psql_command = re.compile(r'(\s*)(\\.+?)(\s+)$')
|
||||||
|
re_error = re.compile(r'(ERROR|FATAL):')
|
||||||
|
re_message = re.compile(
|
||||||
|
r'((?:DEBUG|INFO|NOTICE|WARNING|ERROR|'
|
||||||
|
r'FATAL|HINT|DETAIL|CONTEXT|LINE [0-9]+):)(.*?\n)')
|
||||||
|
|
||||||
|
|
||||||
|
class lookahead(object):
|
||||||
|
"""Wrap an iterator and allow pushing back an item."""
|
||||||
|
def __init__(self, x):
|
||||||
|
self.iter = iter(x)
|
||||||
|
self._nextitem = None
|
||||||
|
def __iter__(self):
|
||||||
|
return self
|
||||||
|
def send(self, i):
|
||||||
|
self._nextitem = i
|
||||||
|
return i
|
||||||
|
def next(self):
|
||||||
|
if self._nextitem is not None:
|
||||||
|
ni = self._nextitem
|
||||||
|
self._nextitem = None
|
||||||
|
return ni
|
||||||
|
return self.iter.next()
|
||||||
|
|
||||||
|
|
||||||
|
class PostgresConsoleLexer(Lexer):
|
||||||
|
"""
|
||||||
|
Lexer for psql sessions.
|
||||||
|
|
||||||
|
*New in Pygments 1.5.*
|
||||||
|
"""
|
||||||
|
|
||||||
|
name = 'PostgreSQL console (psql)'
|
||||||
|
aliases = ['psql', 'postgresql-console', 'postgres-console']
|
||||||
|
mimetypes = ['text/x-postgresql-psql']
|
||||||
|
|
||||||
|
def get_tokens_unprocessed(self, data):
|
||||||
|
sql = PsqlRegexLexer(**self.options)
|
||||||
|
|
||||||
|
lines = lookahead(line_re.findall(data))
|
||||||
|
|
||||||
|
# prompt-output cycle
|
||||||
|
while 1:
|
||||||
|
|
||||||
|
# consume the lines of the command: start with an optional prompt
|
||||||
|
# and continue until the end of command is detected
|
||||||
|
curcode = ''
|
||||||
|
insertions = []
|
||||||
|
while 1:
|
||||||
|
try:
|
||||||
|
line = lines.next()
|
||||||
|
except StopIteration:
|
||||||
|
# allow the emission of partially collected items
|
||||||
|
# the repl loop will be broken below
|
||||||
|
break
|
||||||
|
|
||||||
|
# Identify a shell prompt in case of psql commandline example
|
||||||
|
if line.startswith('$') and not curcode:
|
||||||
|
lexer = get_lexer_by_name('console', **self.options)
|
||||||
|
for x in lexer.get_tokens_unprocessed(line):
|
||||||
|
yield x
|
||||||
|
break
|
||||||
|
|
||||||
|
# Identify a psql prompt
|
||||||
|
mprompt = re_prompt.match(line)
|
||||||
|
if mprompt is not None:
|
||||||
|
insertions.append((len(curcode),
|
||||||
|
[(0, Generic.Prompt, mprompt.group())]))
|
||||||
|
curcode += line[len(mprompt.group()):]
|
||||||
|
else:
|
||||||
|
curcode += line
|
||||||
|
|
||||||
|
# Check if this is the end of the command
|
||||||
|
# TODO: better handle multiline comments at the end with
|
||||||
|
# a lexer with an external state?
|
||||||
|
if re_psql_command.match(curcode) \
|
||||||
|
or re_end_command.search(curcode):
|
||||||
|
break
|
||||||
|
|
||||||
|
# Emit the combined stream of command and prompt(s)
|
||||||
|
for item in do_insertions(insertions,
|
||||||
|
sql.get_tokens_unprocessed(curcode)):
|
||||||
|
yield item
|
||||||
|
|
||||||
|
# Emit the output lines
|
||||||
|
out_token = Generic.Output
|
||||||
|
while 1:
|
||||||
|
line = lines.next()
|
||||||
|
mprompt = re_prompt.match(line)
|
||||||
|
if mprompt is not None:
|
||||||
|
# push the line back to have it processed by the prompt
|
||||||
|
lines.send(line)
|
||||||
|
break
|
||||||
|
|
||||||
|
mmsg = re_message.match(line)
|
||||||
|
if mmsg is not None:
|
||||||
|
if mmsg.group(1).startswith("ERROR") \
|
||||||
|
or mmsg.group(1).startswith("FATAL"):
|
||||||
|
out_token = Generic.Error
|
||||||
|
yield (mmsg.start(1), Generic.Strong, mmsg.group(1))
|
||||||
|
yield (mmsg.start(2), out_token, mmsg.group(2))
|
||||||
|
else:
|
||||||
|
yield (0, out_token, line)
|
||||||
|
|
||||||
|
|
||||||
|
class SqlLexer(RegexLexer):
|
||||||
|
"""
|
||||||
|
Lexer for Structured Query Language. Currently, this lexer does
|
||||||
|
not recognize any special syntax except ANSI SQL.
|
||||||
|
"""
|
||||||
|
|
||||||
|
name = 'SQL'
|
||||||
|
aliases = ['sql']
|
||||||
|
filenames = ['*.sql']
|
||||||
|
mimetypes = ['text/x-sql']
|
||||||
|
|
||||||
|
flags = re.IGNORECASE
|
||||||
|
tokens = {
|
||||||
|
'root': [
|
||||||
|
(r'\s+', Text),
|
||||||
|
(r'--.*?\n', Comment.Single),
|
||||||
|
(r'/\*', Comment.Multiline, 'multiline-comments'),
|
||||||
|
(r'(ABORT|ABS|ABSOLUTE|ACCESS|ADA|ADD|ADMIN|AFTER|AGGREGATE|'
|
||||||
|
r'ALIAS|ALL|ALLOCATE|ALTER|ANALYSE|ANALYZE|AND|ANY|ARE|AS|'
|
||||||
|
r'ASC|ASENSITIVE|ASSERTION|ASSIGNMENT|ASYMMETRIC|AT|ATOMIC|'
|
||||||
|
r'AUTHORIZATION|AVG|BACKWARD|BEFORE|BEGIN|BETWEEN|BITVAR|'
|
||||||
|
r'BIT_LENGTH|BOTH|BREADTH|BY|C|CACHE|CALL|CALLED|CARDINALITY|'
|
||||||
|
r'CASCADE|CASCADED|CASE|CAST|CATALOG|CATALOG_NAME|CHAIN|'
|
||||||
|
r'CHARACTERISTICS|CHARACTER_LENGTH|CHARACTER_SET_CATALOG|'
|
||||||
|
r'CHARACTER_SET_NAME|CHARACTER_SET_SCHEMA|CHAR_LENGTH|CHECK|'
|
||||||
|
r'CHECKED|CHECKPOINT|CLASS|CLASS_ORIGIN|CLOB|CLOSE|CLUSTER|'
|
||||||
|
r'COALSECE|COBOL|COLLATE|COLLATION|COLLATION_CATALOG|'
|
||||||
|
r'COLLATION_NAME|COLLATION_SCHEMA|COLUMN|COLUMN_NAME|'
|
||||||
|
r'COMMAND_FUNCTION|COMMAND_FUNCTION_CODE|COMMENT|COMMIT|'
|
||||||
|
r'COMMITTED|COMPLETION|CONDITION_NUMBER|CONNECT|CONNECTION|'
|
||||||
|
r'CONNECTION_NAME|CONSTRAINT|CONSTRAINTS|CONSTRAINT_CATALOG|'
|
||||||
|
r'CONSTRAINT_NAME|CONSTRAINT_SCHEMA|CONSTRUCTOR|CONTAINS|'
|
||||||
|
r'CONTINUE|CONVERSION|CONVERT|COPY|CORRESPONTING|COUNT|'
|
||||||
|
r'CREATE|CREATEDB|CREATEUSER|CROSS|CUBE|CURRENT|CURRENT_DATE|'
|
||||||
|
r'CURRENT_PATH|CURRENT_ROLE|CURRENT_TIME|CURRENT_TIMESTAMP|'
|
||||||
|
r'CURRENT_USER|CURSOR|CURSOR_NAME|CYCLE|DATA|DATABASE|'
|
||||||
|
r'DATETIME_INTERVAL_CODE|DATETIME_INTERVAL_PRECISION|DAY|'
|
||||||
|
r'DEALLOCATE|DECLARE|DEFAULT|DEFAULTS|DEFERRABLE|DEFERRED|'
|
||||||
|
r'DEFINED|DEFINER|DELETE|DELIMITER|DELIMITERS|DEREF|DESC|'
|
||||||
|
r'DESCRIBE|DESCRIPTOR|DESTROY|DESTRUCTOR|DETERMINISTIC|'
|
||||||
|
r'DIAGNOSTICS|DICTIONARY|DISCONNECT|DISPATCH|DISTINCT|DO|'
|
||||||
|
r'DOMAIN|DROP|DYNAMIC|DYNAMIC_FUNCTION|DYNAMIC_FUNCTION_CODE|'
|
||||||
|
r'EACH|ELSE|ENCODING|ENCRYPTED|END|END-EXEC|EQUALS|ESCAPE|EVERY|'
|
||||||
|
r'EXCEPTION|EXCEPT|EXCLUDING|EXCLUSIVE|EXEC|EXECUTE|EXISTING|'
|
||||||
|
r'EXISTS|EXPLAIN|EXTERNAL|EXTRACT|FALSE|FETCH|FINAL|FIRST|FOR|'
|
||||||
|
r'FORCE|FOREIGN|FORTRAN|FORWARD|FOUND|FREE|FREEZE|FROM|FULL|'
|
||||||
|
r'FUNCTION|G|GENERAL|GENERATED|GET|GLOBAL|GO|GOTO|GRANT|GRANTED|'
|
||||||
|
r'GROUP|GROUPING|HANDLER|HAVING|HIERARCHY|HOLD|HOST|IDENTITY|'
|
||||||
|
r'IGNORE|ILIKE|IMMEDIATE|IMMUTABLE|IMPLEMENTATION|IMPLICIT|IN|'
|
||||||
|
r'INCLUDING|INCREMENT|INDEX|INDITCATOR|INFIX|INHERITS|INITIALIZE|'
|
||||||
|
r'INITIALLY|INNER|INOUT|INPUT|INSENSITIVE|INSERT|INSTANTIABLE|'
|
||||||
|
r'INSTEAD|INTERSECT|INTO|INVOKER|IS|ISNULL|ISOLATION|ITERATE|JOIN|'
|
||||||
|
r'KEY|KEY_MEMBER|KEY_TYPE|LANCOMPILER|LANGUAGE|LARGE|LAST|'
|
||||||
|
r'LATERAL|LEADING|LEFT|LENGTH|LESS|LEVEL|LIKE|LIMIT|LISTEN|LOAD|'
|
||||||
|
r'LOCAL|LOCALTIME|LOCALTIMESTAMP|LOCATION|LOCATOR|LOCK|LOWER|'
|
||||||
|
r'MAP|MATCH|MAX|MAXVALUE|MESSAGE_LENGTH|MESSAGE_OCTET_LENGTH|'
|
||||||
|
r'MESSAGE_TEXT|METHOD|MIN|MINUTE|MINVALUE|MOD|MODE|MODIFIES|'
|
||||||
|
r'MODIFY|MONTH|MORE|MOVE|MUMPS|NAMES|NATIONAL|NATURAL|NCHAR|'
|
||||||
|
r'NCLOB|NEW|NEXT|NO|NOCREATEDB|NOCREATEUSER|NONE|NOT|NOTHING|'
|
||||||
|
r'NOTIFY|NOTNULL|NULL|NULLABLE|NULLIF|OBJECT|OCTET_LENGTH|OF|OFF|'
|
||||||
|
r'OFFSET|OIDS|OLD|ON|ONLY|OPEN|OPERATION|OPERATOR|OPTION|OPTIONS|'
|
||||||
|
r'OR|ORDER|ORDINALITY|OUT|OUTER|OUTPUT|OVERLAPS|OVERLAY|OVERRIDING|'
|
||||||
|
r'OWNER|PAD|PARAMETER|PARAMETERS|PARAMETER_MODE|PARAMATER_NAME|'
|
||||||
|
r'PARAMATER_ORDINAL_POSITION|PARAMETER_SPECIFIC_CATALOG|'
|
||||||
|
r'PARAMETER_SPECIFIC_NAME|PARAMATER_SPECIFIC_SCHEMA|PARTIAL|'
|
||||||
|
r'PASCAL|PENDANT|PLACING|PLI|POSITION|POSTFIX|PRECISION|PREFIX|'
|
||||||
|
r'PREORDER|PREPARE|PRESERVE|PRIMARY|PRIOR|PRIVILEGES|PROCEDURAL|'
|
||||||
|
r'PROCEDURE|PUBLIC|READ|READS|RECHECK|RECURSIVE|REF|REFERENCES|'
|
||||||
|
r'REFERENCING|REINDEX|RELATIVE|RENAME|REPEATABLE|REPLACE|RESET|'
|
||||||
|
r'RESTART|RESTRICT|RESULT|RETURN|RETURNED_LENGTH|'
|
||||||
|
r'RETURNED_OCTET_LENGTH|RETURNED_SQLSTATE|RETURNS|REVOKE|RIGHT|'
|
||||||
|
r'ROLE|ROLLBACK|ROLLUP|ROUTINE|ROUTINE_CATALOG|ROUTINE_NAME|'
|
||||||
|
r'ROUTINE_SCHEMA|ROW|ROWS|ROW_COUNT|RULE|SAVE_POINT|SCALE|SCHEMA|'
|
||||||
|
r'SCHEMA_NAME|SCOPE|SCROLL|SEARCH|SECOND|SECURITY|SELECT|SELF|'
|
||||||
|
r'SENSITIVE|SERIALIZABLE|SERVER_NAME|SESSION|SESSION_USER|SET|'
|
||||||
|
r'SETOF|SETS|SHARE|SHOW|SIMILAR|SIMPLE|SIZE|SOME|SOURCE|SPACE|'
|
||||||
|
r'SPECIFIC|SPECIFICTYPE|SPECIFIC_NAME|SQL|SQLCODE|SQLERROR|'
|
||||||
|
r'SQLEXCEPTION|SQLSTATE|SQLWARNINIG|STABLE|START|STATE|STATEMENT|'
|
||||||
|
r'STATIC|STATISTICS|STDIN|STDOUT|STORAGE|STRICT|STRUCTURE|STYPE|'
|
||||||
|
r'SUBCLASS_ORIGIN|SUBLIST|SUBSTRING|SUM|SYMMETRIC|SYSID|SYSTEM|'
|
||||||
|
r'SYSTEM_USER|TABLE|TABLE_NAME| TEMP|TEMPLATE|TEMPORARY|TERMINATE|'
|
||||||
|
r'THAN|THEN|TIMESTAMP|TIMEZONE_HOUR|TIMEZONE_MINUTE|TO|TOAST|'
|
||||||
|
r'TRAILING|TRANSATION|TRANSACTIONS_COMMITTED|'
|
||||||
|
r'TRANSACTIONS_ROLLED_BACK|TRANSATION_ACTIVE|TRANSFORM|'
|
||||||
|
r'TRANSFORMS|TRANSLATE|TRANSLATION|TREAT|TRIGGER|TRIGGER_CATALOG|'
|
||||||
|
r'TRIGGER_NAME|TRIGGER_SCHEMA|TRIM|TRUE|TRUNCATE|TRUSTED|TYPE|'
|
||||||
|
r'UNCOMMITTED|UNDER|UNENCRYPTED|UNION|UNIQUE|UNKNOWN|UNLISTEN|'
|
||||||
|
r'UNNAMED|UNNEST|UNTIL|UPDATE|UPPER|USAGE|USER|'
|
||||||
|
r'USER_DEFINED_TYPE_CATALOG|USER_DEFINED_TYPE_NAME|'
|
||||||
|
r'USER_DEFINED_TYPE_SCHEMA|USING|VACUUM|VALID|VALIDATOR|VALUES|'
|
||||||
|
r'VARIABLE|VERBOSE|VERSION|VIEW|VOLATILE|WHEN|WHENEVER|WHERE|'
|
||||||
|
r'WITH|WITHOUT|WORK|WRITE|YEAR|ZONE)\b', Keyword),
|
||||||
|
(r'(ARRAY|BIGINT|BINARY|BIT|BLOB|BOOLEAN|CHAR|CHARACTER|DATE|'
|
||||||
|
r'DEC|DECIMAL|FLOAT|INT|INTEGER|INTERVAL|NUMBER|NUMERIC|REAL|'
|
||||||
|
r'SERIAL|SMALLINT|VARCHAR|VARYING|INT8|SERIAL8|TEXT)\b',
|
||||||
|
Name.Builtin),
|
||||||
|
(r'[+*/<>=~!@#%^&|`?-]', Operator),
|
||||||
|
(r'[0-9]+', Number.Integer),
|
||||||
|
# TODO: Backslash escapes?
|
||||||
|
(r"'(''|[^'])*'", String.Single),
|
||||||
|
(r'"(""|[^"])*"', String.Symbol), # not a real string literal in ANSI SQL
|
||||||
|
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
|
||||||
|
(r'[;:()\[\],\.]', Punctuation)
|
||||||
|
],
|
||||||
|
'multiline-comments': [
|
||||||
|
(r'/\*', Comment.Multiline, 'multiline-comments'),
|
||||||
|
(r'\*/', Comment.Multiline, '#pop'),
|
||||||
|
(r'[^/\*]+', Comment.Multiline),
|
||||||
|
(r'[/*]', Comment.Multiline)
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class MySqlLexer(RegexLexer):
|
||||||
|
"""
|
||||||
|
Special lexer for MySQL.
|
||||||
|
"""
|
||||||
|
|
||||||
|
name = 'MySQL'
|
||||||
|
    aliases = ['mysql']
    mimetypes = ['text/x-mysql']

    flags = re.IGNORECASE
    tokens = {
        'root': [
            (r'\s+', Text),
            (r'(#|--\s+).*?\n', Comment.Single),
            (r'/\*', Comment.Multiline, 'multiline-comments'),
            (r'[0-9]+', Number.Integer),
            (r'[0-9]*\.[0-9]+(e[+-][0-9]+)', Number.Float),
            # TODO: add backslash escapes
            (r"'(''|[^'])*'", String.Single),
            (r'"(""|[^"])*"', String.Double),
            (r"`(``|[^`])*`", String.Symbol),
            (r'[+*/<>=~!@#%^&|`?-]', Operator),
            (r'\b(tinyint|smallint|mediumint|int|integer|bigint|date|'
             r'datetime|time|bit|bool|tinytext|mediumtext|longtext|text|'
             r'tinyblob|mediumblob|longblob|blob|float|double|double\s+'
             r'precision|real|numeric|dec|decimal|timestamp|year|char|'
             r'varchar|varbinary|varcharacter|enum|set)(\b\s*)(\()?',
             bygroups(Keyword.Type, Text, Punctuation)),
            (r'\b(add|all|alter|analyze|and|as|asc|asensitive|before|between|'
             r'bigint|binary|blob|both|by|call|cascade|case|change|char|'
             r'character|check|collate|column|condition|constraint|continue|'
             r'convert|create|cross|current_date|current_time|'
             r'current_timestamp|current_user|cursor|database|databases|'
             r'day_hour|day_microsecond|day_minute|day_second|dec|decimal|'
             r'declare|default|delayed|delete|desc|describe|deterministic|'
             r'distinct|distinctrow|div|double|drop|dual|each|else|elseif|'
             r'enclosed|escaped|exists|exit|explain|fetch|float|float4|float8'
             r'|for|force|foreign|from|fulltext|grant|group|having|'
             r'high_priority|hour_microsecond|hour_minute|hour_second|if|'
             r'ignore|in|index|infile|inner|inout|insensitive|insert|int|'
             r'int1|int2|int3|int4|int8|integer|interval|into|is|iterate|'
             r'join|key|keys|kill|leading|leave|left|like|limit|lines|load|'
             r'localtime|localtimestamp|lock|long|loop|low_priority|match|'
             r'minute_microsecond|minute_second|mod|modifies|natural|'
             r'no_write_to_binlog|not|numeric|on|optimize|option|optionally|'
             r'or|order|out|outer|outfile|precision|primary|procedure|purge|'
             r'raid0|read|reads|real|references|regexp|release|rename|repeat|'
             r'replace|require|restrict|return|revoke|right|rlike|schema|'
             r'schemas|second_microsecond|select|sensitive|separator|set|'
             r'show|smallint|soname|spatial|specific|sql|sql_big_result|'
             r'sql_calc_found_rows|sql_small_result|sqlexception|sqlstate|'
             r'sqlwarning|ssl|starting|straight_join|table|terminated|then|'
             r'to|trailing|trigger|undo|union|unique|unlock|unsigned|update|'
             r'usage|use|using|utc_date|utc_time|utc_timestamp|values|'
             r'varying|when|where|while|with|write|x509|xor|year_month|'
             r'zerofill)\b', Keyword),
            # TODO: this list is not complete
            (r'\b(auto_increment|engine|charset|tables)\b', Keyword.Pseudo),
            (r'(true|false|null)', Name.Constant),
            (r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*)(\()',
             bygroups(Name.Function, Text, Punctuation)),
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
            (r'@[A-Za-z0-9]*[._]*[A-Za-z0-9]*', Name.Variable),
            (r'[;:()\[\],\.]', Punctuation)
        ],
        'multiline-comments': [
            (r'/\*', Comment.Multiline, 'multiline-comments'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[^/\*]+', Comment.Multiline),
            (r'[/*]', Comment.Multiline)
        ]
    }


class SqliteConsoleLexer(Lexer):
    """
    Lexer for example sessions using sqlite3.

    *New in Pygments 0.11.*
    """

    name = 'sqlite3con'
    aliases = ['sqlite3']
    filenames = ['*.sqlite3-console']
    mimetypes = ['text/x-sqlite3-console']

    def get_tokens_unprocessed(self, data):
        sql = SqlLexer(**self.options)

        curcode = ''
        insertions = []
        for match in line_re.finditer(data):
            line = match.group()
            if line.startswith('sqlite> ') or line.startswith('   ...> '):
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, line[:8])]))
                curcode += line[8:]
            else:
                if curcode:
                    for item in do_insertions(insertions,
                                              sql.get_tokens_unprocessed(curcode)):
                        yield item
                    curcode = ''
                    insertions = []
                if line.startswith('SQL error: '):
                    yield (match.start(), Generic.Traceback, line)
                else:
                    yield (match.start(), Generic.Output, line)
        if curcode:
            for item in do_insertions(insertions,
                                      sql.get_tokens_unprocessed(curcode)):
                yield item
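
# Usage sketch: running a query through the lexer above via the public
# lex() API (assumes this packaged pygments is importable as `pygments`
# and that MySqlLexer is resolved through the top-level lexer mapping):
#
#     from pygments import lex
#     from pygments.lexers import MySqlLexer
#     for ttype, value in lex(u"SELECT 1 + 1;", MySqlLexer()):
#         print ttype, repr(value)
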
1893 packages/wakatime/wakatime/packages/pygments2/lexers/text.py (Normal file; diff suppressed because it is too large)
4045 packages/wakatime/wakatime/packages/pygments2/lexers/web.py (Normal file; diff suppressed because it is too large)
117 packages/wakatime/wakatime/packages/pygments2/style.py (Normal file)
@@ -0,0 +1,117 @@
# -*- coding: utf-8 -*-
"""
    pygments.style
    ~~~~~~~~~~~~~~

    Basic style object.

    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

from pygments.token import Token, STANDARD_TYPES


class StyleMeta(type):

    def __new__(mcs, name, bases, dct):
        obj = type.__new__(mcs, name, bases, dct)
        for token in STANDARD_TYPES:
            if token not in obj.styles:
                obj.styles[token] = ''

        def colorformat(text):
            if text[0:1] == '#':
                col = text[1:]
                if len(col) == 6:
                    return col
                elif len(col) == 3:
                    return col[0]*2 + col[1]*2 + col[2]*2
            elif text == '':
                return ''
            assert False, "wrong color format %r" % text

        _styles = obj._styles = {}

        for ttype in obj.styles:
            for token in ttype.split():
                if token in _styles:
                    continue
                ndef = _styles.get(token.parent, None)
                styledefs = obj.styles.get(token, '').split()
                if not ndef or token is None:
                    ndef = ['', 0, 0, 0, '', '', 0, 0, 0]
                elif 'noinherit' in styledefs and token is not Token:
                    ndef = _styles[Token][:]
                else:
                    ndef = ndef[:]
                _styles[token] = ndef
                for styledef in obj.styles.get(token, '').split():
                    if styledef == 'noinherit':
                        pass
                    elif styledef == 'bold':
                        ndef[1] = 1
                    elif styledef == 'nobold':
                        ndef[1] = 0
                    elif styledef == 'italic':
                        ndef[2] = 1
                    elif styledef == 'noitalic':
                        ndef[2] = 0
                    elif styledef == 'underline':
                        ndef[3] = 1
                    elif styledef == 'nounderline':
                        ndef[3] = 0
                    elif styledef[:3] == 'bg:':
                        ndef[4] = colorformat(styledef[3:])
                    elif styledef[:7] == 'border:':
                        ndef[5] = colorformat(styledef[7:])
                    elif styledef == 'roman':
                        ndef[6] = 1
                    elif styledef == 'sans':
                        ndef[7] = 1
                    elif styledef == 'mono':
                        ndef[8] = 1
                    else:
                        ndef[0] = colorformat(styledef)

        return obj

    def style_for_token(cls, token):
        t = cls._styles[token]
        return {
            'color': t[0] or None,
            'bold': bool(t[1]),
            'italic': bool(t[2]),
            'underline': bool(t[3]),
            'bgcolor': t[4] or None,
            'border': t[5] or None,
            'roman': bool(t[6]) or None,
            'sans': bool(t[7]) or None,
            'mono': bool(t[8]) or None,
        }

    def list_styles(cls):
        return list(cls)

    def styles_token(cls, ttype):
        return ttype in cls._styles

    def __iter__(cls):
        for token in cls._styles:
            yield token, cls.style_for_token(token)

    def __len__(cls):
        return len(cls._styles)


class Style(object):
    __metaclass__ = StyleMeta

    #: overall background color (``None`` means transparent)
    background_color = '#ffffff'

    #: highlight background color
    highlight_color = '#ffffcc'

    #: Style definitions for individual token types.
    styles = {}
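
# Example: a minimal Style subclass in the shape StyleMeta expects (a
# sketch; the class name is hypothetical, token types come from
# pygments.token):
#
#     from pygments.style import Style
#     from pygments.token import Comment, Keyword
#
#     class TinyStyle(Style):
#         background_color = '#f8f8f8'
#         styles = {
#             Comment: 'italic #888',
#             Keyword: 'bold #005',
#         }
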
140 packages/wakatime/wakatime/packages/pygments2/unistring.py (Normal file; diff suppressed because one or more lines are too long)
277 packages/wakatime/wakatime/packages/pygments2/util.py (Normal file)
@@ -0,0 +1,277 @@
# -*- coding: utf-8 -*-
"""
    pygments.util
    ~~~~~~~~~~~~~

    Utility functions.

    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import re
import sys
import codecs


split_path_re = re.compile(r'[/\\ ]')
doctype_lookup_re = re.compile(r'''(?smx)
    (<\?.*?\?>)?\s*
    <!DOCTYPE\s+(
     [a-zA-Z_][a-zA-Z0-9]*\s+
     [a-zA-Z_][a-zA-Z0-9]*\s+
     "[^"]*")
     [^>]*>
''')
tag_re = re.compile(r'<(.+?)(\s.*?)?>.*?</.+?>(?uism)')


class ClassNotFound(ValueError):
    """
    If one of the get_*_by_* functions didn't find a matching class.
    """


class OptionError(Exception):
    pass


def get_choice_opt(options, optname, allowed, default=None, normcase=False):
    string = options.get(optname, default)
    if normcase:
        string = string.lower()
    if string not in allowed:
        raise OptionError('Value for option %s must be one of %s' %
                          (optname, ', '.join(map(str, allowed))))
    return string


def get_bool_opt(options, optname, default=None):
    string = options.get(optname, default)
    if isinstance(string, bool):
        return string
    elif isinstance(string, int):
        return bool(string)
    elif not isinstance(string, basestring):
        raise OptionError('Invalid type %r for option %s; use '
                          '1/0, yes/no, true/false, on/off' % (
                          string, optname))
    elif string.lower() in ('1', 'yes', 'true', 'on'):
        return True
    elif string.lower() in ('0', 'no', 'false', 'off'):
        return False
    else:
        raise OptionError('Invalid value %r for option %s; use '
                          '1/0, yes/no, true/false, on/off' % (
                          string, optname))


def get_int_opt(options, optname, default=None):
    string = options.get(optname, default)
    try:
        return int(string)
    except TypeError:
        raise OptionError('Invalid type %r for option %s; you '
                          'must give an integer value' % (
                          string, optname))
    except ValueError:
        raise OptionError('Invalid value %r for option %s; you '
                          'must give an integer value' % (
                          string, optname))


def get_list_opt(options, optname, default=None):
    val = options.get(optname, default)
    if isinstance(val, basestring):
        return val.split()
    elif isinstance(val, (list, tuple)):
        return list(val)
    else:
        raise OptionError('Invalid type %r for option %s; you '
                          'must give a list value' % (
                          val, optname))
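
# Example: how the get_*_opt helpers above normalize raw option values
# (a sketch; the option names and values are illustrative):
#
#     opts = {'linenos': 'yes', 'tabsize': '8', 'dialects': 'mysql ansi'}
#     get_bool_opt(opts, 'linenos', False)   # -> True
#     get_int_opt(opts, 'tabsize', 4)        # -> 8
#     get_list_opt(opts, 'dialects', [])     # -> ['mysql', 'ansi']
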

def docstring_headline(obj):
    if not obj.__doc__:
        return ''
    res = []
    for line in obj.__doc__.strip().splitlines():
        if line.strip():
            res.append(" " + line.strip())
        else:
            break
    return ''.join(res).lstrip()


def make_analysator(f):
    """
    Return a static text analysation function that
    returns float values.
    """
    def text_analyse(text):
        try:
            rv = f(text)
        except Exception:
            return 0.0
        if not rv:
            return 0.0
        try:
            return min(1.0, max(0.0, float(rv)))
        except (ValueError, TypeError):
            return 0.0
    text_analyse.__doc__ = f.__doc__
    return staticmethod(text_analyse)
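
# Example: wrapping a lexer's analyse_text hook so it always returns a
# float clamped to [0.0, 1.0] (a sketch; `looks_mysqlish` is a
# hypothetical helper and RegexLexer is assumed imported):
#
#     class MyLexer(RegexLexer):
#         def analyse_text(text):
#             return 0.8 if looks_mysqlish(text) else 0
#         analyse_text = make_analysator(analyse_text)
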

def shebang_matches(text, regex):
    """
    Check if the given regular expression matches the last part of the
    shebang if one exists.

        >>> from pygments.util import shebang_matches
        >>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?')
        True
        >>> shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?')
        True
        >>> shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?')
        False
        >>> shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?')
        False
        >>> shebang_matches('#!/usr/bin/startsomethingwith python',
        ...                 r'python(2\.\d)?')
        True

    It also checks for common windows executable file extensions::

        >>> shebang_matches('#!C:\\Python2.4\\Python.exe', r'python(2\.\d)?')
        True

    Parameters (``'-f'`` or ``'--foo'`` are ignored so ``'perl'`` does
    the same as ``'perl -e'``)

    Note that this method automatically searches the whole string (eg:
    the regular expression is wrapped in ``'^$'``)
    """
    index = text.find('\n')
    if index >= 0:
        first_line = text[:index].lower()
    else:
        first_line = text.lower()
    if first_line.startswith('#!'):
        try:
            found = [x for x in split_path_re.split(first_line[2:].strip())
                     if x and not x.startswith('-')][-1]
        except IndexError:
            return False
        regex = re.compile('^%s(\.(exe|cmd|bat|bin))?$' % regex, re.IGNORECASE)
        if regex.search(found) is not None:
            return True
    return False


def doctype_matches(text, regex):
    """
    Check if the doctype matches a regular expression (if present).
    Note that this method only checks the first part of a DOCTYPE.
    eg: 'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'
    """
    m = doctype_lookup_re.match(text)
    if m is None:
        return False
    doctype = m.group(2)
    return re.compile(regex).match(doctype.strip()) is not None


def html_doctype_matches(text):
    """
    Check if the file looks like it has a html doctype.
    """
    return doctype_matches(text, r'html\s+PUBLIC\s+"-//W3C//DTD X?HTML.*')


_looks_like_xml_cache = {}
def looks_like_xml(text):
    """
    Check if a doctype exists or if we have some tags.
    """
    key = hash(text)
    try:
        return _looks_like_xml_cache[key]
    except KeyError:
        m = doctype_lookup_re.match(text)
        if m is not None:
            return True
        rv = tag_re.search(text[:1000]) is not None
        _looks_like_xml_cache[key] = rv
        return rv
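
# Example (sketch): the probe caches per input hash and only scans the
# first 1000 characters for a tag pair:
#
#     looks_like_xml(u'<node attr="1">text</node>')   # -> True
#     looks_like_xml(u'plain prose, no markup')       # -> False
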

# Python narrow build compatibility

def _surrogatepair(c):
    return (0xd7c0 + (c >> 10), (0xdc00 + (c & 0x3ff)))


def unirange(a, b):
    """
    Returns a regular expression string to match the given non-BMP range.
    """
    if b < a:
        raise ValueError("Bad character range")
    if a < 0x10000 or b < 0x10000:
        raise ValueError("unirange is only defined for non-BMP ranges")

    if sys.maxunicode > 0xffff:
        # wide build
        return u'[%s-%s]' % (unichr(a), unichr(b))
    else:
        # narrow build stores surrogates, and the 're' module handles them
        # (incorrectly) as characters. Since there is still ordering among
        # these characters, expand the range to one that it understands. Some
        # background in http://bugs.python.org/issue3665 and
        # http://bugs.python.org/issue12749
        #
        # Additionally, the lower constants are using unichr rather than
        # literals because jython [which uses the wide path] can't load this
        # file if they are literals.
        ah, al = _surrogatepair(a)
        bh, bl = _surrogatepair(b)
        if ah == bh:
            return u'(?:%s[%s-%s])' % (unichr(ah), unichr(al), unichr(bl))
        else:
            buf = []
            buf.append(u'%s[%s-%s]' %
                       (unichr(ah), unichr(al),
                        ah == bh and unichr(bl) or unichr(0xdfff)))
            if bh - ah > 1:
                buf.append(u'[%s-%s][%s-%s]' %
                           (unichr(ah+1), unichr(bh-1),
                            unichr(0xdc00), unichr(0xdfff)))
            if ah != bh:
                buf.append(u'%s[%s-%s]' %
                           (unichr(bh), unichr(0xdc00), unichr(bl)))

            return u'(?:' + u'|'.join(buf) + u')'
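
# Example (sketch): on a wide build, unirange(0x10000, 0x10FFF) collapses
# to the single character class u'[\U00010000-\U00010FFF]'; on a narrow
# build a range within one high surrogate, e.g. unirange(0x10000, 0x103FF),
# expands to the surrogate-pair pattern u'(?:\ud800[\udc00-\udfff])'.
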

# Python 2/3 compatibility

if sys.version_info < (3,0):
    b = bytes = str
    u_prefix = 'u'
    import StringIO, cStringIO
    BytesIO = cStringIO.StringIO
    StringIO = StringIO.StringIO
    uni_open = codecs.open
else:
    import builtins
    bytes = builtins.bytes
    u_prefix = ''
    def b(s):
        if isinstance(s, str):
            return bytes(map(ord, s))
        elif isinstance(s, bytes):
            return s
        else:
            raise TypeError("Invalid argument %r for b()" % (s,))
    import io
    BytesIO = io.BytesIO
    StringIO = io.StringIO
    uni_open = builtins.open
74 packages/wakatime/wakatime/packages/pygments3/console.py (Normal file)
@@ -0,0 +1,74 @@
# -*- coding: utf-8 -*-
"""
    pygments.console
    ~~~~~~~~~~~~~~~~

    Format colored console output.

    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

esc = "\x1b["

codes = {}
codes[""] = ""
codes["reset"] = esc + "39;49;00m"

codes["bold"] = esc + "01m"
codes["faint"] = esc + "02m"
codes["standout"] = esc + "03m"
codes["underline"] = esc + "04m"
codes["blink"] = esc + "05m"
codes["overline"] = esc + "06m"

dark_colors = ["black", "darkred", "darkgreen", "brown", "darkblue",
               "purple", "teal", "lightgray"]
light_colors = ["darkgray", "red", "green", "yellow", "blue",
                "fuchsia", "turquoise", "white"]

x = 30
for d, l in zip(dark_colors, light_colors):
    codes[d] = esc + "%im" % x
    codes[l] = esc + "%i;01m" % x
    x += 1

del d, l, x

codes["darkteal"] = codes["turquoise"]
codes["darkyellow"] = codes["brown"]
codes["fuscia"] = codes["fuchsia"]
codes["white"] = codes["bold"]


def reset_color():
    return codes["reset"]


def colorize(color_key, text):
    return codes[color_key] + text + codes["reset"]


def ansiformat(attr, text):
    """
    Format ``text`` with a color and/or some attributes::

        color       normal color
        *color*     bold color
        _color_     underlined color
        +color+     blinking color
    """
    result = []
    if attr[:1] == attr[-1:] == '+':
        result.append(codes['blink'])
        attr = attr[1:-1]
    if attr[:1] == attr[-1:] == '*':
        result.append(codes['bold'])
        attr = attr[1:-1]
    if attr[:1] == attr[-1:] == '_':
        result.append(codes['underline'])
        attr = attr[1:-1]
    result.append(codes[attr])
    result.append(text)
    result.append(codes['reset'])
    return ''.join(result)
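
# Example (sketch): composing the attribute syntax understood by
# ansiformat above:
#
#     print colorize('darkgreen', 'ok')         # plain dark green
#     print ansiformat('*red*', 'failed')       # bold red
#     print ansiformat('_teal_', 'underlined')  # underlined teal
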
74 packages/wakatime/wakatime/packages/pygments3/filter.py (Normal file)
@@ -0,0 +1,74 @@
# -*- coding: utf-8 -*-
"""
    pygments.filter
    ~~~~~~~~~~~~~~~

    Module that implements the default filter.

    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""


def apply_filters(stream, filters, lexer=None):
    """
    Use this method to apply an iterable of filters to
    a stream. If lexer is given it's forwarded to the
    filter, otherwise the filter receives `None`.
    """
    def _apply(filter_, stream):
        for token in filter_.filter(lexer, stream):
            yield token
    for filter_ in filters:
        stream = _apply(filter_, stream)
    return stream


def simplefilter(f):
    """
    Decorator that converts a function into a filter::

        @simplefilter
        def lowercase(lexer, stream, options):
            for ttype, value in stream:
                yield ttype, value.lower()
    """
    return type(f.__name__, (FunctionFilter,), {
                'function': f,
                '__module__': getattr(f, '__module__'),
                '__doc__': f.__doc__
            })


class Filter(object):
    """
    Default filter. Subclass this class or use the `simplefilter`
    decorator to create own filters.
    """

    def __init__(self, **options):
        self.options = options

    def filter(self, lexer, stream):
        raise NotImplementedError()


class FunctionFilter(Filter):
    """
    Abstract class used by `simplefilter` to create simple
    function filters on the fly. The `simplefilter` decorator
    automatically creates subclasses of this class for
    functions passed to it.
    """
    function = None

    def __init__(self, **options):
        if not hasattr(self, 'function'):
            raise TypeError('%r used without bound function' %
                            self.__class__.__name__)
        Filter.__init__(self, **options)

    def filter(self, lexer, stream):
        # pylint: disable-msg=E1102
        for ttype, value in self.function(lexer, stream, self.options):
            yield ttype, value
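
# Example (sketch): a filter built with the decorator above, applied by
# hand; `stream` stands in for any iterable of (tokentype, value) pairs:
#
#     @simplefilter
#     def uppercase(lexer, stream, options):
#         for ttype, value in stream:
#             yield ttype, value.upper()
#
#     shouting = apply_filters(stream, [uppercase()])
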
@@ -0,0 +1,68 @@
# -*- coding: utf-8 -*-
"""
    pygments.formatters
    ~~~~~~~~~~~~~~~~~~~

    Pygments formatters.

    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""
import os.path
import fnmatch

from pygments.formatters._mapping import FORMATTERS
from pygments.plugin import find_plugin_formatters
from pygments.util import ClassNotFound

ns = globals()
for fcls in FORMATTERS:
    ns[fcls.__name__] = fcls
del fcls

__all__ = ['get_formatter_by_name', 'get_formatter_for_filename',
           'get_all_formatters'] + [cls.__name__ for cls in FORMATTERS]


_formatter_alias_cache = {}
_formatter_filename_cache = []

def _init_formatter_cache():
    if _formatter_alias_cache:
        return
    for cls in get_all_formatters():
        for alias in cls.aliases:
            _formatter_alias_cache[alias] = cls
        for fn in cls.filenames:
            _formatter_filename_cache.append((fn, cls))


def find_formatter_class(name):
    _init_formatter_cache()
    cls = _formatter_alias_cache.get(name, None)
    return cls


def get_formatter_by_name(name, **options):
    _init_formatter_cache()
    cls = _formatter_alias_cache.get(name, None)
    if not cls:
        raise ClassNotFound("No formatter found for name %r" % name)
    return cls(**options)


def get_formatter_for_filename(fn, **options):
    _init_formatter_cache()
    fn = os.path.basename(fn)
    for pattern, cls in _formatter_filename_cache:
        if fnmatch.fnmatch(fn, pattern):
            return cls(**options)
    raise ClassNotFound("No formatter found for file name %r" % fn)


def get_all_formatters():
    """Return a generator for all formatters."""
    for formatter in FORMATTERS:
        yield formatter
    for _, formatter in find_plugin_formatters():
        yield formatter
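
# Example (sketch): the two lookup paths above, by alias and by filename
# pattern (return values shown are illustrative):
#
#     get_formatter_by_name('bbcode')         # -> a BBCodeFormatter instance
#     get_formatter_for_filename('dump.raw')  # -> a RawTokenFormatter instance
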
@@ -0,0 +1,109 @@
# -*- coding: utf-8 -*-
"""
    pygments.formatters.bbcode
    ~~~~~~~~~~~~~~~~~~~~~~~~~~

    BBcode formatter.

    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""


from pygments.formatter import Formatter
from pygments.util import get_bool_opt

__all__ = ['BBCodeFormatter']


class BBCodeFormatter(Formatter):
    """
    Format tokens with BBcodes. These formatting codes are used by many
    bulletin boards, so you can highlight your sourcecode with pygments before
    posting it there.

    This formatter has no support for background colors and borders, as there
    are no common BBcode tags for that.

    Some board systems (e.g. phpBB) don't support colors in their [code] tag,
    so you can't use the highlighting together with that tag.
    Text in a [code] tag usually is shown with a monospace font (which this
    formatter can do with the ``monofont`` option) and no spaces (which you
    need for indentation) are removed.

    Additional options accepted:

    `style`
        The style to use, can be a string or a Style subclass (default:
        ``'default'``).

    `codetag`
        If set to true, put the output into ``[code]`` tags (default:
        ``false``)

    `monofont`
        If set to true, add a tag to show the code with a monospace font
        (default: ``false``).
    """
    name = 'BBCode'
    aliases = ['bbcode', 'bb']
    filenames = []

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        self._code = get_bool_opt(options, 'codetag', False)
        self._mono = get_bool_opt(options, 'monofont', False)

        self.styles = {}
        self._make_styles()

    def _make_styles(self):
        for ttype, ndef in self.style:
            start = end = ''
            if ndef['color']:
                start += '[color=#%s]' % ndef['color']
                end = '[/color]' + end
            if ndef['bold']:
                start += '[b]'
                end = '[/b]' + end
            if ndef['italic']:
                start += '[i]'
                end = '[/i]' + end
            if ndef['underline']:
                start += '[u]'
                end = '[/u]' + end
            # there are no common BBcodes for background-color and border

            self.styles[ttype] = start, end

    def format_unencoded(self, tokensource, outfile):
        if self._code:
            outfile.write('[code]')
        if self._mono:
            outfile.write('[font=monospace]')

        lastval = ''
        lasttype = None

        for ttype, value in tokensource:
            while ttype not in self.styles:
                ttype = ttype.parent
            if ttype == lasttype:
                lastval += value
            else:
                if lastval:
                    start, end = self.styles[lasttype]
                    outfile.write(''.join((start, lastval, end)))
                lastval = value
                lasttype = ttype

        if lastval:
            start, end = self.styles[lasttype]
            outfile.write(''.join((start, lastval, end)))

        if self._mono:
            outfile.write('[/font]')
        if self._code:
            outfile.write('[/code]')
        if self._code or self._mono:
            outfile.write('\n')
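
# Example (sketch): highlighting straight to BBcode markup with the
# formatter defined above:
#
#     from pygments import highlight
#     from pygments.lexers import PythonLexer
#     print highlight(u'print 1', PythonLexer(),
#                     BBCodeFormatter(codetag=True))
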
@@ -0,0 +1,115 @@
# -*- coding: utf-8 -*-
"""
    pygments.formatters.other
    ~~~~~~~~~~~~~~~~~~~~~~~~~

    Other formatters: NullFormatter, RawTokenFormatter.

    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

from pygments.formatter import Formatter
from pygments.util import OptionError, get_choice_opt, b
from pygments.token import Token
from pygments.console import colorize

__all__ = ['NullFormatter', 'RawTokenFormatter']


class NullFormatter(Formatter):
    """
    Output the text unchanged without any formatting.
    """
    name = 'Text only'
    aliases = ['text', 'null']
    filenames = ['*.txt']

    def format(self, tokensource, outfile):
        enc = self.encoding
        for ttype, value in tokensource:
            if enc:
                outfile.write(value.encode(enc))
            else:
                outfile.write(value)


class RawTokenFormatter(Formatter):
    r"""
    Format tokens as a raw representation for storing token streams.

    The format is ``tokentype<TAB>repr(tokenstring)\n``. The output can later
    be converted to a token stream with the `RawTokenLexer`, described in the
    `lexer list <lexers.txt>`_.

    Only two options are accepted:

    `compress`
        If set to ``'gz'`` or ``'bz2'``, compress the output with the given
        compression algorithm after encoding (default: ``''``).
    `error_color`
        If set to a color name, highlight error tokens using that color. If
        set but with no value, defaults to ``'red'``.

        *New in Pygments 0.11.*

    """
    name = 'Raw tokens'
    aliases = ['raw', 'tokens']
    filenames = ['*.raw']

    unicodeoutput = False

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        if self.encoding:
            raise OptionError('the raw formatter does not support the '
                              'encoding option')
        self.encoding = 'ascii'  # let pygments.format() do the right thing
        self.compress = get_choice_opt(options, 'compress',
                                       ['', 'none', 'gz', 'bz2'], '')
        self.error_color = options.get('error_color', None)
        if self.error_color is True:
            self.error_color = 'red'
        if self.error_color is not None:
            try:
                colorize(self.error_color, '')
            except KeyError:
                raise ValueError("Invalid color %r specified" %
                                 self.error_color)

    def format(self, tokensource, outfile):
        try:
            outfile.write(b(''))
        except TypeError:
            raise TypeError('The raw tokens formatter needs a binary '
                            'output file')
        if self.compress == 'gz':
            import gzip
            outfile = gzip.GzipFile('', 'wb', 9, outfile)
            def write(text):
                outfile.write(text.encode())
            flush = outfile.flush
        elif self.compress == 'bz2':
            import bz2
            compressor = bz2.BZ2Compressor(9)
            def write(text):
                outfile.write(compressor.compress(text.encode()))
            def flush():
                outfile.write(compressor.flush())
                outfile.flush()
        else:
            def write(text):
                outfile.write(text.encode())
            flush = outfile.flush

        if self.error_color:
            for ttype, value in tokensource:
                line = "%s\t%r\n" % (ttype, value)
                if ttype is Token.Error:
                    write(colorize(self.error_color, line))
                else:
                    write(line)
        else:
            for ttype, value in tokensource:
                write("%s\t%r\n" % (ttype, value))
        flush()
136 packages/wakatime/wakatime/packages/pygments3/formatters/rtf.py (Normal file)
@@ -0,0 +1,136 @@
# -*- coding: utf-8 -*-
"""
    pygments.formatters.rtf
    ~~~~~~~~~~~~~~~~~~~~~~~

    A formatter that generates RTF files.

    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

from pygments.formatter import Formatter


__all__ = ['RtfFormatter']


class RtfFormatter(Formatter):
    """
    Format tokens as RTF markup. This formatter automatically outputs full RTF
    documents with color information and other useful stuff. Perfect for Copy and
    Paste into Microsoft® Word® documents.

    *New in Pygments 0.6.*

    Additional options accepted:

    `style`
        The style to use, can be a string or a Style subclass (default:
        ``'default'``).

    `fontface`
        The used font family, for example ``Bitstream Vera Sans``. Defaults to
        some generic font which is supposed to have fixed width.
    """
    name = 'RTF'
    aliases = ['rtf']
    filenames = ['*.rtf']

    unicodeoutput = False

    def __init__(self, **options):
        """
        Additional options accepted:

        ``fontface``
            Name of the font used. Could for example be ``'Courier New'``
            to further specify the default which is ``'\fmodern'``. The RTF
            specification claims that ``\fmodern`` are "Fixed-pitch serif
            and sans serif fonts". Hope every RTF implementation thinks
            the same about modern...
        """
        Formatter.__init__(self, **options)
        self.fontface = options.get('fontface') or ''

    def _escape(self, text):
        return text.replace('\\', '\\\\') \
                   .replace('{', '\\{') \
                   .replace('}', '\\}')

    def _escape_text(self, text):
        # empty strings, should give a small performance improvement
        if not text:
            return ''

        # escape text
        text = self._escape(text)
        if self.encoding in ('utf-8', 'utf-16', 'utf-32'):
            encoding = 'iso-8859-15'
        else:
            encoding = self.encoding or 'iso-8859-15'

        buf = []
        for c in text:
            if ord(c) > 128:
                ansic = c.encode(encoding, 'ignore') or '?'
                if ord(ansic) > 128:
                    ansic = '\\\'%x' % ord(ansic)
                else:
                    ansic = c
                buf.append(r'\ud{\u%d%s}' % (ord(c), ansic))
            else:
                buf.append(str(c))

        return ''.join(buf).replace('\n', '\\par\n')

    def format_unencoded(self, tokensource, outfile):
        # rtf 1.8 header
        outfile.write(r'{\rtf1\ansi\deff0'
                      r'{\fonttbl{\f0\fmodern\fprq1\fcharset0%s;}}'
                      r'{\colortbl;' % (self.fontface and
                                        ' ' + self._escape(self.fontface) or
                                        ''))

        # convert colors and save them in a mapping to access them later.
        color_mapping = {}
        offset = 1
        for _, style in self.style:
            for color in style['color'], style['bgcolor'], style['border']:
                if color and color not in color_mapping:
                    color_mapping[color] = offset
                    outfile.write(r'\red%d\green%d\blue%d;' % (
                        int(color[0:2], 16),
                        int(color[2:4], 16),
                        int(color[4:6], 16)
                    ))
                    offset += 1
        outfile.write(r'}\f0')

        # highlight stream
        for ttype, value in tokensource:
            while not self.style.styles_token(ttype) and ttype.parent:
                ttype = ttype.parent
            style = self.style.style_for_token(ttype)
            buf = []
            if style['bgcolor']:
                buf.append(r'\cb%d' % color_mapping[style['bgcolor']])
            if style['color']:
                buf.append(r'\cf%d' % color_mapping[style['color']])
            if style['bold']:
                buf.append(r'\b')
            if style['italic']:
                buf.append(r'\i')
            if style['underline']:
                buf.append(r'\ul')
            if style['border']:
                buf.append(r'\chbrdr\chcfpat%d' %
                           color_mapping[style['border']])
            start = ''.join(buf)
            if start:
                outfile.write('{%s ' % start)
            outfile.write(self._escape_text(value))
            if start:
                outfile.write('}')

        outfile.write('}')
154 packages/wakatime/wakatime/packages/pygments3/formatters/svg.py (Normal file)
@@ -0,0 +1,154 @@
# -*- coding: utf-8 -*-
"""
    pygments.formatters.svg
    ~~~~~~~~~~~~~~~~~~~~~~~

    Formatter for SVG output.

    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

from pygments.formatter import Formatter
from pygments.util import get_bool_opt, get_int_opt

__all__ = ['SvgFormatter']


def escape_html(text):
    """Escape &, <, > as well as single and double quotes for HTML."""
    return text.replace('&', '&amp;'). \
                replace('<', '&lt;'). \
                replace('>', '&gt;'). \
                replace('"', '&quot;'). \
                replace("'", '&#39;')


class2style = {}

class SvgFormatter(Formatter):
    """
    Format tokens as an SVG graphics file. This formatter is still experimental.
    Each line of code is a ``<text>`` element with explicit ``x`` and ``y``
    coordinates containing ``<tspan>`` elements with the individual token styles.

    By default, this formatter outputs a full SVG document including doctype
    declaration and the ``<svg>`` root element.

    *New in Pygments 0.9.*

    Additional options accepted:

    `nowrap`
        Don't wrap the SVG ``<text>`` elements in ``<svg><g>`` elements and
        don't add an XML declaration and a doctype. If true, the `fontfamily`
        and `fontsize` options are ignored. Defaults to ``False``.

    `fontfamily`
        The value to give the wrapping ``<g>`` element's ``font-family``
        attribute, defaults to ``"monospace"``.

    `fontsize`
        The value to give the wrapping ``<g>`` element's ``font-size``
        attribute, defaults to ``"14px"``.

    `xoffset`
        Starting offset in X direction, defaults to ``0``.

    `yoffset`
        Starting offset in Y direction, defaults to the font size if it is given
        in pixels, or ``20`` else. (This is necessary since text coordinates
        refer to the text baseline, not the top edge.)

    `ystep`
        Offset to add to the Y coordinate for each subsequent line. This should
        roughly be the text size plus 5. It defaults to that value if the text
        size is given in pixels, or ``25`` else.

    `spacehack`
        Convert spaces in the source to ``&#160;``, which are non-breaking
        spaces. SVG provides the ``xml:space`` attribute to control how
        whitespace inside tags is handled, in theory, the ``preserve`` value
        could be used to keep all whitespace as-is. However, many current SVG
        viewers don't obey that rule, so this option is provided as a workaround
        and defaults to ``True``.
    """
    name = 'SVG'
    aliases = ['svg']
    filenames = ['*.svg']

    def __init__(self, **options):
        # XXX outencoding
        Formatter.__init__(self, **options)
        self.nowrap = get_bool_opt(options, 'nowrap', False)
        self.fontfamily = options.get('fontfamily', 'monospace')
        self.fontsize = options.get('fontsize', '14px')
        self.xoffset = get_int_opt(options, 'xoffset', 0)
        fs = self.fontsize.strip()
        if fs.endswith('px'): fs = fs[:-2].strip()
        try:
            int_fs = int(fs)
        except:
            int_fs = 20
        self.yoffset = get_int_opt(options, 'yoffset', int_fs)
        self.ystep = get_int_opt(options, 'ystep', int_fs + 5)
        self.spacehack = get_bool_opt(options, 'spacehack', True)
        self._stylecache = {}

    def format_unencoded(self, tokensource, outfile):
        """
        Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
        tuples and write it into ``outfile``.

        For our implementation we put all lines in their own 'line group'.
        """
        x = self.xoffset
        y = self.yoffset
        if not self.nowrap:
            if self.encoding:
                outfile.write('<?xml version="1.0" encoding="%s"?>\n' %
                              self.encoding)
            else:
                outfile.write('<?xml version="1.0"?>\n')
            outfile.write('<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" '
                          '"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/'
                          'svg10.dtd">\n')
            outfile.write('<svg xmlns="http://www.w3.org/2000/svg">\n')
            outfile.write('<g font-family="%s" font-size="%s">\n' %
                          (self.fontfamily, self.fontsize))
        outfile.write('<text x="%s" y="%s" xml:space="preserve">' % (x, y))
        for ttype, value in tokensource:
            style = self._get_style(ttype)
            tspan = style and '<tspan' + style + '>' or ''
            tspanend = tspan and '</tspan>' or ''
            value = escape_html(value)
            if self.spacehack:
                value = value.expandtabs().replace(' ', '&#160;')
            parts = value.split('\n')
            for part in parts[:-1]:
                outfile.write(tspan + part + tspanend)
                y += self.ystep
                outfile.write('</text>\n<text x="%s" y="%s" '
                              'xml:space="preserve">' % (x, y))
            outfile.write(tspan + parts[-1] + tspanend)
        outfile.write('</text>')

        if not self.nowrap:
            outfile.write('</g></svg>\n')

    def _get_style(self, tokentype):
        if tokentype in self._stylecache:
            return self._stylecache[tokentype]
        otokentype = tokentype
        while not self.style.styles_token(tokentype):
            tokentype = tokentype.parent
        value = self.style.style_for_token(tokentype)
        result = ''
        if value['color']:
            result = ' fill="#' + value['color'] + '"'
        if value['bold']:
            result += ' font-weight="bold"'
        if value['italic']:
            result += ' font-style="italic"'
        self._stylecache[otokentype] = result
        return result
@@ -0,0 +1,112 @@
# -*- coding: utf-8 -*-
"""
    pygments.formatters.terminal
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Formatter for terminal output with ANSI sequences.

    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import sys

from pygments.formatter import Formatter
from pygments.token import Keyword, Name, Comment, String, Error, \
    Number, Operator, Generic, Token, Whitespace
from pygments.console import ansiformat
from pygments.util import get_choice_opt


__all__ = ['TerminalFormatter']


#: Map token types to a tuple of color values for light and dark
#: backgrounds.
TERMINAL_COLORS = {
    Token: ('', ''),

    Whitespace: ('lightgray', 'darkgray'),
    Comment: ('lightgray', 'darkgray'),
    Comment.Preproc: ('teal', 'turquoise'),
    Keyword: ('darkblue', 'blue'),
    Keyword.Type: ('teal', 'turquoise'),
    Operator.Word: ('purple', 'fuchsia'),
    Name.Builtin: ('teal', 'turquoise'),
    Name.Function: ('darkgreen', 'green'),
    Name.Namespace: ('_teal_', '_turquoise_'),
    Name.Class: ('_darkgreen_', '_green_'),
    Name.Exception: ('teal', 'turquoise'),
    Name.Decorator: ('darkgray', 'lightgray'),
    Name.Variable: ('darkred', 'red'),
    Name.Constant: ('darkred', 'red'),
    Name.Attribute: ('teal', 'turquoise'),
    Name.Tag: ('blue', 'blue'),
    String: ('brown', 'brown'),
    Number: ('darkblue', 'blue'),

    Generic.Deleted: ('red', 'red'),
    Generic.Inserted: ('darkgreen', 'green'),
    Generic.Heading: ('**', '**'),
    Generic.Subheading: ('*purple*', '*fuchsia*'),
    Generic.Error: ('red', 'red'),

    Error: ('_red_', '_red_'),
}


class TerminalFormatter(Formatter):
    r"""
    Format tokens with ANSI color sequences, for output in a text console.
    Color sequences are terminated at newlines, so that paging the output
    works correctly.

    The `get_style_defs()` method doesn't do anything special since there is
    no support for common styles.

    Options accepted:

    `bg`
        Set to ``"light"`` or ``"dark"`` depending on the terminal's background
        (default: ``"light"``).

    `colorscheme`
        A dictionary mapping token types to (lightbg, darkbg) color names or
        ``None`` (default: ``None`` = use builtin colorscheme).
    """
    name = 'Terminal'
    aliases = ['terminal', 'console']
    filenames = []

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        self.darkbg = get_choice_opt(options, 'bg',
                                     ['light', 'dark'], 'light') == 'dark'
        self.colorscheme = options.get('colorscheme', None) or TERMINAL_COLORS

    def format(self, tokensource, outfile):
        # hack: if the output is a terminal and has an encoding set,
        # use that to avoid unicode encode problems
        if not self.encoding and hasattr(outfile, "encoding") and \
           hasattr(outfile, "isatty") and outfile.isatty() and \
           sys.version_info < (3,):
            self.encoding = outfile.encoding
        return Formatter.format(self, tokensource, outfile)

    def format_unencoded(self, tokensource, outfile):
        for ttype, value in tokensource:
            color = self.colorscheme.get(ttype)
            while color is None:
                ttype = ttype[:-1]
                color = self.colorscheme.get(ttype)
            if color:
                color = color[self.darkbg]
                spl = value.split('\n')
                for line in spl[:-1]:
                    if line:
                        outfile.write(ansiformat(color, line))
                    outfile.write('\n')
                if spl[-1]:
                    outfile.write(ansiformat(color, spl[-1]))
            else:
                outfile.write(value)
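
# Example (sketch): colorized output for a dark terminal background,
# written straight to stdout:
#
#     import sys
#     from pygments import highlight
#     from pygments.lexers import PythonLexer
#     highlight(u'print 1', PythonLexer(),
#               TerminalFormatter(bg='dark'), sys.stdout)
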
@@ -0,0 +1,222 @@
# -*- coding: utf-8 -*-
"""
    pygments.formatters.terminal256
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Formatter for 256-color terminal output with ANSI sequences.

    RGB-to-XTERM color conversion routines adapted from xterm256-conv
    tool (http://frexx.de/xterm-256-notes/data/xterm256-conv2.tar.bz2)
    by Wolfgang Frisch.

    Formatter version 1.

    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

# TODO:
#  - Options to map style's bold/underline/italic/border attributes
#    to some ANSI attributes (something like 'italic=underline')
#  - An option to output "style RGB to xterm RGB/index" conversion table
#  - An option to indicate that we are running in "reverse background"
#    xterm. This means that default colors are white-on-black, not
#    black-on-white, so colors like "white background" need to be converted
#    to "white background, black foreground", etc...

import sys

from pygments.formatter import Formatter


__all__ = ['Terminal256Formatter']


class EscapeSequence:
    def __init__(self, fg=None, bg=None, bold=False, underline=False):
        self.fg = fg
        self.bg = bg
        self.bold = bold
        self.underline = underline

    def escape(self, attrs):
        if len(attrs):
            return "\x1b[" + ";".join(attrs) + "m"
        return ""

    def color_string(self):
        attrs = []
        if self.fg is not None:
            attrs.extend(("38", "5", "%i" % self.fg))
        if self.bg is not None:
            attrs.extend(("48", "5", "%i" % self.bg))
        if self.bold:
            attrs.append("01")
        if self.underline:
            attrs.append("04")
        return self.escape(attrs)

    def reset_string(self):
        attrs = []
        if self.fg is not None:
            attrs.append("39")
        if self.bg is not None:
            attrs.append("49")
        if self.bold or self.underline:
            attrs.append("00")
        return self.escape(attrs)


class Terminal256Formatter(Formatter):
    r"""
    Format tokens with ANSI color sequences, for output in a 256-color
    terminal or console. Like in `TerminalFormatter` color sequences
    are terminated at newlines, so that paging the output works correctly.

    The formatter takes colors from a style defined by the `style` option
    and converts them to nearest ANSI 256-color escape sequences. Bold and
    underline attributes from the style are preserved (and displayed).

    *New in Pygments 0.9.*

    Options accepted:

    `style`
        The style to use, can be a string or a Style subclass (default:
        ``'default'``).
    """
    name = 'Terminal256'
    aliases = ['terminal256', 'console256', '256']
    filenames = []

    def __init__(self, **options):
        Formatter.__init__(self, **options)

        self.xterm_colors = []
        self.best_match = {}
        self.style_string = {}

        self.usebold = 'nobold' not in options
        self.useunderline = 'nounderline' not in options

        self._build_color_table()  # build an RGB-to-256 color conversion table
        self._setup_styles()  # convert selected style's colors to term. colors

    def _build_color_table(self):
        # colors 0..15: 16 basic colors

        self.xterm_colors.append((0x00, 0x00, 0x00))  # 0
        self.xterm_colors.append((0xcd, 0x00, 0x00))  # 1
        self.xterm_colors.append((0x00, 0xcd, 0x00))  # 2
        self.xterm_colors.append((0xcd, 0xcd, 0x00))  # 3
        self.xterm_colors.append((0x00, 0x00, 0xee))  # 4
        self.xterm_colors.append((0xcd, 0x00, 0xcd))  # 5
        self.xterm_colors.append((0x00, 0xcd, 0xcd))  # 6
        self.xterm_colors.append((0xe5, 0xe5, 0xe5))  # 7
        self.xterm_colors.append((0x7f, 0x7f, 0x7f))  # 8
        self.xterm_colors.append((0xff, 0x00, 0x00))  # 9
        self.xterm_colors.append((0x00, 0xff, 0x00))  # 10
        self.xterm_colors.append((0xff, 0xff, 0x00))  # 11
        self.xterm_colors.append((0x5c, 0x5c, 0xff))  # 12
        self.xterm_colors.append((0xff, 0x00, 0xff))  # 13
        self.xterm_colors.append((0x00, 0xff, 0xff))  # 14
        self.xterm_colors.append((0xff, 0xff, 0xff))  # 15

        # colors 16..232: the 6x6x6 color cube

        valuerange = (0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff)

        for i in range(217):
            r = valuerange[(i // 36) % 6]
            g = valuerange[(i // 6) % 6]
            b = valuerange[i % 6]
            self.xterm_colors.append((r, g, b))

        # colors 233..253: grayscale

        for i in range(1, 22):
            v = 8 + i * 10
            self.xterm_colors.append((v, v, v))

    def _closest_color(self, r, g, b):
        distance = 257*257*3  # "infinity" (>distance from #000000 to #ffffff)
        match = 0

        for i in range(0, 254):
            values = self.xterm_colors[i]

            rd = r - values[0]
            gd = g - values[1]
            bd = b - values[2]
            d = rd*rd + gd*gd + bd*bd

            if d < distance:
                match = i
                distance = d
        return match

    def _color_index(self, color):
        index = self.best_match.get(color, None)
        if index is None:
            try:
                rgb = int(str(color), 16)
            except ValueError:
                rgb = 0

            r = (rgb >> 16) & 0xff
            g = (rgb >> 8) & 0xff
            b = rgb & 0xff
            index = self._closest_color(r, g, b)
            self.best_match[color] = index
        return index

    def _setup_styles(self):
        for ttype, ndef in self.style:
            escape = EscapeSequence()
            if ndef['color']:
                escape.fg = self._color_index(ndef['color'])
            if ndef['bgcolor']:
                escape.bg = self._color_index(ndef['bgcolor'])
            if self.usebold and ndef['bold']:
                escape.bold = True
            if self.useunderline and ndef['underline']:
                escape.underline = True
            self.style_string[str(ttype)] = (escape.color_string(),
                                             escape.reset_string())

    def format(self, tokensource, outfile):
        # hack: if the output is a terminal and has an encoding set,
        # use that to avoid unicode encode problems
        if not self.encoding and hasattr(outfile, "encoding") and \
           hasattr(outfile, "isatty") and outfile.isatty() and \
           sys.version_info < (3,):
            self.encoding = outfile.encoding
        return Formatter.format(self, tokensource, outfile)

    def format_unencoded(self, tokensource, outfile):
        for ttype, value in tokensource:
            not_found = True
            while ttype and not_found:
                try:
                    #outfile.write( "<" + str(ttype) + ">" )
                    on, off = self.style_string[str(ttype)]

                    # Like TerminalFormatter, add "reset colors" escape sequence
                    # on newline.
                    spl = value.split('\n')
                    for line in spl[:-1]:
                        if line:
                            outfile.write(on + line + off)
                        outfile.write('\n')
                    if spl[-1]:
                        outfile.write(on + spl[-1] + off)

                    not_found = False
                    #outfile.write( '#' + str(ttype) + '#' )

                except KeyError:
                    #ottype = ttype
                    ttype = ttype[:-1]
                    #outfile.write( '!' + str(ottype) + '->' + str(ttype) + '!' )

            if not_found:
                outfile.write(value)
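
# Example (sketch): the 256-color formatter maps each style's RGB values
# onto the xterm palette built in _build_color_table; it is also reachable
# by the aliases 'terminal256', 'console256' and '256':
#
#     from pygments import highlight
#     from pygments.lexers import PythonLexer
#     print highlight(u'print 1', PythonLexer(), Terminal256Formatter())
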
1645 packages/wakatime/wakatime/packages/pygments3/lexers/_asybuiltins.py (Normal file; diff suppressed because it is too large)
@@ -0,0 +1,232 @@
# -*- coding: utf-8 -*-
"""
    pygments.lexers._clbuiltins
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~

    ANSI Common Lisp builtins.

    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

BUILTIN_FUNCTIONS = [ # 638 functions
    '<', '<=', '=', '>', '>=', '-', '/', '/=', '*', '+', '1-', '1+',
    'abort', 'abs', 'acons', 'acos', 'acosh', 'add-method', 'adjoin',
    'adjustable-array-p', 'adjust-array', 'allocate-instance',
    'alpha-char-p', 'alphanumericp', 'append', 'apply', 'apropos',
    'apropos-list', 'aref', 'arithmetic-error-operands',
    'arithmetic-error-operation', 'array-dimension', 'array-dimensions',
    'array-displacement', 'array-element-type', 'array-has-fill-pointer-p',
    'array-in-bounds-p', 'arrayp', 'array-rank', 'array-row-major-index',
    'array-total-size', 'ash', 'asin', 'asinh', 'assoc', 'assoc-if',
    'assoc-if-not', 'atan', 'atanh', 'atom', 'bit', 'bit-and', 'bit-andc1',
    'bit-andc2', 'bit-eqv', 'bit-ior', 'bit-nand', 'bit-nor', 'bit-not',
    'bit-orc1', 'bit-orc2', 'bit-vector-p', 'bit-xor', 'boole',
    'both-case-p', 'boundp', 'break', 'broadcast-stream-streams',
    'butlast', 'byte', 'byte-position', 'byte-size', 'caaaar', 'caaadr',
    'caaar', 'caadar', 'caaddr', 'caadr', 'caar', 'cadaar', 'cadadr',
    'cadar', 'caddar', 'cadddr', 'caddr', 'cadr', 'call-next-method', 'car',
    'cdaaar', 'cdaadr', 'cdaar', 'cdadar', 'cdaddr', 'cdadr', 'cdar',
    'cddaar', 'cddadr', 'cddar', 'cdddar', 'cddddr', 'cdddr', 'cddr', 'cdr',
    'ceiling', 'cell-error-name', 'cerror', 'change-class', 'char', 'char<',
    'char<=', 'char=', 'char>', 'char>=', 'char/=', 'character',
    'characterp', 'char-code', 'char-downcase', 'char-equal',
    'char-greaterp', 'char-int', 'char-lessp', 'char-name',
    'char-not-equal', 'char-not-greaterp', 'char-not-lessp', 'char-upcase',
    'cis', 'class-name', 'class-of', 'clear-input', 'clear-output',
    'close', 'clrhash', 'code-char', 'coerce', 'compile',
    'compiled-function-p', 'compile-file', 'compile-file-pathname',
    'compiler-macro-function', 'complement', 'complex', 'complexp',
    'compute-applicable-methods', 'compute-restarts', 'concatenate',
    'concatenated-stream-streams', 'conjugate', 'cons', 'consp',
    'constantly', 'constantp', 'continue', 'copy-alist', 'copy-list',
    'copy-pprint-dispatch', 'copy-readtable', 'copy-seq', 'copy-structure',
    'copy-symbol', 'copy-tree', 'cos', 'cosh', 'count', 'count-if',
    'count-if-not', 'decode-float', 'decode-universal-time', 'delete',
    'delete-duplicates', 'delete-file', 'delete-if', 'delete-if-not',
    'delete-package', 'denominator', 'deposit-field', 'describe',
    'describe-object', 'digit-char', 'digit-char-p', 'directory',
    'directory-namestring', 'disassemble', 'documentation', 'dpb',
    'dribble', 'echo-stream-input-stream', 'echo-stream-output-stream',
    'ed', 'eighth', 'elt', 'encode-universal-time', 'endp',
    'enough-namestring', 'ensure-directories-exist',
    'ensure-generic-function', 'eq', 'eql', 'equal', 'equalp', 'error',
    'eval', 'evenp', 'every', 'exp', 'export', 'expt', 'fboundp',
    'fceiling', 'fdefinition', 'ffloor', 'fifth', 'file-author',
    'file-error-pathname', 'file-length', 'file-namestring',
    'file-position', 'file-string-length', 'file-write-date',
    'fill', 'fill-pointer', 'find', 'find-all-symbols', 'find-class',
    'find-if', 'find-if-not', 'find-method', 'find-package', 'find-restart',
    'find-symbol', 'finish-output', 'first', 'float', 'float-digits',
    'floatp', 'float-precision', 'float-radix', 'float-sign', 'floor',
    'fmakunbound', 'force-output', 'format', 'fourth', 'fresh-line',
    'fround', 'ftruncate', 'funcall', 'function-keywords',
    'function-lambda-expression', 'functionp', 'gcd', 'gensym', 'gentemp',
    'get', 'get-decoded-time', 'get-dispatch-macro-character', 'getf',
    'gethash', 'get-internal-real-time', 'get-internal-run-time',
|
'get-macro-character', 'get-output-stream-string', 'get-properties',
|
||||||
|
'get-setf-expansion', 'get-universal-time', 'graphic-char-p',
|
||||||
|
'hash-table-count', 'hash-table-p', 'hash-table-rehash-size',
|
||||||
|
'hash-table-rehash-threshold', 'hash-table-size', 'hash-table-test',
|
||||||
|
'host-namestring', 'identity', 'imagpart', 'import',
|
||||||
|
'initialize-instance', 'input-stream-p', 'inspect',
|
||||||
|
'integer-decode-float', 'integer-length', 'integerp',
|
||||||
|
'interactive-stream-p', 'intern', 'intersection',
|
||||||
|
'invalid-method-error', 'invoke-debugger', 'invoke-restart',
|
||||||
|
'invoke-restart-interactively', 'isqrt', 'keywordp', 'last', 'lcm',
|
||||||
|
'ldb', 'ldb-test', 'ldiff', 'length', 'lisp-implementation-type',
|
||||||
|
'lisp-implementation-version', 'list', 'list*', 'list-all-packages',
|
||||||
|
'listen', 'list-length', 'listp', 'load',
|
||||||
|
'load-logical-pathname-translations', 'log', 'logand', 'logandc1',
|
||||||
|
'logandc2', 'logbitp', 'logcount', 'logeqv', 'logical-pathname',
|
||||||
|
'logical-pathname-translations', 'logior', 'lognand', 'lognor',
|
||||||
|
'lognot', 'logorc1', 'logorc2', 'logtest', 'logxor', 'long-site-name',
|
||||||
|
'lower-case-p', 'machine-instance', 'machine-type', 'machine-version',
|
||||||
|
'macroexpand', 'macroexpand-1', 'macro-function', 'make-array',
|
||||||
|
'make-broadcast-stream', 'make-concatenated-stream', 'make-condition',
|
||||||
|
'make-dispatch-macro-character', 'make-echo-stream', 'make-hash-table',
|
||||||
|
'make-instance', 'make-instances-obsolete', 'make-list',
|
||||||
|
'make-load-form', 'make-load-form-saving-slots', 'make-package',
|
||||||
|
'make-pathname', 'make-random-state', 'make-sequence', 'make-string',
|
||||||
|
'make-string-input-stream', 'make-string-output-stream', 'make-symbol',
|
||||||
|
'make-synonym-stream', 'make-two-way-stream', 'makunbound', 'map',
|
||||||
|
'mapc', 'mapcan', 'mapcar', 'mapcon', 'maphash', 'map-into', 'mapl',
|
||||||
|
'maplist', 'mask-field', 'max', 'member', 'member-if', 'member-if-not',
|
||||||
|
'merge', 'merge-pathnames', 'method-combination-error',
|
||||||
|
'method-qualifiers', 'min', 'minusp', 'mismatch', 'mod',
|
||||||
|
'muffle-warning', 'name-char', 'namestring', 'nbutlast', 'nconc',
|
||||||
|
'next-method-p', 'nintersection', 'ninth', 'no-applicable-method',
|
||||||
|
'no-next-method', 'not', 'notany', 'notevery', 'nreconc', 'nreverse',
|
||||||
|
'nset-difference', 'nset-exclusive-or', 'nstring-capitalize',
|
||||||
|
'nstring-downcase', 'nstring-upcase', 'nsublis', 'nsubst', 'nsubst-if',
|
||||||
|
'nsubst-if-not', 'nsubstitute', 'nsubstitute-if', 'nsubstitute-if-not',
|
||||||
|
'nth', 'nthcdr', 'null', 'numberp', 'numerator', 'nunion', 'oddp',
|
||||||
|
'open', 'open-stream-p', 'output-stream-p', 'package-error-package',
|
||||||
|
'package-name', 'package-nicknames', 'packagep',
|
||||||
|
'package-shadowing-symbols', 'package-used-by-list', 'package-use-list',
|
||||||
|
'pairlis', 'parse-integer', 'parse-namestring', 'pathname',
|
||||||
|
'pathname-device', 'pathname-directory', 'pathname-host',
|
||||||
|
'pathname-match-p', 'pathname-name', 'pathnamep', 'pathname-type',
|
||||||
|
'pathname-version', 'peek-char', 'phase', 'plusp', 'position',
|
||||||
|
'position-if', 'position-if-not', 'pprint', 'pprint-dispatch',
|
||||||
|
'pprint-fill', 'pprint-indent', 'pprint-linear', 'pprint-newline',
|
||||||
|
'pprint-tab', 'pprint-tabular', 'prin1', 'prin1-to-string', 'princ',
|
||||||
|
'princ-to-string', 'print', 'print-object', 'probe-file', 'proclaim',
|
||||||
|
'provide', 'random', 'random-state-p', 'rassoc', 'rassoc-if',
|
||||||
|
'rassoc-if-not', 'rational', 'rationalize', 'rationalp', 'read',
|
||||||
|
'read-byte', 'read-char', 'read-char-no-hang', 'read-delimited-list',
|
||||||
|
'read-from-string', 'read-line', 'read-preserving-whitespace',
|
||||||
|
'read-sequence', 'readtable-case', 'readtablep', 'realp', 'realpart',
|
||||||
|
'reduce', 'reinitialize-instance', 'rem', 'remhash', 'remove',
|
||||||
|
'remove-duplicates', 'remove-if', 'remove-if-not', 'remove-method',
|
||||||
|
'remprop', 'rename-file', 'rename-package', 'replace', 'require',
|
||||||
|
'rest', 'restart-name', 'revappend', 'reverse', 'room', 'round',
|
||||||
|
'row-major-aref', 'rplaca', 'rplacd', 'sbit', 'scale-float', 'schar',
|
||||||
|
'search', 'second', 'set', 'set-difference',
|
||||||
|
'set-dispatch-macro-character', 'set-exclusive-or',
|
||||||
|
'set-macro-character', 'set-pprint-dispatch', 'set-syntax-from-char',
|
||||||
|
'seventh', 'shadow', 'shadowing-import', 'shared-initialize',
|
||||||
|
'short-site-name', 'signal', 'signum', 'simple-bit-vector-p',
|
||||||
|
'simple-condition-format-arguments', 'simple-condition-format-control',
|
||||||
|
'simple-string-p', 'simple-vector-p', 'sin', 'sinh', 'sixth', 'sleep',
|
||||||
|
'slot-boundp', 'slot-exists-p', 'slot-makunbound', 'slot-missing',
|
||||||
|
'slot-unbound', 'slot-value', 'software-type', 'software-version',
|
||||||
|
'some', 'sort', 'special-operator-p', 'sqrt', 'stable-sort',
|
||||||
|
'standard-char-p', 'store-value', 'stream-element-type',
|
||||||
|
'stream-error-stream', 'stream-external-format', 'streamp', 'string',
|
||||||
|
'string<', 'string<=', 'string=', 'string>', 'string>=', 'string/=',
|
||||||
|
'string-capitalize', 'string-downcase', 'string-equal',
|
||||||
|
'string-greaterp', 'string-left-trim', 'string-lessp',
|
||||||
|
'string-not-equal', 'string-not-greaterp', 'string-not-lessp',
|
||||||
|
'stringp', 'string-right-trim', 'string-trim', 'string-upcase',
|
||||||
|
'sublis', 'subseq', 'subsetp', 'subst', 'subst-if', 'subst-if-not',
|
||||||
|
'substitute', 'substitute-if', 'substitute-if-not', 'subtypep','svref',
|
||||||
|
'sxhash', 'symbol-function', 'symbol-name', 'symbolp', 'symbol-package',
|
||||||
|
'symbol-plist', 'symbol-value', 'synonym-stream-symbol', 'syntax:',
|
||||||
|
'tailp', 'tan', 'tanh', 'tenth', 'terpri', 'third',
|
||||||
|
'translate-logical-pathname', 'translate-pathname', 'tree-equal',
|
||||||
|
'truename', 'truncate', 'two-way-stream-input-stream',
|
||||||
|
'two-way-stream-output-stream', 'type-error-datum',
|
||||||
|
'type-error-expected-type', 'type-of', 'typep', 'unbound-slot-instance',
|
||||||
|
'unexport', 'unintern', 'union', 'unread-char', 'unuse-package',
|
||||||
|
'update-instance-for-different-class',
|
||||||
|
'update-instance-for-redefined-class', 'upgraded-array-element-type',
|
||||||
|
'upgraded-complex-part-type', 'upper-case-p', 'use-package',
|
||||||
|
'user-homedir-pathname', 'use-value', 'values', 'values-list', 'vector',
|
||||||
|
'vectorp', 'vector-pop', 'vector-push', 'vector-push-extend', 'warn',
|
||||||
|
'wild-pathname-p', 'write', 'write-byte', 'write-char', 'write-line',
|
||||||
|
'write-sequence', 'write-string', 'write-to-string', 'yes-or-no-p',
|
||||||
|
'y-or-n-p', 'zerop',
|
||||||
|
]
|
||||||
|
|
||||||
|
SPECIAL_FORMS = [
|
||||||
|
'block', 'catch', 'declare', 'eval-when', 'flet', 'function', 'go', 'if',
|
||||||
|
'labels', 'lambda', 'let', 'let*', 'load-time-value', 'locally', 'macrolet',
|
||||||
|
'multiple-value-call', 'multiple-value-prog1', 'progn', 'progv', 'quote',
|
||||||
|
'return-from', 'setq', 'symbol-macrolet', 'tagbody', 'the', 'throw',
|
||||||
|
'unwind-protect',
|
||||||
|
]
|
||||||
|
|
||||||
|
MACROS = [
|
||||||
|
'and', 'assert', 'call-method', 'case', 'ccase', 'check-type', 'cond',
|
||||||
|
'ctypecase', 'decf', 'declaim', 'defclass', 'defconstant', 'defgeneric',
|
||||||
|
'define-compiler-macro', 'define-condition', 'define-method-combination',
|
||||||
|
'define-modify-macro', 'define-setf-expander', 'define-symbol-macro',
|
||||||
|
'defmacro', 'defmethod', 'defpackage', 'defparameter', 'defsetf',
|
||||||
|
'defstruct', 'deftype', 'defun', 'defvar', 'destructuring-bind', 'do',
|
||||||
|
'do*', 'do-all-symbols', 'do-external-symbols', 'dolist', 'do-symbols',
|
||||||
|
'dotimes', 'ecase', 'etypecase', 'formatter', 'handler-bind',
|
||||||
|
'handler-case', 'ignore-errors', 'incf', 'in-package', 'lambda', 'loop',
|
||||||
|
'loop-finish', 'make-method', 'multiple-value-bind', 'multiple-value-list',
|
||||||
|
'multiple-value-setq', 'nth-value', 'or', 'pop',
|
||||||
|
'pprint-exit-if-list-exhausted', 'pprint-logical-block', 'pprint-pop',
|
||||||
|
'print-unreadable-object', 'prog', 'prog*', 'prog1', 'prog2', 'psetf',
|
||||||
|
'psetq', 'push', 'pushnew', 'remf', 'restart-bind', 'restart-case',
|
||||||
|
'return', 'rotatef', 'setf', 'shiftf', 'step', 'time', 'trace', 'typecase',
|
||||||
|
'unless', 'untrace', 'when', 'with-accessors', 'with-compilation-unit',
|
||||||
|
'with-condition-restarts', 'with-hash-table-iterator',
|
||||||
|
'with-input-from-string', 'with-open-file', 'with-open-stream',
|
||||||
|
'with-output-to-string', 'with-package-iterator', 'with-simple-restart',
|
||||||
|
'with-slots', 'with-standard-io-syntax',
|
||||||
|
]
|
||||||
|
|
||||||
|
LAMBDA_LIST_KEYWORDS = [
|
||||||
|
'&allow-other-keys', '&aux', '&body', '&environment', '&key', '&optional',
|
||||||
|
'&rest', '&whole',
|
||||||
|
]
|
||||||
|
|
||||||
|
DECLARATIONS = [
|
||||||
|
'dynamic-extent', 'ignore', 'optimize', 'ftype', 'inline', 'special',
|
||||||
|
'ignorable', 'notinline', 'type',
|
||||||
|
]
|
||||||
|
|
||||||
|
BUILTIN_TYPES = [
|
||||||
|
'atom', 'boolean', 'base-char', 'base-string', 'bignum', 'bit',
|
||||||
|
'compiled-function', 'extended-char', 'fixnum', 'keyword', 'nil',
|
||||||
|
'signed-byte', 'short-float', 'single-float', 'double-float', 'long-float',
|
||||||
|
'simple-array', 'simple-base-string', 'simple-bit-vector', 'simple-string',
|
||||||
|
'simple-vector', 'standard-char', 'unsigned-byte',
|
||||||
|
|
||||||
|
# Condition Types
|
||||||
|
'arithmetic-error', 'cell-error', 'condition', 'control-error',
|
||||||
|
'division-by-zero', 'end-of-file', 'error', 'file-error',
|
||||||
|
'floating-point-inexact', 'floating-point-overflow',
|
||||||
|
'floating-point-underflow', 'floating-point-invalid-operation',
|
||||||
|
'parse-error', 'package-error', 'print-not-readable', 'program-error',
|
||||||
|
'reader-error', 'serious-condition', 'simple-condition', 'simple-error',
|
||||||
|
'simple-type-error', 'simple-warning', 'stream-error', 'storage-condition',
|
||||||
|
'style-warning', 'type-error', 'unbound-variable', 'unbound-slot',
|
||||||
|
'undefined-function', 'warning',
|
||||||
|
]
|
||||||
|
|
||||||
|
BUILTIN_CLASSES = [
|
||||||
|
'array', 'broadcast-stream', 'bit-vector', 'built-in-class', 'character',
|
||||||
|
'class', 'complex', 'concatenated-stream', 'cons', 'echo-stream',
|
||||||
|
'file-stream', 'float', 'function', 'generic-function', 'hash-table',
|
||||||
|
'integer', 'list', 'logical-pathname', 'method-combination', 'method',
|
||||||
|
'null', 'number', 'package', 'pathname', 'ratio', 'rational', 'readtable',
|
||||||
|
'real', 'random-state', 'restart', 'sequence', 'standard-class',
|
||||||
|
'standard-generic-function', 'standard-method', 'standard-object',
|
||||||
|
'string-stream', 'stream', 'string', 'structure-class', 'structure-object',
|
||||||
|
'symbol', 'synonym-stream', 't', 'two-way-stream', 'vector',
|
||||||
|
]
|
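For orientation (not part of the diff): these module-level lists are lookup tables that let the Common Lisp lexer retag bare symbols by set membership. The sketch below is a simplified, hypothetical helper illustrating that lookup; the real CommonLispLexer does the equivalent inside get_tokens_unprocessed(), and the name `classify_symbol` is invented here.

# Hypothetical helper showing how the lists above are consumed.
from pygments.token import Keyword, Name

def classify_symbol(value):
    # Most specific category wins; unknown symbols stay plain variables.
    if value in SPECIAL_FORMS or value in MACROS:
        return Keyword
    if value in LAMBDA_LIST_KEYWORDS:
        return Keyword.Pseudo
    if value in DECLARATIONS:
        return Keyword.Declaration
    if value in BUILTIN_FUNCTIONS:
        return Name.Builtin
    if value in BUILTIN_TYPES or value in BUILTIN_CLASSES:
        return Name.Builtin.Pseudo
    return Name.Variable

# e.g. classify_symbol('defun') -> Keyword; classify_symbol('mapcar') -> Name.Builtin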
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff