# -*- coding: utf-8 -*-
"""
pygments.lexers.templates
~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for various template engines' markup.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexers.html import HtmlLexer, XmlLexer
from pygments.lexers.javascript import JavascriptLexer, LassoLexer
from pygments.lexers.css import CssLexer
from pygments.lexers.php import PhpLexer
from pygments.lexers.python import PythonLexer
from pygments.lexers.perl import PerlLexer
from pygments.lexers.jvm import JavaLexer, TeaLangLexer
from pygments.lexers.data import YamlLexer
from pygments.lexer import Lexer, DelegatingLexer, RegexLexer, bygroups, \
include, using, this, default, combined
from pygments.token import Error, Punctuation, Whitespace, \
Text, Comment, Operator, Keyword, Name, String, Number, Other, Token
from pygments.util import html_doctype_matches, looks_like_xml
__all__ = ['HtmlPhpLexer', 'XmlPhpLexer', 'CssPhpLexer',
'JavascriptPhpLexer', 'ErbLexer', 'RhtmlLexer',
'XmlErbLexer', 'CssErbLexer', 'JavascriptErbLexer',
'SmartyLexer', 'HtmlSmartyLexer', 'XmlSmartyLexer',
'CssSmartyLexer', 'JavascriptSmartyLexer', 'DjangoLexer',
'HtmlDjangoLexer', 'CssDjangoLexer', 'XmlDjangoLexer',
'JavascriptDjangoLexer', 'GenshiLexer', 'HtmlGenshiLexer',
'GenshiTextLexer', 'CssGenshiLexer', 'JavascriptGenshiLexer',
'MyghtyLexer', 'MyghtyHtmlLexer', 'MyghtyXmlLexer',
'MyghtyCssLexer', 'MyghtyJavascriptLexer', 'MasonLexer', 'MakoLexer',
'MakoHtmlLexer', 'MakoXmlLexer', 'MakoJavascriptLexer',
'MakoCssLexer', 'JspLexer', 'CheetahLexer', 'CheetahHtmlLexer',
'CheetahXmlLexer', 'CheetahJavascriptLexer', 'EvoqueLexer',
'EvoqueHtmlLexer', 'EvoqueXmlLexer', 'ColdfusionLexer',
'ColdfusionHtmlLexer', 'ColdfusionCFCLexer', 'VelocityLexer',
'VelocityHtmlLexer', 'VelocityXmlLexer', 'SspLexer',
'TeaTemplateLexer', 'LassoHtmlLexer', 'LassoXmlLexer',
'LassoCssLexer', 'LassoJavascriptLexer', 'HandlebarsLexer',
'HandlebarsHtmlLexer', 'YamlJinjaLexer', 'LiquidLexer',
'TwigLexer', 'TwigHtmlLexer']
class ErbLexer(Lexer):
"""
Generic `ERB <http://ruby-doc.org/core/classes/ERB.html>`_ (Ruby Templating)
lexer.
    Just highlights Ruby code between the preprocessor directives; other data
    is left untouched by the lexer.
All options are also forwarded to the `RubyLexer`.
"""
name = 'ERB'
aliases = ['erb']
mimetypes = ['application/x-ruby-templating']
_block_re = re.compile(r'(<%%|%%>|<%=|<%#|<%-|<%|-%>|%>|^%[^%].*?$)', re.M)
def __init__(self, **options):
from pygments.lexers.ruby import RubyLexer
self.ruby_lexer = RubyLexer(**options)
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
"""
        Since ERB doesn't allow "<%" and other tags inside of Ruby
        blocks, we have to use a split approach here that fails for
        that, too.
"""
tokens = self._block_re.split(text)
tokens.reverse()
state = idx = 0
try:
while True:
# text
if state == 0:
val = tokens.pop()
yield idx, Other, val
idx += len(val)
state = 1
# block starts
elif state == 1:
tag = tokens.pop()
# literals
if tag in ('<%%', '%%>'):
yield idx, Other, tag
idx += 3
state = 0
# comment
elif tag == '<%#':
yield idx, Comment.Preproc, tag
val = tokens.pop()
yield idx + 3, Comment, val
idx += 3 + len(val)
state = 2
# blocks or output
elif tag in ('<%', '<%=', '<%-'):
yield idx, Comment.Preproc, tag
idx += len(tag)
data = tokens.pop()
r_idx = 0
for r_idx, r_token, r_value in \
self.ruby_lexer.get_tokens_unprocessed(data):
yield r_idx + idx, r_token, r_value
idx += len(data)
state = 2
elif tag in ('%>', '-%>'):
yield idx, Error, tag
idx += len(tag)
state = 0
# % raw ruby statements
else:
yield idx, Comment.Preproc, tag[0]
r_idx = 0
for r_idx, r_token, r_value in \
self.ruby_lexer.get_tokens_unprocessed(tag[1:]):
yield idx + 1 + r_idx, r_token, r_value
idx += len(tag)
state = 0
# block ends
elif state == 2:
tag = tokens.pop()
if tag not in ('%>', '-%>'):
yield idx, Other, tag
else:
yield idx, Comment.Preproc, tag
idx += len(tag)
state = 0
except IndexError:
return
def analyse_text(text):
if '<%' in text and '%>' in text:
return 0.4
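

# Hedged usage sketch (the sample template below is illustrative, not taken
# from the Pygments test suite): get_tokens_unprocessed() yields Ruby tokens
# for code between the <% ... %> delimiters and Token.Other for everything
# else, which is what lets the delegating subclasses re-lex the remainder.
def _demo_erb_tokens():
    lexer = ErbLexer()
    sample = '<p><%= @user.name %></p>'
    # Each item is (offset, tokentype, value).
    return list(lexer.get_tokens_unprocessed(sample))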
class SmartyLexer(RegexLexer):
"""
Generic `Smarty <http://smarty.php.net/>`_ template lexer.
    Just highlights Smarty code between the preprocessor directives; other
    data is left untouched by the lexer.
"""
name = 'Smarty'
aliases = ['smarty']
filenames = ['*.tpl']
mimetypes = ['application/x-smarty']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
(r'[^{]+', Other),
(r'(\{)(\*.*?\*)(\})',
bygroups(Comment.Preproc, Comment, Comment.Preproc)),
(r'(\{php\})(.*?)(\{/php\})',
bygroups(Comment.Preproc, using(PhpLexer, startinline=True),
Comment.Preproc)),
(r'(\{)(/?[a-zA-Z_]\w*)(\s*)',
bygroups(Comment.Preproc, Name.Function, Text), 'smarty'),
(r'\{', Comment.Preproc, 'smarty')
],
'smarty': [
(r'\s+', Text),
(r'\{', Comment.Preproc, '#push'),
(r'\}', Comment.Preproc, '#pop'),
(r'#[a-zA-Z_]\w*#', Name.Variable),
(r'\$[a-zA-Z_]\w*(\.\w+)*', Name.Variable),
(r'[~!%^&*()+=|\[\]:;,.<>/?@-]', Operator),
(r'(true|false|null)\b', Keyword.Constant),
(r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
r"0[xX][0-9a-fA-F]+[Ll]?", Number),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r'[a-zA-Z_]\w*', Name.Attribute)
]
}
def analyse_text(text):
rv = 0.0
        if re.search(r'\{if\s+.*?\}.*?\{/if\}', text):
            rv += 0.15
        if re.search(r'\{include\s+file=.*?\}', text):
            rv += 0.15
        if re.search(r'\{foreach\s+.*?\}.*?\{/foreach\}', text):
            rv += 0.15
        if re.search(r'\{\$.*?\}', text):
rv += 0.01
return rv
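

# Minimal sketch of the scoring heuristic above (the sample string is made
# up). Pygments wraps analyse_text() into a static method whose result is
# clamped to the range 0.0..1.0, so it can be called directly on the class.
def _demo_smarty_score():
    sample = '{if $logged_in}Hello {$name}{/if}'
    # Hits the {if}...{/if} rule (+0.15) and the {$var} rule (+0.01).
    return SmartyLexer.analyse_text(sample)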
class VelocityLexer(RegexLexer):
"""
Generic `Velocity <http://velocity.apache.org/>`_ template lexer.
    Just highlights Velocity directives and variable references; other
    data is left untouched by the lexer.
"""
name = 'Velocity'
aliases = ['velocity']
filenames = ['*.vm', '*.fhtml']
flags = re.MULTILINE | re.DOTALL
identifier = r'[a-zA-Z_]\w*'
tokens = {
'root': [
(r'[^{#$]+', Other),
(r'(#)(\*.*?\*)(#)',
bygroups(Comment.Preproc, Comment, Comment.Preproc)),
(r'(##)(.*?$)',
bygroups(Comment.Preproc, Comment)),
(r'(#\{?)(' + identifier + r')(\}?)(\s?\()',
bygroups(Comment.Preproc, Name.Function, Comment.Preproc, Punctuation),
'directiveparams'),
(r'(#\{?)(' + identifier + r')(\}|\b)',
bygroups(Comment.Preproc, Name.Function, Comment.Preproc)),
(r'\$\{?', Punctuation, 'variable')
],
'variable': [
(identifier, Name.Variable),
(r'\(', Punctuation, 'funcparams'),
(r'(\.)(' + identifier + r')',
bygroups(Punctuation, Name.Variable), '#push'),
(r'\}', Punctuation, '#pop'),
default('#pop')
],
'directiveparams': [
(r'(&&|\|\||==?|!=?|[-<>+*%&|^/])|\b(eq|ne|gt|lt|ge|le|not|in)\b',
Operator),
(r'\[', Operator, 'rangeoperator'),
(r'\b' + identifier + r'\b', Name.Function),
include('funcparams')
],
'rangeoperator': [
(r'\.\.', Operator),
include('funcparams'),
(r'\]', Operator, '#pop')
],
'funcparams': [
(r'\$\{?', Punctuation, 'variable'),
(r'\s+', Text),
(r'[,:]', Punctuation),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r"0[xX][0-9a-fA-F]+[Ll]?", Number),
(r"\b[0-9]+\b", Number),
(r'(true|false|null)\b', Keyword.Constant),
(r'\(', Punctuation, '#push'),
(r'\)', Punctuation, '#pop'),
(r'\{', Punctuation, '#push'),
(r'\}', Punctuation, '#pop'),
(r'\[', Punctuation, '#push'),
(r'\]', Punctuation, '#pop'),
]
}
def analyse_text(text):
rv = 0.0
if re.search(r'#\{?macro\}?\(.*?\).*?#\{?end\}?', text):
rv += 0.25
if re.search(r'#\{?if\}?\(.+?\).*?#\{?end\}?', text):
rv += 0.15
if re.search(r'#\{?foreach\}?\(.+?\).*?#\{?end\}?', text):
rv += 0.15
if re.search(r'\$\{?[a-zA-Z_]\w*(\([^)]*\))?'
r'(\.\w+(\([^)]*\))?)*\}?', text):
rv += 0.01
return rv
class VelocityHtmlLexer(DelegatingLexer):
"""
Subclass of the `VelocityLexer` that highlights unlexed data
with the `HtmlLexer`.
"""
name = 'HTML+Velocity'
aliases = ['html+velocity']
alias_filenames = ['*.html', '*.fhtml']
mimetypes = ['text/html+velocity']
def __init__(self, **options):
super(VelocityHtmlLexer, self).__init__(HtmlLexer, VelocityLexer,
**options)
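

# How the DelegatingLexer composition works, in brief: the language lexer
# (VelocityLexer) runs first, and every span it yields as Token.Other is
# buffered and re-lexed by the root lexer (HtmlLexer). A hedged sketch with
# an illustrative template string:
def _demo_velocity_html():
    lexer = VelocityHtmlLexer()
    sample = '<ul>#foreach($item in $list)<li>$item</li>#end</ul>'
    # get_tokens() yields (tokentype, value) pairs for the merged stream.
    return list(lexer.get_tokens(sample))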
class VelocityXmlLexer(DelegatingLexer):
"""
Subclass of the `VelocityLexer` that highlights unlexed data
with the `XmlLexer`.
"""
name = 'XML+Velocity'
aliases = ['xml+velocity']
alias_filenames = ['*.xml', '*.vm']
mimetypes = ['application/xml+velocity']
def __init__(self, **options):
super(VelocityXmlLexer, self).__init__(XmlLexer, VelocityLexer,
**options)
def analyse_text(text):
rv = VelocityLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
return rv
class DjangoLexer(RegexLexer):
"""
Generic `django <http://www.djangoproject.com/documentation/templates/>`_
and `jinja <http://wsgiarea.pocoo.org/jinja/>`_ template lexer.
    It just highlights Django/Jinja code between the preprocessor directives;
    other data is left untouched by the lexer.
"""
name = 'Django/Jinja'
aliases = ['django', 'jinja']
mimetypes = ['application/x-django-templating', 'application/x-jinja']
flags = re.M | re.S
tokens = {
'root': [
(r'[^{]+', Other),
(r'\{\{', Comment.Preproc, 'var'),
# jinja/django comments
(r'\{[*#].*?[*#]\}', Comment),
# django comments
(r'(\{%)(-?\s*)(comment)(\s*-?)(%\})(.*?)'
r'(\{%)(-?\s*)(endcomment)(\s*-?)(%\})',
bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
Comment, Comment.Preproc, Text, Keyword, Text,
Comment.Preproc)),
# raw jinja blocks
(r'(\{%)(-?\s*)(raw)(\s*-?)(%\})(.*?)'
r'(\{%)(-?\s*)(endraw)(\s*-?)(%\})',
bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
Text, Comment.Preproc, Text, Keyword, Text,
Comment.Preproc)),
# filter blocks
(r'(\{%)(-?\s*)(filter)(\s+)([a-zA-Z_]\w*)',
bygroups(Comment.Preproc, Text, Keyword, Text, Name.Function),
'block'),
(r'(\{%)(-?\s*)([a-zA-Z_]\w*)',
bygroups(Comment.Preproc, Text, Keyword), 'block'),
(r'\{', Other)
],
'varnames': [
(r'(\|)(\s*)([a-zA-Z_]\w*)',
bygroups(Operator, Text, Name.Function)),
(r'(is)(\s+)(not)?(\s+)?([a-zA-Z_]\w*)',
bygroups(Keyword, Text, Keyword, Text, Name.Function)),
(r'(_|true|false|none|True|False|None)\b', Keyword.Pseudo),
(r'(in|as|reversed|recursive|not|and|or|is|if|else|import|'
r'with(?:(?:out)?\s*context)?|scoped|ignore\s+missing)\b',
Keyword),
(r'(loop|block|super|forloop)\b', Name.Builtin),
(r'[a-zA-Z_][\w-]*', Name.Variable),
(r'\.\w+', Name.Variable),
(r':?"(\\\\|\\"|[^"])*"', String.Double),
(r":?'(\\\\|\\'|[^'])*'", String.Single),
(r'([{}()\[\]+\-*/,:~]|[><=]=?)', Operator),
(r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
r"0[xX][0-9a-fA-F]+[Ll]?", Number),
],
'var': [
(r'\s+', Text),
(r'(-?)(\}\})', bygroups(Text, Comment.Preproc), '#pop'),
include('varnames')
],
'block': [
(r'\s+', Text),
(r'(-?)(%\})', bygroups(Text, Comment.Preproc), '#pop'),
include('varnames'),
(r'.', Punctuation)
]
}
def analyse_text(text):
rv = 0.0
if re.search(r'\{%\s*(block|extends)', text) is not None:
rv += 0.4
if re.search(r'\{%\s*if\s*.*?%\}', text) is not None:
rv += 0.1
if re.search(r'\{\{.*?\}\}', text) is not None:
rv += 0.1
return rv
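

# Hedged sketch (the sample markup is illustrative): variables inside
# {{ ... }} and tags inside {% ... %} get real tokens, while surrounding
# text is yielded as Token.Other for the delegating subclasses below.
def _demo_django_tokens():
    lexer = DjangoLexer()
    sample = '{% for user in users %}{{ user.name|title }}{% endfor %}'
    return list(lexer.get_tokens(sample))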
class MyghtyLexer(RegexLexer):
"""
Generic `myghty templates`_ lexer. Code that isn't Myghty
markup is yielded as `Token.Other`.
.. versionadded:: 0.6
.. _myghty templates: http://www.myghty.org/
"""
name = 'Myghty'
aliases = ['myghty']
filenames = ['*.myt', 'autodelegate']
mimetypes = ['application/x-myghty']
tokens = {
'root': [
(r'\s+', Text),
            (r'(?s)(<%(?:def|method))(\s*)(.*?)(>)(.*?)(</%\2\s*>)',
             bygroups(Name.Tag, Text, Name.Function, Name.Tag,
                      using(this), Name.Tag)),
            (r'(?s)(<%\w+)(.*?)(>)(.*?)(</%\2\s*>)',
             bygroups(Name.Tag, Name.Function, Name.Tag,
                      using(PythonLexer), Name.Tag)),
            (r'(<&[^|])(.*?)(,.*?)?(&>)',
             bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)),
            (r'(?s)(<&\|)(.*?)(,.*?)?(&>)',
             bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)),
            (r'</&>', Name.Tag),
            (r'(?s)(<%!?)(.*?)(%>)',
             bygroups(Name.Tag, using(PythonLexer), Name.Tag)),
(r'(?<=^)#[^\n]*(\n|\Z)', Comment),
(r'(?<=^)(%)([^\n]*)(\n|\Z)',
bygroups(Name.Tag, using(PythonLexer), Other)),
(r"""(?sx)
(.+?) # anything, followed by:
(?:
(?<=\n)(?=[%#]) | # an eval or comment line
(?=</?[%&]) | # a substitution or block or
# call start or end
# - don't consume
(\\\n) | # an escaped newline
\Z # end of string
)""", bygroups(Other, Operator)),
]
}
class MyghtyHtmlLexer(DelegatingLexer):
"""
Subclass of the `MyghtyLexer` that highlights unlexed data
with the `HtmlLexer`.
.. versionadded:: 0.6
"""
name = 'HTML+Myghty'
aliases = ['html+myghty']
mimetypes = ['text/html+myghty']
def __init__(self, **options):
super(MyghtyHtmlLexer, self).__init__(HtmlLexer, MyghtyLexer,
**options)
class MyghtyXmlLexer(DelegatingLexer):
"""
Subclass of the `MyghtyLexer` that highlights unlexed data
with the `XmlLexer`.
.. versionadded:: 0.6
"""
name = 'XML+Myghty'
aliases = ['xml+myghty']
mimetypes = ['application/xml+myghty']
def __init__(self, **options):
super(MyghtyXmlLexer, self).__init__(XmlLexer, MyghtyLexer,
**options)
class MyghtyJavascriptLexer(DelegatingLexer):
"""
Subclass of the `MyghtyLexer` that highlights unlexed data
with the `JavascriptLexer`.
.. versionadded:: 0.6
"""
name = 'JavaScript+Myghty'
aliases = ['js+myghty', 'javascript+myghty']
mimetypes = ['application/x-javascript+myghty',
'text/x-javascript+myghty',
                 'text/javascript+myghty']
def __init__(self, **options):
super(MyghtyJavascriptLexer, self).__init__(JavascriptLexer,
MyghtyLexer, **options)
class MyghtyCssLexer(DelegatingLexer):
"""
Subclass of the `MyghtyLexer` that highlights unlexed data
with the `CssLexer`.
.. versionadded:: 0.6
"""
name = 'CSS+Myghty'
aliases = ['css+myghty']
mimetypes = ['text/css+myghty']
def __init__(self, **options):
super(MyghtyCssLexer, self).__init__(CssLexer, MyghtyLexer,
**options)
class MasonLexer(RegexLexer):
"""
Generic `mason templates`_ lexer. Stolen from Myghty lexer. Code that isn't
Mason markup is HTML.
.. _mason templates: http://www.masonhq.com/
.. versionadded:: 1.4
"""
name = 'Mason'
aliases = ['mason']
filenames = ['*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler']
mimetypes = ['application/x-mason']
tokens = {
'root': [
(r'\s+', Text),
            (r'(?s)(<%doc>)(.*?)(</%doc>)',
             bygroups(Name.Tag, Comment.Multiline, Name.Tag)),
            (r'(?s)(<%(?:def|method))(\s*)(.*?)(>)(.*?)(</%\2\s*>)',
             bygroups(Name.Tag, Text, Name.Function, Name.Tag,
                      using(this), Name.Tag)),
            (r'(?s)(<%\w+)(.*?)(>)(.*?)(</%\2\s*>)',
             bygroups(Name.Tag, Name.Function, Name.Tag,
                      using(PerlLexer), Name.Tag)),
            (r'(?s)(<&[^|])(.*?)(,.*?)?(&>)',
             bygroups(Name.Tag, Name.Function, using(PerlLexer), Name.Tag)),
            (r'(?s)(<&\|)(.*?)(,.*?)?(&>)',
             bygroups(Name.Tag, Name.Function, using(PerlLexer), Name.Tag)),
            (r'</&>', Name.Tag),
            (r'(?s)(<%!?)(.*?)(%>)',
             bygroups(Name.Tag, using(PerlLexer), Name.Tag)),
(r'(?<=^)#[^\n]*(\n|\Z)', Comment),
(r'(?<=^)(%)([^\n]*)(\n|\Z)',
bygroups(Name.Tag, using(PerlLexer), Other)),
(r"""(?sx)
(.+?) # anything, followed by:
(?:
(?<=\n)(?=[%#]) | # an eval or comment line
(?=</?[%&]) | # a substitution or block or
# call start or end
# - don't consume
(\\\n) | # an escaped newline
\Z # end of string
)""", bygroups(using(HtmlLexer), Operator)),
]
}
def analyse_text(text):
result = 0.0
        if re.search(r'</%(class|doc|init)>', text) is not None:
result = 1.0
elif re.search(r'<&.+&>', text, re.DOTALL) is not None:
result = 0.11
return result
class MakoLexer(RegexLexer):
"""
Generic `mako templates`_ lexer. Code that isn't Mako
markup is yielded as `Token.Other`.
.. versionadded:: 0.7
.. _mako templates: http://www.makotemplates.org/
"""
name = 'Mako'
aliases = ['mako']
filenames = ['*.mao']
mimetypes = ['application/x-mako']
tokens = {
'root': [
(r'(\s*)(%)(\s*end(?:\w+))(\n|\Z)',
bygroups(Text, Comment.Preproc, Keyword, Other)),
(r'(\s*)(%)([^\n]*)(\n|\Z)',
bygroups(Text, Comment.Preproc, using(PythonLexer), Other)),
(r'(\s*)(##[^\n]*)(\n|\Z)',
bygroups(Text, Comment.Preproc, Other)),
(r'(?s)<%doc>.*?</%doc>', Comment.Preproc),
(r'(<%)([\w.:]+)',
bygroups(Comment.Preproc, Name.Builtin), 'tag'),
(r'(</%)([\w.:]+)(>)',
bygroups(Comment.Preproc, Name.Builtin, Comment.Preproc)),
(r'<%(?=([\w.:]+))', Comment.Preproc, 'ondeftags'),
            (r'(?s)(<%(?:!?))(.*?)(%>)',
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
(r'(\$\{)(.*?)(\})',
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
(r'''(?sx)
(.+?) # anything, followed by:
(?:
(?<=\n)(?=%|\#\#) | # an eval or comment line
(?=\#\*) | # multiline comment
(?=</?%) | # a python block
# call start or end
(?=\$\{) | # a substitution
(?<=\n)(?=\s*%) |
# - don't consume
(\\\n) | # an escaped newline
\Z # end of string
)
''', bygroups(Other, Operator)),
(r'\s+', Text),
],
'ondeftags': [
(r'<%', Comment.Preproc),
(r'(?<=<%)(include|inherit|namespace|page)', Name.Builtin),
include('tag'),
],
'tag': [
(r'((?:\w+)\s*=)(\s*)(".*?")',
bygroups(Name.Attribute, Text, String)),
(r'/?\s*>', Comment.Preproc, '#pop'),
(r'\s+', Text),
],
'attr': [
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
class MakoHtmlLexer(DelegatingLexer):
"""
Subclass of the `MakoLexer` that highlights unlexed data
with the `HtmlLexer`.
.. versionadded:: 0.7
"""
name = 'HTML+Mako'
aliases = ['html+mako']
mimetypes = ['text/html+mako']
def __init__(self, **options):
super(MakoHtmlLexer, self).__init__(HtmlLexer, MakoLexer,
**options)
class MakoXmlLexer(DelegatingLexer):
"""
Subclass of the `MakoLexer` that highlights unlexed data
with the `XmlLexer`.
.. versionadded:: 0.7
"""
name = 'XML+Mako'
aliases = ['xml+mako']
mimetypes = ['application/xml+mako']
def __init__(self, **options):
super(MakoXmlLexer, self).__init__(XmlLexer, MakoLexer,
**options)
class MakoJavascriptLexer(DelegatingLexer):
"""
Subclass of the `MakoLexer` that highlights unlexed data
with the `JavascriptLexer`.
.. versionadded:: 0.7
"""
name = 'JavaScript+Mako'
aliases = ['js+mako', 'javascript+mako']
mimetypes = ['application/x-javascript+mako',
'text/x-javascript+mako',
'text/javascript+mako']
def __init__(self, **options):
super(MakoJavascriptLexer, self).__init__(JavascriptLexer,
MakoLexer, **options)
class MakoCssLexer(DelegatingLexer):
"""
Subclass of the `MakoLexer` that highlights unlexed data
with the `CssLexer`.
.. versionadded:: 0.7
"""
name = 'CSS+Mako'
aliases = ['css+mako']
mimetypes = ['text/css+mako']
def __init__(self, **options):
super(MakoCssLexer, self).__init__(CssLexer, MakoLexer,
**options)
# Genshi and Cheetah lexers courtesy of Matt Good.
class CheetahPythonLexer(Lexer):
"""
Lexer for handling Cheetah's special $ tokens in Python syntax.
"""
def get_tokens_unprocessed(self, text):
pylexer = PythonLexer(**self.options)
for pos, type_, value in pylexer.get_tokens_unprocessed(text):
if type_ == Token.Error and value == '$':
type_ = Comment.Preproc
yield pos, type_, value
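

# Sketch of the remapping above (the input string is illustrative): a bare
# "$" is an Error token for the plain PythonLexer, so CheetahPythonLexer
# rewrites it to Comment.Preproc, matching Cheetah's use of "$" as a
# placeholder marker.
def _demo_cheetah_dollar():
    lexer = CheetahPythonLexer()
    return [(token, value) for _, token, value in
            lexer.get_tokens_unprocessed('$name.upper()')]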
class CheetahLexer(RegexLexer):
"""
Generic `cheetah templates`_ lexer. Code that isn't Cheetah
markup is yielded as `Token.Other`. This also works for
`spitfire templates`_ which use the same syntax.
.. _cheetah templates: http://www.cheetahtemplate.org/
.. _spitfire templates: http://code.google.com/p/spitfire/
"""
name = 'Cheetah'
aliases = ['cheetah', 'spitfire']
filenames = ['*.tmpl', '*.spt']
mimetypes = ['application/x-cheetah', 'application/x-spitfire']
tokens = {
'root': [
(r'(##[^\n]*)$',
(bygroups(Comment))),
(r'#[*](.|\n)*?[*]#', Comment),
(r'#end[^#\n]*(?:#|$)', Comment.Preproc),
(r'#slurp$', Comment.Preproc),
(r'(#[a-zA-Z]+)([^#\n]*)(#|$)',
(bygroups(Comment.Preproc, using(CheetahPythonLexer),
Comment.Preproc))),
# TODO support other Python syntax like $foo['bar']
(r'(\$)([a-zA-Z_][\w.]*\w)',
bygroups(Comment.Preproc, using(CheetahPythonLexer))),
            (r'(?s)(\$\{!?)(.*?)(\})',
bygroups(Comment.Preproc, using(CheetahPythonLexer),
Comment.Preproc)),
(r'''(?sx)
(.+?) # anything, followed by:
(?:
(?=\#[#a-zA-Z]*) | # an eval comment
(?=\$[a-zA-Z_{]) | # a substitution
\Z # end of string
)
''', Other),
(r'\s+', Text),
],
}
class CheetahHtmlLexer(DelegatingLexer):
"""
Subclass of the `CheetahLexer` that highlights unlexed data
with the `HtmlLexer`.
"""
name = 'HTML+Cheetah'
aliases = ['html+cheetah', 'html+spitfire', 'htmlcheetah']
mimetypes = ['text/html+cheetah', 'text/html+spitfire']
def __init__(self, **options):
super(CheetahHtmlLexer, self).__init__(HtmlLexer, CheetahLexer,
**options)
class CheetahXmlLexer(DelegatingLexer):
"""
Subclass of the `CheetahLexer` that highlights unlexed data
with the `XmlLexer`.
"""
name = 'XML+Cheetah'
aliases = ['xml+cheetah', 'xml+spitfire']
mimetypes = ['application/xml+cheetah', 'application/xml+spitfire']
def __init__(self, **options):
super(CheetahXmlLexer, self).__init__(XmlLexer, CheetahLexer,
**options)
class CheetahJavascriptLexer(DelegatingLexer):
"""
Subclass of the `CheetahLexer` that highlights unlexed data
with the `JavascriptLexer`.
"""
name = 'JavaScript+Cheetah'
aliases = ['js+cheetah', 'javascript+cheetah',
'js+spitfire', 'javascript+spitfire']
mimetypes = ['application/x-javascript+cheetah',
'text/x-javascript+cheetah',
'text/javascript+cheetah',
'application/x-javascript+spitfire',
'text/x-javascript+spitfire',
'text/javascript+spitfire']
def __init__(self, **options):
super(CheetahJavascriptLexer, self).__init__(JavascriptLexer,
CheetahLexer, **options)
class GenshiTextLexer(RegexLexer):
"""
A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ text
templates.
"""
name = 'Genshi Text'
aliases = ['genshitext']
mimetypes = ['application/x-genshi-text', 'text/x-genshi']
tokens = {
'root': [
(r'[^#$\s]+', Other),
(r'^(\s*)(##.*)$', bygroups(Text, Comment)),
(r'^(\s*)(#)', bygroups(Text, Comment.Preproc), 'directive'),
include('variable'),
(r'[#$\s]', Other),
],
'directive': [
(r'\n', Text, '#pop'),
(r'(?:def|for|if)\s+.*', using(PythonLexer), '#pop'),
(r'(choose|when|with)([^\S\n]+)(.*)',
bygroups(Keyword, Text, using(PythonLexer)), '#pop'),
(r'(choose|otherwise)\b', Keyword, '#pop'),
(r'(end\w*)([^\S\n]*)(.*)', bygroups(Keyword, Text, Comment), '#pop'),
],
'variable': [
(r'(?<!\$)(\$\{)(.+?)(\})',
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
(r'(?<!\$)(\$)([a-zA-Z_][\w.]*)',
Name.Variable),
]
}
class GenshiMarkupLexer(RegexLexer):
"""
Base lexer for Genshi markup, used by `HtmlGenshiLexer` and
`GenshiLexer`.
"""
flags = re.DOTALL
tokens = {
'root': [
(r'[^<$]+', Other),
(r'(<\?python)(.*?)(\?>)',
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
# yield style and script blocks as Other
(r'<\s*(script|style)\s*.*?>.*?<\s*/\1\s*>', Other),
(r'<\s*py:[a-zA-Z0-9]+', Name.Tag, 'pytag'),
(r'<\s*[a-zA-Z0-9:.]+', Name.Tag, 'tag'),
include('variable'),
(r'[<$]', Other),
],
'pytag': [
(r'\s+', Text),
(r'[\w:-]+\s*=', Name.Attribute, 'pyattr'),
(r'/?\s*>', Name.Tag, '#pop'),
],
'pyattr': [
('(")(.*?)(")', bygroups(String, using(PythonLexer), String), '#pop'),
("(')(.*?)(')", bygroups(String, using(PythonLexer), String), '#pop'),
(r'[^\s>]+', String, '#pop'),
],
'tag': [
(r'\s+', Text),
(r'py:[\w-]+\s*=', Name.Attribute, 'pyattr'),
(r'[\w:-]+\s*=', Name.Attribute, 'attr'),
(r'/?\s*>', Name.Tag, '#pop'),
],
'attr': [
('"', String, 'attr-dstring'),
("'", String, 'attr-sstring'),
(r'[^\s>]*', String, '#pop')
],
'attr-dstring': [
('"', String, '#pop'),
include('strings'),
("'", String)
],
'attr-sstring': [
("'", String, '#pop'),
include('strings'),
("'", String)
],
'strings': [
('[^"\'$]+', String),
include('variable')
],
'variable': [
(r'(?<!\$)(\$\{)(.+?)(\})',
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
(r'(?<!\$)(\$)([a-zA-Z_][\w\.]*)',
Name.Variable),
]
}
class HtmlGenshiLexer(DelegatingLexer):
"""
    A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ and
    `kid <http://kid-templating.org/>`_ HTML templates.
"""
name = 'HTML+Genshi'
aliases = ['html+genshi', 'html+kid']
alias_filenames = ['*.html', '*.htm', '*.xhtml']
mimetypes = ['text/html+genshi']
def __init__(self, **options):
super(HtmlGenshiLexer, self).__init__(HtmlLexer, GenshiMarkupLexer,
**options)
def analyse_text(text):
rv = 0.0
        if re.search(r'\$\{.*?\}', text) is not None:
rv += 0.2
        if re.search(r'py:(.*?)=["\']', text) is not None:
rv += 0.2
return rv + HtmlLexer.analyse_text(text) - 0.01
class GenshiLexer(DelegatingLexer):
"""
    A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ and
    `kid <http://kid-templating.org/>`_ XML templates.
"""
name = 'Genshi'
aliases = ['genshi', 'kid', 'xml+genshi', 'xml+kid']
filenames = ['*.kid']
alias_filenames = ['*.xml']
mimetypes = ['application/x-genshi', 'application/x-kid']
def __init__(self, **options):
super(GenshiLexer, self).__init__(XmlLexer, GenshiMarkupLexer,
**options)
def analyse_text(text):
rv = 0.0
        if re.search(r'\$\{.*?\}', text) is not None:
rv += 0.2
        if re.search(r'py:(.*?)=["\']', text) is not None:
rv += 0.2
return rv + XmlLexer.analyse_text(text) - 0.01
class JavascriptGenshiLexer(DelegatingLexer):
"""
A lexer that highlights javascript code in genshi text templates.
"""
name = 'JavaScript+Genshi Text'
aliases = ['js+genshitext', 'js+genshi', 'javascript+genshitext',
'javascript+genshi']
alias_filenames = ['*.js']
mimetypes = ['application/x-javascript+genshi',
'text/x-javascript+genshi',
'text/javascript+genshi']
def __init__(self, **options):
super(JavascriptGenshiLexer, self).__init__(JavascriptLexer,
GenshiTextLexer,
**options)
def analyse_text(text):
return GenshiLexer.analyse_text(text) - 0.05
class CssGenshiLexer(DelegatingLexer):
"""
A lexer that highlights CSS definitions in genshi text templates.
"""
name = 'CSS+Genshi Text'
aliases = ['css+genshitext', 'css+genshi']
alias_filenames = ['*.css']
mimetypes = ['text/css+genshi']
def __init__(self, **options):
super(CssGenshiLexer, self).__init__(CssLexer, GenshiTextLexer,
**options)
def analyse_text(text):
return GenshiLexer.analyse_text(text) - 0.05
class RhtmlLexer(DelegatingLexer):
"""
Subclass of the ERB lexer that highlights the unlexed data with the
html lexer.
    Nested JavaScript and CSS are highlighted too.
"""
name = 'RHTML'
aliases = ['rhtml', 'html+erb', 'html+ruby']
filenames = ['*.rhtml']
alias_filenames = ['*.html', '*.htm', '*.xhtml']
mimetypes = ['text/html+ruby']
def __init__(self, **options):
super(RhtmlLexer, self).__init__(HtmlLexer, ErbLexer, **options)
def analyse_text(text):
rv = ErbLexer.analyse_text(text) - 0.01
if html_doctype_matches(text):
# one more than the XmlErbLexer returns
rv += 0.5
return rv
class XmlErbLexer(DelegatingLexer):
"""
Subclass of `ErbLexer` which highlights data outside preprocessor
directives with the `XmlLexer`.
"""
name = 'XML+Ruby'
aliases = ['xml+erb', 'xml+ruby']
alias_filenames = ['*.xml']
mimetypes = ['application/xml+ruby']
def __init__(self, **options):
super(XmlErbLexer, self).__init__(XmlLexer, ErbLexer, **options)
def analyse_text(text):
rv = ErbLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
return rv
class CssErbLexer(DelegatingLexer):
"""
Subclass of `ErbLexer` which highlights unlexed data with the `CssLexer`.
"""
name = 'CSS+Ruby'
aliases = ['css+erb', 'css+ruby']
alias_filenames = ['*.css']
mimetypes = ['text/css+ruby']
def __init__(self, **options):
super(CssErbLexer, self).__init__(CssLexer, ErbLexer, **options)
def analyse_text(text):
return ErbLexer.analyse_text(text) - 0.05
class JavascriptErbLexer(DelegatingLexer):
"""
Subclass of `ErbLexer` which highlights unlexed data with the
`JavascriptLexer`.
"""
name = 'JavaScript+Ruby'
aliases = ['js+erb', 'javascript+erb', 'js+ruby', 'javascript+ruby']
alias_filenames = ['*.js']
mimetypes = ['application/x-javascript+ruby',
'text/x-javascript+ruby',
'text/javascript+ruby']
def __init__(self, **options):
super(JavascriptErbLexer, self).__init__(JavascriptLexer, ErbLexer,
**options)
def analyse_text(text):
return ErbLexer.analyse_text(text) - 0.05
class HtmlPhpLexer(DelegatingLexer):
"""
Subclass of `PhpLexer` that highlights unhandled data with the `HtmlLexer`.
    Nested JavaScript and CSS are highlighted too.
"""
name = 'HTML+PHP'
aliases = ['html+php']
filenames = ['*.phtml']
alias_filenames = ['*.php', '*.html', '*.htm', '*.xhtml',
'*.php[345]']
mimetypes = ['application/x-php',
'application/x-httpd-php', 'application/x-httpd-php3',
'application/x-httpd-php4', 'application/x-httpd-php5']
def __init__(self, **options):
super(HtmlPhpLexer, self).__init__(HtmlLexer, PhpLexer, **options)
def analyse_text(text):
rv = PhpLexer.analyse_text(text) - 0.01
if html_doctype_matches(text):
rv += 0.5
return rv
class XmlPhpLexer(DelegatingLexer):
"""
Subclass of `PhpLexer` that highlights unhandled data with the `XmlLexer`.
"""
name = 'XML+PHP'
aliases = ['xml+php']
alias_filenames = ['*.xml', '*.php', '*.php[345]']
mimetypes = ['application/xml+php']
def __init__(self, **options):
super(XmlPhpLexer, self).__init__(XmlLexer, PhpLexer, **options)
def analyse_text(text):
rv = PhpLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
return rv
class CssPhpLexer(DelegatingLexer):
"""
Subclass of `PhpLexer` which highlights unmatched data with the `CssLexer`.
"""
name = 'CSS+PHP'
aliases = ['css+php']
alias_filenames = ['*.css']
mimetypes = ['text/css+php']
def __init__(self, **options):
super(CssPhpLexer, self).__init__(CssLexer, PhpLexer, **options)
def analyse_text(text):
return PhpLexer.analyse_text(text) - 0.05
class JavascriptPhpLexer(DelegatingLexer):
"""
Subclass of `PhpLexer` which highlights unmatched data with the
`JavascriptLexer`.
"""
name = 'JavaScript+PHP'
aliases = ['js+php', 'javascript+php']
alias_filenames = ['*.js']
mimetypes = ['application/x-javascript+php',
'text/x-javascript+php',
'text/javascript+php']
def __init__(self, **options):
super(JavascriptPhpLexer, self).__init__(JavascriptLexer, PhpLexer,
**options)
def analyse_text(text):
return PhpLexer.analyse_text(text)
class HtmlSmartyLexer(DelegatingLexer):
"""
Subclass of the `SmartyLexer` that highlights unlexed data with the
`HtmlLexer`.
    Nested JavaScript and CSS are highlighted too.
"""
name = 'HTML+Smarty'
aliases = ['html+smarty']
alias_filenames = ['*.html', '*.htm', '*.xhtml', '*.tpl']
mimetypes = ['text/html+smarty']
def __init__(self, **options):
super(HtmlSmartyLexer, self).__init__(HtmlLexer, SmartyLexer, **options)
def analyse_text(text):
rv = SmartyLexer.analyse_text(text) - 0.01
if html_doctype_matches(text):
rv += 0.5
return rv
class XmlSmartyLexer(DelegatingLexer):
"""
Subclass of the `SmartyLexer` that highlights unlexed data with the
`XmlLexer`.
"""
name = 'XML+Smarty'
aliases = ['xml+smarty']
alias_filenames = ['*.xml', '*.tpl']
mimetypes = ['application/xml+smarty']
def __init__(self, **options):
super(XmlSmartyLexer, self).__init__(XmlLexer, SmartyLexer, **options)
def analyse_text(text):
rv = SmartyLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
return rv
class CssSmartyLexer(DelegatingLexer):
"""
Subclass of the `SmartyLexer` that highlights unlexed data with the
`CssLexer`.
"""
name = 'CSS+Smarty'
aliases = ['css+smarty']
alias_filenames = ['*.css', '*.tpl']
mimetypes = ['text/css+smarty']
def __init__(self, **options):
super(CssSmartyLexer, self).__init__(CssLexer, SmartyLexer, **options)
def analyse_text(text):
return SmartyLexer.analyse_text(text) - 0.05
class JavascriptSmartyLexer(DelegatingLexer):
"""
Subclass of the `SmartyLexer` that highlights unlexed data with the
`JavascriptLexer`.
"""
name = 'JavaScript+Smarty'
aliases = ['js+smarty', 'javascript+smarty']
alias_filenames = ['*.js', '*.tpl']
mimetypes = ['application/x-javascript+smarty',
'text/x-javascript+smarty',
'text/javascript+smarty']
def __init__(self, **options):
super(JavascriptSmartyLexer, self).__init__(JavascriptLexer, SmartyLexer,
**options)
def analyse_text(text):
return SmartyLexer.analyse_text(text) - 0.05
class HtmlDjangoLexer(DelegatingLexer):
"""
Subclass of the `DjangoLexer` that highlights unlexed data with the
`HtmlLexer`.
    Nested JavaScript and CSS are highlighted too.
"""
name = 'HTML+Django/Jinja'
aliases = ['html+django', 'html+jinja', 'htmldjango']
alias_filenames = ['*.html', '*.htm', '*.xhtml']
mimetypes = ['text/html+django', 'text/html+jinja']
def __init__(self, **options):
super(HtmlDjangoLexer, self).__init__(HtmlLexer, DjangoLexer, **options)
def analyse_text(text):
rv = DjangoLexer.analyse_text(text) - 0.01
if html_doctype_matches(text):
rv += 0.5
return rv
class XmlDjangoLexer(DelegatingLexer):
"""
Subclass of the `DjangoLexer` that highlights unlexed data with the
`XmlLexer`.
"""
name = 'XML+Django/Jinja'
aliases = ['xml+django', 'xml+jinja']
alias_filenames = ['*.xml']
mimetypes = ['application/xml+django', 'application/xml+jinja']
def __init__(self, **options):
super(XmlDjangoLexer, self).__init__(XmlLexer, DjangoLexer, **options)
def analyse_text(text):
rv = DjangoLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
return rv
class CssDjangoLexer(DelegatingLexer):
"""
Subclass of the `DjangoLexer` that highlights unlexed data with the
`CssLexer`.
"""
name = 'CSS+Django/Jinja'
aliases = ['css+django', 'css+jinja']
alias_filenames = ['*.css']
mimetypes = ['text/css+django', 'text/css+jinja']
def __init__(self, **options):
super(CssDjangoLexer, self).__init__(CssLexer, DjangoLexer, **options)
def analyse_text(text):
return DjangoLexer.analyse_text(text) - 0.05
class JavascriptDjangoLexer(DelegatingLexer):
"""
Subclass of the `DjangoLexer` that highlights unlexed data with the
`JavascriptLexer`.
"""
name = 'JavaScript+Django/Jinja'
aliases = ['js+django', 'javascript+django',
'js+jinja', 'javascript+jinja']
alias_filenames = ['*.js']
mimetypes = ['application/x-javascript+django',
'application/x-javascript+jinja',
'text/x-javascript+django',
'text/x-javascript+jinja',
'text/javascript+django',
'text/javascript+jinja']
def __init__(self, **options):
super(JavascriptDjangoLexer, self).__init__(JavascriptLexer, DjangoLexer,
**options)
def analyse_text(text):
return DjangoLexer.analyse_text(text) - 0.05
class JspRootLexer(RegexLexer):
"""
Base for the `JspLexer`. Yields `Token.Other` for area outside of
JSP tags.
.. versionadded:: 0.7
"""
tokens = {
'root': [
(r'<%\S?', Keyword, 'sec'),
# FIXME: I want to make these keywords but still parse attributes.
(r'</?jsp:(forward|getProperty|include|plugin|setProperty|useBean).*?>',
Keyword),
(r'[^<]+', Other),
(r'<', Other),
],
'sec': [
(r'%>', Keyword, '#pop'),
# note: '\w\W' != '.' without DOTALL.
(r'[\w\W]+?(?=%>|\Z)', using(JavaLexer)),
],
}
class JspLexer(DelegatingLexer):
"""
Lexer for Java Server Pages.
.. versionadded:: 0.7
"""
name = 'Java Server Page'
aliases = ['jsp']
filenames = ['*.jsp']
mimetypes = ['application/x-jsp']
def __init__(self, **options):
super(JspLexer, self).__init__(XmlLexer, JspRootLexer, **options)
def analyse_text(text):
rv = JavaLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
if '<%' in text and '%>' in text:
rv += 0.1
return rv
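

# Hedged usage sketch (the sample page is illustrative): Java code between
# <% ... %> is delegated to JavaLexer via JspRootLexer, and the remaining
# markup is re-lexed as XML by the delegating machinery.
def _demo_jsp_tokens():
    lexer = JspLexer()
    sample = '<p><% out.println("hi"); %></p>'
    return list(lexer.get_tokens(sample))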
class EvoqueLexer(RegexLexer):
"""
For files using the Evoque templating system.
.. versionadded:: 1.1
"""
name = 'Evoque'
aliases = ['evoque']
filenames = ['*.evoque']
mimetypes = ['application/x-evoque']
flags = re.DOTALL
tokens = {
'root': [
(r'[^#$]+', Other),
(r'#\[', Comment.Multiline, 'comment'),
(r'\$\$', Other),
# svn keywords
(r'\$\w+:[^$\n]*\$', Comment.Multiline),
# directives: begin, end
(r'(\$)(begin|end)(\{(%)?)(.*?)((?(4)%)\})',
bygroups(Punctuation, Name.Builtin, Punctuation, None,
String, Punctuation)),
# directives: evoque, overlay
# see doc for handling first name arg: /directives/evoque/
# + minor inconsistency: the "name" in e.g. $overlay{name=site_base}
# should be using(PythonLexer), not passed out as String
(r'(\$)(evoque|overlay)(\{(%)?)(\s*[#\w\-"\'.]+[^=,%}]+?)?'
r'(.*?)((?(4)%)\})',
bygroups(Punctuation, Name.Builtin, Punctuation, None,
String, using(PythonLexer), Punctuation)),
# directives: if, for, prefer, test
(r'(\$)(\w+)(\{(%)?)(.*?)((?(4)%)\})',
bygroups(Punctuation, Name.Builtin, Punctuation, None,
using(PythonLexer), Punctuation)),
# directive clauses (no {} expression)
(r'(\$)(else|rof|fi)', bygroups(Punctuation, Name.Builtin)),
# expressions
(r'(\$\{(%)?)(.*?)((!)(.*?))?((?(2)%)\})',
bygroups(Punctuation, None, using(PythonLexer),
Name.Builtin, None, None, Punctuation)),
(r'#', Other),
],
'comment': [
(r'[^\]#]', Comment.Multiline),
(r'#\[', Comment.Multiline, '#push'),
(r'\]#', Comment.Multiline, '#pop'),
(r'[\]#]', Comment.Multiline)
],
}
class EvoqueHtmlLexer(DelegatingLexer):
"""
Subclass of the `EvoqueLexer` that highlights unlexed data with the
`HtmlLexer`.
.. versionadded:: 1.1
"""
name = 'HTML+Evoque'
aliases = ['html+evoque']
filenames = ['*.html']
mimetypes = ['text/html+evoque']
def __init__(self, **options):
super(EvoqueHtmlLexer, self).__init__(HtmlLexer, EvoqueLexer,
**options)
class EvoqueXmlLexer(DelegatingLexer):
"""
Subclass of the `EvoqueLexer` that highlights unlexed data with the
`XmlLexer`.
.. versionadded:: 1.1
"""
name = 'XML+Evoque'
aliases = ['xml+evoque']
filenames = ['*.xml']
mimetypes = ['application/xml+evoque']
def __init__(self, **options):
super(EvoqueXmlLexer, self).__init__(XmlLexer, EvoqueLexer,
**options)
class ColdfusionLexer(RegexLexer):
"""
Coldfusion statements
"""
name = 'cfstatement'
aliases = ['cfs']
filenames = []
mimetypes = []
flags = re.IGNORECASE
tokens = {
'root': [
(r'//.*?\n', Comment.Single),
(r'/\*(?:.|\n)*?\*/', Comment.Multiline),
(r'\+\+|--', Operator),
(r'[-+*/^&=!]', Operator),
(r'<=|>=|<|>|==', Operator),
(r'mod\b', Operator),
(r'(eq|lt|gt|lte|gte|not|is|and|or)\b', Operator),
(r'\|\||&&', Operator),
(r'\?', Operator),
(r'"', String.Double, 'string'),
# There is a special rule for allowing html in single quoted
# strings, evidently.
(r"'.*?'", String.Single),
(r'\d+', Number),
            (r'(if|else|len|var|xml|default|break|switch|component|property|function|do|'
             r'try|catch|in|continue|for|return|while|required|any|array|binary|boolean|'
             r'date|guid|numeric|query|string|struct|uuid|case)\b', Keyword),
(r'(true|false|null)\b', Keyword.Constant),
(r'(application|session|client|cookie|super|this|variables|arguments)\b',
Name.Constant),
(r'([a-z_$][\w.]*)(\s*)(\()',
bygroups(Name.Function, Text, Punctuation)),
(r'[a-z_$][\w.]*', Name.Variable),
(r'[()\[\]{};:,.\\]', Punctuation),
(r'\s+', Text),
],
'string': [
(r'""', String.Double),
(r'#.+?#', String.Interp),
(r'[^"#]+', String.Double),
(r'#', String.Double),
(r'"', String.Double, '#pop'),
],
}
class ColdfusionMarkupLexer(RegexLexer):
"""
Coldfusion markup only
"""
name = 'Coldfusion'
aliases = ['cf']
filenames = []
mimetypes = []
tokens = {
'root': [
(r'[^<]+', Other),
include('tags'),
(r'<[^<>]*', Other),
],
'tags': [
(r'<!---', Comment.Multiline, 'cfcomment'),
(r'(?s)<!--.*?-->', Comment),
(r'<cfoutput.*?>', Name.Builtin, 'cfoutput'),
(r'(?s)(<cfscript.*?>)(.+?)(</cfscript.*?>)',
bygroups(Name.Builtin, using(ColdfusionLexer), Name.Builtin)),
# negative lookbehind is for strings with embedded >
(r'(?s)(</?cf(?:component|include|if|else|elseif|loop|return|'
r'dbinfo|dump|abort|location|invoke|throw|file|savecontent|'
r'mailpart|mail|header|content|zip|image|lock|argument|try|'
r'catch|break|directory|http|set|function|param)\b)(.*?)((?<!\\)>)',
bygroups(Name.Builtin, using(ColdfusionLexer), Name.Builtin)),
],
'cfoutput': [
(r'[^#<]+', Other),
(r'(#)(.*?)(#)', bygroups(Punctuation, using(ColdfusionLexer),
Punctuation)),
# (r'<cfoutput.*?>', Name.Builtin, '#push'),
(r'</cfoutput.*?>', Name.Builtin, '#pop'),
include('tags'),
(r'(?s)<[^<>]*', Other),
(r'#', Other),
],
'cfcomment': [
(r'<!---', Comment.Multiline, '#push'),
(r'--->', Comment.Multiline, '#pop'),
(r'([^<-]|<(?!!---)|-(?!-->))+', Comment.Multiline),
],
}
class ColdfusionHtmlLexer(DelegatingLexer):
"""
Coldfusion markup in html
"""
name = 'Coldfusion HTML'
aliases = ['cfm']
filenames = ['*.cfm', '*.cfml']
mimetypes = ['application/x-coldfusion']
def __init__(self, **options):
super(ColdfusionHtmlLexer, self).__init__(HtmlLexer, ColdfusionMarkupLexer,
**options)
class ColdfusionCFCLexer(DelegatingLexer):
"""
Coldfusion markup/script components
.. versionadded:: 2.0
"""
name = 'Coldfusion CFC'
aliases = ['cfc']
filenames = ['*.cfc']
mimetypes = []
def __init__(self, **options):
super(ColdfusionCFCLexer, self).__init__(ColdfusionHtmlLexer, ColdfusionLexer,
**options)
class SspLexer(DelegatingLexer):
"""
Lexer for Scalate Server Pages.
.. versionadded:: 1.4
"""
name = 'Scalate Server Page'
aliases = ['ssp']
filenames = ['*.ssp']
mimetypes = ['application/x-ssp']
def __init__(self, **options):
super(SspLexer, self).__init__(XmlLexer, JspRootLexer, **options)
def analyse_text(text):
rv = 0.0
        if re.search(r'val \w+\s*:', text):
rv += 0.6
if looks_like_xml(text):
rv += 0.2
if '<%' in text and '%>' in text:
rv += 0.1
return rv
class TeaTemplateRootLexer(RegexLexer):
"""
Base for the `TeaTemplateLexer`. Yields `Token.Other` for area outside of
code blocks.
.. versionadded:: 1.5
"""
tokens = {
'root': [
(r'<%\S?', Keyword, 'sec'),
(r'[^<]+', Other),
(r'<', Other),
],
'sec': [
(r'%>', Keyword, '#pop'),
# note: '\w\W' != '.' without DOTALL.
(r'[\w\W]+?(?=%>|\Z)', using(TeaLangLexer)),
],
}
class TeaTemplateLexer(DelegatingLexer):
"""
Lexer for `Tea Templates <http://teatrove.org/>`_.
.. versionadded:: 1.5
"""
name = 'Tea'
aliases = ['tea']
filenames = ['*.tea']
mimetypes = ['text/x-tea']
def __init__(self, **options):
super(TeaTemplateLexer, self).__init__(XmlLexer,
TeaTemplateRootLexer, **options)
def analyse_text(text):
rv = TeaLangLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
if '<%' in text and '%>' in text:
rv += 0.1
return rv
class LassoHtmlLexer(DelegatingLexer):
"""
Subclass of the `LassoLexer` which highlights unhandled data with the
`HtmlLexer`.
    Nested JavaScript and CSS are also highlighted.
.. versionadded:: 1.6
"""
name = 'HTML+Lasso'
aliases = ['html+lasso']
alias_filenames = ['*.html', '*.htm', '*.xhtml', '*.lasso', '*.lasso[89]',
'*.incl', '*.inc', '*.las']
mimetypes = ['text/html+lasso',
'application/x-httpd-lasso',
'application/x-httpd-lasso[89]']
def __init__(self, **options):
super(LassoHtmlLexer, self).__init__(HtmlLexer, LassoLexer, **options)
def analyse_text(text):
rv = LassoLexer.analyse_text(text) - 0.01
if html_doctype_matches(text): # same as HTML lexer
rv += 0.5
return rv
class LassoXmlLexer(DelegatingLexer):
"""
Subclass of the `LassoLexer` which highlights unhandled data with the
`XmlLexer`.
.. versionadded:: 1.6
"""
name = 'XML+Lasso'
aliases = ['xml+lasso']
alias_filenames = ['*.xml', '*.lasso', '*.lasso[89]',
'*.incl', '*.inc', '*.las']
mimetypes = ['application/xml+lasso']
def __init__(self, **options):
super(LassoXmlLexer, self).__init__(XmlLexer, LassoLexer, **options)
def analyse_text(text):
rv = LassoLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
return rv
class LassoCssLexer(DelegatingLexer):
"""
Subclass of the `LassoLexer` which highlights unhandled data with the
`CssLexer`.
.. versionadded:: 1.6
"""
name = 'CSS+Lasso'
aliases = ['css+lasso']
alias_filenames = ['*.css']
mimetypes = ['text/css+lasso']
def __init__(self, **options):
options['requiredelimiters'] = True
super(LassoCssLexer, self).__init__(CssLexer, LassoLexer, **options)
def analyse_text(text):
rv = LassoLexer.analyse_text(text) - 0.05
if re.search(r'\w+:.+?;', text):
rv += 0.1
if 'padding:' in text:
rv += 0.1
return rv
class LassoJavascriptLexer(DelegatingLexer):
"""
Subclass of the `LassoLexer` which highlights unhandled data with the
`JavascriptLexer`.
.. versionadded:: 1.6
"""
name = 'JavaScript+Lasso'
aliases = ['js+lasso', 'javascript+lasso']
alias_filenames = ['*.js']
mimetypes = ['application/x-javascript+lasso',
'text/x-javascript+lasso',
'text/javascript+lasso']
def __init__(self, **options):
options['requiredelimiters'] = True
super(LassoJavascriptLexer, self).__init__(JavascriptLexer, LassoLexer,
**options)
def analyse_text(text):
rv = LassoLexer.analyse_text(text) - 0.05
return rv
class HandlebarsLexer(RegexLexer):
"""
    Generic `handlebars <http://handlebarsjs.com/>`_ template lexer.
Highlights only the Handlebars template tags (stuff between `{{` and `}}`).
Everything else is left for a delegating lexer.
.. versionadded:: 2.0
"""
name = "Handlebars"
aliases = ['handlebars']
tokens = {
'root': [
(r'[^{]+', Other),
(r'\{\{!.*\}\}', Comment),
(r'(\{\{\{)(\s*)', bygroups(Comment.Special, Text), 'tag'),
(r'(\{\{)(\s*)', bygroups(Comment.Preproc, Text), 'tag'),
],
'tag': [
(r'\s+', Text),
(r'\}\}\}', Comment.Special, '#pop'),
(r'\}\}', Comment.Preproc, '#pop'),
# Handlebars
(r'([#/]*)(each|if|unless|else|with|log|in)', bygroups(Keyword,
Keyword)),
# General {{#block}}
(r'([#/])([\w-]+)', bygroups(Name.Function, Name.Function)),
# {{opt=something}}
(r'([\w-]+)(=)', bygroups(Name.Attribute, Operator)),
# borrowed from DjangoLexer
(r':?"(\\\\|\\"|[^"])*"', String.Double),
(r":?'(\\\\|\\'|[^'])*'", String.Single),
(r'[a-zA-Z][\w-]*', Name.Variable),
(r'\.[\w-]+', Name.Variable),
(r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
r"0[xX][0-9a-fA-F]+[Ll]?", Number),
]
}
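

# Minimal sketch (the template string is illustrative): only the {{ ... }}
# and {{{ ... }}} spans are tokenized here; everything else stays Token.Other
# until a delegating subclass such as HandlebarsHtmlLexer re-lexes it.
def _demo_handlebars_tokens():
    lexer = HandlebarsLexer()
    sample = '{{#each items}}{{name}}{{/each}}'
    return list(lexer.get_tokens(sample))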
class HandlebarsHtmlLexer(DelegatingLexer):
"""
Subclass of the `HandlebarsLexer` that highlights unlexed data with the
`HtmlLexer`.
.. versionadded:: 2.0
"""
name = "HTML+Handlebars"
aliases = ["html+handlebars"]
filenames = ['*.handlebars', '*.hbs']
mimetypes = ['text/html+handlebars', 'text/x-handlebars-template']
def __init__(self, **options):
super(HandlebarsHtmlLexer, self).__init__(HtmlLexer, HandlebarsLexer, **options)
class YamlJinjaLexer(DelegatingLexer):
"""
    Subclass of the `DjangoLexer` that highlights unlexed data with the
    `YamlLexer`.
Commonly used in Saltstack salt states.
.. versionadded:: 2.0
"""
name = 'YAML+Jinja'
aliases = ['yaml+jinja', 'salt', 'sls']
filenames = ['*.sls']
mimetypes = ['text/x-yaml+jinja', 'text/x-sls']
def __init__(self, **options):
super(YamlJinjaLexer, self).__init__(YamlLexer, DjangoLexer, **options)
class LiquidLexer(RegexLexer):
"""
Lexer for `Liquid templates
<http://www.rubydoc.info/github/Shopify/liquid>`_.
.. versionadded:: 2.0
"""
name = 'liquid'
aliases = ['liquid']
filenames = ['*.liquid']
tokens = {
'root': [
(r'[^{]+', Text),
# tags and block tags
(r'(\{%)(\s*)', bygroups(Punctuation, Whitespace), 'tag-or-block'),
# output tags
(r'(\{\{)(\s*)([^\s}]+)',
bygroups(Punctuation, Whitespace, using(this, state = 'generic')),
'output'),
(r'\{', Text)
],
'tag-or-block': [
# builtin logic blocks
(r'(if|unless|elsif|case)(?=\s+)', Keyword.Reserved, 'condition'),
(r'(when)(\s+)', bygroups(Keyword.Reserved, Whitespace),
combined('end-of-block', 'whitespace', 'generic')),
(r'(else)(\s*)(%\})',
bygroups(Keyword.Reserved, Whitespace, Punctuation), '#pop'),
# other builtin blocks
(r'(capture)(\s+)([^\s%]+)(\s*)(%\})',
bygroups(Name.Tag, Whitespace, using(this, state = 'variable'),
Whitespace, Punctuation), '#pop'),
(r'(comment)(\s*)(%\})',
bygroups(Name.Tag, Whitespace, Punctuation), 'comment'),
(r'(raw)(\s*)(%\})',
bygroups(Name.Tag, Whitespace, Punctuation), 'raw'),
# end of block
(r'(end(case|unless|if))(\s*)(%\})',
bygroups(Keyword.Reserved, None, Whitespace, Punctuation), '#pop'),
(r'(end([^\s%]+))(\s*)(%\})',
bygroups(Name.Tag, None, Whitespace, Punctuation), '#pop'),
# builtin tags (assign and include are handled together with usual tags)
(r'(cycle)(\s+)(?:([^\s:]*)(:))?(\s*)',
bygroups(Name.Tag, Whitespace,
using(this, state='generic'), Punctuation, Whitespace),
'variable-tag-markup'),
# other tags or blocks
(r'([^\s%]+)(\s*)', bygroups(Name.Tag, Whitespace), 'tag-markup')
],
'output': [
include('whitespace'),
            (r'\}\}', Punctuation, '#pop'),  # end of output
(r'\|', Punctuation, 'filters')
],
'filters': [
include('whitespace'),
(r'\}\}', Punctuation, ('#pop', '#pop')), # end of filters and output
(r'([^\s|:]+)(:?)(\s*)',
bygroups(Name.Function, Punctuation, Whitespace), 'filter-markup')
],
'filter-markup': [
(r'\|', Punctuation, '#pop'),
include('end-of-tag'),
include('default-param-markup')
],
'condition': [
include('end-of-block'),
include('whitespace'),
(r'([^\s=!><]+)(\s*)([=!><]=?)(\s*)(\S+)(\s*)(%\})',
bygroups(using(this, state = 'generic'), Whitespace, Operator,
Whitespace, using(this, state = 'generic'), Whitespace,
Punctuation)),
(r'\b!', Operator),
(r'\bnot\b', Operator.Word),
(r'([\w.\'"]+)(\s+)(contains)(\s+)([\w.\'"]+)',
bygroups(using(this, state = 'generic'), Whitespace, Operator.Word,
Whitespace, using(this, state = 'generic'))),
include('generic'),
include('whitespace')
],
'generic-value': [
include('generic'),
include('end-at-whitespace')
],
'operator': [
(r'(\s*)((=|!|>|<)=?)(\s*)',
bygroups(Whitespace, Operator, None, Whitespace), '#pop'),
(r'(\s*)(\bcontains\b)(\s*)',
bygroups(Whitespace, Operator.Word, Whitespace), '#pop'),
],
'end-of-tag': [
(r'\}\}', Punctuation, '#pop')
],
'end-of-block': [
(r'%\}', Punctuation, ('#pop', '#pop'))
],
'end-at-whitespace': [
(r'\s+', Whitespace, '#pop')
],
# states for unknown markup
'param-markup': [
include('whitespace'),
# params with colons or equals
(r'([^\s=:]+)(\s*)(=|:)',
bygroups(Name.Attribute, Whitespace, Operator)),
# explicit variables
(r'(\{\{)(\s*)([^\s}])(\s*)(\}\})',
bygroups(Punctuation, Whitespace, using(this, state = 'variable'),
Whitespace, Punctuation)),
include('string'),
include('number'),
include('keyword'),
(r',', Punctuation)
],
'default-param-markup': [
include('param-markup'),
(r'.', Text) # fallback for switches / variables / un-quoted strings / ...
],
'variable-param-markup': [
include('param-markup'),
include('variable'),
(r'.', Text) # fallback
],
'tag-markup': [
(r'%\}', Punctuation, ('#pop', '#pop')), # end of tag
include('default-param-markup')
],
'variable-tag-markup': [
(r'%\}', Punctuation, ('#pop', '#pop')), # end of tag
include('variable-param-markup')
],
# states for different values types
'keyword': [
(r'\b(false|true)\b', Keyword.Constant)
],
'variable': [
(r'[a-zA-Z_]\w*', Name.Variable),
(r'(?<=\w)\.(?=\w)', Punctuation)
],
'string': [
(r"'[^']*'", String.Single),
(r'"[^"]*"', String.Double)
],
'number': [
(r'\d+\.\d+', Number.Float),
(r'\d+', Number.Integer)
],
'generic': [ # decides for variable, string, keyword or number
include('keyword'),
include('string'),
include('number'),
include('variable')
],
'whitespace': [
(r'[ \t]+', Whitespace)
],
# states for builtin blocks
'comment': [
(r'(\{%)(\s*)(endcomment)(\s*)(%\})',
bygroups(Punctuation, Whitespace, Name.Tag, Whitespace,
Punctuation), ('#pop', '#pop')),
(r'.', Comment)
],
'raw': [
(r'[^{]+', Text),
(r'(\{%)(\s*)(endraw)(\s*)(%\})',
bygroups(Punctuation, Whitespace, Name.Tag, Whitespace,
Punctuation), '#pop'),
(r'\{', Text)
],
}
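

# Hedged sketch (the sample template is illustrative): block tags enter the
# 'tag-or-block' state and output tags enter 'output', so filters after a
# pipe are tokenized as Name.Function.
def _demo_liquid_tokens():
    lexer = LiquidLexer()
    sample = '{% if user %}Hello {{ user.name | capitalize }}!{% endif %}'
    return list(lexer.get_tokens(sample))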
class TwigLexer(RegexLexer):
"""
`Twig <http://twig.sensiolabs.org/>`_ template lexer.
    It just highlights Twig code between the preprocessor directives;
    other data is left untouched by the lexer.
.. versionadded:: 2.0
"""
name = 'Twig'
aliases = ['twig']
mimetypes = ['application/x-twig']
flags = re.M | re.S
    # Note that a backslash is included in the following two patterns;
    # PHP uses a backslash as a namespace separator.
_ident_char = r'[\\\w-]|[^\x00-\x7f]'
_ident_begin = r'(?:[\\_a-z]|[^\x00-\x7f])'
_ident_end = r'(?:' + _ident_char + ')*'
_ident_inner = _ident_begin + _ident_end
tokens = {
'root': [
(r'[^{]+', Other),
(r'\{\{', Comment.Preproc, 'var'),
# twig comments
(r'\{\#.*?\#\}', Comment),
# raw twig blocks
(r'(\{%)(-?\s*)(raw)(\s*-?)(%\})(.*?)'
r'(\{%)(-?\s*)(endraw)(\s*-?)(%\})',
bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
Other, Comment.Preproc, Text, Keyword, Text,
Comment.Preproc)),
(r'(\{%)(-?\s*)(verbatim)(\s*-?)(%\})(.*?)'
r'(\{%)(-?\s*)(endverbatim)(\s*-?)(%\})',
bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
Other, Comment.Preproc, Text, Keyword, Text,
Comment.Preproc)),
# filter blocks
(r'(\{%%)(-?\s*)(filter)(\s+)(%s)' % _ident_inner,
bygroups(Comment.Preproc, Text, Keyword, Text, Name.Function),
'tag'),
(r'(\{%)(-?\s*)([a-zA-Z_]\w*)',
bygroups(Comment.Preproc, Text, Keyword), 'tag'),
(r'\{', Other),
],
'varnames': [
(r'(\|)(\s*)(%s)' % _ident_inner,
bygroups(Operator, Text, Name.Function)),
(r'(is)(\s+)(not)?(\s*)(%s)' % _ident_inner,
bygroups(Keyword, Text, Keyword, Text, Name.Function)),
(r'(?i)(true|false|none|null)\b', Keyword.Pseudo),
            (r'(in|not|and|b-and|or|b-or|b-xor|is'
             r'|if|elseif|else|import'
             r'|constant|defined|divisibleby|empty|even|iterable|odd|sameas'
             r'|matches|starts\s+with|ends\s+with)\b',
             Keyword),
(r'(loop|block|parent)\b', Name.Builtin),
(_ident_inner, Name.Variable),
(r'\.' + _ident_inner, Name.Variable),
(r'\.[0-9]+', Number),
(r':?"(\\\\|\\"|[^"])*"', String.Double),
(r":?'(\\\\|\\'|[^'])*'", String.Single),
(r'([{}()\[\]+\-*/,:~%]|\.\.|\?|:|\*\*|\/\/|!=|[><=]=?)', Operator),
(r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
r"0[xX][0-9a-fA-F]+[Ll]?", Number),
],
'var': [
(r'\s+', Text),
(r'(-?)(\}\})', bygroups(Text, Comment.Preproc), '#pop'),
include('varnames')
],
'tag': [
(r'\s+', Text),
(r'(-?)(%\})', bygroups(Text, Comment.Preproc), '#pop'),
include('varnames'),
(r'.', Punctuation),
],
}
class TwigHtmlLexer(DelegatingLexer):
"""
Subclass of the `TwigLexer` that highlights unlexed data with the
`HtmlLexer`.
.. versionadded:: 2.0
"""
name = "HTML+Twig"
aliases = ["html+twig"]
filenames = ['*.twig']
mimetypes = ['text/html+twig']
def __init__(self, **options):
super(TwigHtmlLexer, self).__init__(HtmlLexer, TwigLexer, **options)
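

# End-to-end hedged sketch (the sample template is illustrative): render a
# Twig snippet to HTML markup using the delegating lexer defined above.
# highlight() and HtmlFormatter are stable public Pygments entry points.
def _demo_twig_highlight():
    from pygments import highlight
    from pygments.formatters import HtmlFormatter
    sample = '{% for user in users %}{{ user.name|e }}{% endfor %}'
    return highlight(sample, TwigHtmlLexer(), HtmlFormatter())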