upgrade wakatime cli to v4.0.4
parent 307029c37a
commit 440e33b8b7

1087 changed files with 21913 additions and 587 deletions
packages/wakatime/languages/__init__.py (new file, 106 lines)
@@ -0,0 +1,106 @@
# -*- coding: utf-8 -*-
"""
    wakatime.languages
    ~~~~~~~~~~~~~~~~~~

    Parse dependencies from a source code file.

    :copyright: (c) 2014 Alan Hamlett.
    :license: BSD, see LICENSE for more details.
"""

import logging
import traceback

from ..compat import u, open, import_module


log = logging.getLogger('WakaTime')


class TokenParser(object):
    """The base class for all dependency parsers. To add support for your
    language, inherit from this class and implement the :meth:`parse` method
    to return a list of dependency strings.
    """
    source_file = None
    lexer = None
    dependencies = []
    tokens = []

    def __init__(self, source_file, lexer=None):
        self.source_file = source_file
        self.lexer = lexer

    def parse(self, tokens=[]):
        """ Should return a list of dependencies.
        """
        if not tokens and not self.tokens:
            self.tokens = self._extract_tokens()
        raise Exception('Not yet implemented.')

    def append(self, dep, truncate=False, separator=None, truncate_to=None,
               strip_whitespace=True):
        if dep == 'as':
            print('***************** as')
        self._save_dependency(
            dep,
            truncate=truncate,
            truncate_to=truncate_to,
            separator=separator,
            strip_whitespace=strip_whitespace,
        )

    def _extract_tokens(self):
        if self.lexer:
            with open(self.source_file, 'r', encoding='utf-8') as fh:
                return self.lexer.get_tokens_unprocessed(fh.read(512000))
        return []

    def _save_dependency(self, dep, truncate=False, separator=None,
                         truncate_to=None, strip_whitespace=True):
        if truncate:
            if separator is None:
                separator = u('.')
            separator = u(separator)
            dep = dep.split(separator)
            if truncate_to is None or truncate_to < 0 or truncate_to > len(dep) - 1:
                truncate_to = len(dep) - 1
            dep = dep[0] if len(dep) == 1 else separator.join(dep[0:truncate_to])
        if strip_whitespace:
            dep = dep.strip()
        if dep:
            self.dependencies.append(dep)


class DependencyParser(object):
    source_file = None
    lexer = None
    parser = None

    def __init__(self, source_file, lexer):
        self.source_file = source_file
        self.lexer = lexer

        if self.lexer:
            module_name = self.lexer.__module__.split('.')[-1]
            class_name = self.lexer.__class__.__name__.replace('Lexer', 'Parser', 1)
        else:
            module_name = 'unknown'
            class_name = 'UnknownParser'

        try:
            module = import_module('.%s' % module_name, package=__package__)
            try:
                self.parser = getattr(module, class_name)
            except AttributeError:
                log.debug('Module {0} is missing class {1}'.format(module.__name__, class_name))
        except ImportError:
            log.debug(traceback.format_exc())

    def parse(self):
        if self.parser:
            plugin = self.parser(self.source_file, lexer=self.lexer)
            dependencies = plugin.parse()
            return list(set(dependencies))
        return []
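For orientation, a minimal usage sketch (illustrative, not part of the commit): DependencyParser derives the parser module from the lexer's module name and swaps 'Lexer' for 'Parser' in the class name, so pygments.lexers.python.PythonLexer resolves to the PythonParser added below. This assumes Pygments is installed and the bundled packages/ directory is on sys.path.

# Hypothetical driver for the classes above.
from pygments.lexers import guess_lexer_for_filename

from wakatime.languages import DependencyParser

source_file = 'example.py'  # hypothetical file on disk
with open(source_file) as fh:
    lexer = guess_lexer_for_filename(source_file, fh.read())

parser = DependencyParser(source_file, lexer)
print(parser.parse())  # de-duplicated list of dependency strings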
packages/wakatime/languages/c_cpp.py (new file, 37 lines)
@@ -0,0 +1,37 @@
# -*- coding: utf-8 -*-
"""
    wakatime.languages.c_cpp
    ~~~~~~~~~~~~~~~~~~~~~~~~

    Parse dependencies from C++ code.

    :copyright: (c) 2014 Alan Hamlett.
    :license: BSD, see LICENSE for more details.
"""

from . import TokenParser
from ..compat import u


class CppParser(TokenParser):

    def parse(self, tokens=[]):
        if not tokens and not self.tokens:
            self.tokens = self._extract_tokens()
        for index, token, content in self.tokens:
            self._process_token(token, content)
        return self.dependencies

    def _process_token(self, token, content):
        if u(token).split('.')[-1] == 'Preproc':
            self._process_preproc(token, content)
        else:
            self._process_other(token, content)

    def _process_preproc(self, token, content):
        if content.strip().startswith('include ') or content.strip().startswith("include\t"):
            content = content.replace('include', '', 1).strip()
            self.append(content)

    def _process_other(self, token, content):
        pass
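The Preproc branch above depends on how the lexer tokenizes preprocessor lines; a quick illustrative way to inspect the stream CppParser iterates over (token granularity varies across Pygments versions):

# Print the (index, token, content) triples the parser consumes.
from pygments.lexers import CppLexer

code = '#include <iostream>\nint main() { return 0; }\n'
for index, token, content in CppLexer().get_tokens_unprocessed(code):
    print(index, token, repr(content))
# The '#include' line surfaces as Comment.Preproc tokens, hence the
# check on the 'Preproc' suffix in _process_token.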
packages/wakatime/languages/data.py (new file, 66 lines)
@@ -0,0 +1,66 @@
# -*- coding: utf-8 -*-
"""
    wakatime.languages.data
    ~~~~~~~~~~~~~~~~~~~~~~~

    Parse dependencies from data files.

    :copyright: (c) 2014 Alan Hamlett.
    :license: BSD, see LICENSE for more details.
"""

import os

from . import TokenParser
from ..compat import u


FILES = {
    'bower.json': {'exact': True, 'dependency': 'bower'},
    'component.json': {'exact': True, 'dependency': 'bower'},
    'package.json': {'exact': True, 'dependency': 'npm'},
}


class JsonParser(TokenParser):
    state = None
    level = 0

    def parse(self, tokens=[]):
        self._process_file_name(os.path.basename(self.source_file))
        if not tokens and not self.tokens:
            self.tokens = self._extract_tokens()
        for index, token, content in self.tokens:
            self._process_token(token, content)
        return self.dependencies

    def _process_file_name(self, file_name):
        for key, value in FILES.items():
            found = (key == file_name) if value.get('exact') else (key.lower() in file_name.lower())
            if found:
                self.append(value['dependency'])

    def _process_token(self, token, content):
        if u(token) == 'Token.Name.Tag':
            self._process_tag(token, content)
        elif u(token) == 'Token.Literal.String.Single' or u(token) == 'Token.Literal.String.Double':
            self._process_literal_string(token, content)
        elif u(token) == 'Token.Punctuation':
            self._process_punctuation(token, content)

    def _process_tag(self, token, content):
        if content.strip('"').strip("'") == 'dependencies' or content.strip('"').strip("'") == 'devDependencies':
            self.state = 'dependencies'
        elif self.state == 'dependencies' and self.level == 2:
            self.append(content.strip('"').strip("'"))

    def _process_literal_string(self, token, content):
        pass

    def _process_punctuation(self, token, content):
        if content == '{':
            self.level += 1
        elif content == '}':
            self.level -= 1
            if self.state is not None and self.level <= 1:
                self.state = None
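A hand-driven run of JsonParser (illustrative): parse() normally reads the source file from disk, so the tokens are pre-seeded here, and this assumes Pygments' JsonLexer emits object keys as Token.Name.Tag. The brace-depth counter limits matches to keys nested directly under dependencies/devDependencies.

from pygments.lexers import JsonLexer

from wakatime.languages.data import JsonParser

content = '{"dependencies": {"express": "^4.0.0"}}'
parser = JsonParser('package.json', lexer=None)  # filename match appends 'npm'
parser.tokens = list(JsonLexer().get_tokens_unprocessed(content))
print(parser.parse())  # expected: ['npm', 'express']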
packages/wakatime/languages/dotnet.py (new file, 36 lines)
@@ -0,0 +1,36 @@
# -*- coding: utf-8 -*-
"""
    wakatime.languages.dotnet
    ~~~~~~~~~~~~~~~~~~~~~~~~~

    Parse dependencies from .NET code.

    :copyright: (c) 2014 Alan Hamlett.
    :license: BSD, see LICENSE for more details.
"""

from . import TokenParser
from ..compat import u


class CSharpParser(TokenParser):

    def parse(self, tokens=[]):
        if not tokens and not self.tokens:
            self.tokens = self._extract_tokens()
        for index, token, content in self.tokens:
            self._process_token(token, content)
        return self.dependencies

    def _process_token(self, token, content):
        if u(token).split('.')[-1] == 'Namespace':
            self._process_namespace(token, content)
        else:
            self._process_other(token, content)

    def _process_namespace(self, token, content):
        if content != 'import' and content != 'package' and content != 'namespace':
            self.append(content, truncate=True)

    def _process_other(self, token, content):
        pass
packages/wakatime/languages/jvm.py (new file, 36 lines)
@@ -0,0 +1,36 @@
# -*- coding: utf-8 -*-
"""
    wakatime.languages.java
    ~~~~~~~~~~~~~~~~~~~~~~~

    Parse dependencies from Java code.

    :copyright: (c) 2014 Alan Hamlett.
    :license: BSD, see LICENSE for more details.
"""

from . import TokenParser
from ..compat import u


class JavaParser(TokenParser):

    def parse(self, tokens=[]):
        if not tokens and not self.tokens:
            self.tokens = self._extract_tokens()
        for index, token, content in self.tokens:
            self._process_token(token, content)
        return self.dependencies

    def _process_token(self, token, content):
        if u(token).split('.')[-1] == 'Namespace':
            self._process_namespace(token, content)
        else:
            self._process_other(token, content)

    def _process_namespace(self, token, content):
        if content != 'import' and content != 'package' and content != 'namespace':
            self.append(content, truncate=True)

    def _process_other(self, token, content):
        pass
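CSharpParser and JavaParser share this logic: any Name.Namespace token that is not a bare keyword is truncated to its root namespace. A hand-built example (illustrative; assumes the lexer yields the qualified name as a single token):

from pygments.token import Name

from wakatime.languages.dotnet import CSharpParser

parser = CSharpParser('Program.cs', lexer=None)
parser.tokens = [(0, Name.Namespace, 'System.Text')]  # e.g. from `using System.Text;`
print(parser.parse())  # expected: ['System']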
packages/wakatime/languages/php.py (new file, 87 lines)
@@ -0,0 +1,87 @@
# -*- coding: utf-8 -*-
"""
    wakatime.languages.php
    ~~~~~~~~~~~~~~~~~~~~~~

    Parse dependencies from PHP code.

    :copyright: (c) 2014 Alan Hamlett.
    :license: BSD, see LICENSE for more details.
"""

from . import TokenParser
from ..compat import u


class PhpParser(TokenParser):
    state = None
    parens = 0

    def parse(self, tokens=[]):
        if not tokens and not self.tokens:
            self.tokens = self._extract_tokens()
        for index, token, content in self.tokens:
            self._process_token(token, content)
        return self.dependencies

    def _process_token(self, token, content):
        if u(token).split('.')[-1] == 'Keyword':
            self._process_keyword(token, content)
        elif u(token) == 'Token.Literal.String.Single' or u(token) == 'Token.Literal.String.Double':
            self._process_literal_string(token, content)
        elif u(token) == 'Token.Name.Other':
            self._process_name(token, content)
        elif u(token) == 'Token.Name.Function':
            self._process_function(token, content)
        elif u(token).split('.')[-1] == 'Punctuation':
            self._process_punctuation(token, content)
        elif u(token).split('.')[-1] == 'Text':
            self._process_text(token, content)
        else:
            self._process_other(token, content)

    def _process_name(self, token, content):
        if self.state == 'use':
            self.append(content, truncate=True, separator=u("\\"))

    def _process_function(self, token, content):
        if self.state == 'use function':
            self.append(content, truncate=True, separator=u("\\"))
            self.state = 'use'

    def _process_keyword(self, token, content):
        if content == 'include' or content == 'include_once' or content == 'require' or content == 'require_once':
            self.state = 'include'
        elif content == 'use':
            self.state = 'use'
        elif content == 'as':
            self.state = 'as'
        elif self.state == 'use' and content == 'function':
            self.state = 'use function'
        else:
            self.state = None

    def _process_literal_string(self, token, content):
        if self.state == 'include':
            if content != '"':
                content = content.strip()
                if u(token) == 'Token.Literal.String.Double':
                    content = u('"{0}"').format(content)
                self.append(content)
                self.state = None

    def _process_punctuation(self, token, content):
        if content == '(':
            self.parens += 1
        elif content == ')':
            self.parens -= 1
        elif (self.state == 'use' or self.state == 'as') and content == ',':
            self.state = 'use'
        else:
            self.state = None

    def _process_text(self, token, content):
        pass

    def _process_other(self, token, content):
        self.state = None
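A hand-built trace of the use-statement handling (illustrative; assumes the lexer yields the fully-qualified name as one Name.Other token). Note that TokenParser.dependencies is a class attribute, so results accumulate across parser instances within one process.

from pygments.token import Keyword, Name, Punctuation

from wakatime.languages.php import PhpParser

parser = PhpParser('example.php', lexer=None)
parser.tokens = [
    (0, Keyword, 'use'),          # -> state = 'use'
    (4, Name.Other, 'Foo\\Bar'),  # -> append, truncated at the backslash
    (11, Punctuation, ';'),       # -> state reset
]
print(parser.parse())  # expected: ['Foo'] (the vendor namespace)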
packages/wakatime/languages/python.py (new file, 120 lines)
@@ -0,0 +1,120 @@
# -*- coding: utf-8 -*-
"""
    wakatime.languages.python
    ~~~~~~~~~~~~~~~~~~~~~~~~~

    Parse dependencies from Python code.

    :copyright: (c) 2014 Alan Hamlett.
    :license: BSD, see LICENSE for more details.
"""

from . import TokenParser
from ..compat import u


class PythonParser(TokenParser):
    state = None
    parens = 0
    nonpackage = False

    def parse(self, tokens=[]):
        if not tokens and not self.tokens:
            self.tokens = self._extract_tokens()
        for index, token, content in self.tokens:
            self._process_token(token, content)
        return self.dependencies

    def _process_token(self, token, content):
        if u(token).split('.')[-1] == 'Namespace':
            self._process_namespace(token, content)
        elif u(token).split('.')[-1] == 'Name':
            self._process_name(token, content)
        elif u(token).split('.')[-1] == 'Word':
            self._process_word(token, content)
        elif u(token).split('.')[-1] == 'Operator':
            self._process_operator(token, content)
        elif u(token).split('.')[-1] == 'Punctuation':
            self._process_punctuation(token, content)
        elif u(token).split('.')[-1] == 'Text':
            self._process_text(token, content)
        else:
            self._process_other(token, content)

    def _process_namespace(self, token, content):
        if self.state is None:
            self.state = content
        else:
            if content == 'as':
                self.nonpackage = True
            else:
                self._process_import(token, content)

    def _process_name(self, token, content):
        if self.state is not None:
            if self.nonpackage:
                self.nonpackage = False
            else:
                if self.state == 'from':
                    self.append(content, truncate=True, truncate_to=0)
                if self.state == 'from-2' and content != 'import':
                    self.append(content, truncate=True, truncate_to=0)
                elif self.state == 'import':
                    self.append(content, truncate=True, truncate_to=0)
                elif self.state == 'import-2':
                    self.append(content, truncate=True, truncate_to=0)
                else:
                    self.state = None

    def _process_word(self, token, content):
        if self.state is not None:
            if self.nonpackage:
                self.nonpackage = False
            else:
                if self.state == 'from':
                    self.append(content, truncate=True, truncate_to=0)
                if self.state == 'from-2' and content != 'import':
                    self.append(content, truncate=True, truncate_to=0)
                elif self.state == 'import':
                    self.append(content, truncate=True, truncate_to=0)
                elif self.state == 'import-2':
                    self.append(content, truncate=True, truncate_to=0)
                else:
                    self.state = None

    def _process_operator(self, token, content):
        if self.state is not None:
            if content == '.':
                self.nonpackage = True

    def _process_punctuation(self, token, content):
        if content == '(':
            self.parens += 1
        elif content == ')':
            self.parens -= 1
        self.nonpackage = False

    def _process_text(self, token, content):
        if self.state is not None:
            if content == "\n" and self.parens == 0:
                self.state = None
                self.nonpackage = False

    def _process_other(self, token, content):
        pass

    def _process_import(self, token, content):
        if not self.nonpackage:
            if self.state == 'from':
                self.append(content, truncate=True, truncate_to=0)
                self.state = 'from-2'
            elif self.state == 'from-2' and content != 'import':
                self.append(content, truncate=True, truncate_to=0)
            elif self.state == 'import':
                self.append(content, truncate=True, truncate_to=0)
                self.state = 'import-2'
            elif self.state == 'import-2':
                self.append(content, truncate=True, truncate_to=0)
            else:
                self.state = None
        self.nonpackage = False
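PythonParser can be exercised the same way over an in-memory snippet (illustrative; exact output varies with the Pygments version, since each dotted component can arrive as its own Name.Namespace token):

from pygments.lexers import PythonLexer

from wakatime.languages.python import PythonParser

code = 'import os.path\nfrom django.db import models\n'
parser = PythonParser('example.py', lexer=None)
parser.tokens = list(PythonLexer().get_tokens_unprocessed(code))
print(parser.parse())  # expected to include 'os' and 'django'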
packages/wakatime/languages/templates.py (new file, 224 lines)
@@ -0,0 +1,224 @@
# -*- coding: utf-8 -*-
"""
    wakatime.languages.templates
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Parse dependencies from Templates.

    :copyright: (c) 2014 Alan Hamlett.
    :license: BSD, see LICENSE for more details.
"""

from . import TokenParser
from ..compat import u


""" If these keywords are found in the source file, treat them as a dependency.
Must be lower-case strings.
"""
KEYWORDS = [
    '_',
    '$',
    'angular',
    'assert',  # probably mocha
    'backbone',
    'batman',
    'c3',
    'can',
    'casper',
    'chai',
    'chaplin',
    'd3',
    'define',  # probably require
    'describe',  # mocha or jasmine
    'eco',
    'ember',
    'espresso',
    'expect',  # probably jasmine
    'exports',  # probably npm
    'express',
    'gulp',
    'handlebars',
    'highcharts',
    'jasmine',
    'jquery',
    'jstz',
    'ko',  # probably knockout
    'm',  # probably mithril
    'marionette',
    'meteor',
    'moment',
    'monitorio',
    'mustache',
    'phantom',
    'pickadate',
    'pikaday',
    'qunit',
    'react',
    'reactive',
    'require',  # probably the commonjs spec
    'ripple',
    'rivets',
    'socketio',
    'spine',
    'thorax',
    'underscore',
    'vue',
    'way',
    'zombie',
]


class LassoJavascriptParser(TokenParser):

    def parse(self, tokens=[]):
        if not tokens and not self.tokens:
            self.tokens = self._extract_tokens()
        for index, token, content in self.tokens:
            self._process_token(token, content)
        return self.dependencies

    def _process_token(self, token, content):
        if u(token) == 'Token.Name.Other':
            self._process_name(token, content)
        elif u(token) == 'Token.Literal.String.Single' or u(token) == 'Token.Literal.String.Double':
            self._process_literal_string(token, content)

    def _process_name(self, token, content):
        if content.lower() in KEYWORDS:
            self.append(content.lower())

    def _process_literal_string(self, token, content):
        if 'famous/core/' in content.strip('"').strip("'"):
            self.append('famous')


class HtmlDjangoParser(TokenParser):
    tags = []
    getting_attrs = False
    current_attr = None
    current_attr_value = None

    def parse(self, tokens=[]):
        if not tokens and not self.tokens:
            self.tokens = self._extract_tokens()
        for index, token, content in self.tokens:
            self._process_token(token, content)
        return self.dependencies

    def _process_token(self, token, content):
        if u(token) == 'Token.Name.Tag':
            self._process_tag(token, content)
        elif u(token) == 'Token.Literal.String':
            self._process_string(token, content)
        elif u(token) == 'Token.Name.Attribute':
            self._process_attribute(token, content)

    @property
    def current_tag(self):
        return None if len(self.tags) == 0 else self.tags[0]

    def _process_tag(self, token, content):
        if content.startswith('</') or content.startswith('/'):
            try:
                self.tags.pop(0)
            except IndexError:
                # ignore errors from malformed markup
                pass
            self.getting_attrs = False
        elif content.startswith('<'):
            self.tags.insert(0, content.replace('<', '', 1).strip().lower())
            self.getting_attrs = True
        elif content.startswith('>'):
            self.getting_attrs = False
        self.current_attr = None

    def _process_attribute(self, token, content):
        if self.getting_attrs:
            self.current_attr = content.lower().strip('=')
        else:
            self.current_attr = None
        self.current_attr_value = None

    def _process_string(self, token, content):
        if self.getting_attrs and self.current_attr is not None:
            if content.endswith('"') or content.endswith("'"):
                if self.current_attr_value is not None:
                    self.current_attr_value += content
                    if self.current_tag == 'script' and self.current_attr == 'src':
                        self.append(self.current_attr_value)
                    self.current_attr = None
                    self.current_attr_value = None
                else:
                    if len(content) == 1:
                        self.current_attr_value = content
                    else:
                        if self.current_tag == 'script' and self.current_attr == 'src':
                            self.append(content)
                        self.current_attr = None
                        self.current_attr_value = None
            elif content.startswith('"') or content.startswith("'"):
                if self.current_attr_value is None:
                    self.current_attr_value = content
                else:
                    self.current_attr_value += content


class VelocityHtmlParser(HtmlDjangoParser):
    pass


class MyghtyHtmlParser(HtmlDjangoParser):
    pass


class MasonParser(HtmlDjangoParser):
    pass


class MakoHtmlParser(HtmlDjangoParser):
    pass


class CheetahHtmlParser(HtmlDjangoParser):
    pass


class HtmlGenshiParser(HtmlDjangoParser):
    pass


class RhtmlParser(HtmlDjangoParser):
    pass


class HtmlPhpParser(HtmlDjangoParser):
    pass


class HtmlSmartyParser(HtmlDjangoParser):
    pass


class EvoqueHtmlParser(HtmlDjangoParser):
    pass


class ColdfusionHtmlParser(HtmlDjangoParser):
    pass


class LassoHtmlParser(HtmlDjangoParser):
    pass


class HandlebarsHtmlParser(HtmlDjangoParser):
    pass


class YamlJinjaParser(HtmlDjangoParser):
    pass


class TwigHtmlParser(HtmlDjangoParser):
    pass
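HtmlDjangoParser (and the thin subclasses above) records a dependency for each script tag with a src attribute; in this version the value keeps its surrounding quotes. An illustrative run with pre-seeded tokens:

from pygments.lexers import HtmlDjangoLexer

from wakatime.languages.templates import HtmlDjangoParser

html = '<script src="/static/js/jquery.min.js"></script>'
parser = HtmlDjangoParser('index.html', lexer=None)
parser.tokens = list(HtmlDjangoLexer().get_tokens_unprocessed(html))
print(parser.parse())  # expected: ['"/static/js/jquery.min.js"']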
packages/wakatime/languages/unknown.py (new file, 33 lines)
@@ -0,0 +1,33 @@
# -*- coding: utf-8 -*-
"""
    wakatime.languages.unknown
    ~~~~~~~~~~~~~~~~~~~~~~~~~~

    Parse dependencies from files of unknown language.

    :copyright: (c) 2014 Alan Hamlett.
    :license: BSD, see LICENSE for more details.
"""

import os

from . import TokenParser


FILES = {
    'bower': {'exact': False, 'dependency': 'bower'},
    'grunt': {'exact': False, 'dependency': 'grunt'},
}


class UnknownParser(TokenParser):

    def parse(self, tokens=[]):
        self._process_file_name(os.path.basename(self.source_file))
        return self.dependencies

    def _process_file_name(self, file_name):
        for key, value in FILES.items():
            found = (key == file_name) if value.get('exact') else (key.lower() in file_name.lower())
            if found:
                self.append(value['dependency'])
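Finally, the fallback path needs no lexer at all; the match is purely on the file name (illustrative):

from wakatime.languages.unknown import UnknownParser

print(UnknownParser('Gruntfile.js').parse())  # expected: ['grunt']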