Upgrade wakatime-cli to v10.1.2
parent 9e102d7c5c
commit bc770515f0
21 changed files with 520 additions and 93 deletions
@@ -106,8 +106,8 @@ class DependencyParser(object):
         self.lexer = lexer
 
         if self.lexer:
-            module_name = self.lexer.__module__.rsplit('.', 1)[-1]
-            class_name = self.lexer.__class__.__name__.replace('Lexer', 'Parser', 1)
+            module_name = self.root_lexer.__module__.rsplit('.', 1)[-1]
+            class_name = self.root_lexer.__class__.__name__.replace('Lexer', 'Parser', 1)
         else:
             module_name = 'unknown'
             class_name = 'UnknownParser'
@@ -121,6 +121,12 @@ class DependencyParser(object):
         except ImportError:
             log.debug('Parsing dependencies not supported for {0}.{1}'.format(module_name, class_name))
 
+    @property
+    def root_lexer(self):
+        if hasattr(self.lexer, 'root_lexer'):
+            return self.lexer.root_lexer
+        return self.lexer
+
     def parse(self):
         if self.parser:
             plugin = self.parser(self.source_file, lexer=self.lexer)
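Note on the hunk above: pygments delegating lexers (template and other wrapper lexers) expose the underlying language through a root_lexer attribute, and the new property falls back to the lexer itself when that attribute is missing, so the dependency parser class is now chosen by the root language rather than the wrapper. A minimal sketch of the same fallback outside the class, assuming pygments is installed; the html+django lexer is only an example:

    from pygments.lexers import get_lexer_by_name

    def root_lexer(lexer):
        # Same fallback as the new DependencyParser.root_lexer property:
        # delegating lexers (e.g. template lexers) wrap the real language.
        return lexer.root_lexer if hasattr(lexer, 'root_lexer') else lexer

    lexer = get_lexer_by_name('html+django')
    print(type(lexer).__name__)              # HtmlDjangoLexer
    print(type(root_lexer(lexer)).__name__)  # HtmlLexer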
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 """
-    wakatime.languages.c_cpp
-    ~~~~~~~~~~~~~~~~~~~~~~~~
+    wakatime.dependencies.c_cpp
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
     Parse dependencies from C++ code.
 
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 """
-    wakatime.languages.data
-    ~~~~~~~~~~~~~~~~~~~~~~~
+    wakatime.dependencies.data
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~
 
     Parse dependencies from data files.
 
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 """
-    wakatime.languages.dotnet
-    ~~~~~~~~~~~~~~~~~~~~~~~~~
+    wakatime.dependencies.dotnet
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
     Parse dependencies from .NET code.
 
packages/wakatime/dependencies/elm.py (new file, 47 lines)
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+"""
+    wakatime.dependencies.elm
+    ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Parse dependencies from Elm code.
+
+    :copyright: (c) 2018 Alan Hamlett.
+    :license: BSD, see LICENSE for more details.
+"""
+
+from . import TokenParser
+
+
+class ElmParser(TokenParser):
+    state = None
+
+    def parse(self):
+        for index, token, content in self.tokens:
+            self._process_token(token, content)
+        return self.dependencies
+
+    def _process_token(self, token, content):
+        if self.partial(token) == 'Namespace':
+            self._process_namespace(token, content)
+        elif self.partial(token) == 'Text':
+            self._process_text(token, content)
+        elif self.partial(token) == 'Class':
+            self._process_class(token, content)
+        else:
+            self._process_other(token, content)
+
+    def _process_namespace(self, token, content):
+        self.state = content.strip()
+
+    def _process_class(self, token, content):
+        if self.state == 'import':
+            self.append(self._format(content))
+
+    def _process_text(self, token, content):
+        pass
+
+    def _process_other(self, token, content):
+        self.state = None
+
+    def _format(self, content):
+        return content.strip().split('.')[0].strip()
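These parser classes are driven by DependencyParser in the first hunk above: it instantiates parser(source_file, lexer=lexer) and calls parse(). A hedged usage sketch for the new ElmParser, assuming the plugin's packages/ directory is importable and that the base TokenParser tokenizes source_file with the given pygments lexer when .tokens is read, as in the upstream wakatime package; the sample file and expected output are illustrative only:

    from pygments.lexers import get_lexer_by_name
    from wakatime.dependencies.elm import ElmParser

    # throw-away sample file with a single Elm import
    with open('Main.elm', 'w') as fh:
        fh.write('import Html.Attributes exposing (class)\n')

    parser = ElmParser('Main.elm', lexer=get_lexer_by_name('elm'))
    # _format() keeps only the first dotted component, so this should
    # report something like ['Html'] if the Elm lexer tags the module
    # name as a Class token.
    print(parser.parse())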
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 """
-    wakatime.languages.go
-    ~~~~~~~~~~~~~~~~~~~~~
+    wakatime.dependencies.go
+    ~~~~~~~~~~~~~~~~~~~~~~~~
 
     Parse dependencies from Go code.
 
packages/wakatime/dependencies/haskell.py (new file, 53 lines)
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+"""
+    wakatime.dependencies.haskell
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Parse dependencies from Haskell code.
+
+    :copyright: (c) 2018 Alan Hamlett.
+    :license: BSD, see LICENSE for more details.
+"""
+
+from . import TokenParser
+
+
+class HaskellParser(TokenParser):
+    state = None
+
+    def parse(self):
+        for index, token, content in self.tokens:
+            self._process_token(token, content)
+        return self.dependencies
+
+    def _process_token(self, token, content):
+        if self.partial(token) == 'Reserved':
+            self._process_reserved(token, content)
+        elif self.partial(token) == 'Namespace':
+            self._process_namespace(token, content)
+        elif self.partial(token) == 'Keyword':
+            self._process_keyword(token, content)
+        elif self.partial(token) == 'Text':
+            self._process_text(token, content)
+        else:
+            self._process_other(token, content)
+
+    def _process_reserved(self, token, content):
+        self.state = content.strip()
+
+    def _process_namespace(self, token, content):
+        if self.state == 'import':
+            self.append(self._format(content))
+
+    def _process_keyword(self, token, content):
+        if self.state != 'import' or content.strip() != 'qualified':
+            self.state = None
+
+    def _process_text(self, token, content):
+        pass
+
+    def _process_other(self, token, content):
+        self.state = None
+
+    def _format(self, content):
+        return content.strip().split('.')[0].strip()
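The HaskellParser keyword handling above is what lets qualified imports through: _process_keyword clears the state for any keyword except 'qualified' seen while in the import state. A self-contained trace of that state machine, with a hand-written token stream standing in for what a pygments HaskellLexer might emit for "import qualified Data.Map" (the stream is an assumption; the transition logic is copied from the parser):

    state = None
    deps = []

    def feed(partial, content):
        global state
        if partial == 'Reserved':        # the 'import' keyword
            state = content.strip()
        elif partial == 'Namespace':     # the module name
            if state == 'import':
                deps.append(content.strip().split('.')[0].strip())
        elif partial == 'Keyword':       # 'qualified' must not reset the state
            if state != 'import' or content.strip() != 'qualified':
                state = None
        elif partial == 'Text':          # whitespace is ignored
            pass
        else:
            state = None

    for tok in [('Reserved', 'import'), ('Text', ' '),
                ('Keyword', 'qualified'), ('Text', ' '),
                ('Namespace', 'Data.Map')]:
        feed(*tok)

    print(deps)  # ['Data']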
packages/wakatime/dependencies/haxe.py (new file, 48 lines)
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+"""
+    wakatime.dependencies.haxe
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Parse dependencies from Haxe code.
+
+    :copyright: (c) 2018 Alan Hamlett.
+    :license: BSD, see LICENSE for more details.
+"""
+
+from . import TokenParser
+
+
+class HaxeParser(TokenParser):
+    exclude = [
+        r'^haxe$',
+    ]
+    state = None
+
+    def parse(self):
+        for index, token, content in self.tokens:
+            self._process_token(token, content)
+        return self.dependencies
+
+    def _process_token(self, token, content):
+        if self.partial(token) == 'Namespace':
+            self._process_namespace(token, content)
+        elif self.partial(token) == 'Text':
+            self._process_text(token, content)
+        else:
+            self._process_other(token, content)
+
+    def _process_namespace(self, token, content):
+        if self.state == 'import':
+            self.append(self._format(content))
+            self.state = None
+        else:
+            self.state = content
+
+    def _process_text(self, token, content):
+        pass
+
+    def _process_other(self, token, content):
+        self.state = None
+
+    def _format(self, content):
+        return content.strip()
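HaxeParser._format keeps the whole dotted path, unlike ElmParser._format which keeps only the first component, and the exclude pattern r'^haxe$' presumably drops only the bare standard-library name (the filtering itself happens in the TokenParser base class, not shown here). A small comparison of the two helpers, copied from the files above with made-up inputs:

    # stand-alone copies of the two _format helpers added in this commit
    def elm_format(content):
        return content.strip().split('.')[0].strip()

    def haxe_format(content):
        return content.strip()

    print(elm_format('Html.Attributes'))     # 'Html'
    print(haxe_format('haxe.ds.StringMap'))  # 'haxe.ds.StringMap'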
@@ -1,9 +1,9 @@
 # -*- coding: utf-8 -*-
 """
-    wakatime.languages.templates
-    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    wakatime.dependencies.html
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-    Parse dependencies from Templates.
+    Parse dependencies from HTML.
 
     :copyright: (c) 2014 Alan Hamlett.
     :license: BSD, see LICENSE for more details.
@@ -69,7 +69,7 @@ KEYWORDS = [
 ]
 
 
-class HtmlDjangoParser(TokenParser):
+class HtmlParser(TokenParser):
     tags = []
     opening_tag = False
     getting_attrs = False
@@ -141,63 +141,3 @@ class HtmlDjangoParser(TokenParser):
         elif content.startswith('"') or content.startswith("'"):
             if self.current_attr_value is None:
                 self.current_attr_value = content
-
-
-class VelocityHtmlParser(HtmlDjangoParser):
-    pass
-
-
-class MyghtyHtmlParser(HtmlDjangoParser):
-    pass
-
-
-class MasonParser(HtmlDjangoParser):
-    pass
-
-
-class MakoHtmlParser(HtmlDjangoParser):
-    pass
-
-
-class CheetahHtmlParser(HtmlDjangoParser):
-    pass
-
-
-class HtmlGenshiParser(HtmlDjangoParser):
-    pass
-
-
-class RhtmlParser(HtmlDjangoParser):
-    pass
-
-
-class HtmlPhpParser(HtmlDjangoParser):
-    pass
-
-
-class HtmlSmartyParser(HtmlDjangoParser):
-    pass
-
-
-class EvoqueHtmlParser(HtmlDjangoParser):
-    pass
-
-
-class ColdfusionHtmlParser(HtmlDjangoParser):
-    pass
-
-
-class LassoHtmlParser(HtmlDjangoParser):
-    pass
-
-
-class HandlebarsHtmlParser(HtmlDjangoParser):
-    pass
-
-
-class YamlJinjaParser(HtmlDjangoParser):
-    pass
-
-
-class TwigHtmlParser(HtmlDjangoParser):
-    pass
packages/wakatime/dependencies/javascript.py (new file, 60 lines)
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+"""
+    wakatime.dependencies.javascript
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Parse dependencies from JavaScript code.
+
+    :copyright: (c) 2018 Alan Hamlett.
+    :license: BSD, see LICENSE for more details.
+"""
+
+import re
+
+from . import TokenParser
+
+
+class JavascriptParser(TokenParser):
+    state = None
+    extension = re.compile(r'\.\w{1,4}$')
+
+    def parse(self):
+        for index, token, content in self.tokens:
+            self._process_token(token, content)
+        return self.dependencies
+
+    def _process_token(self, token, content):
+        if self.partial(token) == 'Reserved':
+            self._process_reserved(token, content)
+        elif self.partial(token) == 'Single':
+            self._process_string(token, content)
+        elif self.partial(token) == 'Punctuation':
+            self._process_punctuation(token, content)
+        else:
+            self._process_other(token, content)
+
+    def _process_reserved(self, token, content):
+        if self.state is None:
+            self.state = content
+
+    def _process_string(self, token, content):
+        if self.state == 'import':
+            self.append(self._format_module(content))
+        self.state = None
+
+    def _process_punctuation(self, token, content):
+        if content == ';':
+            self.state = None
+
+    def _process_other(self, token, content):
+        pass
+
+    def _format_module(self, content):
+        content = content.strip().strip('"').strip("'").strip()
+        content = content.split('/')[-1].split('\\')[-1]
+        content = self.extension.sub('', content, count=1)
+        return content
+
+
+class TypeScriptParser(JavascriptParser):
+    pass
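JavascriptParser records the module specifier of an import statement and normalizes it in _format_module: strip quotes, take the path basename, drop a short trailing extension. A stand-alone copy of that helper (the sample specifiers are made up):

    import re

    extension = re.compile(r'\.\w{1,4}$')

    def format_module(content):
        # copied from JavascriptParser._format_module above
        content = content.strip().strip('"').strip("'").strip()
        content = content.split('/')[-1].split('\\')[-1]
        content = extension.sub('', content, count=1)
        return content

    print(format_module("'./lib/vendor/jquery.min.js'"))  # 'jquery.min'
    print(format_module('"react-dom/server"'))            # 'server'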
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 """
-    wakatime.languages.java
-    ~~~~~~~~~~~~~~~~~~~~~~~
+    wakatime.dependencies.java
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~
 
     Parse dependencies from Java code.
 
@@ -94,3 +94,89 @@ class JavaParser(TokenParser):
 
     def _process_other(self, token, content):
         pass
+
+
+class KotlinParser(TokenParser):
+    state = None
+    exclude = [
+        r'^java\.',
+    ]
+
+    def parse(self):
+        for index, token, content in self.tokens:
+            self._process_token(token, content)
+        return self.dependencies
+
+    def _process_token(self, token, content):
+        if self.partial(token) == 'Keyword':
+            self._process_keyword(token, content)
+        elif self.partial(token) == 'Text':
+            self._process_text(token, content)
+        elif self.partial(token) == 'Namespace':
+            self._process_namespace(token, content)
+        else:
+            self._process_other(token, content)
+
+    def _process_keyword(self, token, content):
+        self.state = content
+
+    def _process_text(self, token, content):
+        pass
+
+    def _process_namespace(self, token, content):
+        if self.state == 'import':
+            self.append(self._format(content))
+        self.state = None
+
+    def _process_other(self, token, content):
+        self.state = None
+
+    def _format(self, content):
+        content = content.split(u('.'))
+
+        if content[-1] == u('*'):
+            content = content[:len(content) - 1]
+
+        if len(content) == 0:
+            return None
+
+        if len(content) == 1:
+            return content[0]
+
+        return u('.').join(content[:2])
+
+
+class ScalaParser(TokenParser):
+    state = None
+
+    def parse(self):
+        for index, token, content in self.tokens:
+            self._process_token(token, content)
+        return self.dependencies
+
+    def _process_token(self, token, content):
+        if self.partial(token) == 'Keyword':
+            self._process_keyword(token, content)
+        elif self.partial(token) == 'Text':
+            self._process_text(token, content)
+        elif self.partial(token) == 'Namespace':
+            self._process_namespace(token, content)
+        else:
+            self._process_other(token, content)
+
+    def _process_keyword(self, token, content):
+        self.state = content
+
+    def _process_text(self, token, content):
+        pass
+
+    def _process_namespace(self, token, content):
+        if self.state == 'import':
+            self.append(self._format(content))
+        self.state = None
+
+    def _process_other(self, token, content):
+        self.state = None
+
+    def _format(self, content):
+        return content.strip().lstrip('__root__').strip('_').strip('.')
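KotlinParser._format collapses a full import path to at most its first two components and drops a trailing wildcard; anything matching the exclude pattern r'^java\.' is presumably filtered out by the base class when appended. A stand-alone copy of that logic, with wakatime.compat.u() reduced to plain str for illustration:

    def kotlin_format(content):
        # copied from KotlinParser._format above, minus the u() wrappers
        content = content.split('.')
        if content[-1] == '*':
            content = content[:len(content) - 1]
        if len(content) == 0:
            return None
        if len(content) == 1:
            return content[0]
        return '.'.join(content[:2])

    print(kotlin_format('kotlinx.coroutines.flow.Flow'))  # 'kotlinx.coroutines'
    print(kotlin_format('okhttp3.*'))                     # 'okhttp3'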
packages/wakatime/dependencies/objective.py (new file, 84 lines)
@@ -0,0 +1,84 @@
+# -*- coding: utf-8 -*-
+"""
+    wakatime.dependencies.objective
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Parse dependencies from Objective-C and Swift code.
+
+    :copyright: (c) 2018 Alan Hamlett.
+    :license: BSD, see LICENSE for more details.
+"""
+
+import re
+
+from . import TokenParser
+
+
+class SwiftParser(TokenParser):
+    state = None
+    exclude = [
+        r'^foundation$',
+    ]
+
+    def parse(self):
+        for index, token, content in self.tokens:
+            self._process_token(token, content)
+        return self.dependencies
+
+    def _process_token(self, token, content):
+        if self.partial(token) == 'Declaration':
+            self._process_declaration(token, content)
+        elif self.partial(token) == 'Class':
+            self._process_class(token, content)
+        else:
+            self._process_other(token, content)
+
+    def _process_declaration(self, token, content):
+        if self.state is None:
+            self.state = content
+
+    def _process_class(self, token, content):
+        if self.state == 'import':
+            self.append(content)
+        self.state = None
+
+    def _process_other(self, token, content):
+        pass
+
+
+class ObjectiveCParser(TokenParser):
+    state = None
+    extension = re.compile(r'\.[mh]$')
+
+    def parse(self):
+        for index, token, content in self.tokens:
+            self._process_token(token, content)
+        return self.dependencies
+
+    def _process_token(self, token, content):
+        if self.partial(token) == 'Preproc':
+            self._process_preproc(token, content)
+        else:
+            self._process_other(token, content)
+
+    def _process_preproc(self, token, content):
+        if self.state:
+            self._process_import(token, content)
+
+        self.state = content
+
+    def _process_import(self, token, content):
+        if self.state == '#' and content.startswith('import '):
+            self.append(self._format(content))
+        self.state = None
+
+    def _process_other(self, token, content):
+        pass
+
+    def _format(self, content):
+        content = content.strip().lstrip('import ').strip()
+        content = content.strip('"').strip("'").strip()
+        content = content.strip('<').strip('>').strip()
+        content = content.split('/')[0]
+        content = self.extension.sub('', content, count=1)
+        return content
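ObjectiveCParser watches preprocessor tokens: a '#' followed by a token starting with 'import ' is treated as an include, and _format reduces it to the framework or header name. A stand-alone copy of _format with made-up inputs:

    import re

    extension = re.compile(r'\.[mh]$')

    def objc_format(content):
        # copied from ObjectiveCParser._format above
        content = content.strip().lstrip('import ').strip()
        content = content.strip('"').strip("'").strip()
        content = content.strip('<').strip('>').strip()
        content = content.split('/')[0]
        content = extension.sub('', content, count=1)
        return content

    print(objc_format('import <UIKit/UIKit.h>'))  # 'UIKit'
    print(objc_format('import "AppDelegate.h"'))  # 'AppDelegate'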
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 """
-    wakatime.languages.php
-    ~~~~~~~~~~~~~~~~~~~~~~
+    wakatime.dependencies.php
+    ~~~~~~~~~~~~~~~~~~~~~~~~~
 
     Parse dependencies from PHP code.
 
@@ -16,6 +16,10 @@ from ..compat import u
 class PhpParser(TokenParser):
     state = None
     parens = 0
+    exclude = [
+        r'^app$',
+        r'app\.php$',
+    ]
 
     def parse(self):
         for index, token, content in self.tokens:
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 """
-    wakatime.languages.python
-    ~~~~~~~~~~~~~~~~~~~~~~~~~
+    wakatime.dependencies.python
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
     Parse dependencies from Python code.
 
packages/wakatime/dependencies/rust.py (new file, 48 lines)
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+"""
+    wakatime.dependencies.rust
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Parse dependencies from Rust code.
+
+    :copyright: (c) 2018 Alan Hamlett.
+    :license: BSD, see LICENSE for more details.
+"""
+
+from . import TokenParser
+
+
+class RustParser(TokenParser):
+    state = None
+
+    def parse(self):
+        for index, token, content in self.tokens:
+            self._process_token(token, content)
+        return self.dependencies
+
+    def _process_token(self, token, content):
+        if self.partial(token) == 'Keyword':
+            self._process_keyword(token, content)
+        elif self.partial(token) == 'Whitespace':
+            self._process_whitespace(token, content)
+        elif self.partial(token) == 'Name':
+            self._process_name(token, content)
+        else:
+            self._process_other(token, content)
+
+    def _process_keyword(self, token, content):
+        if self.state == 'extern' and content == 'crate':
+            self.state = 'extern crate'
+        else:
+            self.state = content
+
+    def _process_whitespace(self, token, content):
+        pass
+
+    def _process_name(self, token, content):
+        if self.state == 'extern crate':
+            self.append(content)
+        self.state = None
+
+    def _process_other(self, token, content):
+        self.state = None
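RustParser only records a name token seen immediately after the two-keyword sequence extern crate; use lines and other names reset the state. A hedged sketch of that state machine driven by a real pygments RustLexer token stream, assuming pygments is installed and that 'extern' and 'crate' lex as Keyword tokens with the crate name as a Name token (the printed expectation depends on that):

    from pygments.lexers import RustLexer

    state = None
    deps = []
    code = 'extern crate serde;\nuse std::io;\n'

    for index, token, content in RustLexer().get_tokens_unprocessed(code):
        partial = str(token).split('.')[-1]  # rough stand-in for TokenParser.partial()
        if partial == 'Keyword':
            if state == 'extern' and content == 'crate':
                state = 'extern crate'
            else:
                state = content
        elif partial == 'Whitespace':
            pass
        elif partial == 'Name':
            if state == 'extern crate':
                deps.append(content)
            state = None
        else:
            state = None

    print(deps)  # expected: ['serde']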
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 """
-    wakatime.languages.unknown
-    ~~~~~~~~~~~~~~~~~~~~~~~~~~
+    wakatime.dependencies.unknown
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
     Parse dependencies from files of unknown language.
 