Upgrade wakatime-cli to v10.1.2

This commit is contained in:
Alan Hamlett 2018-03-15 01:33:38 -07:00
parent dadf0bc969
commit fad95ce3f7
21 changed files with 520 additions and 93 deletions

View file

@ -1,7 +1,7 @@
__title__ = 'wakatime' __title__ = 'wakatime'
__description__ = 'Common interface to the WakaTime api.' __description__ = 'Common interface to the WakaTime api.'
__url__ = 'https://github.com/wakatime/wakatime' __url__ = 'https://github.com/wakatime/wakatime'
__version_info__ = ('10', '1', '0') __version_info__ = ('10', '1', '2')
__version__ = '.'.join(__version_info__) __version__ = '.'.join(__version_info__)
__author__ = 'Alan Hamlett' __author__ = 'Alan Hamlett'
__author_email__ = 'alan@wakatime.com' __author_email__ = 'alan@wakatime.com'

View file

@ -106,8 +106,8 @@ class DependencyParser(object):
self.lexer = lexer self.lexer = lexer
if self.lexer: if self.lexer:
module_name = self.lexer.__module__.rsplit('.', 1)[-1] module_name = self.root_lexer.__module__.rsplit('.', 1)[-1]
class_name = self.lexer.__class__.__name__.replace('Lexer', 'Parser', 1) class_name = self.root_lexer.__class__.__name__.replace('Lexer', 'Parser', 1)
else: else:
module_name = 'unknown' module_name = 'unknown'
class_name = 'UnknownParser' class_name = 'UnknownParser'
@ -121,6 +121,12 @@ class DependencyParser(object):
except ImportError: except ImportError:
log.debug('Parsing dependencies not supported for {0}.{1}'.format(module_name, class_name)) log.debug('Parsing dependencies not supported for {0}.{1}'.format(module_name, class_name))
@property
def root_lexer(self):
if hasattr(self.lexer, 'root_lexer'):
return self.lexer.root_lexer
return self.lexer
def parse(self): def parse(self):
if self.parser: if self.parser:
plugin = self.parser(self.source_file, lexer=self.lexer) plugin = self.parser(self.source_file, lexer=self.lexer)

View file

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
""" """
wakatime.languages.c_cpp wakatime.dependencies.c_cpp
~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from C++ code. Parse dependencies from C++ code.

View file

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
""" """
wakatime.languages.data wakatime.dependencies.data
~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from data files. Parse dependencies from data files.

View file

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
""" """
wakatime.languages.dotnet wakatime.dependencies.dotnet
~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from .NET code. Parse dependencies from .NET code.

View file

@ -0,0 +1,47 @@
# -*- coding: utf-8 -*-
"""
wakatime.dependencies.elm
~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from Elm code.
:copyright: (c) 2018 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
from . import TokenParser
class ElmParser(TokenParser):
    """Collect imported module names from tokenized Elm source.

    A tiny state machine: a Namespace token (the ``import`` keyword)
    arms the parser, and the Class token that follows (the module name)
    is recorded as a dependency.
    """
    state = None

    def parse(self):
        """Walk every token and return the collected dependencies."""
        for _, token, content in self.tokens:
            self._process_token(token, content)
        return self.dependencies

    def _process_token(self, token, content):
        # Dispatch on the short (partial) token-type name.
        kind = self.partial(token)
        if kind == 'Namespace':
            self._process_namespace(token, content)
        elif kind == 'Text':
            self._process_text(token, content)
        elif kind == 'Class':
            self._process_class(token, content)
        else:
            self._process_other(token, content)

    def _process_namespace(self, token, content):
        # Remember the keyword; 'import' arms the class handler.
        self.state = content.strip()

    def _process_class(self, token, content):
        if self.state == 'import':
            self.append(self._format(content))

    def _process_text(self, token, content):
        # Whitespace/text between tokens does not change state.
        pass

    def _process_other(self, token, content):
        self.state = None

    def _format(self, content):
        # Top-level module only: "Html.Events" -> "Html".
        return content.strip().split('.', 1)[0].strip()

View file

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
""" """
wakatime.languages.go wakatime.dependencies.go
~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from Go code. Parse dependencies from Go code.

View file

@ -0,0 +1,53 @@
# -*- coding: utf-8 -*-
"""
wakatime.dependencies.haskell
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from Haskell code.
:copyright: (c) 2018 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
from . import TokenParser
class HaskellParser(TokenParser):
    """Collect imported module names from tokenized Haskell source.

    A Reserved token (the ``import`` keyword) arms the parser; the
    following Namespace token is recorded. The ``qualified`` keyword is
    allowed between the two without cancelling the import state.
    """
    state = None

    def parse(self):
        """Walk every token and return the collected dependencies."""
        for _, token, content in self.tokens:
            self._process_token(token, content)
        return self.dependencies

    def _process_token(self, token, content):
        kind = self.partial(token)
        if kind == 'Reserved':
            self._process_reserved(token, content)
        elif kind == 'Namespace':
            self._process_namespace(token, content)
        elif kind == 'Keyword':
            self._process_keyword(token, content)
        elif kind == 'Text':
            self._process_text(token, content)
        else:
            self._process_other(token, content)

    def _process_reserved(self, token, content):
        # Remember the reserved word; 'import' arms the namespace handler.
        self.state = content.strip()

    def _process_namespace(self, token, content):
        if self.state == 'import':
            self.append(self._format(content))

    def _process_keyword(self, token, content):
        # 'qualified' directly after 'import' keeps the import armed;
        # any other keyword cancels it.
        if not (self.state == 'import' and content.strip() == 'qualified'):
            self.state = None

    def _process_text(self, token, content):
        pass

    def _process_other(self, token, content):
        self.state = None

    def _format(self, content):
        # Top-level module only: "Data.Map" -> "Data".
        return content.strip().split('.', 1)[0].strip()

View file

@ -0,0 +1,48 @@
# -*- coding: utf-8 -*-
"""
wakatime.dependencies.haxe
~~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from Haxe code.
:copyright: (c) 2018 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
from . import TokenParser
class HaxeParser(TokenParser):
    """Collect imported package names from tokenized Haxe source.

    A Namespace token whose content is ``import`` arms the parser; the
    next Namespace token is recorded as a dependency. Names matching
    ``exclude`` are dropped by the base class.
    """
    exclude = [
        r'^haxe$',
    ]
    state = None

    def parse(self):
        """Walk every token and return the collected dependencies."""
        for _, token, content in self.tokens:
            self._process_token(token, content)
        return self.dependencies

    def _process_token(self, token, content):
        kind = self.partial(token)
        if kind == 'Namespace':
            self._process_namespace(token, content)
        elif kind == 'Text':
            self._process_text(token, content)
        else:
            self._process_other(token, content)

    def _process_namespace(self, token, content):
        # Guard clause: first namespace token latches the state; the
        # one following an 'import' is the dependency itself.
        if self.state != 'import':
            self.state = content
            return
        self.append(self._format(content))
        self.state = None

    def _process_text(self, token, content):
        pass

    def _process_other(self, token, content):
        self.state = None

    def _format(self, content):
        return content.strip()

View file

@ -1,9 +1,9 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
""" """
wakatime.languages.templates wakatime.dependencies.html
~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from Templates. Parse dependencies from HTML.
:copyright: (c) 2014 Alan Hamlett. :copyright: (c) 2014 Alan Hamlett.
:license: BSD, see LICENSE for more details. :license: BSD, see LICENSE for more details.
@ -69,7 +69,7 @@ KEYWORDS = [
] ]
class HtmlDjangoParser(TokenParser): class HtmlParser(TokenParser):
tags = [] tags = []
opening_tag = False opening_tag = False
getting_attrs = False getting_attrs = False
@ -141,63 +141,3 @@ class HtmlDjangoParser(TokenParser):
elif content.startswith('"') or content.startswith("'"): elif content.startswith('"') or content.startswith("'"):
if self.current_attr_value is None: if self.current_attr_value is None:
self.current_attr_value = content self.current_attr_value = content
class VelocityHtmlParser(HtmlDjangoParser):
pass
class MyghtyHtmlParser(HtmlDjangoParser):
pass
class MasonParser(HtmlDjangoParser):
pass
class MakoHtmlParser(HtmlDjangoParser):
pass
class CheetahHtmlParser(HtmlDjangoParser):
pass
class HtmlGenshiParser(HtmlDjangoParser):
pass
class RhtmlParser(HtmlDjangoParser):
pass
class HtmlPhpParser(HtmlDjangoParser):
pass
class HtmlSmartyParser(HtmlDjangoParser):
pass
class EvoqueHtmlParser(HtmlDjangoParser):
pass
class ColdfusionHtmlParser(HtmlDjangoParser):
pass
class LassoHtmlParser(HtmlDjangoParser):
pass
class HandlebarsHtmlParser(HtmlDjangoParser):
pass
class YamlJinjaParser(HtmlDjangoParser):
pass
class TwigHtmlParser(HtmlDjangoParser):
pass

View file

@ -0,0 +1,60 @@
# -*- coding: utf-8 -*-
"""
wakatime.dependencies.javascript
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from JavaScript code.
:copyright: (c) 2018 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
import re
from . import TokenParser
class JavascriptParser(TokenParser):
    """Collect imported module names from tokenized JavaScript source.

    A Reserved token (e.g. ``import``) latches the state; the single-
    quoted/double-quoted string that follows is taken as the module
    path. A ``;`` punctuation token clears the state.
    """
    state = None
    # Matches a short trailing file extension such as ".js" or ".json".
    extension = re.compile(r'\.\w{1,4}$')

    def parse(self):
        """Walk every token and return the collected dependencies."""
        for _, token, content in self.tokens:
            self._process_token(token, content)
        return self.dependencies

    def _process_token(self, token, content):
        kind = self.partial(token)
        if kind == 'Reserved':
            self._process_reserved(token, content)
        elif kind == 'Single':
            self._process_string(token, content)
        elif kind == 'Punctuation':
            self._process_punctuation(token, content)
        else:
            self._process_other(token, content)

    def _process_reserved(self, token, content):
        # Only the first reserved word in a statement is latched.
        if self.state is None:
            self.state = content

    def _process_string(self, token, content):
        if self.state == 'import':
            self.append(self._format_module(content))
        self.state = None

    def _process_punctuation(self, token, content):
        # End of statement resets the state machine.
        if content == ';':
            self.state = None

    def _process_other(self, token, content):
        pass

    def _format_module(self, content):
        # Peel surrounding whitespace and quotes off the module path.
        name = content.strip().strip('"').strip("'").strip()
        # Keep only the final path segment (posix or windows separators).
        name = name.split('/')[-1].split('\\')[-1]
        # Drop a short trailing file extension, if present.
        return self.extension.sub('', name, count=1)
class TypeScriptParser(JavascriptParser):
    # TypeScript's import syntax matches JavaScript's, so the
    # JavaScript token parser is reused unchanged.
    pass

View file

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
""" """
wakatime.languages.java wakatime.dependencies.java
~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from Java code. Parse dependencies from Java code.
@ -94,3 +94,89 @@ class JavaParser(TokenParser):
def _process_other(self, token, content): def _process_other(self, token, content):
pass pass
class KotlinParser(TokenParser):
    """Collect imported package names from tokenized Kotlin source.

    A Keyword token (``import``) arms the parser; the Namespace token
    that follows is formatted and recorded. Imports matching
    ``exclude`` (the ``java.`` standard library) are dropped by the
    base class.
    """
    state = None
    exclude = [
        r'^java\.',
    ]

    def parse(self):
        """Walk every token and return the collected dependencies."""
        for index, token, content in self.tokens:
            self._process_token(token, content)
        return self.dependencies

    def _process_token(self, token, content):
        if self.partial(token) == 'Keyword':
            self._process_keyword(token, content)
        elif self.partial(token) == 'Text':
            self._process_text(token, content)
        elif self.partial(token) == 'Namespace':
            self._process_namespace(token, content)
        else:
            self._process_other(token, content)

    def _process_keyword(self, token, content):
        # Remember the keyword; 'import' arms the namespace handler.
        self.state = content

    def _process_text(self, token, content):
        pass

    def _process_namespace(self, token, content):
        if self.state == 'import':
            self.append(self._format(content))
        self.state = None

    def _process_other(self, token, content):
        self.state = None

    def _format(self, content):
        """Reduce a dotted import path to at most its first two segments.

        Returns None when nothing remains after dropping a trailing
        wildcard (``import *`` with no package prefix).
        """
        parts = content.split(u('.'))
        # Drop a trailing wildcard segment ("import a.b.*").
        if parts[-1] == u('*'):
            parts = parts[:-1]  # idiomatic negative-index slice
        if not parts:
            return None
        if len(parts) == 1:
            return parts[0]
        return u('.').join(parts[:2])
class ScalaParser(TokenParser):
    """Collect imported package names from tokenized Scala source.

    A Keyword token (``import``) arms the parser; the Namespace token
    that follows is formatted and recorded.
    """
    state = None

    def parse(self):
        """Walk every token and return the collected dependencies."""
        for index, token, content in self.tokens:
            self._process_token(token, content)
        return self.dependencies

    def _process_token(self, token, content):
        if self.partial(token) == 'Keyword':
            self._process_keyword(token, content)
        elif self.partial(token) == 'Text':
            self._process_text(token, content)
        elif self.partial(token) == 'Namespace':
            self._process_namespace(token, content)
        else:
            self._process_other(token, content)

    def _process_keyword(self, token, content):
        # Remember the keyword; 'import' arms the namespace handler.
        self.state = content

    def _process_text(self, token, content):
        pass

    def _process_namespace(self, token, content):
        if self.state == 'import':
            self.append(self._format(content))
        self.state = None

    def _process_other(self, token, content):
        self.state = None

    def _format(self, content):
        """Normalize an import path, dropping a leading ``__root__``.

        Bug fix: ``str.lstrip('__root__')`` strips any leading run of
        the characters ``_ r o t`` (lstrip takes a character set, not a
        prefix), mangling names such as ``tools.nsc`` -> ``ls.nsc``.
        Remove the literal prefix explicitly instead.
        """
        content = content.strip()
        if content.startswith('__root__'):
            content = content[len('__root__'):]
        return content.strip('_').strip('.')

View file

@ -0,0 +1,84 @@
# -*- coding: utf-8 -*-
"""
wakatime.dependencies.objective
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from Objective-C and Swift code.
:copyright: (c) 2018 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
import re
from . import TokenParser
class SwiftParser(TokenParser):
    """Collect imported framework names from tokenized Swift source.

    A Declaration token (``import``) arms the parser; the Class token
    that follows is recorded. Names matching ``exclude`` (Foundation)
    are dropped by the base class.
    """
    state = None
    exclude = [
        r'^foundation$',
    ]

    def parse(self):
        """Walk every token and return the collected dependencies."""
        for _, token, content in self.tokens:
            self._process_token(token, content)
        return self.dependencies

    def _process_token(self, token, content):
        kind = self.partial(token)
        if kind == 'Declaration':
            self._process_declaration(token, content)
        elif kind == 'Class':
            self._process_class(token, content)
        else:
            self._process_other(token, content)

    def _process_declaration(self, token, content):
        # Only the first declaration keyword seen is latched.
        if self.state is None:
            self.state = content

    def _process_class(self, token, content):
        if self.state == 'import':
            self.append(content)
        self.state = None

    def _process_other(self, token, content):
        pass
class ObjectiveCParser(TokenParser):
    """Collect imported header names from tokenized Objective-C source.

    Preprocessor tokens arrive in pairs: a ``#`` followed by the
    directive body. When the body starts with ``import ``, the header
    name is extracted and recorded.
    """
    state = None
    # Matches a trailing ".m" or ".h" extension.
    extension = re.compile(r'\.[mh]$')

    def parse(self):
        """Walk every token and return the collected dependencies."""
        for index, token, content in self.tokens:
            self._process_token(token, content)
        return self.dependencies

    def _process_token(self, token, content):
        if self.partial(token) == 'Preproc':
            self._process_preproc(token, content)
        else:
            self._process_other(token, content)

    def _process_preproc(self, token, content):
        # A previous preproc token ('#') means this one is the directive.
        if self.state:
            self._process_import(token, content)
        self.state = content

    def _process_import(self, token, content):
        if self.state == '#' and content.startswith('import '):
            self.append(self._format(content))
            self.state = None

    def _process_other(self, token, content):
        pass

    def _format(self, content):
        """Extract the framework/header name from an import directive.

        Bug fix: ``str.lstrip('import ')`` strips any leading run of
        the characters ``i m p o r t`` and space (lstrip takes a
        character set, not a prefix), which could eat header names
        beginning with those letters. Slice the literal prefix off
        instead — the caller guarantees it is present.
        """
        content = content.strip()
        if content.startswith('import '):
            content = content[len('import '):].strip()
        # Remove surrounding quotes or angle brackets.
        content = content.strip('"').strip("'").strip()
        content = content.strip('<').strip('>').strip()
        # "Foundation/Foundation.h" -> framework name only.
        content = content.split('/')[0]
        content = self.extension.sub('', content, count=1)
        return content

View file

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
""" """
wakatime.languages.php wakatime.dependencies.php
~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from PHP code. Parse dependencies from PHP code.
@ -16,6 +16,10 @@ from ..compat import u
class PhpParser(TokenParser): class PhpParser(TokenParser):
state = None state = None
parens = 0 parens = 0
exclude = [
r'^app$',
r'app\.php$',
]
def parse(self): def parse(self):
for index, token, content in self.tokens: for index, token, content in self.tokens:

View file

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
""" """
wakatime.languages.python wakatime.dependencies.python
~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from Python code. Parse dependencies from Python code.

View file

@ -0,0 +1,48 @@
# -*- coding: utf-8 -*-
"""
wakatime.dependencies.rust
~~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from Rust code.
:copyright: (c) 2018 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
from . import TokenParser
class RustParser(TokenParser):
    """Collect ``extern crate`` dependencies from tokenized Rust source.

    The keyword pair ``extern`` then ``crate`` arms the parser; the
    Name token that follows is recorded as a dependency.
    """
    state = None

    def parse(self):
        """Walk every token and return the collected dependencies."""
        for _, token, content in self.tokens:
            self._process_token(token, content)
        return self.dependencies

    def _process_token(self, token, content):
        kind = self.partial(token)
        if kind == 'Keyword':
            self._process_keyword(token, content)
        elif kind == 'Whitespace':
            self._process_whitespace(token, content)
        elif kind == 'Name':
            self._process_name(token, content)
        else:
            self._process_other(token, content)

    def _process_keyword(self, token, content):
        # 'crate' immediately after 'extern' arms the name handler;
        # any other keyword just latches itself.
        if self.state == 'extern' and content == 'crate':
            self.state = 'extern crate'
        else:
            self.state = content

    def _process_whitespace(self, token, content):
        # Whitespace between 'extern', 'crate' and the name is ignored.
        pass

    def _process_name(self, token, content):
        if self.state == 'extern crate':
            self.append(content)
        self.state = None

    def _process_other(self, token, content):
        self.state = None

View file

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
""" """
wakatime.languages.unknown wakatime.dependencies.unknown
~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from files of unknown language. Parse dependencies from files of unknown language.

View file

@ -12,3 +12,8 @@
class NotYetImplemented(Exception): class NotYetImplemented(Exception):
"""This method needs to be implemented.""" """This method needs to be implemented."""
class SkipHeartbeat(Exception):
"""Raised to prevent the current heartbeat from being sent."""
pass

View file

@ -12,6 +12,7 @@ import logging
import re import re
from .compat import u, json from .compat import u, json
from .exceptions import SkipHeartbeat
from .project import get_project_info from .project import get_project_info
from .stats import get_file_stats from .stats import get_file_stats
from .utils import get_user_agent, should_exclude, format_file_path, find_project_file from .utils import get_user_agent, should_exclude, format_file_path, find_project_file
@ -77,12 +78,17 @@ class Heartbeat(object):
self.project = project self.project = project
self.branch = branch self.branch = branch
stats = get_file_stats(self.entity, try:
entity_type=self.type, stats = get_file_stats(self.entity,
lineno=data.get('lineno'), entity_type=self.type,
cursorpos=data.get('cursorpos'), lineno=data.get('lineno'),
plugin=args.plugin, cursorpos=data.get('cursorpos'),
language=data.get('language')) plugin=args.plugin,
language=data.get('language'))
except SkipHeartbeat as ex:
self.skip = u(ex) or 'Skipping'
return
else: else:
self.project = data.get('project') self.project = data.get('project')
self.branch = data.get('branch') self.branch = data.get('branch')

View file

@ -37,7 +37,7 @@ class JavascriptLexer(RegexLexer):
name = 'JavaScript' name = 'JavaScript'
aliases = ['js', 'javascript'] aliases = ['js', 'javascript']
filenames = ['*.js', '*.jsm'] filenames = ['*.js', '*.jsm', '*.mjs']
mimetypes = ['application/javascript', 'application/x-javascript', mimetypes = ['application/javascript', 'application/x-javascript',
'text/x-javascript', 'text/javascript'] 'text/x-javascript', 'text/javascript']
@ -1035,7 +1035,6 @@ class CoffeeScriptLexer(RegexLexer):
filenames = ['*.coffee'] filenames = ['*.coffee']
mimetypes = ['text/coffeescript'] mimetypes = ['text/coffeescript']
_operator_re = ( _operator_re = (
r'\+\+|~|&&|\band\b|\bor\b|\bis\b|\bisnt\b|\bnot\b|\?|:|' r'\+\+|~|&&|\band\b|\bor\b|\bis\b|\bisnt\b|\bnot\b|\?|:|'
r'\|\||\\(?=\n)|' r'\|\||\\(?=\n)|'
@ -1464,6 +1463,7 @@ class EarlGreyLexer(RegexLexer):
], ],
} }
class JuttleLexer(RegexLexer): class JuttleLexer(RegexLexer):
""" """
For `Juttle`_ source code. For `Juttle`_ source code.

View file

@ -17,6 +17,7 @@ import sys
from .compat import u, open from .compat import u, open
from .constants import MAX_FILE_SIZE_SUPPORTED from .constants import MAX_FILE_SIZE_SUPPORTED
from .dependencies import DependencyParser from .dependencies import DependencyParser
from .exceptions import SkipHeartbeat
from .language_priorities import LANGUAGES from .language_priorities import LANGUAGES
from .packages.pygments.lexers import ( from .packages.pygments.lexers import (
@ -53,6 +54,8 @@ def get_file_stats(file_name, entity_type='file', lineno=None, cursorpos=None,
if not language: if not language:
language, lexer = guess_language(file_name) language, lexer = guess_language(file_name)
language = use_root_language(language, lexer)
parser = DependencyParser(file_name, lexer) parser = DependencyParser(file_name, lexer)
dependencies = parser.parse() dependencies = parser.parse()
@ -118,6 +121,8 @@ def guess_lexer_using_filename(file_name, text):
try: try:
lexer = custom_pygments_guess_lexer_for_filename(file_name, text) lexer = custom_pygments_guess_lexer_for_filename(file_name, text)
except SkipHeartbeat as ex:
raise SkipHeartbeat(u(ex))
except: except:
log.traceback(logging.DEBUG) log.traceback(logging.DEBUG)
@ -167,7 +172,7 @@ def get_language_from_extension(file_name):
filepart, extension = os.path.splitext(file_name) filepart, extension = os.path.splitext(file_name)
if re.match(r'\.h.*', extension, re.IGNORECASE) or re.match(r'\.c.*', extension, re.IGNORECASE): if re.match(r'\.h.*$', extension, re.IGNORECASE) or re.match(r'\.c.*$', extension, re.IGNORECASE):
if os.path.exists(u('{0}{1}').format(u(filepart), u('.c'))) or os.path.exists(u('{0}{1}').format(u(filepart), u('.C'))): if os.path.exists(u('{0}{1}').format(u(filepart), u('.c'))) or os.path.exists(u('{0}{1}').format(u(filepart), u('.C'))):
return 'C' return 'C'
@ -178,6 +183,18 @@ def get_language_from_extension(file_name):
if '.c' in available_extensions: if '.c' in available_extensions:
return 'C' return 'C'
if os.path.exists(u('{0}{1}').format(u(filepart), u('.m'))) or os.path.exists(u('{0}{1}').format(u(filepart), u('.M'))):
return 'Objective-C'
if os.path.exists(u('{0}{1}').format(u(filepart), u('.mm'))) or os.path.exists(u('{0}{1}').format(u(filepart), u('.MM'))):
return 'Objective-C++'
if re.match(r'\.m$', extension, re.IGNORECASE) and (os.path.exists(u('{0}{1}').format(u(filepart), u('.h'))) or os.path.exists(u('{0}{1}').format(u(filepart), u('.H')))):
return 'Objective-C'
if re.match(r'\.mm$', extension, re.IGNORECASE) and (os.path.exists(u('{0}{1}').format(u(filepart), u('.h'))) or os.path.exists(u('{0}{1}').format(u(filepart), u('.H')))):
return 'Objective-C++'
return None return None
@ -236,6 +253,13 @@ def get_lexer(language):
return None return None
def use_root_language(language, lexer):
if lexer and hasattr(lexer, 'root_lexer'):
return u(lexer.root_lexer.name)
return language
def get_language_from_json(language, key): def get_language_from_json(language, key):
"""Finds the given language in a json file.""" """Finds the given language in a json file."""
@ -299,6 +323,12 @@ def custom_pygments_guess_lexer_for_filename(_fn, _text, **options):
return lexer(**options) return lexer(**options)
result.append(customize_lexer_priority(_fn, rv, lexer)) result.append(customize_lexer_priority(_fn, rv, lexer))
matlab = list(filter(lambda x: x[2].name.lower() == 'matlab', result))
if len(matlab) > 0:
objc = list(filter(lambda x: x[2].name.lower() == 'objective-c', result))
if objc and objc[0][0] == matlab[0][0]:
raise SkipHeartbeat('Skipping because not enough language accuracy.')
def type_sort(t): def type_sort(t):
# sort by: # sort by:
# - analyse score # - analyse score
@ -322,7 +352,17 @@ def customize_lexer_priority(file_name, accuracy, lexer):
elif lexer_name == 'matlab': elif lexer_name == 'matlab':
available_extensions = extensions_in_same_folder(file_name) available_extensions = extensions_in_same_folder(file_name)
if '.mat' in available_extensions: if '.mat' in available_extensions:
priority = 0.06 accuracy += 0.01
if '.h' not in available_extensions:
accuracy += 0.01
elif lexer_name == 'objective-c':
available_extensions = extensions_in_same_folder(file_name)
if '.mat' in available_extensions:
accuracy -= 0.01
else:
accuracy += 0.01
if '.h' in available_extensions:
accuracy += 0.01
return (accuracy, priority, lexer) return (accuracy, priority, lexer)