upgrade wakatime-cli to v3.0.1
parent 520db283cb
commit 7d4d50ee62

10 changed files with 438 additions and 17 deletions
@@ -3,6 +3,18 @@ History
 -------
 
+
+3.0.1 (2014-12-23)
+++++++++++++++++++
+
+- handle unknown language when parsing dependencies
+
+
+3.0.0 (2014-12-23)
+++++++++++++++++++
+
+- detect libraries and frameworks for C++, Java, .NET, PHP, and Python files
+
+
 2.1.11 (2014-12-22)
 +++++++++++++++++++
 
@@ -13,7 +13,7 @@
 from __future__ import print_function
 
 __title__ = 'wakatime'
-__version__ = '2.1.11'
+__version__ = '3.0.1'
 __author__ = 'Alan Hamlett'
 __license__ = 'BSD'
 __copyright__ = 'Copyright 2014 Alan Hamlett'
@@ -40,7 +40,7 @@ except ImportError:
 sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
 sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'packages'))
 
-from .compat import u, open, is_py2, is_py3
+from .compat import u, open, is_py3
 from .queue import Queue
 from .log import setup_logging
 from .project import find_project
@@ -251,10 +251,10 @@ def get_user_agent(plugin):
     return user_agent
 
 
-def send_action(project=None, branch=None, stats={}, key=None, targetFile=None,
+def send_heartbeat(project=None, branch=None, stats={}, key=None, targetFile=None,
         timestamp=None, isWrite=None, plugin=None, offline=None,
         hidefilenames=None, **kwargs):
-    url = 'https://wakatime.com/api/v1/actions'
+    url = 'https://wakatime.com/api/v1/heartbeats'
     log.debug('Sending heartbeat to api at %s' % url)
     data = {
         'time': timestamp,
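The endpoint rename is the visible API change here; a minimal sketch of a POST against the new URL (assumes the `requests` package is available; the payload fields beyond 'time' and any auth headers are not shown in this hunk):

    import json
    import requests

    url = 'https://wakatime.com/api/v1/heartbeats'  # formerly .../actions
    data = {'time': 1419350400.0}  # 'time' is a UNIX epoch timestamp
    response = requests.post(url, data=json.dumps(data),
                             headers={'Content-Type': 'application/json'})
    print(response.status_code)  # the real client also sends auth, not shown here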
@@ -401,7 +401,7 @@ def main(argv=None):
             branch = project.branch()
             project_name = project.name()
 
-        if send_action(
+        if send_heartbeat(
                 project=project_name,
                 branch=branch,
                 stats=stats,
@@ -412,7 +412,7 @@ def main(argv=None):
                 heartbeat = queue.pop()
                 if heartbeat is None:
                     break
-                sent = send_action(project=heartbeat['project'],
+                sent = send_heartbeat(project=heartbeat['project'],
                                    targetFile=heartbeat['file'],
                                    timestamp=heartbeat['time'],
                                    branch=heartbeat['branch'],
@@ -40,3 +40,38 @@ elif is_py3:
         return str(text)
     open = open
     basestring = (str, bytes)
+
+
+try:
+    from importlib import import_module
+except ImportError:
+    def _resolve_name(name, package, level):
+        """Return the absolute name of the module to be imported."""
+        if not hasattr(package, 'rindex'):
+            raise ValueError("'package' not set to a string")
+        dot = len(package)
+        for x in xrange(level, 1, -1):
+            try:
+                dot = package.rindex('.', 0, dot)
+            except ValueError:
+                raise ValueError("attempted relative import beyond top-level "
+                                 "package")
+        return "%s.%s" % (package[:dot], name)
+
+    def import_module(name, package=None):
+        """Import a module.
+
+        The 'package' argument is required when performing a relative import.
+        It specifies the package to use as the anchor point from which to
+        resolve the relative import to an absolute import.
+        """
+        if name.startswith('.'):
+            if not package:
+                raise TypeError("relative imports require the 'package' "
+                                + "argument")
+            level = 0
+            for character in name:
+                if character != '.':
+                    break
+                level += 1
+            name = _resolve_name(name[level:], package, level)
+        __import__(name)
+        return sys.modules[name]
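A minimal usage sketch of the import_module fallback above (module names are illustrative; the relative form matches how DependencyParser calls it later in this commit):

    from wakatime.compat import import_module

    # absolute import
    queue_mod = import_module('wakatime.queue')

    # relative import: 'package' anchors the leading dot
    python_parser = import_module('.python', package='wakatime.languages')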
79	packages/wakatime/wakatime/languages/__init__.py	Normal file
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+"""
+    wakatime.languages
+    ~~~~~~~~~~~~~~~~~~
+
+    Parse dependencies from a source code file.
+
+    :copyright: (c) 2014 Alan Hamlett.
+    :license: BSD, see LICENSE for more details.
+"""
+
+import logging
+
+from ..compat import open, import_module
+
+
+log = logging.getLogger('WakaTime')
+
+
+class TokenParser(object):
+    """The base class for all dependency parsers. To add support for your
+    language, inherit from this class and implement the :meth:`parse` method
+    to return a list of dependency strings.
+    """
+    source_file = None
+    lexer = None
+    dependencies = []
+    tokens = []
+
+    def __init__(self, source_file, lexer=None):
+        self.source_file = source_file
+        self.lexer = lexer
+
+    def parse(self, tokens=[]):
+        """ Should return a list of dependencies.
+        """
+        if not tokens and not self.tokens:
+            self.tokens = self._extract_tokens()
+        raise Exception('Not yet implemented.')
+
+    def append(self, dep, truncate=True):
+        self._save_dependency(dep, truncate=truncate)
+
+    def _extract_tokens(self):
+        if self.lexer:
+            with open(self.source_file, 'r', encoding='utf-8') as fh:
+                return self.lexer.get_tokens_unprocessed(fh.read(512000))
+        return []
+
+    def _save_dependency(self, dep, truncate=True):
+        dep = dep.strip().split('.')[0].strip() if truncate else dep.strip()
+        if dep:
+            self.dependencies.append(dep)
+
+
+class DependencyParser(object):
+    source_file = None
+    lexer = None
+    parser = None
+
+    def __init__(self, source_file, lexer):
+        self.source_file = source_file
+        self.lexer = lexer
+
+        if self.lexer:
+            try:
+                module_name = self.lexer.__module__.split('.')[-1]
+                class_name = self.lexer.__class__.__name__.replace('Lexer', 'Parser', 1)
+                module = import_module('.%s' % module_name, package=__package__)
+                self.parser = getattr(module, class_name)
+            except ImportError as ex:
+                log.debug(ex)
+
+    def parse(self):
+        if self.parser:
+            plugin = self.parser(self.source_file, lexer=self.lexer)
+            dependencies = plugin.parse()
+            return list(set(dependencies))
+        return []
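DependencyParser resolves a parser class from the lexer's module and class names; a sketch of that convention using PythonLexer (assumes the pygments 2.x module layout that this commit vendors):

    from pygments.lexers import PythonLexer

    lexer = PythonLexer()
    module_name = lexer.__module__.split('.')[-1]  # 'python'
    class_name = lexer.__class__.__name__.replace('Lexer', 'Parser', 1)  # 'PythonParser'
    # import_module('.python', package='wakatime.languages') then resolves the
    # PythonParser class defined in the new languages/python.py below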
37	packages/wakatime/wakatime/languages/c_cpp.py	Normal file
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+"""
+    wakatime.languages.c_cpp
+    ~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Parse dependencies from C++ code.
+
+    :copyright: (c) 2014 Alan Hamlett.
+    :license: BSD, see LICENSE for more details.
+"""
+
+from . import TokenParser
+from ..compat import u
+
+
+class CppParser(TokenParser):
+
+    def parse(self, tokens=[]):
+        if not tokens and not self.tokens:
+            self.tokens = self._extract_tokens()
+        for index, token, content in self.tokens:
+            self._process_token(token, content)
+        return self.dependencies
+
+    def _process_token(self, token, content):
+        if u(token).split('.')[-1] == 'Preproc':
+            self._process_preproc(token, content)
+        else:
+            self._process_other(token, content)
+
+    def _process_preproc(self, token, content):
+        if content.strip().startswith('include ') or content.strip().startswith("include\t"):
+            content = content.replace('include', '', 1).strip()
+            self.append(content, truncate=False)
+
+    def _process_other(self, token, content):
+        pass
38	packages/wakatime/wakatime/languages/dotnet.py	Normal file
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+"""
+    wakatime.languages.dotnet
+    ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Parse dependencies from .NET code.
+
+    :copyright: (c) 2014 Alan Hamlett.
+    :license: BSD, see LICENSE for more details.
+"""
+
+from . import TokenParser
+from ..compat import u
+
+
+class CSharpParser(TokenParser):
+
+    def parse(self, tokens=[]):
+        if not tokens and not self.tokens:
+            self.tokens = self._extract_tokens()
+        for index, token, content in self.tokens:
+            self._process_token(token, content)
+        return self.dependencies
+
+    def _process_token(self, token, content):
+        if u(token).split('.')[-1] == 'Namespace':
+            self._process_namespace(token, content)
+        else:
+            self._process_other(token, content)
+
+    def _process_namespace(self, token, content):
+        if content != 'import' and content != 'package' and content != 'namespace':
+            content = content.split('.')
+            content = content[0] if len(content) == 1 else '.'.join(content[0:len(content)-1])
+            self.append(content, truncate=False)
+
+    def _process_other(self, token, content):
+        pass
38	packages/wakatime/wakatime/languages/jvm.py	Normal file
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+"""
+    wakatime.languages.java
+    ~~~~~~~~~~~~~~~~~~~~~~~
+
+    Parse dependencies from Java code.
+
+    :copyright: (c) 2014 Alan Hamlett.
+    :license: BSD, see LICENSE for more details.
+"""
+
+from . import TokenParser
+from ..compat import u
+
+
+class JavaParser(TokenParser):
+
+    def parse(self, tokens=[]):
+        if not tokens and not self.tokens:
+            self.tokens = self._extract_tokens()
+        for index, token, content in self.tokens:
+            self._process_token(token, content)
+        return self.dependencies
+
+    def _process_token(self, token, content):
+        if u(token).split('.')[-1] == 'Namespace':
+            self._process_namespace(token, content)
+        else:
+            self._process_other(token, content)
+
+    def _process_namespace(self, token, content):
+        if content != 'import' and content != 'package' and content != 'namespace':
+            content = content.split('.')
+            content = content[0] if len(content) == 1 else '.'.join(content[0:len(content)-1])
+            self.append(content, truncate=False)
+
+    def _process_other(self, token, content):
+        pass
66	packages/wakatime/wakatime/languages/php.py	Normal file
@@ -0,0 +1,66 @@
+# -*- coding: utf-8 -*-
+"""
+    wakatime.languages.php
+    ~~~~~~~~~~~~~~~~~~~~~~
+
+    Parse dependencies from PHP code.
+
+    :copyright: (c) 2014 Alan Hamlett.
+    :license: BSD, see LICENSE for more details.
+"""
+
+from . import TokenParser
+from ..compat import u
+
+
+class PhpParser(TokenParser):
+    state = None
+    parens = 0
+
+    def parse(self, tokens=[]):
+        if not tokens and not self.tokens:
+            self.tokens = self._extract_tokens()
+        for index, token, content in self.tokens:
+            self._process_token(token, content)
+        return self.dependencies
+
+    def _process_token(self, token, content):
+        if u(token).split('.')[-1] == 'Keyword':
+            self._process_keyword(token, content)
+        elif u(token) == 'Token.Literal.String.Single' or u(token) == 'Token.Literal.String.Double':
+            self._process_literal_string(token, content)
+        elif u(token).split('.')[-1] == 'Punctuation':
+            self._process_punctuation(token, content)
+        elif u(token).split('.')[-1] == 'Text':
+            self._process_text(token, content)
+        else:
+            self._process_other(token, content)
+
+    def _process_keyword(self, token, content):
+        if content == 'include' or content == 'include_once' or content == 'require' or content == 'require_once':
+            self.state = 'include'
+        else:
+            self.state = None
+
+    def _process_literal_string(self, token, content):
+        if self.state == 'include':
+            if content != '"':
+                content = content.strip()
+                if u(token) == 'Token.Literal.String.Single':
+                    content = content.strip("'")
+                self.append(content, truncate=False)
+                self.state = None
+
+    def _process_punctuation(self, token, content):
+        if content == '(':
+            self.parens += 1
+        elif content == ')':
+            self.parens -= 1
+        else:
+            self.state = None
+
+    def _process_text(self, token, content):
+        pass
+
+    def _process_other(self, token, content):
+        self.state = None
116	packages/wakatime/wakatime/languages/python.py	Normal file
@@ -0,0 +1,116 @@
+# -*- coding: utf-8 -*-
+"""
+    wakatime.languages.python
+    ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Parse dependencies from Python code.
+
+    :copyright: (c) 2013 Alan Hamlett.
+    :license: BSD, see LICENSE for more details.
+"""
+
+from . import TokenParser
+from ..compat import u
+
+
+class PythonParser(TokenParser):
+    state = None
+    parens = 0
+    nonpackage = False
+
+    def parse(self, tokens=[]):
+        if not tokens and not self.tokens:
+            self.tokens = self._extract_tokens()
+        for index, token, content in self.tokens:
+            self._process_token(token, content)
+        return self.dependencies
+
+    def _process_token(self, token, content):
+        if u(token).split('.')[-1] == 'Namespace':
+            self._process_namespace(token, content)
+        elif u(token).split('.')[-1] == 'Name':
+            self._process_name(token, content)
+        elif u(token).split('.')[-1] == 'Word':
+            self._process_word(token, content)
+        elif u(token).split('.')[-1] == 'Operator':
+            self._process_operator(token, content)
+        elif u(token).split('.')[-1] == 'Punctuation':
+            self._process_punctuation(token, content)
+        elif u(token).split('.')[-1] == 'Text':
+            self._process_text(token, content)
+        else:
+            self._process_other(token, content)
+
+    def _process_namespace(self, token, content):
+        if self.state is None:
+            self.state = content
+        else:
+            self._process_import(token, content)
+
+    def _process_name(self, token, content):
+        if self.state is not None:
+            if self.nonpackage:
+                self.nonpackage = False
+            else:
+                if self.state == 'from':
+                    self.append(content)
+                if self.state == 'from-2' and content != 'import':
+                    self.append(content)
+                elif self.state == 'import':
+                    self.append(content)
+                elif self.state == 'import-2':
+                    self.append(content)
+                else:
+                    self.state = None
+
+    def _process_word(self, token, content):
+        if self.state is not None:
+            if self.nonpackage:
+                self.nonpackage = False
+            else:
+                if self.state == 'from':
+                    self.append(content)
+                if self.state == 'from-2' and content != 'import':
+                    self.append(content)
+                elif self.state == 'import':
+                    self.append(content)
+                elif self.state == 'import-2':
+                    self.append(content)
+                else:
+                    self.state = None
+
+    def _process_operator(self, token, content):
+        if self.state is not None:
+            if content == '.':
+                self.nonpackage = True
+
+    def _process_punctuation(self, token, content):
+        if content == '(':
+            self.parens += 1
+        elif content == ')':
+            self.parens -= 1
+        self.nonpackage = False
+
+    def _process_text(self, token, content):
+        if self.state is not None:
+            if content == "\n" and self.parens == 0:
+                self.state = None
+                self.nonpackage = False
+
+    def _process_other(self, token, content):
+        pass
+
+    def _process_import(self, token, content):
+        self.nonpackage = False
+        if self.state == 'from':
+            self.append(content)
+            self.state = 'from-2'
+        elif self.state == 'from-2' and content != 'import':
+            self.append(content)
+        elif self.state == 'import':
+            self.append(content)
+            self.state = 'import-2'
+        elif self.state == 'import-2':
+            self.append(content)
+        else:
+            self.state = None
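A trace of the states above for `from os import path` (token stream as the vendored pygments PythonLexer would roughly emit it; illustrative):

    # Keyword.Namespace 'from'    -> _process_namespace: state = 'from'
    # Name.Namespace    'os'      -> _process_import: append('os'), state = 'from-2'
    # Keyword.Namespace 'import'  -> _process_import: content is 'import', state = None
    # 'path' then arrives as a plain Name with state None, so only 'os' is recorded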
@@ -14,6 +14,7 @@ import os
 import sys
 
 from .compat import u, open
+from .languages import DependencyParser
 
 if sys.version_info[0] == 2:
     sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'packages', 'pygments_py2'))
@@ -46,20 +47,17 @@ TRANSLATIONS = {
 
 
 def guess_language(file_name):
-    if file_name:
-        language = guess_language_from_extension(file_name.rsplit('.', 1)[-1])
-        if language:
-            return language
-    lexer = None
+    language, lexer = None, None
     try:
         with open(file_name, 'r', encoding='utf-8') as fh:
            lexer = guess_lexer_for_filename(file_name, fh.read(512000))
     except:
         pass
-    if lexer:
-        return translate_language(u(lexer.name))
-    else:
-        return None
+    if file_name:
+        language = guess_language_from_extension(file_name.rsplit('.', 1)[-1])
+    if lexer and language is None:
+        language = translate_language(u(lexer.name))
+    return language, lexer
 
 
 def guess_language_from_extension(extension):
@@ -89,9 +87,11 @@ def number_lines_in_file(file_name):
 
 
 def get_file_stats(file_name):
+    dependencies = []
+    language, lexer = guess_language(file_name)
+    parser = DependencyParser(file_name, lexer)
+    dependencies = parser.parse()
     stats = {
-        'language': guess_language(file_name),
+        'language': language,
+        'dependencies': dependencies,
         'lines': number_lines_in_file(file_name),
     }
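With these changes the stats dict carries dependencies alongside language and line count; a usage sketch (file path and output values illustrative):

    from wakatime.stats import get_file_stats

    stats = get_file_stats('wakatime/stats.py')
    # e.g. {'language': 'Python',
    #       'dependencies': ['logging', 'os', 'sys'],
    #       'lines': 120}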