upgrade wakatime-cli to v3.0.2

This commit is contained in:
Alan Hamlett 2014-12-25 01:04:39 -06:00
parent ad4df93b04
commit ee8c0dfed8
11 changed files with 269 additions and 47 deletions

View file

@ -3,6 +3,12 @@ History
-------
3.0.2 (2014-12-25)
++++++++++++++++++
- detect frameworks from JavaScript and JSON files
3.0.1 (2014-12-23)
++++++++++++++++++

View file

@ -13,7 +13,7 @@
from __future__ import print_function
__title__ = 'wakatime'
__version__ = '3.0.1'
__version__ = '3.0.2'
__author__ = 'Alan Hamlett'
__license__ = 'BSD'
__copyright__ = 'Copyright 2014 Alan Hamlett'

View file

@ -10,8 +10,9 @@
"""
import logging
import traceback
from ..compat import open, import_module
from ..compat import u, open, import_module
log = logging.getLogger('WakaTime')
@ -38,8 +39,17 @@ class TokenParser(object):
self.tokens = self._extract_tokens()
raise Exception('Not yet implemented.')
def append(self, dep, truncate=True):
self._save_dependency(dep, truncate=truncate)
def append(self, dep, truncate=False, separator=None, truncate_to=None,
           strip_whitespace=True):
    """Record *dep* as a detected dependency.

    Thin wrapper around :meth:`_save_dependency`, which performs the
    optional truncation (splitting on *separator* and keeping leading
    components up to *truncate_to*) and whitespace stripping.

    :param dep: dependency name as found in the source tokens.
    :param truncate: when True, split *dep* on *separator* and keep only
        its leading components.
    :param separator: separator used for truncation (defaults to '.').
    :param truncate_to: component index to truncate to; None means the
        parser's default truncation.
    :param strip_whitespace: strip surrounding whitespace before saving.
    """
    # NOTE: a leftover debug statement (print of '***************** as')
    # was removed here; it produced stray console output for the common
    # Python keyword 'as'.
    self._save_dependency(
        dep,
        truncate=truncate,
        truncate_to=truncate_to,
        separator=separator,
        strip_whitespace=strip_whitespace,
    )
def _extract_tokens(self):
if self.lexer:
@ -47,8 +57,18 @@ class TokenParser(object):
return self.lexer.get_tokens_unprocessed(fh.read(512000))
return []
def _save_dependency(self, dep, truncate=True):
dep = dep.strip().split('.')[0].strip() if truncate else dep.strip()
def _save_dependency(self, dep, truncate=False, separator=None,
                     truncate_to=None, strip_whitespace=True):
    """Normalize *dep* and append it to ``self.dependencies``.

    When *truncate* is True, *dep* is split on *separator* and only the
    leading components are kept: components ``0..truncate_to`` inclusive,
    or all but the last component when *truncate_to* is None or out of
    range. Empty results are discarded.
    """
    if truncate:
        if separator is None:
            separator = u('.')
        separator = u(separator)
        parts = dep.split(separator)
        if len(parts) == 1:
            dep = parts[0]
        else:
            # None / out-of-range means the default truncation: drop the
            # final component (e.g. 'A\\B\\C' -> 'A\\B' for PHP 'use').
            if truncate_to is None or truncate_to < 0 or truncate_to > len(parts) - 2:
                truncate_to = len(parts) - 2
            # BUGFIX: the slice must include index *truncate_to*; the old
            # exclusive slice made truncate_to=0 (used for Python imports,
            # e.g. 'os.path' -> 'os') produce '' and drop the dependency.
            dep = separator.join(parts[0:truncate_to + 1])
    if strip_whitespace:
        dep = dep.strip()
    if dep:
        self.dependencies.append(dep)
@ -63,13 +83,20 @@ class DependencyParser(object):
self.lexer = lexer
if self.lexer:
module_name = self.lexer.__module__.split('.')[-1]
class_name = self.lexer.__class__.__name__.replace('Lexer', 'Parser', 1)
else:
module_name = 'unknown'
class_name = 'UnknownParser'
try:
module = import_module('.%s' % module_name, package=__package__)
try:
module_name = self.lexer.__module__.split('.')[-1]
class_name = self.lexer.__class__.__name__.replace('Lexer', 'Parser', 1)
module = import_module('.%s' % module_name, package=__package__)
self.parser = getattr(module, class_name)
except ImportError as ex:
log.debug(ex)
except AttributeError:
log.debug(traceback.format_exc())
except ImportError:
log.debug(traceback.format_exc())
def parse(self):
if self.parser:

View file

@ -31,7 +31,7 @@ class CppParser(TokenParser):
def _process_preproc(self, token, content):
if content.strip().startswith('include ') or content.strip().startswith("include\t"):
content = content.replace('include', '', 1).strip()
self.append(content, truncate=False)
self.append(content)
def _process_other(self, token, content):
pass

View file

@ -0,0 +1,66 @@
# -*- coding: utf-8 -*-
"""
wakatime.languages.data
~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from data files.
:copyright: (c) 2014 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
import os
from . import TokenParser
from ..compat import u
# Well-known JSON package-manifest file names mapped to the package manager
# they imply. 'exact' selects full-name match vs. case-insensitive substring
# match (see JsonParser._process_file_name); 'dependency' is the name saved.
FILES = {
    'bower.json': {'exact': True, 'dependency': 'bower'},
    'component.json': {'exact': True, 'dependency': 'bower'},
    'package.json': {'exact': True, 'dependency': 'npm'},
}
class JsonParser(TokenParser):
    """Parse dependencies from JSON files.

    Two signals are used:

    1. The file name itself: manifests listed in ``FILES`` (e.g.
       ``package.json``) imply a package manager.
    2. Keys found directly inside a top-level ``dependencies`` or
       ``devDependencies`` object are saved as individual dependencies.
    """

    # 'dependencies' while positioned inside a dependencies object, else None.
    state = None
    # Current nesting depth of '{' braces encountered so far.
    level = 0

    def parse(self, tokens=None):
        """Return the list of dependencies detected in the source file."""
        # BUGFIX: default was a shared mutable list ([]); None is equivalent
        # here because the argument is only truth-tested.
        self._process_file_name(os.path.basename(self.source_file))
        if not tokens and not self.tokens:
            self.tokens = self._extract_tokens()
        for index, token, content in self.tokens:
            self._process_token(token, content)
        return self.dependencies

    def _process_file_name(self, file_name):
        # Map known manifest file names to their package manager.
        for key, value in FILES.items():
            found = (key == file_name) if value.get('exact') else (key.lower() in file_name.lower())
            if found:
                self.append(value['dependency'])

    def _process_token(self, token, content):
        # Dispatch on the Pygments token type's string representation.
        if u(token) == 'Token.Name.Tag':
            self._process_tag(token, content)
        elif u(token) in ('Token.Literal.String.Single', 'Token.Literal.String.Double'):
            self._process_literal_string(token, content)
        elif u(token) == 'Token.Punctuation':
            self._process_punctuation(token, content)

    def _process_tag(self, token, content):
        unquoted = content.strip('"').strip("'")
        if unquoted in ('dependencies', 'devDependencies'):
            self.state = 'dependencies'
        elif self.state == 'dependencies' and self.level == 2:
            # Keys directly inside the dependencies object are package names.
            self.append(unquoted)

    def _process_literal_string(self, token, content):
        # String values (e.g. version specifiers) carry no dependency info.
        pass

    def _process_punctuation(self, token, content):
        if content == '{':
            self.level += 1
        elif content == '}':
            self.level -= 1
            # Leaving the dependencies object resets the state machine.
            if self.state is not None and self.level <= 1:
                self.state = None

View file

@ -30,9 +30,7 @@ class CSharpParser(TokenParser):
def _process_namespace(self, token, content):
if content != 'import' and content != 'package' and content != 'namespace':
content = content.split('.')
content = content[0] if len(content) == 1 else '.'.join(content[0:len(content)-1])
self.append(content, truncate=False)
self.append(content, truncate=True)
def _process_other(self, token, content):
pass

View file

@ -30,9 +30,7 @@ class JavaParser(TokenParser):
def _process_namespace(self, token, content):
if content != 'import' and content != 'package' and content != 'namespace':
content = content.split('.')
content = content[0] if len(content) == 1 else '.'.join(content[0:len(content)-1])
self.append(content, truncate=False)
self.append(content, truncate=True)
def _process_other(self, token, content):
pass

View file

@ -42,15 +42,11 @@ class PhpParser(TokenParser):
def _process_name(self, token, content):
if self.state == 'use':
content = content.split("\\")
content = content[0] if len(content) == 1 else "\\".join(content[0:len(content)-1])
self.append(content, truncate=False)
self.append(content, truncate=True, separator=u("\\"))
def _process_function(self, token, content):
if self.state == 'use function':
content = content.split("\\")
content = content[0] if len(content) == 1 else "\\".join(content[0:len(content)-1])
self.append(content, truncate=False)
self.append(content, truncate=True, separator=u("\\"))
self.state = 'use'
def _process_keyword(self, token, content):
@ -71,7 +67,7 @@ class PhpParser(TokenParser):
content = content.strip()
if u(token) == 'Token.Literal.String.Double':
content = u('"{0}"').format(content)
self.append(content, truncate=False)
self.append(content)
self.state = None
def _process_punctuation(self, token, content):

View file

@ -5,7 +5,7 @@
Parse dependencies from Python code.
:copyright: (c) 2013 Alan Hamlett.
:copyright: (c) 2014 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
@ -45,7 +45,10 @@ class PythonParser(TokenParser):
if self.state is None:
self.state = content
else:
self._process_import(token, content)
if content == 'as':
self.nonpackage = True
else:
self._process_import(token, content)
def _process_name(self, token, content):
if self.state is not None:
@ -53,13 +56,13 @@ class PythonParser(TokenParser):
self.nonpackage = False
else:
if self.state == 'from':
self.append(content)
self.append(content, truncate=True, truncate_to=0)
if self.state == 'from-2' and content != 'import':
self.append(content)
self.append(content, truncate=True, truncate_to=0)
elif self.state == 'import':
self.append(content)
self.append(content, truncate=True, truncate_to=0)
elif self.state == 'import-2':
self.append(content)
self.append(content, truncate=True, truncate_to=0)
else:
self.state = None
@ -69,13 +72,13 @@ class PythonParser(TokenParser):
self.nonpackage = False
else:
if self.state == 'from':
self.append(content)
self.append(content, truncate=True, truncate_to=0)
if self.state == 'from-2' and content != 'import':
self.append(content)
self.append(content, truncate=True, truncate_to=0)
elif self.state == 'import':
self.append(content)
self.append(content, truncate=True, truncate_to=0)
elif self.state == 'import-2':
self.append(content)
self.append(content, truncate=True, truncate_to=0)
else:
self.state = None
@ -101,16 +104,17 @@ class PythonParser(TokenParser):
pass
def _process_import(self, token, content):
if not self.nonpackage:
if self.state == 'from':
self.append(content, truncate=True, truncate_to=0)
self.state = 'from-2'
elif self.state == 'from-2' and content != 'import':
self.append(content, truncate=True, truncate_to=0)
elif self.state == 'import':
self.append(content, truncate=True, truncate_to=0)
self.state = 'import-2'
elif self.state == 'import-2':
self.append(content, truncate=True, truncate_to=0)
else:
self.state = None
self.nonpackage = False
if self.state == 'from':
self.append(content)
self.state = 'from-2'
elif self.state == 'from-2' and content != 'import':
self.append(content)
elif self.state == 'import':
self.append(content)
self.state = 'import-2'
elif self.state == 'import-2':
self.append(content)
else:
self.state = None

View file

@ -0,0 +1,93 @@
# -*- coding: utf-8 -*-
"""
wakatime.languages.templates
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from Templates.
:copyright: (c) 2014 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
from . import TokenParser
from ..compat import u
# If these identifiers are found in the source file, treat them as evidence
# of a dependency. Must be lower-case strings.
# (Was a bare triple-quoted string: at this position, after the imports, it
# is a no-op expression statement, not a module docstring.)
KEYWORDS = [
    '_',
    '$',
    'angular',
    'assert',  # probably mocha
    'backbone',
    'batman',
    'c3',
    'can',
    'casper',
    'chai',
    'chaplin',
    'd3',
    'define',  # probably require
    'describe',  # mocha or jasmine
    'eco',
    'ember',
    'espresso',
    'expect',  # probably jasmine
    'exports',  # probably npm
    'express',
    'gulp',
    'handlebars',
    'highcharts',
    'jasmine',
    'jquery',
    'jstz',
    'ko',  # probably knockout
    'm',  # probably mithril
    'marionette',
    'meteor',
    'moment',
    'monitorio',
    'mustache',
    'phantom',
    'pickadate',
    'pikaday',
    'qunit',
    'react',
    'reactive',
    'require',  # probably the commonjs spec
    'ripple',
    'rivets',
    'socketio',
    'spine',
    'thorax',
    'underscore',
    'vue',
    'way',
    'zombie',
]
class LassoJavascriptParser(TokenParser):
    """Detect JavaScript library dependencies from identifier usage.

    Any bare identifier whose lower-cased form appears in ``KEYWORDS``
    (well-known library globals such as ``jquery`` or ``angular``) is
    saved as a dependency; string literals are scanned for famo.us
    module paths.
    """

    def parse(self, tokens=None):
        """Return the list of dependencies detected in the source file."""
        # BUGFIX: default was a shared mutable list ([]); None is equivalent
        # here because the argument is only truth-tested.
        if not tokens and not self.tokens:
            self.tokens = self._extract_tokens()
        for index, token, content in self.tokens:
            self._process_token(token, content)
        return self.dependencies

    def _process_token(self, token, content):
        if u(token) == 'Token.Name.Other':
            self._process_name(token, content)
        elif u(token) in ('Token.Literal.String.Single', 'Token.Literal.String.Double'):
            self._process_literal_string(token, content)

    def _process_name(self, token, content):
        # A well-known global identifier implies use of that library.
        if content.lower() in KEYWORDS:
            self.append(content.lower())

    def _process_literal_string(self, token, content):
        # e.g. require('famous/core/Engine') -> depends on famo.us.
        if 'famous/core/' in content.strip('"').strip("'"):
            self.append('famous')

View file

@ -0,0 +1,34 @@
# -*- coding: utf-8 -*-
"""
wakatime.languages.unknown
~~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from files of unknown language.
:copyright: (c) 2014 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
import os
from . import TokenParser
from ..compat import u
# File-name fragments mapped to the dependency they imply. 'exact' selects
# full-name match vs. case-insensitive substring match (see
# UnknownParser._process_file_name); 'dependency' is the name saved.
FILES = {
    'bower': {'exact': False, 'dependency': 'bower'},
    'grunt': {'exact': False, 'dependency': 'grunt'},
}
class UnknownParser(TokenParser):
    """Guess dependencies for files of unknown language.

    Only the file name is inspected: names matching an entry in ``FILES``
    (e.g. anything containing 'bower' or 'grunt') imply that tool as a
    dependency. Token content is never read.
    """

    def parse(self, tokens=None):
        """Return the list of dependencies implied by the file name."""
        # BUGFIX: default was a shared mutable list ([]). *tokens* is kept
        # for interface symmetry with the other parsers but is unused.
        self._process_file_name(os.path.basename(self.source_file))
        return self.dependencies

    def _process_file_name(self, file_name):
        for key, value in FILES.items():
            found = (key == file_name) if value.get('exact') else (key.lower() in file_name.lower())
            if found:
                self.append(value['dependency'])