upgrade wakatime cli to v4.1.8

parent ccb336028d
commit 3f65a3ab0c

21 changed files with 296 additions and 221 deletions
@@ -1,7 +1,7 @@
 __title__ = 'wakatime'
 __description__ = 'Common interface to the WakaTime api.'
 __url__ = 'https://github.com/wakatime/wakatime'
-__version_info__ = ('4', '1', '6')
+__version_info__ = ('4', '1', '8')
 __version__ = '.'.join(__version_info__)
 __author__ = 'Alan Hamlett'
 __author_email__ = 'alan@wakatime.com'
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 """
-    wakatime.languages
-    ~~~~~~~~~~~~~~~~~~
+    wakatime.dependencies
+    ~~~~~~~~~~~~~~~~~~~~~

     Parse dependencies from a source code file.

@@ -10,10 +10,12 @@
 """

 import logging
+import re
 import sys
 import traceback

 from ..compat import u, open, import_module
+from ..exceptions import NotYetImplemented


 log = logging.getLogger('WakaTime')
@@ -24,26 +26,28 @@ class TokenParser(object):
     language, inherit from this class and implement the :meth:`parse` method
     to return a list of dependency strings.
     """
-    source_file = None
-    lexer = None
-    dependencies = []
-    tokens = []
+    exclude = []

     def __init__(self, source_file, lexer=None):
+        self._tokens = None
+        self.dependencies = []
         self.source_file = source_file
         self.lexer = lexer
+        self.exclude = [re.compile(x, re.IGNORECASE) for x in self.exclude]
+
+    @property
+    def tokens(self):
+        if self._tokens is None:
+            self._tokens = self._extract_tokens()
+        return self._tokens

     def parse(self, tokens=[]):
         """ Should return a list of dependencies.
         """
-        if not tokens and not self.tokens:
-            self.tokens = self._extract_tokens()
-        raise Exception('Not yet implemented.')
+        raise NotYetImplemented()

     def append(self, dep, truncate=False, separator=None, truncate_to=None,
                strip_whitespace=True):
-        if dep == 'as':
-            print('***************** as')
         self._save_dependency(
             dep,
             truncate=truncate,
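Review note: `tokens` moves from a shared, mutable class attribute to a lazily computed per-instance cache, and the `exclude` patterns are compiled once in `__init__`. A minimal standalone sketch of the caching pattern (names here are illustrative, not from this commit):

class LazyTokens(object):

    def __init__(self, source):
        self.source = source
        self._tokens = None  # filled on first access, then reused

    @property
    def tokens(self):
        if self._tokens is None:  # the expensive lexing step runs at most once
            self._tokens = self._extract_tokens()
        return self._tokens

    def _extract_tokens(self):
        return self.source.split()

parser = LazyTokens('import os')
assert parser.tokens is parser.tokens  # cached: the same list on every access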
@@ -52,6 +56,9 @@ class TokenParser(object):
             strip_whitespace=strip_whitespace,
         )

+    def partial(self, token):
+        return u(token).split('.')[-1]
+
     def _extract_tokens(self):
         if self.lexer:
             try:
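Review note: the new `partial()` helper centralizes the `u(token).split('.')[-1]` idiom repeated across the parsers below; a Pygments token type stringifies with dots, so the last component names the token kind. Standalone sketch, using a plain string in place of a real token object:

def partial(token):
    return str(token).split('.')[-1]

assert partial('Token.Name.Namespace') == 'Namespace'
assert partial('Token.Comment.Preproc') == 'Preproc'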
@@ -73,13 +80,21 @@ class TokenParser(object):
             separator = u('.')
         separator = u(separator)
         dep = dep.split(separator)
-        if truncate_to is None or truncate_to < 0 or truncate_to > len(dep) - 1:
-            truncate_to = len(dep) - 1
-        dep = dep[0] if len(dep) == 1 else separator.join(dep[0:truncate_to])
+        if truncate_to is None or truncate_to < 1:
+            truncate_to = 1
+        if truncate_to > len(dep):
+            truncate_to = len(dep)
+        dep = dep[0] if len(dep) == 1 else separator.join(dep[:truncate_to])
         if strip_whitespace:
             dep = dep.strip()
-        if dep:
-            self.dependencies.append(dep)
+        if dep and (not separator or not dep.startswith(separator)):
+            should_exclude = False
+            for compiled in self.exclude:
+                if compiled.search(dep):
+                    should_exclude = True
+                    break
+            if not should_exclude:
+                self.dependencies.append(dep)


 class DependencyParser(object):
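Review note: a sketch of what the reworked tail of `_save_dependency` now does. `truncate_to` is clamped to the range 1..len(parts), and a dependency is dropped when any compiled `exclude` pattern matches. This is a standalone approximation of the truncation-plus-filter path only; the pattern list is illustrative:

import re

exclude = [re.compile(r'^os$', re.IGNORECASE)]  # illustrative pattern list

def save_dependency(dep, truncate_to=None, separator='.'):
    parts = dep.split(separator)
    if truncate_to is None or truncate_to < 1:
        truncate_to = 1
    if truncate_to > len(parts):
        truncate_to = len(parts)
    dep = parts[0] if len(parts) == 1 else separator.join(parts[:truncate_to])
    if dep and not any(p.search(dep) for p in exclude):
        return dep   # kept as a dependency
    return None      # empty or matched an exclude pattern

assert save_dependency('django.db.models') == 'django'
assert save_dependency('os.path.join') is None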
68  plugin/packages/wakatime/dependencies/c_cpp.py  Normal file
@@ -0,0 +1,68 @@
+# -*- coding: utf-8 -*-
+"""
+    wakatime.languages.c_cpp
+    ~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Parse dependencies from C++ code.
+
+    :copyright: (c) 2014 Alan Hamlett.
+    :license: BSD, see LICENSE for more details.
+"""
+
+from . import TokenParser
+
+
+class CppParser(TokenParser):
+    exclude = [
+        r'^stdio\.h$',
+        r'^stdlib\.h$',
+        r'^string\.h$',
+        r'^time\.h$',
+    ]
+
+    def parse(self):
+        for index, token, content in self.tokens:
+            self._process_token(token, content)
+        return self.dependencies
+
+    def _process_token(self, token, content):
+        if self.partial(token) == 'Preproc':
+            self._process_preproc(token, content)
+        else:
+            self._process_other(token, content)
+
+    def _process_preproc(self, token, content):
+        if content.strip().startswith('include ') or content.strip().startswith("include\t"):
+            content = content.replace('include', '', 1).strip().strip('"').strip('<').strip('>').strip()
+            self.append(content)
+
+    def _process_other(self, token, content):
+        pass
+
+
+class CParser(TokenParser):
+    exclude = [
+        r'^stdio\.h$',
+        r'^stdlib\.h$',
+        r'^string\.h$',
+        r'^time\.h$',
+    ]
+
+    def parse(self):
+        for index, token, content in self.tokens:
+            self._process_token(token, content)
+        return self.dependencies
+
+    def _process_token(self, token, content):
+        if self.partial(token) == 'Preproc':
+            self._process_preproc(token, content)
+        else:
+            self._process_other(token, content)
+
+    def _process_preproc(self, token, content):
+        if content.strip().startswith('include ') or content.strip().startswith("include\t"):
+            content = content.replace('include', '', 1).strip().strip('"').strip('<').strip('>').strip()
+            self.append(content)
+
+    def _process_other(self, token, content):
+        pass
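Review note: both parsers strip the `include` keyword and any surrounding quotes or angle brackets from the header name; the `exclude` lists then drop a few common C standard headers. A standalone rendering of that string handling:

def header_from_preproc(content):
    if content.strip().startswith(('include ', 'include\t')):
        return content.replace('include', '', 1).strip().strip('"').strip('<').strip('>').strip()
    return None

assert header_from_preproc('include <stdio.h>') == 'stdio.h'   # later dropped by exclude
assert header_from_preproc('include "mylib.h"') == 'mylib.h'   # kept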
@@ -26,10 +26,8 @@ class JsonParser(TokenParser):
     state = None
     level = 0

-    def parse(self, tokens=[]):
+    def parse(self):
         self._process_file_name(os.path.basename(self.source_file))
-        if not tokens and not self.tokens:
-            self.tokens = self._extract_tokens()
         for index, token, content in self.tokens:
             self._process_token(token, content)
         return self.dependencies
@@ -10,20 +10,17 @@
 """

 from . import TokenParser
-from ..compat import u


 class CSharpParser(TokenParser):

-    def parse(self, tokens=[]):
-        if not tokens and not self.tokens:
-            self.tokens = self._extract_tokens()
+    def parse(self):
         for index, token, content in self.tokens:
             self._process_token(token, content)
         return self.dependencies

     def _process_token(self, token, content):
-        if u(token).split('.')[-1] == 'Namespace':
+        if self.partial(token) == 'Namespace':
             self._process_namespace(token, content)
         else:
             self._process_other(token, content)
96  plugin/packages/wakatime/dependencies/jvm.py  Normal file
@@ -0,0 +1,96 @@
+# -*- coding: utf-8 -*-
+"""
+    wakatime.languages.java
+    ~~~~~~~~~~~~~~~~~~~~~~~
+
+    Parse dependencies from Java code.
+
+    :copyright: (c) 2014 Alan Hamlett.
+    :license: BSD, see LICENSE for more details.
+"""
+
+from . import TokenParser
+from ..compat import u
+
+
+class JavaParser(TokenParser):
+    exclude = [
+        r'^java\.',
+        r'^javax\.',
+        r'^import$',
+        r'^package$',
+        r'^namespace$',
+        r'^static$',
+    ]
+    state = None
+    buffer = u('')
+
+    def parse(self):
+        for index, token, content in self.tokens:
+            self._process_token(token, content)
+        return self.dependencies
+
+    def _process_token(self, token, content):
+        if self.partial(token) == 'Namespace':
+            self._process_namespace(token, content)
+        if self.partial(token) == 'Name':
+            self._process_name(token, content)
+        elif self.partial(token) == 'Attribute':
+            self._process_attribute(token, content)
+        elif self.partial(token) == 'Operator':
+            self._process_operator(token, content)
+        else:
+            self._process_other(token, content)
+
+    def _process_namespace(self, token, content):
+        if u(content) == u('import'):
+            self.state = 'import'
+
+        elif self.state == 'import':
+            keywords = [
+                u('package'),
+                u('namespace'),
+                u('static'),
+            ]
+            if u(content) in keywords:
+                return
+            self.buffer = u('{0}{1}').format(self.buffer, u(content))
+
+        elif self.state == 'import-finished':
+            content = content.split(u('.'))
+
+            if len(content) == 1:
+                self.append(content[0])
+
+            elif len(content) > 1:
+                if len(content[0]) == 3:
+                    content = content[1:]
+                if content[-1] == u('*'):
+                    content = content[:len(content) - 1]
+
+                if len(content) == 1:
+                    self.append(content[0])
+                elif len(content) > 1:
+                    self.append(u('.').join(content[:2]))
+
+            self.state = None
+
+    def _process_name(self, token, content):
+        if self.state == 'import':
+            self.buffer = u('{0}{1}').format(self.buffer, u(content))
+
+    def _process_attribute(self, token, content):
+        if self.state == 'import':
+            self.buffer = u('{0}{1}').format(self.buffer, u(content))
+
+    def _process_operator(self, token, content):
+        if u(content) == u(';'):
+            self.state = 'import-finished'
+            self._process_namespace(token, self.buffer)
+            self.state = None
+            self.buffer = u('')
+        elif self.state == 'import':
+            self.buffer = u('{0}{1}').format(self.buffer, u(content))
+
+    def _process_other(self, token, content):
+        pass
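Review note: `JavaParser` is a small state machine. Namespace, Name, Attribute, and Operator tokens accumulate into `buffer` until a `;` marks the import finished, and the buffered path is then reduced to a dependency. A standalone sketch of that final reduction step (inputs are hypothetical):

def java_dependency(buffer):
    parts = buffer.split('.')
    if len(parts) == 1:
        return parts[0]
    if len(parts[0]) == 3:          # drops three-letter prefixes such as 'com' or 'org'
        parts = parts[1:]
    if parts and parts[-1] == '*':  # wildcard imports keep only the package path
        parts = parts[:-1]
    if not parts:
        return None
    return parts[0] if len(parts) == 1 else '.'.join(parts[:2])

assert java_dependency('com.example.util.Lists') == 'example.util'
assert java_dependency('org.junit.*') == 'junit'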
@@ -17,15 +17,13 @@ class PhpParser(TokenParser):
     state = None
     parens = 0

-    def parse(self, tokens=[]):
-        if not tokens and not self.tokens:
-            self.tokens = self._extract_tokens()
+    def parse(self):
         for index, token, content in self.tokens:
             self._process_token(token, content)
         return self.dependencies

     def _process_token(self, token, content):
-        if u(token).split('.')[-1] == 'Keyword':
+        if self.partial(token) == 'Keyword':
             self._process_keyword(token, content)
         elif u(token) == 'Token.Literal.String.Single' or u(token) == 'Token.Literal.String.Double':
             self._process_literal_string(token, content)
@@ -33,9 +31,9 @@ class PhpParser(TokenParser):
             self._process_name(token, content)
         elif u(token) == 'Token.Name.Function':
             self._process_function(token, content)
-        elif u(token).split('.')[-1] == 'Punctuation':
+        elif self.partial(token) == 'Punctuation':
             self._process_punctuation(token, content)
-        elif u(token).split('.')[-1] == 'Text':
+        elif self.partial(token) == 'Text':
             self._process_text(token, content)
         else:
             self._process_other(token, content)
@@ -10,33 +10,30 @@
 """

 from . import TokenParser
-from ..compat import u


 class PythonParser(TokenParser):
     state = None
     parens = 0
     nonpackage = False
+    exclude = [
+        r'^os$',
+        r'^sys\.',
+    ]

-    def parse(self, tokens=[]):
-        if not tokens and not self.tokens:
-            self.tokens = self._extract_tokens()
+    def parse(self):
         for index, token, content in self.tokens:
             self._process_token(token, content)
         return self.dependencies

     def _process_token(self, token, content):
-        if u(token).split('.')[-1] == 'Namespace':
+        if self.partial(token) == 'Namespace':
             self._process_namespace(token, content)
-        elif u(token).split('.')[-1] == 'Name':
-            self._process_name(token, content)
-        elif u(token).split('.')[-1] == 'Word':
-            self._process_word(token, content)
-        elif u(token).split('.')[-1] == 'Operator':
+        elif self.partial(token) == 'Operator':
             self._process_operator(token, content)
-        elif u(token).split('.')[-1] == 'Punctuation':
+        elif self.partial(token) == 'Punctuation':
             self._process_punctuation(token, content)
-        elif u(token).split('.')[-1] == 'Text':
+        elif self.partial(token) == 'Text':
             self._process_text(token, content)
         else:
             self._process_other(token, content)
@@ -50,38 +47,6 @@ class PythonParser(TokenParser):
         else:
             self._process_import(token, content)

-    def _process_name(self, token, content):
-        if self.state is not None:
-            if self.nonpackage:
-                self.nonpackage = False
-            else:
-                if self.state == 'from':
-                    self.append(content, truncate=True, truncate_to=0)
-                if self.state == 'from-2' and content != 'import':
-                    self.append(content, truncate=True, truncate_to=0)
-                elif self.state == 'import':
-                    self.append(content, truncate=True, truncate_to=0)
-                elif self.state == 'import-2':
-                    self.append(content, truncate=True, truncate_to=0)
-        else:
-            self.state = None
-
-    def _process_word(self, token, content):
-        if self.state is not None:
-            if self.nonpackage:
-                self.nonpackage = False
-            else:
-                if self.state == 'from':
-                    self.append(content, truncate=True, truncate_to=0)
-                if self.state == 'from-2' and content != 'import':
-                    self.append(content, truncate=True, truncate_to=0)
-                elif self.state == 'import':
-                    self.append(content, truncate=True, truncate_to=0)
-                elif self.state == 'import-2':
-                    self.append(content, truncate=True, truncate_to=0)
-        else:
-            self.state = None
-
     def _process_operator(self, token, content):
         if self.state is not None:
             if content == '.':
@@ -106,15 +71,15 @@ class PythonParser(TokenParser):
     def _process_import(self, token, content):
         if not self.nonpackage:
             if self.state == 'from':
-                self.append(content, truncate=True, truncate_to=0)
+                self.append(content, truncate=True, truncate_to=1)
                 self.state = 'from-2'
             elif self.state == 'from-2' and content != 'import':
-                self.append(content, truncate=True, truncate_to=0)
+                self.append(content, truncate=True, truncate_to=1)
             elif self.state == 'import':
-                self.append(content, truncate=True, truncate_to=0)
+                self.append(content, truncate=True, truncate_to=1)
                 self.state = 'import-2'
             elif self.state == 'import-2':
-                self.append(content, truncate=True, truncate_to=0)
+                self.append(content, truncate=True, truncate_to=1)
             else:
                 self.state = None
         self.nonpackage = False
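Review note: the `truncate_to` bump from 0 to 1 pairs with the new 1-based clamp in `_save_dependency` (see the TokenParser hunk above). With the old value the kept slice was `dep[0:0]`, an empty string, so multi-segment imports were silently discarded. The new arithmetic, in isolation:

parts = 'os.path'.split('.')
assert '.'.join(parts[:0]) == ''    # old truncate_to=0: empty string, dependency lost
assert '.'.join(parts[:1]) == 'os'  # new truncate_to=1: top-level package kept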
@@ -71,9 +71,7 @@ KEYWORDS = [

 class LassoJavascriptParser(TokenParser):

-    def parse(self, tokens=[]):
-        if not tokens and not self.tokens:
-            self.tokens = self._extract_tokens()
+    def parse(self):
         for index, token, content in self.tokens:
             self._process_token(token, content)
         return self.dependencies
@@ -99,9 +97,7 @@ class HtmlDjangoParser(TokenParser):
     current_attr = None
     current_attr_value = None

-    def parse(self, tokens=[]):
-        if not tokens and not self.tokens:
-            self.tokens = self._extract_tokens()
+    def parse(self):
         for index, token, content in self.tokens:
             self._process_token(token, content)
         return self.dependencies
@@ -22,7 +22,7 @@ FILES = {

 class UnknownParser(TokenParser):

-    def parse(self, tokens=[]):
+    def parse(self):
         self._process_file_name(os.path.basename(self.source_file))
         return self.dependencies

14  plugin/packages/wakatime/exceptions.py  Normal file
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+"""
+    wakatime.exceptions
+    ~~~~~~~~~~~~~~~~~~~
+
+    Custom exceptions.
+
+    :copyright: (c) 2015 Alan Hamlett.
+    :license: BSD, see LICENSE for more details.
+"""
+
+
+class NotYetImplemented(Exception):
+    """This method needs to be implemented."""
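Review note: this exception backs the `raise NotYetImplemented()` calls added in TokenParser above and in the project base class below, replacing the old ad-hoc `raise Exception('Not yet implemented.')` and the silent `return None` stubs. Minimal usage sketch:

class NotYetImplemented(Exception):
    """This method needs to be implemented."""


class SomeParser(object):  # stand-in for a TokenParser subclass
    def parse(self):
        raise NotYetImplemented()

try:
    SomeParser().parse()
except NotYetImplemented:
    print('parse() is not implemented for this language yet')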
@@ -1,37 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-    wakatime.languages.c_cpp
-    ~~~~~~~~~~~~~~~~~~~~~~~~
-
-    Parse dependencies from C++ code.
-
-    :copyright: (c) 2014 Alan Hamlett.
-    :license: BSD, see LICENSE for more details.
-"""
-
-from . import TokenParser
-from ..compat import u
-
-
-class CppParser(TokenParser):
-
-    def parse(self, tokens=[]):
-        if not tokens and not self.tokens:
-            self.tokens = self._extract_tokens()
-        for index, token, content in self.tokens:
-            self._process_token(token, content)
-        return self.dependencies
-
-    def _process_token(self, token, content):
-        if u(token).split('.')[-1] == 'Preproc':
-            self._process_preproc(token, content)
-        else:
-            self._process_other(token, content)
-
-    def _process_preproc(self, token, content):
-        if content.strip().startswith('include ') or content.strip().startswith("include\t"):
-            content = content.replace('include', '', 1).strip()
-            self.append(content)
-
-    def _process_other(self, token, content):
-        pass
@@ -1,36 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-    wakatime.languages.java
-    ~~~~~~~~~~~~~~~~~~~~~~~
-
-    Parse dependencies from Java code.
-
-    :copyright: (c) 2014 Alan Hamlett.
-    :license: BSD, see LICENSE for more details.
-"""
-
-from . import TokenParser
-from ..compat import u
-
-
-class JavaParser(TokenParser):
-
-    def parse(self, tokens=[]):
-        if not tokens and not self.tokens:
-            self.tokens = self._extract_tokens()
-        for index, token, content in self.tokens:
-            self._process_token(token, content)
-        return self.dependencies
-
-    def _process_token(self, token, content):
-        if u(token).split('.')[-1] == 'Namespace':
-            self._process_namespace(token, content)
-        else:
-            self._process_other(token, content)
-
-    def _process_namespace(self, token, content):
-        if content != 'import' and content != 'package' and content != 'namespace':
-            self.append(content, truncate=True)
-
-    def _process_other(self, token, content):
-        pass
@@ -16,8 +16,8 @@ import sys
 from .compat import u
 try:
     from collections import OrderedDict  # pragma: nocover
-except ImportError:
-    from .packages.ordereddict import OrderedDict  # pragma: nocover
+except ImportError:  # pragma: nocover
+    from .packages.ordereddict import OrderedDict
 try:
     from .packages import simplejson as json  # pragma: nocover
 except (ImportError, SyntaxError):  # pragma: nocover
@@ -27,12 +27,12 @@ except (ImportError, SyntaxError):  # pragma: nocover
 class CustomEncoder(json.JSONEncoder):

     def default(self, obj):
-        if isinstance(obj, bytes):
-            obj = bytes.decode(obj)
+        if isinstance(obj, bytes):  # pragma: nocover
+            obj = u(obj)
             return json.dumps(obj)
-        try:
+        try:  # pragma: nocover
             encoded = super(CustomEncoder, self).default(obj)
-        except UnicodeDecodeError:
+        except UnicodeDecodeError:  # pragma: nocover
             obj = u(obj)
             encoded = super(CustomEncoder, self).default(obj)
         return encoded
@@ -83,19 +83,9 @@ def set_log_level(logger, args):

 def setup_logging(args, version):
     logger = logging.getLogger('WakaTime')
+    for handler in logger.handlers:
+        logger.removeHandler(handler)
     set_log_level(logger, args)
-    if len(logger.handlers) > 0:
-        formatter = JsonFormatter(datefmt='%Y/%m/%d %H:%M:%S %z')
-        formatter.setup(
-            timestamp=args.timestamp,
-            isWrite=args.isWrite,
-            entity=args.entity,
-            version=version,
-            plugin=args.plugin,
-            verbose=args.verbose,
-        )
-        logger.handlers[0].setFormatter(formatter)
-        return logger
     logfile = args.logfile
     if not logfile:
         logfile = '~/.wakatime.log'
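Review note: clearing any existing handlers up front means repeated `setup_logging()` calls no longer hit the old early-return branch or stack duplicate handlers. A standalone sketch of the failure mode being avoided (this sketch copies the handler list before removing, since mutating a list while iterating it directly is unsafe):

import logging

logger = logging.getLogger('demo')
logger.addHandler(logging.StreamHandler())
logger.addHandler(logging.StreamHandler())  # two handlers: every record emitted twice

for handler in list(logger.handlers):  # copy first, then detach each handler
    logger.removeHandler(handler)
assert logger.handlers == []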
@@ -127,7 +117,7 @@ def setup_logging(args, version):
     logging.getLogger('py.warnings').addHandler(warnings_handler)
     try:
         logging.captureWarnings(True)
-    except AttributeError:
+    except AttributeError:  # pragma: nocover
         pass  # Python >= 2.7 is needed to capture warnings

     return logger
@@ -39,12 +39,12 @@ from .session_cache import SessionCache
 from .stats import get_file_stats
 try:
     from .packages import simplejson as json  # pragma: nocover
-except (ImportError, SyntaxError):
-    import json  # pragma: nocover
+except (ImportError, SyntaxError):  # pragma: nocover
+    import json
 try:
-    from .packages import tzlocal  # pragma: nocover
+    from .packages import tzlocal
 except:  # pragma: nocover
-    from .packages import tzlocal3 as tzlocal  # pragma: nocover
+    from .packages import tzlocal3 as tzlocal


 log = logging.getLogger('WakaTime')
@@ -56,7 +56,7 @@ class FileAction(argparse.Action):
         try:
             if os.path.isfile(values):
                 values = os.path.realpath(values)
-        except:
+        except:  # pragma: nocover
             pass
         setattr(namespace, self.dest, values)

@@ -146,6 +146,8 @@ def parseArguments():
                         help='defaults to ~/.wakatime.log')
     parser.add_argument('--apiurl', dest='api_url',
                         help='heartbeats api url; for debugging with a local server')
+    parser.add_argument('--timeout', dest='timeout', type=int,
+                        help='number of seconds to wait when sending heartbeats to api')
     parser.add_argument('--config', dest='config',
                         help='defaults to ~/.wakatime.conf')
     parser.add_argument('--verbose', dest='verbose', action='store_true',
@@ -189,14 +191,14 @@ def parseArguments():
             for pattern in configs.get('settings', 'ignore').split("\n"):
                 if pattern.strip() != '':
                     args.exclude.append(pattern)
-        except TypeError:
+        except TypeError:  # pragma: nocover
             pass
     if configs.has_option('settings', 'exclude'):
         try:
             for pattern in configs.get('settings', 'exclude').split("\n"):
                 if pattern.strip() != '':
                     args.exclude.append(pattern)
-        except TypeError:
+        except TypeError:  # pragma: nocover
             pass
     if not args.include:
         args.include = []
@@ -205,7 +207,7 @@ def parseArguments():
             for pattern in configs.get('settings', 'include').split("\n"):
                 if pattern.strip() != '':
                     args.include.append(pattern)
-        except TypeError:
+        except TypeError:  # pragma: nocover
             pass
     if args.offline and configs.has_option('settings', 'offline'):
         args.offline = configs.getboolean('settings', 'offline')
@@ -221,6 +223,11 @@ def parseArguments():
         args.logfile = configs.get('settings', 'logfile')
     if not args.api_url and configs.has_option('settings', 'api_url'):
         args.api_url = configs.get('settings', 'api_url')
+    if not args.timeout and configs.has_option('settings', 'timeout'):
+        try:
+            args.timeout = int(configs.get('settings', 'timeout'))
+        except ValueError:
+            print(traceback.format_exc())

     return args, configs

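Review note: with this block the timeout can come from `--timeout` or from a `timeout` key under `[settings]` in the config file, and a non-numeric value prints a traceback instead of aborting. A standalone sketch of the same lookup, using Python 3's `configparser` in place of the already-parsed `configs` object:

import configparser

configs = configparser.RawConfigParser()
configs.read_string('[settings]\ntimeout = 60')  # stands in for ~/.wakatime.conf

timeout = None  # i.e. no --timeout flag given
if not timeout and configs.has_option('settings', 'timeout'):
    timeout = int(configs.get('settings', 'timeout'))  # raises ValueError if not numeric
assert timeout == 60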
@@ -278,12 +285,14 @@ def get_user_agent(plugin):

 def send_heartbeat(project=None, branch=None, hostname=None, stats={}, key=None, entity=None,
                    timestamp=None, isWrite=None, plugin=None, offline=None, entity_type='file',
-                   hidefilenames=None, proxy=None, api_url=None, **kwargs):
+                   hidefilenames=None, proxy=None, api_url=None, timeout=None, **kwargs):
     """Sends heartbeat as POST request to WakaTime api server.
     """

     if not api_url:
         api_url = 'https://wakatime.com/api/v1/heartbeats'
+    if not timeout:
+        timeout = 30
     log.debug('Sending heartbeat to api at %s' % api_url)
     data = {
         'time': timestamp,
@@ -342,7 +351,7 @@ def send_heartbeat(project=None, branch=None, hostname=None, stats={}, key=None,
     response = None
     try:
         response = session.post(api_url, data=request_body, headers=headers,
-                                proxies=proxies)
+                                proxies=proxies, timeout=timeout)
     except RequestException:
         exception_data = {
             sys.exc_info()[0].__name__: u(sys.exc_info()[1]),
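Review note: `timeout=` is the standard `requests` keyword argument: if the server does not respond within that many seconds, `requests.exceptions.Timeout` is raised, and since `Timeout` subclasses `RequestException` the existing handler here already covers it. Sketch:

import requests
from requests.exceptions import RequestException

try:
    requests.post('https://wakatime.com/api/v1/heartbeats',
                  data='{}', timeout=30)  # seconds to wait for the server
except RequestException as ex:  # Timeout lands here as well
    print('request failed:', type(ex).__name__)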
@@ -425,6 +434,7 @@ def execute(argv=None):
         kwargs['branch'] = branch
         kwargs['stats'] = stats
         kwargs['hostname'] = args.hostname or socket.gethostname()
+        kwargs['timeout'] = args.timeout

         if send_heartbeat(**kwargs):
             queue = Queue()
@@ -447,6 +457,7 @@ def execute(argv=None):
                     entity_type=heartbeat['type'],
                     proxy=args.proxy,
                     api_url=args.api_url,
+                    timeout=args.timeout,
                 )
                 if not sent:
                     break
@@ -31,8 +31,11 @@ class Queue(object):
     db_file = os.path.join(os.path.expanduser('~'), '.wakatime.db')
     table_name = 'heartbeat_1'

+    def get_db_file(self):
+        return self.db_file
+
     def connect(self):
-        conn = sqlite3.connect(self.db_file)
+        conn = sqlite3.connect(self.get_db_file())
         c = conn.cursor()
         c.execute('''CREATE TABLE IF NOT EXISTS {0} (
             entity text,
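Review note: routing `connect()` through the new `get_db_file()` accessor leaves a single override point for the database path, for instance so tests can point the queue at a throwaway file. The subclass below is hypothetical, not part of this commit:

import os
import sqlite3
import tempfile


class Queue(object):  # trimmed to the two methods in this hunk
    db_file = os.path.join(os.path.expanduser('~'), '.wakatime.db')

    def get_db_file(self):
        return self.db_file

    def connect(self):
        return sqlite3.connect(self.get_db_file())


class TestQueue(Queue):  # hypothetical test double
    def get_db_file(self):
        return os.path.join(tempfile.gettempdir(), 'wakatime-tests.db')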
@@ -47,9 +50,8 @@ class Queue(object):
         '''.format(self.table_name))
         return (conn, c)

-
     def push(self, data, stats, plugin, misc=None):
-        if not HAS_SQL:
+        if not HAS_SQL:  # pragma: nocover
             return
         try:
             conn, c = self.connect()
@@ -70,9 +72,8 @@ class Queue(object):
         except sqlite3.Error:
             log.error(traceback.format_exc())

-
     def pop(self):
-        if not HAS_SQL:
+        if not HAS_SQL:  # pragma: nocover
             return None
         tries = 3
         wait = 0.1
@@ -96,12 +97,12 @@ class Queue(object):
                 if row[index] is not None:
                     clauses.append('{0}=?'.format(row_name))
                     values.append(row[index])
-                else:
+                else:  # pragma: nocover
                     clauses.append('{0} IS NULL'.format(row_name))
                 index += 1
             if len(values) > 0:
                 c.execute('DELETE FROM {0} WHERE {1}'.format(self.table_name, ' AND '.join(clauses)), values)
-            else:
+            else:  # pragma: nocover
                 c.execute('DELETE FROM {0} WHERE {1}'.format(self.table_name, ' AND '.join(clauses)))
             conn.commit()
             if row is not None:
@@ -11,6 +11,8 @@

 import logging

+from ..exceptions import NotYetImplemented
+

 log = logging.getLogger('WakaTime')

@@ -30,14 +32,14 @@ class BaseProject(object):
         returns True if project is valid, otherwise
         returns False.
         """
-        return False  # pragma: nocover
+        raise NotYetImplemented()

     def name(self):
         """ Returns the project's name.
         """
-        return None
+        raise NotYetImplemented()

     def branch(self):
         """ Returns the current branch.
         """
-        return None  # pragma: nocover
+        raise NotYetImplemented()
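Review note: same pattern as `TokenParser.parse` above. The project base class now raises `NotYetImplemented` instead of silently returning `False` or `None`, so a backend that forgets an override fails loudly. Standalone sketch (the subclass is hypothetical):

class NotYetImplemented(Exception):
    """This method needs to be implemented."""


class BaseProject(object):
    def name(self):
        raise NotYetImplemented()


class FakeBackend(BaseProject):  # hypothetical override
    def name(self):
        return 'my-repo'

assert FakeBackend().name() == 'my-repo'
try:
    BaseProject().name()
except NotYetImplemented:
    print('name() must be overridden by each project backend')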
@@ -29,7 +29,7 @@ class Mercurial(BaseProject):
     def name(self):
         if self.configDir:
             return u(os.path.basename(os.path.dirname(self.configDir)))
-        return None
+        return None  # pragma: nocover

     def branch(self):
         if self.configDir:
@@ -37,13 +37,13 @@ class Mercurial(BaseProject):
             try:
                 with open(branch_file, 'r', encoding='utf-8') as fh:
                     return u(fh.readline().strip().rsplit('/', 1)[-1])
-            except UnicodeDecodeError:
+            except UnicodeDecodeError:  # pragma: nocover
                 try:
                     with open(branch_file, 'r', encoding=sys.getfilesystemencoding()) as fh:
                         return u(fh.readline().strip().rsplit('/', 1)[-1])
                 except:
                     log.exception("Exception:")
-            except IOError:
+            except IOError:  # pragma: nocover
                 log.exception("Exception:")
         return u('default')

@@ -18,7 +18,7 @@ from .base import BaseProject
 from ..compat import u, open
 try:
     from collections import OrderedDict
-except ImportError:
+except ImportError:  # pragma: nocover
     from ..packages.ordereddict import OrderedDict  # pragma: nocover


@@ -33,12 +33,12 @@ class Subversion(BaseProject):

     def name(self):
         if 'Repository Root' not in self.info:
-            return None
+            return None  # pragma: nocover
         return u(self.info['Repository Root'].split('/')[-1].split('\\')[-1])

     def branch(self):
         if 'URL' not in self.info:
-            return None
+            return None  # pragma: nocover
         return u(self.info['URL'].split('/')[-1].split('\\')[-1])

     def _find_binary(self):
@@ -46,8 +46,8 @@ class SessionCache(object):
         """Saves a requests.Session object for the next heartbeat process.
         """

-        if not HAS_SQL:
-            return  # pragma: nocover
+        if not HAS_SQL:  # pragma: nocover
+            return
         try:
             conn, c = self.connect()
             c.execute('DELETE FROM session')
@@ -67,14 +67,14 @@ class SessionCache(object):
         Gets Session from sqlite3 cache or creates a new Session.
         """

-        if not HAS_SQL:
+        if not HAS_SQL:  # pragma: nocover
             return requests.session()

         try:
             conn, c = self.connect()
         except:
             log.error(traceback.format_exc())
-            return requests.session()  # pragma: nocover
+            return requests.session()

         session = None
         try:
@@ -83,12 +83,12 @@ class SessionCache(object):
             row = c.fetchone()
             if row is not None:
                 session = pickle.loads(row[0])
-        except:
+        except:  # pragma: nocover
             log.error(traceback.format_exc())

         try:
             conn.close()
-        except:
+        except:  # pragma: nocover
             log.error(traceback.format_exc())

         return session if session is not None else requests.session()
@@ -98,7 +98,7 @@ class SessionCache(object):
         """Clears all cached Session objects.
         """

-        if not HAS_SQL:
+        if not HAS_SQL:  # pragma: nocover
             return
         try:
             conn, c = self.connect()
@@ -14,11 +14,11 @@ import os
 import sys

 from .compat import u, open
-from .languages import DependencyParser
+from .dependencies import DependencyParser

-if sys.version_info[0] == 2:
+if sys.version_info[0] == 2:  # pragma: nocover
     sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'packages', 'pygments_py2'))
-else:
+else:  # pragma: nocover
     sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'packages', 'pygments_py3'))
 from pygments.lexers import get_lexer_by_name, guess_lexer_for_filename
 from pygments.modeline import get_filetype_from_buffer
@@ -35,11 +35,8 @@ def guess_language(file_name):
     """

     language = get_language_from_extension(file_name)
-    if language:
-        return language, None
-
     lexer = smart_guess_lexer(file_name)
-    if lexer:
+    if language is None and lexer is not None:
         language = u(lexer.name)

     return language, lexer
@@ -78,7 +75,7 @@ def guess_lexer_using_filename(file_name, text):

     try:
         lexer = guess_lexer_for_filename(file_name, text)
-    except:
+    except:  # pragma: nocover
         pass

     if lexer is not None:
@@ -148,7 +145,7 @@ def number_lines_in_file(file_name):
         with open(file_name, 'r', encoding='utf-8') as fh:
             for line in fh:
                 lines += 1
-    except:
+    except:  # pragma: nocover
         try:
             with open(file_name, 'r', encoding=sys.getfilesystemencoding()) as fh:
                 for line in fh:
@@ -189,7 +186,7 @@ def get_file_contents(file_name):
     try:
         with open(file_name, 'r', encoding='utf-8') as fh:
             text = fh.read(512000)
-    except:
+    except:  # pragma: nocover
         try:
             with open(file_name, 'r', encoding=sys.getfilesystemencoding()) as fh:
                 text = fh.read(512000)