upgrade wakatime-cli to v8.0.0

parent a7be16c948
commit ca30aca931

11 changed files with 1606 additions and 40 deletions

@@ -1,7 +1,7 @@
 __title__ = 'wakatime'
 __description__ = 'Common interface to the WakaTime api.'
 __url__ = 'https://github.com/wakatime/wakatime'
-__version_info__ = ('7', '0', '4')
+__version_info__ = ('8', '0', '0')
 __version__ = '.'.join(__version_info__)
 __author__ = 'Alan Hamlett'
 __author_email__ = 'alan@wakatime.com'

@@ -128,8 +128,6 @@ def parseArguments():

     # parse ~/.wakatime.cfg file
     configs = parseConfigFile(args.config)
-    if configs is None:
-        return args, configs

     # update args from configs
     if not args.hostname:

@@ -13,17 +13,16 @@
 from __future__ import print_function

 import os
 import sys
 import traceback

-from .compat import u, open
+from .compat import open
 from .constants import CONFIG_FILE_PARSE_ERROR


 try:
     import ConfigParser as configparser
-except ImportError:  # pragma: nocover
-    import configparser
+except ImportError:
+    from .packages import configparser


 def parseConfigFile(configFile=None):

@@ -41,15 +40,14 @@ def parseConfigFile(configFile=None):
     if not configFile:
         configFile = os.path.join(os.path.expanduser('~'), '.wakatime.cfg')

-    configs = configparser.SafeConfigParser()
+    configs = configparser.ConfigParser(delimiters=('='), strict=False)
     try:
         with open(configFile, 'r', encoding='utf-8') as fh:
             try:
-                configs.readfp(fh)
+                configs.read_file(fh)
             except configparser.Error:
                 print(traceback.format_exc())
-                return None
+                raise SystemExit(CONFIG_FILE_PARSE_ERROR)
     except IOError:
-        sys.stderr.write(u("Error: Could not read from config file {0}\n").format(u(configFile)))
-        raise SystemExit(CONFIG_FILE_PARSE_ERROR)
+        pass
     return configs

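The parser swap above (SafeConfigParser().readfp() to ConfigParser(delimiters=('='), strict=False).read_file()) relies on the configparser backport bundled later in this commit. A rough sketch of what the new settings mean, using the Python 3 stdlib module and made-up option names rather than code from the commit:

import configparser   # stdlib on Python 3; the commit vendors a backport for Python 2

cfg_text = """
[settings]
api_key = 1234-abcd
debug = true
debug = false
"""

configs = configparser.ConfigParser(delimiters=('=',), strict=False)
configs.read_string(cfg_text)
print(configs.get('settings', 'api_key'))   # '1234-abcd'
print(configs.get('settings', 'debug'))     # 'false' -- strict=False keeps the last duplicate instead of raising
# With only '=' as a delimiter, a colon never splits a line into key/value,
# so values and keys containing ':' (URLs, Windows paths) parse predictably.
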
plugin/packages/wakatime/language_priorities.py  (new file, 18 lines)

@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+"""
+    wakatime.language_priorities
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Overwrite pygments Lexer.priority attribute for specific languages.
+
+    :copyright: (c) 2017 Alan Hamlett.
+    :license: BSD, see LICENSE for more details.
+"""
+
+
+LANGUAGES = {
+    'typescript': 0.01,
+    'perl': 0.01,
+    'perl6': 0.01,
+    'f#': 0.01,
+}

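These values override pygments' Lexer.priority for a handful of languages, presumably to keep them from being over-selected when guessing ambiguous files; customize_priority() near the end of this diff is what applies them. An illustrative use against the standalone pygments package (the plugin actually uses its bundled copy, and the lexer chosen here is only for demonstration):

from pygments.lexers import get_lexer_by_name

LANGUAGES = {'typescript': 0.01, 'perl': 0.01, 'perl6': 0.01, 'f#': 0.01}

lexer = get_lexer_by_name('perl')
lexer.priority = LANGUAGES.get(lexer.name.lower(), lexer.priority)
print(lexer.name, lexer.priority)   # Perl 0.01
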
@@ -29,7 +29,6 @@ from .compat import u, is_py3
 from .constants import (
     API_ERROR,
     AUTH_ERROR,
-    CONFIG_FILE_PARSE_ERROR,
     SUCCESS,
     UNKNOWN_ERROR,
     MALFORMED_HEARTBEAT_ERROR,

@@ -293,8 +292,6 @@ def execute(argv=None):
         sys.argv = ['wakatime'] + argv

     args, configs = parseArguments()
-    if configs is None:
-        return CONFIG_FILE_PARSE_ERROR

     setup_logging(args, __version__)

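Because parseConfigFile() now raises SystemExit(CONFIG_FILE_PARSE_ERROR) itself (see the config.py hunk above), execute() no longer needs the None check. A rough sketch of the resulting flow; the helper and the numeric value are illustrative, the real constant lives in wakatime's constants module:

CONFIG_FILE_PARSE_ERROR = 103   # illustrative value; defined in wakatime's constants module

def parse_config_or_exit(broken=True):
    # stand-in for parseConfigFile(): a parse error now exits with a status code
    if broken:
        raise SystemExit(CONFIG_FILE_PARSE_ERROR)
    return {'settings': {}}

try:
    configs = parse_config_or_exit()
except SystemExit as ex:
    print(ex.code)   # 103 -- becomes the process exit status if not caught
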
plugin/packages/wakatime/packages/configparser/__init__.py  (new file, 1390 lines)
File diff suppressed because it is too large

plugin/packages/wakatime/packages/configparser/helpers.py  (new file, 171 lines)

@@ -0,0 +1,171 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+from collections import MutableMapping
+try:
+    from collections import UserDict
+except ImportError:
+    from UserDict import UserDict
+
+try:
+    from collections import OrderedDict
+except ImportError:
+    from ordereddict import OrderedDict
+
+from io import open
+import sys
+try:
+    from thread import get_ident
+except ImportError:
+    try:
+        from _thread import get_ident
+    except ImportError:
+        from _dummy_thread import get_ident
+
+
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+
+str = type('str')
+
+
+def from_none(exc):
+    """raise from_none(ValueError('a')) == raise ValueError('a') from None"""
+    exc.__cause__ = None
+    exc.__suppress_context__ = True
+    return exc
+
+
+# from reprlib 3.2.1
+def recursive_repr(fillvalue='...'):
+    'Decorator to make a repr function return fillvalue for a recursive call'
+
+    def decorating_function(user_function):
+        repr_running = set()
+
+        def wrapper(self):
+            key = id(self), get_ident()
+            if key in repr_running:
+                return fillvalue
+            repr_running.add(key)
+            try:
+                result = user_function(self)
+            finally:
+                repr_running.discard(key)
+            return result
+
+        # Can't use functools.wraps() here because of bootstrap issues
+        wrapper.__module__ = getattr(user_function, '__module__')
+        wrapper.__doc__ = getattr(user_function, '__doc__')
+        wrapper.__name__ = getattr(user_function, '__name__')
+        wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
+        return wrapper
+
+    return decorating_function
+
+
+# from collections 3.2.1
+class _ChainMap(MutableMapping):
+    ''' A ChainMap groups multiple dicts (or other mappings) together
+    to create a single, updateable view.
+
+    The underlying mappings are stored in a list. That list is public and can
+    be accessed or updated using the *maps* attribute. There is no other state.
+
+    Lookups search the underlying mappings successively until a key is found.
+    In contrast, writes, updates, and deletions only operate on the first
+    mapping.
+
+    '''
+
+    def __init__(self, *maps):
+        '''Initialize a ChainMap by setting *maps* to the given mappings.
+        If no mappings are provided, a single empty dictionary is used.
+
+        '''
+        self.maps = list(maps) or [{}]          # always at least one map
+
+    def __missing__(self, key):
+        raise KeyError(key)
+
+    def __getitem__(self, key):
+        for mapping in self.maps:
+            try:
+                return mapping[key]             # can't use 'key in mapping' with defaultdict
+            except KeyError:
+                pass
+        return self.__missing__(key)            # support subclasses that define __missing__
+
+    def get(self, key, default=None):
+        return self[key] if key in self else default
+
+    def __len__(self):
+        return len(set().union(*self.maps))     # reuses stored hash values if possible
+
+    def __iter__(self):
+        return iter(set().union(*self.maps))
+
+    def __contains__(self, key):
+        return any(key in m for m in self.maps)
+
+    @recursive_repr()
+    def __repr__(self):
+        return '{0.__class__.__name__}({1})'.format(
+            self, ', '.join(map(repr, self.maps)))
+
+    @classmethod
+    def fromkeys(cls, iterable, *args):
+        'Create a ChainMap with a single dict created from the iterable.'
+        return cls(dict.fromkeys(iterable, *args))
+
+    def copy(self):
+        'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
+        return self.__class__(self.maps[0].copy(), *self.maps[1:])
+
+    __copy__ = copy
+
+    def new_child(self):                        # like Django's Context.push()
+        'New ChainMap with a new dict followed by all previous maps.'
+        return self.__class__({}, *self.maps)
+
+    @property
+    def parents(self):                          # like Django's Context.pop()
+        'New ChainMap from maps[1:].'
+        return self.__class__(*self.maps[1:])
+
+    def __setitem__(self, key, value):
+        self.maps[0][key] = value
+
+    def __delitem__(self, key):
+        try:
+            del self.maps[0][key]
+        except KeyError:
+            raise KeyError('Key not found in the first mapping: {!r}'.format(key))
+
+    def popitem(self):
+        'Remove and return an item pair from maps[0]. Raise KeyError if maps[0] is empty.'
+        try:
+            return self.maps[0].popitem()
+        except KeyError:
+            raise KeyError('No keys found in the first mapping.')
+
+    def pop(self, key, *args):
+        'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
+        try:
+            return self.maps[0].pop(key, *args)
+        except KeyError:
+            raise KeyError('Key not found in the first mapping: {!r}'.format(key))
+
+    def clear(self):
+        'Clear maps[0], leaving maps[1:] intact.'
+        self.maps[0].clear()
+
+
+try:
+    from collections import ChainMap
+except ImportError:
+    ChainMap = _ChainMap

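This helpers module backports ChainMap (plus a recursive_repr guard for its __repr__) for the bundled configparser. A quick usage sketch of the semantics described in its docstrings; the dictionaries here are made up:

defaults = {'timeout': 30, 'verbose': False}
overrides = {'timeout': 5}
settings = ChainMap(overrides, defaults)

print(settings['timeout'])    # 5      -- lookups search the maps left to right
print(settings['verbose'])    # False  -- falls through to the second mapping
settings['verbose'] = True    # writes always land in the first mapping
print(overrides)              # {'timeout': 5, 'verbose': True}

overrides['self'] = settings  # a self-referencing value...
print(repr(settings))         # ...shows '...' inside instead of recursing forever
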
@@ -148,7 +148,7 @@ LEXERS = {
     'EvoqueLexer': ('pygments.lexers.templates', 'Evoque', ('evoque',), ('*.evoque',), ('application/x-evoque',)),
     'EvoqueXmlLexer': ('pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), ('*.xml',), ('application/xml+evoque',)),
     'EzhilLexer': ('pygments.lexers.ezhil', 'Ezhil', ('ezhil',), ('*.n',), ('text/x-ezhil',)),
-    'FSharpLexer': ('pygments.lexers.dotnet', 'FSharp', ('fsharp',), ('*.fs', '*.fsi'), ('text/x-fsharp',)),
+    'FSharpLexer': ('pygments.lexers.dotnet', 'F#', ('fsharp',), ('*.fs', '*.fsi'), ('text/x-fsharp',)),
     'FactorLexer': ('pygments.lexers.factor', 'Factor', ('factor',), ('*.factor',), ('text/x-factor',)),
     'FancyLexer': ('pygments.lexers.ruby', 'Fancy', ('fancy', 'fy'), ('*.fy', '*.fancypack'), ('text/x-fancysrc',)),
     'FantomLexer': ('pygments.lexers.fantom', 'Fantom', ('fan',), ('*.fan',), ('application/x-fantom',)),

@@ -549,7 +549,7 @@ class FSharpLexer(RegexLexer):
     .. versionadded:: 1.5
     """

-    name = 'FSharp'
+    name = 'F#'
    aliases = ['fsharp']
     filenames = ['*.fs', '*.fsi']
     mimetypes = ['text/x-fsharp']

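Renaming the lexer to 'F#' here and in the LEXERS mapping above makes the detected language name line up with the 'f#' key in the new language_priorities table. A quick check, assuming a pygments install with this naming (the plugin actually uses its bundled, patched copy):

from pygments.lexers import get_lexer_by_name

lexer = get_lexer_by_name('fsharp')   # the 'fsharp' alias is unchanged
print(lexer.name)                     # 'F#' with this change ('FSharp' before)
print(lexer.name.lower())             # 'f#', which matches the LANGUAGES key
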
@@ -113,9 +113,6 @@ class TypoScriptLexer(RegexLexer):

     flags = re.DOTALL | re.MULTILINE

-    # Slightly higher than TypeScript (which is 0).
-    priority = 0.1
-
     tokens = {
         'root': [
             include('comment'),

@@ -16,6 +16,7 @@ import sys

 from .compat import u, open
 from .dependencies import DependencyParser
+from .language_priorities import LANGUAGES

 from .packages.pygments.lexers import (
     _iter_lexerclasses,

@@ -117,13 +118,13 @@ def guess_lexer_using_filename(file_name, text):
     try:
         lexer = custom_pygments_guess_lexer_for_filename(file_name, text)
     except:
-        pass
+        log.traceback(logging.DEBUG)

     if lexer is not None:
         try:
             accuracy = lexer.analyse_text(text)
         except:
-            pass
+            log.traceback(logging.DEBUG)

     return lexer, accuracy

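The silent `pass` branches are replaced with `log.traceback(logging.DEBUG)`, so guessing failures are still swallowed but now leave a trace in the debug log. `log.traceback` is wakatime's own logger helper; a rough stand-in with the same observable effect (logger name and helper name assumed, not taken from this commit):

import logging
import traceback

log = logging.getLogger('WakaTime')

def log_traceback(level=logging.ERROR):
    # stand-in for log.traceback(): record the current exception's traceback
    # at the requested level instead of silently dropping it
    log.log(level, traceback.format_exc())

try:
    1 / 0
except Exception:
    log_traceback(logging.DEBUG)   # swallowed, but visible when debug logging is enabled
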
@@ -140,19 +141,19 @@ def guess_lexer_using_modeline(text):
     try:
         file_type = get_filetype_from_buffer(text)
     except:  # pragma: nocover
-        pass
+        log.traceback(logging.DEBUG)

     if file_type is not None:
         try:
             lexer = get_lexer_by_name(file_type)
         except ClassNotFound:
-            pass
+            log.traceback(logging.DEBUG)

     if lexer is not None:
         try:
             accuracy = lexer.analyse_text(text)
         except:  # pragma: nocover
-            pass
+            log.traceback(logging.DEBUG)

     return lexer, accuracy

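This path guesses the lexer from an editor modeline embedded in the file itself, then logs (rather than hides) any failure. A simplified stand-in for the modeline scan, to show what kind of input it keys on; the helper and regex below are illustrative, not the plugin's implementation:

import re

def filetype_from_modeline(text):
    # simplified stand-in for get_filetype_from_buffer(): look for a vim modeline
    match = re.search(r'vim:\s*set\s+(?:ft|filetype)=(\w+)', text)
    return match.group(1) if match else None

print(filetype_from_modeline('# vim: set filetype=python :'))  # 'python'
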
@@ -240,13 +241,14 @@ def get_language_from_json(language, key):
         'languages',
         '{0}.json').format(key.lower())

-    try:
-        with open(file_name, 'r', encoding='utf-8') as fh:
-            languages = json.loads(fh.read())
-            if languages.get(language.lower()):
-                return languages[language.lower()]
-    except:
-        pass
+    if os.path.exists(file_name):
+        try:
+            with open(file_name, 'r', encoding='utf-8') as fh:
+                languages = json.loads(fh.read())
+                if languages.get(language.lower()):
+                    return languages[language.lower()]
+        except:
+            log.traceback(logging.DEBUG)

     return None

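get_language_from_json() now checks os.path.exists() before opening the per-lexer JSON override file and logs parse failures instead of hiding them. A self-contained sketch of the same guarded lookup pattern; the file name, keys, and exception handling here are illustrative (the real code uses a bare except and logs the traceback at DEBUG):

import json
import os

def lookup_language(language, file_name):
    # mirrors the guarded lookup above
    if os.path.exists(file_name):
        try:
            with open(file_name, 'r') as fh:
                languages = json.loads(fh.read())
                if languages.get(language.lower()):
                    return languages[language.lower()]
        except (IOError, ValueError):
            pass  # the real code logs the traceback at DEBUG here
    return None
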
@@ -306,15 +308,10 @@ def custom_pygments_guess_lexer_for_filename(_fn, _text, **options):
     return result[-1][1](**options)


-CUSTOM_PRIORITIES = {
-    'typescript': 0.11,
-    'perl': 0.1,
-    'perl6': 0.1,
-    'f#': 0.1,
-}
 def customize_priority(lexer):
     """Return an integer priority for the given lexer object."""

-    if lexer.name.lower() in CUSTOM_PRIORITIES:
-        lexer.priority = CUSTOM_PRIORITIES[lexer.name.lower()]
+    lexer_name = lexer.name.lower().replace('sharp', '#')
+    if lexer_name in LANGUAGES:
+        lexer.priority = LANGUAGES[lexer_name]
     return lexer

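customize_priority() now reads the shared LANGUAGES table and normalizes the lexer name with .replace('sharp', '#'), so a lexer still spelled 'FSharp' resolves to the 'f#' key. A small sketch with a made-up lexer class:

LANGUAGES = {'typescript': 0.01, 'perl': 0.01, 'perl6': 0.01, 'f#': 0.01}

class FakeLexer(object):
    name = 'FSharp'      # older pygments spelling; the bundled copy now says 'F#'
    priority = 0.0

lexer = FakeLexer()
lexer_name = lexer.name.lower().replace('sharp', '#')   # 'f#'
if lexer_name in LANGUAGES:
    lexer.priority = LANGUAGES[lexer_name]
print(lexer.priority)    # 0.01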