rana-cli/wakatime/stats.py

# -*- coding: utf-8 -*-
"""
wakatime.stats
~~~~~~~~~~~~~~
2013-09-22 20:41:13 +00:00
Stats about files
:copyright: (c) 2013 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
import logging
import os
import sys
from .compat import u, open
2015-09-27 02:26:15 +00:00
from .dependencies import DependencyParser
from .packages import (
get_lexer_by_name,
guess_lexer_for_filename,
get_filetype_from_buffer,
)
from .packages.pygments.lexers import ClassNotFound
try:
from .packages import simplejson as json # pragma: nocover
except (ImportError, SyntaxError): # pragma: nocover
import json
log = logging.getLogger('WakaTime')


def guess_language(file_name):
    """Guess lexer and language for a file.

    Returns (language, lexer) tuple where language is a unicode string.
    """

    language = get_language_from_extension(file_name)
    lexer = smart_guess_lexer(file_name)

    if language is None and lexer is not None:
        language = u(lexer.name)

    return language, lexer


def smart_guess_lexer(file_name):
    """Guess Pygments lexer for a file.

    Looks for a vim modeline in file contents, then compares the accuracy
    of that lexer with a second guess. The second guess looks up all lexers
    matching the file name, then runs a text analysis for the best choice.
    """
    lexer = None

    text = get_file_head(file_name)

    lexer1, accuracy1 = guess_lexer_using_filename(file_name, text)
    lexer2, accuracy2 = guess_lexer_using_modeline(text)

    if lexer1:
        lexer = lexer1
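    # Prefer the modeline-based lexer only when it reports an accuracy and
    # the filename-based guess either reported none or scored lower.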
    if (lexer2 and accuracy2 and
            (not accuracy1 or accuracy2 > accuracy1)):
        lexer = lexer2  # pragma: nocover

    return lexer


def guess_lexer_using_filename(file_name, text):
    """Guess lexer for given text, limited to lexers for this file's extension.

    Returns a tuple of (lexer, accuracy).
    """

    lexer, accuracy = None, None

    try:
        lexer = guess_lexer_for_filename(file_name, text)
    except:  # pragma: nocover
        pass

    if lexer is not None:
        try:
            accuracy = lexer.analyse_text(text)
        except:  # pragma: nocover
            pass

    return lexer, accuracy


def guess_lexer_using_modeline(text):
    """Guess lexer for given text using Vim modeline.

    Returns a tuple of (lexer, accuracy).
    """

    lexer, accuracy = None, None

    file_type = None
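    # get_filetype_from_buffer scans the text for a Vim modeline, for example
    # a line like "# vim: set ft=python:", and returns the declared filetype.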
    try:
        file_type = get_filetype_from_buffer(text)
    except:  # pragma: nocover
        pass

    if file_type is not None:
        try:
            lexer = get_lexer_by_name(file_type)
        except ClassNotFound:
            pass

    if lexer is not None:
        try:
            accuracy = lexer.analyse_text(text)
        except:  # pragma: nocover
            pass

    return lexer, accuracy


def get_language_from_extension(file_name):
    """Returns a matching language for the given file extension."""

    filepart, extension = os.path.splitext(file_name)
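    # A file with a same-named sibling .c (or .C) source is treated as C,
    # regardless of its own extension.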
    if os.path.exists(u('{0}{1}').format(u(filepart), u('.c'))) or os.path.exists(u('{0}{1}').format(u(filepart), u('.C'))):
        return 'C'

    extension = extension.lower()
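    # Disambiguate .h headers by the other extensions in the same directory:
    # any .cpp sibling means C++, otherwise a .c sibling means C.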
    if extension == '.h':
        directory = os.path.dirname(file_name)
        available_files = os.listdir(directory)
        available_extensions = list(zip(*map(os.path.splitext, available_files)))[1]
        available_extensions = [ext.lower() for ext in available_extensions]
        if '.cpp' in available_extensions:
            return 'C++'
        if '.c' in available_extensions:
            return 'C'

    return None


def number_lines_in_file(file_name):
    """Counts the lines in a file, retrying with the filesystem encoding if
    reading as UTF-8 fails. Returns None when the file cannot be read.
    """
    lines = 0
    try:
        with open(file_name, 'r', encoding='utf-8') as fh:
            for line in fh:
                lines += 1
    except:  # pragma: nocover
        try:
            with open(file_name, 'r', encoding=sys.getfilesystemencoding()) as fh:
                for line in fh:
                    lines += 1
        except:
            return None
    return lines


def get_file_stats(file_name, entity_type='file', lineno=None, cursorpos=None,
                   plugin=None, alternate_language=None):
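    # Only file entities get language detection, dependency parsing and a
    # line count; other entity types keep placeholder values.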
    if entity_type != 'file':
        stats = {
            'language': None,
            'dependencies': [],
            'lines': None,
            'lineno': lineno,
            'cursorpos': cursorpos,
        }
    else:
        language, lexer = guess_language(file_name)
        parser = DependencyParser(file_name, lexer)
        dependencies = parser.parse()
        if language is None and alternate_language:
            language = standardize_language(alternate_language, plugin)
        stats = {
            'language': language,
            'dependencies': dependencies,
            'lines': number_lines_in_file(file_name),
            'lineno': lineno,
            'cursorpos': cursorpos,
        }
    return stats


def standardize_language(language, plugin):
    """Maps a string to the equivalent Pygments language."""

    # standardize language for this plugin
    if plugin:
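        # The last token of the plugin string (e.g. "vim-wakatime/4.0.0",
        # assuming a user-agent style value) is reduced to the editor name
        # ("vim"), which selects the editor-specific language mapping.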
        plugin = plugin.split(' ')[-1].split('/')[0].split('-')[0]
        standardized = get_language_from_json(language, plugin)
        if standardized is not None:
            return standardized

    # standardize language against default languages
    standardized = get_language_from_json(language, 'default')
    if standardized is not None:
        return standardized

    return None


def get_language_from_json(language, key):
    """Finds the given language in a json file."""

    file_name = os.path.join(
        os.path.dirname(__file__),
        'languages',
        '{0}.json').format(key.lower())
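    # Each languages/<key>.json maps alternate language names (keys) to their
    # canonical names (values); names already canonical pass through as-is.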
    try:
        with open(file_name, 'r', encoding='utf-8') as fh:
            languages = json.loads(fh.read())
            if language in languages.values():
                return language
            if languages.get(language):
                return languages[language]
    except:
        pass

    return None


def get_file_head(file_name):
    """Returns the first 512000 bytes of the file's contents."""

    text = None
    try:
        with open(file_name, 'r', encoding='utf-8') as fh:
            text = fh.read(512000)
    except:
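        # Retry with the filesystem encoding when the file is not valid UTF-8.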
        try:
            with open(file_name, 'r', encoding=sys.getfilesystemencoding()) as fh:
                text = fh.read(512000)
        except:
            log.traceback('debug')
    return text