Java dependency parser plugin
This commit is contained in:
parent
3b0cab2208
commit
744cc2f6ca
2 changed files with 55 additions and 6 deletions
|
@ -9,9 +9,14 @@
|
||||||
:license: BSD, see LICENSE for more details.
|
:license: BSD, see LICENSE for more details.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
|
||||||
from ..compat import open, import_module
|
from ..compat import open, import_module
|
||||||
|
|
||||||
|
|
||||||
|
log = logging.getLogger('WakaTime')
|
||||||
|
|
||||||
|
|
||||||
class TokenParser(object):
|
class TokenParser(object):
|
||||||
source_file = None
|
source_file = None
|
||||||
lexer = None
|
lexer = None
|
||||||
|
@ -29,15 +34,15 @@ class TokenParser(object):
|
||||||
self.tokens = self._extract_tokens()
|
self.tokens = self._extract_tokens()
|
||||||
raise Exception('Not yet implemented.')
|
raise Exception('Not yet implemented.')
|
||||||
|
|
||||||
def append(self, dep):
|
def append(self, dep, truncate=True):
|
||||||
self._save_dependency(dep)
|
self._save_dependency(dep, truncate=truncate)
|
||||||
|
|
||||||
def _extract_tokens(self):
|
def _extract_tokens(self):
|
||||||
with open(self.source_file, 'r', encoding='utf-8') as fh:
|
with open(self.source_file, 'r', encoding='utf-8') as fh:
|
||||||
return self.lexer.get_tokens_unprocessed(fh.read(512000))
|
return self.lexer.get_tokens_unprocessed(fh.read(512000))
|
||||||
|
|
||||||
def _save_dependency(self, dep):
|
def _save_dependency(self, dep, truncate=True):
|
||||||
dep = dep.strip().split('.')[0].strip()
|
dep = dep.strip().split('.')[0].strip() if truncate else dep.strip()
|
||||||
if dep:
|
if dep:
|
||||||
self.dependencies.append(dep)
|
self.dependencies.append(dep)
|
||||||
|
|
||||||
|
@ -56,8 +61,8 @@ class DependencyParser(object):
|
||||||
class_name = self.lexer.__class__.__name__.replace('Lexer', 'Parser', 1)
|
class_name = self.lexer.__class__.__name__.replace('Lexer', 'Parser', 1)
|
||||||
module = import_module('.%s' % module_name, package=__package__)
|
module = import_module('.%s' % module_name, package=__package__)
|
||||||
self.parser = getattr(module, class_name)
|
self.parser = getattr(module, class_name)
|
||||||
except ImportError:
|
except ImportError as ex:
|
||||||
pass
|
log.debug(ex)
|
||||||
|
|
||||||
def parse(self):
|
def parse(self):
|
||||||
if self.parser:
|
if self.parser:
|
||||||
|
|
44
wakatime/languages/jvm.py
Normal file
44
wakatime/languages/jvm.py
Normal file
|
@ -0,0 +1,44 @@
|
||||||
|
# -*- coding: utf-8 -*-
"""
    wakatime.languages.jvm
    ~~~~~~~~~~~~~~~~~~~~~~

    Parse dependencies from Java code.

    :copyright: (c) 2013 Alan Hamlett.
    :license: BSD, see LICENSE for more details.
"""

from . import TokenParser
from ..compat import u


class JavaParser(TokenParser):
    """Extracts imported package names from Java source via Pygments tokens."""

    def parse(self, tokens=None):
        """Walk the token stream and return the accumulated dependencies.

        ``tokens`` is only tested for truthiness; when neither it nor
        ``self.tokens`` is set, tokens are lexed from the source file.
        (Default was the mutable ``[]``; ``None`` has identical truthiness
        behavior without sharing one list object across calls.)
        """
        if not tokens and not self.tokens:
            self.tokens = self._extract_tokens()
        for index, token, content in self.tokens:
            self._process_token(token, content)
        return self.dependencies

    def _process_token(self, token, content):
        # Only Namespace tokens (package/import paths) carry dependencies;
        # everything else falls through to the no-op handler.
        if u(token).split('.')[-1] == 'Namespace':
            self._process_namespace(token, content)
        else:
            self._process_other(token, content)

    def _process_namespace(self, token, content):
        # Skip the 'import' keyword itself. For a dotted path keep everything
        # but the trailing segment (e.g. 'java.util.List' -> 'java.util');
        # a single bare name is kept as-is. truncate=False preserves the
        # dotted package name instead of collapsing it to its first segment.
        if content != 'import':
            parts = content.split('.')
            content = parts[0] if len(parts) == 1 else '.'.join(parts[:-1])
            self.append(content, truncate=False)

    def _process_text(self, token, content):
        # NOTE(review): self.state / self.parens / self.nonpackage are never
        # assigned in this class and TokenParser is not visible here --
        # presumably leftover state handling copied from another language
        # parser; confirm against the base class before relying on this.
        if self.state is not None:
            if content == "\n" and self.parens == 0:
                self.state = None
                self.nonpackage = False

    def _process_other(self, token, content):
        pass
|
Loading…
Reference in a new issue