More refactorings and linting.

Holger Rapp 2014-02-08 14:41:08 +01:00
parent c67a59f579
commit 2b72c46935
3 changed files with 89 additions and 70 deletions

pythonx/UltiSnips/escaping.py (new executable file, 26 additions)

@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+# encoding: utf-8
+
+"""Utilities to deal with text escaping."""
+
+def unescape(text):
+    """Removes '\\' escaping from 'text'."""
+    rv = ""
+    i = 0
+    while i < len(text):
+        if i+1 < len(text) and text[i] == '\\':
+            rv += text[i+1]
+            i += 1
+        else:
+            rv += text[i]
+        i += 1
+    return rv
+
+def fill_in_whitespace(text):
+    """Returns 'text' with escaped whitespace replaced through whitespaces."""
+    text = text.replace(r"\n", "\n")
+    text = text.replace(r"\t", "\t")
+    text = text.replace(r"\r", "\r")
+    text = text.replace(r"\a", "\a")
+    text = text.replace(r"\b", "\b")
+    return text
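A quick sketch (not part of the commit) of how the two new helpers behave; it assumes pythonx/ is on sys.path, as it is once Vim has loaded UltiSnips:

    from UltiSnips.escaping import unescape, fill_in_whitespace

    # unescape() drops one level of backslash escaping.
    assert unescape(r"\$1 costs \\5") == r"$1 costs \5"

    # fill_in_whitespace() turns the literal two-character sequences
    # \n, \t, \r, \a, \b into the corresponding control characters.
    assert fill_in_whitespace(r"foo\nbar") == "foo\nbar"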

pythonx/UltiSnips/text_objects/_parser.py
@@ -1,9 +1,11 @@
 #!/usr/bin/env python
 # encoding: utf-8
 
-from UltiSnips.text_objects._lexer import tokenize, EscapeCharToken, VisualToken, \
-    TransformationToken, TabStopToken, MirrorToken, PythonCodeToken, \
-    VimLCodeToken, ShellCodeToken
+"""Parses tokens into text objects."""
+
+from UltiSnips.text_objects._lexer import tokenize, EscapeCharToken, \
+    VisualToken, TransformationToken, TabStopToken, MirrorToken, \
+    PythonCodeToken, VimLCodeToken, ShellCodeToken
 from UltiSnips.position import Position
 from UltiSnips.text_objects._escaped_char import EscapedChar
 from UltiSnips.text_objects._mirror import Mirror
@@ -14,43 +16,16 @@ from UltiSnips.text_objects._transformation import Transformation
 from UltiSnips.text_objects._viml_code import VimLCode
 from UltiSnips.text_objects._visual import Visual
 
-class TOParser(object):
-    TOKEN2TO = {
+_TOKEN_TO_TEXTOBJECT = {
     EscapeCharToken: EscapedChar,
     VisualToken: Visual,
     ShellCodeToken: ShellCode,
     PythonCodeToken: PythonCode,
     VimLCodeToken: VimLCode,
-    }
+}
-    def __init__(self, parent_to, text, indent):
-        """
-        The parser is responsible for turning tokens into Real TextObjects
-        """
-        self._indent = indent
-        self._parent_to = parent_to
-        self._text = text
-    def parse(self, add_ts_zero = False):
-        seen_ts = {}
-        all_tokens = []
-        self._do_parse(all_tokens, seen_ts)
-        self._resolve_ambiguity(all_tokens, seen_ts)
-        self._create_objects_with_links_to_tabs(all_tokens, seen_ts)
-        if add_ts_zero and 0 not in seen_ts:
-            mark = all_tokens[-1][1].end # Last token is always EndOfText
-            m1 = Position(mark.line, mark.col)
-            TabStop(self._parent_to, 0, mark, m1)
-        self._parent_to.replace_initial_text()
-    #####################
-    # Private Functions #
-    #####################
-    def _resolve_ambiguity(self, all_tokens, seen_ts):
+def _resolve_ambiguity(all_tokens, seen_ts):
+    """$1 could be a Mirror or a TabStop. This figures this out."""
     for parent, token in all_tokens:
         if isinstance(token, MirrorToken):
             if token.number not in seen_ts:
@@ -58,26 +33,44 @@ class TOParser(object):
             else:
                 Mirror(parent, seen_ts[token.number], token)
 
-    def _create_objects_with_links_to_tabs(self, all_tokens, seen_ts):
+def _create_transformations(all_tokens, seen_ts):
+    """Create the objects that need to know about tabstops."""
     for parent, token in all_tokens:
         if isinstance(token, TransformationToken):
             if token.number not in seen_ts:
-                raise RuntimeError("Tabstop %i is not known but is used by a Transformation" % token.number)
+                raise RuntimeError(
+                    "Tabstop %i is not known but is used by a Transformation"
+                    % token.number)
             Transformation(parent, seen_ts[token.number], token)
 
-    def _do_parse(self, all_tokens, seen_ts):
-        tokens = list(tokenize(self._text, self._indent, self._parent_to.start))
+def _do_parse(all_tokens, seen_ts, parent_to, text, indent):
+    """Recursive function that actually creates the objects."""
+    tokens = list(tokenize(text, indent, parent_to.start))
     for token in tokens:
-        all_tokens.append((self._parent_to, token))
+        all_tokens.append((parent_to, token))
         if isinstance(token, TabStopToken):
-            ts = TabStop(self._parent_to, token)
+            ts = TabStop(parent_to, token)
             seen_ts[token.number] = ts
-            k = TOParser(ts, token.initial_text, self._indent)
-            k._do_parse(all_tokens, seen_ts)
+            _do_parse(all_tokens, seen_ts, ts, token.initial_text, indent)
         else:
-            klass = self.TOKEN2TO.get(token.__class__, None)
+            klass = _TOKEN_TO_TEXTOBJECT.get(token.__class__, None)
             if klass is not None:
-                klass(self._parent_to, token)
+                klass(parent_to, token)
+
+def parse_text_object(parent_to, text, indent):
+    """Parses a text object from 'text' assuming the current 'indent'. Will
+    instantiate all the objects and link them as childs to parent_to. Will also
+    put the initial text into Vim."""
+    seen_ts = {}
+    all_tokens = []
+
+    _do_parse(all_tokens, seen_ts, parent_to, text, indent)
+
+    _resolve_ambiguity(all_tokens, seen_ts)
+    _create_transformations(all_tokens, seen_ts)
+
+    if 0 not in seen_ts:
+        mark = all_tokens[-1][1].end # Last token is always EndOfText
+        m1 = Position(mark.line, mark.col)
+        TabStop(parent_to, 0, mark, m1)
+
+    parent_to.replace_initial_text()
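For callers, the refactor replaces the two-step TOParser usage with a single module-level call; a minimal before/after sketch, with parent_to standing for any EditableTextObject such as the SnippetInstance below. Note that the old add_ts_zero flag is gone: parse_text_object now always adds the $0 tabstop when the snippet body does not define one.

    from UltiSnips.text_objects._parser import parse_text_object

    # Before this commit:
    #     TOParser(parent_to, text, indent).parse(True)
    # After this commit:
    parse_text_object(parent_to, text, indent)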

(third changed file: the module defining SnippetInstance)

@@ -9,7 +9,7 @@ from UltiSnips.position import Position
 import UltiSnips._vim as _vim
 from UltiSnips.text_objects._base import EditableTextObject, \
     NoneditableTextObject
-from UltiSnips.text_objects._parser import TOParser
+from UltiSnips.text_objects._parser import parse_text_object
 
 class SnippetInstance(EditableTextObject):
     """See module docstring."""
@@ -29,7 +29,7 @@ class SnippetInstance(EditableTextObject):
         EditableTextObject.__init__(self, parent, start, end, initial_text)
 
-        TOParser(self, initial_text, indent).parse(True)
+        parse_text_object(self, initial_text, indent)
 
         self.update_textobjects()
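Putting the three files together, the call flow for a hypothetical snippet body ${1:foo $2} now looks roughly like this (an illustration of the code above, not tool output):

    SnippetInstance.__init__(...)
      -> parse_text_object(snippet_instance, "${1:foo $2}", indent)
           -> _do_parse(all_tokens, seen_ts, snippet_instance, "${1:foo $2}", indent)
                creates TabStop 1, then recurses into its placeholder text:
                -> _do_parse(all_tokens, seen_ts, tabstop_1, "foo $2", indent)
                     creates TabStop 2 as a child of TabStop 1
           -> _resolve_ambiguity(...)        # a bare $N becomes a Mirror when tabstop N already exists
           -> _create_transformations(...)   # transformations are linked to their tabstops
           -> TabStop 0 is appended, since this body defines none
           -> snippet_instance.replace_initial_text()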