More refactorings and linting.
This commit is contained in:
parent
c67a59f579
commit
2b72c46935
26
pythonx/UltiSnips/escaping.py
Executable file
26
pythonx/UltiSnips/escaping.py
Executable file
@ -0,0 +1,26 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# encoding: utf-8
|
||||||
|
|
||||||
|
"""Utilities to deal with text escaping."""
|
||||||
|
|
||||||
|
def unescape(text):
    """Removes '\\' escaping from 'text'."""
    # Walk the string manually: a backslash consumes the following
    # character verbatim; a lone trailing backslash is kept as-is.
    pieces = []
    pos = 0
    length = len(text)
    while pos < length:
        if text[pos] == '\\' and pos + 1 < length:
            pieces.append(text[pos + 1])
            pos += 2
        else:
            pieces.append(text[pos])
            pos += 1
    return "".join(pieces)
|
||||||
|
|
||||||
|
def fill_in_whitespace(text):
    """Returns 'text' with escaped whitespace replaced through whitespaces."""
    # Apply the substitutions in a fixed order; each raw two-character
    # escape sequence becomes the corresponding control character.
    replacements = (
        (r"\n", "\n"),
        (r"\t", "\t"),
        (r"\r", "\r"),
        (r"\a", "\a"),
        (r"\b", "\b"),
    )
    for escaped, literal in replacements:
        text = text.replace(escaped, literal)
    return text
|
@ -1,9 +1,11 @@
|
|||||||
#!/usr/bin/env python
|
#!/usr/bin/env python
|
||||||
# encoding: utf-8
|
# encoding: utf-8
|
||||||
|
|
||||||
from UltiSnips.text_objects._lexer import tokenize, EscapeCharToken, VisualToken, \
|
"""Parses tokens into text objects."""
|
||||||
TransformationToken, TabStopToken, MirrorToken, PythonCodeToken, \
|
|
||||||
VimLCodeToken, ShellCodeToken
|
from UltiSnips.text_objects._lexer import tokenize, EscapeCharToken, \
|
||||||
|
VisualToken, TransformationToken, TabStopToken, MirrorToken, \
|
||||||
|
PythonCodeToken, VimLCodeToken, ShellCodeToken
|
||||||
from UltiSnips.position import Position
|
from UltiSnips.position import Position
|
||||||
from UltiSnips.text_objects._escaped_char import EscapedChar
|
from UltiSnips.text_objects._escaped_char import EscapedChar
|
||||||
from UltiSnips.text_objects._mirror import Mirror
|
from UltiSnips.text_objects._mirror import Mirror
|
||||||
@ -14,8 +16,7 @@ from UltiSnips.text_objects._transformation import Transformation
|
|||||||
from UltiSnips.text_objects._viml_code import VimLCode
|
from UltiSnips.text_objects._viml_code import VimLCode
|
||||||
from UltiSnips.text_objects._visual import Visual
|
from UltiSnips.text_objects._visual import Visual
|
||||||
|
|
||||||
class TOParser(object):
|
_TOKEN_TO_TEXTOBJECT = {
|
||||||
TOKEN2TO = {
|
|
||||||
EscapeCharToken: EscapedChar,
|
EscapeCharToken: EscapedChar,
|
||||||
VisualToken: Visual,
|
VisualToken: Visual,
|
||||||
ShellCodeToken: ShellCode,
|
ShellCodeToken: ShellCode,
|
||||||
@ -23,34 +24,8 @@ class TOParser(object):
|
|||||||
VimLCodeToken: VimLCode,
|
VimLCodeToken: VimLCode,
|
||||||
}
|
}
|
||||||
|
|
||||||
def __init__(self, parent_to, text, indent):
|
def _resolve_ambiguity(all_tokens, seen_ts):
|
||||||
"""
|
"""$1 could be a Mirror or a TabStop. This figures this out."""
|
||||||
The parser is responsible for turning tokens into Real TextObjects
|
|
||||||
"""
|
|
||||||
self._indent = indent
|
|
||||||
self._parent_to = parent_to
|
|
||||||
self._text = text
|
|
||||||
|
|
||||||
def parse(self, add_ts_zero = False):
|
|
||||||
seen_ts = {}
|
|
||||||
all_tokens = []
|
|
||||||
|
|
||||||
self._do_parse(all_tokens, seen_ts)
|
|
||||||
|
|
||||||
self._resolve_ambiguity(all_tokens, seen_ts)
|
|
||||||
self._create_objects_with_links_to_tabs(all_tokens, seen_ts)
|
|
||||||
|
|
||||||
if add_ts_zero and 0 not in seen_ts:
|
|
||||||
mark = all_tokens[-1][1].end # Last token is always EndOfText
|
|
||||||
m1 = Position(mark.line, mark.col)
|
|
||||||
TabStop(self._parent_to, 0, mark, m1)
|
|
||||||
|
|
||||||
self._parent_to.replace_initial_text()
|
|
||||||
|
|
||||||
#####################
|
|
||||||
# Private Functions #
|
|
||||||
#####################
|
|
||||||
def _resolve_ambiguity(self, all_tokens, seen_ts):
|
|
||||||
for parent, token in all_tokens:
|
for parent, token in all_tokens:
|
||||||
if isinstance(token, MirrorToken):
|
if isinstance(token, MirrorToken):
|
||||||
if token.number not in seen_ts:
|
if token.number not in seen_ts:
|
||||||
@ -58,26 +33,44 @@ class TOParser(object):
|
|||||||
else:
|
else:
|
||||||
Mirror(parent, seen_ts[token.number], token)
|
Mirror(parent, seen_ts[token.number], token)
|
||||||
|
|
||||||
def _create_objects_with_links_to_tabs(self, all_tokens, seen_ts):
|
def _create_transformations(all_tokens, seen_ts):
|
||||||
|
"""Create the objects that need to know about tabstops."""
|
||||||
for parent, token in all_tokens:
|
for parent, token in all_tokens:
|
||||||
if isinstance(token, TransformationToken):
|
if isinstance(token, TransformationToken):
|
||||||
if token.number not in seen_ts:
|
if token.number not in seen_ts:
|
||||||
raise RuntimeError("Tabstop %i is not known but is used by a Transformation" % token.number)
|
raise RuntimeError(
|
||||||
|
"Tabstop %i is not known but is used by a Transformation"
|
||||||
|
% token.number)
|
||||||
Transformation(parent, seen_ts[token.number], token)
|
Transformation(parent, seen_ts[token.number], token)
|
||||||
|
|
||||||
def _do_parse(self, all_tokens, seen_ts):
|
def _do_parse(all_tokens, seen_ts, parent_to, text, indent):
|
||||||
tokens = list(tokenize(self._text, self._indent, self._parent_to.start))
|
"""Recursive function that actually creates the objects."""
|
||||||
|
tokens = list(tokenize(text, indent, parent_to.start))
|
||||||
for token in tokens:
|
for token in tokens:
|
||||||
all_tokens.append((self._parent_to, token))
|
all_tokens.append((parent_to, token))
|
||||||
|
|
||||||
if isinstance(token, TabStopToken):
|
if isinstance(token, TabStopToken):
|
||||||
ts = TabStop(self._parent_to, token)
|
ts = TabStop(parent_to, token)
|
||||||
seen_ts[token.number] = ts
|
seen_ts[token.number] = ts
|
||||||
|
|
||||||
k = TOParser(ts, token.initial_text, self._indent)
|
_do_parse(all_tokens, seen_ts, ts, token.initial_text, indent)
|
||||||
k._do_parse(all_tokens, seen_ts)
|
|
||||||
else:
|
else:
|
||||||
klass = self.TOKEN2TO.get(token.__class__, None)
|
klass = _TOKEN_TO_TEXTOBJECT.get(token.__class__, None)
|
||||||
if klass is not None:
|
if klass is not None:
|
||||||
klass(self._parent_to, token)
|
klass(parent_to, token)
|
||||||
|
|
||||||
|
def parse_text_object(parent_to, text, indent):
|
||||||
|
"""Parses a text object from 'text' assuming the current 'indent'. Will
|
||||||
|
instantiate all the objects and link them as childs to parent_to. Will also
|
||||||
|
put the initial text into Vim."""
|
||||||
|
seen_ts = {}
|
||||||
|
all_tokens = []
|
||||||
|
|
||||||
|
_do_parse(all_tokens, seen_ts, parent_to, text, indent)
|
||||||
|
_resolve_ambiguity(all_tokens, seen_ts)
|
||||||
|
_create_transformations(all_tokens, seen_ts)
|
||||||
|
|
||||||
|
if 0 not in seen_ts:
|
||||||
|
mark = all_tokens[-1][1].end # Last token is always EndOfText
|
||||||
|
m1 = Position(mark.line, mark.col)
|
||||||
|
TabStop(parent_to, 0, mark, m1)
|
||||||
|
parent_to.replace_initial_text()
|
||||||
|
@ -9,7 +9,7 @@ from UltiSnips.position import Position
|
|||||||
import UltiSnips._vim as _vim
|
import UltiSnips._vim as _vim
|
||||||
from UltiSnips.text_objects._base import EditableTextObject, \
|
from UltiSnips.text_objects._base import EditableTextObject, \
|
||||||
NoneditableTextObject
|
NoneditableTextObject
|
||||||
from UltiSnips.text_objects._parser import TOParser
|
from UltiSnips.text_objects._parser import parse_text_object
|
||||||
|
|
||||||
class SnippetInstance(EditableTextObject):
|
class SnippetInstance(EditableTextObject):
|
||||||
"""See module docstring."""
|
"""See module docstring."""
|
||||||
@ -29,7 +29,7 @@ class SnippetInstance(EditableTextObject):
|
|||||||
|
|
||||||
EditableTextObject.__init__(self, parent, start, end, initial_text)
|
EditableTextObject.__init__(self, parent, start, end, initial_text)
|
||||||
|
|
||||||
TOParser(self, initial_text, indent).parse(True)
|
parse_text_object(self, initial_text, indent)
|
||||||
|
|
||||||
self.update_textobjects()
|
self.update_textobjects()
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user