YouCompleteMe/python/ycm/completers/all/identifier_completer.py


#!/usr/bin/env python
#
# Copyright (C) 2011, 2012 Strahinja Val Markovic <val@markovic.io>
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
import os
import ycm_core
from collections import defaultdict
from ycm.completers.general_completer import GeneralCompleter
# from ycm.completers.general import syntax_parse
from ycm import utils
from ycm import server_responses

MAX_IDENTIFIER_COMPLETIONS_RETURNED = 10
SYNTAX_FILENAME = 'YCM_PLACEHOLDER_FOR_SYNTAX'

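# General (non-semantic) completer that offers identifiers collected from the
# current buffer and from ctags-style tag files as completion candidates.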
class IdentifierCompleter( GeneralCompleter ):
  def __init__( self, user_options ):
    super( IdentifierCompleter, self ).__init__( user_options )
    self.completer = ycm_core.IdentifierCompleter()
    self.completer.EnableThreading()
    self.tags_file_last_mtime = defaultdict( int )
    self.filetypes_with_keywords_loaded = set()

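  # Identifier completion is only offered once the query is long enough; the
  # actual length check is QueryLengthAboveMinThreshold, inherited from the
  # base completer.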
  def ShouldUseNow( self, request_data ):
    return self.QueryLengthAboveMinThreshold( request_data )

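  # Starts an asynchronous search of the identifier database for the current
  # query; the results are collected later in CandidatesFromStoredRequest.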
  def CandidatesForQueryAsync( self, request_data ):
    self.completions_future = self.completer.CandidatesForQueryAndTypeAsync(
      utils.SanitizeQuery( request_data[ 'query' ] ),
      request_data[ 'filetypes' ][ 0 ] )

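  # Adds a single identifier to the database, keyed on the buffer's filetype
  # and filepath.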
  def AddIdentifier( self, identifier, request_data ):
    filetype = request_data[ 'filetypes' ][ 0 ]
    filepath = request_data[ 'filepath' ]

    if not filetype or not filepath or not identifier:
      return

    vector = ycm_core.StringVec()
    vector.append( identifier )
    self.completer.AddIdentifiersToDatabase( vector,
                                             filetype,
                                             filepath )

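  # Adds the identifier immediately preceding the cursor; called when the user
  # finishes typing an identifier (see OnCurrentIdentifierFinished).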
  def AddPreviousIdentifier( self, request_data ):
    self.AddIdentifier(
      _PreviousIdentifier(
        self.user_options[ 'min_num_of_chars_for_completion' ],
        request_data ),
      request_data )

  def AddIdentifierUnderCursor( self, request_data ):
    cursor_identifier = _GetCursorIdentifier( request_data )
    if not cursor_identifier:
      return

    self.AddIdentifier( cursor_identifier, request_data )

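  # Asynchronously collects every identifier in the given buffer, optionally
  # including identifiers found inside comments and strings.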
  def AddBufferIdentifiers( self, request_data ):
    filetype = request_data[ 'filetypes' ][ 0 ]
    filepath = request_data[ 'filepath' ]
    collect_from_comments_and_strings = bool( self.user_options[
      'collect_identifiers_from_comments_and_strings' ] )

    if not filetype or not filepath:
      return

    text = request_data[ 'file_data' ][ filepath ][ 'contents' ]
    self.completer.AddIdentifiersToDatabaseFromBufferAsync(
      text,
      filetype,
      filepath,
      collect_from_comments_and_strings )

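  # Feeds identifiers from the given tag files into the database; a tag file is
  # re-read only if its modification time has changed since the last pass.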
  def AddIdentifiersFromTagFiles( self, tag_files ):
    absolute_paths_to_tag_files = ycm_core.StringVec()
    for tag_file in tag_files:
      try:
        current_mtime = os.path.getmtime( tag_file )
      except:
        continue
      last_mtime = self.tags_file_last_mtime[ tag_file ]

      # We don't want to repeatedly process the same file over and over; we
      # only process if it's changed since the last time we looked at it
      if current_mtime <= last_mtime:
        continue

      self.tags_file_last_mtime[ tag_file ] = current_mtime
      absolute_paths_to_tag_files.append( tag_file )

    if not absolute_paths_to_tag_files:
      return

    self.completer.AddIdentifiersToDatabaseFromTagFilesAsync(
      absolute_paths_to_tag_files )

  # def AddIdentifiersFromSyntax( self ):
  #   filetype = vim.eval( "&filetype" )
  #   if filetype in self.filetypes_with_keywords_loaded:
  #     return
  #   self.filetypes_with_keywords_loaded.add( filetype )
  #   keyword_set = syntax_parse.SyntaxKeywordsForCurrentBuffer()
  #   keywords = ycm_core.StringVec()
  #   for keyword in keyword_set:
  #     keywords.append( keyword )
  #   filepath = SYNTAX_FILENAME + filetype
  #   self.completer.AddIdentifiersToDatabase( keywords,
  #                                            filetype,
  #                                            filepath )

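  # Called whenever a buffer is ready to be (re)parsed; refreshes identifiers
  # from the buffer contents and from any tag files named in the request.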
  def OnFileReadyToParse( self, request_data ):
    self.AddBufferIdentifiers( request_data )
    if 'tag_files' in request_data:
      self.AddIdentifiersFromTagFiles( request_data[ 'tag_files' ] )
    # self.AddIdentifiersFromSyntax()

  def OnInsertLeave( self, request_data ):
    self.AddIdentifierUnderCursor( request_data )

  def OnCurrentIdentifierFinished( self, request_data ):
    self.AddPreviousIdentifier( request_data )

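  # Collects the results of the last CandidatesForQueryAsync call, capped at
  # MAX_IDENTIFIER_COMPLETIONS_RETURNED and with overly short candidates
  # removed.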
  def CandidatesFromStoredRequest( self ):
    if not self.completions_future:
      return []
    completions = self.completions_future.GetResults()[
      : MAX_IDENTIFIER_COMPLETIONS_RETURNED ]

    completions = _RemoveSmallCandidates(
      completions, self.user_options[ 'min_num_identifier_candidate_chars' ] )

    return [ server_responses.BuildCompletionData( x ) for x in completions ]


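# Returns the identifier immediately before the cursor, falling back to the
# previous line when no identifier is found before the cursor on the current
# one. Returns "" if the identifier is shorter than
# min_num_completion_start_chars.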
def _PreviousIdentifier( min_num_completion_start_chars, request_data ):
  line_num = request_data[ 'line_num' ]
  column_num = request_data[ 'column_num' ]
  filepath = request_data[ 'filepath' ]
  contents_per_line = (
    request_data[ 'file_data' ][ filepath ][ 'contents' ].split( '\n' ) )
  line = contents_per_line[ line_num ]

  end_column = column_num
  while end_column > 0 and not utils.IsIdentifierChar( line[ end_column - 1 ] ):
    end_column -= 1

  # Look at the previous line if we reached the end of the current one
  if end_column == 0:
    try:
      line = contents_per_line[ line_num - 1 ]
    except:
      return ""
    end_column = len( line )
    while end_column > 0 and not utils.IsIdentifierChar(
        line[ end_column - 1 ] ):
      end_column -= 1

  start_column = end_column
  while start_column > 0 and utils.IsIdentifierChar( line[ start_column - 1 ] ):
    start_column -= 1

  if end_column - start_column < min_num_completion_start_chars:
    return ""

  return line[ start_column : end_column ]


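# Drops candidates shorter than the configured minimum length; a minimum of 0
# disables the filtering.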
def _RemoveSmallCandidates( candidates, min_num_candidate_size_chars ):
  if min_num_candidate_size_chars == 0:
    return candidates

  return [ x for x in candidates if len( x ) >= min_num_candidate_size_chars ]


# This is meant to behave like 'expand("<cword>")' in Vim, thus starting at the
# cursor column and returning the "cursor word". If the cursor is not on a
# valid identifier character, it searches forward until one is found.
def _GetCursorIdentifier( request_data ):
  def FindFirstValidChar( line, column ):
    current_column = column
    while not utils.IsIdentifierChar( line[ current_column ] ):
      current_column += 1
    return current_column

  def FindIdentifierStart( line, valid_char_column ):
    identifier_start = valid_char_column
    while identifier_start > 0 and utils.IsIdentifierChar( line[
        identifier_start - 1 ] ):
      identifier_start -= 1
    return identifier_start

  def FindIdentifierEnd( line, valid_char_column ):
    identifier_end = valid_char_column
    while identifier_end < len( line ) - 1 and utils.IsIdentifierChar( line[
        identifier_end + 1 ] ):
      identifier_end += 1
    return identifier_end + 1

  column_num = request_data[ 'column_num' ]
  line = request_data[ 'line_value' ]

  try:
    valid_char_column = FindFirstValidChar( line, column_num )
    return line[ FindIdentifierStart( line, valid_char_column ) :
                 FindIdentifierEnd( line, valid_char_column ) ]
  except:
    return ''