|
| 1 | +from functools import lru_cache |
| 2 | +from sqlglot import Dialect, Tokenizer |
| 3 | +from sqlmesh.lsp.custom import AllModelsResponse |
| 4 | +import typing as t |
| 5 | +from sqlmesh.lsp.context import LSPContext |
| 6 | + |
| 7 | + |
def get_sql_completions(context: t.Optional[LSPContext], file_uri: str) -> AllModelsResponse:
    """
    Build the completion payload for a given file.

    Combines the completable model names and the SQL keywords relevant
    to the file into a single response.
    """
    model_names = get_models(context, file_uri)
    keywords = get_keywords(context, file_uri)
    return AllModelsResponse(models=list(model_names), keywords=list(keywords))
| 16 | + |
| 17 | + |
def get_models(context: t.Optional[LSPContext], file_uri: t.Optional[str]) -> t.Set[str]:
    """
    Return the set of model names that can be completed in a given file.

    If there is no context, return an empty set.
    If there is a context, return all known models minus the ones the file
    itself defines, so a model is not offered as a completion to itself.
    """
    if context is None:
        return set()
    all_models = {model for models in context.map.values() for model in models}
    if file_uri is not None:
        # Use .get so a file unknown to the context (e.g. a new, unsaved
        # buffer) yields the full model list instead of raising KeyError.
        all_models.difference_update(context.map.get(file_uri, ()))
    return all_models
| 33 | + |
| 34 | + |
def get_keywords(context: t.Optional[LSPContext], file_uri: t.Optional[str]) -> t.Set[str]:
    """
    Return the set of SQL keywords for a given file.

    If no context is provided, return ANSI SQL keywords.

    If a context is provided but no file_uri is provided (or the file is
    unknown to the context, or its model has no dialect), return the
    keywords for the default dialect of the context.

    If both a context and a file_uri are provided, return the keywords
    for the dialect of the model that the file belongs to.
    """
    if context is not None and file_uri is not None:
        # Use .get so a file unknown to the context falls through to the
        # default-dialect branch instead of raising KeyError.
        models = context.map.get(file_uri)
        if models:
            model_from_context = context.context.get_model(models[0])
            if model_from_context is not None and model_from_context.dialect:
                return get_keywords_from_tokenizer(model_from_context.dialect)
    if context is not None:
        return get_keywords_from_tokenizer(context.context.default_dialect)
    return get_keywords_from_tokenizer(None)
| 57 | + |
| 58 | + |
@lru_cache()
def get_keywords_from_tokenizer(dialect: t.Optional[str] = None) -> t.Set[str]:
    """
    Return the set of SQL keywords for a given dialect.

    This is kept separate from a direct use of Tokenizer.KEYWORDS.keys()
    because multi-word keywords such as "ORDER BY" need to be expanded
    into their individual parts ("ORDER", "BY").
    """
    tokenizer_class = Tokenizer
    if dialect is not None:
        try:
            tokenizer_class = Dialect.get_or_raise(dialect).tokenizer_class
        except Exception:
            # Unknown dialect: best-effort fallback to the ANSI tokenizer.
            pass

    # Iterating the KEYWORDS dict yields its keys; split multi-word ones.
    return {part for keyword in tokenizer_class.KEYWORDS for part in keyword.split(" ")}
0 commit comments