Mirror of https://github.com/microsoft/TypeScript.git, synced 2026-02-04 12:32:08 -06:00
Merge pull request #2977 from Microsoft/getClassifications2
Add a common, dense format for classification operations to lower the cost of processing on the host side.
Commit: 3bd4dd4095
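The diff below replaces per-span result objects with a single flat number array: each classified span contributes three consecutive entries, [start, length, ClassificationType], and the result carries one endOfLineState value, so the host no longer has to deserialize one object per span. As orientation (not part of the patch), here is a minimal sketch of how a consumer might decode that dense format; the enum values mirror the ClassificationType introduced in the diff, while the Classifications and DecodedSpan shapes and the decodeClassifications helper are simplified stand-ins invented for illustration.

// Simplified stand-ins for the language service types touched by this change;
// the real definitions live in the TypeScript services layer.
enum ClassificationType {
    comment = 1,
    identifier = 2,
    keyword = 3,
    numericLiteral = 4,
    operator = 5,
    stringLiteral = 6,
    regularExpressionLiteral = 7,
    whiteSpace = 8,
    text = 9,
    punctuation = 10,
    className = 11,
    enumName = 12,
    interfaceName = 13,
    moduleName = 14,
    typeParameterName = 15,
    typeAliasName = 16,
    parameterName = 17
}

interface Classifications {
    // Encoded as triples of [start, length, ClassificationType].
    spans: number[];
    // EndOfLineState in the real API; kept as a number here for simplicity.
    endOfLineState: number;
}

interface DecodedSpan {
    start: number;
    length: number;
    type: ClassificationType;
}

// Walk the dense array three entries at a time and rebuild one object per span.
function decodeClassifications(classifications: Classifications): DecodedSpan[] {
    const dense = classifications.spans;
    if (dense.length % 3 !== 0) {
        throw new Error("spans must come in [start, length, type] triples");
    }
    const result: DecodedSpan[] = [];
    for (let i = 0; i < dense.length; i += 3) {
        result.push({ start: dense[i], length: dense[i + 1], type: dense[i + 2] });
    }
    return result;
}

// Example: "var x" could be encoded as one keyword span and one identifier span.
const example: Classifications = {
    spans: [0, 3, ClassificationType.keyword, 4, 1, ClassificationType.identifier],
    endOfLineState: 0
};
console.log(decodeClassifications(example));

The getClassificationTypeName switch added in the diff performs the reverse mapping back to the string names that the older ClassifiedSpan-based API continues to expose.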
@@ -241,6 +241,9 @@ module Harness.LanguageService {
class ClassifierShimProxy implements ts.Classifier {
constructor(private shim: ts.ClassifierShim) {
}
getEncodedLexicalClassifications(text: string, lexState: ts.EndOfLineState, classifyKeywordsInGenerics?: boolean): ts.Classifications {
throw new Error("NYI");
}
getClassificationsForLine(text: string, lexState: ts.EndOfLineState, classifyKeywordsInGenerics?: boolean): ts.ClassificationResult {
var result = this.shim.getClassificationsForLine(text, lexState, classifyKeywordsInGenerics).split('\n');
var entries: ts.ClassificationInfo[] = [];
@@ -306,6 +309,12 @@ module Harness.LanguageService {
getSemanticClassifications(fileName: string, span: ts.TextSpan): ts.ClassifiedSpan[] {
return unwrapJSONCallResult(this.shim.getSemanticClassifications(fileName, span.start, span.length));
}
getEncodedSyntacticClassifications(fileName: string, span: ts.TextSpan): ts.Classifications {
return unwrapJSONCallResult(this.shim.getEncodedSyntacticClassifications(fileName, span.start, span.length));
}
getEncodedSemanticClassifications(fileName: string, span: ts.TextSpan): ts.Classifications {
return unwrapJSONCallResult(this.shim.getEncodedSemanticClassifications(fileName, span.start, span.length));
}
getCompletionsAtPosition(fileName: string, position: number): ts.CompletionInfo {
return unwrapJSONCallResult(this.shim.getCompletionsAtPosition(fileName, position));
}
@@ -533,6 +533,14 @@ module ts.server {
throw new Error("Not Implemented Yet.");
}

getEncodedSyntacticClassifications(fileName: string, span: TextSpan): Classifications {
throw new Error("Not Implemented Yet.");
}

getEncodedSemanticClassifications(fileName: string, span: TextSpan): Classifications {
throw new Error("Not Implemented Yet.");
}

getProgram(): Program {
throw new Error("SourceFile objects are not serializable through the server protocol.");
}

@@ -197,6 +197,9 @@ module ts {
let list = createNode(SyntaxKind.SyntaxList, nodes.pos, nodes.end, NodeFlags.Synthetic, this);
list._children = [];
let pos = nodes.pos;



for (let node of nodes) {
if (pos < node.pos) {
pos = this.addSyntheticNodes(list._children, pos, node.pos);
@@ -969,9 +972,20 @@ module ts {
getSemanticDiagnostics(fileName: string): Diagnostic[];
getCompilerOptionsDiagnostics(): Diagnostic[];

/**
* @deprecated Use getEncodedSyntacticClassifications instead.
*/
getSyntacticClassifications(fileName: string, span: TextSpan): ClassifiedSpan[];

/**
* @deprecated Use getEncodedSemanticClassifications instead.
*/
getSemanticClassifications(fileName: string, span: TextSpan): ClassifiedSpan[];

// Encoded as triples of [start, length, ClassificationType].
getEncodedSyntacticClassifications(fileName: string, span: TextSpan): Classifications;
getEncodedSemanticClassifications(fileName: string, span: TextSpan): Classifications;

getCompletionsAtPosition(fileName: string, position: number): CompletionInfo;
getCompletionEntryDetails(fileName: string, position: number, entryName: string): CompletionEntryDetails;

@@ -1015,6 +1029,11 @@ module ts {
dispose(): void;
}

export interface Classifications {
spans: number[],
endOfLineState: EndOfLineState
}

export interface ClassifiedSpan {
textSpan: TextSpan;
classificationType: string; // ClassificationTypeNames
@@ -1258,7 +1277,7 @@ module ts {
}

export const enum EndOfLineState {
Start,
None,
InMultiLineCommentTrivia,
InSingleQuoteStringLiteral,
InDoubleQuoteStringLiteral,
@@ -1308,8 +1327,10 @@ module ts {
* classifications which may be incorrectly categorized will be given
* back as Identifiers in order to allow the syntactic classifier to
* subsume the classification.
* @deprecated Use getLexicalClassifications instead.
*/
getClassificationsForLine(text: string, lexState: EndOfLineState, syntacticClassifierAbsent: boolean): ClassificationResult;
getEncodedLexicalClassifications(text: string, endOfLineState: EndOfLineState, syntacticClassifierAbsent: boolean): Classifications;
}

/**
@@ -1484,7 +1505,28 @@ module ts {
public static interfaceName = "interface name";
public static moduleName = "module name";
public static typeParameterName = "type parameter name";
public static typeAlias = "type alias name";
public static typeAliasName = "type alias name";
public static parameterName = "parameter name";
}

export const enum ClassificationType {
comment = 1,
identifier = 2,
keyword = 3,
numericLiteral = 4,
operator = 5,
stringLiteral = 6,
regularExpressionLiteral = 7,
whiteSpace = 8,
text = 9,
punctuation = 10,
className = 11,
enumName = 12,
interfaceName = 13,
moduleName = 14,
typeParameterName = 15,
typeAliasName = 16,
parameterName = 17
}

/// Language Service
@@ -5804,35 +5846,45 @@ module ts {
return NavigationBar.getNavigationBarItems(sourceFile);
}

function getSemanticClassifications(fileName: string, span: TextSpan): ClassifiedSpan[] {
function getSemanticClassifications(fileName: string, span: TextSpan): ClassifiedSpan[]{
return convertClassifications(getEncodedSemanticClassifications(fileName, span));
}

function getEncodedSemanticClassifications(fileName: string, span: TextSpan): Classifications {
synchronizeHostData();

let sourceFile = getValidSourceFile(fileName);
let typeChecker = program.getTypeChecker();

let result: ClassifiedSpan[] = [];
let result: number[] = [];
processNode(sourceFile);

return result;
return { spans: result, endOfLineState: EndOfLineState.None };

function classifySymbol(symbol: Symbol, meaningAtPosition: SemanticMeaning) {
function pushClassification(start: number, length: number, type: ClassificationType) {
result.push(start);
result.push(length);
result.push(type);
}

function classifySymbol(symbol: Symbol, meaningAtPosition: SemanticMeaning): ClassificationType {
let flags = symbol.getFlags();

if (flags & SymbolFlags.Class) {
return ClassificationTypeNames.className;
return ClassificationType.className;
}
else if (flags & SymbolFlags.Enum) {
return ClassificationTypeNames.enumName;
return ClassificationType.enumName;
}
else if (flags & SymbolFlags.TypeAlias) {
return ClassificationTypeNames.typeAlias;
return ClassificationType.typeAliasName;
}
else if (meaningAtPosition & SemanticMeaning.Type) {
if (flags & SymbolFlags.Interface) {
return ClassificationTypeNames.interfaceName;
return ClassificationType.interfaceName;
}
else if (flags & SymbolFlags.TypeParameter) {
return ClassificationTypeNames.typeParameterName;
return ClassificationType.typeParameterName;
}
}
else if (flags & SymbolFlags.Module) {
@@ -5841,7 +5893,7 @@ module ts {
// - There exists a module declaration which actually impacts the value side.
if (meaningAtPosition & SemanticMeaning.Namespace ||
(meaningAtPosition & SemanticMeaning.Value && hasValueSideModule(symbol))) {
return ClassificationTypeNames.moduleName;
return ClassificationType.moduleName;
}
}

@@ -5865,10 +5917,7 @@ module ts {
if (symbol) {
let type = classifySymbol(symbol, getMeaningFromLocation(node));
if (type) {
result.push({
textSpan: createTextSpan(node.getStart(), node.getWidth()),
classificationType: type
});
pushClassification(node.getStart(), node.getWidth(), type);
}
}
}
@@ -5878,7 +5927,46 @@ module ts {
}
}

function getSyntacticClassifications(fileName: string, span: TextSpan): ClassifiedSpan[] {
function getClassificationTypeName(type: ClassificationType) {
switch (type) {
case ClassificationType.comment: return ClassificationTypeNames.comment;
case ClassificationType.identifier: return ClassificationTypeNames.identifier;
case ClassificationType.keyword: return ClassificationTypeNames.keyword;
case ClassificationType.numericLiteral: return ClassificationTypeNames.numericLiteral;
case ClassificationType.operator: return ClassificationTypeNames.operator;
case ClassificationType.stringLiteral: return ClassificationTypeNames.stringLiteral;
case ClassificationType.whiteSpace: return ClassificationTypeNames.whiteSpace;
case ClassificationType.text: return ClassificationTypeNames.text;
case ClassificationType.punctuation: return ClassificationTypeNames.punctuation;
case ClassificationType.className: return ClassificationTypeNames.className;
case ClassificationType.enumName: return ClassificationTypeNames.enumName;
case ClassificationType.interfaceName: return ClassificationTypeNames.interfaceName;
case ClassificationType.moduleName: return ClassificationTypeNames.moduleName;
case ClassificationType.typeParameterName: return ClassificationTypeNames.typeParameterName;
case ClassificationType.typeAliasName: return ClassificationTypeNames.typeAliasName;
case ClassificationType.parameterName: return ClassificationTypeNames.parameterName;
}
}

function convertClassifications(classifications: Classifications): ClassifiedSpan[] {
Debug.assert(classifications.spans.length % 3 === 0);
let dense = classifications.spans;
let result: ClassifiedSpan[] = [];
for (let i = 0, n = dense.length; i < n; i += 3) {
result.push({
textSpan: createTextSpan(dense[i], dense[i + 1]),
classificationType: getClassificationTypeName(dense[i + 2])
});
}

return result;
}

function getSyntacticClassifications(fileName: string, span: TextSpan): ClassifiedSpan[]{
return convertClassifications(getEncodedSyntacticClassifications(fileName, span));
}

function getEncodedSyntacticClassifications(fileName: string, span: TextSpan): Classifications {
// doesn't use compiler - no need to synchronize with host
let sourceFile = syntaxTreeCache.getCurrentSourceFile(fileName);
@@ -5886,10 +5974,16 @@ module ts {
let triviaScanner = createScanner(ScriptTarget.Latest, /*skipTrivia:*/ false, sourceFile.text);
let mergeConflictScanner = createScanner(ScriptTarget.Latest, /*skipTrivia:*/ false, sourceFile.text);

let result: ClassifiedSpan[] = [];
let result: number[] = [];
processElement(sourceFile);

return result;
return { spans: result, endOfLineState: EndOfLineState.None };

function pushClassification(start: number, length: number, type: ClassificationType) {
result.push(start);
result.push(length);
result.push(type);
}

function classifyLeadingTrivia(token: Node): void {
let tokenStart = skipTrivia(sourceFile.text, token.pos, /*stopAfterLineBreak:*/ false);
@@ -5912,10 +6006,7 @@ module ts {

if (isComment(kind)) {
// Simple comment. Just add as is.
result.push({
textSpan: createTextSpan(start, width),
classificationType: ClassificationTypeNames.comment
})
pushClassification(start, width, ClassificationType.comment);
continue;
}

@@ -5926,10 +6017,7 @@ module ts {
// for the <<<<<<< and >>>>>>> markers, we just add them in as comments
// in the classification stream.
if (ch === CharacterCodes.lessThan || ch === CharacterCodes.greaterThan) {
result.push({
textSpan: createTextSpan(start, width),
classificationType: ClassificationTypeNames.comment
});
pushClassification(start, width, ClassificationType.comment);
continue;
}
@@ -5950,11 +6038,7 @@ module ts {
break;
}
}
result.push({
textSpan: createTextSpanFromBounds(start, i),
classificationType: ClassificationTypeNames.comment
});

pushClassification(start, i - start, ClassificationType.comment);
mergeConflictScanner.setTextPos(i);

while (mergeConflictScanner.getTextPos() < end) {
@@ -5969,10 +6053,7 @@ module ts {

let type = classifyTokenType(tokenKind);
if (type) {
result.push({
textSpan: createTextSpanFromBounds(start, end),
classificationType: type
});
pushClassification(start, end - start, type);
}
}
@@ -5982,10 +6063,7 @@ module ts {
if (token.getWidth() > 0) {
let type = classifyTokenType(token.kind, token);
if (type) {
result.push({
textSpan: createTextSpan(token.getStart(), token.getWidth()),
classificationType: type
});
pushClassification(token.getStart(), token.getWidth(), type);
}
}
}
@@ -5993,9 +6071,9 @@ module ts {
// for accurate classification, the actual token should be passed in. however, for
// cases like 'disabled merge code' classification, we just get the token kind and
// classify based on that instead.
function classifyTokenType(tokenKind: SyntaxKind, token?: Node): string {
function classifyTokenType(tokenKind: SyntaxKind, token?: Node): ClassificationType {
if (isKeyword(tokenKind)) {
return ClassificationTypeNames.keyword;
return ClassificationType.keyword;
}

// Special case < and > If they appear in a generic context they are punctuation,
@@ -6004,7 +6082,7 @@ module ts {
// If the node owning the token has a type argument list or type parameter list, then
// we can effectively assume that a '<' and '>' belong to those lists.
if (token && getTypeArgumentOrTypeParameterList(token.parent)) {
return ClassificationTypeNames.punctuation;
return ClassificationType.punctuation;
}
}

@@ -6015,7 +6093,7 @@ module ts {
if (token.parent.kind === SyntaxKind.VariableDeclaration ||
token.parent.kind === SyntaxKind.PropertyDeclaration ||
token.parent.kind === SyntaxKind.Parameter) {
return ClassificationTypeNames.operator;
return ClassificationType.operator;
}
}
@@ -6023,58 +6101,64 @@ module ts {
token.parent.kind === SyntaxKind.PrefixUnaryExpression ||
token.parent.kind === SyntaxKind.PostfixUnaryExpression ||
token.parent.kind === SyntaxKind.ConditionalExpression) {
return ClassificationTypeNames.operator;
return ClassificationType.operator;
}
}

return ClassificationTypeNames.punctuation;
return ClassificationType.punctuation;
}
else if (tokenKind === SyntaxKind.NumericLiteral) {
return ClassificationTypeNames.numericLiteral;
return ClassificationType.numericLiteral;
}
else if (tokenKind === SyntaxKind.StringLiteral) {
return ClassificationTypeNames.stringLiteral;
return ClassificationType.stringLiteral;
}
else if (tokenKind === SyntaxKind.RegularExpressionLiteral) {
// TODO: we should get another classification type for these literals.
return ClassificationTypeNames.stringLiteral;
return ClassificationType.stringLiteral;
}
else if (isTemplateLiteralKind(tokenKind)) {
// TODO (drosen): we should *also* get another classification type for these literals.
return ClassificationTypeNames.stringLiteral;
return ClassificationType.stringLiteral;
}
else if (tokenKind === SyntaxKind.Identifier) {
if (token) {
switch (token.parent.kind) {
case SyntaxKind.ClassDeclaration:
if ((<ClassDeclaration>token.parent).name === token) {
return ClassificationTypeNames.className;
return ClassificationType.className;
}
return;
case SyntaxKind.TypeParameter:
if ((<TypeParameterDeclaration>token.parent).name === token) {
return ClassificationTypeNames.typeParameterName;
return ClassificationType.typeParameterName;
}
return;
case SyntaxKind.InterfaceDeclaration:
if ((<InterfaceDeclaration>token.parent).name === token) {
return ClassificationTypeNames.interfaceName;
return ClassificationType.interfaceName;
}
return;
case SyntaxKind.EnumDeclaration:
if ((<EnumDeclaration>token.parent).name === token) {
return ClassificationTypeNames.enumName;
return ClassificationType.enumName;
}
return;
case SyntaxKind.ModuleDeclaration:
if ((<ModuleDeclaration>token.parent).name === token) {
return ClassificationTypeNames.moduleName;
return ClassificationType.moduleName;
}
return;
case SyntaxKind.Parameter:
if ((<ParameterDeclaration>token.parent).name === token) {
return ClassificationType.parameterName;
}
return;

}
}

return ClassificationTypeNames.text;
return ClassificationType.text;
}
}
@@ -6405,6 +6489,8 @@ module ts {
getCompilerOptionsDiagnostics,
getSyntacticClassifications,
getSemanticClassifications,
getEncodedSyntacticClassifications,
getEncodedSemanticClassifications,
getCompletionsAtPosition,
getCompletionEntryDetails,
getSignatureHelpItems,
@@ -6545,10 +6631,67 @@ module ts {
// if there are more cases we want the classifier to be better at.
return true;
}


function convertClassifications(classifications: Classifications, text: string): ClassificationResult {
var entries: ClassificationInfo[] = [];
let dense = classifications.spans;
let lastEnd = 0;

for (let i = 0, n = dense.length; i < n; i += 3) {
let start = dense[i];
let length = dense[i + 1];
let type = <ClassificationType>dense[i + 2];

// Make a whitespace entry between the last item and this one.
if (lastEnd >= 0) {
let whitespaceLength = start - lastEnd;
if (whitespaceLength > 0) {
entries.push({ length: whitespaceLength, classification: TokenClass.Whitespace });
}
}

entries.push({ length, classification: convertClassification(type) });
lastEnd = start + length;
}

let whitespaceLength = text.length - lastEnd;
if (whitespaceLength > 0) {
entries.push({ length: whitespaceLength, classification: TokenClass.Whitespace });
}

return { entries, finalLexState: classifications.endOfLineState };
}

function convertClassification(type: ClassificationType): TokenClass {
switch (type) {
case ClassificationType.comment: return TokenClass.Comment;
case ClassificationType.keyword: return TokenClass.Keyword;
case ClassificationType.numericLiteral: return TokenClass.NumberLiteral;
case ClassificationType.operator: return TokenClass.Operator;
case ClassificationType.stringLiteral: return TokenClass.StringLiteral;
case ClassificationType.whiteSpace: return TokenClass.Whitespace;
case ClassificationType.punctuation: return TokenClass.Punctuation;
case ClassificationType.identifier:
case ClassificationType.className:
case ClassificationType.enumName:
case ClassificationType.interfaceName:
case ClassificationType.moduleName:
case ClassificationType.typeParameterName:
case ClassificationType.typeAliasName:
case ClassificationType.text:
case ClassificationType.parameterName:
default:
return TokenClass.Identifier;
}
}

function getClassificationsForLine(text: string, lexState: EndOfLineState, syntacticClassifierAbsent: boolean): ClassificationResult {
return convertClassifications(getEncodedLexicalClassifications(text, lexState, syntacticClassifierAbsent), text);
}

// If there is a syntactic classifier ('syntacticClassifierAbsent' is false),
// we will be more conservative in order to avoid conflicting with the syntactic classifier.
function getClassificationsForLine(text: string, lexState: EndOfLineState, syntacticClassifierAbsent: boolean): ClassificationResult {
function getEncodedLexicalClassifications(text: string, lexState: EndOfLineState, syntacticClassifierAbsent: boolean): Classifications {
let offset = 0;
let token = SyntaxKind.Unknown;
let lastNonTriviaToken = SyntaxKind.Unknown;
@@ -6591,9 +6734,9 @@ module ts {

scanner.setText(text);

let result: ClassificationResult = {
finalLexState: EndOfLineState.Start,
entries: []
let result: Classifications = {
endOfLineState: EndOfLineState.None,
spans: []
};

// We can run into an unfortunate interaction between the lexical and syntactic classifier
@@ -6706,7 +6849,7 @@ module ts {
let start = scanner.getTokenPos();
let end = scanner.getTextPos();

addResult(end - start, classFromKind(token));
addResult(start, end, classFromKind(token));

if (end >= text.length) {
if (token === SyntaxKind.StringLiteral) {
@@ -6723,7 +6866,7 @@ module ts {
// If we have an odd number of backslashes, then the multiline string is unclosed
if (numBackslashes & 1) {
let quoteChar = tokenText.charCodeAt(0);
result.finalLexState = quoteChar === CharacterCodes.doubleQuote
result.endOfLineState = quoteChar === CharacterCodes.doubleQuote
? EndOfLineState.InDoubleQuoteStringLiteral
: EndOfLineState.InSingleQuoteStringLiteral;
}
@@ -6732,16 +6875,16 @@ module ts {
else if (token === SyntaxKind.MultiLineCommentTrivia) {
// Check to see if the multiline comment was unclosed.
if (scanner.isUnterminated()) {
result.finalLexState = EndOfLineState.InMultiLineCommentTrivia;
result.endOfLineState = EndOfLineState.InMultiLineCommentTrivia;
}
}
else if (isTemplateLiteralKind(token)) {
if (scanner.isUnterminated()) {
if (token === SyntaxKind.TemplateTail) {
result.finalLexState = EndOfLineState.InTemplateMiddleOrTail;
result.endOfLineState = EndOfLineState.InTemplateMiddleOrTail;
}
else if (token === SyntaxKind.NoSubstitutionTemplateLiteral) {
result.finalLexState = EndOfLineState.InTemplateHeadOrNoSubstitutionTemplate;
result.endOfLineState = EndOfLineState.InTemplateHeadOrNoSubstitutionTemplate;
}
else {
Debug.fail("Only 'NoSubstitutionTemplateLiteral's and 'TemplateTail's can be unterminated; got SyntaxKind #" + token);
@@ -6749,20 +6892,34 @@ module ts {
}
}
else if (templateStack.length > 0 && lastOrUndefined(templateStack) === SyntaxKind.TemplateHead) {
result.finalLexState = EndOfLineState.InTemplateSubstitutionPosition;
result.endOfLineState = EndOfLineState.InTemplateSubstitutionPosition;
}
}
}

function addResult(length: number, classification: TokenClass): void {
if (length > 0) {
// If this is the first classification we're adding to the list, then remove any
// offset we have if we were continuing a construct from the previous line.
if (result.entries.length === 0) {
length -= offset;
}
function addResult(start: number, end: number, classification: ClassificationType): void {
if (classification === ClassificationType.whiteSpace) {
// Don't bother with whitespace classifications. They're not needed.
return;
}

result.entries.push({ length: length, classification: classification });
if (start === 0 && offset > 0) {
// We're classifying the first token, and this was a case where we prepended
// text. We should consider the start of this token to be at the start of
// the original text.
start += offset;
}

// All our tokens are in relation to the augmented text. Move them back to be
// relative to the original text.
start -= offset;
end -= offset;
let length = end - start;

if (length > 0) {
result.spans.push(start);
result.spans.push(length);
result.spans.push(classification);
}
}
}
@@ -6829,41 +6986,44 @@ module ts {
return token >= SyntaxKind.FirstKeyword && token <= SyntaxKind.LastKeyword;
}

function classFromKind(token: SyntaxKind) {
function classFromKind(token: SyntaxKind): ClassificationType {
if (isKeyword(token)) {
return TokenClass.Keyword;
return ClassificationType.keyword;
}
else if (isBinaryExpressionOperatorToken(token) || isPrefixUnaryExpressionOperatorToken(token)) {
return TokenClass.Operator;
return ClassificationType.operator;
}
else if (token >= SyntaxKind.FirstPunctuation && token <= SyntaxKind.LastPunctuation) {
return TokenClass.Punctuation;
return ClassificationType.punctuation;
}

switch (token) {
case SyntaxKind.NumericLiteral:
return TokenClass.NumberLiteral;
return ClassificationType.numericLiteral;
case SyntaxKind.StringLiteral:
return TokenClass.StringLiteral;
return ClassificationType.stringLiteral;
case SyntaxKind.RegularExpressionLiteral:
return TokenClass.RegExpLiteral;
return ClassificationType.regularExpressionLiteral;
case SyntaxKind.ConflictMarkerTrivia:
case SyntaxKind.MultiLineCommentTrivia:
case SyntaxKind.SingleLineCommentTrivia:
return TokenClass.Comment;
return ClassificationType.comment;
case SyntaxKind.WhitespaceTrivia:
case SyntaxKind.NewLineTrivia:
return TokenClass.Whitespace;
return ClassificationType.whiteSpace;
case SyntaxKind.Identifier:
default:
if (isTemplateLiteralKind(token)) {
return TokenClass.StringLiteral;
return ClassificationType.stringLiteral;
}
return TokenClass.Identifier;
return ClassificationType.identifier;
}
}

return { getClassificationsForLine };
return {
getClassificationsForLine,
getEncodedLexicalClassifications
};
}

/// getDefaultLibraryFilePath
@@ -99,6 +99,8 @@ module ts {

getSyntacticClassifications(fileName: string, start: number, length: number): string;
getSemanticClassifications(fileName: string, start: number, length: number): string;
getEncodedSyntacticClassifications(fileName: string, start: number, length: number): string;
getEncodedSemanticClassifications(fileName: string, start: number, length: number): string;

getCompletionsAtPosition(fileName: string, position: number): string;
getCompletionEntryDetails(fileName: string, position: number, entryName: string): string;
@@ -189,6 +191,7 @@ module ts {
}

export interface ClassifierShim extends Shim {
getEncodedLexicalClassifications(text: string, lexState: EndOfLineState, syntacticClassifierAbsent?: boolean): string;
getClassificationsForLine(text: string, lexState: EndOfLineState, syntacticClassifierAbsent?: boolean): string;
}

@@ -199,7 +202,9 @@ module ts {
}

function logInternalError(logger: Logger, err: Error) {
logger.log("*INTERNAL ERROR* - Exception in typescript services: " + err.message);
if (logger) {
logger.log("*INTERNAL ERROR* - Exception in typescript services: " + err.message);
}
}

class ScriptSnapshotShimAdapter implements IScriptSnapshot {
@@ -321,25 +326,32 @@ module ts {
}
}

function simpleForwardCall(logger: Logger, actionDescription: string, action: () => any): any {
logger.log(actionDescription);
var start = Date.now();
var result = action();
var end = Date.now();
logger.log(actionDescription + " completed in " + (end - start) + " msec");
if (typeof (result) === "string") {
var str = <string>result;
if (str.length > 128) {
str = str.substring(0, 128) + "...";
}
logger.log(" result.length=" + str.length + ", result='" + JSON.stringify(str) + "'");
function simpleForwardCall(logger: Logger, actionDescription: string, action: () => any, noPerfLogging: boolean): any {
if (!noPerfLogging) {
logger.log(actionDescription);
var start = Date.now();
}

var result = action();

if (!noPerfLogging) {
var end = Date.now();
logger.log(actionDescription + " completed in " + (end - start) + " msec");
if (typeof (result) === "string") {
var str = <string>result;
if (str.length > 128) {
str = str.substring(0, 128) + "...";
}
logger.log(" result.length=" + str.length + ", result='" + JSON.stringify(str) + "'");
}
}

return result;
}

function forwardJSONCall(logger: Logger, actionDescription: string, action: () => any): string {
function forwardJSONCall(logger: Logger, actionDescription: string, action: () => any, noPerfLogging: boolean): string {
try {
var result = simpleForwardCall(logger, actionDescription, action);
var result = simpleForwardCall(logger, actionDescription, action, noPerfLogging);
return JSON.stringify({ result: result });
}
catch (err) {
@@ -387,7 +399,7 @@ module ts {
}

public forwardJSONCall(actionDescription: string, action: () => any): string {
return forwardJSONCall(this.logger, actionDescription, action);
return forwardJSONCall(this.logger, actionDescription, action, /*noPerfLogging:*/ false);
}

/// DISPOSE
@@ -457,6 +469,26 @@ module ts {
});
}

public getEncodedSyntacticClassifications(fileName: string, start: number, length: number): string {
return this.forwardJSONCall(
"getEncodedSyntacticClassifications('" + fileName + "', " + start + ", " + length + ")",
() => {
// directly serialize the spans out to a string. This is much faster to decode
// on the managed side versus a full JSON array.
return convertClassifications(this.languageService.getEncodedSyntacticClassifications(fileName, createTextSpan(start, length)));
});
}

public getEncodedSemanticClassifications(fileName: string, start: number, length: number): string {
return this.forwardJSONCall(
"getEncodedSemanticClassifications('" + fileName + "', " + start + ", " + length + ")",
() => {
// directly serialize the spans out to a string. This is much faster to decode
// on the managed side versus a full JSON array.
return convertClassifications(this.languageService.getEncodedSemanticClassifications(fileName, createTextSpan(start, length)));
});
}

private getNewLine(): string {
return this.host.getNewLine ? this.host.getNewLine() : "\r\n";
}
@@ -736,14 +768,24 @@ module ts {
}
}

function convertClassifications(classifications: Classifications): { spans: string, endOfLineState: EndOfLineState } {
return { spans: classifications.spans.join(","), endOfLineState: classifications.endOfLineState };
}

class ClassifierShimObject extends ShimBase implements ClassifierShim {
public classifier: Classifier;

constructor(factory: ShimFactory) {
constructor(factory: ShimFactory, private logger: Logger) {
super(factory);
this.classifier = createClassifier();
}

public getEncodedLexicalClassifications(text: string, lexState: EndOfLineState, syntacticClassifierAbsent?: boolean): string {
return forwardJSONCall(this.logger, "getEncodedLexicalClassifications",
() => convertClassifications(this.classifier.getEncodedLexicalClassifications(text, lexState, syntacticClassifierAbsent)),
/*noPerfLogging:*/ true);
}

/// COLORIZATION
public getClassificationsForLine(text: string, lexState: EndOfLineState, classifyKeywordsInGenerics?: boolean): string {
var classification = this.classifier.getClassificationsForLine(text, lexState, classifyKeywordsInGenerics);
@@ -765,7 +807,7 @@ module ts {
}

private forwardJSONCall(actionDescription: string, action: () => any): any {
return forwardJSONCall(this.logger, actionDescription, action);
return forwardJSONCall(this.logger, actionDescription, action, /*noPerfLogging:*/ false);
}

public getPreProcessedFileInfo(fileName: string, sourceTextSnapshot: IScriptSnapshot): string {
@@ -858,7 +900,7 @@ module ts {

public createClassifierShim(logger: Logger): ClassifierShim {
try {
return new ClassifierShimObject(this);
return new ClassifierShimObject(this, logger);
}
catch (err) {
logInternalError(logger, err);
@@ -651,8 +651,12 @@ module FourSlashInterface {
return getClassification("typeParameterName", text, position);
}

export function typeAlias(text: string, position?: number): { classificationType: string; text: string; textSpan?: TextSpan } {
return getClassification("typeAlias", text, position);
export function parameterName(text: string, position?: number): { classificationType: string; text: string; textSpan?: TextSpan } {
return getClassification("parameterName", text, position);
}

export function typeAliasName(text: string, position?: number): { classificationType: string; text: string; textSpan?: TextSpan } {
return getClassification("typeAliasName", text, position);
}

function getClassification(type: string, text: string, position?: number) {
@@ -7,9 +7,9 @@

var c = classification;
verify.semanticClassificationsAre(
c.typeAlias("Alias", test.marker("0").position),
c.typeAlias("Alias", test.marker("1").position),
c.typeAlias("Alias", test.marker("2").position),
c.typeAlias("Alias", test.marker("3").position),
c.typeAlias("Alias", test.marker("4").position)
c.typeAliasName("Alias", test.marker("0").position),
c.typeAliasName("Alias", test.marker("1").position),
c.typeAliasName("Alias", test.marker("2").position),
c.typeAliasName("Alias", test.marker("3").position),
c.typeAliasName("Alias", test.marker("4").position)
);
@@ -19,7 +19,7 @@ var firstCommentText =
var c = classification;
verify.syntacticClassificationsAre(
c.comment(firstCommentText),
c.keyword("function"), c.text("myFunction"), c.punctuation("("), c.comment("/* x */"), c.text("x"), c.punctuation(":"), c.keyword("any"), c.punctuation(")"), c.punctuation("{"),
c.keyword("function"), c.text("myFunction"), c.punctuation("("), c.comment("/* x */"), c.parameterName("x"), c.punctuation(":"), c.keyword("any"), c.punctuation(")"), c.punctuation("{"),
c.keyword("var"), c.text("y"), c.operator("="), c.text("x"), c.operator("?"), c.text("x"), c.operator("++"), c.operator(":"), c.operator("++"), c.text("x"), c.punctuation(";"),
c.punctuation("}"),
c.comment("// end of file"));
@@ -66,8 +66,9 @@ describe('Colorization', function () {

describe("test getClassifications", function () {
it("Returns correct token classes", function () {
debugger;
testLexicalClassification("var x: string = \"foo\"; //Hello",
ts.EndOfLineState.Start,
ts.EndOfLineState.None,
keyword("var"),
whitespace(" "),
identifier("x"),
@@ -81,7 +82,7 @@ describe('Colorization', function () {

it("correctly classifies a comment after a divide operator", function () {
testLexicalClassification("1 / 2 // comment",
ts.EndOfLineState.Start,
ts.EndOfLineState.None,
numberLiteral("1"),
whitespace(" "),
operator("/"),
@@ -91,7 +92,7 @@ describe('Colorization', function () {

it("correctly classifies a literal after a divide operator", function () {
testLexicalClassification("1 / 2, 3 / 4",
ts.EndOfLineState.Start,
ts.EndOfLineState.None,
numberLiteral("1"),
whitespace(" "),
operator("/"),
@@ -103,40 +104,41 @@ describe('Colorization', function () {

it("correctly classifies a multi-line string with one backslash", function () {
testLexicalClassification("'line1\\",
ts.EndOfLineState.Start,
ts.EndOfLineState.None,
stringLiteral("'line1\\"),
finalEndOfLineState(ts.EndOfLineState.InSingleQuoteStringLiteral));
});

it("correctly classifies a multi-line string with three backslashes", function () {
testLexicalClassification("'line1\\\\\\",
ts.EndOfLineState.Start,
ts.EndOfLineState.None,
stringLiteral("'line1\\\\\\"),
finalEndOfLineState(ts.EndOfLineState.InSingleQuoteStringLiteral));
});

it("correctly classifies an unterminated single-line string with no backslashes", function () {
testLexicalClassification("'line1",
ts.EndOfLineState.Start,
ts.EndOfLineState.None,
stringLiteral("'line1"),
finalEndOfLineState(ts.EndOfLineState.Start));
finalEndOfLineState(ts.EndOfLineState.None));
});

it("correctly classifies an unterminated single-line string with two backslashes", function () {
testLexicalClassification("'line1\\\\",
ts.EndOfLineState.Start,
ts.EndOfLineState.None,
stringLiteral("'line1\\\\"),
finalEndOfLineState(ts.EndOfLineState.Start));
finalEndOfLineState(ts.EndOfLineState.None));
});

it("correctly classifies an unterminated single-line string with four backslashes", function () {
testLexicalClassification("'line1\\\\\\\\",
ts.EndOfLineState.Start,
ts.EndOfLineState.None,
stringLiteral("'line1\\\\\\\\"),
finalEndOfLineState(ts.EndOfLineState.Start));
finalEndOfLineState(ts.EndOfLineState.None));
});

it("correctly classifies the continuing line of a multi-line string ending in one backslash", function () {
debugger;
testLexicalClassification("\\",
ts.EndOfLineState.InDoubleQuoteStringLiteral,
stringLiteral("\\"),
@@ -154,33 +156,33 @@ describe('Colorization', function () {
testLexicalClassification(" ",
ts.EndOfLineState.InDoubleQuoteStringLiteral,
stringLiteral(" "),
finalEndOfLineState(ts.EndOfLineState.Start));
finalEndOfLineState(ts.EndOfLineState.None));
});

it("correctly classifies the last line of an unterminated multi-line string ending in two backslashes", function () {
testLexicalClassification("\\\\",
ts.EndOfLineState.InDoubleQuoteStringLiteral,
stringLiteral("\\\\"),
finalEndOfLineState(ts.EndOfLineState.Start));
finalEndOfLineState(ts.EndOfLineState.None));
});

it("correctly classifies the last line of an unterminated multi-line string ending in four backslashes", function () {
testLexicalClassification("\\\\\\\\",
ts.EndOfLineState.InDoubleQuoteStringLiteral,
stringLiteral("\\\\\\\\"),
finalEndOfLineState(ts.EndOfLineState.Start));
finalEndOfLineState(ts.EndOfLineState.None));
});

it("correctly classifies the last line of a multi-line string", function () {
testLexicalClassification("'",
ts.EndOfLineState.InSingleQuoteStringLiteral,
stringLiteral("'"),
finalEndOfLineState(ts.EndOfLineState.Start));
finalEndOfLineState(ts.EndOfLineState.None));
});

it("correctly classifies an unterminated multiline comment", function () {
testLexicalClassification("/*",
ts.EndOfLineState.Start,
ts.EndOfLineState.None,
comment("/*"),
finalEndOfLineState(ts.EndOfLineState.InMultiLineCommentTrivia));
});
@@ -189,7 +191,7 @@ describe('Colorization', function () {
testLexicalClassification(" */ ",
ts.EndOfLineState.InMultiLineCommentTrivia,
comment(" */"),
finalEndOfLineState(ts.EndOfLineState.Start));
finalEndOfLineState(ts.EndOfLineState.None));
});

it("correctly classifies the continuation of a multiline comment", function () {
@@ -201,33 +203,33 @@ describe('Colorization', function () {

it("correctly classifies an unterminated multiline comment on a line ending in '/*/'", function () {
testLexicalClassification(" /*/",
ts.EndOfLineState.Start,
ts.EndOfLineState.None,
comment("/*/"),
finalEndOfLineState(ts.EndOfLineState.InMultiLineCommentTrivia));
});

it("correctly classifies an unterminated multiline comment with trailing space", function () {
testLexicalClassification("/* ",
ts.EndOfLineState.Start,
ts.EndOfLineState.None,
comment("/* "),
finalEndOfLineState(ts.EndOfLineState.InMultiLineCommentTrivia));
});

it("correctly classifies a keyword after a dot", function () {
testLexicalClassification("a.var",
ts.EndOfLineState.Start,
ts.EndOfLineState.None,
identifier("var"));
});

it("correctly classifies a string literal after a dot", function () {
testLexicalClassification("a.\"var\"",
ts.EndOfLineState.Start,
ts.EndOfLineState.None,
stringLiteral("\"var\""));
});

it("correctly classifies a keyword after a dot separated by comment trivia", function () {
testLexicalClassification("a./*hello world*/ var",
ts.EndOfLineState.Start,
ts.EndOfLineState.None,
identifier("a"),
punctuation("."),
comment("/*hello world*/"),
@@ -236,41 +238,41 @@ describe('Colorization', function () {

it("classifies a property access with whitespace around the dot", function () {
testLexicalClassification(" x .\tfoo ()",
ts.EndOfLineState.Start,
ts.EndOfLineState.None,
identifier("x"),
identifier("foo"));
});

it("classifies a keyword after a dot on previous line", function () {
testLexicalClassification("var",
ts.EndOfLineState.Start,
ts.EndOfLineState.None,
keyword("var"),
finalEndOfLineState(ts.EndOfLineState.Start));
finalEndOfLineState(ts.EndOfLineState.None));
});

it("classifies multiple keywords properly", function () {
testLexicalClassification("public static",
ts.EndOfLineState.Start,
ts.EndOfLineState.None,
keyword("public"),
keyword("static"),
finalEndOfLineState(ts.EndOfLineState.Start));
finalEndOfLineState(ts.EndOfLineState.None));

testLexicalClassification("public var",
ts.EndOfLineState.Start,
ts.EndOfLineState.None,
keyword("public"),
identifier("var"),
finalEndOfLineState(ts.EndOfLineState.Start));
finalEndOfLineState(ts.EndOfLineState.None));
});

it("classifies a single line no substitution template string correctly", () => {
testLexicalClassification("`number number public string`",
ts.EndOfLineState.Start,
ts.EndOfLineState.None,
stringLiteral("`number number public string`"),
finalEndOfLineState(ts.EndOfLineState.Start));
finalEndOfLineState(ts.EndOfLineState.None));
});
it("classifies substitution parts of a template string correctly", () => {
testLexicalClassification("`number '${ 1 + 1 }' string '${ 'hello' }'`",
ts.EndOfLineState.Start,
ts.EndOfLineState.None,
stringLiteral("`number '${"),
numberLiteral("1"),
operator("+"),
@@ -278,11 +280,11 @@ describe('Colorization', function () {
stringLiteral("}' string '${"),
stringLiteral("'hello'"),
stringLiteral("}'`"),
finalEndOfLineState(ts.EndOfLineState.Start));
finalEndOfLineState(ts.EndOfLineState.None));
});
it("classifies an unterminated no substitution template string correctly", () => {
testLexicalClassification("`hello world",
ts.EndOfLineState.Start,
ts.EndOfLineState.None,
stringLiteral("`hello world"),
finalEndOfLineState(ts.EndOfLineState.InTemplateHeadOrNoSubstitutionTemplate));
});
@@ -308,7 +310,7 @@ describe('Colorization', function () {
testLexicalClassification("...`",
ts.EndOfLineState.InTemplateHeadOrNoSubstitutionTemplate,
stringLiteral("...`"),
finalEndOfLineState(ts.EndOfLineState.Start));
finalEndOfLineState(ts.EndOfLineState.None));
});
it("classifies the substitution parts and middle/tail of a multiline template string", () => {
testLexicalClassification("${ 1 + 1 }...`",
@@ -318,7 +320,7 @@ describe('Colorization', function () {
operator("+"),
numberLiteral("1"),
stringLiteral("}...`"),
finalEndOfLineState(ts.EndOfLineState.Start));
finalEndOfLineState(ts.EndOfLineState.None));
});
it("classifies a template middle and propagates the end of line state",() => {
testLexicalClassification("${ 1 + 1 }...`",
@@ -328,7 +330,7 @@ describe('Colorization', function () {
operator("+"),
numberLiteral("1"),
stringLiteral("}...`"),
finalEndOfLineState(ts.EndOfLineState.Start));
finalEndOfLineState(ts.EndOfLineState.None));
});
it("classifies substitution expressions with curly braces appropriately", () => {
var pos = 0;
@@ -349,7 +351,7 @@ describe('Colorization', function () {
stringLiteral(track(" ", "`1`"), pos),
punctuation(track(" ", "}"), pos),
stringLiteral(track(" ", "}...`"), pos),
finalEndOfLineState(ts.EndOfLineState.Start));
finalEndOfLineState(ts.EndOfLineState.None));

// Adjusts 'pos' by accounting for the length of each portion of the string,
// but only return the last given string
@@ -364,22 +366,22 @@ describe('Colorization', function () {

it("classifies partially written generics correctly.", function () {
testLexicalClassification("Foo<number",
ts.EndOfLineState.Start,
ts.EndOfLineState.None,
identifier("Foo"),
operator("<"),
identifier("number"),
finalEndOfLineState(ts.EndOfLineState.Start));
finalEndOfLineState(ts.EndOfLineState.None));

// Looks like a cast, should get classified as a keyword.
testLexicalClassification("<number",
ts.EndOfLineState.Start,
ts.EndOfLineState.None,
operator("<"),
keyword("number"),
finalEndOfLineState(ts.EndOfLineState.Start));
finalEndOfLineState(ts.EndOfLineState.None));

// handle nesting properly.
testLexicalClassification("Foo<Foo,Foo<number",
ts.EndOfLineState.Start,
ts.EndOfLineState.None,
identifier("Foo"),
operator("<"),
identifier("Foo"),
@@ -387,7 +389,7 @@ describe('Colorization', function () {
identifier("Foo"),
operator("<"),
identifier("number"),
finalEndOfLineState(ts.EndOfLineState.Start));
finalEndOfLineState(ts.EndOfLineState.None));
});
it("LexicallyClassifiesConflictTokens", () => {
|
||||
@ -400,7 +402,7 @@ describe('Colorization', function () {
|
||||
v = 2;\r\n\
|
||||
>>>>>>> Branch - a\r\n\
|
||||
}",
|
||||
ts.EndOfLineState.Start,
|
||||
ts.EndOfLineState.None,
|
||||
keyword("class"),
|
||||
identifier("C"),
|
||||
punctuation("{"),
|
||||
@ -412,7 +414,7 @@ describe('Colorization', function () {
|
||||
comment("=======\r\n v = 2;\r\n"),
|
||||
comment(">>>>>>> Branch - a"),
|
||||
punctuation("}"),
|
||||
finalEndOfLineState(ts.EndOfLineState.Start));
|
||||
finalEndOfLineState(ts.EndOfLineState.None));
|
||||
|
||||
testLexicalClassification(
|
||||
"<<<<<<< HEAD\r\n\
|
||||
@ -420,7 +422,7 @@ class C { }\r\n\
|
||||
=======\r\n\
|
||||
class D { }\r\n\
|
||||
>>>>>>> Branch - a\r\n",
|
||||
ts.EndOfLineState.Start,
|
||||
ts.EndOfLineState.None,
|
||||
comment("<<<<<<< HEAD"),
|
||||
keyword("class"),
|
||||
identifier("C"),
|
||||
@ -428,12 +430,12 @@ class D { }\r\n\
|
||||
punctuation("}"),
|
||||
comment("=======\r\nclass D { }\r\n"),
|
||||
comment(">>>>>>> Branch - a"),
|
||||
finalEndOfLineState(ts.EndOfLineState.Start));
|
||||
finalEndOfLineState(ts.EndOfLineState.None));
|
||||
});
|
||||
|
||||
it("'of' keyword", function () {
|
||||
testLexicalClassification("for (var of of of) { }",
|
||||
ts.EndOfLineState.Start,
|
||||
ts.EndOfLineState.None,
|
||||
keyword("for"),
|
||||
punctuation("("),
|
||||
keyword("var"),
|
||||
@ -443,7 +445,7 @@ class D { }\r\n\
|
||||
punctuation(")"),
|
||||
punctuation("{"),
|
||||
punctuation("}"),
|
||||
finalEndOfLineState(ts.EndOfLineState.Start));
|
||||
finalEndOfLineState(ts.EndOfLineState.None));
|
||||
});
|
||||
});
|
||||
});
|
||||
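One detail worth noting about the shim layer in the diff above: its convertClassifications joins the span triples into a single comma-separated string before forwardJSONCall wraps the value in JSON, so the managed host decodes one string split instead of a large JSON array of objects. The following sketch is a hypothetical host-side counterpart, not code from the patch; the response shape matches what the shim emits, but the interface and helper names are invented for illustration.

// Hypothetical host-side helper: the shim returns JSON shaped like
// {"result":{"spans":"0,3,3,4,1,2","endOfLineState":0}}.
interface EncodedClassificationsResponse {
    result: { spans: string; endOfLineState: number };
}

function parseEncodedClassifications(json: string): { spans: number[]; endOfLineState: number } {
    const response: EncodedClassificationsResponse = JSON.parse(json);
    // Split the comma-joined triples back into numbers; an empty string means no spans.
    const spans = response.result.spans.length === 0
        ? []
        : response.result.spans.split(",").map(s => parseInt(s, 10));
    return { spans, endOfLineState: response.result.endOfLineState };
}

// Example round-trip with a hand-written payload.
const payload = JSON.stringify({ result: { spans: "0,3,3,4,1,2", endOfLineState: 0 } });
console.log(parseEncodedClassifications(payload));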