Merge pull request #1239 from Microsoft/incrementalParser

Change how the incremental parser works.
This commit is contained in:
CyrusNajmabadi 2014-11-24 17:47:30 -08:00
commit 99eb271e78
5 changed files with 223 additions and 360 deletions

View File

@ -4,8 +4,10 @@ module TypeScript.IncrementalParser {
interface IParserRewindPoint {
// Information used by the incremental parser source.
oldSourceUnitCursor: SyntaxCursor;
changeDelta: number;
changeRange: TextChangeRange;
}
interface ISyntaxElementInternal extends ISyntaxElement {
intersectsChange: boolean;
}
// Parser source used in incremental scenarios. This parser source wraps an old tree, text
@ -21,9 +23,6 @@ module TypeScript.IncrementalParser {
// prevent this level of reuse include substantially destructive operations like introducing
// "/*" without a "*/" nearby to terminate the comment.
function createParserSource(oldSyntaxTree: SyntaxTree, textChangeRange: TextChangeRange, text: ISimpleText): Parser.IParserSource {
var fileName = oldSyntaxTree.fileName();
var languageVersion = oldSyntaxTree.languageVersion();
// The underlying source that we will use to scan tokens from any new text, or any tokens
// from the old tree that we decide we can't use for any reason. We will also continue
// scanning tokens from this source until we've decided that we're resynchronized and can
@ -31,40 +30,15 @@ module TypeScript.IncrementalParser {
//
// This parser source also keeps track of the absolute position in the text that we're in,
// and any token diagnostics produced. That way we don't have to track that ourselves.
var _scannerParserSource: Scanner.IScannerParserSource;
// The range of text in the *original* text that was changed, and the new length of it after
// the change.
var _changeRange: TextChangeRange;
// Cached value of _changeRange.newSpan(). Cached for performance.
var _changeRangeNewSpan: TextSpan;
// This number represents how our position in the old tree relates to the position we're
// pointing at in the new text. If it is 0 then our positions are in sync and we can read
// nodes or tokens from the old tree. If it is non-zero, then our positions are not in
// sync and we cannot use nodes or tokens from the old tree.
//
// Now, changeDelta could be negative or positive. Negative means 'the position we're at
// in the original tree is behind the position we're at in the text'. In this case we
// keep throwing out old nodes or tokens (and thus move forward in the original tree) until
// changeDelta becomes 0 again or positive. If it becomes 0 then we are resynched and can
// read nodes or tokens from the tree.
//
// If changeDelta is positive, that means the current node or token we're pointing at in
// the old tree is at a further ahead position than the position we're pointing at in the
// new text. In this case we have no choice but to scan tokens from the new text. We will
// continue to do so until, again, changeDelta becomes 0 and we've resynced, or change delta
// becomes negative and we need to skip nodes or tokens in the original tree.
var _changeDelta: number = 0;
var _scannerParserSource = Scanner.createParserSource(oldSyntaxTree.fileName(), text, oldSyntaxTree.languageVersion());
// The cursor we use to navigate through and retrieve nodes and tokens from the old tree.
var _oldSourceUnitCursor = getSyntaxCursor();
var oldSourceUnit = oldSyntaxTree.sourceUnit();
var _outstandingRewindPointCount = 0;
// Start the cursor pointing at the first element in the source unit (if it exists).
var _oldSourceUnitCursor = getSyntaxCursor();
if (oldSourceUnit.moduleElements.length > 0) {
_oldSourceUnitCursor.pushElement(childAt(oldSourceUnit.moduleElements, 0), /*indexInParent:*/ 0);
}
@ -74,8 +48,10 @@ module TypeScript.IncrementalParser {
// time this could be problematic would be if the user made a ton of discontinuous edits.
// For example, doing a column select on a *large* section of a code. If this is a
// problem, we can always update this code to handle multiple changes.
_changeRange = extendToAffectedRange(textChangeRange, oldSourceUnit);
_changeRangeNewSpan = _changeRange.newSpan();
var _changeRange = extendToAffectedRange(textChangeRange, oldSourceUnit);
// Cached value of _changeRange.newSpan(). Cached for performance.
var _changeRangeNewSpan = _changeRange.newSpan();
// The old tree's length, plus whatever length change was caused by the edit
// Had better equal the new text's length!
@ -83,8 +59,23 @@ module TypeScript.IncrementalParser {
Debug.assert((fullWidth(oldSourceUnit) - _changeRange.span().length() + _changeRange.newLength()) === text.length());
}
// Set up a scanner so that we can scan tokens out of the new text.
_scannerParserSource = Scanner.createParserSource(oldSyntaxTree.fileName(), text, oldSyntaxTree.languageVersion());
var delta = _changeRange.newSpan().length() - _changeRange.span().length();
// If we added or removed characters during the edit, then we need to go and adjust all
// the nodes after the edit. Those nodes may move forward (if we inserted chars)
// or they may move backward (if we deleted chars).
//
// Doing this helps us out in two ways. First, it means that any nodes/tokens we want
// to reuse are already at the appropriate position in the new text. That way when we
// reuse them, we don't have to figure out if they need to be adjusted. Second, it makes
// it very easy to determine if we can reuse a node. If the node's position is at where
// we are in the text, then we can reuse it. Otherwise we can't. If the node's position
// is ahead of us, then we'll need to rescan tokens. If the node's position is behind
// us, then we'll need to skip it or crumble it as appropriate
//
// Also, mark any syntax elements that intersect the changed span. We know, up front,
// that we cannot reuse these elements.
updateTokenPositionsAndMarkElements(<ISyntaxElementInternal><ISyntaxElement>oldSourceUnit,
_changeRange.span().start(), _changeRange.span().end(), delta, /*fullStart:*/ 0);
function release() {
_scannerParserSource.release();
@ -93,8 +84,7 @@ module TypeScript.IncrementalParser {
_outstandingRewindPointCount = 0;
}
function extendToAffectedRange(changeRange: TextChangeRange,
sourceUnit: SourceUnitSyntax): TextChangeRange {
function extendToAffectedRange(changeRange: TextChangeRange, sourceUnit: SourceUnitSyntax): TextChangeRange {
// Consider the following code:
// void foo() { /; }
//
@ -105,14 +95,6 @@ module TypeScript.IncrementalParser {
// (as it does not intersect the actual original change range). Because an edit may
// change the token touching it, we actually need to look back *at least* one token so
// that the prior token sees that change.
//
// Note: i believe (outside of regex tokens) max lookahead is just one token for
// TypeScript. However, if this turns out to be wrong, we may have to increase how much
// further we look back.
//
// Note: lookahead handling for regex characters is handled specially during
// incremental parsing, and does not need to be handled here.
var maxLookahead = 1;
var start = changeRange.span().start();
@ -122,10 +104,6 @@ module TypeScript.IncrementalParser {
// start of the tree.
for (var i = 0; start > 0 && i <= maxLookahead; i++) {
var token = findToken(sourceUnit, start);
// Debug.assert(token.kind !== SyntaxKind.None);
// Debug.assert(token.kind() === SyntaxKind.EndOfFileToken || token.fullWidth() > 0);
var position = token.fullStart();
start = Math.max(0, position - 1);
@ -149,17 +127,8 @@ module TypeScript.IncrementalParser {
// Get a rewind point for our new text reader and for our old source unit cursor.
var rewindPoint = <IParserRewindPoint>_scannerParserSource.getRewindPoint();
// Clone our cursor. That way we can restore to that point if the parser needs to rewind.
var oldSourceUnitCursorClone = cloneSyntaxCursor(_oldSourceUnitCursor);
// Store where we were when the rewind point was created.
rewindPoint.changeDelta = _changeDelta;
rewindPoint.changeRange = _changeRange;
rewindPoint.oldSourceUnitCursor = _oldSourceUnitCursor;
_oldSourceUnitCursor = oldSourceUnitCursorClone;
// Debug.assert(rewindPoint.pinCount === _oldSourceUnitCursor.pinCount());
// Clone our cursor. That way we can restore to that point if the parser needs to rewind.
rewindPoint.oldSourceUnitCursor = cloneSyntaxCursor(_oldSourceUnitCursor);
_outstandingRewindPointCount++;
return rewindPoint;
@ -167,15 +136,13 @@ module TypeScript.IncrementalParser {
function rewind(rewindPoint: IParserRewindPoint): void {
// Restore our state to the values when the rewind point was created.
_changeRange = rewindPoint.changeRange;
_changeDelta = rewindPoint.changeDelta;
// Reset the cursor to what it was when we got the rewind point. Make sure to return
// our existing cursor to the pool so it can be reused.
returnSyntaxCursor(_oldSourceUnitCursor);
_oldSourceUnitCursor = rewindPoint.oldSourceUnitCursor;
// Null out the cursor that the rewind point points to. This way we don't try
// Clear the cursor that the rewind point points to. This way we don't try
// to return it in 'releaseRewindPoint'.
rewindPoint.oldSourceUnitCursor = undefined;
@ -196,7 +163,7 @@ module TypeScript.IncrementalParser {
return _outstandingRewindPointCount > 0;
}
function canReadFromOldSourceUnit() {
function trySynchronizeCursorToPosition() {
// If we're currently pinned, then do not want to touch the cursor. Here's why. First,
// recall that we're 'pinned' when we're speculatively parsing. So say we were to allow
// returning old nodes/tokens while speculatively parsing. Then, the parser might start
@ -218,81 +185,72 @@ module TypeScript.IncrementalParser {
return false;
}
// If our current absolute position is in the middle of the changed range in the new text
// then we definitely can't read from the old source unit right now.
if (_changeRange && _changeRangeNewSpan.intersectsWithPosition(absolutePosition())) {
return false;
}
var absolutePos = absolutePosition();
while (true) {
if (_oldSourceUnitCursor.isFinished()) {
// Can't synchronize the cursor to the current position if the cursor is finished.
return false;
}
// First, try to sync up with the new text if we're behind.
syncCursorToNewTextIfBehind();
// Start with the current node or token the cursor is pointing at.
var currentNodeOrToken = _oldSourceUnitCursor.currentNodeOrToken();
// Now, if we're synced up *and* we're not currently pinned in the new text scanner,
// then we can read a node from the cursor. If we're pinned in the scanner then we
// can't read a node from the cursor because we will mess up the pinned scanner when
// we try to move it forward past this node.
return _changeDelta === 0 &&
!_oldSourceUnitCursor.isFinished();
}
// Now, move the cursor past any nodes or tokens that intersect the change range
// 1) they are never reusable.
// 2) their positions are wacky as they refer to the original text.
//
// We consider these nodes and tokens essentially invisible to all further parts
// of the incremental algorithm.
if ((<ISyntaxElementInternal><ISyntaxElement>currentNodeOrToken).intersectsChange) {
if (isNode(currentNodeOrToken)) {
_oldSourceUnitCursor.moveToFirstChild();
}
else {
_oldSourceUnitCursor.moveToNextSibling();
}
continue;
}
function updateTokenPosition(token: ISyntaxToken): void {
// If we got a node or token, and we're past the range of edited text, then walk its
// constituent tokens, making sure all their positions are correct. We don't need to
// do this for the tokens before the edited range (since their positions couldn't have
// been affected by the edit), and we don't need to do this for the tokens in the
// edited range, as their positions will be correct when the underlying parser source
// creates them.
var currentNodeOrTokenFullStart = fullStart(currentNodeOrToken);
if (currentNodeOrTokenFullStart === absolutePos) {
// We were able to synchronize the cursor to the current position. We can
// read from the cursor
return true;
}
if (isPastChangeRange()) {
token.setFullStart(absolutePosition());
}
}
if (currentNodeOrTokenFullStart > absolutePos) {
// The node or token is ahead of the current position. We'll need to rescan
// tokens until we catch up.
return false;
}
function updateNodePosition(node: ISyntaxNode): void {
// If we got a node or token, and we're past the range of edited text, then walk its
// constituent tokens, making sure all their positions are correct. We don't need to
// do this for the tokens before the edited range (since their positions couldn't have
// been affected by the edit), and we don't need to do this for the tokens in the
// edited range, as their positions will be correct when the underlying parser source
// creates them.
// The node or token is behind the current position we're at in the text.
if (isPastChangeRange()) {
var position = absolutePosition();
var currentNodeOrTokenFullWidth = fullWidth(currentNodeOrToken);
var currentNodeOrTokenFullEnd = currentNodeOrTokenFullStart + currentNodeOrTokenFullWidth;
var tokens = getTokens(node);
for (var i = 0, n = tokens.length; i < n; i++) {
var token = tokens[i];
token.setFullStart(position);
position += token.fullWidth();
// If we're pointing at a node, and that node ends before our current position, we
// can just skip the node entirely. Or, if we're pointing at a token, we won't be
// able to break up that token any further and we should just move to the next
// token.
if (currentNodeOrTokenFullEnd <= absolutePos || isToken(currentNodeOrToken)) {
_oldSourceUnitCursor.moveToNextSibling();
}
else {
// We have a node, and it started before our absolute pos, and ended after our
// pos. Try to crumble this node to see if we'll be able to skip the first node
// or token contained within.
_oldSourceUnitCursor.moveToFirstChild();
}
}
}
function getTokens(node: ISyntaxNode): ISyntaxToken[] {
var tokens = node.__cachedTokens;
if (!tokens) {
tokens = [];
tokenCollectorWalker.tokens = tokens;
visitNodeOrToken(tokenCollectorWalker, node);
node.__cachedTokens = tokens;
tokenCollectorWalker.tokens = undefined;
}
return tokens;
}
function currentNode(): ISyntaxNode {
if (canReadFromOldSourceUnit()) {
if (trySynchronizeCursorToPosition()) {
// Try to read a node. If we can't then our caller will call back in and just try
// to get a token.
var node = tryGetNodeFromOldSourceUnit();
if (node) {
// Make sure the positions for the tokens in this node are correct.
updateNodePosition(node);
return node;
}
}
@ -302,11 +260,9 @@ module TypeScript.IncrementalParser {
}
function currentToken(): ISyntaxToken {
if (canReadFromOldSourceUnit()) {
if (trySynchronizeCursorToPosition()) {
var token = tryGetTokenFromOldSourceUnit();
if (token) {
// Make sure the token's position/text is correct.
updateTokenPosition(token);
return token;
}
}
@ -321,69 +277,13 @@ module TypeScript.IncrementalParser {
return _scannerParserSource.currentContextualToken();
}
function syncCursorToNewTextIfBehind() {
while (true) {
if (_oldSourceUnitCursor.isFinished()) {
// Can't sync up if the cursor is finished.
break;
}
if (_changeDelta >= 0) {
// Nothing to do if we're synced up or ahead of the text.
break;
}
// We're behind in the original tree. Throw out a node or token in an attempt to
// catch up to the position we're at in the new text.
var currentNodeOrToken = _oldSourceUnitCursor.currentNodeOrToken();
// If we're pointing at a node, and that node's width is less than our delta,
// then we can just skip that node. Otherwise, if we're pointing at a node
// whose width is greater than the delta, then crumble it and try again.
// Otherwise, we must be pointing at a token. Just skip it and try again.
if (isNode(currentNodeOrToken) && (fullWidth(currentNodeOrToken) > Math.abs(_changeDelta))) {
// We were pointing at a node whose width was more than changeDelta. Crumble the
// node and try again. Note: we haven't changed changeDelta. So the callers loop
// will just repeat this until we get to a node or token that we can skip over.
_oldSourceUnitCursor.moveToFirstChild();
}
else {
_oldSourceUnitCursor.moveToNextSibling();
// Get our change delta closer to 0 as we skip past this item.
_changeDelta += fullWidth(currentNodeOrToken);
// If this was a node, then our changeDelta is 0 or negative. If this was a
// token, then we could still be negative (and we have to read another token),
// we could be zero (we're done), or we could be positive (we've moved ahead
// of the new text). Only if we're negative will we continue looping.
}
}
// At this point, we must be either:
// a) done with the cursor
// b) (ideally) caught up to the new text position.
// c) ahead of the new text position.
// In case 'b' we can try to reuse a node from the old tree.
// Debug.assert(_oldSourceUnitCursor.isFinished() || _changeDelta >= 0);
}
function intersectsWithChangeRangeSpanInOriginalText(start: number, length: number) {
return !isPastChangeRange() && _changeRange.span().intersectsWith(start, length);
}
function tryGetNodeFromOldSourceUnit(): ISyntaxNode {
// Debug.assert(canReadFromOldSourceUnit());
// Keep moving the cursor down to the first node that is safe to return. A node is
// safe to return if:
// a) it does not intersect the changed text.
// b) it does not contain skipped text.
// c) it does not have any zero width tokens in it.
// d) it does not have a regex token in it.
// e) we are still in the same strict or non-strict state that the node was originally parsed in.
// a) it does not contain skipped text.
// b) it does not have any zero width tokens in it.
// c) it does not have a regex token in it.
// d) we are still in the same strict or non-strict state that the node was originally parsed in.
while (true) {
var node = _oldSourceUnitCursor.currentNode();
if (node === undefined) {
@ -391,15 +291,10 @@ module TypeScript.IncrementalParser {
return undefined;
}
if (!intersectsWithChangeRangeSpanInOriginalText(absolutePosition(), fullWidth(node))) {
// Didn't intersect with the change range.
var isIncrementallyUnusuable = TypeScript.isIncrementallyUnusable(node);
if (!isIncrementallyUnusuable) {
// Didn't contain anything that would make it unusable. Awesome. This is
// a node we can reuse.
return node;
}
if (!TypeScript.isIncrementallyUnusable(node)) {
// Didn't contain anything that would make it unusable. Awesome. This is
// a node we can reuse.
return node;
}
// We couldn't use currentNode. Try to move to its first child (in case that's a
@ -409,9 +304,9 @@ module TypeScript.IncrementalParser {
}
}
function canReuseTokenFromOldSourceUnit(position: number, token: ISyntaxToken): boolean {
function canReuseTokenFromOldSourceUnit(token: ISyntaxToken): boolean {
// A token is safe to return if:
// a) it does not intersect the changed text.
// a) it did not intersect the change range.
// b) it does not contain skipped text.
// c) it is not zero width.
// d) it is not a contextual parser token.
@ -427,33 +322,21 @@ module TypeScript.IncrementalParser {
// need to make sure that if that the parser asks for a *token* we don't return it.
// Converted identifiers can't ever be created by the scanner, and as such, should not
// be returned by this source.
if (token) {
if (!intersectsWithChangeRangeSpanInOriginalText(position, token.fullWidth())) {
// Didn't intersect with the change range.
if (!token.isIncrementallyUnusable() && !Scanner.isContextualToken(token)) {
// Didn't contain anything that would make it unusable. Awesome. This is
// a token we can reuse.
return true;
}
}
}
return false;
return token &&
!(<ISyntaxElementInternal><ISyntaxElement>token).intersectsChange &&
!token.isIncrementallyUnusable() &&
!Scanner.isContextualToken(token);
}
function tryGetTokenFromOldSourceUnit(): ISyntaxToken {
// Debug.assert(canReadFromOldSourceUnit());
// get the current token that the cursor is pointing at.
var token = _oldSourceUnitCursor.currentToken();
return canReuseTokenFromOldSourceUnit(absolutePosition(), token)
? token : undefined;
return canReuseTokenFromOldSourceUnit(token) ? token : undefined;
}
function peekToken(n: number): ISyntaxToken {
if (canReadFromOldSourceUnit()) {
if (trySynchronizeCursorToPosition()) {
var token = tryPeekTokenFromOldSourceUnit(n);
if (token) {
return token;
@ -465,8 +348,6 @@ module TypeScript.IncrementalParser {
}
function tryPeekTokenFromOldSourceUnit(n: number): ISyntaxToken {
// Debug.assert(canReadFromOldSourceUnit());
// clone the existing cursor so we can move it forward and then restore ourselves back
// to where we started from.
@ -481,11 +362,6 @@ module TypeScript.IncrementalParser {
}
function tryPeekTokenFromOldSourceUnitWorker(n: number): ISyntaxToken {
// In order to peek the 'nth' token we need all the tokens up to that point. That way
// we know the position that the nth token is at. The position is necessary so
// that we can test whether this token (or any that precede it) crosses the change range.
var currentPosition = absolutePosition();
// First, make sure the cursor is pointing at a token.
_oldSourceUnitCursor.moveToFirstToken();
@ -493,112 +369,31 @@ module TypeScript.IncrementalParser {
for (var i = 0; i < n; i++) {
var interimToken = _oldSourceUnitCursor.currentToken();
if (!canReuseTokenFromOldSourceUnit(currentPosition, interimToken)) {
if (!canReuseTokenFromOldSourceUnit(interimToken)) {
return undefined;
}
currentPosition += interimToken.fullWidth();
_oldSourceUnitCursor.moveToNextSibling();
}
var token = _oldSourceUnitCursor.currentToken();
return canReuseTokenFromOldSourceUnit(currentPosition, token)
? token : undefined;
return canReuseTokenFromOldSourceUnit(token) ? token : undefined;
}
function consumeNode(node: ISyntaxNode): void {
// A node could have only come from the old source unit cursor. Update it and our
// current state.
// Debug.assert(_changeDelta === 0);
// Debug.assert(currentNode() === node);
_oldSourceUnitCursor.moveToNextSibling();
// Update the underlying source with where it should now be currently pointing.
var _absolutePosition = absolutePosition() + fullWidth(node);
_scannerParserSource.resetToPosition(_absolutePosition);
// Debug.assert(previousToken !== undefined);
// Debug.assert(previousToken.width() > 0);
//if (!isPastChangeRange()) {
// // If we still have a change range, then this node must have ended before the
// // change range starts. Thus, we don't need to call 'skipPastChanges'.
// Debug.assert(absolutePosition() < _changeRange.span().start());
//}
}
function consumeToken(currentToken: ISyntaxToken): void {
// Debug.assert(currentToken.fullWidth() > 0 || currentToken.kind === SyntaxKind.EndOfFileToken);
// This token may have come from the old source unit, or from the new text. Handle
// both accordingly.
if (_oldSourceUnitCursor.currentToken() === currentToken) {
// The token came from the old source unit. So our tree and text must be in sync.
// Debug.assert(_changeDelta === 0);
// Move the cursor past this token.
_oldSourceUnitCursor.moveToNextSibling();
// Debug.assert(!_normalParserSource.isPinned());
// Update the underlying source with where it should now be currently pointing. We
// don't need to do this when the token came from the new text as the source will
// automatically be placed in the right position.
var _absolutePosition = absolutePosition() + currentToken.fullWidth();
_scannerParserSource.resetToPosition(_absolutePosition);
// Debug.assert(previousToken !== undefined);
// Debug.assert(previousToken.width() > 0);
//if (!isPastChangeRange()) {
// // If we still have a change range, then this token must have ended before the
// // change range starts. Thus, we don't need to call 'skipPastChanges'.
// Debug.assert(absolutePosition() < _changeRange.span().start());
//}
}
else {
// the token came from the new text. That means the normal source moved forward,
// while the syntax cursor stayed in the same place. Thus our delta moves even
// further back.
_changeDelta -= currentToken.fullWidth();
// Move our underlying source forward.
_scannerParserSource.consumeToken(currentToken);
// Because we read a token from the new text, we may have moved ourselves past the
// change range. If we did, then we may also have to update our change delta to
// compensate for the length change between the old and new text.
if (!isPastChangeRange()) {
// var changeEndInNewText = _changeRange.span().start() + _changeRange.newLength();
if (absolutePosition() >= _changeRangeNewSpan.end()) {
_changeDelta += _changeRange.newLength() - _changeRange.span().length();
// Once we're past the change range, we no longer need it. Null it out.
// From now on we can check if we're past the change range just by seeing
// if this is undefined.
_changeRange = undefined;
}
}
}
}
function isPastChangeRange(): boolean {
return _changeRange === undefined;
function consumeNodeOrToken(nodeOrToken: ISyntaxNodeOrToken): void {
_scannerParserSource.consumeNodeOrToken(nodeOrToken);
}
return {
text: text,
fileName: fileName,
languageVersion: languageVersion,
fileName: oldSyntaxTree.fileName(),
languageVersion: oldSyntaxTree.languageVersion(),
absolutePosition: absolutePosition,
currentNode: currentNode,
currentToken: currentToken,
currentContextualToken: currentContextualToken,
peekToken: peekToken,
consumeNode: consumeNode,
consumeToken: consumeToken,
consumeNodeOrToken: consumeNodeOrToken,
getRewindPoint: getRewindPoint,
rewind: rewind,
releaseRewindPoint: releaseRewindPoint,
@ -700,7 +495,6 @@ module TypeScript.IncrementalParser {
// Makes this cursor into a deep copy of the cursor passed in.
function deepCopyFrom(other: SyntaxCursor): void {
// Debug.assert(currentPieceIndex === -1);
for (var i = 0, n = other.pieces.length; i < n; i++) {
var piece = other.pieces[i];
@ -710,8 +504,6 @@ module TypeScript.IncrementalParser {
pushElement(piece.element, piece.indexInParent);
}
// Debug.assert(currentPieceIndex === other.currentPieceIndex);
}
function isFinished(): boolean {
@ -726,9 +518,6 @@ module TypeScript.IncrementalParser {
var result = pieces[currentPieceIndex].element;
// The current element must always be a node or a token.
// Debug.assert(result !== undefined);
// Debug.assert(result.isNode() || result.isToken());
return <ISyntaxNodeOrToken>result;
}
@ -752,9 +541,6 @@ module TypeScript.IncrementalParser {
return;
}
// The last element must be a token or a node.
// Debug.assert(isNode(nodeOrToken));
// Either the node has some existent child, then move to it. if it doesn't, then it's
// an empty node. Conceptually the first child of an empty node is really just the
// next sibling of the empty node.
@ -773,8 +559,6 @@ module TypeScript.IncrementalParser {
// This element must have been an empty node. Moving to its 'first child' is equivalent to just
// moving to the next sibling.
// Debug.assert(fullWidth(nodeOrToken) === 0);
moveToNextSibling();
}
@ -818,15 +602,11 @@ module TypeScript.IncrementalParser {
if (isList(element)) {
// We cannot ever get an empty list in our piece path. Empty lists are 'shared' and
// we make sure to filter that out before pushing any children.
// Debug.assert(childCount(element) > 0);
pushElement(childAt(element, 0), /*indexInParent:*/ 0);
}
}
function pushElement(element: ISyntaxElement, indexInParent: number): void {
// Debug.assert(element !== undefined);
// Debug.assert(indexInParent >= 0);
currentPieceIndex++;
// Reuse an existing piece if we have one. Otherwise, push a new piece to our list.
@ -848,7 +628,6 @@ module TypeScript.IncrementalParser {
continue;
}
// Debug.assert(isToken(element));
return;
}
}
@ -857,7 +636,6 @@ module TypeScript.IncrementalParser {
moveToFirstToken();
var element = currentNodeOrToken();
// Debug.assert(element === undefined || element.isToken());
return <ISyntaxToken>element;
}
@ -888,6 +666,94 @@ module TypeScript.IncrementalParser {
}
var tokenCollectorWalker = new TokenCollectorWalker();
function updateTokenPositionsAndMarkElements(element: ISyntaxElement, changeStart: number, changeRangeOldEnd: number, delta: number, fullStart: number): void {
// First, try to skip past any elements that we don't need to move. We don't need to
// move any elements that don't start after the end of the change range.
if (fullStart > changeRangeOldEnd) {
// Note, we only move elements that are truly after the end of the change range.
// We consider elements that are touching the end of the change range to be unusable.
forceUpdateTokenPositionsForElement(element, delta);
}
else {
// Check if the element intersects the change range. If it does, then it is not
// reusable. Also, we'll need to recurse to see what constituent portions we may
// be able to use.
var fullEnd = fullStart + fullWidth(element);
if (fullEnd >= changeStart) {
(<ISyntaxElementInternal>element).intersectsChange = true;
if (isList(element)) {
var list = <ISyntaxNodeOrToken[]>element;
for (var i = 0, n = list.length; i < n; i++) {
var child: ISyntaxElement = list[i];
updateTokenPositionsAndMarkElements(child, changeStart, changeRangeOldEnd, delta, fullStart);
fullStart += fullWidth(child);
}
}
else if (isNode(element)) {
var node = <ISyntaxNode>element;
for (var i = 0, n = node.childCount; i < n; i++) {
var child = node.childAt(i);
if (child) {
updateTokenPositionsAndMarkElements(child, changeStart, changeRangeOldEnd, delta, fullStart);
fullStart += fullWidth(child);
}
}
}
}
// else {
// This element ended strictly before the edited range. We don't need to do anything
// with it.
// }
}
}
function forceUpdateTokenPositionsForElement(element: ISyntaxElement, delta: number) {
// No need to move anything if the delta is 0.
if (delta !== 0) {
if (isList(element)) {
var list = <ISyntaxNodeOrToken[]>element;
for (var i = 0, n = list.length; i < n; i++) {
forceUpdateTokenPositionForNodeOrToken(list[i], delta);
}
}
else {
forceUpdateTokenPositionForNodeOrToken(<ISyntaxNodeOrToken>element, delta);
}
}
}
function forceUpdateTokenPosition(token: ISyntaxToken, delta: number) {
token.setFullStart(token.fullStart() + delta);
}
function forceUpdateTokenPositionForNodeOrToken(nodeOrToken: ISyntaxNodeOrToken, delta: number) {
if (isToken(nodeOrToken)) {
forceUpdateTokenPosition(<ISyntaxToken>nodeOrToken, delta);
}
else {
var node = <ISyntaxNode>nodeOrToken;
var tokens = getTokens(node);
for (var i = 0, n = tokens.length; i < n; i++) {
forceUpdateTokenPosition(tokens[i], delta);
}
}
}
function getTokens(node: ISyntaxNode): ISyntaxToken[] {
var tokens = node.__cachedTokens;
if (!tokens) {
tokens = [];
tokenCollectorWalker.tokens = tokens;
visitNodeOrToken(tokenCollectorWalker, node);
node.__cachedTokens = tokens;
tokenCollectorWalker.tokens = undefined;
}
return tokens;
}
export function parse(oldSyntaxTree: SyntaxTree, textChangeRange: TextChangeRange, newText: ISimpleText): SyntaxTree {
if (textChangeRange.isUnchanged()) {

View File

@ -70,8 +70,7 @@ module TypeScript.Parser {
// Called to move the source to the next node or token once the parser has consumed the
// current one.
consumeNode(node: ISyntaxNode): void;
consumeToken(token: ISyntaxToken): void;
consumeNodeOrToken(node: ISyntaxNodeOrToken): void;
// Gets a rewind point that the parser can use to move back to after it speculatively
// parses something. The source guarantees that if the parser calls 'rewind' with that
@ -320,14 +319,14 @@ module TypeScript.Parser {
// call 'consumeToken'. Doing so would attempt to add any previous skipped tokens
// to this token we're skipping. We don't want to do that. Instead, we want to add
// all the skipped tokens when we finally eat the next good token.
source.consumeToken(token)
source.consumeNodeOrToken(token)
}
function consumeToken(token: ISyntaxToken): ISyntaxToken {
// Debug.assert(token.fullWidth() > 0 || token.kind === SyntaxKind.EndOfFileToken);
// First, tell our source that the token has been consumed.
source.consumeToken(token);
source.consumeNodeOrToken(token);
// Now, if we had any skipped tokens, we want to add them to the start of this token
// we're consuming.
@ -385,7 +384,7 @@ module TypeScript.Parser {
function consumeNode(node: ISyntaxNode): void {
Debug.assert(_skippedTokens === undefined);
source.consumeNode(node);
source.consumeNodeOrToken(node);
}
//this method is called very frequently

View File

@ -1430,16 +1430,6 @@ module TypeScript.Scanner {
return !hadError && SyntaxFacts.isIdentifierNameOrAnyKeyword(token) && width(token) === text.length();
}
// A parser source that gets its data from an underlying scanner.
export interface IScannerParserSource extends Parser.IParserSource {
// The position that the scanner is currently at.
absolutePosition(): number;
// Resets the source to this position. Any diagnostics produced after this point will be
// removed.
resetToPosition(absolutePosition: number): void;
}
interface IScannerRewindPoint extends Parser.IRewindPoint {
// Information used by normal parser source.
absolutePosition: number;
@ -1449,7 +1439,7 @@ module TypeScript.Scanner {
// Parser source used in batch scenarios. Directly calls into an underlying text scanner and
// supports none of the functionality to reuse nodes. Good for when you just want to do
// a single parse of a file.
export function createParserSource(fileName: string, text: ISimpleText, languageVersion: ts.ScriptTarget): IScannerParserSource {
export function createParserSource(fileName: string, text: ISimpleText, languageVersion: ts.ScriptTarget): Parser.IParserSource {
// The absolute position we're at in the text we're reading from.
var _absolutePosition: number = 0;
@ -1489,11 +1479,6 @@ module TypeScript.Scanner {
return undefined;
}
function consumeNode(node: ISyntaxNode): void {
    // The batch (scanner-backed) source never hands out nodes, only tokens, so
    // the parser has no node it could legitimately consume from us.
    // Should never get called.
    throw Errors.invalidOperation();
}
// Returns the absolute character offset this source is currently positioned
// at within the text being scanned (tracked in the enclosing closure).
function absolutePosition() {
    return _absolutePosition;
}
@ -1561,13 +1546,20 @@ module TypeScript.Scanner {
return slidingWindow.peekItemN(n);
}
function consumeToken(token: ISyntaxToken): void {
// Debug.assert(token.fullWidth() > 0 || token.kind === SyntaxKind.EndOfFileToken);
// Debug.assert(currentToken() === token);
_absolutePosition += token.fullWidth();
slidingWindow.moveToNextItem();
function consumeNodeOrToken(nodeOrToken: ISyntaxNodeOrToken): void {
    // Distinguish the common case (the parser is consuming the very token we
    // just produced) from the incremental-reuse case (the parser consumed a
    // node or token lifted from an older tree).
    if (nodeOrToken !== slidingWindow.currentItemWithoutFetching()) {
        // An element reused from an older tree: it never came from our sliding
        // window, so simply reposition ourselves at the end of what the parser
        // just consumed.
        resetToPosition(fullEnd(nodeOrToken));
    }
    else {
        // Consuming the token we just handed out: advance past it and drop it
        // from the sliding window.
        _absolutePosition += (<ISyntaxToken>nodeOrToken).fullWidth();
        slidingWindow.moveToNextItem();
    }
}
function currentToken(): ISyntaxToken {
@ -1644,15 +1636,13 @@ module TypeScript.Scanner {
currentToken: currentToken,
currentContextualToken: currentContextualToken,
peekToken: peekToken,
consumeNode: consumeNode,
consumeToken: consumeToken,
consumeNodeOrToken: consumeNodeOrToken,
getRewindPoint: getRewindPoint,
rewind: rewind,
releaseRewindPoint: releaseRewindPoint,
tokenDiagnostics: tokenDiagnostics,
release: release,
absolutePosition: absolutePosition,
resetToPosition: resetToPosition,
};
}

View File

@ -163,6 +163,14 @@ module TypeScript {
return this.window[this.currentRelativeItemIndex];
}
// Peeks at the current item already in the window, returning undefined instead
// of fetching a new one when the window has been exhausted.
public currentItemWithoutFetching(): any {
    return this.currentRelativeItemIndex >= this.windowCount
        ? undefined
        : this.window[this.currentRelativeItemIndex];
}
public peekItemN(n: number): any {
// Assert disabled because it is actually expensive enough to affect perf.
// Debug.assert(n >= 0);

View File

@ -38,7 +38,7 @@ module TypeScript {
module TypeScript.Syntax {
function addArrayPrototypeValue(name: string, val: any) {
if (Object.defineProperty && (<any>Array.prototype)[name] === undefined) {
Object.defineProperty(Array.prototype, name, { value: val, writable: false });
Object.defineProperty(Array.prototype, name, { value: val, writable: false, enumerable: false });
}
else {
(<any>Array.prototype)[name] = val;