Skip to content
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
38 changes: 37 additions & 1 deletion internal/ast/ast.go
Original file line number Diff line number Diff line change
Expand Up @@ -10781,6 +10781,7 @@ type SourceFile struct {

tokenCacheMu sync.Mutex
tokenCache map[core.TextRange]*Node
tokenFactory *NodeFactory
declarationMapMu sync.Mutex
declarationMap map[string][]*Node
}
Expand Down Expand Up @@ -10942,6 +10943,7 @@ func (node *SourceFile) GetOrCreateToken(
pos int,
end int,
parent *Node,
flags TokenFlags,
) *TokenNode {
node.tokenCacheMu.Lock()
defer node.tokenCacheMu.Unlock()
Expand All @@ -10959,13 +10961,47 @@ func (node *SourceFile) GetOrCreateToken(
return token
}

token := newNode(kind, &Token{}, NodeFactoryHooks{})
token := createToken(kind, node, pos, end, flags)
token.Loc = loc
token.Parent = parent
node.tokenCache[loc] = token
return token
}

// `kind` should be a token kind.
func createToken(kind Kind, file *SourceFile, pos, end int, flags TokenFlags) *Node {
if file.tokenFactory == nil {
file.tokenFactory = NewNodeFactory(NodeFactoryHooks{})
}
text := file.text[pos:end]
switch kind {
case KindNumericLiteral:
return file.tokenFactory.NewNumericLiteral(text)
case KindBigIntLiteral:
return file.tokenFactory.NewBigIntLiteral(text)
case KindStringLiteral:
return file.tokenFactory.NewStringLiteral(text)
case KindJsxText, KindJsxTextAllWhiteSpaces:
return file.tokenFactory.NewJsxText(text, kind == KindJsxTextAllWhiteSpaces)
case KindRegularExpressionLiteral:
return file.tokenFactory.NewRegularExpressionLiteral(text)
case KindNoSubstitutionTemplateLiteral:
return file.tokenFactory.NewNoSubstitutionTemplateLiteral(text)
Copy link

Copilot AI Dec 9, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The TokenFlags are not being set for these literal types, even though they all have LiteralLikeBase (or TemplateLiteralLikeBase) which includes a TokenFlags field. This is critical because these flags carry important information like whether a literal is unterminated (see TokenFlagsUnterminated and IsUnterminatedLiteral in utilities.go), which is exactly what this PR is trying to fix.

The flags need to be set after creating these nodes. For example:

case KindStringLiteral:
	node := file.tokenFactory.NewStringLiteral(text)
	node.AsStringLiteral().TokenFlags = flags
	return node
case KindNumericLiteral:
	node := file.tokenFactory.NewNumericLiteral(text)
	node.AsNumericLiteral().TokenFlags = flags
	return node
case KindBigIntLiteral:
	node := file.tokenFactory.NewBigIntLiteral(text)
	node.AsBigIntLiteral().TokenFlags = flags
	return node
case KindJsxText, KindJsxTextAllWhiteSpaces:
	node := file.tokenFactory.NewJsxText(text, kind == KindJsxTextAllWhiteSpaces)
	node.AsJsxText().TokenFlags = flags
	return node
case KindRegularExpressionLiteral:
	node := file.tokenFactory.NewRegularExpressionLiteral(text)
	node.AsRegularExpressionLiteral().TokenFlags = flags
	return node
case KindNoSubstitutionTemplateLiteral:
	node := file.tokenFactory.NewNoSubstitutionTemplateLiteral(text)
	node.AsNoSubstitutionTemplateLiteral().TemplateFlags = flags
	return node

Note that for NoSubstitutionTemplateLiteral, it should set TemplateFlags (not the inherited TokenFlags from LiteralLikeBase) since IsUnterminatedLiteral checks TemplateFlags for template literal kinds.

Suggested change
return file.tokenFactory.NewNumericLiteral(text)
case KindBigIntLiteral:
return file.tokenFactory.NewBigIntLiteral(text)
case KindStringLiteral:
return file.tokenFactory.NewStringLiteral(text)
case KindJsxText, KindJsxTextAllWhiteSpaces:
return file.tokenFactory.NewJsxText(text, kind == KindJsxTextAllWhiteSpaces)
case KindRegularExpressionLiteral:
return file.tokenFactory.NewRegularExpressionLiteral(text)
case KindNoSubstitutionTemplateLiteral:
return file.tokenFactory.NewNoSubstitutionTemplateLiteral(text)
node := file.tokenFactory.NewNumericLiteral(text)
node.AsNumericLiteral().TokenFlags = flags
return node
case KindBigIntLiteral:
node := file.tokenFactory.NewBigIntLiteral(text)
node.AsBigIntLiteral().TokenFlags = flags
return node
case KindStringLiteral:
node := file.tokenFactory.NewStringLiteral(text)
node.AsStringLiteral().TokenFlags = flags
return node
case KindJsxText, KindJsxTextAllWhiteSpaces:
node := file.tokenFactory.NewJsxText(text, kind == KindJsxTextAllWhiteSpaces)
node.AsJsxText().TokenFlags = flags
return node
case KindRegularExpressionLiteral:
node := file.tokenFactory.NewRegularExpressionLiteral(text)
node.AsRegularExpressionLiteral().TokenFlags = flags
return node
case KindNoSubstitutionTemplateLiteral:
node := file.tokenFactory.NewNoSubstitutionTemplateLiteral(text)
node.AsNoSubstitutionTemplateLiteral().TemplateFlags = flags
return node

Copilot uses AI. Check for mistakes.
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is unfortunately correct, and it's very annoying e.g. NewStringLiteral doesn't take the flags because I keep forgetting about it.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should it?

This code is also sort of the same as parseLiteralExpression which does sort of make it seem like this is all sort of error prone, yeah.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think it should in general, as there's code in the LS that uses token flags, and code in the printer that also uses token flags, but there's tons of existing places where we don't set flags right now. I'll try and refactor that in another PR tomorrow.

case KindTemplateHead:
return file.tokenFactory.NewTemplateHead(text, "" /*rawText*/, flags)
case KindTemplateMiddle:
return file.tokenFactory.NewTemplateMiddle(text, "" /*rawText*/, flags)
case KindTemplateTail:
return file.tokenFactory.NewTemplateTail(text, "" /*rawText*/, flags)
case KindIdentifier:
return file.tokenFactory.NewIdentifier(text)
case KindPrivateIdentifier:
return file.tokenFactory.NewPrivateIdentifier(text)
default: // Punctuation and keywords
return file.tokenFactory.NewToken(kind)
}
}

// IsSourceFile reports whether node is a SourceFile node.
func IsSourceFile(node *Node) bool {
	return node.Kind == KindSourceFile
}
Expand Down
20 changes: 13 additions & 7 deletions internal/astnav/tokens.go
Original file line number Diff line number Diff line change
Expand Up @@ -187,17 +187,18 @@ func getTokenAtPosition(
tokenFullStart := scanner.TokenFullStart()
tokenStart := core.IfElse(allowPositionInLeadingTrivia, tokenFullStart, scanner.TokenStart())
tokenEnd := scanner.TokenEnd()
flags := scanner.TokenFlags()
if tokenStart <= position && (position < tokenEnd) {
if token == ast.KindIdentifier || !ast.IsTokenKind(token) {
if ast.IsJSDocKind(current.Kind) {
return current
}
panic(fmt.Sprintf("did not expect %s to have %s in its trivia", current.Kind.String(), token.String()))
}
return sourceFile.GetOrCreateToken(token, tokenFullStart, tokenEnd, current)
return sourceFile.GetOrCreateToken(token, tokenFullStart, tokenEnd, current, flags)
}
if includePrecedingTokenAtEndPosition != nil && tokenEnd == position {
prevToken := sourceFile.GetOrCreateToken(token, tokenFullStart, tokenEnd, current)
prevToken := sourceFile.GetOrCreateToken(token, tokenFullStart, tokenEnd, current, flags)
if includePrecedingTokenAtEndPosition(prevToken) {
return prevToken
}
Expand Down Expand Up @@ -514,7 +515,8 @@ func findRightmostValidToken(endPos int, sourceFile *ast.SourceFile, containingN
tokenFullStart := scanner.TokenFullStart()
tokenEnd := scanner.TokenEnd()
startPos = tokenEnd
tokens = append(tokens, sourceFile.GetOrCreateToken(token, tokenFullStart, tokenEnd, n))
flags := scanner.TokenFlags()
tokens = append(tokens, sourceFile.GetOrCreateToken(token, tokenFullStart, tokenEnd, n, flags))
scanner.Scan()
}
startPos = visitedNode.End()
Expand All @@ -531,7 +533,8 @@ func findRightmostValidToken(endPos int, sourceFile *ast.SourceFile, containingN
tokenFullStart := scanner.TokenFullStart()
tokenEnd := scanner.TokenEnd()
startPos = tokenEnd
tokens = append(tokens, sourceFile.GetOrCreateToken(token, tokenFullStart, tokenEnd, n))
flags := scanner.TokenFlags()
tokens = append(tokens, sourceFile.GetOrCreateToken(token, tokenFullStart, tokenEnd, n, flags))
scanner.Scan()
}

Expand Down Expand Up @@ -616,8 +619,9 @@ func FindNextToken(previousToken *ast.Node, parent *ast.Node, file *ast.SourceFi
tokenFullStart := scanner.TokenFullStart()
tokenStart := scanner.TokenStart()
tokenEnd := scanner.TokenEnd()
flags := scanner.TokenFlags()
if tokenStart == previousToken.End() {
return file.GetOrCreateToken(token, tokenFullStart, tokenEnd, n)
return file.GetOrCreateToken(token, tokenFullStart, tokenEnd, n, flags)
}
panic(fmt.Sprintf("Expected to find next token at %d, got token %s at %d", previousToken.End(), token, tokenStart))
}
Expand Down Expand Up @@ -690,7 +694,8 @@ func FindChildOfKind(containingNode *ast.Node, kind ast.Kind, sourceFile *ast.So
tokenKind := scan.Token()
tokenFullStart := scan.TokenFullStart()
tokenEnd := scan.TokenEnd()
token := sourceFile.GetOrCreateToken(tokenKind, tokenFullStart, tokenEnd, containingNode)
flags := scan.TokenFlags()
token := sourceFile.GetOrCreateToken(tokenKind, tokenFullStart, tokenEnd, containingNode, flags)
if tokenKind == kind {
foundChild = token
return true
Expand Down Expand Up @@ -720,7 +725,8 @@ func FindChildOfKind(containingNode *ast.Node, kind ast.Kind, sourceFile *ast.So
tokenKind := scan.Token()
tokenFullStart := scan.TokenFullStart()
tokenEnd := scan.TokenEnd()
token := sourceFile.GetOrCreateToken(tokenKind, tokenFullStart, tokenEnd, containingNode)
flags := scan.TokenFlags()
token := sourceFile.GetOrCreateToken(tokenKind, tokenFullStart, tokenEnd, containingNode, flags)
if tokenKind == kind {
return token
}
Expand Down
25 changes: 25 additions & 0 deletions internal/fourslash/tests/completionsUnterminatedLiteral_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
package fourslash_test

import (
"testing"

"github.com/microsoft/typescript-go/internal/fourslash"
. "github.com/microsoft/typescript-go/internal/fourslash/tests/util"
"github.com/microsoft/typescript-go/internal/testutil"
)

// TestCompletionsUnterminatedLiteral requests completions at a position
// inside an unterminated string literal and expects no panic and no items.
// NOTE(review): named after the PR's unterminated-literal fix; presumably a
// regression test for token flags being dropped on synthetic tokens — confirm.
func TestCompletionsUnterminatedLiteral(t *testing.T) {
	t.Parallel()

	// Convert any panic during the fourslash run into a test failure.
	defer testutil.RecoverAndFail(t, "Panic on fourslash test")
	// /*1*/ marks the completion request position, just after the opening
	// quote of the unterminated string literal in the parameter list.
	const content = `// @noLib: true
function foo(a"/*1*/`
	f, done := fourslash.NewFourslash(t, nil /*capabilities*/, content)
	defer done()
	f.VerifyCompletions(t, "1", &fourslash.CompletionsExpectedList{
		ItemDefaults: &fourslash.CompletionsExpectedItemDefaults{
			CommitCharacters: &DefaultCommitCharacters,
		},
		// Empty expected items: no completions inside the string literal.
		Items: &fourslash.CompletionsExpectedItems{},
	})
}
6 changes: 3 additions & 3 deletions internal/ls/change/tracker.go
Original file line number Diff line number Diff line change
Expand Up @@ -178,7 +178,7 @@ func (t *Tracker) InsertNodeBefore(sourceFile *ast.SourceFile, before *ast.Node,
// InsertModifierBefore inserts a modifier token (like 'type') before a node with a trailing space.
func (t *Tracker) InsertModifierBefore(sourceFile *ast.SourceFile, modifier ast.Kind, before *ast.Node) {
pos := astnav.GetStartOfNode(before, sourceFile, false)
token := sourceFile.GetOrCreateToken(modifier, pos, pos, before.Parent)
token := sourceFile.GetOrCreateToken(modifier, pos, pos, before.Parent, ast.TokenFlagsNone)
t.InsertNodeAt(sourceFile, core.TextPos(pos), token, NodeOptions{Suffix: " "})
}

Expand Down Expand Up @@ -262,7 +262,7 @@ func (t *Tracker) endPosForInsertNodeAfter(sourceFile *ast.SourceFile, after *as
endPos := t.converters.PositionToLineAndCharacter(sourceFile, core.TextPos(after.End()))
t.ReplaceRange(sourceFile,
lsproto.Range{Start: endPos, End: endPos},
sourceFile.GetOrCreateToken(ast.KindSemicolonToken, after.End(), after.End(), after.Parent),
sourceFile.GetOrCreateToken(ast.KindSemicolonToken, after.End(), after.End(), after.Parent, ast.TokenFlagsNone),
NodeOptions{},
)
}
Expand Down Expand Up @@ -347,7 +347,7 @@ func (t *Tracker) InsertNodeInListAfter(sourceFile *ast.SourceFile, after *ast.N

// insert separator immediately following the 'after' node to preserve comments in trailing trivia
// !!! formatcontext
t.ReplaceRange(sourceFile, lsproto.Range{Start: end, End: end}, sourceFile.GetOrCreateToken(separator, after.End(), after.End()+len(separatorString), after.Parent), NodeOptions{})
t.ReplaceRange(sourceFile, lsproto.Range{Start: end, End: end}, sourceFile.GetOrCreateToken(separator, after.End(), after.End()+len(separatorString), after.Parent, ast.TokenFlagsNone), NodeOptions{})
Copy link

Copilot AI Dec 10, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This creates a synthetic token with end position after.End()+len(separatorString), which extends beyond the actual text currently in the source file. The new createToken function (added in this PR at internal/ast/ast.go line 10976) performs text := file.text[pos:end], which will panic with "slice bounds out of range" when end > len(file.text).

To fix this, createToken needs to handle cases where positions extend beyond the source text. Add bounds checking:

func createToken(kind Kind, file *SourceFile, pos, end int, flags TokenFlags) *Node {
	if file.tokenFactory == nil {
		file.tokenFactory = NewNodeFactory(NodeFactoryHooks{})
	}
	var text string
	if pos >= 0 && end <= len(file.text) && pos <= end {
		text = file.text[pos:end]
	}
	// For punctuation/keywords (default case), text isn't used anyway
	// For literals/identifiers created with invalid bounds, this is a programming error
	// that should be caught, but we can default to empty text for robustness
	switch kind {
	// ... rest of the cases
	}
}

Copilot uses AI. Check for mistakes.
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I noticed this too, will follow up on it in a separate PR: the change tracker should not be calling file.GetOrCreateToken() at all to create synthetic tokens.

// use the same indentation as 'after' item
indentation := format.FindFirstNonWhitespaceColumn(afterStartLinePosition, afterStart, sourceFile, t.formatSettings)
// insert element before the line break on the line that contains 'after' element
Expand Down
4 changes: 2 additions & 2 deletions internal/ls/lsutil/children.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ func GetLastChild(node *ast.Node, sourceFile *ast.SourceFile) *ast.Node {
tokenKind := scanner.Token()
tokenFullStart := scanner.TokenFullStart()
tokenEnd := scanner.TokenEnd()
lastToken = sourceFile.GetOrCreateToken(tokenKind, tokenFullStart, tokenEnd, node)
lastToken = sourceFile.GetOrCreateToken(tokenKind, tokenFullStart, tokenEnd, node, scanner.TokenFlags())
startPos = tokenEnd
scanner.Scan()
}
Expand Down Expand Up @@ -108,7 +108,7 @@ func GetFirstToken(node *ast.Node, sourceFile *ast.SourceFile) *ast.Node {
tokenKind := scanner.Token()
tokenFullStart := scanner.TokenFullStart()
tokenEnd := scanner.TokenEnd()
firstToken = sourceFile.GetOrCreateToken(tokenKind, tokenFullStart, tokenEnd, node)
firstToken = sourceFile.GetOrCreateToken(tokenKind, tokenFullStart, tokenEnd, node, scanner.TokenFlags())
}

if firstToken != nil {
Expand Down
2 changes: 1 addition & 1 deletion internal/ls/signaturehelp.go
Original file line number Diff line number Diff line change
Expand Up @@ -1231,7 +1231,7 @@ func getTokenFromNodeList(nodeList *ast.NodeList, nodeListParent *ast.Node, sour
token := scanner.Token()
tokenFullStart := scanner.TokenFullStart()
tokenEnd := scanner.TokenEnd()
tokens = append(tokens, sourceFile.GetOrCreateToken(token, tokenFullStart, tokenEnd, nodeListParent))
tokens = append(tokens, sourceFile.GetOrCreateToken(token, tokenFullStart, tokenEnd, nodeListParent, scanner.TokenFlags()))
left = tokenEnd
}
}
Expand Down
6 changes: 3 additions & 3 deletions internal/ls/utilities.go
Original file line number Diff line number Diff line change
Expand Up @@ -752,7 +752,7 @@ func nodeEndsWith(n *ast.Node, expectedLastToken ast.Kind, sourceFile *ast.Sourc
tokenKind := scanner.Token()
tokenFullStart := scanner.TokenFullStart()
tokenEnd := scanner.TokenEnd()
token := sourceFile.GetOrCreateToken(tokenKind, tokenFullStart, tokenEnd, n)
token := sourceFile.GetOrCreateToken(tokenKind, tokenFullStart, tokenEnd, n, scanner.TokenFlags())
lastNodeAndTokens = append(lastNodeAndTokens, token)
startPos = tokenEnd
scanner.Scan()
Expand Down Expand Up @@ -1571,7 +1571,7 @@ func getChildrenFromNonJSDocNode(node *ast.Node, sourceFile *ast.SourceFile) []*
token := scanner.Token()
tokenFullStart := scanner.TokenFullStart()
tokenEnd := scanner.TokenEnd()
children = append(children, sourceFile.GetOrCreateToken(token, tokenFullStart, tokenEnd, node))
children = append(children, sourceFile.GetOrCreateToken(token, tokenFullStart, tokenEnd, node, scanner.TokenFlags()))
pos = tokenEnd
scanner.Scan()
}
Expand All @@ -1583,7 +1583,7 @@ func getChildrenFromNonJSDocNode(node *ast.Node, sourceFile *ast.SourceFile) []*
token := scanner.Token()
tokenFullStart := scanner.TokenFullStart()
tokenEnd := scanner.TokenEnd()
children = append(children, sourceFile.GetOrCreateToken(token, tokenFullStart, tokenEnd, node))
children = append(children, sourceFile.GetOrCreateToken(token, tokenFullStart, tokenEnd, node, scanner.TokenFlags()))
pos = tokenEnd
scanner.Scan()
}
Expand Down