Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: TextDocument and incremental sync helper #5

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
102 changes: 102 additions & 0 deletions example/lsp_server_example_incremental_sync.dart
Original file line number Diff line number Diff line change
@@ -0,0 +1,102 @@
import 'dart:io';

import 'package:collection/collection.dart';
import 'package:lsp_server/lsp_server.dart';

void main() async {
  // A Connection wraps a readable and a writable stream used to talk to the
  // LSP client. stdio is used here, but a socket connection or any other
  // stream pair works just as well.
  final conn = Connection(stdin, stdout);

  // TextDocuments keeps documents in sync, supporting both full and
  // incremental sync. The documents it hands back are TextDocument
  // instances, whose API matches vscode-languageserver-textdocument.
  final docs = TextDocuments(conn, onDidChangeContent: (params) async {
    // onDidChangeContent fires when a document is opened as well as on every
    // change, which makes it a great place to run diagnostics.
    final issues = _validateTextDocument(
      params.document.getText(),
      params.document.uri.toString(),
    );

    // Notify the client of the issues we want it to render. To clear issues
    // the server is responsible for sending an empty list.
    conn.sendDiagnostics(
      PublishDiagnosticsParams(
        diagnostics: issues,
        uri: params.document.uri,
      ),
    );
  });

  // When the client initializes the server, respond with the server's
  // capabilities. Some capabilities must be enabled by the client; inspect
  // the ClientCapabilities object inside InitializeParams to see what the
  // client supports.
  conn.onInitialize((params) async {
    return InitializeResult(
      capabilities: ServerCapabilities(
        // Incremental sync mode: only the content that changed is sent, and
        // the server must update its state accordingly. TextDocuments and
        // TextDocument handle that for us.
        textDocumentSync: const Either2.t1(TextDocumentSyncKind.Incremental),
        // Advertise what else this server can do.
        diagnosticProvider: Either2.t1(DiagnosticOptions(
            interFileDependencies: true, workspaceDiagnostics: false)),
        hoverProvider: Either2.t1(true),
      ),
    );
  });

  // Other listeners can fetch the synced TextDocument through the
  // TextDocumentIdentifier carried in their params.
  conn.onHover((params) async {
    final doc = docs.get(params.textDocument.uri);
    final lines = doc?.lineCount ?? 0;
    return Hover(contents: Either2.t2('Document has $lines lines'));
  });

  await conn.listen();
}

// Validate the text document and return a list of diagnostics.
// Finds each occurrence of two or more uppercase letters in a row.
// Each reported value comes with its zero-based location in the file,
// by line and column. [sourcePath] identifies the document but is not
// currently embedded in the produced diagnostics.
List<Diagnostic> _validateTextDocument(String text, String sourcePath) {
  // Standalone runs of 2+ uppercase ASCII letters, e.g. "AAA".
  final pattern = RegExp(r'\b[A-Z]{2,}\b');

  final lines = text.split('\n');

  // Flatten the per-line diagnostics in a single pass. The previous
  // reduce-with-spread approach copied the accumulator list for every line,
  // making validation O(n^2) in the number of lines.
  return [
    for (var line = 0; line < lines.length; line++)
      ..._convertPatternToDiagnostic(pattern.allMatches(lines[line]), line),
  ];
}

// Convert the regex matches found on a single line into diagnostics.
// For the line "AAA bbb CCC" this yields two diagnostics:
// one covering "AAA" and one covering "CCC".
Iterable<Diagnostic> _convertPatternToDiagnostic(
    Iterable<RegExpMatch> matches, int line) sync* {
  for (final match in matches) {
    final word = match.input.substring(match.start, match.end);
    yield Diagnostic(
      message: '$word is all uppercase.',
      range: Range(
        start: Position(character: match.start, line: line),
        end: Position(character: match.end, line: line),
      ),
    );
  }
}
2 changes: 2 additions & 0 deletions lib/lsp_server.dart
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
export 'src/lsp_server_base.dart';
export 'src/text_document.dart';
export 'src/text_documents.dart';
export 'src/protocol/lsp_protocol/protocol_generated.dart';
export 'src/protocol/lsp_protocol/protocol_special.dart';
236 changes: 236 additions & 0 deletions lib/src/text_document.dart
Original file line number Diff line number Diff line change
@@ -0,0 +1,236 @@
import 'dart:math';

import 'package:lsp_server/lsp_server.dart';

// Code unit of '\n' (line feed), used when scanning for line breaks.
const lineFeed = 10;
// Code unit of '\r' (carriage return); may be followed by '\n' (CRLF).
const carriageReturn = 13;

/// Mimics vscode-languageserver-node's
/// [TextDocument](https://github.com/microsoft/vscode-languageserver-node/blob/main/textDocument/src/main.ts)
class TextDocument {
  final Uri _uri;
  final String _languageId;
  int _version;
  String _content;

  // Offsets (into [_content]) of the first character of each line. Computed
  // lazily, invalidated on full sync, and patched in place on incremental
  // sync.
  List<int>? _lineOffsets;

  TextDocument(this._uri, this._languageId, this._version, this._content);

  /// The associated URI for this document. Most documents have the file scheme, indicating that they
  /// represent files on disk. However, some documents may have other schemes indicating that they
  /// are not available on disk.
  Uri get uri => _uri;

  /// The identifier of the language associated with this document.
  String get languageId => _languageId;

  /// The version number of this document (it will increase after each change,
  /// including undo/redo).
  int get version => _version;

  /// The number of lines in this document.
  int get lineCount => _getLineOffsets().length;

  /// Applies [edits] to the document's text and returns the resulting
  /// string; the document itself is left unmodified.
  ///
  /// Edits are applied in ascending range order. Edits that share a start
  /// position keep their submission order (a stable sort), matching the
  /// mergeSort-based behavior of vscode-languageserver-node.
  ///
  /// Throws a [StateError] when two edits overlap.
  String applyEdits(List<TextEdit> edits) {
    var sortedEdits = edits.map(_getWellformedTextEdit).toList();

    // Dart's List.sort is not guaranteed to be stable, so sort indices and
    // break ties with the original index. This keeps same-position edits in
    // the order the caller supplied them.
    var order = List<int>.generate(sortedEdits.length, (i) => i);
    order.sort((ia, ib) {
      var a = sortedEdits[ia];
      var b = sortedEdits[ib];
      var diff = a.range.start.line - b.range.start.line;
      if (diff == 0) {
        diff = a.range.start.character - b.range.start.character;
      }
      return diff != 0 ? diff : ia - ib;
    });

    var text = getText();
    var lastModifiedOffset = 0;
    List<String> spans = [];

    for (var index in order) {
      var edit = sortedEdits[index];
      var startOffset = offsetAt(edit.range.start);
      if (startOffset < lastModifiedOffset) {
        throw StateError('Overlapping edit');
      } else if (startOffset > lastModifiedOffset) {
        // Copy the untouched gap between the previous edit and this one.
        spans.add(text.substring(lastModifiedOffset, startOffset));
      }
      if (edit.newText.isNotEmpty) {
        spans.add(edit.newText);
      }
      lastModifiedOffset = offsetAt(edit.range.end);
    }
    spans.add(text.substring(lastModifiedOffset));
    return spans.join();
  }

  /// Get the text of this document. Provide a [Range] to get a substring.
  String getText({Range? range}) {
    if (range != null) {
      var start = offsetAt(range.start);
      var end = offsetAt(range.end);
      return _content.substring(start, end);
    }
    return _content;
  }

  /// Convert a [Position] to a zero-based offset.
  ///
  /// A position past the end of the document clamps to the document length;
  /// a character past the end of its line clamps to the end of that line
  /// (before the line terminator).
  int offsetAt(Position position) {
    var lineOffsets = _getLineOffsets();
    if (position.line >= lineOffsets.length) {
      return _content.length;
    } else if (position.line < 0) {
      return 0;
    }

    var lineOffset = lineOffsets[position.line];
    if (position.character <= 0) {
      return lineOffset;
    }

    // Clamp the character offset to the start of the next line (or the end
    // of the document on the last line).
    var nextLineOffset = (position.line + 1 < lineOffsets.length)
        ? lineOffsets[position.line + 1]
        : _content.length;
    var offset = min(lineOffset + position.character, nextLineOffset);

    return _ensureBeforeEndOfLine(offset: offset, lineOffset: lineOffset);
  }

  /// Converts a zero-based offset to a [Position].
  ///
  /// Offsets outside the document are clamped to its bounds.
  Position positionAt(int offset) {
    offset = max(min(offset, _content.length), 0);
    var lineOffsets = _getLineOffsets();
    var low = 0;
    var high = lineOffsets.length;
    if (high == 0) {
      return Position(character: offset, line: 0);
    }

    // Binary search for the last line whose start offset is <= [offset].
    while (low < high) {
      var mid = ((low + high) / 2).floor();
      if (lineOffsets[mid] > offset) {
        high = mid;
      } else {
        low = mid + 1;
      }
    }

    var line = low - 1;
    // Never report a position inside the line terminator.
    offset = _ensureBeforeEndOfLine(
      offset: offset,
      lineOffset: lineOffsets[line],
    );

    return Position(character: offset - lineOffsets[line], line: line);
  }

  /// Updates this text document by modifying its content.
  ///
  /// Handles both incremental changes (a range plus replacement text) and
  /// full-document replacements, bumping [version] to the given value.
  void update(List<TextDocumentContentChangeEvent> changes, int version) {
    _version = version;
    for (var c in changes) {
      var change = c.map((v) => v, (v) => v);
      if (change is TextDocumentContentChangeEvent1) {
        // Incremental sync.
        var range = _getWellformedRange(change.range);
        var text = change.text;

        var startOffset = offsetAt(range.start);
        var endOffset = offsetAt(range.end);

        // Update content.
        _content = _content.substring(0, startOffset) +
            text +
            _content.substring(endOffset, _content.length);

        // Update offsets without recomputing for the whole document.
        var startLine = max(range.start.line, 0);
        var endLine = max(range.end.line, 0);
        // Safe: the offsetAt calls above populated the cache.
        var lineOffsets = _lineOffsets!;
        var addedLineOffsets = _computeLineOffsets(text,
            isAtLineStart: false, textOffset: startOffset);

        if (endLine - startLine == addedLineOffsets.length) {
          // Same number of line breaks removed as added: overwrite in place.
          for (var i = 0, len = addedLineOffsets.length; i < len; i++) {
            lineOffsets[i + startLine + 1] = addedLineOffsets[i];
          }
        } else {
          // Avoid going outside the range on weird range inputs.
          lineOffsets.replaceRange(
            min(startLine + 1, lineOffsets.length),
            min(endLine + 1, lineOffsets.length),
            addedLineOffsets,
          );
        }

        // Shift every offset after the edited region by the size delta.
        var diff = text.length - (endOffset - startOffset);
        if (diff != 0) {
          for (var i = startLine + 1 + addedLineOffsets.length,
                  len = lineOffsets.length;
              i < len;
              i++) {
            lineOffsets[i] = lineOffsets[i] + diff;
          }
        }
      } else if (change is TextDocumentContentChangeEvent2) {
        // Full sync: replace everything and drop the cached line offsets.
        _content = change.text;
        _lineOffsets = null;
      }
    }
  }

  // Returns the cached line-start offsets, computing them on first use.
  List<int> _getLineOffsets() {
    _lineOffsets ??= _computeLineOffsets(_content, isAtLineStart: true);
    return _lineOffsets!;
  }

  // Computes the offsets of every line start in [content]. [textOffset] is
  // added to each offset; [isAtLineStart] says whether [content] itself
  // begins at a line start (so [textOffset] is included as the first entry).
  List<int> _computeLineOffsets(String content,
      {required bool isAtLineStart, int textOffset = 0}) {
    List<int> result = isAtLineStart ? [textOffset] : [];

    for (var i = 0; i < content.length; i++) {
      var char = content.codeUnitAt(i);
      if (_isEndOfLine(char)) {
        if (char == carriageReturn) {
          // Treat a CRLF pair as a single line break.
          var nextCharIsLineFeed =
              i + 1 < content.length && content.codeUnitAt(i + 1) == lineFeed;
          if (nextCharIsLineFeed) {
            i++;
          }
        }
        result.add(textOffset + i + 1);
      }
    }

    return result;
  }

  // Whether [char] is a line-break code unit ('\n' or '\r').
  bool _isEndOfLine(int char) {
    return char == lineFeed || char == carriageReturn;
  }

  // Moves [offset] back so it never points inside the line terminator of the
  // line starting at [lineOffset].
  int _ensureBeforeEndOfLine({required int offset, required int lineOffset}) {
    while (
        offset > lineOffset && _isEndOfLine(_content.codeUnitAt(offset - 1))) {
      offset--;
    }
    return offset;
  }

  // Returns [range] with start <= end, swapping the endpoints if needed.
  Range _getWellformedRange(Range range) {
    var start = range.start;
    var end = range.end;
    if (start.line > end.line ||
        (start.line == end.line && start.character > end.character)) {
      return Range(start: end, end: start);
    }
    return range;
  }

  // Returns [textEdit] with a wellformed range (see [_getWellformedRange]).
  TextEdit _getWellformedTextEdit(TextEdit textEdit) {
    var range = _getWellformedRange(textEdit.range);
    if (range != textEdit.range) {
      return TextEdit(newText: textEdit.newText, range: range);
    }
    return textEdit;
  }
}
Loading