more API notes (#251215)

* more API notes

https://github.com/microsoft/vscode/issues/250007

* fix compile error
This commit is contained in:
Johannes Rieken
2025-06-12 11:16:54 +02:00
committed by GitHub
parent 7443076975
commit cfa7c4d2ef
3 changed files with 91 additions and 8 deletions

View File

@@ -7,7 +7,7 @@
{
"kind": 2,
"language": "github-issues",
"value": "$REPO=repo:microsoft/vscode\n$MILESTONE=milestone:\"May 2025\""
"value": "$REPO=repo:microsoft/vscode\n$MILESTONE=milestone:\"June 2025\""
},
{
"kind": 1,

View File

@@ -14,7 +14,7 @@ declare module 'vscode' {
// TODO@API name scheme
export interface LanguageModelChatResponseOptions {
export interface LanguageModelChatRequestHandleOptions {
// initiator
readonly extensionId: string;
@@ -44,17 +44,99 @@ declare module 'vscode' {
toolMode?: LanguageModelChatToolMode;
}
export interface LanguageModelChatData {
// like ChatResponseProviderMetadata
// TODO@API names: LanguageModelChatMetadata, LanguageModelChatItem
export interface LanguageModelChatInformation {
// TODO@API IMPLICIT from package-json registration
// readonly vendor: string;
/**
 * Identifier of the language model.
 * NOTE(review): presumably unique per contributing vendor/extension — confirm
 * against the registration flow.
 */
readonly id: string;
/**
 * Human-readable name of the language model.
 */
readonly name: string;
/**
 * Opaque family-name of the language model. Values might be `gpt-3.5-turbo`, `gpt4`, `phi2`, or `llama`
 * but they are defined by extensions contributing language models and subject to change.
 */
readonly family: string;
/**
 * An optional, human-readable description of the language model.
 */
readonly description?: string;
/**
 * An optional, human-readable string representing the cost of using the language model.
 */
readonly cost?: string;
/**
 * Opaque version string of the model. This is defined by the extension contributing the language model
 * and subject to change while the identifier is stable.
 */
readonly version: string;
/**
 * Maximum number of tokens the model accepts as input.
 */
readonly maxInputTokens: number;
/**
 * Maximum number of tokens the model can produce as output.
 */
readonly maxOutputTokens: number;
/**
 * When present, this gates the use of `requestLanguageModelAccess` behind an authorization flow where
 * the user must approve of another extension accessing the models contributed by this extension.
 * Additionally, the extension can provide a label that will be shown in the UI.
 */
auth?: true | { label: string };
// TODO@API maybe an enum, LanguageModelChatProviderPickerAvailability?
// TODO@API isPreselected proposed
/**
 * Whether this model is picked by default.
 * NOTE(review): semantics inferred from the name only — confirm.
 */
readonly isDefault?: boolean;
// TODO@API nuke
/**
 * Whether this model appears in the user-facing model picker.
 * NOTE(review): semantics inferred from the name only — confirm; see also
 * the `category` doc below which references this flag.
 */
readonly isUserSelectable?: boolean;
/**
 * Optional capability flags describing what the model supports.
 */
readonly capabilities?: {
// TODO@API have mimeTypes that you support
/** Whether the model supports image (vision) input. */
readonly vision?: boolean;
// TODO@API should be `boolean | number` so extensions can express how many tools they support
/** Whether the model supports tool calling — or, as a number, how many tools it supports. */
readonly toolCalling?: boolean | number;
// TODO@API DO NOT SUPPORT THIS
// readonly agentMode?: boolean;
// TODO@API support prompt TSX style messages, MAYBE leave it out for now
/** Whether the model accepts prompt-TSX style messages. */
readonly promptTsx?: boolean;
};
/**
 * Optional category to group models by in the model picker.
 * The lower the order, the higher the category appears in the list.
 * Has no effect if `isUserSelectable` is `false`.
 * If not specified, the model will appear in the "Other Models" category.
 */
readonly category?: { label: string; order: number };
}
export interface LanguageModelChatProvider2 {
export interface LanguageModelChatProvider2<T extends LanguageModelChatInformation = LanguageModelChatInformation> {
provideLanguageModelChatData(options: { force: boolean }, token: CancellationToken): ProviderResult<LanguageModelChatData[]>;
// signals a change from the provider to the editor so that prepareLanguageModelChat is called again
onDidChange?: Event<void>;
provideResponse(model: LanguageModelChatData, messages: Array<LanguageModelChatMessage | LanguageModelChatMessage2>, options: LanguageModelChatResponseOptions, progress: Progress<LanguageModelTextPart | LanguageModelToolCallPart>, token: CancellationToken): Thenable<any>;
// NOT cacheable (between reloads)
prepareLanguageModelChat(options: { silent: boolean }, token: CancellationToken): ProviderResult<T[]>;
provideTokenCount(model: LanguageModelChatData, text: string | LanguageModelChatMessage | LanguageModelChatMessage2, token: CancellationToken): Thenable<number>;
provideLanguageModelChatResponse(model: T, messages: Array<LanguageModelChatMessage | LanguageModelChatMessage2>, options: LanguageModelChatRequestHandleOptions, progress: Progress<LanguageModelTextPart | LanguageModelToolCallPart>, token: CancellationToken): Thenable<any>;
provideTokenCount(model: T, text: string | LanguageModelChatMessage | LanguageModelChatMessage2, token: CancellationToken): Thenable<number>;
}
export namespace lm {
//
// export function registerChatModelProvider(vendor: string, provider: LanguageModelChatProvider2): Disposable;
}

View File

@@ -68,6 +68,7 @@ declare module 'vscode' {
* @param data Binary image data
* @param mimeType The MIME type of the image
*/
// TODO@API just use string, no enum required
static image(data: Uint8Array, mimeType: ChatImageMimeType): LanguageModelDataPart;
/**
 * Creates a data part from a JSON-serializable value, optionally tagged with a MIME type.
 * NOTE(review): the exact serialization/encoding of `value` and the default `mime` are
 * not visible in this declaration — confirm against the implementation.
 * @param value The value to carry in the data part.
 * @param mime Optional MIME type describing the payload.
 */
static json(value: any, mime?: string): LanguageModelDataPart;