autocomplete draft
mathewpareles committed Jan 28, 2025
1 parent e4d747d commit e4bb15e
Showing 6 changed files with 217 additions and 86 deletions.
src/vs/platform/void/common/llmMessageTypes.ts (24 additions & 8 deletions)
@@ -34,16 +34,18 @@ export type _InternalLLMMessage = {
content: string;
}

type _InternalOllamaFIMMessages = {
prefix: string;
suffix: string;
stopTokens: string[];
}

type SendLLMType = {
type: 'sendLLMMessage';
messages: LLMMessage[];
} | {
type: 'ollamaFIM';
messages: {
prefix: string;
suffix: string;
}
messages: _InternalOllamaFIMMessages;
}

// service types
@@ -56,7 +58,7 @@ export type ServiceSendLLMMessageParams = {
} & SendLLMType

// params to the true sendLLMMessage function
export type SendLLMMMessageParams = {
export type SendLLMMessageParams = {
onText: OnText;
onFinalMessage: OnFinalMessage;
onError: OnError;
@@ -74,26 +76,40 @@ export type SendLLMMMessageParams = {

// can't send functions across a proxy, use listeners instead
export type BlockedMainLLMMessageParams = 'onText' | 'onFinalMessage' | 'onError' | 'abortRef'
export type MainSendLLMMessageParams = Omit<SendLLMMMessageParams, BlockedMainLLMMessageParams> & { requestId: string } & SendLLMType
export type MainSendLLMMessageParams = Omit<SendLLMMessageParams, BlockedMainLLMMessageParams> & { requestId: string } & SendLLMType

export type MainLLMMessageAbortParams = { requestId: string }

export type EventLLMMessageOnTextParams = Parameters<OnText>[0] & { requestId: string }
export type EventLLMMessageOnFinalMessageParams = Parameters<OnFinalMessage>[0] & { requestId: string }
export type EventLLMMessageOnErrorParams = Parameters<OnError>[0] & { requestId: string }


export type _InternalSendLLMMessageFnType = (
params: {
onText: OnText;
onFinalMessage: OnFinalMessage;
onError: OnError;
providerName: ProviderName;
settingsOfProvider: SettingsOfProvider;
modelName: string;
_setAborter: (aborter: () => void) => void;

messages: _InternalLLMMessage[];
}
) => void

settingsOfProvider: SettingsOfProvider;
export type _InternalOllamaFIMMessageFnType = (
params: {
onText: OnText;
onFinalMessage: OnFinalMessage;
onError: OnError;
providerName: ProviderName;
settingsOfProvider: SettingsOfProvider;
modelName: string;

_setAborter: (aborter: () => void) => void;

messages: _InternalOllamaFIMMessages;
}
) => void
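For reference, the new ollamaFIM variant of SendLLMType carries the text before and after the cursor plus stop tokens, rather than a chat history. Below is a minimal sketch of assembling such a payload; the FIMMessages and OllamaFIMRequest aliases and the buildAutocompleteRequest helper are illustrative only and not part of this commit, and real stop tokens depend on the model's fill-in-the-middle template.

// Illustrative only: mirrors the (non-exported) _InternalOllamaFIMMessages shape above.
type FIMMessages = { prefix: string; suffix: string; stopTokens: string[] };
type OllamaFIMRequest = { type: 'ollamaFIM'; messages: FIMMessages };

// Hypothetical helper: turns the text around the cursor into an autocomplete request.
const buildAutocompleteRequest = (textBeforeCursor: string, textAfterCursor: string): OllamaFIMRequest => ({
	type: 'ollamaFIM',
	messages: {
		prefix: textBeforeCursor,
		suffix: textAfterCursor,
		stopTokens: ['\n\n\n'], // placeholder; pick stop tokens to match the model's FIM template
	},
});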

src/vs/platform/void/electron-main/llmMessage/ollama.ts (40 additions & 2 deletions)
@@ -4,7 +4,7 @@
*--------------------------------------------------------------------------------------*/

import { Ollama } from 'ollama';
import { _InternalModelListFnType, _InternalSendLLMMessageFnType, OllamaModelResponse } from '../../common/llmMessageTypes.js';
import { _InternalModelListFnType, _InternalOllamaFIMMessageFnType, _InternalSendLLMMessageFnType, OllamaModelResponse } from '../../common/llmMessageTypes.js';
import { defaultProviderSettings } from '../../common/voidSettingsTypes.js';

export const ollamaList: _InternalModelListFnType<OllamaModelResponse> = async ({ onSuccess: onSuccess_, onError: onError_, settingsOfProvider }) => {
@@ -25,7 +25,7 @@ export const ollamaList: _InternalModelListFnType<OllamaModelResponse> = async (
const ollama = new Ollama({ host: thisConfig.endpoint })
ollama.list()
.then((response) => {
console.log('MODELS!!!!!!!!!!!!!!!!!', response)
// console.log('MODELS!!!!!!!!!!!!!!!!!', response)
const { models } = response
onSuccess({ models })
})
@@ -39,6 +39,44 @@ export const ollamaList: _InternalModelListFnType<OllamaModelResponse> = async (
}


export const sendOllamaFIM: _InternalOllamaFIMMessageFnType = ({ messages, onText, onFinalMessage, onError, settingsOfProvider, modelName, _setAborter }) => {

const thisConfig = settingsOfProvider.ollama
// if endpoint is empty, normally ollama will send to 11434, but we want it to fail - the user should type it in
if (!thisConfig.endpoint) throw new Error(`Ollama Endpoint was empty (please enter ${defaultProviderSettings.ollama.endpoint} if you want the default).`)

let fullText = ''

const ollama = new Ollama({ host: thisConfig.endpoint })

ollama.generate({
model: modelName,
prompt: messages.prefix,
suffix: messages.suffix,
options: {
stop: messages.stopTokens,
},
raw: true,
stream: true,
// options: { num_predict: parseMaxTokensStr(thisConfig.maxTokens) } // this is max_tokens
})
.then(async stream => {
_setAborter(() => stream.abort())
// iterate through the stream
for await (const chunk of stream) {
const newText = chunk.response;
fullText += newText;
onText({ newText, fullText });
}
onFinalMessage({ fullText });
console.log('!!!!! OLLAMA RESULT', JSON.stringify(fullText))
})
// when error/fail
.catch((error) => {
onError({ message: error + '', fullError: error })
})
};


// Ollama
export const sendOllamaMsg: _InternalSendLLMMessageFnType = ({ messages, onText, onFinalMessage, onError, settingsOfProvider, modelName, _setAborter }) => {
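A rough sketch of calling the new sendOllamaFIM directly follows; the endpoint, model name, stop tokens, and the reduced settings object are placeholders (in this commit the function is reached through sendLLMMessage in the next file, which supplies the real settings).

// Illustrative call only; the settings object is cut down to the one field this path reads,
// and the import path assumes a file sitting next to ollama.ts.
import { sendOllamaFIM } from './ollama.js';

sendOllamaFIM({
	messages: { prefix: 'function add(a: number, b: number) {\n\treturn ', suffix: '\n}', stopTokens: ['\n\n'] },
	onText: ({ newText, fullText }) => { /* stream the partial completion into the editor */ },
	onFinalMessage: ({ fullText }) => { console.log('completion:', fullText) },
	onError: ({ message }) => { console.error(message) },
	settingsOfProvider: { ollama: { endpoint: 'http://127.0.0.1:11434' } } as any, // placeholder settings
	modelName: 'qwen2.5-coder:1.5b', // placeholder model name
	providerName: 'ollama',
	_setAborter: (abort) => { /* keep abort() around so the request can be cancelled */ },
})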
src/vs/platform/void/electron-main/llmMessage/sendLLMMessage.ts (16 additions & 16 deletions)
@@ -3,11 +3,11 @@
* Licensed under the Apache License, Version 2.0. See LICENSE.txt for more information.
*--------------------------------------------------------------------------------------*/

import { SendLLMMMessageParams, OnText, OnFinalMessage, OnError, LLMMessage, _InternalLLMMessage } from '../../common/llmMessageTypes.js';
import { SendLLMMessageParams, OnText, OnFinalMessage, OnError, LLMMessage, _InternalLLMMessage } from '../../common/llmMessageTypes.js';
import { IMetricsService } from '../../common/metricsService.js';

import { sendAnthropicMsg } from './anthropic.js';
import { sendOllamaMsg } from './ollama.js';
import { sendOllamaFIM, sendOllamaMsg } from './ollama.js';
import { sendOpenAIMsg } from './openai.js';
import { sendGeminiMsg } from './gemini.js';
import { sendGroqMsg } from './groq.js';
@@ -59,25 +59,22 @@ export const sendLLMMessage = ({
settingsOfProvider,
providerName,
modelName,
}: SendLLMMMessageParams,
}: SendLLMMessageParams,

metricsService: IMetricsService
) => {
messages.unshift({ role: 'system', content: aiInstructions })
// messages.unshift({ role: 'system', content: aiInstructions })

const messages = type === 'sendLLMMessage' ? cleanMessages(messages_) : []


const prefixAndSuffix = type === 'ollamaFIM' ? messages_ : null
const messagesArr = type === 'sendLLMMessage' ? cleanMessages(messages_) : []

// only captures number of messages and message "shape", no actual code, instructions, prompts, etc
const captureLLMEvent = (eventId: string, extras?: object) => {
metricsService.capture(eventId, {
providerName,
modelName,
...type === 'sendLLMMessage' ? {
numMessages: messages?.length,
messagesShape: messages?.map(msg => ({ role: msg.role, length: msg.content.length })),
numMessages: messagesArr?.length,
messagesShape: messagesArr?.map(msg => ({ role: msg.role, length: msg.content.length })),
origNumMessages: messages_?.length,
origMessagesShape: messages_?.map(msg => ({ role: msg.role, length: msg.content.length })),

@@ -122,27 +119,30 @@
}
abortRef_.current = onAbort

captureLLMEvent(`${loggingName} - Sending Message`, { messageLength: messages[messages.length - 1]?.content.length })
captureLLMEvent(`${loggingName} - Sending Message`, { messageLength: messagesArr[messagesArr.length - 1]?.content.length })

try {
switch (providerName) {
case 'anthropic':
sendAnthropicMsg({ messages, onText, onFinalMessage, onError, settingsOfProvider, modelName, _setAborter, providerName });
sendAnthropicMsg({ messages: messagesArr, onText, onFinalMessage, onError, settingsOfProvider, modelName, _setAborter, providerName });
break;
case 'openAI':
case 'openRouter':
case 'deepseek':
case 'openAICompatible':
sendOpenAIMsg({ messages, onText, onFinalMessage, onError, settingsOfProvider, modelName, _setAborter, providerName });
sendOpenAIMsg({ messages: messagesArr, onText, onFinalMessage, onError, settingsOfProvider, modelName, _setAborter, providerName });
break;
case 'gemini':
sendGeminiMsg({ messages, onText, onFinalMessage, onError, settingsOfProvider, modelName, _setAborter, providerName });
sendGeminiMsg({ messages: messagesArr, onText, onFinalMessage, onError, settingsOfProvider, modelName, _setAborter, providerName });
break;
case 'ollama':
sendOllamaMsg({ messages, onText, onFinalMessage, onError, settingsOfProvider, modelName, _setAborter, providerName });
if (type === 'ollamaFIM')
sendOllamaFIM({ messages: messages_, onText, onFinalMessage, onError, settingsOfProvider, modelName, _setAborter, providerName })
else
sendOllamaMsg({ messages: messagesArr, onText, onFinalMessage, onError, settingsOfProvider, modelName, _setAborter, providerName });
break;
case 'groq':
sendGroqMsg({ messages, onText, onFinalMessage, onError, settingsOfProvider, modelName, _setAborter, providerName });
sendGroqMsg({ messages: messagesArr, onText, onFinalMessage, onError, settingsOfProvider, modelName, _setAborter, providerName });
break;
default:
onError({ message: `Error: Void provider was "${providerName}", which is not recognized.`, fullError: null })
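The abortRef_ and _setAborter plumbing in this file lets each provider-specific sender register a cancel function once its stream is live. A standalone sketch of that pattern follows; the names and the fake streaming work are local to the example, not the actual Void implementation.

// Standalone sketch of the aborter pattern: the sender registers how to cancel
// once real work exists, and the caller can trigger it later through a ref.
type AbortRef = { current: (() => void) | null };

const startStreamingWork = (_setAborter: (aborter: () => void) => void) => {
	const timer = setInterval(() => { /* pretend to stream tokens */ }, 100);
	_setAborter(() => clearInterval(timer)); // register cancellation once the stream exists
};

const abortRef: AbortRef = { current: null };
startStreamingWork((aborter) => { abortRef.current = aborter; });
// later, e.g. when the user keeps typing and the suggestion is stale:
abortRef.current?.();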
src/vs/platform/void/electron-main/llmMessageChannel.ts (2 additions & 2 deletions)
@@ -8,7 +8,7 @@

import { IServerChannel } from '../../../base/parts/ipc/common/ipc.js';
import { Emitter, Event } from '../../../base/common/event.js';
import { EventLLMMessageOnTextParams, EventLLMMessageOnErrorParams, EventLLMMessageOnFinalMessageParams, MainSendLLMMessageParams, AbortRef, SendLLMMMessageParams, MainLLMMessageAbortParams, MainModelListParams, ModelListParams, EventModelListOnSuccessParams, EventModelListOnErrorParams, OllamaModelResponse, OpenaiCompatibleModelResponse, } from '../common/llmMessageTypes.js';
import { EventLLMMessageOnTextParams, EventLLMMessageOnErrorParams, EventLLMMessageOnFinalMessageParams, MainSendLLMMessageParams, AbortRef, SendLLMMessageParams, MainLLMMessageAbortParams, MainModelListParams, ModelListParams, EventModelListOnSuccessParams, EventModelListOnErrorParams, OllamaModelResponse, OpenaiCompatibleModelResponse, } from '../common/llmMessageTypes.js';
import { sendLLMMessage } from './llmMessage/sendLLMMessage.js'
import { IMetricsService } from '../common/metricsService.js';
import { ollamaList } from './llmMessage/ollama.js';
@@ -97,7 +97,7 @@ export class LLMMessageChannel implements IServerChannel {
if (!(requestId in this._abortRefOfRequestId_llm))
this._abortRefOfRequestId_llm[requestId] = { current: null }

const mainThreadParams: SendLLMMMessageParams = {
const mainThreadParams: SendLLMMessageParams = {
...params,
onText: ({ newText, fullText }) => { this._onText_llm.fire({ requestId, newText, fullText }); },
onFinalMessage: ({ fullText }) => { this._onFinalMessage_llm.fire({ requestId, fullText }); },
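Because callbacks cannot cross the IPC proxy (see the "use listeners instead" note in llmMessageTypes.ts), the channel re-fires each callback as an event tagged with requestId. A rough sketch of how the consuming side can map those events back to per-request callbacks is below; this renderer-side half is not part of the diff shown here and all names are illustrative.

// Illustrative renderer-side pairing for the requestId-keyed onText events fired above.
type OnTextEvent = { requestId: string; newText: string; fullText: string };
type OnTextCallback = (p: { newText: string; fullText: string }) => void;

const onTextCallbacks = new Map<string, OnTextCallback>();

// Register the request's callback before sending the message over the channel.
const registerOnText = (requestId: string, onText: OnTextCallback) => {
	onTextCallbacks.set(requestId, onText);
};

// One event listener fans each incoming event back out to the matching request.
const handleOnTextEvent = (e: OnTextEvent) => {
	onTextCallbacks.get(e.requestId)?.({ newText: e.newText, fullText: e.fullText });
};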