Skip to content

Commit

Permalink
Merge remote-tracking branch 'upstream/main'
Browse files Browse the repository at this point in the history
  • Loading branch information
actions-user committed Jan 22, 2025
2 parents 406834d + 3c56346 commit e61028b
Show file tree
Hide file tree
Showing 16 changed files with 1,154 additions and 223 deletions.
24 changes: 19 additions & 5 deletions app/components/chat/AssistantMessage.tsx
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
import { memo } from 'react';
import { Markdown } from './Markdown';
import type { JSONValue } from 'ai';
import type { ProgressAnnotation } from '~/types/context';
import Popover from '~/components/ui/Popover';

interface AssistantMessageProps {
content: string;
Expand All @@ -10,7 +12,12 @@ interface AssistantMessageProps {
export const AssistantMessage = memo(({ content, annotations }: AssistantMessageProps) => {
const filteredAnnotations = (annotations?.filter(
(annotation: JSONValue) => annotation && typeof annotation === 'object' && Object.keys(annotation).includes('type'),
) || []) as { type: string; value: any }[];
) || []) as { type: string; value: any } & { [key: string]: any }[];

let progressAnnotation: ProgressAnnotation[] = filteredAnnotations.filter(
(annotation) => annotation.type === 'progress',
) as ProgressAnnotation[];
progressAnnotation = progressAnnotation.sort((a, b) => b.value - a.value);

const usage: {
completionTokens: number;
Expand All @@ -20,11 +27,18 @@ export const AssistantMessage = memo(({ content, annotations }: AssistantMessage

return (
<div className="overflow-hidden w-full">
{usage && (
<div className="text-sm text-bolt-elements-textSecondary mb-2">
Tokens: {usage.totalTokens} (prompt: {usage.promptTokens}, completion: {usage.completionTokens})
<>
<div className=" flex gap-2 items-center text-sm text-bolt-elements-textSecondary mb-2">
{progressAnnotation.length > 0 && (
<Popover trigger={<div className="i-ph:info" />}>{progressAnnotation[0].message}</Popover>
)}
{usage && (
<div>
Tokens: {usage.totalTokens} (prompt: {usage.promptTokens}, completion: {usage.completionTokens})
</div>
)}
</div>
)}
</>
<Markdown html>{content}</Markdown>
</div>
);
Expand Down
20 changes: 20 additions & 0 deletions app/components/ui/Popover.tsx
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
import * as RadixPopover from '@radix-ui/react-popover';
import type { PropsWithChildren, ReactNode } from 'react';

/**
 * Thin wrapper around @radix-ui/react-popover.
 *
 * Renders `trigger` as the toggle element (via `asChild`, so `trigger` must be
 * able to receive a ref and event handlers) and `children` as the floating
 * content, positioned above and centered on the trigger.
 *
 * Fixes vs. previous version:
 * - Named component instead of an anonymous default export (better stack
 *   traces / devtools display name; same default-export interface).
 * - Removed the stray empty `<Popover.Anchor />`: when an Anchor part is
 *   present, Radix positions the content against it instead of the trigger,
 *   so an empty Anchor anchors the popover to a zero-size element.
 */
export default function Popover({ children, trigger }: PropsWithChildren<{ trigger: ReactNode }>) {
  return (
    <RadixPopover.Root>
      <RadixPopover.Trigger asChild>{trigger}</RadixPopover.Trigger>
      <RadixPopover.Portal>
        <RadixPopover.Content
          sideOffset={10}
          side="top"
          align="center"
          className="bg-bolt-elements-background-depth-2 text-bolt-elements-item-contentAccent p-2 rounded-md shadow-xl z-workbench"
        >
          {children}
          {/* NOTE(review): Radix Arrow renders an SVG polygon; a `bg-*` utility
              does not color SVG — this probably needs a `fill-*` class instead.
              Left unchanged pending visual confirmation. */}
          <RadixPopover.Arrow className="bg-bolt-elements-item-background-depth-2" />
        </RadixPopover.Content>
      </RadixPopover.Portal>
    </RadixPopover.Root>
  );
}
4 changes: 2 additions & 2 deletions app/entry.server.tsx
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
import type { AppLoadContext, EntryContext } from '@remix-run/cloudflare';
import type { AppLoadContext } from '@remix-run/cloudflare';
import { RemixServer } from '@remix-run/react';
import { isbot } from 'isbot';
import { renderToReadableStream } from 'react-dom/server';
Expand All @@ -10,7 +10,7 @@ export default async function handleRequest(
request: Request,
responseStatusCode: number,
responseHeaders: Headers,
remixContext: EntryContext,
remixContext: any,
_loadContext: AppLoadContext,
) {
// await initializeModelList({});
Expand Down
33 changes: 33 additions & 0 deletions app/lib/.server/llm/constants.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,3 +3,36 @@ export const MAX_TOKENS = 8000;

// limits the number of model responses that can be returned in a single request
export const MAX_RESPONSE_SEGMENTS = 2;

// A file entry in a FileMap: its content plus a flag marking binary data.
export interface File {
  type: 'file';
  content: string;
  isBinary: boolean;
}

// A directory entry in a FileMap; folders carry no payload of their own.
export interface Folder {
  type: 'folder';
}

// Discriminated union over the `type` tag — every entry is a file or a folder.
type Dirent = File | Folder;

// Map from path to entry. `undefined` is part of the value type so entries
// can be tombstoned by assignment without deleting the key.
export type FileMap = Record<string, Dirent | undefined>;

// Glob patterns for paths excluded when scanning a project: dependency and
// build output, VCS metadata, editor config, caches, logs, and lockfiles.
export const IGNORE_PATTERNS = [
  'node_modules/**',
  '.git/**',
  'dist/**',
  'build/**',
  '.next/**',
  'coverage/**',
  '.cache/**',
  '.vscode/**',
  '.idea/**',
  '**/*.log',
  '**/.DS_Store',
  '**/npm-debug.log*',
  '**/yarn-debug.log*',
  '**/yarn-error.log*',
  '**/*lock.json',
  '**/*lock.yml',
];
138 changes: 138 additions & 0 deletions app/lib/.server/llm/create-summary.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,138 @@
import { generateText, type CoreTool, type GenerateTextResult, type Message } from 'ai';
import type { IProviderSetting } from '~/types/model';
import { DEFAULT_MODEL, DEFAULT_PROVIDER, PROVIDER_LIST } from '~/utils/constants';
import { extractCurrentContext, extractPropertiesFromMessage, simplifyBoltActions } from './utils';
import { createScopedLogger } from '~/utils/logger';
import { LLMManager } from '~/lib/modules/llm/manager';

const logger = createScopedLogger('create-summary');

/**
 * Produces a rolling summary of a chat conversation by asking the
 * currently-selected LLM to summarize the messages since the last stored
 * summary.
 *
 * @param props.messages Full chat history. User messages may embed a
 *   model/provider selection, which is extracted and stripped here.
 * @param props.env Server environment forwarded to the provider.
 * @param props.apiKeys Per-provider API keys.
 * @param props.providerSettings Per-provider settings.
 * @param props.promptId Currently unused; accepted for interface parity with
 *   sibling LLM entry points.
 * @param props.contextOptimization When true, assistant messages are
 *   compacted via `simplifyBoltActions` before summarization.
 * @param props.onFinish Optional callback invoked with the raw
 *   `generateText` result.
 * @returns The summary text produced by the model.
 * @throws Error if the selected provider exposes no models at all.
 */
export async function createSummary(props: {
  messages: Message[];
  env?: Env;
  apiKeys?: Record<string, string>;
  providerSettings?: Record<string, IProviderSetting>;
  promptId?: string;
  contextOptimization?: boolean;
  onFinish?: (resp: GenerateTextResult<Record<string, CoreTool<any, any>>, never>) => void;
}) {
  const { messages, env: serverEnv, apiKeys, providerSettings, contextOptimization, onFinish } = props;

  // The user can switch model/provider mid-conversation; the last user
  // message's selection wins.
  let currentModel = DEFAULT_MODEL;
  let currentProvider = DEFAULT_PROVIDER.name;

  const processedMessages = messages.map((message) => {
    if (message.role === 'user') {
      const { model, provider, content } = extractPropertiesFromMessage(message);
      currentModel = model;
      currentProvider = provider;

      return { ...message, content };
    } else if (message.role === 'assistant') {
      // was `==`: use strict equality for consistency with the user branch
      let content = message.content;

      if (contextOptimization) {
        content = simplifyBoltActions(content);
      }

      return { ...message, content };
    }

    return message;
  });

  const provider = PROVIDER_LIST.find((p) => p.name === currentProvider) || DEFAULT_PROVIDER;
  const staticModels = LLMManager.getInstance().getStaticModelListFromProvider(provider);
  let modelDetails = staticModels.find((m) => m.name === currentModel);

  if (!modelDetails) {
    // Static list miss: consult the provider's full (possibly dynamic) model list.
    const modelsList = [
      ...(provider.staticModels || []),
      ...(await LLMManager.getInstance().getModelListFromProvider(provider, {
        apiKeys,
        providerSettings,
        serverEnv: serverEnv as any,
      })),
    ];

    if (!modelsList.length) {
      throw new Error(`No models found for provider ${provider.name}`);
    }

    modelDetails = modelsList.find((m) => m.name === currentModel);

    if (!modelDetails) {
      // Unknown model name: degrade gracefully to the provider's first model.
      logger.warn(
        `MODEL [${currentModel}] not found in provider [${provider.name}]. Falling back to first model. ${modelsList[0].name}`,
      );
      modelDetails = modelsList[0];
    }
  }

  // If a previous summary exists, feed it in as context and summarize only
  // the messages that came after its anchor message.
  let slicedMessages = processedMessages;
  const { summary } = extractCurrentContext(processedMessages);
  let summaryText: string | undefined = undefined;

  if (summary && summary.type === 'chatSummary') {
    summaryText = `Below is the Chat Summary till now, this is chat summary before the conversation provided by the user
you should also use this as historical message while providing the response to the user.
${summary.summary}`;

    if (summary.chatId) {
      // Fix: only slice when the anchor message is actually present. The
      // previous code defaulted the index to 0 and sliced unconditionally,
      // silently dropping the first message whenever the chatId was missing.
      const anchorIndex = processedMessages.findIndex((m) => m.id === summary.chatId);

      if (anchorIndex >= 0) {
        slicedMessages = processedMessages.slice(anchorIndex + 1);
      }
    }
  }

  // Message content may be a plain string or an array of typed parts; pull
  // out the text part (empty string when absent).
  const extractTextContent = (message: Message) =>
    Array.isArray(message.content)
      ? (message.content.find((item) => item.type === 'text')?.text as string) || ''
      : message.content;

  // Ask the selected model for the summary ("tou" typo in the prompt fixed).
  const resp = await generateText({
    system: `
You are a software engineer. You are working on a project. You need to summarize the work till now and provide a summary of the chat till now.
${summaryText}
RULES:
* Only provide the summary of the chat till now.
* Do not provide any new information.
`,
    prompt: `
please provide a summary of the chat till now.
below is the latest chat:
---
${slicedMessages
  .map((x) => {
    return `---\n[${x.role}] ${extractTextContent(x)}\n---`;
  })
  .join('\n')}
---
`,
    model: provider.getModelInstance({
      model: currentModel,
      serverEnv,
      apiKeys,
      providerSettings,
    }),
  });

  const response = resp.text;

  if (onFinish) {
    onFinish(resp);
  }

  return response;
}
Loading

0 comments on commit e61028b

Please sign in to comment.