
DLLMs: o-models reasoning efforts
enricoros committed Dec 23, 2024
1 parent 5186df3 · commit a58f703
Showing 3 changed files with 45 additions and 10 deletions.
src/common/components/forms/FormSelectControl.tsx (2 changes: 1 addition & 1 deletion)
@@ -18,7 +18,7 @@ export const FormSelectControl = <TValue extends string>(props: {
   title?: React.ReactNode;
   tooltip?: React.ReactNode;
   disabled?: boolean;
-  options: FormSelectOption<TValue>[];
+  options: Readonly<FormSelectOption<TValue>[]>;
   value?: TValue;
   onChange: (value: TValue) => void;
   placeholder?: React.ReactNode;
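Editor's note: a minimal TypeScript sketch of why the Readonly<...> widening matters, using an assumed, simplified FormSelectOption shape. It lets callers pass option arrays declared with "as const" (such as the reasoningEffortOptions added in the next file), which are readonly tuples and therefore not assignable to a mutable FormSelectOption<TValue>[]:

    // Assumed shape, for illustration only; the real type lives in FormSelectControl.tsx.
    type FormSelectOption<TValue extends string> = { value: TValue; label: string; description?: string };

    const opts = [
      { value: 'low', label: 'Low' },
      { value: 'high', label: 'High' },
    ] as const;

    declare function renderMutable(options: FormSelectOption<'low' | 'high'>[]): void;
    declare function renderReadonly(options: Readonly<FormSelectOption<'low' | 'high'>[]>): void;

    // renderMutable(opts);  // error: 'opts' is readonly and not assignable to a mutable array
    renderReadonly(opts);    // OK: Readonly<T[]> accepts readonly arrays and tuples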
src/modules/llms/models-modal/LLMOptions.tsx (51 changes: 42 additions & 9 deletions)
@@ -4,30 +4,52 @@ import { IconButton, Tooltip } from '@mui/joy';
 import LocalFireDepartmentIcon from '@mui/icons-material/LocalFireDepartment';
 
 import type { DLLM } from '~/common/stores/llms/llms.types';
+import { DModelParameterSpec, FALLBACK_LLM_PARAM_RESPONSE_TOKENS, FALLBACK_LLM_PARAM_TEMPERATURE, getAllModelParameterValues } from '~/common/stores/llms/llms.parameters';
+import { FormSelectControl } from '~/common/components/forms/FormSelectControl';
 import { FormSliderControl } from '~/common/components/forms/FormSliderControl';
 import { InlineError } from '~/common/components/InlineError';
 
-import { FALLBACK_LLM_PARAM_RESPONSE_TOKENS, FALLBACK_LLM_PARAM_TEMPERATURE, getAllModelParameterValues } from '~/common/stores/llms/llms.parameters';
 import { llmsStoreActions } from '~/common/stores/llms/store-llms';
 
 
+const reasoningEffortOptions = [
+  { value: 'high', label: 'High', description: 'Deep, thorough analysis' },
+  { value: 'medium', label: 'Medium', description: 'Balanced reasoning depth' },
+  { value: 'low', label: 'Low', description: 'Quick, concise responses' },
+] as const;
+
+
 export function LLMOptions(props: { llm: DLLM }) {
 
-  // derived state
-  const { id: llmId, maxOutputTokens, initialParameters, userParameters /*, parameterSpecs*/ } = props.llm;
-  const { llmResponseTokens = FALLBACK_LLM_PARAM_RESPONSE_TOKENS, llmTemperature = FALLBACK_LLM_PARAM_TEMPERATURE } = getAllModelParameterValues(initialParameters, userParameters);
+  // input state
+  const { id: llmId, maxOutputTokens, initialParameters, userParameters, parameterSpecs } = props.llm;
 
   // external state
   const { updateLLMUserParameters } = llmsStoreActions();
+  const allParameters = getAllModelParameterValues(initialParameters, userParameters);
+
+  // derived state
+  const llmTemperature = allParameters?.llmTemperature ?? FALLBACK_LLM_PARAM_TEMPERATURE;
+  const llmResponseTokens = allParameters?.llmResponseTokens ?? FALLBACK_LLM_PARAM_RESPONSE_TOKENS;
+  const llmVndOaiReasoningEffort = allParameters?.['vnd.oai.reasoning_effort'];
+  const tempAboveOne = llmTemperature > 1;
 
-  // state (here because the initial state depends on props)
+  // more state (here because the initial state depends on props)
   const [overheat, setOverheat] = React.useState(llmTemperature > 1);
 
-  const showOverheatButton = overheat || llmTemperature >= 1;
-
   // handlers
 
   const handleOverheatToggle = React.useCallback(() => {
-    if (overheat && llmTemperature > 1)
+    if (overheat && tempAboveOne)
       updateLLMUserParameters(llmId, { llmTemperature: 1 });
     setOverheat(!overheat);
-  }, [llmId, llmTemperature, overheat, updateLLMUserParameters]);
+  }, [llmId, overheat, tempAboveOne, updateLLMUserParameters]);
+
+
+  // find the reasoning effort parameter spec
+  const paramVndOaiReasoningEffort = parameterSpecs?.find(p => p.paramId === 'vnd.oai.reasoning_effort') as DModelParameterSpec<'vnd.oai.reasoning_effort'> | undefined;
+
+  const showOverheatButton = overheat || tempAboveOne;
 
 
   return <>
@@ -63,5 +85,16 @@ export function LLMOptions(props: { llm: DLLM }) {
         <InlineError error='Max Output Tokens: Token computations are disabled because this model does not declare the context window size.' />
       )}
 
+      {paramVndOaiReasoningEffort && (
+        <FormSelectControl
+          disabled
+          title='Reasoning Effort'
+          tooltip='Controls how much effort the model spends on reasoning'
+          value={llmVndOaiReasoningEffort ?? 'medium'}
+          onChange={(value) => updateLLMUserParameters(llmId, { 'vnd.oai.reasoning_effort': value })}
+          options={reasoningEffortOptions}
+        />
+      )}
+
   </>;
 }
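Editor's note: the refactor above replaces destructuring-with-defaults by a single merged read plus explicit ?? fallbacks, so optional vendor parameters like 'vnd.oai.reasoning_effort' can be read uniformly. A sketch of the assumed semantics (a shallow user-over-initial merge; the actual getAllModelParameterValues in llms.parameters may do more):

    // Sketch only; the type and merge logic are assumptions, not the repo's code.
    type ParameterValues = {
      llmTemperature?: number;
      llmResponseTokens?: number;
      'vnd.oai.reasoning_effort'?: 'low' | 'medium' | 'high';
    };

    function mergeParameterValues(initial?: ParameterValues, user?: ParameterValues): ParameterValues {
      return { ...initial, ...user }; // the later spread wins: user overrides initial
    }

    // Reads then fall back explicitly, mirroring the component above:
    const all = mergeParameterValues({ llmTemperature: 0.5 }, { llmTemperature: 1.2 });
    const temperature = all.llmTemperature ?? 0.5; // 1.2: the user value wins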
src/modules/llms/server/openai/models/openai.models.ts (2 changes: 2 additions & 0 deletions)
@@ -166,6 +166,7 @@ export const _knownOpenAIChatModels: ManualMappings = [
     maxCompletionTokens: 100000,
     trainingDataCutoff: 'Oct 2023',
     interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Fn, LLM_IF_OAI_Json, LLM_IF_OAI_Vision, LLM_IF_OAI_Reasoning, LLM_IF_OAI_PromptCaching, LLM_IF_HOTFIX_NoStream],
+    parameterSpecs: [{ paramId: 'vnd.oai.reasoning_effort' }],
     chatPrice: { input: 15, cache: { cType: 'oai-ac', read: 7.5 }, output: 60 },
     benchmark: { cbaElo: 1335 + 1 },
   },
@@ -177,6 +178,7 @@ export const _knownOpenAIChatModels: ManualMappings = [
     maxCompletionTokens: 100000,
     trainingDataCutoff: 'Oct 2023',
     interfaces: [LLM_IF_OAI_Chat, LLM_IF_OAI_Fn, LLM_IF_OAI_Json, LLM_IF_OAI_Vision, LLM_IF_OAI_Reasoning, LLM_IF_OAI_PromptCaching, LLM_IF_HOTFIX_NoStream],
+    parameterSpecs: [{ paramId: 'vnd.oai.reasoning_effort' }],
     chatPrice: { input: 15, cache: { cType: 'oai-ac', read: 7.5 }, output: 60 },
     benchmark: { cbaElo: 1335 + 1 },
   },
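Editor's note: with these parameterSpecs entries, only models that declare 'vnd.oai.reasoning_effort' render the new select. The request-side mapping is not part of this commit; below is a hypothetical sketch of how the vendor-prefixed paramId could reach OpenAI's Chat Completions body, whose wire field for o-series models is reasoning_effort:

    // Hypothetical helper; the real adapter lives elsewhere in the server code.
    function applyReasoningEffort(body: Record<string, unknown>, effort?: 'low' | 'medium' | 'high') {
      if (effort)
        body['reasoning_effort'] = effort; // drop the 'vnd.oai.' prefix on the wire
      return body;
    }

    // Usage sketch:
    const request = applyReasoningEffort({ model: 'o1', messages: [] }, 'high');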
