diff --git a/framework/codemodder-base/build.gradle.kts b/framework/codemodder-base/build.gradle.kts
index 42106fa13..acf1cc5c2 100644
--- a/framework/codemodder-base/build.gradle.kts
+++ b/framework/codemodder-base/build.gradle.kts
@@ -29,7 +29,7 @@ dependencies {
   api(libs.javaparser.symbolsolver.model)
   api(libs.javadiff)
   api(libs.jtokkit)
-  api("com.azure:azure-ai-openai:1.0.0-beta.10")
+  api("com.azure:azure-ai-openai:1.0.0-beta.16")
   api("io.github.classgraph:classgraph:4.8.160")
 
   implementation(libs.tuples)
diff --git a/plugins/codemodder-plugin-llm/src/main/java/io/codemodder/plugins/llm/SarifToLLMForBinaryVerificationAndFixingCodemod.java b/plugins/codemodder-plugin-llm/src/main/java/io/codemodder/plugins/llm/SarifToLLMForBinaryVerificationAndFixingCodemod.java
index 58b3e9973..2ade70631 100644
--- a/plugins/codemodder-plugin-llm/src/main/java/io/codemodder/plugins/llm/SarifToLLMForBinaryVerificationAndFixingCodemod.java
+++ b/plugins/codemodder-plugin-llm/src/main/java/io/codemodder/plugins/llm/SarifToLLMForBinaryVerificationAndFixingCodemod.java
@@ -151,7 +151,8 @@ private BinaryThreatAnalysis analyzeThreat(
     // If the estimated token count, which doesn't include the function (~100 tokens) or the reply
     // (~200 tokens), is close to the max, then assume the code is safe (for now).
     int tokenCount =
-        model.tokens(List.of(systemMessage.getContent(), userMessage.getContent().toString()));
+        model.tokens(
+            List.of(systemMessage.getStringContent(), userMessage.getContent().toString()));
     if (tokenCount > model.contextWindow() - 300) {
       return new BinaryThreatAnalysis(
           "Ignoring file: estimated prompt token count (" + tokenCount + ") is too high.",
diff --git a/plugins/codemodder-plugin-llm/src/main/java/io/codemodder/plugins/llm/SarifToLLMForMultiOutcomeCodemod.java b/plugins/codemodder-plugin-llm/src/main/java/io/codemodder/plugins/llm/SarifToLLMForMultiOutcomeCodemod.java
index a5d06cea8..eed8acadf 100644
--- a/plugins/codemodder-plugin-llm/src/main/java/io/codemodder/plugins/llm/SarifToLLMForMultiOutcomeCodemod.java
+++ b/plugins/codemodder-plugin-llm/src/main/java/io/codemodder/plugins/llm/SarifToLLMForMultiOutcomeCodemod.java
@@ -201,7 +201,8 @@ private boolean estimatedToExceedContextWindow(final CodemodInvocationContext co
     int tokenCount =
         model.tokens(
            List.of(
-                getSystemMessage().getContent(), estimatedUserMessage.getContent().toString()));
+                getSystemMessage().getStringContent(),
+                estimatedUserMessage.getContent().toString()));
     // estimated token count doesn't include the function (~100 tokens) or the reply
     // (~200 tokens) so add those estimates before checking against window size
     tokenCount += 300;