Skip to content

Commit 59dce3d

Browse files
authored
[Android] Add API to use new config
Differential Revision: D73556369. Pull Request resolved: #10346.
1 parent 3e38b85 commit 59dce3d

File tree

1 file changed

+24
-0
lines changed
  • extension/android/executorch_android/src/main/java/org/pytorch/executorch/extension/llm

1 file changed

+24
-0
lines changed

extension/android/executorch_android/src/main/java/org/pytorch/executorch/extension/llm/LlmModule.java

+24
Original file line numberDiff line numberDiff line change
@@ -59,6 +59,17 @@ public LlmModule(int modelType, String modulePath, String tokenizerPath, float t
5959
mHybridData = initHybrid(modelType, modulePath, tokenizerPath, temperature, null);
6060
}
6161

62+
/** Constructs a LLM Module for a model with the given LlmModuleConfig */
63+
public LlmModule(LlmModuleConfig config) {
64+
mHybridData =
65+
initHybrid(
66+
config.getModelType(),
67+
config.getModulePath(),
68+
config.getTokenizerPath(),
69+
config.getTemperature(),
70+
config.getDataPath());
71+
}
72+
6273
/** Resets the native state of this module by delegating to {@code mHybridData.resetNative()}. */
public void resetNative() {
  mHybridData.resetNative();
}
@@ -107,6 +118,19 @@ public int generate(String prompt, int seqLen, LlmCallback llmCallback, boolean
107118
return generate(null, 0, 0, 0, prompt, seqLen, llmCallback, echo);
108119
}
109120

121+
/**
122+
* Start generating tokens from the module.
123+
*
124+
* @param prompt Input prompt
125+
* @param config the config for generation
126+
* @param llmCallback callback object to receive results
127+
*/
128+
public int generate(String prompt, LlmGenerationConfig config, LlmCallback llmCallback) {
129+
int seqLen = config.getSeqLen();
130+
boolean echo = config.isEcho();
131+
return generate(null, 0, 0, 0, prompt, seqLen, llmCallback, echo);
132+
}
133+
110134
/**
111135
* Start generating tokens from the module.
112136
*

0 commit comments

Comments
 (0)