Skip to content

Commit 83d31f6

Browse files
Use only physical core count for number of llama inference threads
1 parent: 5fa248c — commit: 83d31f6

File tree

1 file changed

+2
-1
lines changed

1 file changed

+2
-1
lines changed

src/main/java/org/myrobotlab/service/Llama.java

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -2,6 +2,7 @@
22

33
import de.kherud.llama.LlamaModel;
44
import de.kherud.llama.Parameters;
5+
import org.myrobotlab.framework.Platform;
56
import org.myrobotlab.framework.Service;
67
import org.myrobotlab.logging.Level;
78
import org.myrobotlab.logging.LoggingFactory;
@@ -37,7 +38,7 @@ public Llama(String reservedKey, String inId) {
3738
public void loadModel(String modelPath) {
3839
Parameters params = new Parameters.Builder()
3940
.setNGpuLayers(0)
40-
.setNThreads(java.lang.Runtime.getRuntime().availableProcessors())
41+
.setNThreads(Platform.getLocalInstance().getNumPhysicalProcessors())
4142
.setTemperature(0.7f)
4243
.setPenalizeNl(true)
4344
.setMirostat(Parameters.MiroStat.V2)

0 commit comments

Comments
 (0)