This repository was archived by the owner on Oct 6, 2025. It is now read-only.
Merged
2 changes: 1 addition & 1 deletion commands/install-runner.go
@@ -113,7 +113,7 @@ func newInstallRunner() *cobra.Command {
var gpuMode string
c := &cobra.Command{
Use: "install-runner",
Short: "Install Docker Model Runner",
Short: "Install Docker Model Runner (Docker Engine only)",
RunE: func(cmd *cobra.Command, args []string) error {
// Ensure that we're running in a supported model runner context.
engineKind := modelRunner.EngineKind()
2 changes: 1 addition & 1 deletion commands/list.go
@@ -19,7 +19,7 @@ func newListCmd() *cobra.Command {
c := &cobra.Command{
Use: "list [OPTIONS]",
Aliases: []string{"ls"},
Short: "List the available models that can be run with the Docker Model Runner",
Short: "List the models pulled to your local environment",
RunE: func(cmd *cobra.Command, args []string) error {
if openai && quiet {
return fmt.Errorf("--quiet flag cannot be used with --openai flag")
4 changes: 2 additions & 2 deletions commands/logs.go
@@ -125,7 +125,7 @@ func newLogsCmd() *cobra.Command {
},
ValidArgsFunction: completion.NoComplete,
}
c.Flags().BoolVarP(&follow, "follow", "f", false, "Follow log output")
c.Flags().BoolVar(&noEngines, "no-engines", false, "Skip inference engines logs")
c.Flags().BoolVarP(&follow, "follow", "f", false, "View logs with real-time streaming")
c.Flags().BoolVar(&noEngines, "no-engines", false, "Exclude inference engine logs from the output")
return c
}
2 changes: 1 addition & 1 deletion commands/package.go
@@ -19,7 +19,7 @@ func newPackagedCmd() *cobra.Command {

c := &cobra.Command{
Use: "package --gguf <path> [--license <path>...] --push TARGET",
Short: "package a model",
Short: "Package a GGUF file into a Docker model OCI artifact, with optional licenses, and pushes it to the specified registry",
Args: func(cmd *cobra.Command, args []string) error {
if len(args) != 1 {
return fmt.Errorf(
2 changes: 1 addition & 1 deletion commands/pull.go
@@ -11,7 +11,7 @@ import (
func newPullCmd() *cobra.Command {
c := &cobra.Command{
Use: "pull MODEL",
Short: "Download a model",
Short: "Pull a model from Docker Hub or HuggingFace to your local environment",
Args: func(cmd *cobra.Command, args []string) error {
if len(args) != 1 {
return fmt.Errorf(
2 changes: 1 addition & 1 deletion commands/push.go
@@ -11,7 +11,7 @@ import (
func newPushCmd() *cobra.Command {
c := &cobra.Command{
Use: "push MODEL",
Short: "Upload a model",
Short: "Push a model to Docker Hub",
Args: func(cmd *cobra.Command, args []string) error {
if len(args) != 1 {
return fmt.Errorf(
2 changes: 1 addition & 1 deletion commands/rm.go
@@ -12,7 +12,7 @@ func newRemoveCmd() *cobra.Command {

c := &cobra.Command{
Use: "rm [MODEL...]",
Short: "Remove models downloaded from Docker Hub",
Short: "Remove local models downloaded from Docker Hub",
Args: func(cmd *cobra.Command, args []string) error {
if len(args) < 1 {
return fmt.Errorf(
2 changes: 1 addition & 1 deletion commands/run.go
@@ -18,7 +18,7 @@ func newRunCmd() *cobra.Command {
cmdArgs := "MODEL [PROMPT]"
c := &cobra.Command{
Use: "run " + cmdArgs,
Short: "Run a model with the Docker Model Runner",
Short: "Run a model and interact with it using a submitted prompt or chat mode",
RunE: func(cmd *cobra.Command, args []string) error {
model := args[0]
prompt := ""
4 changes: 2 additions & 2 deletions desktop/context.go
@@ -98,9 +98,9 @@ const (
func (k ModelRunnerEngineKind) String() string {
switch k {
case ModelRunnerEngineKindMoby:
return "Moby"
return "Docker Engine"
case ModelRunnerEngineKindMobyManual:
return "Moby (Manual Install)"
return "Docker Engine (Manual Install)"
case ModelRunnerEngineKindDesktop:
return "Docker Desktop"
case ModelRunnerEngineKindCloud:
4 changes: 3 additions & 1 deletion docs/reference/docker_model.yaml
@@ -1,6 +1,8 @@
command: docker model
short: Docker Model Runner
long: Docker Model Runner
long: |-
Use Docker Model Runner to run and interact with AI models directly from the command line.
For more information, see the [documentation](/model-runner/)
pname: docker
plink: docker.yaml
cname:
5 changes: 3 additions & 2 deletions docs/reference/docker_model_install-runner.yaml
@@ -1,6 +1,7 @@
command: docker model install-runner
short: Install Docker Model Runner
long: Install Docker Model Runner
short: Install Docker Model Runner (Docker Engine only)
long: |
This command runs implicitly when a `docker model` command is executed. You can run this command explicitly to add a new configuration.
usage: docker model install-runner
pname: docker model
plink: docker_model.yaml
4 changes: 2 additions & 2 deletions docs/reference/docker_model_list.yaml
@@ -1,7 +1,7 @@
command: docker model list
aliases: docker model list, docker model ls
short: List the available models that can be run with the Docker Model Runner
long: List the available models that can be run with the Docker Model Runner
short: List the models pulled to your local environment
long: List the models pulled to your local environment
usage: docker model list [OPTIONS]
pname: docker model
plink: docker_model.yaml
4 changes: 2 additions & 2 deletions docs/reference/docker_model_logs.yaml
@@ -9,7 +9,7 @@ options:
shorthand: f
value_type: bool
default_value: "false"
description: Follow log output
description: View logs with real-time streaming
deprecated: false
hidden: false
experimental: false
@@ -19,7 +19,7 @@ options:
- option: no-engines
value_type: bool
default_value: "false"
description: Skip inference engines logs
description: Exclude inference engine logs from the output
deprecated: false
hidden: false
experimental: false
6 changes: 4 additions & 2 deletions docs/reference/docker_model_package.yaml
@@ -1,6 +1,8 @@
command: docker model package
short: package a model
long: package a model
short: |
Package a GGUF file into a Docker model OCI artifact, with optional licenses, and push it to the specified registry
long: |
Package a GGUF file into a Docker model OCI artifact, with optional licenses, and push it to the specified registry
usage: docker model package --gguf <path> [--license <path>...] --push TARGET
pname: docker model
plink: docker_model.yaml
19 changes: 17 additions & 2 deletions docs/reference/docker_model_pull.yaml
@@ -1,9 +1,24 @@
command: docker model pull
short: Download a model
long: Download a model
short: Pull a model from Docker Hub or HuggingFace to your local environment
long: |
Pull a model to your local environment. Downloaded models also appear in the Docker Desktop Dashboard.
usage: docker model pull MODEL
pname: docker model
plink: docker_model.yaml
examples: |-
### Pulling a model from Docker Hub

```console
docker model pull ai/smollm2
```

### Pulling from HuggingFace

You can pull GGUF models directly from [Hugging Face](https://huggingface.co/models?library=gguf).

```console
docker model pull hf.co/bartowski/Llama-3.2-1B-Instruct-GGUF
```
deprecated: false
hidden: false
experimental: false
4 changes: 2 additions & 2 deletions docs/reference/docker_model_push.yaml
@@ -1,6 +1,6 @@
command: docker model push
short: Upload a model
long: Upload a model
short: Push a model to Docker Hub
long: Push a model to Docker Hub
usage: docker model push MODEL
pname: docker model
plink: docker_model.yaml
4 changes: 2 additions & 2 deletions docs/reference/docker_model_rm.yaml
@@ -1,6 +1,6 @@
command: docker model rm
short: Remove models downloaded from Docker Hub
long: Remove models downloaded from Docker Hub
short: Remove local models downloaded from Docker Hub
long: Remove local models downloaded from Docker Hub
usage: docker model rm [MODEL...]
pname: docker model
plink: docker_model.yaml
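For readers skimming this diff, a minimal usage sketch of the reworded `rm` description; the model name reuses `ai/smollm2` from the pull examples elsewhere in this changeset and is only illustrative:

```console
docker model rm ai/smollm2
```

Multiple model names can be passed in a single invocation, per the `docker model rm [MODEL...]` usage string.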
37 changes: 35 additions & 2 deletions docs/reference/docker_model_run.yaml
@@ -1,6 +1,11 @@
command: docker model run
short: Run a model with the Docker Model Runner
long: Run a model with the Docker Model Runner
short: Run a model and interact with it using a submitted prompt or chat mode
long: |-
When you run a model, Docker calls an inference server API endpoint hosted by the Model Runner through Docker Desktop. The model stays in memory until another model is requested, or until a pre-defined inactivity timeout is reached (currently 5 minutes).

You do not have to use `docker model run` before interacting with a specific model from a host process or from within a container. Model Runner transparently loads the requested model on demand, assuming it has been pulled and is locally available.

You can also use chat mode in the Docker Desktop Dashboard when you select the model in the **Models** tab.
usage: docker model run MODEL [PROMPT]
pname: docker model
plink: docker_model.yaml
@@ -15,6 +20,34 @@ options:
experimentalcli: false
kubernetes: false
swarm: false
examples: |-
### One-time prompt

```console
docker model run ai/smollm2 "Hi"
```

Output:

```console
Hello! How can I assist you today?
```

### Interactive chat

```console
docker model run ai/smollm2
```

Output:

```console
Interactive chat mode started. Type '/bye' to exit.
> Hi
Hi there! It's SmolLM, AI assistant. How can I help you today?
> /bye
Chat session ended.
```
deprecated: false
hidden: false
experimental: false
3 changes: 2 additions & 1 deletion docs/reference/docker_model_status.yaml
@@ -1,6 +1,7 @@
command: docker model status
short: Check if the Docker Model Runner is running
long: Check if the Docker Model Runner is running
long: |
Check whether the Docker Model Runner is running and display the current inference engine.
usage: docker model status
pname: docker model
plink: docker_model.yaml
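As a companion to the expanded `long` text above, a minimal invocation sketch; the exact output wording is not reproduced here because it depends on the installed inference engine and is not part of this diff:

```console
docker model status
```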
3 changes: 2 additions & 1 deletion docs/reference/docker_model_tag.yaml
@@ -1,6 +1,7 @@
command: docker model tag
short: Tag a model
long: Tag a model
long: |
Specify a particular version or variant of the model. If no tag is provided, Docker defaults to `latest`.
usage: docker model tag SOURCE TARGET
pname: docker model
plink: docker_model.yaml
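To make the new tagging description concrete, a hedged example following the `docker model tag SOURCE TARGET` usage string; the `myorg/smollm2:v1` target is a hypothetical placeholder:

```console
docker model tag ai/smollm2 myorg/smollm2:v1
```

Omitting the `:v1` suffix would fall back to `latest`, per the description above.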
40 changes: 22 additions & 18 deletions docs/reference/model.md
@@ -5,26 +5,30 @@ Docker Model Runner (EXPERIMENTAL)

### Subcommands

| Name | Description |
|:------------------------------------------------|:-----------------------------------------------------------------------|
| [`df`](model_df.md) | Show Docker Model Runner disk usage |
| [`inspect`](model_inspect.md) | Display detailed information on one model |
| [`install-runner`](model_install-runner.md) | Install Docker Model Runner |
| [`list`](model_list.md) | List the available models that can be run with the Docker Model Runner |
| [`logs`](model_logs.md) | Fetch the Docker Model Runner logs |
| [`package`](model_package.md) | package a model |
| [`ps`](model_ps.md) | List running models |
| [`pull`](model_pull.md) | Download a model |
| [`push`](model_push.md) | Upload a model |
| [`rm`](model_rm.md) | Remove models downloaded from Docker Hub |
| [`run`](model_run.md) | Run a model with the Docker Model Runner |
| [`status`](model_status.md) | Check if the Docker Model Runner is running |
| [`tag`](model_tag.md) | Tag a model |
| [`uninstall-runner`](model_uninstall-runner.md) | Uninstall Docker Model Runner |
| [`unload`](model_unload.md) | Unload running models |
| [`version`](model_version.md) | Show the Docker Model Runner version |
| Name | Description |
|:------------------------------------------------|:----------------------------------------------------------------------------------------------------------------------|
| [`df`](model_df.md) | Show Docker Model Runner disk usage |
| [`inspect`](model_inspect.md) | Display detailed information on one model |
| [`install-runner`](model_install-runner.md) | Install Docker Model Runner (Docker Engine only) |
| [`list`](model_list.md) | List the models pulled to your local environment |
| [`logs`](model_logs.md) | Fetch the Docker Model Runner logs |
| [`package`](model_package.md)                   | Package a GGUF file into a Docker model OCI artifact, with optional licenses, and push it to the specified registry    |
| [`ps`](model_ps.md) | List running models |
| [`pull`](model_pull.md) | Pull a model from Docker Hub or HuggingFace to your local environment |
| [`push`](model_push.md) | Push a model to Docker Hub |
| [`rm`](model_rm.md) | Remove local models downloaded from Docker Hub |
| [`run`](model_run.md) | Run a model and interact with it using a submitted prompt or chat mode |
| [`status`](model_status.md) | Check if the Docker Model Runner is running |
| [`tag`](model_tag.md) | Tag a model |
| [`uninstall-runner`](model_uninstall-runner.md) | Uninstall Docker Model Runner |
| [`unload`](model_unload.md) | Unload running models |
| [`version`](model_version.md) | Show the Docker Model Runner version |



<!---MARKER_GEN_END-->

## Description

Use Docker Model Runner to run and interact with AI models directly from the command line.
For more information, see the [documentation](https://docs.docker.com/model-runner/)
5 changes: 4 additions & 1 deletion docs/reference/model_install-runner.md
@@ -1,7 +1,7 @@
# docker model install-runner

<!---MARKER_GEN_START-->
Install Docker Model Runner
Install Docker Model Runner (Docker Engine only)

### Options

@@ -13,3 +13,6 @@ Install Docker Model Runner

<!---MARKER_GEN_END-->

## Description

This command runs implicitly when a `docker model` command is executed. You can run this command explicitly to add a new configuration.
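A minimal sketch of the explicit invocation described above; any flags listed in the options table of this file are omitted here because they are not part of this diff:

```console
docker model install-runner
```

Per the updated short description, the command targets Docker Engine (Moby) installations only.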
2 changes: 1 addition & 1 deletion docs/reference/model_list.md
@@ -1,7 +1,7 @@
# docker model list

<!---MARKER_GEN_START-->
List the available models that can be run with the Docker Model Runner
List the models pulled to your local environment

### Aliases

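To illustrate the reworded `list` description, a short sketch; the `--quiet` and `ls` alias come from `commands/list.go` and the aliases entry earlier in this diff, and their exact output is not shown:

```console
# Full listing of locally pulled models
docker model list

# Same command via its alias, in quiet mode (flag name taken from list.go above)
docker model ls --quiet
```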
8 changes: 4 additions & 4 deletions docs/reference/model_logs.md
@@ -5,10 +5,10 @@ Fetch the Docker Model Runner logs

### Options

| Name | Type | Default | Description |
|:-----------------|:-------|:--------|:----------------------------|
| `-f`, `--follow` | `bool` | | Follow log output |
| `--no-engines` | `bool` | | Skip inference engines logs |
| Name | Type | Default | Description |
|:-----------------|:-------|:--------|:----------------------------------------------|
| `-f`, `--follow` | `bool` | | View logs with real-time streaming |
| `--no-engines` | `bool` | | Exclude inference engine logs from the output |


<!---MARKER_GEN_END-->
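A combined sketch of the two reworded flags documented above:

```console
# Stream logs in real time while excluding inference engine output
docker model logs --follow --no-engines
```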
2 changes: 1 addition & 1 deletion docs/reference/model_package.md
@@ -1,7 +1,7 @@
# docker model package

<!---MARKER_GEN_START-->
package a model
Package a GGUF file into a Docker model OCI artifact, with optional licenses, and push it to the specified registry

### Options

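To ground the rewritten `package` description, a hedged invocation following the documented usage string; the GGUF path, license file, and registry target are all hypothetical placeholders:

```console
docker model package --gguf ./model.gguf --license ./LICENSE.txt --push myorg/my-model:latest
```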
21 changes: 20 additions & 1 deletion docs/reference/model_pull.md
@@ -1,8 +1,27 @@
# docker model pull

<!---MARKER_GEN_START-->
Download a model
Pull a model from Docker Hub or HuggingFace to your local environment


<!---MARKER_GEN_END-->

## Description

Pull a model to your local environment. Downloaded models also appear in the Docker Desktop Dashboard.

## Examples

### Pulling a model from Docker Hub

```console
docker model pull ai/smollm2
```

### Pulling from HuggingFace

You can pull GGUF models directly from [Hugging Face](https://huggingface.co/models?library=gguf).

```console
docker model pull hf.co/bartowski/Llama-3.2-1B-Instruct-GGUF
```
7 changes: 6 additions & 1 deletion docs/reference/model_push.md
@@ -1,8 +1,13 @@
# docker model push

<!---MARKER_GEN_START-->
Upload a model
Push a model to Docker Hub


<!---MARKER_GEN_END-->

### Example

```console
docker model push <namespace>/<model>
```