diff --git a/commands/install-runner.go b/commands/install-runner.go
index eef5fba8..6a25827e 100644
--- a/commands/install-runner.go
+++ b/commands/install-runner.go
@@ -113,7 +113,7 @@ func newInstallRunner() *cobra.Command {
 	var gpuMode string
 	c := &cobra.Command{
 		Use:   "install-runner",
-		Short: "Install Docker Model Runner",
+		Short: "Install Docker Model Runner (Docker Engine only)",
 		RunE: func(cmd *cobra.Command, args []string) error {
 			// Ensure that we're running in a supported model runner context.
 			engineKind := modelRunner.EngineKind()
diff --git a/commands/list.go b/commands/list.go
index 3d757ac5..af651a14 100644
--- a/commands/list.go
+++ b/commands/list.go
@@ -19,7 +19,7 @@ func newListCmd() *cobra.Command {
 	c := &cobra.Command{
 		Use:     "list [OPTIONS]",
 		Aliases: []string{"ls"},
-		Short:   "List the available models that can be run with the Docker Model Runner",
+		Short:   "List the models pulled to your local environment",
 		RunE: func(cmd *cobra.Command, args []string) error {
 			if openai && quiet {
 				return fmt.Errorf("--quiet flag cannot be used with --openai flag")
diff --git a/commands/logs.go b/commands/logs.go
index 98824166..a7f12902 100644
--- a/commands/logs.go
+++ b/commands/logs.go
@@ -125,7 +125,7 @@ func newLogsCmd() *cobra.Command {
 		},
 		ValidArgsFunction: completion.NoComplete,
 	}
-	c.Flags().BoolVarP(&follow, "follow", "f", false, "Follow log output")
-	c.Flags().BoolVar(&noEngines, "no-engines", false, "Skip inference engines logs")
+	c.Flags().BoolVarP(&follow, "follow", "f", false, "View logs with real-time streaming")
+	c.Flags().BoolVar(&noEngines, "no-engines", false, "Exclude inference engine logs from the output")
 	return c
 }
diff --git a/commands/package.go b/commands/package.go
index 0d21fe28..0dd1aeba 100644
--- a/commands/package.go
+++ b/commands/package.go
@@ -19,7 +19,7 @@ func newPackagedCmd() *cobra.Command {
 	c := &cobra.Command{
 		Use:   "package --gguf <path> [--license <path>...] --push TARGET",
-		Short: "package a model",
+		Short: "Package a GGUF file into a Docker model OCI artifact, with optional licenses, and push it to the specified registry",
 		Args: func(cmd *cobra.Command, args []string) error {
 			if len(args) != 1 {
 				return fmt.Errorf(
diff --git a/commands/pull.go b/commands/pull.go
index 66596a49..de6361c3 100644
--- a/commands/pull.go
+++ b/commands/pull.go
@@ -11,7 +11,7 @@ import (
 func newPullCmd() *cobra.Command {
 	c := &cobra.Command{
 		Use:   "pull MODEL",
-		Short: "Download a model",
+		Short: "Pull a model from Docker Hub or HuggingFace to your local environment",
 		Args: func(cmd *cobra.Command, args []string) error {
 			if len(args) != 1 {
 				return fmt.Errorf(
diff --git a/commands/push.go b/commands/push.go
index 0f4547a3..32b4c82c 100644
--- a/commands/push.go
+++ b/commands/push.go
@@ -11,7 +11,7 @@ import (
 func newPushCmd() *cobra.Command {
 	c := &cobra.Command{
 		Use:   "push MODEL",
-		Short: "Upload a model",
+		Short: "Push a model to Docker Hub",
 		Args: func(cmd *cobra.Command, args []string) error {
 			if len(args) != 1 {
 				return fmt.Errorf(
diff --git a/commands/rm.go b/commands/rm.go
index 1bec7c2d..1712ba90 100644
--- a/commands/rm.go
+++ b/commands/rm.go
@@ -12,7 +12,7 @@ func newRemoveCmd() *cobra.Command {
 	c := &cobra.Command{
 		Use:   "rm [MODEL...]",
-		Short: "Remove models downloaded from Docker Hub",
+		Short: "Remove local models downloaded from Docker Hub",
 		Args: func(cmd *cobra.Command, args []string) error {
 			if len(args) < 1 {
 				return fmt.Errorf(
diff --git a/commands/run.go b/commands/run.go
index 16024d7c..ca103f7e 100644
--- a/commands/run.go
+++ b/commands/run.go
@@ -18,7 +18,7 @@ func newRunCmd() *cobra.Command {
 	cmdArgs := "MODEL [PROMPT]"
 	c := &cobra.Command{
 		Use:   "run " + cmdArgs,
-		Short: "Run a model with the Docker Model Runner",
+		Short: "Run a model and interact with it using a submitted prompt or chat mode",
 		RunE: func(cmd *cobra.Command, args []string) error {
 			model := args[0]
 			prompt := ""
diff --git a/desktop/context.go b/desktop/context.go
index a04cabda..09c79efd 100644
--- a/desktop/context.go
+++ b/desktop/context.go
@@ -98,9 +98,9 @@ const (
 func (k ModelRunnerEngineKind) String() string {
 	switch k {
 	case ModelRunnerEngineKindMoby:
-		return "Moby"
+		return "Docker Engine"
 	case ModelRunnerEngineKindMobyManual:
-		return "Moby (Manual Install)"
+		return "Docker Engine (Manual Install)"
 	case ModelRunnerEngineKindDesktop:
 		return "Docker Desktop"
 	case ModelRunnerEngineKindCloud:
diff --git a/docs/reference/docker_model.yaml b/docs/reference/docker_model.yaml
index bbc08323..e33eb58a 100644
--- a/docs/reference/docker_model.yaml
+++ b/docs/reference/docker_model.yaml
@@ -1,6 +1,8 @@
 command: docker model
 short: Docker Model Runner
-long: Docker Model Runner
+long: |-
+    Use Docker Model Runner to run and interact with AI models directly from the command line.
+    For more information, see the [documentation](/model-runner/).
 pname: docker
 plink: docker.yaml
 cname:
diff --git a/docs/reference/docker_model_install-runner.yaml b/docs/reference/docker_model_install-runner.yaml
index eb7ef498..60443efb 100644
--- a/docs/reference/docker_model_install-runner.yaml
+++ b/docs/reference/docker_model_install-runner.yaml
@@ -1,6 +1,7 @@
 command: docker model install-runner
-short: Install Docker Model Runner
-long: Install Docker Model Runner
+short: Install Docker Model Runner (Docker Engine only)
+long: |
+    This command runs implicitly when a `docker model` command is executed. You can run this command explicitly to add a new configuration.
 usage: docker model install-runner
 pname: docker model
 plink: docker_model.yaml
diff --git a/docs/reference/docker_model_list.yaml b/docs/reference/docker_model_list.yaml
index 95b7a1aa..292704ad 100644
--- a/docs/reference/docker_model_list.yaml
+++ b/docs/reference/docker_model_list.yaml
@@ -1,7 +1,7 @@
 command: docker model list
 aliases: docker model list, docker model ls
-short: List the available models that can be run with the Docker Model Runner
-long: List the available models that can be run with the Docker Model Runner
+short: List the models pulled to your local environment
+long: List the models pulled to your local environment
 usage: docker model list [OPTIONS]
 pname: docker model
 plink: docker_model.yaml
diff --git a/docs/reference/docker_model_logs.yaml b/docs/reference/docker_model_logs.yaml
index a54decee..84a01f89 100644
--- a/docs/reference/docker_model_logs.yaml
+++ b/docs/reference/docker_model_logs.yaml
@@ -9,7 +9,7 @@ options:
       shorthand: f
       value_type: bool
       default_value: "false"
-      description: Follow log output
+      description: View logs with real-time streaming
       deprecated: false
       hidden: false
       experimental: false
@@ -19,7 +19,7 @@ options:
    - option: no-engines
      value_type: bool
      default_value: "false"
-      description: Skip inference engines logs
+      description: Exclude inference engine logs from the output
      deprecated: false
      hidden: false
      experimental: false
diff --git a/docs/reference/docker_model_package.yaml b/docs/reference/docker_model_package.yaml
index cb5ef39d..1e5a7ec9 100644
--- a/docs/reference/docker_model_package.yaml
+++ b/docs/reference/docker_model_package.yaml
@@ -1,6 +1,8 @@
 command: docker model package
-short: package a model
-long: package a model
+short: |
+    Package a GGUF file into a Docker model OCI artifact, with optional licenses, and push it to the specified registry
+long: |
+    Package a GGUF file into a Docker model OCI artifact, with optional licenses, and push it to the specified registry
 usage: docker model package --gguf <path> [--license <path>...] --push TARGET
 pname: docker model
 plink: docker_model.yaml
diff --git a/docs/reference/docker_model_pull.yaml b/docs/reference/docker_model_pull.yaml
index 64c0836a..f10e4368 100644
--- a/docs/reference/docker_model_pull.yaml
+++ b/docs/reference/docker_model_pull.yaml
@@ -1,9 +1,24 @@
 command: docker model pull
-short: Download a model
-long: Download a model
+short: Pull a model from Docker Hub or HuggingFace to your local environment
+long: |
+    Pull a model to your local environment. Downloaded models also appear in the Docker Desktop Dashboard.
 usage: docker model pull MODEL
 pname: docker model
 plink: docker_model.yaml
+examples: |-
+    ### Pulling a model from Docker Hub
+
+    ```console
+    docker model pull ai/smollm2
+    ```
+
+    ### Pulling from HuggingFace
+
+    You can pull GGUF models directly from [Hugging Face](https://huggingface.co/models?library=gguf).
+
+    ```console
+    docker model pull hf.co/bartowski/Llama-3.2-1B-Instruct-GGUF
+    ```
 deprecated: false
 hidden: false
 experimental: false
diff --git a/docs/reference/docker_model_push.yaml b/docs/reference/docker_model_push.yaml
index 915509a5..4bd953bc 100644
--- a/docs/reference/docker_model_push.yaml
+++ b/docs/reference/docker_model_push.yaml
@@ -1,6 +1,6 @@
 command: docker model push
-short: Upload a model
-long: Upload a model
+short: Push a model to Docker Hub
+long: Push a model to Docker Hub
 usage: docker model push MODEL
 pname: docker model
 plink: docker_model.yaml
diff --git a/docs/reference/docker_model_rm.yaml b/docs/reference/docker_model_rm.yaml
index 80357003..426bfd88 100644
--- a/docs/reference/docker_model_rm.yaml
+++ b/docs/reference/docker_model_rm.yaml
@@ -1,6 +1,6 @@
 command: docker model rm
-short: Remove models downloaded from Docker Hub
-long: Remove models downloaded from Docker Hub
+short: Remove local models downloaded from Docker Hub
+long: Remove local models downloaded from Docker Hub
 usage: docker model rm [MODEL...]
 pname: docker model
 plink: docker_model.yaml
diff --git a/docs/reference/docker_model_run.yaml b/docs/reference/docker_model_run.yaml
index ee4a9f55..4d18d3c6 100644
--- a/docs/reference/docker_model_run.yaml
+++ b/docs/reference/docker_model_run.yaml
@@ -1,6 +1,11 @@
 command: docker model run
-short: Run a model with the Docker Model Runner
-long: Run a model with the Docker Model Runner
+short: Run a model and interact with it using a submitted prompt or chat mode
+long: |-
+    When you run a model, Docker calls an inference server API endpoint hosted by the Model Runner through Docker Desktop. The model stays in memory until another model is requested, or until a pre-defined inactivity timeout is reached (currently 5 minutes).
+
+    You do not have to use `docker model run` before interacting with a specific model from a host process or from within a container. Model Runner transparently loads the requested model on demand, assuming it has been pulled and is locally available.
+
+    You can also use chat mode in the Docker Desktop Dashboard when you select the model in the **Models** tab.
 usage: docker model run MODEL [PROMPT]
 pname: docker model
 plink: docker_model.yaml
@@ -15,6 +20,34 @@ options:
      experimentalcli: false
      kubernetes: false
      swarm: false
+examples: |-
+    ### One-time prompt
+
+    ```console
+    docker model run ai/smollm2 "Hi"
+    ```
+
+    Output:
+
+    ```console
+    Hello! How can I assist you today?
+    ```
+
+    ### Interactive chat
+
+    ```console
+    docker model run ai/smollm2
+    ```
+
+    Output:
+
+    ```console
+    Interactive chat mode started. Type '/bye' to exit.
+    > Hi
+    Hi there! It's SmolLM, AI assistant. How can I help you today?
+    > /bye
+    Chat session ended.
+    ```
 deprecated: false
 hidden: false
 experimental: false
diff --git a/docs/reference/docker_model_status.yaml b/docs/reference/docker_model_status.yaml
index de68c46c..07da71d0 100644
--- a/docs/reference/docker_model_status.yaml
+++ b/docs/reference/docker_model_status.yaml
@@ -1,6 +1,7 @@
 command: docker model status
 short: Check if the Docker Model Runner is running
-long: Check if the Docker Model Runner is running
+long: |
+    Check whether the Docker Model Runner is running and display the current inference engine.
 usage: docker model status
 pname: docker model
 plink: docker_model.yaml
diff --git a/docs/reference/docker_model_tag.yaml b/docs/reference/docker_model_tag.yaml
index 13a58ba7..2aa0b35e 100644
--- a/docs/reference/docker_model_tag.yaml
+++ b/docs/reference/docker_model_tag.yaml
@@ -1,6 +1,7 @@
 command: docker model tag
 short: Tag a model
-long: Tag a model
+long: |
+    Specify a particular version or variant of the model. If no tag is provided, Docker defaults to `latest`.
 usage: docker model tag SOURCE TARGET
 pname: docker model
 plink: docker_model.yaml
diff --git a/docs/reference/model.md b/docs/reference/model.md
index 149acfa6..f9032718 100644
--- a/docs/reference/model.md
+++ b/docs/reference/model.md
@@ -5,26 +5,30 @@ Docker Model Runner (EXPERIMENTAL)
 
 ### Subcommands
 
-| Name | Description |
-|:-----|:------------|
-| [`df`](model_df.md) | Show Docker Model Runner disk usage |
-| [`inspect`](model_inspect.md) | Display detailed information on one model |
-| [`install-runner`](model_install-runner.md) | Install Docker Model Runner |
-| [`list`](model_list.md) | List the available models that can be run with the Docker Model Runner |
-| [`logs`](model_logs.md) | Fetch the Docker Model Runner logs |
-| [`package`](model_package.md) | package a model |
-| [`ps`](model_ps.md) | List running models |
-| [`pull`](model_pull.md) | Download a model |
-| [`push`](model_push.md) | Upload a model |
-| [`rm`](model_rm.md) | Remove models downloaded from Docker Hub |
-| [`run`](model_run.md) | Run a model with the Docker Model Runner |
-| [`status`](model_status.md) | Check if the Docker Model Runner is running |
-| [`tag`](model_tag.md) | Tag a model |
-| [`uninstall-runner`](model_uninstall-runner.md) | Uninstall Docker Model Runner |
-| [`unload`](model_unload.md) | Unload running models |
-| [`version`](model_version.md) | Show the Docker Model Runner version |
+| Name | Description |
+|:-----|:------------|
+| [`df`](model_df.md) | Show Docker Model Runner disk usage |
+| [`inspect`](model_inspect.md) | Display detailed information on one model |
+| [`install-runner`](model_install-runner.md) | Install Docker Model Runner (Docker Engine only) |
+| [`list`](model_list.md) | List the models pulled to your local environment |
+| [`logs`](model_logs.md) | Fetch the Docker Model Runner logs |
+| [`package`](model_package.md) | Package a GGUF file into a Docker model OCI artifact, with optional licenses, and push it to the specified registry |
+| [`ps`](model_ps.md) | List running models |
+| [`pull`](model_pull.md) | Pull a model from Docker Hub or HuggingFace to your local environment |
+| [`push`](model_push.md) | Push a model to Docker Hub |
+| [`rm`](model_rm.md) | Remove local models downloaded from Docker Hub |
+| [`run`](model_run.md) | Run a model and interact with it using a submitted prompt or chat mode |
+| [`status`](model_status.md) | Check if the Docker Model Runner is running |
+| [`tag`](model_tag.md) | Tag a model |
+| [`uninstall-runner`](model_uninstall-runner.md) | Uninstall Docker Model Runner |
+| [`unload`](model_unload.md) | Unload running models |
+| [`version`](model_version.md) | Show the Docker Model Runner version |
+
+## Description
+
+Use Docker Model Runner to run and interact with AI models directly from the command line.
+For more information, see the [documentation](https://docs.docker.com/model-runner/).
diff --git a/docs/reference/model_install-runner.md b/docs/reference/model_install-runner.md
index 3e969bbe..afcbbf2e 100644
--- a/docs/reference/model_install-runner.md
+++ b/docs/reference/model_install-runner.md
@@ -1,7 +1,7 @@
 # docker model install-runner
 
-Install Docker Model Runner
+Install Docker Model Runner (Docker Engine only)
 
 ### Options
 
@@ -13,3 +13,6 @@ Install Docker Model Runner
 
 
 
+## Description
+
+This command runs implicitly when a `docker model` command is executed. You can run this command explicitly to add a new configuration.
diff --git a/docs/reference/model_list.md b/docs/reference/model_list.md
index d3c94343..b6c051f2 100644
--- a/docs/reference/model_list.md
+++ b/docs/reference/model_list.md
@@ -1,7 +1,7 @@
 # docker model list
 
-List the available models that can be run with the Docker Model Runner
+List the models pulled to your local environment
 
 ### Aliases
diff --git a/docs/reference/model_logs.md b/docs/reference/model_logs.md
index 010c671b..8c581092 100644
--- a/docs/reference/model_logs.md
+++ b/docs/reference/model_logs.md
@@ -5,10 +5,10 @@ Fetch the Docker Model Runner logs
 
 ### Options
 
-| Name             | Type   | Default | Description                 |
-|:-----------------|:-------|:--------|:----------------------------|
-| `-f`, `--follow` | `bool` |         | Follow log output           |
-| `--no-engines`   | `bool` |         | Skip inference engines logs |
+| Name             | Type   | Default | Description                                    |
+|:-----------------|:-------|:--------|:-----------------------------------------------|
+| `-f`, `--follow` | `bool` |         | View logs with real-time streaming            |
+| `--no-engines`   | `bool` |         | Exclude inference engine logs from the output |
diff --git a/docs/reference/model_package.md b/docs/reference/model_package.md
index 3e512153..4cfccf30 100644
--- a/docs/reference/model_package.md
+++ b/docs/reference/model_package.md
@@ -1,7 +1,7 @@
 # docker model package
 
-package a model
+Package a GGUF file into a Docker model OCI artifact, with optional licenses, and push it to the specified registry
 
 ### Options
diff --git a/docs/reference/model_pull.md b/docs/reference/model_pull.md
index 8ad39437..bae41fc3 100644
--- a/docs/reference/model_pull.md
+++ b/docs/reference/model_pull.md
@@ -1,8 +1,27 @@
 # docker model pull
 
-Download a model
+Pull a model from Docker Hub or HuggingFace to your local environment
 
+## Description
+
+Pull a model to your local environment. Downloaded models also appear in the Docker Desktop Dashboard.
+
+## Examples
+
+### Pulling a model from Docker Hub
+
+```console
+docker model pull ai/smollm2
+```
+
+### Pulling from HuggingFace
+
+You can pull GGUF models directly from [Hugging Face](https://huggingface.co/models?library=gguf).
+
+```console
+docker model pull hf.co/bartowski/Llama-3.2-1B-Instruct-GGUF
+```
diff --git a/docs/reference/model_push.md b/docs/reference/model_push.md
index b7581c96..b50a425e 100644
--- a/docs/reference/model_push.md
+++ b/docs/reference/model_push.md
@@ -1,8 +1,13 @@
 # docker model push
 
-Upload a model
+Push a model to Docker Hub
 
+### Example
+
+```console
+docker model push <namespace>/<model>
+```
diff --git a/docs/reference/model_rm.md b/docs/reference/model_rm.md
index e7031ea9..6463903b 100644
--- a/docs/reference/model_rm.md
+++ b/docs/reference/model_rm.md
@@ -1,7 +1,7 @@
 # docker model rm
 
-Remove models downloaded from Docker Hub
+Remove local models downloaded from Docker Hub
 
 ### Options
diff --git a/docs/reference/model_run.md b/docs/reference/model_run.md
index 9f15f954..3010f26c 100644
--- a/docs/reference/model_run.md
+++ b/docs/reference/model_run.md
@@ -1,7 +1,7 @@
 # docker model run
 
-Run a model with the Docker Model Runner
+Run a model and interact with it using a submitted prompt or chat mode
 
 ### Options
 
@@ -12,3 +12,40 @@ Run a model with the Docker Model Runner
 
 
 
+## Description
+
+When you run a model, Docker calls an inference server API endpoint hosted by the Model Runner through Docker Desktop. The model stays in memory until another model is requested, or until a pre-defined inactivity timeout is reached (currently 5 minutes).
+
+You do not have to use `docker model run` before interacting with a specific model from a host process or from within a container. Model Runner transparently loads the requested model on demand, assuming it has been pulled and is locally available.
+
+You can also use chat mode in the Docker Desktop Dashboard when you select the model in the **Models** tab.
+
+## Examples
+
+### One-time prompt
+
+```console
+docker model run ai/smollm2 "Hi"
+```
+
+Output:
+
+```console
+Hello! How can I assist you today?
+```
+
+### Interactive chat
+
+```console
+docker model run ai/smollm2
+```
+
+Output:
+
+```console
+Interactive chat mode started. Type '/bye' to exit.
+> Hi
+Hi there! It's SmolLM, AI assistant. How can I help you today?
+> /bye
+Chat session ended.
+```
diff --git a/docs/reference/model_status.md b/docs/reference/model_status.md
index 9d6accaf..72a0bf79 100644
--- a/docs/reference/model_status.md
+++ b/docs/reference/model_status.md
@@ -6,3 +6,6 @@ Check if the Docker Model Runner is running
 
 
 
+## Description
+
+Check whether the Docker Model Runner is running and display the current inference engine.
diff --git a/docs/reference/model_tag.md b/docs/reference/model_tag.md
index 5acf4bc2..3f1615e2 100644
--- a/docs/reference/model_tag.md
+++ b/docs/reference/model_tag.md
@@ -6,3 +6,6 @@ Tag a model
 
 
 
+## Description
+
+Specify a particular version or variant of the model. If no tag is provided, Docker defaults to `latest`.
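The new `docker model package` description summarizes a multi-step flow (package a GGUF file, attach optional licenses, push the result). As a rough sketch of what an invocation of the usage string `package --gguf <path> [--license <path>...] --push TARGET` might look like, where the GGUF path, license file, and `myorg/mymodel` target are hypothetical placeholders rather than values taken from this patch:

```console
# package a local GGUF file, attach a license file, and push to a registry target
docker model package --gguf ./my-model.gguf --license ./LICENSE.txt --push myorg/mymodel
```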
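Similarly, the new `docker model tag` description notes that an omitted tag defaults to `latest`. A possible illustration of the documented `docker model tag SOURCE TARGET` usage, using the `ai/smollm2` model referenced elsewhere in these docs; the `myorg/smollm2` target is a placeholder:

```console
# retag a pulled model under another namespace with an explicit version
docker model tag ai/smollm2 myorg/smollm2:v1

# no tag given, so the target defaults to myorg/smollm2:latest
docker model tag ai/smollm2 myorg/smollm2
```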