diff --git a/.gitignore b/.gitignore index 5919645..00f2906 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ tmp/ +testing/ dist/ node_modules/ .env diff --git a/dash/.gitignore b/dash/.gitignore index a547bf3..019e1dc 100644 --- a/dash/.gitignore +++ b/dash/.gitignore @@ -1,5 +1,4 @@ # Logs -logs *.log npm-debug.log* yarn-debug.log* diff --git a/dash/src/components/applications/live-logs-viewer.tsx b/dash/src/components/applications/live-logs-viewer.tsx index c9bbe1f..736ee4e 100644 --- a/dash/src/components/applications/live-logs-viewer.tsx +++ b/dash/src/components/applications/live-logs-viewer.tsx @@ -3,6 +3,7 @@ import { Card, CardHeader, CardTitle, CardContent } from "@/components/ui/card" import { Button } from "@/components/ui/button" import { Badge } from "@/components/ui/badge" import { useContainerLogs } from "@/hooks" +import { LogLine } from "@/components/logs/log-line" import { Loader2, Wifi, @@ -63,7 +64,7 @@ export const LiveLogsViewer = ({ appId, enabled = true }: LiveLogsViewerProps) = }, []) const downloadLogs = () => { - const logText = logs.join("\n") + const logText = logs.map(log => log.line).join("\n") const blob = new Blob([logText], { type: "text/plain" }) const url = URL.createObjectURL(blob) const a = document.createElement("a") @@ -176,19 +177,17 @@ export const LiveLogsViewer = ({ appId, enabled = true }: LiveLogsViewerProps) = <>
{logs.map((log, index) => ( -
- - {String(index + 1).padStart(4, " ")} - - {log} -
+ line={log.line} + index={index} + showLineNumbers={true} + streamType={log.stream} + /> ))}
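// A minimal usage sketch (not part of the patch hunks): it assumes only the exports this
// patch introduces — LogLine from "@/components/logs/log-line" and the ContainerLogEntry
// shape re-exported from "@/hooks". The sample entry and its values are hypothetical.
import { LogLine } from "@/components/logs/log-line"
import type { ContainerLogEntry } from "@/hooks"

const sample: ContainerLogEntry = { line: "\x1b[31mEADDRINUSE: port 3000 already in use\x1b[0m", stream: "stderr" }

// Rendered much as LiveLogsViewer does above: the raw line (ANSI codes included) is passed
// to LogLine, which parses the escape codes into colored segments and shows an ERR/OUT badge.
const rendered = <LogLine line={sample.line} index={0} showLineNumbers={true} streamType={sample.stream} />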
diff --git a/dash/src/components/deployments/deployment-monitor.tsx b/dash/src/components/deployments/deployment-monitor.tsx index 47a6101..3f761fd 100644 --- a/dash/src/components/deployments/deployment-monitor.tsx +++ b/dash/src/components/deployments/deployment-monitor.tsx @@ -1,14 +1,14 @@ import { useEffect, useRef } from 'react'; import { - Dialog, - DialogContent, - DialogHeader, - DialogTitle, -} from '@/components/ui/dialog'; -import { Button } from '@/components/ui/button'; + Sheet, + SheetContent, + SheetHeader, + SheetTitle, +} from '@/components/ui/sheet'; import { Badge } from '@/components/ui/badge'; -import { Terminal, X, CheckCircle2, XCircle, AlertCircle, Loader2 } from 'lucide-react'; +import { Terminal, CheckCircle2, XCircle, AlertCircle, Loader2 } from 'lucide-react'; import { useDeploymentMonitor } from '@/hooks'; +import { LogLine } from '@/components/logs/log-line'; import { cn } from '@/lib/utils'; import { toast } from 'sonner'; @@ -94,17 +94,17 @@ export const DeploymentMonitor = ({ deploymentId, open, onClose, onComplete }: P const statusInfo = getStatusInfo(); return ( - - + {/* Header */} - - + + Deployment Monitor - +
{/* Connection Status */} @@ -127,15 +127,8 @@ export const DeploymentMonitor = ({ deploymentId, open, onClose, onComplete }: P #{deploymentId} - -
-
+ {/* Status Bar */} {status && ( @@ -194,7 +187,7 @@ export const DeploymentMonitor = ({ deploymentId, open, onClose, onComplete }: P )} {/* Logs Viewer */} -
+
{isLoading ? (
@@ -208,9 +201,13 @@ export const DeploymentMonitor = ({ deploymentId, open, onClose, onComplete }: P ) : (
{logs.map((log, index) => ( -
- {log} -
+ ))}
@@ -218,12 +215,7 @@ export const DeploymentMonitor = ({ deploymentId, open, onClose, onComplete }: P
{/* Footer */} -
- -
- -
+ + ); }; diff --git a/dash/src/components/logs/index.ts b/dash/src/components/logs/index.ts new file mode 100644 index 0000000..d268a59 --- /dev/null +++ b/dash/src/components/logs/index.ts @@ -0,0 +1,2 @@ +export { LogLine } from './log-line'; +export type { LogLineProps } from './log-line'; diff --git a/dash/src/components/logs/log-line.tsx b/dash/src/components/logs/log-line.tsx new file mode 100644 index 0000000..422a5bc --- /dev/null +++ b/dash/src/components/logs/log-line.tsx @@ -0,0 +1,89 @@ +import { memo } from 'react'; +import { parseAnsi, type AnsiSegment } from '@/lib/ansi-parser'; +import { cn } from '@/lib/utils'; + +export interface LogLineProps { + line: string; + index: number; + showLineNumbers?: boolean; + streamType?: 'stdout' | 'stderr'; + className?: string; +} + +/** + * Renders a single log line with ANSI color support and stream type indicators + */ +export const LogLine = memo(({ + line, + index, + showLineNumbers = true, + streamType, + className, +}: LogLineProps) => { + const segments = parseAnsi(line); + + return ( +
+ {/* Line Number */} + {showLineNumbers && ( + + {String(index + 1).padStart(4, ' ')} + + )} + + {/* Stream Type Indicator */} + {streamType && ( + + {streamType === 'stderr' ? 'ERR' : 'OUT'} + + )} + + {/* Log Content with ANSI colors */} +
+ {segments.map((segment: AnsiSegment, i: number) => { + const style: React.CSSProperties = {}; + + if (segment.styles.color) { + style.color = segment.styles.color; + } + if (segment.styles.backgroundColor) { + style.backgroundColor = segment.styles.backgroundColor; + } + if (segment.styles.bold) { + style.fontWeight = 'bold'; + } + if (segment.styles.italic) { + style.fontStyle = 'italic'; + } + if (segment.styles.underline) { + style.textDecoration = 'underline'; + } + if (segment.styles.dim) { + style.opacity = 0.5; + } + + return ( + + {segment.text} + + ); + })} +
+
+ ); +}); + +LogLine.displayName = 'LogLine'; diff --git a/dash/src/hooks/index.ts b/dash/src/hooks/index.ts index 94821c7..3edcee7 100644 --- a/dash/src/hooks/index.ts +++ b/dash/src/hooks/index.ts @@ -1,6 +1,8 @@ export { useDeploymentMonitor } from './use-deployment-monitor'; +export type { DeploymentLogEntry } from './use-deployment-monitor'; export { useIsMobile } from './use-mobile'; export { useContainerLogs } from './use-container-logs'; +export type { ContainerLogEntry } from './use-container-logs'; export { useProjects } from './use-projects'; export { useProject } from './use-project'; export { useApplications } from './use-applications'; diff --git a/dash/src/hooks/use-container-logs.ts b/dash/src/hooks/use-container-logs.ts index 11ea57f..13d4cb6 100644 --- a/dash/src/hooks/use-container-logs.ts +++ b/dash/src/hooks/use-container-logs.ts @@ -1,10 +1,16 @@ import { useEffect, useRef, useState, useCallback } from 'react'; +export interface ContainerLogEntry { + line: string; + stream?: 'stdout' | 'stderr'; +} + interface ContainerLogEvent { type: 'log' | 'status' | 'error' | 'end'; timestamp: string; data: { line?: string; + stream?: 'stdout' | 'stderr'; message?: string; container?: string; state?: string; @@ -23,7 +29,7 @@ export const useContainerLogs = ({ enabled, onError, }: UseContainerLogsOptions) => { - const [logs, setLogs] = useState([]); + const [logs, setLogs] = useState([]); const [containerState, setContainerState] = useState(''); const [error, setError] = useState(null); const [isConnected, setIsConnected] = useState(false); @@ -70,7 +76,13 @@ export const useContainerLogs = ({ switch (logEvent.type) { case 'log': { if (logEvent.data.line && logEvent.data.line.trim()) { - setLogs((prev) => [...prev, logEvent.data.line!]); + setLogs((prev) => [ + ...prev, + { + line: logEvent.data.line!, + stream: logEvent.data.stream, + }, + ]); } break; } diff --git a/dash/src/hooks/use-deployment-monitor.ts b/dash/src/hooks/use-deployment-monitor.ts index e04797f..a74a2f6 100644 --- a/dash/src/hooks/use-deployment-monitor.ts +++ b/dash/src/hooks/use-deployment-monitor.ts @@ -1,6 +1,146 @@ import { useEffect, useRef, useState, useCallback } from 'react'; import type { DeploymentEvent, StatusUpdate, LogUpdate, Deployment } from '@/types/deployment'; +export interface DeploymentLogEntry { + line: string; + stream?: 'stdout' | 'stderr'; +} + +interface DockerLogEntry { + stream?: string; + aux?: Record; + error?: string; + errorDetail?: Record; + status?: string; // Docker pull status + id?: string; // Layer ID for pull + progress?: string; // Progress string + progressDetail?: Record; // Detailed progress +} + +/** + * Parse Docker build JSON format and extract content with stream detection + */ +function parseDockerLogLine(line: string): DeploymentLogEntry | null { + const trimmed = line.trim(); + + // Check if line looks like JSON + if (!trimmed.startsWith('{')) { + // Not JSON, detect stream type from content + return { + line: trimmed, + stream: detectStreamType(trimmed), + }; + } + + try { + const dockerLog: DockerLogEntry = JSON.parse(trimmed); + + // Extract content based on what's available + if (dockerLog.error) { + // Error field present - this is an error message + return { + line: dockerLog.error, + stream: 'stderr', + }; + } + + if (dockerLog.stream) { + // Stream field present - this is the main content + const content = dockerLog.stream; + return { + line: content, + stream: detectStreamType(content), + }; + } + + // Handle Docker image pull progress logs (status 
field) + // Skip most progress updates to reduce noise + if (dockerLog.status) { + switch (dockerLog.status) { + case 'Downloading': + case 'Extracting': + case 'Waiting': + case 'Verifying Checksum': + // Skip noisy progress updates + return null; + case 'Pull complete': + case 'Download complete': + case 'Already exists': + // Show completion messages + const completeMsg = dockerLog.id + ? `${dockerLog.status}: ${dockerLog.id}` + : dockerLog.status; + return { + line: completeMsg, + stream: 'stdout', + }; + default: + // Show other status messages (like "Pulling from...") + const statusMsg = dockerLog.id + ? `${dockerLog.status}: ${dockerLog.id}` + : dockerLog.status; + return { + line: statusMsg, + stream: 'stdout', + }; + } + } + + // Aux field only (metadata like image IDs) - skip these + if (dockerLog.aux) { + return null; + } + } catch { + // Failed to parse as JSON, treat as regular line + return { + line: trimmed, + stream: detectStreamType(trimmed), + }; + } + + // Unknown format, return original + return { + line: trimmed, + stream: 'stdout', + }; +} + +/** + * Detect if a log line is from stderr based on common patterns + */ +function detectStreamType(line: string): 'stdout' | 'stderr' { + const lineLower = line.toLowerCase(); + + const stderrPatterns = [ + 'error:', + 'err:', + 'fatal:', + 'panic:', + 'warning:', + 'warn:', + 'failed', + 'failure', + 'exception:', + 'traceback', + 'stack trace', + ' err ', + '[error]', + '[err]', + '[fatal]', + '[panic]', + '[warning]', + '[warn]', + ]; + + for (const pattern of stderrPatterns) { + if (lineLower.includes(pattern)) { + return 'stderr'; + } + } + + return 'stdout'; +} + interface UseDeploymentMonitorOptions { deploymentId: number; enabled: boolean; @@ -16,7 +156,7 @@ export const useDeploymentMonitor = ({ onError, onClose, }: UseDeploymentMonitorOptions) => { - const [logs, setLogs] = useState([]); + const [logs, setLogs] = useState([]); const [status, setStatus] = useState(null); const [error, setError] = useState(null); const [isConnected, setIsConnected] = useState(false); @@ -63,7 +203,13 @@ export const useDeploymentMonitor = ({ const logsContent: string = result.data.logs; if (logsContent) { - setLogs(logsContent.split('\n').filter(line => line.length > 0)); + setLogs( + logsContent + .split('\n') + .filter(line => line.length > 0) + .map(line => parseDockerLogLine(line)) + .filter(entry => entry !== null) as DeploymentLogEntry[] + ); } setStatus({ @@ -128,7 +274,13 @@ export const useDeploymentMonitor = ({ case 'log': { const logData = deploymentEvent.data as LogUpdate; if (logData.line && logData.line.trim()) { - setLogs((prev) => [...prev, logData.line]); + setLogs((prev) => [ + ...prev, + { + line: logData.line, + stream: logData.stream, + }, + ]); } break; } diff --git a/dash/src/lib/ansi-parser.ts b/dash/src/lib/ansi-parser.ts new file mode 100644 index 0000000..e905975 --- /dev/null +++ b/dash/src/lib/ansi-parser.ts @@ -0,0 +1,196 @@ +/** + * ANSI Parser for Terminal Color Codes + * Converts ANSI escape sequences to styled HTML spans + */ + +export interface AnsiSegment { + text: string; + styles: { + color?: string; + backgroundColor?: string; + bold?: boolean; + italic?: boolean; + underline?: boolean; + dim?: boolean; + }; +} + +// ANSI color mappings (standard 16 colors) +const ANSI_COLORS: Record = { + // Standard colors + 30: '#000000', // black + 31: '#ef4444', // red + 32: '#22c55e', // green + 33: '#eab308', // yellow + 34: '#3b82f6', // blue + 35: '#a855f7', // magenta + 36: '#06b6d4', // cyan + 37: 
'#f5f5f5', // white
+  // Bright colors
+  90: '#6b7280', // bright black (gray)
+  91: '#f87171', // bright red
+  92: '#4ade80', // bright green
+  93: '#fbbf24', // bright yellow
+  94: '#60a5fa', // bright blue
+  95: '#c084fc', // bright magenta
+  96: '#22d3ee', // bright cyan
+  97: '#ffffff', // bright white
+};
+
+const ANSI_BG_COLORS: Record<number, string> = {
+  // Background colors
+  40: '#000000',
+  41: '#ef4444',
+  42: '#22c55e',
+  43: '#eab308',
+  44: '#3b82f6',
+  45: '#a855f7',
+  46: '#06b6d4',
+  47: '#f5f5f5',
+  // Bright background colors
+  100: '#6b7280',
+  101: '#f87171',
+  102: '#4ade80',
+  103: '#fbbf24',
+  104: '#60a5fa',
+  105: '#c084fc',
+  106: '#22d3ee',
+  107: '#ffffff',
+};
+
+interface AnsiState {
+  color?: string;
+  backgroundColor?: string;
+  bold: boolean;
+  italic: boolean;
+  underline: boolean;
+  dim: boolean;
+}
+
+/**
+ * Parse ANSI escape sequences from a string and return styled segments
+ */
+export function parseAnsi(text: string): AnsiSegment[] {
+  // First, strip out non-SGR ANSI escape sequences (cursor movement, clear screen, etc.)
+  // Keep only SGR sequences (those ending in 'm')
+  text = text.replace(/\x1b\[[\d;]*[A-HJKSTfhilnsu]/g, '');
+
+  const segments: AnsiSegment[] = [];
+  const state: AnsiState = {
+    bold: false,
+    italic: false,
+    underline: false,
+    dim: false,
+  };
+
+  // Match ANSI escape sequences: \x1b[...m or \u001b[...m
+  const ansiRegex = /\x1b\[([0-9;]*)m/g;
+  let lastIndex = 0;
+  let match: RegExpExecArray | null;
+
+  while ((match = ansiRegex.exec(text)) !== null) {
+    // Add text before this escape sequence
+    if (match.index > lastIndex) {
+      const textSegment = text.substring(lastIndex, match.index);
+      if (textSegment) {
+        segments.push({
+          text: textSegment,
+          styles: { ...state },
+        });
+      }
+    }
+
+    // Parse the escape codes
+    const codes = match[1].split(';').map((code) => parseInt(code, 10));
+    for (const code of codes) {
+      if (isNaN(code)) continue;
+
+      switch (code) {
+        case 0: // Reset
+          state.color = undefined;
+          state.backgroundColor = undefined;
+          state.bold = false;
+          state.italic = false;
+          state.underline = false;
+          state.dim = false;
+          break;
+        case 1: // Bold
+          state.bold = true;
+          break;
+        case 2: // Dim
+          state.dim = true;
+          break;
+        case 3: // Italic
+          state.italic = true;
+          break;
+        case 4: // Underline
+          state.underline = true;
+          break;
+        case 22: // Normal intensity (not bold or dim)
+          state.bold = false;
+          state.dim = false;
+          break;
+        case 23: // Not italic
+          state.italic = false;
+          break;
+        case 24: // Not underlined
+          state.underline = false;
+          break;
+        case 39: // Default foreground color
+          state.color = undefined;
+          break;
+        case 49: // Default background color
+          state.backgroundColor = undefined;
+          break;
+        default:
+          // Foreground colors (30-37, 90-97)
+          if (ANSI_COLORS[code]) {
+            state.color = ANSI_COLORS[code];
+          }
+          // Background colors (40-47, 100-107)
+          else if (ANSI_BG_COLORS[code]) {
+            state.backgroundColor = ANSI_BG_COLORS[code];
+          }
+          break;
+      }
+    }
+
+    lastIndex = match.index + match[0].length;
+  }
+
+  // Add remaining text
+  if (lastIndex < text.length) {
+    const textSegment = text.substring(lastIndex);
+    if (textSegment) {
+      segments.push({
+        text: textSegment,
+        styles: { ...state },
+      });
+    }
+  }
+
+  // If no segments were created, return the whole text as one segment
+  if (segments.length === 0) {
+    segments.push({
+      text,
+      styles: {},
+    });
+  }
+
+  return segments;
+}
+
+/**
+ * Strip ANSI escape sequences from text
+ */
+export function stripAnsi(text: string): string {
+  // Remove all ANSI escape
sequences including SGR (m), cursor movement, etc. + return text.replace(/\x1b\[[0-9;]*[A-HJKSTfhilmnsu]?/g, ''); +} + +/** + * Detect if text contains ANSI escape sequences + */ +export function hasAnsi(text: string): boolean { + return /\x1b\[[0-9;]*[A-HJKSTfhilmnsu]?/.test(text); +} diff --git a/dash/src/pages/Logs.tsx b/dash/src/pages/Logs.tsx index 73e34b3..0ed7b13 100644 --- a/dash/src/pages/Logs.tsx +++ b/dash/src/pages/Logs.tsx @@ -4,6 +4,7 @@ import { Button } from "@/components/ui/button"; import { Badge } from "@/components/ui/badge"; import { Alert, AlertDescription } from "@/components/ui/alert"; import { Terminal, Trash2, Download, Pause, Play, AlertCircle, RefreshCw } from "lucide-react"; +import { LogLine } from "@/components/logs/log-line"; import { toast } from "sonner"; interface LogEvent { @@ -242,7 +243,7 @@ export const LogsPage = () => {
{logs.length === 0 && !connected && (
@@ -254,11 +255,16 @@ export const LogsPage = () => {

Waiting for logs...

)} - {logs.map((log, index) => ( -
- {log} -
- ))} +
+ {logs.map((log, index) => ( + + ))} +
{paused && pausedLogsRef.current.length > 0 && (
{pausedLogsRef.current.length} new log entries (paused) diff --git a/dash/src/types/deployment.ts b/dash/src/types/deployment.ts index 6bf4062..bae0594 100644 --- a/dash/src/types/deployment.ts +++ b/dash/src/types/deployment.ts @@ -30,6 +30,7 @@ export interface DeploymentEvent { export interface LogUpdate { line: string; + stream?: 'stdout' | 'stderr'; timestamp: string; } diff --git a/server/api/handlers/applications/delete.go b/server/api/handlers/applications/delete.go index 05d7543..83d55b9 100644 --- a/server/api/handlers/applications/delete.go +++ b/server/api/handlers/applications/delete.go @@ -6,7 +6,6 @@ import ( "fmt" "net/http" "os" - "os/exec" "path/filepath" "strconv" "time" @@ -16,6 +15,7 @@ import ( "github.com/corecollectives/mist/constants" "github.com/corecollectives/mist/docker" "github.com/corecollectives/mist/models" + "github.com/moby/moby/client" "github.com/rs/zerolog/log" ) @@ -78,30 +78,73 @@ func DeleteApplication(w http.ResponseWriter, r *http.Request) { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) defer cancel() - stopCmd := exec.CommandContext(ctx, "docker", "stop", containerName) - if err := stopCmd.Run(); err != nil { - log.Warn().Err(err).Str("container", containerName).Msg("Failed to stop container during app deletion") - } + cli, err := client.New(client.FromEnv) + if err != nil { + log.Warn().Err(err).Str("container", containerName).Msg("Failed to create Docker client during app deletion") + } else { + _, err = cli.ContainerStop(ctx, containerName, client.ContainerStopOptions{}) + if err != nil { + log.Warn().Err(err).Str("container", containerName).Msg("Failed to stop container during app deletion") + } - removeCmd := exec.CommandContext(ctx, "docker", "rm", containerName) - if err := removeCmd.Run(); err != nil { - log.Warn().Err(err).Str("container", containerName).Msg("Failed to remove container during app deletion") + _, err = cli.ContainerRemove(ctx, containerName, client.ContainerRemoveOptions{}) + if err != nil { + log.Warn().Err(err).Str("container", containerName).Msg("Failed to remove container during app deletion") + } } + + // legacy exec method + // + // + // stopCmd := exec.CommandContext(ctx, "docker", "stop", containerName) + // if err := stopCmd.Run(); err != nil { + // log.Warn().Err(err).Str("container", containerName).Msg("Failed to stop container during app deletion") + // } + // + // removeCmd := exec.CommandContext(ctx, "docker", "rm", containerName) + // if err := removeCmd.Run(); err != nil { + // log.Warn().Err(err).Str("container", containerName).Msg("Failed to remove container during app deletion") + // } } imagePattern := fmt.Sprintf("mist-app-%d-", app.ID) ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) defer cancel() - listImagesCmd := exec.CommandContext(ctx, "docker", "images", "-q", "--filter", fmt.Sprintf("reference=%s*", imagePattern)) - output, err := listImagesCmd.Output() - if err == nil && len(output) > 0 { - rmiCmd := exec.CommandContext(ctx, "sh", "-c", fmt.Sprintf("docker images -q --filter 'reference=%s*' | xargs -r docker rmi -f", imagePattern)) - if err := rmiCmd.Run(); err != nil { - log.Warn().Err(err).Int64("app_id", app.ID).Msg("Failed to remove Docker images during app deletion") + cli, err := client.New(client.FromEnv) + if err != nil { + log.Warn().Err(err).Int64("app_id", app.ID).Msg("Failed to create Docker client for image cleanup during app deletion") + } else { + filterArgs := make(client.Filters) + filterArgs.Add("reference", 
fmt.Sprintf("%s*", imagePattern)) + + imageListResult, err := cli.ImageList(ctx, client.ImageListOptions{ + Filters: filterArgs, + }) + if err == nil && len(imageListResult.Items) > 0 { + for _, img := range imageListResult.Items { + _, err := cli.ImageRemove(ctx, img.ID, client.ImageRemoveOptions{ + Force: true, + }) + if err != nil { + log.Warn().Err(err).Str("image_id", img.ID).Int64("app_id", app.ID).Msg("Failed to remove Docker image during app deletion") + } + } } } + // legacy exec method + // + // + // listImagesCmd := exec.CommandContext(ctx, "docker", "images", "-q", "--filter", fmt.Sprintf("reference=%s*", imagePattern)) + // output, err := listImagesCmd.Output() + // if err == nil && len(output) > 0 { + // rmiCmd := exec.CommandContext(ctx, "sh", "-c", fmt.Sprintf("docker images -q --filter 'reference=%s*' | xargs -r docker rmi -f", imagePattern)) + // if err := rmiCmd.Run(); err != nil { + // log.Warn().Err(err).Int64("app_id", app.ID).Msg("Failed to remove Docker images during app deletion") + // } + // } + appPath := fmt.Sprintf("/var/lib/mist/projects/%d/apps/%s", app.ProjectID, app.Name) if _, err := os.Stat(appPath); err == nil { if err := os.RemoveAll(appPath); err != nil { diff --git a/server/api/handlers/applications/update.go b/server/api/handlers/applications/update.go index fd9a3e1..c00fba8 100644 --- a/server/api/handlers/applications/update.go +++ b/server/api/handlers/applications/update.go @@ -1,10 +1,10 @@ package applications import ( + "context" "encoding/json" "fmt" "net/http" - "os/exec" "strings" "time" @@ -12,6 +12,7 @@ import ( "github.com/corecollectives/mist/api/middleware" "github.com/corecollectives/mist/docker" "github.com/corecollectives/mist/models" + "github.com/moby/moby/client" ) func UpdateApplication(w http.ResponseWriter, r *http.Request) { @@ -173,10 +174,28 @@ func recreateContainerAsync(appID int64) error { containerName := fmt.Sprintf("app-%d", appID) - cmd := exec.Command("docker", "inspect", containerName) - if err := cmd.Run(); err != nil { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + + cli, err := client.New(client.FromEnv) + if err != nil { + return nil + } + + _, err = cli.ContainerInspect(ctx, containerName, client.ContainerInspectOptions{}) + if err != nil { return nil } return docker.RecreateContainer(app) + + // legacy exec method + // + // + // cmd := exec.Command("docker", "inspect", containerName) + // if err := cmd.Run(); err != nil { + // return nil + // } + // + // return docker.RecreateContainer(app) } diff --git a/server/api/handlers/deployments/logsHandler.go b/server/api/handlers/deployments/logsHandler.go index 58d0e19..0dbf2cf 100644 --- a/server/api/handlers/deployments/logsHandler.go +++ b/server/api/handlers/deployments/logsHandler.go @@ -79,11 +79,8 @@ func LogsHandler(w http.ResponseWriter, r *http.Request) { }() go func() { - // Wait up to 10 seconds for log file to appear - fileFound := false for i := 0; i < 20; i++ { if _, err := os.Stat(logPath); err == nil { - fileFound = true break } select { @@ -93,62 +90,25 @@ func LogsHandler(w http.ResponseWriter, r *http.Request) { } } - if !fileFound { - select { - case <-ctx.Done(): - return - case events <- websockets.DeploymentEvent{ - Type: "error", - Timestamp: time.Now(), - Data: map[string]string{ - "message": "Deployment log file not found", - }, - }: - } - return - } - send := make(chan string, 100) - errChan := make(chan error, 1) go func() { - err := websockets.WatcherLogs(ctx, logPath, send) - if err != nil 
{ - errChan <- err - } + _ = websockets.WatcherLogs(ctx, logPath, send) close(send) }() - for { + for line := range send { select { case <-ctx.Done(): return - case err := <-errChan: - if err != nil { - events <- websockets.DeploymentEvent{ - Type: "error", - Timestamp: time.Now(), - Data: map[string]string{ - "message": "Failed to read deployment logs: " + err.Error(), - }, - } - return - } - case line, ok := <-send: - if !ok { - return - } - select { - case <-ctx.Done(): - return - case events <- websockets.DeploymentEvent{ - Type: "log", + case events <- websockets.DeploymentEvent{ + Type: "log", + Timestamp: time.Now(), + Data: websockets.LogUpdate{ + Line: line, + Stream: websockets.DetectStreamType(line), Timestamp: time.Now(), - Data: websockets.LogUpdate{ - Line: line, - Timestamp: time.Now(), - }, - }: - } + }, + }: } } }() diff --git a/server/docker/build.go b/server/docker/build.go deleted file mode 100644 index 3b13a2a..0000000 --- a/server/docker/build.go +++ /dev/null @@ -1,275 +0,0 @@ -package docker - -import ( - "context" - "fmt" - "os" - "os/exec" - "strings" - "time" - - "github.com/corecollectives/mist/models" - "github.com/rs/zerolog/log" -) - -func BuildImage(imageTag, contextPath string, envVars map[string]string, logfile *os.File) error { - buildArgs := []string{"build", "-t", imageTag} - - for key, value := range envVars { - buildArgs = append(buildArgs, "--build-arg", fmt.Sprintf("%s=%s", key, value)) - } - - buildArgs = append(buildArgs, contextPath) - - log.Debug().Strs("build_args", buildArgs).Str("image_tag", imageTag).Msg("Building Docker image") - - ctx, cancel := context.WithTimeout(context.Background(), 15*time.Minute) - defer cancel() - - cmd := exec.CommandContext(ctx, "docker", buildArgs...) - cmd.Stdout = logfile - cmd.Stderr = logfile - - if err := cmd.Run(); err != nil { - if ctx.Err() == context.DeadlineExceeded { - return fmt.Errorf("docker build timed out after 15 minutes") - } - exitCode := -1 - if exitErr, ok := err.(*exec.ExitError); ok { - exitCode = exitErr.ExitCode() - } - return fmt.Errorf("docker build failed with exit code %d: %w", exitCode, err) - } - return nil -} - -func StopRemoveContainer(containerName string, logfile *os.File) error { - ifExists := ContainerExists(containerName) - if !ifExists { - return nil - } - - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) - defer cancel() - - stopCmd := exec.CommandContext(ctx, "docker", "stop", containerName) - stopCmd.Stdout = logfile - stopCmd.Stderr = logfile - if err := stopCmd.Run(); err != nil { - if ctx.Err() == context.DeadlineExceeded { - return fmt.Errorf("docker stop timed out after 2 minutes for container %s", containerName) - } - return fmt.Errorf("failed to stop container %s: %w", containerName, err) - } - - removeCmd := exec.CommandContext(ctx, "docker", "rm", containerName) - removeCmd.Stdout = logfile - removeCmd.Stderr = logfile - if err := removeCmd.Run(); err != nil { - return fmt.Errorf("failed to remove container %s: %w", containerName, err) - } - - return nil -} - -func ContainerExists(name string) bool { - cmd := exec.Command("docker", "inspect", name) - output, err := cmd.CombinedOutput() - - if err != nil { - if strings.Contains(string(output), "No such object") { - return false - } - return false - } - - return true -} - -func RunContainer(app *models.App, imageTag, containerName string, domains []string, Port int, envVars map[string]string, logfile *os.File) error { - - runArgs := []string{ - "run", "-d", - "--name", containerName, - } - - 
restartPolicy := string(app.RestartPolicy) - if restartPolicy == "" { - restartPolicy = "unless-stopped" - } - runArgs = append(runArgs, "--restart", restartPolicy) - - if app.CPULimit != nil && *app.CPULimit > 0 { - runArgs = append(runArgs, "--cpus", fmt.Sprintf("%.2f", *app.CPULimit)) - } - - if app.MemoryLimit != nil && *app.MemoryLimit > 0 { - runArgs = append(runArgs, "-m", fmt.Sprintf("%dm", *app.MemoryLimit)) - } - - // Add volumes from the volumes table (user-configurable) - volumes, err := models.GetVolumesByAppID(app.ID) - if err == nil { - for _, vol := range volumes { - volumeArg := fmt.Sprintf("%s:%s", vol.HostPath, vol.ContainerPath) - if vol.ReadOnly { - volumeArg += ":ro" - } - runArgs = append(runArgs, "-v", volumeArg) - } - } - - for key, value := range envVars { - runArgs = append(runArgs, "-e", fmt.Sprintf("%s=%s", key, value)) - } - - switch app.AppType { - case models.AppTypeWeb: - if len(domains) > 0 { - runArgs = append(runArgs, - "--network", "traefik-net", - "-l", "traefik.enable=true", - ) - - var hostRules []string - for _, domain := range domains { - hostRules = append(hostRules, fmt.Sprintf("Host(`%s`)", domain)) - } - hostRule := strings.Join(hostRules, " || ") - - runArgs = append(runArgs, - "-l", fmt.Sprintf("traefik.http.routers.%s.rule=%s", containerName, hostRule), - "-l", fmt.Sprintf("traefik.http.routers.%s.entrypoints=websecure", containerName), - "-l", fmt.Sprintf("traefik.http.routers.%s.tls=true", containerName), - "-l", fmt.Sprintf("traefik.http.routers.%s.tls.certresolver=le", containerName), - "-l", fmt.Sprintf("traefik.http.services.%s.loadbalancer.server.port=%d", containerName, Port), - ) - - runArgs = append(runArgs, - - "-l", fmt.Sprintf("traefik.http.routers.%s-http.rule=%s", containerName, hostRule), - "-l", fmt.Sprintf("traefik.http.routers.%s-http.entrypoints=web", containerName), - "-l", fmt.Sprintf("traefik.http.routers.%s-http.middlewares=%s-https-redirect", containerName, containerName), - - "-l", fmt.Sprintf("traefik.http.middlewares.%s-https-redirect.redirectscheme.scheme=https", containerName), - ) - } else { - runArgs = append(runArgs, - "-p", fmt.Sprintf("%d:%d", Port, Port), - ) - } - - case models.AppTypeService: - runArgs = append(runArgs, "--network", "traefik-net") - - case models.AppTypeDatabase: - runArgs = append(runArgs, "--network", "traefik-net") - - default: - runArgs = append(runArgs, - "-p", fmt.Sprintf("%d:%d", Port, Port), - ) - } - - runArgs = append(runArgs, imageTag) - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) - defer cancel() - - cmd := exec.CommandContext(ctx, "docker", runArgs...) 
- cmd.Stdout = logfile - cmd.Stderr = logfile - - if err := cmd.Run(); err != nil { - if ctx.Err() == context.DeadlineExceeded { - return fmt.Errorf("docker run timed out after 5 minutes") - } - exitCode := -1 - if exitErr, ok := err.(*exec.ExitError); ok { - exitCode = exitErr.ExitCode() - } - return fmt.Errorf("docker run failed with exit code %d: %w", exitCode, err) - } - - return nil -} - -func PullDockerImage(imageName string, logfile *os.File) error { - ctx, cancel := context.WithTimeout(context.Background(), 15*time.Minute) - defer cancel() - - pullCmd := exec.CommandContext(ctx, "docker", "pull", imageName) - pullCmd.Stdout = logfile - pullCmd.Stderr = logfile - - if err := pullCmd.Run(); err != nil { - if ctx.Err() == context.DeadlineExceeded { - return fmt.Errorf("docker pull timed out after 15 minutes for image %s", imageName) - } - exitCode := -1 - if exitErr, ok := err.(*exec.ExitError); ok { - exitCode = exitErr.ExitCode() - } - return fmt.Errorf("docker pull failed with exit code %d: %w", exitCode, err) - } - return nil -} - -func RecreateContainer(app *models.App) error { - containerName := GetContainerName(app.Name, app.ID) - - if !ContainerExists(containerName) { - return fmt.Errorf("container %s does not exist", containerName) - } - - cmd := exec.Command("docker", "inspect", containerName, "--format", "{{.Config.Image}}") - output, err := cmd.Output() - if err != nil { - return fmt.Errorf("failed to get container image: %w", err) - } - imageTag := strings.TrimSpace(string(output)) - - port, domains, envVars, err := GetDeploymentConfigForApp(app) - if err != nil { - return fmt.Errorf("failed to get deployment configuration: %w", err) - } - - if err := StopRemoveContainer(containerName, nil); err != nil { - return fmt.Errorf("failed to stop/remove container: %w", err) - } - - if err := RunContainer(app, imageTag, containerName, domains, port, envVars, nil); err != nil { - return fmt.Errorf("failed to run container: %w", err) - } - - return nil -} - -func GetDeploymentConfigForApp(app *models.App) (int, []string, map[string]string, error) { - port := 3000 - if app.Port != nil { - port = int(*app.Port) - } - - domains, err := models.GetDomainsByAppID(app.ID) - if err != nil && err.Error() != "sql: no rows in result set" { - return 0, nil, nil, fmt.Errorf("get domains failed: %w", err) - } - - var domainStrings []string - for _, d := range domains { - domainStrings = append(domainStrings, d.Domain) - } - - envs, err := models.GetEnvVariablesByAppID(app.ID) - if err != nil && err.Error() != "sql: no rows in result set" { - return 0, nil, nil, fmt.Errorf("get env variables failed: %w", err) - } - - envMap := make(map[string]string) - for _, env := range envs { - envMap[env.Key] = env.Value - } - - return port, domainStrings, envMap, nil -} diff --git a/server/docker/cleanup.go b/server/docker/cleanup.go index 68eefbd..d67483b 100644 --- a/server/docker/cleanup.go +++ b/server/docker/cleanup.go @@ -3,11 +3,10 @@ package docker import ( "context" "fmt" - "os/exec" "sort" - "strings" "time" + "github.com/moby/moby/client" "github.com/rs/zerolog/log" ) @@ -21,12 +20,17 @@ func CleanupOldImages(appID int64, keepCount int) error { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) defer cancel() - listCmd := exec.CommandContext(ctx, "docker", "images", - "--filter", fmt.Sprintf("reference=%s*", imagePattern), - "--format", "{{.Repository}}:{{.Tag}} {{.CreatedAt}}", - ) + cli, err := client.New(client.FromEnv) + if err != nil { + return fmt.Errorf("error creating 
moby client: %s", err.Error()) + } - output, err := listCmd.Output() + filterArgs := make(client.Filters) + filterArgs.Add("reference", fmt.Sprintf("%s*", imagePattern)) + + imageListResult, err := cli.ImageList(ctx, client.ImageListOptions{ + Filters: filterArgs, + }) if err != nil { if ctx.Err() == context.DeadlineExceeded { return fmt.Errorf("listing images timed out") @@ -34,36 +38,31 @@ func CleanupOldImages(appID int64, keepCount int) error { return fmt.Errorf("failed to list images: %w", err) } - if len(output) == 0 { + imageList := imageListResult.Items + + if len(imageList) == 0 { return nil } - lines := strings.Split(strings.TrimSpace(string(output)), "\n") - if len(lines) <= keepCount { + if len(imageList) <= keepCount { return nil } type imageInfo struct { - name string - timestamp string + id string + created int64 } var images []imageInfo - for _, line := range lines { - if line == "" { - continue - } - parts := strings.SplitN(line, " ", 2) - if len(parts) == 2 { - images = append(images, imageInfo{ - name: parts[0], - timestamp: parts[1], - }) - } + for _, img := range imageList { + images = append(images, imageInfo{ + id: img.ID, + created: img.Created, + }) } sort.Slice(images, func(i, j int) bool { - return images[i].timestamp > images[j].timestamp + return images[i].created > images[j].created }) if len(images) > keepCount { @@ -71,23 +70,103 @@ func CleanupOldImages(appID int64, keepCount int) error { for _, img := range imagesToRemove { rmiCtx, rmiCancel := context.WithTimeout(context.Background(), 1*time.Minute) - rmiCmd := exec.CommandContext(rmiCtx, "docker", "rmi", "-f", img.name) - if err := rmiCmd.Run(); err != nil { - log.Warn().Err(err).Str("image", img.name).Msg("Failed to remove old image") + _, err := cli.ImageRemove(rmiCtx, img.id, client.ImageRemoveOptions{ + Force: true, + }) + if err != nil { + log.Warn().Err(err).Str("image_id", img.id).Msg("Failed to remove old image") } rmiCancel() } } return nil + + // legacy exec method + // + // + // imagePattern := fmt.Sprintf("mist-app-%d-", appID) + // + // ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + // defer cancel() + // + // listCmd := exec.CommandContext(ctx, "docker", "images", + // "--filter", fmt.Sprintf("reference=%s*", imagePattern), + // "--format", "{{.Repository}}:{{.Tag}} {{.CreatedAt}}", + // ) + // + // output, err := listCmd.Output() + // if err != nil { + // if ctx.Err() == context.DeadlineExceeded { + // return fmt.Errorf("listing images timed out") + // } + // return fmt.Errorf("failed to list images: %w", err) + // } + // + // if len(output) == 0 { + // return nil + // } + // + // lines := strings.Split(strings.TrimSpace(string(output)), "\n") + // if len(lines) <= keepCount { + // return nil + // } + // + // type imageInfo struct { + // name string + // timestamp string + // } + // + // var images []imageInfo + // for _, line := range lines { + // if line == "" { + // continue + // } + // parts := strings.SplitN(line, " ", 2) + // if len(parts) == 2 { + // images = append(images, imageInfo{ + // name: parts[0], + // timestamp: parts[1], + // }) + // } + // } + // + // sort.Slice(images, func(i, j int) bool { + // return images[i].timestamp > images[j].timestamp + // }) + // + // if len(images) > keepCount { + // imagesToRemove := images[keepCount:] + // + // for _, img := range imagesToRemove { + // rmiCtx, rmiCancel := context.WithTimeout(context.Background(), 1*time.Minute) + // rmiCmd := exec.CommandContext(rmiCtx, "docker", "rmi", "-f", img.name) + // if err 
:= rmiCmd.Run(); err != nil { + // log.Warn().Err(err).Str("image", img.name).Msg("Failed to remove old image") + // } + // rmiCancel() + // } + // } + // + // return nil } func CleanupDanglingImages() error { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) defer cancel() - pruneCmd := exec.CommandContext(ctx, "docker", "image", "prune", "-f") - if err := pruneCmd.Run(); err != nil { + cli, err := client.New(client.FromEnv) + if err != nil { + return fmt.Errorf("error creating moby client: %s", err.Error()) + } + + filterArgs := make(client.Filters) + filterArgs.Add("dangling", "true") + + _, err = cli.ImagePrune(ctx, client.ImagePruneOptions{ + Filters: filterArgs, + }) + if err != nil { if ctx.Err() == context.DeadlineExceeded { return fmt.Errorf("pruning images timed out") } @@ -95,14 +174,35 @@ func CleanupDanglingImages() error { } return nil + + // legacy exec method + // + // + // ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + // defer cancel() + // + // pruneCmd := exec.CommandContext(ctx, "docker", "image", "prune", "-f") + // if err := pruneCmd.Run(); err != nil { + // if ctx.Err() == context.DeadlineExceeded { + // return fmt.Errorf("pruning images timed out") + // } + // return fmt.Errorf("failed to prune dangling images: %w", err) + // } + // + // return nil } func CleanupStoppedContainers() error { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) defer cancel() - pruneCmd := exec.CommandContext(ctx, "docker", "container", "prune", "-f") - if err := pruneCmd.Run(); err != nil { + cli, err := client.New(client.FromEnv) + if err != nil { + return fmt.Errorf("error creating moby client: %s", err.Error()) + } + + _, err = cli.ContainerPrune(ctx, client.ContainerPruneOptions{}) + if err != nil { if ctx.Err() == context.DeadlineExceeded { return fmt.Errorf("pruning containers timed out") } @@ -110,36 +210,157 @@ func CleanupStoppedContainers() error { } return nil + + // legacy exec method + // + // + // ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + // defer cancel() + // + // pruneCmd := exec.CommandContext(ctx, "docker", "container", "prune", "-f") + // if err := pruneCmd.Run(); err != nil { + // if ctx.Err() == context.DeadlineExceeded { + // return fmt.Errorf("pruning containers timed out") + // } + // return fmt.Errorf("failed to prune stopped containers: %w", err) + // } + // + // return nil } func SystemPrune() (string, error) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) defer cancel() - pruneCmd := exec.CommandContext(ctx, "docker", "system", "prune", "-f") - output, err := pruneCmd.CombinedOutput() + cli, err := client.New(client.FromEnv) if err != nil { - if ctx.Err() == context.DeadlineExceeded { - return "", fmt.Errorf("system prune timed out") - } - return string(output), fmt.Errorf("failed to run system prune: %w", err) + return "", fmt.Errorf("error creating moby client: %s", err.Error()) + } + + var output string + + // Prune containers + containerReport, err := cli.ContainerPrune(ctx, client.ContainerPruneOptions{}) + if err != nil { + return output, fmt.Errorf("failed to prune containers: %w", err) + } + output += fmt.Sprintf("Deleted Containers: %v\n", containerReport.Report.ContainersDeleted) + output += fmt.Sprintf("Space Reclaimed: %d bytes\n", containerReport.Report.SpaceReclaimed) + + // Prune images (dangling only for basic prune) + filterArgs := make(client.Filters) + filterArgs.Add("dangling", "true") + imageReport, err := 
cli.ImagePrune(ctx, client.ImagePruneOptions{ + Filters: filterArgs, + }) + if err != nil { + return output, fmt.Errorf("failed to prune images: %w", err) + } + output += fmt.Sprintf("Deleted Images: %v\n", imageReport.Report.ImagesDeleted) + output += fmt.Sprintf("Space Reclaimed: %d bytes\n", imageReport.Report.SpaceReclaimed) + + // Prune networks + networkReport, err := cli.NetworkPrune(ctx, client.NetworkPruneOptions{}) + if err != nil { + return output, fmt.Errorf("failed to prune networks: %w", err) + } + output += fmt.Sprintf("Deleted Networks: %v\n", networkReport.Report.NetworksDeleted) + + // Prune build cache + buildCacheReport, err := cli.BuildCachePrune(ctx, client.BuildCachePruneOptions{}) + if err != nil { + return output, fmt.Errorf("failed to prune build cache: %w", err) } + output += fmt.Sprintf("Space Reclaimed from build cache: %d bytes\n", buildCacheReport.Report.SpaceReclaimed) - return string(output), nil + if ctx.Err() == context.DeadlineExceeded { + return output, fmt.Errorf("system prune timed out") + } + + return output, nil + + // legacy exec method + // + // + // ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + // defer cancel() + // + // pruneCmd := exec.CommandContext(ctx, "docker", "system", "prune", "-f") + // output, err := pruneCmd.CombinedOutput() + // if err != nil { + // if ctx.Err() == context.DeadlineExceeded { + // return "", fmt.Errorf("system prune timed out") + // } + // return string(output), fmt.Errorf("failed to run system prune: %w", err) + // } + // + // return string(output), nil } func SystemPruneAll() (string, error) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) defer cancel() - pruneCmd := exec.CommandContext(ctx, "docker", "system", "prune", "-a", "-f") - output, err := pruneCmd.CombinedOutput() + cli, err := client.New(client.FromEnv) if err != nil { - if ctx.Err() == context.DeadlineExceeded { - return "", fmt.Errorf("aggressive system prune timed out") - } - return string(output), fmt.Errorf("failed to run aggressive system prune: %w", err) + return "", fmt.Errorf("error creating moby client: %s", err.Error()) + } + + var output string + + // Prune containers + containerReport, err := cli.ContainerPrune(ctx, client.ContainerPruneOptions{}) + if err != nil { + return output, fmt.Errorf("failed to prune containers: %w", err) + } + output += fmt.Sprintf("Deleted Containers: %v\n", containerReport.Report.ContainersDeleted) + output += fmt.Sprintf("Space Reclaimed: %d bytes\n", containerReport.Report.SpaceReclaimed) + + // Prune ALL unused images (not just dangling, equivalent to -a flag) + // By not specifying dangling=true filter, it will prune all unused images + imageReport, err := cli.ImagePrune(ctx, client.ImagePruneOptions{}) + if err != nil { + return output, fmt.Errorf("failed to prune images: %w", err) + } + output += fmt.Sprintf("Deleted Images: %v\n", imageReport.Report.ImagesDeleted) + output += fmt.Sprintf("Space Reclaimed: %d bytes\n", imageReport.Report.SpaceReclaimed) + + // Prune networks + networkReport, err := cli.NetworkPrune(ctx, client.NetworkPruneOptions{}) + if err != nil { + return output, fmt.Errorf("failed to prune networks: %w", err) } + output += fmt.Sprintf("Deleted Networks: %v\n", networkReport.Report.NetworksDeleted) + + // Prune build cache (all) + buildCacheReport, err := cli.BuildCachePrune(ctx, client.BuildCachePruneOptions{ + All: true, + }) + if err != nil { + return output, fmt.Errorf("failed to prune build cache: %w", err) + } + output += 
fmt.Sprintf("Space Reclaimed from build cache: %d bytes\n", buildCacheReport.Report.SpaceReclaimed) + + if ctx.Err() == context.DeadlineExceeded { + return output, fmt.Errorf("aggressive system prune timed out") + } + + return output, nil - return string(output), nil + // legacy exec method + // + // + // ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + // defer cancel() + // + // pruneCmd := exec.CommandContext(ctx, "docker", "system", "prune", "-a", "-f") + // output, err := pruneCmd.CombinedOutput() + // if err != nil { + // if ctx.Err() == context.DeadlineExceeded { + // return "", fmt.Errorf("aggressive system prune timed out") + // } + // return string(output), fmt.Errorf("failed to run aggressive system prune: %w", err) + // } + // + // return string(output), nil } diff --git a/server/docker/container.go b/server/docker/container.go index 0551f2a..5b9a948 100644 --- a/server/docker/container.go +++ b/server/docker/container.go @@ -2,13 +2,517 @@ package docker import ( "context" - "encoding/json" "fmt" - "os/exec" + "net/netip" + "os" "strings" "time" + + "github.com/corecollectives/mist/models" + "github.com/moby/moby/api/types/container" + "github.com/moby/moby/api/types/network" + "github.com/moby/moby/client" + "github.com/rs/zerolog/log" ) +func StopContainer(containerName string) error { + if !ContainerExists(containerName) { + return fmt.Errorf("container %s does not exist", containerName) + } + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + cli, err := client.New(client.FromEnv) + if err != nil { + return fmt.Errorf("error making moby client: %s", err.Error()) + } + _, err = cli.ContainerStop(ctx, containerName, client.ContainerStopOptions{}) + if err != nil { + if ctx.Err() == context.DeadlineExceeded { + return fmt.Errorf("docker stop timed out after 2 minutes for container %s", containerName) + } + return fmt.Errorf("failed to stop container: %w", err) + } + return nil + + //legacy exec method + // + // + // cmd := exec.CommandContext(ctx, "docker", "stop", containerName) + // if err := cmd.Run(); err != nil { + // if ctx.Err() == context.DeadlineExceeded { + // return fmt.Errorf("docker stop timed out after 2 minutes for container %s", containerName) + // } + // return fmt.Errorf("failed to stop container: %w", err) + // } + // + // return nil +} + +func StartContainer(containerName string) error { + if !ContainerExists(containerName) { + return fmt.Errorf("container %s does not exist", containerName) + } + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + cli, err := client.New(client.FromEnv) + if err != nil { + return fmt.Errorf("error creating moby client: %s", err.Error()) + } + _, err = cli.ContainerStart(ctx, containerName, client.ContainerStartOptions{}) + if err != nil { + if ctx.Err() == context.DeadlineExceeded { + return fmt.Errorf("docker start timed out after 1 minute for container %s", containerName) + } + return fmt.Errorf("failed to start container: %w", err) + } + return nil + + // legacy exec method + // + // + // cmd := exec.CommandContext(ctx, "docker", "start", containerName) + // if err := cmd.Run(); err != nil { + // if ctx.Err() == context.DeadlineExceeded { + // return fmt.Errorf("docker start timed out after 1 minute for container %s", containerName) + // } + // return fmt.Errorf("failed to start container: %w", err) + // } + // + // return nil +} + +func RestartContainer(containerName string) error { + if 
!ContainerExists(containerName) {
+        return fmt.Errorf("container %s does not exist", containerName)
+    }
+
+    ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute)
+    defer cancel()
+    cli, err := client.New(client.FromEnv)
+    if err != nil {
+        return fmt.Errorf("error creating moby client: %s", err.Error())
+    }
+
+    _, err = cli.ContainerRestart(ctx, containerName, client.ContainerRestartOptions{})
+    if err != nil {
+        if ctx.Err() == context.DeadlineExceeded {
+            return fmt.Errorf("docker restart timed out after 3 minutes for container %s", containerName)
+        }
+        return fmt.Errorf("failed to restart container: %w", err)
+    }
+    return nil
+
+    // legacy exec method
+    //
+    //
+    // cmd := exec.CommandContext(ctx, "docker", "restart", containerName)
+    // if err := cmd.Run(); err != nil {
+    // if ctx.Err() == context.DeadlineExceeded {
+    // return fmt.Errorf("docker restart timed out after 3 minutes for container %s", containerName)
+    // }
+    // return fmt.Errorf("failed to restart container: %w", err)
+    // }
+    //
+    // return nil
+}
+
+func GetContainerLogs(containerName string, tail int) (string, error) {
+    if !ContainerExists(containerName) {
+        return "", fmt.Errorf("container %s does not exist", containerName)
+    }
+
+    ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
+    defer cancel()
+    cli, err := client.New(client.FromEnv)
+    if err != nil {
+        return "", fmt.Errorf("error creating moby client: %s", err.Error())
+    }
+
+    tailStr := fmt.Sprintf("%d", tail)
+    options := client.ContainerLogsOptions{
+        ShowStdout: true,
+        ShowStderr: true,
+        Tail: tailStr,
+    }
+
+    logReader, err := cli.ContainerLogs(ctx, containerName, options)
+    if err != nil {
+        return "", fmt.Errorf("failed to get container logs: %w", err)
+    }
+    defer logReader.Close()
+
+    var logs strings.Builder
+    buf := make([]byte, 8192)
+    for {
+        n, err := logReader.Read(buf)
+        if n > 0 {
+            logs.Write(buf[:n])
+        }
+        if err != nil {
+            break
+        }
+    }
+
+    return logs.String(), nil
+
+    // legacy exec method
+    //
+    //
+    // tailStr := fmt.Sprintf("%d", tail)
+    // cmd := exec.Command("docker", "logs", "--tail", tailStr, containerName)
+    // output, err := cmd.CombinedOutput()
+    // if err != nil {
+    // return "", fmt.Errorf("failed to get container logs: %w", err)
+    // }
+    //
+    // return string(output), nil
+}
+
+func GetContainerName(appName string, appId int64) string {
+    return fmt.Sprintf("app-%d", appId)
+}
+
+func StopRemoveContainer(containerName string, logfile *os.File) error {
+    ifExists := ContainerExists(containerName)
+    if !ifExists {
+        return nil
+    }
+    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
+    defer cancel()
+    cli, err := client.New(client.FromEnv)
+    if err != nil {
+        return fmt.Errorf("error creating moby client: %s", err.Error())
+    }
+    _, err = cli.ContainerStop(ctx, containerName, client.ContainerStopOptions{})
+    if err != nil {
+        if ctx.Err() == context.DeadlineExceeded {
+            return fmt.Errorf("docker stop timed out after 2 minutes for container %s", containerName)
+        }
+        return fmt.Errorf("failed to stop container %s: %w", containerName, err)
+    }
+
+    _, err = cli.ContainerRemove(ctx, containerName, client.ContainerRemoveOptions{})
+    if err != nil {
+        return fmt.Errorf("failed to remove container %s: %w", containerName, err)
+    }
+
+    return nil
+
+    // legacy exec method
+    //
+    //
+    // stopCmd := exec.CommandContext(ctx, "docker", "stop", containerName)
+    // stopCmd.Stdout = logfile
+    // stopCmd.Stderr = logfile
+    // if err := stopCmd.Run(); err != nil {
+    // if ctx.Err() ==
context.DeadlineExceeded { + // return fmt.Errorf("docker stop timed out after 2 minutes for container %s", containerName) + // } + // return fmt.Errorf("failed to stop container %s: %w", containerName, err) + // } + // + // removeCmd := exec.CommandContext(ctx, "docker", "rm", containerName) + // removeCmd.Stdout = logfile + // removeCmd.Stderr = logfile + // if err := removeCmd.Run(); err != nil { + // return fmt.Errorf("failed to remove container %s: %w", containerName, err) + // } + // + // return nil +} +func ContainerExists(name string) bool { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + cli, err := client.New(client.FromEnv) + if err != nil { + log.Error().Err(err).Msg("failed to create docker client") + return false + } + _, err = cli.ContainerInspect(ctx, name, client.ContainerInspectOptions{}) + if err != nil { + log.Error().Msg("container not found") + return false + } + return true + + // legacy exec method + // + // + // cmd := exec.Command("docker", "inspect", name) + // output, err := cmd.CombinedOutput() + // + // if err != nil { + // if strings.Contains(string(output), "No such object") { + // return false + // } + // return false + // } + // + // return true +} + +func RunContainer(app *models.App, imageTag, containerName string, domains []string, Port int, envVars map[string]string, logfile *os.File) error { + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + cli, err := client.New(client.FromEnv) + if err != nil { + return fmt.Errorf("error creating moby client: %s", err.Error()) + } + var restartPolicy container.RestartPolicyMode + switch string(app.RestartPolicy) { + + case string(models.RestartPolicyNo): + restartPolicy = container.RestartPolicyDisabled + case string(models.RestartPolicyAlways): + restartPolicy = container.RestartPolicyAlways + case string(models.RestartPolicyOnFailure): + restartPolicy = container.RestartPolicyOnFailure + case string(models.RestartPolicyUnlessStopped): + restartPolicy = container.RestartPolicyUnlessStopped + default: + restartPolicy = container.RestartPolicyUnlessStopped + } + + var volumeBinds []string + volumes, err := models.GetVolumesByAppID(app.ID) + if err == nil { + for _, vol := range volumes { + volumeBindArg := fmt.Sprintf("%s:%s", vol.HostPath, vol.ContainerPath) + if vol.ReadOnly { + volumeBindArg += ":ro" + } + volumeBinds = append(volumeBinds, volumeBindArg) + } + } + + var envList []string + for key, value := range envVars { + envList = append(envList, fmt.Sprintf("%s=%s", key, value)) + } + + labels := make(map[string]string) + + networkMode := "" + exposedPorts := make(network.PortSet) + portBindings := make(network.PortMap) + + switch app.AppType { + case models.AppTypeWeb: + if len(domains) > 0 { + networkMode = "traefik-net" + labels["traefik.enable"] = "true" + + var hostRules []string + for _, domain := range domains { + hostRules = append(hostRules, fmt.Sprintf("Host(`%s`)", domain)) + } + hostRule := strings.Join(hostRules, " || ") + + labels[fmt.Sprintf("traefik.http.routers.%s.rule", containerName)] = hostRule + labels[fmt.Sprintf("traefik.http.routers.%s.entrypoints", containerName)] = "websecure" + labels[fmt.Sprintf("traefik.http.routers.%s.tls", containerName)] = "true" + labels[fmt.Sprintf("traefik.http.routers.%s.tls.certresolver", containerName)] = "le" + labels[fmt.Sprintf("traefik.http.services.%s.loadbalancer.server.port", containerName)] = fmt.Sprintf("%d", Port) + + 
labels[fmt.Sprintf("traefik.http.routers.%s-http.rule", containerName)] = hostRule + labels[fmt.Sprintf("traefik.http.routers.%s-http.entrypoints", containerName)] = "web" + labels[fmt.Sprintf("traefik.http.routers.%s-http.middlewares", containerName)] = fmt.Sprintf("%s-https-redirect", containerName) + labels[fmt.Sprintf("traefik.http.middlewares.%s-https-redirect.redirectscheme.scheme", containerName)] = "https" + } else { + port, err := network.ParsePort(fmt.Sprintf("%d/tcp", Port)) + if err != nil { + return fmt.Errorf("failed to parse port: %w", err) + } + hostIP, err := netip.ParseAddr("0.0.0.0") + if err != nil { + return fmt.Errorf("failed to parse host IP: %w", err) + } + exposedPorts[port] = struct{}{} + portBindings[port] = []network.PortBinding{ + { + HostIP: hostIP, + HostPort: fmt.Sprintf("%d", Port), + }, + } + } + + case models.AppTypeService: + networkMode = "traefik-net" + + case models.AppTypeDatabase: + networkMode = "traefik-net" + + default: + port, err := network.ParsePort(fmt.Sprintf("%d/tcp", Port)) + if err != nil { + return fmt.Errorf("failed to parse port: %w", err) + } + hostIP, err := netip.ParseAddr("0.0.0.0") + if err != nil { + return fmt.Errorf("failed to parse host IP: %w", err) + } + exposedPorts[port] = struct{}{} + portBindings[port] = []network.PortBinding{ + { + HostIP: hostIP, + HostPort: fmt.Sprintf("%d", Port), + }, + } + } + + hostConfig := container.HostConfig{ + RestartPolicy: container.RestartPolicy{ + Name: restartPolicy, + }, + Resources: container.Resources{}, + Binds: volumeBinds, + NetworkMode: container.NetworkMode(networkMode), + PortBindings: portBindings, + } + + config := container.Config{ + Image: imageTag, + Env: envList, + Labels: labels, + ExposedPorts: exposedPorts, + } + + if app.CPULimit != nil && *app.CPULimit > 0 { + hostConfig.Resources.NanoCPUs = int64(*app.CPULimit * 1e9) + } + if app.MemoryLimit != nil && *app.MemoryLimit > 0 { + hostConfig.Resources.Memory = int64(*app.MemoryLimit) * 1024 * 1024 + } + + resp, err := cli.ContainerCreate(ctx, client.ContainerCreateOptions{ + Name: containerName, + Config: &config, + HostConfig: &hostConfig, + }) + if err != nil { + return fmt.Errorf("failed to create container: %w", err) + } + + _, err = cli.ContainerStart(ctx, resp.ID, client.ContainerStartOptions{}) + if err != nil { + return fmt.Errorf("failed to start container: %w", err) + } + + return nil + + // legacey exec method + // + // + // runArgs := []string{ + // "run", "-d", + // "--name", containerName, + // } + + // restartPolicy := string(app.RestartPolicy) + // if restartPolicy == "" { + // restartPolicy = "unless-stopped" + // } + // runArgs = append(runArgs, "--restart", restartPolicy) + // + // if app.CPULimit != nil && *app.CPULimit > 0 { + // runArgs = append(runArgs, "--cpus", fmt.Sprintf("%.2f", *app.CPULimit)) + // } + // + // if app.MemoryLimit != nil && *app.MemoryLimit > 0 { + // runArgs = append(runArgs, "-m", fmt.Sprintf("%dm", *app.MemoryLimit)) + // } + // + // // Add volumes from the volumes table (user-configurable) + // volumes, err := models.GetVolumesByAppID(app.ID) + // if err == nil { + // for _, vol := range volumes { + // volumeArg := fmt.Sprintf("%s:%s", vol.HostPath, vol.ContainerPath) + // if vol.ReadOnly { + // volumeArg += ":ro" + // } + // runArgs = append(runArgs, "-v", volumeArg) + // } + // } + // + // for key, value := range envVars { + // runArgs = append(runArgs, "-e", fmt.Sprintf("%s=%s", key, value)) + // } + // + // switch app.AppType { + // case models.AppTypeWeb: + // if 
len(domains) > 0 { + // runArgs = append(runArgs, + // "--network", "traefik-net", + // "-l", "traefik.enable=true", + // ) + // + // var hostRules []string + // for _, domain := range domains { + // hostRules = append(hostRules, fmt.Sprintf("Host(`%s`)", domain)) + // } + // hostRule := strings.Join(hostRules, " || ") + // + // runArgs = append(runArgs, + // "-l", fmt.Sprintf("traefik.http.routers.%s.rule=%s", containerName, hostRule), + // "-l", fmt.Sprintf("traefik.http.routers.%s.entrypoints=websecure", containerName), + // "-l", fmt.Sprintf("traefik.http.routers.%s.tls=true", containerName), + // "-l", fmt.Sprintf("traefik.http.routers.%s.tls.certresolver=le", containerName), + // "-l", fmt.Sprintf("traefik.http.services.%s.loadbalancer.server.port=%d", containerName, Port), + // ) + // + // runArgs = append(runArgs, + // + // "-l", fmt.Sprintf("traefik.http.routers.%s-http.rule=%s", containerName, hostRule), + // "-l", fmt.Sprintf("traefik.http.routers.%s-http.entrypoints=web", containerName), + // "-l", fmt.Sprintf("traefik.http.routers.%s-http.middlewares=%s-https-redirect", containerName, containerName), + // + // "-l", fmt.Sprintf("traefik.http.middlewares.%s-https-redirect.redirectscheme.scheme=https", containerName), + // ) + // } else { + // runArgs = append(runArgs, + // "-p", fmt.Sprintf("%d:%d", Port, Port), + // ) + // } + // + // case models.AppTypeService: + // runArgs = append(runArgs, "--network", "traefik-net") + // + // case models.AppTypeDatabase: + // runArgs = append(runArgs, "--network", "traefik-net") + // + // default: + // runArgs = append(runArgs, + // "-p", fmt.Sprintf("%d:%d", Port, Port), + // ) + // } + // + // runArgs = append(runArgs, imageTag) + // + // // ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + // // defer cancel() + // + // cmd := exec.CommandContext(ctx, "docker", runArgs...) 
+ // cmd.Stdout = logfile + // cmd.Stderr = logfile + // + // if err := cmd.Run(); err != nil { + // if ctx.Err() == context.DeadlineExceeded { + // return fmt.Errorf("docker run timed out after 5 minutes") + // } + // exitCode := -1 + // if exitErr, ok := err.(*exec.ExitError); ok { + // exitCode = exitErr.ExitCode() + // } + // return fmt.Errorf("docker run failed with exit code %d: %w", exitCode, err) + // } + // + // return nil +} + type ContainerStatus struct { Name string `json:"name"` Status string `json:"status"` @@ -28,130 +532,163 @@ func GetContainerStatus(containerName string) (*ContainerStatus, error) { }, nil } - cmd := exec.Command("docker", "inspect", containerName, "--format", "{{json .}}") - output, err := cmd.Output() + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + cli, err := client.New(client.FromEnv) if err != nil { - return nil, fmt.Errorf("failed to inspect container: %w", err) + return nil, fmt.Errorf("error creating moby client: %s", err.Error()) } - var inspectData struct { - State struct { - Status string `json:"Status"` - Running bool `json:"Running"` - Paused bool `json:"Paused"` - Health *struct { - Status string `json:"Status"` - } `json:"Health"` - } `json:"State"` - Name string `json:"Name"` + inspectResult, err := cli.ContainerInspect(ctx, containerName, client.ContainerInspectOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to inspect container: %w", err) } - if err := json.Unmarshal(output, &inspectData); err != nil { - return nil, fmt.Errorf("failed to parse inspect output: %w", err) - } + inspectData := inspectResult.Container - uptimeCmd := exec.Command("docker", "inspect", containerName, "--format", "{{.State.StartedAt}}") - uptimeOutput, err := uptimeCmd.Output() uptime := "N/A" - if err == nil { - uptime = strings.TrimSpace(string(uptimeOutput)) + if inspectData.State != nil && inspectData.State.StartedAt != "" { + uptime = inspectData.State.StartedAt } state := "stopped" - if inspectData.State.Running { - state = "running" - } else if inspectData.State.Status == "exited" { - state = "stopped" - } else { - state = inspectData.State.Status + status := "" + if inspectData.State != nil { + status = string(inspectData.State.Status) + if inspectData.State.Running { + state = "running" + } else if inspectData.State.Status == "exited" { + state = "stopped" + } else { + state = string(inspectData.State.Status) + } } healthy := true - if inspectData.State.Health != nil { + if inspectData.State != nil && inspectData.State.Health != nil { healthy = inspectData.State.Health.Status == "healthy" } return &ContainerStatus{ Name: strings.TrimPrefix(inspectData.Name, "/"), - Status: inspectData.State.Status, + Status: status, State: state, Uptime: uptime, Healthy: healthy, }, nil -} - -func StopContainer(containerName string) error { - if !ContainerExists(containerName) { - return fmt.Errorf("container %s does not exist", containerName) - } - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) - defer cancel() - - cmd := exec.CommandContext(ctx, "docker", "stop", containerName) - if err := cmd.Run(); err != nil { - if ctx.Err() == context.DeadlineExceeded { - return fmt.Errorf("docker stop timed out after 2 minutes for container %s", containerName) - } - return fmt.Errorf("failed to stop container: %w", err) - } - - return nil + // legacy exec method + // + // + // cmd := exec.Command("docker", "inspect", containerName, "--format", "{{json .}}") + // output, err := cmd.Output() + // if 
err != nil { + // return nil, fmt.Errorf("failed to inspect container: %w", err) + // } + // + // var inspectData struct { + // State struct { + // Status string `json:"Status"` + // Running bool `json:"Running"` + // Paused bool `json:"Paused"` + // Health *struct { + // Status string `json:"Status"` + // } `json:"Health"` + // } `json:"State"` + // Name string `json:"Name"` + // } + // + // if err := json.Unmarshal(output, &inspectData); err != nil { + // return nil, fmt.Errorf("failed to parse inspect output: %w", err) + // } + // + // uptimeCmd := exec.Command("docker", "inspect", containerName, "--format", "{{.State.StartedAt}}") + // uptimeOutput, err := uptimeCmd.Output() + // uptime := "N/A" + // if err == nil { + // uptime = strings.TrimSpace(string(uptimeOutput)) + // } + // + // state := "stopped" + // if inspectData.State.Running { + // state = "running" + // } else if inspectData.State.Status == "exited" { + // state = "stopped" + // } else { + // state = inspectData.State.Status + // } + // + // healthy := true + // if inspectData.State.Health != nil { + // healthy = inspectData.State.Health.Status == "healthy" + // } + // + // return &ContainerStatus{ + // Name: strings.TrimPrefix(inspectData.Name, "/"), + // Status: inspectData.State.Status, + // State: state, + // Uptime: uptime, + // Healthy: healthy, + // }, nil } -func StartContainer(containerName string) error { +func RecreateContainer(app *models.App) error { + containerName := GetContainerName(app.Name, app.ID) + if !ContainerExists(containerName) { return fmt.Errorf("container %s does not exist", containerName) } ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) defer cancel() - - cmd := exec.CommandContext(ctx, "docker", "start", containerName) - if err := cmd.Run(); err != nil { - if ctx.Err() == context.DeadlineExceeded { - return fmt.Errorf("docker start timed out after 1 minute for container %s", containerName) - } - return fmt.Errorf("failed to start container: %w", err) + cli, err := client.New(client.FromEnv) + if err != nil { + return fmt.Errorf("error creating moby client: %s", err.Error()) } - return nil -} - -func RestartContainer(containerName string) error { - if !ContainerExists(containerName) { - return fmt.Errorf("container %s does not exist", containerName) + inspectResult, err := cli.ContainerInspect(ctx, containerName, client.ContainerInspectOptions{}) + if err != nil { + return fmt.Errorf("failed to get container image: %w", err) } + imageTag := inspectResult.Container.Image - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute) - defer cancel() - - cmd := exec.CommandContext(ctx, "docker", "restart", containerName) - if err := cmd.Run(); err != nil { - if ctx.Err() == context.DeadlineExceeded { - return fmt.Errorf("docker restart timed out after 3 minutes for container %s", containerName) - } - return fmt.Errorf("failed to restart container: %w", err) + port, domains, envVars, err := GetDeploymentConfigForApp(app) + if err != nil { + return fmt.Errorf("failed to get deployment configuration: %w", err) } - return nil -} - -func GetContainerLogs(containerName string, tail int) (string, error) { - if !ContainerExists(containerName) { - return "", fmt.Errorf("container %s does not exist", containerName) + if err := StopRemoveContainer(containerName, nil); err != nil { + return fmt.Errorf("failed to stop/remove container: %w", err) } - tailStr := fmt.Sprintf("%d", tail) - cmd := exec.Command("docker", "logs", "--tail", tailStr, containerName) - output, err := 
cmd.CombinedOutput() - if err != nil { - return "", fmt.Errorf("failed to get container logs: %w", err) + if err := RunContainer(app, imageTag, containerName, domains, port, envVars, nil); err != nil { + return fmt.Errorf("failed to run container: %w", err) } - return string(output), nil -} + return nil -func GetContainerName(appName string, appId int64) string { - return fmt.Sprintf("app-%d", appId) + // legacy exec method + // + // + // cmd := exec.Command("docker", "inspect", containerName, "--format", "{{.Config.Image}}") + // output, err := cmd.Output() + // if err != nil { + // return fmt.Errorf("failed to get container image: %w", err) + // } + // imageTag := strings.TrimSpace(string(output)) + // + // port, domains, envVars, err := GetDeploymentConfigForApp(app) + // if err != nil { + // return fmt.Errorf("failed to get deployment configuration: %w", err) + // } + // + // if err := StopRemoveContainer(containerName, nil); err != nil { + // return fmt.Errorf("failed to stop/remove container: %w", err) + // } + // + // if err := RunContainer(app, imageTag, containerName, domains, port, envVars, nil); err != nil { + // return fmt.Errorf("failed to run container: %w", err) + // } + // + // return nil } diff --git a/server/docker/image.go b/server/docker/image.go new file mode 100644 index 0000000..cff75f9 --- /dev/null +++ b/server/docker/image.go @@ -0,0 +1,133 @@ +package docker + +import ( + "context" + "fmt" + "io" + "os" + "time" + + "github.com/moby/go-archive" + "github.com/moby/moby/client" + "github.com/rs/zerolog/log" +) + +func BuildImage(imageTag, contextPath string, envVars map[string]string, logfile *os.File) error { + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Minute) + defer cancel() + cli, err := client.New(client.FromEnv) + if err != nil { + return fmt.Errorf("error opening moby client: %s", err.Error()) + } + buildCtx, err := archive.TarWithOptions(contextPath, &archive.TarOptions{ + ExcludePatterns: []string{}, + }) + + if err != nil { + return fmt.Errorf("error building build Context") + } + var tags []string + tags = append(tags, imageTag) + env := make(map[string]*string) + for k, v := range envVars { + val := v + env[k] = &val + } + buildOptions := client.ImageBuildOptions{ + Tags: tags, + Remove: true, + BuildArgs: env, + } + + log.Info().Str("image_tag", imageTag).Msg("Building Docker image") + + resp, err := cli.ImageBuild(ctx, buildCtx, buildOptions) + if err != nil { + if ctx.Err() == context.DeadlineExceeded { + return fmt.Errorf("image build timed out after 15 minutes") + } + return err + } + defer resp.Body.Close() + _, err = io.Copy(logfile, resp.Body) + if err != nil { + return err + } + return nil + + // legacy exec method + // + // + // buildArgs := []string{"build", "-t", imageTag} + // + // for key, value := range envVars { + // buildArgs = append(buildArgs, "--build-arg", fmt.Sprintf("%s=%s", key, value)) + // } + // + // buildArgs = append(buildArgs, contextPath) + // + // log.Debug().Strs("build_args", buildArgs).Str("image_tag", imageTag).Msg("Building Docker image") + // + // ctx, cancel := context.WithTimeout(context.Background(), 15*time.Minute) + // defer cancel() + // + // cmd := exec.CommandContext(ctx, "docker", buildArgs...) 
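+	// NOTE: unlike this legacy CLI path, which wrote docker's plain-text output
+	// to logfile, the moby ImageBuild call above streams the daemon's JSON
+	// messages, e.g. (illustrative values):
+	//
+	//	{"stream":"Step 1/4 : FROM node:20-alpine\n"}
+	//	{"status":"Pull complete","id":"9b7b1f2c"}
+	//
+	// parseDockerLog in websockets/logWatcher.go unwraps these lines into
+	// readable text for the deployment log stream.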
+ // cmd.Stdout = logfile + // cmd.Stderr = logfile + // + // if err := cmd.Run(); err != nil { + // if ctx.Err() == context.DeadlineExceeded { + // return fmt.Errorf("docker build timed out after 15 minutes") + // } + // exitCode := -1 + // if exitErr, ok := err.(*exec.ExitError); ok { + // exitCode = exitErr.ExitCode() + // } + // return fmt.Errorf("docker build failed with exit code %d: %w", exitCode, err) + // } + // return nil +} + +func PullDockerImage(imageName string, logfile *os.File) error { + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Minute) + defer cancel() + + cli, err := client.New(client.FromEnv) + if err != nil { + return fmt.Errorf("error opening moby client: %s", err.Error()) + } + + log.Debug().Str("image_name", imageName).Msg("pulling image") + resp, err := cli.ImagePull(ctx, imageName, client.ImagePullOptions{}) + if err != nil { + if ctx.Err() == context.DeadlineExceeded { + return fmt.Errorf("image pull timed out after 15 minutes") + } + return err + } + defer resp.Close() + _, err = io.Copy(logfile, resp) + if err != nil { + return err + } + return nil + + // legacy exec method + // + // + // pullCmd := exec.CommandContext(ctx, "docker", "pull", imageName) + // pullCmd.Stdout = logfile + // pullCmd.Stderr = logfile + // + // if err := pullCmd.Run(); err != nil { + // if ctx.Err() == context.DeadlineExceeded { + // return fmt.Errorf("docker pull timed out after 15 minutes for image %s", imageName) + // } + // exitCode := -1 + // if exitErr, ok := err.(*exec.ExitError); ok { + // exitCode = exitErr.ExitCode() + // } + // return fmt.Errorf("docker pull failed with exit code %d: %w", exitCode, err) + // } + // return nil +} diff --git a/server/docker/runtime.go b/server/docker/runtime.go new file mode 100644 index 0000000..1cdc3ff --- /dev/null +++ b/server/docker/runtime.go @@ -0,0 +1 @@ +package docker diff --git a/server/docker/utils.go b/server/docker/utils.go new file mode 100644 index 0000000..3ff601e --- /dev/null +++ b/server/docker/utils.go @@ -0,0 +1,36 @@ +package docker + +import ( + "fmt" + + "github.com/corecollectives/mist/models" +) + +func GetDeploymentConfigForApp(app *models.App) (int, []string, map[string]string, error) { + port := 3000 + if app.Port != nil { + port = int(*app.Port) + } + + domains, err := models.GetDomainsByAppID(app.ID) + if err != nil && err.Error() != "sql: no rows in result set" { + return 0, nil, nil, fmt.Errorf("get domains failed: %w", err) + } + + var domainStrings []string + for _, d := range domains { + domainStrings = append(domainStrings, d.Domain) + } + + envs, err := models.GetEnvVariablesByAppID(app.ID) + if err != nil && err.Error() != "sql: no rows in result set" { + return 0, nil, nil, fmt.Errorf("get env variables failed: %w", err) + } + + envMap := make(map[string]string) + for _, env := range envs { + envMap[env.Key] = env.Value + } + + return port, domainStrings, envMap, nil +} diff --git a/server/go.mod b/server/go.mod index 735abfe..f09e037 100644 --- a/server/go.mod +++ b/server/go.mod @@ -18,6 +18,7 @@ require ( github.com/cloudflare/circl v1.6.1 // indirect github.com/containerd/errdefs v1.0.0 // indirect github.com/containerd/errdefs/pkg v0.3.0 // indirect + github.com/containerd/log v0.1.0 // indirect github.com/cyphar/filepath-securejoin v0.6.1 // indirect github.com/distribution/reference v0.6.0 // indirect github.com/docker/go-connections v0.6.0 // indirect @@ -30,16 +31,23 @@ require ( github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect 
github.com/go-ole/go-ole v1.2.6 // indirect + github.com/klauspost/compress v1.18.2 // indirect github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/kevinburke/ssh_config v1.4.0 // indirect github.com/klauspost/cpuid/v2 v2.3.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.19 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/moby/go-archive v0.2.0 // indirect github.com/moby/moby/api v1.52.0 // indirect github.com/moby/moby/client v0.2.1 // indirect + github.com/moby/patternmatcher v0.6.0 // indirect + github.com/moby/sys/sequential v0.6.0 // indirect + github.com/moby/sys/user v0.4.0 // indirect + github.com/moby/sys/userns v0.1.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.1 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect github.com/pjbgf/sha1cd v0.5.0 // indirect github.com/sergi/go-diff v1.4.0 // indirect github.com/stretchr/testify v1.11.1 // indirect diff --git a/server/go.sum b/server/go.sum index 20093ef..00b31ba 100644 --- a/server/go.sum +++ b/server/go.sum @@ -8,6 +8,8 @@ github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cyphar/filepath-securejoin v0.6.1 h1:5CeZ1jPXEiYt3+Z6zqprSAgSWiggmpVyciv8syjIpVE= github.com/cyphar/filepath-securejoin v0.6.1/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc= @@ -44,6 +46,8 @@ github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8J github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk= +github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= github.com/kevinburke/ssh_config v1.4.0 h1:6xxtP5bZ2E4NF5tuQulISpTO2z8XbtH8cg1PWkxoFkQ= github.com/kevinburke/ssh_config v1.4.0/go.mod h1:q2RIzfka+BXARoNexmF9gkxEX7DmvbW9P4hIVx2Kg4M= github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= @@ -60,10 +64,20 @@ github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuE github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/go-archive v0.2.0 h1:zg5QDUM2mi0JIM9fdQZWC7U8+2ZfixfTYoHL7rWUcP8= +github.com/moby/go-archive v0.2.0/go.mod h1:mNeivT14o8xU+5q1YnNrkQVpK+dnNe/K6fHqnTg4qPU= github.com/moby/moby/api v1.52.0 h1:00BtlJY4MXkkt84WhUZPRqt5TvPbgig2FZvTbe3igYg= github.com/moby/moby/api v1.52.0/go.mod 
h1:8mb+ReTlisw4pS6BRzCMts5M49W5M7bKt1cJy/YbAqc= github.com/moby/moby/client v0.2.1 h1:1Grh1552mvv6i+sYOdY+xKKVTvzJegcVMhuXocyDz/k= github.com/moby/moby/client v0.2.1/go.mod h1:O+/tw5d4a1Ha/ZA/tPxIZJapJRUS6LNZ1wiVRxYHyUE= +github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= +github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= +github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= +github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs= +github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= +github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= @@ -80,6 +94,10 @@ github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw= github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= @@ -107,6 +125,7 @@ golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -116,6 +135,7 @@ golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/server/utils/traefik.go b/server/utils/traefik.go index 689bb80..563d8d9 100644 --- a/server/utils/traefik.go +++ b/server/utils/traefik.go @@ -176,6 +176,7 @@ func ChangeLetsEncryptEmail(email string) error { func RestartTraefik() error { log.Info().Msg("Restarting Traefik container...") + // NOTE: we still use the exec method here because moby doesn't support docker-compose for now cmd := exec.Command("docker", "compose", "-f", "/opt/mist/traefik-compose.yml", "restart", "traefik") output, err := cmd.CombinedOutput() diff --git a/server/websockets/containerLogs.go b/server/websockets/containerLogs.go index f3b0c5c..cace404 100644 --- a/server/websockets/containerLogs.go +++ b/server/websockets/containerLogs.go @@ -1,17 +1,18 @@ package websockets import ( - "bufio" "context" + "encoding/binary" "fmt" + "io" "net/http" - "os/exec" "strconv" "time" "github.com/corecollectives/mist/docker" "github.com/corecollectives/mist/models" "github.com/gorilla/websocket" + "github.com/moby/moby/client" "github.com/rs/zerolog/log" ) @@ -104,43 +105,97 @@ func ContainerLogsHandler(w http.ResponseWriter, r *http.Request) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - logChan := make(chan string, 100) + type logMessage struct { + line string + streamType string + } + + logChan := make(chan logMessage, 100) errChan := make(chan error, 1) go func() { defer close(logChan) - cmd := exec.CommandContext(ctx, "sh", "-c", - fmt.Sprintf("docker logs -f --tail 100 %s 2>&1", containerName)) - - stdout, err := cmd.StdoutPipe() + cli, err := client.New(client.FromEnv) if err != nil { - errChan <- fmt.Errorf("failed to create stdout pipe: %w", err) + errChan <- fmt.Errorf("failed to create docker client: %w", err) return } + defer cli.Close() + + options := client.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + Follow: true, + Tail: "100", + Timestamps: false, + } - if err := cmd.Start(); err != nil { - errChan <- fmt.Errorf("failed to start docker logs: %w", err) + logReader, err := cli.ContainerLogs(ctx, containerName, options) + if err != nil { + errChan <- fmt.Errorf("failed to get container logs: %w", err) return } + defer logReader.Close() - scanner := bufio.NewScanner(stdout) - scanner.Buffer(make([]byte, 64*1024), 1024*1024) - - for scanner.Scan() { - line := scanner.Text() + buf := make([]byte, 8) + for { select { case <-ctx.Done(): return - case logChan <- line: + default: } - } - if err := scanner.Err(); err != nil { - errChan <- fmt.Errorf("scanner error: %w", err) - } + _, err := io.ReadFull(logReader, buf) + if err != nil { + if err == io.EOF { + return + } + errChan <- fmt.Errorf("failed to read log header: %w", err) + return + } - cmd.Wait() + streamType := "stdout" + if buf[0] == 2 { + streamType = "stderr" + } + + payloadSize := binary.BigEndian.Uint32(buf[4:8]) + + payload := make([]byte, payloadSize) + _, err = io.ReadFull(logReader, payload) + if err != nil { + if err == io.EOF { + return + } + errChan <- fmt.Errorf("failed to read log payload: %w", err) + return + } + + lines := string(payload) + currentLine := "" + for i := 0; i < len(lines); i++ { + if lines[i] == '\n' { + if currentLine != "" { + select { + case <-ctx.Done(): + return + case logChan <- logMessage{line: currentLine, streamType: streamType}: + } + } + currentLine = "" + } else { + currentLine += string(lines[i]) + } + 
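+			// NOTE: lines are split on '\n' within each multiplexed frame, so a log
+			// line that spans two frames is delivered to the client as two messages.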
} + if currentLine != "" { + select { + case <-ctx.Done(): + return + case logChan <- logMessage{line: currentLine, streamType: streamType}: + } + } + } }() go func() { @@ -190,7 +245,7 @@ func ContainerLogsHandler(w http.ResponseWriter, r *http.Request) { }) return - case line, ok := <-logChan: + case msg, ok := <-logChan: if !ok { conn.WriteJSON(ContainerLogsEvent{ Type: "end", @@ -206,7 +261,8 @@ func ContainerLogsHandler(w http.ResponseWriter, r *http.Request) { Type: "log", Timestamp: time.Now().Format(time.RFC3339), Data: map[string]interface{}{ - "line": line, + "line": msg.line, + "stream": msg.streamType, }, } diff --git a/server/websockets/logWatcher.go b/server/websockets/logWatcher.go index a116631..bf3f824 100644 --- a/server/websockets/logWatcher.go +++ b/server/websockets/logWatcher.go @@ -3,11 +3,24 @@ package websockets import ( "bufio" "context" + "encoding/json" "io" "os" + "strings" "time" ) +type DockerLogEntry struct { + Stream string `json:"stream"` + Aux map[string]interface{} `json:"aux,omitempty"` + Error string `json:"error,omitempty"` + ErrorDetail map[string]interface{} `json:"errorDetail,omitempty"` + Status string `json:"status,omitempty"` + ID string `json:"id,omitempty"` + Progress string `json:"progress,omitempty"` + ProgressDetail map[string]interface{} `json:"progressDetail,omitempty"` +} + func WatcherLogs(ctx context.Context, filePath string, send chan<- string) error { file, err := os.Open(filePath) if err != nil { @@ -41,8 +54,54 @@ func WatcherLogs(ctx context.Context, filePath string, send chan<- string) error } if len(line) > 0 { - send <- line + processedLine := parseDockerLog(line) + if processedLine != "" { + send <- processedLine + } + } + } + } +} + +func parseDockerLog(line string) string { + trimmed := strings.TrimSpace(line) + if !strings.HasPrefix(trimmed, "{") { + return line + } + + var dockerLog DockerLogEntry + if err := json.Unmarshal([]byte(trimmed), &dockerLog); err != nil { + return line + } + + if dockerLog.Error != "" { + return dockerLog.Error + } + + if dockerLog.Stream != "" { + return dockerLog.Stream + } + + if dockerLog.Status != "" { + switch dockerLog.Status { + case "Downloading", "Extracting", "Waiting", "Verifying Checksum": + return "" + case "Pull complete", "Download complete", "Already exists": + if dockerLog.ID != "" { + return dockerLog.Status + ": " + dockerLog.ID } + return dockerLog.Status + default: + if dockerLog.ID != "" { + return dockerLog.Status + ": " + dockerLog.ID + } + return dockerLog.Status } } + + if dockerLog.Aux != nil { + return "" + } + + return line } diff --git a/server/websockets/statusWatcher.go b/server/websockets/statusWatcher.go index bbb68aa..8e517e4 100644 --- a/server/websockets/statusWatcher.go +++ b/server/websockets/statusWatcher.go @@ -2,6 +2,7 @@ package websockets import ( "context" + "strings" "time" "github.com/corecollectives/mist/models" @@ -9,7 +10,7 @@ import ( ) type DeploymentEvent struct { - Type string `json:"type"` // "log", "status", "progress", "error" + Type string `json:"type"` Timestamp time.Time `json:"timestamp"` Data interface{} `json:"data"` } @@ -25,9 +26,43 @@ type StatusUpdate struct { type LogUpdate struct { Line string `json:"line"` + Stream string `json:"stream,omitempty"` Timestamp time.Time `json:"timestamp"` } +func DetectStreamType(line string) string { + lineLower := strings.ToLower(line) + + stderrPatterns := []string{ + "error:", + "err:", + "fatal:", + "panic:", + "warning:", + "warn:", + "failed", + "failure", + "exception:", + "traceback", + 
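+		// keep new patterns lowercase; the input line is lowercased before the
+		// substring checks.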
"stack trace", + " err ", + "[error]", + "[err]", + "[fatal]", + "[panic]", + "[warning]", + "[warn]", + } + + for _, pattern := range stderrPatterns { + if strings.Contains(lineLower, pattern) { + return "stderr" + } + } + + return "stdout" +} + func WatchDeploymentStatus(ctx context.Context, depID int64, events chan<- DeploymentEvent) { ticker := time.NewTicker(500 * time.Millisecond) defer ticker.Stop()