diff --git a/controllers/reconcile.go b/controllers/reconcile.go index 74df20f2..2f0a170c 100644 --- a/controllers/reconcile.go +++ b/controllers/reconcile.go @@ -82,6 +82,10 @@ func (r *FrontendReconciliation) run() error { } } + if err := r.createUploadAssetsJob(); err != nil { + return err + } + if err := r.createFrontendIngress(); err != nil { return err } @@ -686,6 +690,73 @@ func (r *FrontendReconciliation) manageExistingJob() (bool, error) { return true, nil } +func (r *FrontendReconciliation) populateAssetUploadContainer(j *batchv1.Job) error { + // get minio pod info for the ephemeral environment + nn := types.NamespacedName{ + Name: "minio", + Namespace: r.Frontend.Namespace, + } + minioPod := &v1.Pod{} + err := r.Client.Get(r.Ctx, nn, minioPod) + if err != nil { + return err + } + // Here we will need the arguments for the upload job, like S3 secrets, bucket names, etc. + frontendPaths := strings.Join(r.Frontend.Spec.Frontend.Paths[:], ",") + uploadCommand := fmt.Sprintf(`echo "uploading assets to %s"`, frontendPaths) + if r.Frontend.Spec.Image != "" { + assetUploadContainer := v1.Container{ + Name: "s3-asset-upload", + // we should have the valpop CLI ready in the frontend container + Image: r.Frontend.Spec.Image, + // Run the upload script + Command: []string{"/bin/bash", "-c", uploadCommand}, + } + // add the container to the spec containers + j.Spec.Template.Spec.Containers = []v1.Container{assetUploadContainer} + + } + return nil +} + +func (r *FrontendReconciliation) createUploadAssetsJob() error { + jobName := r.Frontend.Name + "-upload-assets" + + nn := types.NamespacedName{ + Name: jobName, + Namespace: r.Frontend.Namespace, + } + + j := &batchv1.Job{} + j.SetName(jobName) + j.SetNamespace(r.Frontend.Namespace) + labels := r.Frontend.GetLabels() + labeler := utils.GetCustomLabeler(labels, nn, r.Frontend) + labeler(j) + + j.SetOwnerReferences([]metav1.OwnerReference{r.Frontend.MakeOwnerReference()}) + + j.Spec.Template.Spec.RestartPolicy = v1.RestartPolicyNever + + 
j.Spec.Completions = utils.Int32Ptr(1) + + // Set the frontend image annotation + annotations := j.Spec.Template.ObjectMeta.Annotations + if annotations == nil { + annotations = make(map[string]string) + } + annotations["frontend-image"] = r.Frontend.Spec.Image + annotations["kube-linter.io/ignore-all"] = "we don't need no any checking" + + j.Spec.Template.ObjectMeta.SetAnnotations(annotations) + + err := r.populateAssetUploadContainer(j) + if err != nil { + return err + } + return r.Client.Create(r.Ctx, j) +} + // createOrUpdateCacheBustJob will create a new job if it doesn't exist // If it does exist and is from the current frontend image it will return // If it does exist and is not from the current frontend image it will delete it and create a new one diff --git a/examples/landing.yaml b/examples/landing.yaml index cca9c453..fe338071 100644 --- a/examples/landing.yaml +++ b/examples/landing.yaml @@ -14,7 +14,7 @@ spec: frontend: paths: - /apps/landing - image: quay.io/redhat-cloud-services/landing + image: quay.io/redhat-services-prod/rh-platform-experien-tenant/landing-page-frontend:ee90e57 module: manifestLocation: "/apps/landing/fed-mods.json" modules: diff --git a/examples/minio.yaml b/examples/minio.yaml new file mode 100644 index 00000000..132037b0 --- /dev/null +++ b/examples/minio.yaml @@ -0,0 +1,33 @@ + +# Deploys a new MinIO Pod into the metadata.namespace Kubernetes namespace +# +# The `spec.containers[0].args` contains the command run on the pod +# The `/data` directory corresponds to the `spec.containers[0].volumeMounts[0].mountPath` +# That mount path corresponds to a Kubernetes HostPath which binds `/data` to a local drive or volume on the worker node where the pod runs +# +apiVersion: v1 +kind: Pod +metadata: + labels: + app: minio + name: minio + namespace: default # Change this value to match the namespace metadata.name +spec: + containers: + - name: minio + image: quay.io/minio/minio:latest + command: + - /bin/bash + - -c + args: + - 
minio server /data --console-address :9090 + volumeMounts: + - mountPath: /data + name: localvolume # Corresponds to the `spec.volumes` Persistent Volume + # nodeSelector: + # kubernetes.io/hostname: kubealpha.local # Specify a node label associated with the Worker Node on which you want to deploy the pod. + volumes: + - name: localvolume + hostPath: # MinIO generally recommends using locally-attached volumes + path: /mnt/disk1/data # Specify a path to a local drive or volume on the Kubernetes worker node + type: DirectoryOrCreate # The path to the last directory must exist \ No newline at end of file