112 changes: 61 additions & 51 deletions pkg/firmament/resource_desc.pb.go

Some generated files are not rendered by default.

2 changes: 2 additions & 0 deletions pkg/firmament/resource_desc.proto
@@ -80,4 +80,6 @@ message ResourceDescriptor {
uint64 trace_machine_id = 21;
// Resource labels
repeated Label labels = 32;
// Max pods allowed per node
uint64 max_pods = 33;
}
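For reference, a minimal sketch of how the new field surfaces in the generated Go API once resource_desc.pb.go is regenerated. It assumes the usual protoc-gen-go conventions: a MaxPods struct field (as used in the nodewatcher change below) and a nil-safe GetMaxPods getter; the value 110 is only illustrative.

package main

import (
	"fmt"

	"github.com/kubernetes-sigs/poseidon/pkg/firmament"
)

func main() {
	rd := &firmament.ResourceDescriptor{
		// New field added by this change: maximum pods allowed on the node.
		MaxPods: 110,
	}
	// Generated getters are nil-safe, so this is the usual read path.
	fmt.Println(rd.GetMaxPods())
}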
3 changes: 3 additions & 0 deletions pkg/k8sclient/nodewatcher.go
@@ -106,6 +106,7 @@ func (nw *NodeWatcher) parseNode(node *v1.Node, phase NodePhase) *Node {
memCap, _ := memCapQuantity.AsInt64()
memAllocQuantity := node.Status.Allocatable[v1.ResourceMemory]
memAlloc, _ := memAllocQuantity.AsInt64()
podAllocQuantity := node.Status.Allocatable[v1.ResourcePods]
return &Node{
Hostname: node.Name,
Phase: phase,
@@ -115,6 +116,7 @@ func (nw *NodeWatcher) parseNode(node *v1.Node, phase NodePhase) *Node {
CPUAllocatable: cpuAllocQuantity.MilliValue(),
MemCapacityKb: memCap / bytesToKb,
MemAllocatableKb: memAlloc / bytesToKb,
PodAllocatable: podAllocQuantity.Value(),
Labels: node.Labels,
Annotations: node.Annotations,
}
@@ -302,6 +304,7 @@ func (nw *NodeWatcher) createResourceTopologyForNode(node *Node) *firmament.Reso
RamCap: uint64(node.MemCapacityKb),
CpuCores: float32(node.CPUCapacity),
},
MaxPods: uint64(node.PodAllocatable),
},
}
ResIDToNode[resUUID] = node.Hostname
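As a side note on the parseNode change above: it indexes node.Status.Allocatable[v1.ResourcePods] directly and discards the presence flag. A minimal standalone sketch of the same lookup with an explicit check; the standard k8s.io import paths and the zero fallback are assumptions, not part of this change.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// podAllocatable returns the node's allocatable pod count, falling back
// to zero if the key is missing from the resource list.
func podAllocatable(node *v1.Node) int64 {
	q, ok := node.Status.Allocatable[v1.ResourcePods]
	if !ok {
		return 0
	}
	return q.Value()
}

func main() {
	node := &v1.Node{}
	node.Status.Allocatable = v1.ResourceList{
		v1.ResourcePods: resource.MustParse("110"),
	}
	fmt.Println(podAllocatable(node)) // prints 110
}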
1 change: 1 addition & 0 deletions pkg/k8sclient/types.go
@@ -71,6 +71,7 @@ type Node struct {
CPUAllocatable int64
MemCapacityKb int64
MemAllocatableKb int64
PodAllocatable int64
Labels map[string]string
Annotations map[string]string
}
2 changes: 1 addition & 1 deletion test/e2e/framework/framework.go
@@ -164,7 +164,7 @@ func (f *Framework) AfterEach() {
f.FetchLogsFromFirmament(f.TestingNS)
f.FetchLogsFromPoseidon(f.TestingNS)
Logf("Delete namespace called")
err = f.deleteNamespace(f.TestingNS)
err = f.DeleteNamespace(f.TestingNS)
Expect(err).NotTo(HaveOccurred())

err = f.DeleteDeploymentIfExist(f.TestingNS, poseidonDeploymentName)
4 changes: 2 additions & 2 deletions test/e2e/framework/namespace.go
@@ -62,14 +62,14 @@ func (f *Framework) deleteNamespaceIfExist(nsName string) error {
} else {
//delete the namespace as it exist
Logf("Deleting %v namespace as it exists", nsName)
if err = f.deleteNamespace(nsName); err != nil {
if err = f.DeleteNamespace(nsName); err != nil {
return fmt.Errorf("unable to delete %v namespace, error: %v occurred", nsName, err)
}
}
return nil
}

func (f *Framework) deleteNamespace(nsName string) error {
func (f *Framework) DeleteNamespace(nsName string) error {

if err := f.ClientSet.CoreV1().Namespaces().Delete(nsName, nil); err != nil {
return err
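The rename from deleteNamespace to DeleteNamespace exports the method, which is what lets the new integration test, which lives outside the framework package, call it. A minimal sketch of that cross-package usage; the package name, helper name, and namespace value are only illustrative.

package e2e // package name illustrative

import (
	"github.com/kubernetes-sigs/poseidon/test/e2e/framework"
)

// cleanupTestNamespace is a hypothetical helper showing why the method
// must be exported: callers outside package framework can now delete a
// test namespace directly.
func cleanupTestNamespace(f *framework.Framework) error {
	return f.DeleteNamespace("max-pods-test")
}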
88 changes: 87 additions & 1 deletion test/e2e/poseidon_integration.go
@@ -20,6 +20,8 @@ import (
"fmt"
"math/rand"
"os"
"strconv"
"time"

"github.com/golang/glog"
"github.com/kubernetes-sigs/poseidon/test/e2e/framework"
@@ -32,7 +34,6 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"time"
)

var _ = Describe("Poseidon", func() {
@@ -483,6 +484,91 @@ var _ = Describe("Poseidon", func() {
Expect(err).To(HaveOccurred())
})

// Test whether the kubelet flag max_pods is respected.
// Currently the test uses the node's default max_pods value, i.e., we don't manually set the kubelet flag max_pods,
// and it only tests one of the nodes.
It("validates MaxPods limit number of pods that are allowed to run [Slow]", func() {

nodeList, err := clientset.CoreV1().Nodes().List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
maxPodNamespace := "max-pods-test"
_, err = clientset.CoreV1().Namespaces().Create(&v1.Namespace{
ObjectMeta: metav1.ObjectMeta{Name: maxPodNamespace},
})
if err != nil {
glog.Errorf("unable to create namespace %s, error: %v occurred", maxPodNamespace, err)
}
for idx, node := range nodeList.Items {
framework.Logf("Node: %v", node.Name)
podAlloc, found := node.Status.Allocatable[v1.ResourcePods]
Expect(found).To(BeTrue())
podList, err := clientset.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
podCount := len(podList.Items)

name := "max-pods-" + strconv.Itoa(idx)
replicas := int32(podAlloc.Value())
if podCount < 1 {
replicas += 5
}
_, err = clientset.ExtensionsV1beta1().Deployments(maxPodNamespace).Create(&v1beta1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"app": "nginx"},
Name: name,
},
Spec: v1beta1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"app": "nginx", "name": "test-max-pods"},
},
Replicas: &replicas,
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"name": "test-max-pods", "app": "nginx", "schedulerName": "poseidon"},
Name: name,
},
Spec: v1.PodSpec{
SchedulerName: "poseidon",
Containers: []v1.Container{
{
Name: fmt.Sprintf("container-%s", name),
Image: "nginx:latest",
ImagePullPolicy: "IfNotPresent",
},
},
},
},
},
})
Expect(err).NotTo(HaveOccurred())
// time for pods to be Running
time.Sleep(3 * time.Minute)
podList, err = clientset.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
var podRunningCount, podPendingCount, podFailedCount int64 = 0, 0, 0
for _, pod := range podList.Items {
if pod.Status.Phase == v1.PodRunning {
podRunningCount++
} else if pod.Status.Phase == v1.PodPending {
podPendingCount++
} else if pod.Status.Phase == v1.PodFailed {
podFailedCount++
}
}
Expect(podRunningCount).To(Equal(podAlloc.Value()))
glog.Infof("Pods: %d Running, %d Pending, %d Failed", podRunningCount, podPendingCount, podFailedCount)

framework.Logf("Time to clean up the ns [%s] now...", maxPodNamespace)
if err = f.DeleteNamespace(maxPodNamespace); err != nil {
glog.Errorf("unable to delete %v namespace, error: %v occurred", maxPodNamespace, err)
}
// time for Terminating pods to disappear
// TODO: handle the deletion better; deleting the deployment directly did not work reliably.
time.Sleep(1 * time.Minute)
// Only test one node for now since the test takes a long time.
break
}
})
// Test Nodes does not have any label, hence it should be impossible to schedule Pod with
// nonempty Selector set.
/*
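On the TODO and the fixed sleeps in the MaxPods test above: one possible direction, not part of this change, is to poll for the expected pod phases with the apimachinery wait helpers instead of sleeping. A rough sketch under those assumptions; the interval, timeout, package name, and helper name are illustrative.

package e2e // package name illustrative

import (
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForRunningPods polls until at least `want` pods report Running,
// rather than sleeping for a fixed three minutes.
func waitForRunningPods(clientset kubernetes.Interface, want int64) error {
	return wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) {
		podList, err := clientset.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
		if err != nil {
			return false, err
		}
		var running int64
		for _, pod := range podList.Items {
			if pod.Status.Phase == v1.PodRunning {
				running++
			}
		}
		return running >= want, nil
	})
}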