4 changes: 3 additions & 1 deletion pkg/scheduler/api/resource_info/resource_info.go
@@ -62,7 +62,9 @@ func ResourceFromResourceList(rList v1.ResourceList) *Resource {
 		default:
 			if IsMigResource(rName) {
 				r.scalarResources[rName] += rQuant.Value()
-			} else if k8s_internal.IsScalarResourceName(rName) || rName == v1.ResourceEphemeralStorage || rName == v1.ResourceStorage {
+			} else if rName == v1.ResourceEphemeralStorage || rName == v1.ResourceStorage {
 				r.scalarResources[rName] += rQuant.Value()
+			} else if k8s_internal.IsScalarResourceName(rName) {
+				r.scalarResources[rName] += rQuant.MilliValue()
 			}
 		}
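Note: before this change the two files disagreed on units. `resource_info.go` stored generic scalar resources with `Value()` while `resource_requirment.go` (below) stored them with `MilliValue()`; after the change both store generic scalars as milli-units and storage resources as plain bytes. A minimal sketch (mine, not part of the diff) of why `Value()` is the right accessor for storage quantities:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	q := resource.MustParse("20Gi")
	fmt.Println(q.Value())      // 21474836480 (bytes)
	fmt.Println(q.MilliValue()) // 21474836480000 (milli-bytes: 1000x larger, and can overflow int64 for very large quantities)
}
```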
9 changes: 7 additions & 2 deletions pkg/scheduler/api/resource_info/resource_requirment.go
@@ -60,8 +60,10 @@ func RequirementsFromResourceList(rl v1.ResourceList) *ResourceRequirements {
 		default:
 			if IsMigResource(rName) {
 				r.MigResources()[rName] += rQuant.Value()
-			} else if k8s_internal.IsScalarResourceName(rName) || rName == v1.ResourceEphemeralStorage || rName == v1.ResourceStorage {
+			} else if k8s_internal.IsScalarResourceName(rName) {
 				r.scalarResources[rName] += rQuant.MilliValue()
+			} else if rName == v1.ResourceEphemeralStorage || rName == v1.ResourceStorage {
+				r.scalarResources[rName] += rQuant.Value()
 			}
 		}
 	}
@@ -151,7 +153,10 @@ func (r *ResourceRequirements) DetailedString() string {
 	messageBuilder.WriteString(r.String())
 
 	for rName, rQuant := range r.scalarResources {
-		messageBuilder.WriteString(fmt.Sprintf(", %s: %v", rName, rQuant))
+		if rName == v1.ResourceEphemeralStorage || rName == v1.ResourceStorage {
+			rQuant = rQuant / int64(MemoryToGB) // convert from bytes to GB
+		}
+		messageBuilder.WriteString(fmt.Sprintf(", %s: %v (GB)", rName, rQuant))
 	}
 	for migName, migQuant := range r.MigResources() {
 		messageBuilder.WriteString(fmt.Sprintf(", mig %s: %d", migName, migQuant))
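The `DetailedString` hunk divides the stored byte count by `MemoryToGB` before printing. A worked sketch of that arithmetic, assuming `MemoryToGB` is 10^9 (bytes per GB); the real constant is defined elsewhere in the `resource_info` package:

```go
package main

import "fmt"

// Assumption: MemoryToGB is the bytes-per-GB factor (1e9).
const MemoryToGB = 1_000_000_000

func main() {
	stored := int64(25_000_000_000) // a 25G ephemeral-storage request, stored via rQuant.Value()
	fmt.Printf(", ephemeral-storage: %v (GB)\n", stored/int64(MemoryToGB)) // -> ", ephemeral-storage: 25 (GB)"
}
```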
9 changes: 8 additions & 1 deletion pkg/scheduler/k8s_internal/predicates/maxNodeResources.go
@@ -69,8 +69,15 @@ func (mnr *MaxNodeResourcesPredicate) PreFilter(_ context.Context, _ ksf.CycleSt
 	for rName, rQuant := range podInfo.ResReq.ScalarResources() {
 		rrQuant, found := mnr.maxResources.ScalarResources()[rName]
 		if !found || rQuant > rrQuant {
+			units := ""
+			maxVal := float64(rrQuant)
+			// Humanize ephemeral / storage values: rrQuant is in bytes, convert to GB
+			if rName == v1.ResourceEphemeralStorage || rName == v1.ResourceStorage {
+				units = "GB"
+				maxVal = float64(rrQuant) / resource_info.MemoryToGB
+			}
 			return nil, ksf.NewStatus(ksf.Unschedulable,
-				mnr.buildUnschedulableMessage(podInfo, string(rName), float64(rrQuant), ""))
+				mnr.buildUnschedulableMessage(podInfo, string(rName), maxVal, units))
 		}
 	}
 
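To see the number the predicate emits for the storage case, here is a sketch of the conversion under the same `MemoryToGB == 1e9` assumption; the test below expects it rendered as "21.474 GB":

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	rrQuant := resource.MustParse("20Gi").Value() // biggest node's allocatable: 21474836480 bytes
	maxVal := float64(rrQuant) / 1e9              // assumed MemoryToGB == 1e9
	fmt.Println(maxVal)                           // 21.47483648 -> rendered as "21.474 GB" in the message
}
```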
48 changes: 48 additions & 0 deletions pkg/scheduler/k8s_internal/predicates/maxNodeResources_test.go
@@ -247,6 +247,54 @@ func Test_podToMaxNodeResourcesFiltering(t *testing.T) {
"The pod n1/name1 requires GPU: 0.5, CPU: 0 (cores), memory: 0 (GB). No node in the default node-pool has GPU resources"),
},
},
{
"not enough ephemeral storage",
args{
nodesMap: map[string]*node_info.NodeInfo{
"n1": {
Allocatable: resource_info.ResourceFromResourceList(v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("200Mi"),
resource_info.GPUResourceName: resource.MustParse("1"),
"kai.scheduler/r1": resource.MustParse("2"),
v1.ResourceEphemeralStorage: resource.MustParse("10Gi"),
}),
},
"n2": {
Allocatable: resource_info.ResourceFromResourceList(v1.ResourceList{
v1.ResourceCPU: resource.MustParse("500m"),
v1.ResourceMemory: resource.MustParse("200Mi"),
resource_info.GPUResourceName: resource.MustParse("1"),
"kai.scheduler/r1": resource.MustParse("2"),
v1.ResourceEphemeralStorage: resource.MustParse("20Gi"),
}),
},
},
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "name1",
Namespace: "n1",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "c1",
Resources: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceEphemeralStorage: resource.MustParse("25G"),
},
},
},
},
},
},
},
expected{
ksf.NewStatus(ksf.Unschedulable,
"The pod n1/name1 requires GPU: 0, CPU: 0 (cores), memory: 0 (GB), ephemeral-storage: 25 (GB). "+
"Max ephemeral-storage resources available in a single node in the default node-pool is topped at 21.474 GB"),
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
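One detail the new case leans on: the pod requests "25G" (decimal suffix) while the largest node offers "20Gi" (binary suffix), so the request genuinely exceeds every node. A quick check of the two quantities:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	request := resource.MustParse("25G").Value()  // 25 * 10^9 = 25000000000 bytes
	largest := resource.MustParse("20Gi").Value() // 20 * 2^30 = 21474836480 bytes
	fmt.Println(request > largest)                // true: no node can hold the request
}
```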