| /* |
| Copyright 2016 The Kubernetes Authors. |
| |
| Licensed under the Apache License, Version 2.0 (the "License"); |
| you may not use this file except in compliance with the License. |
| You may obtain a copy of the License at |
| |
| http://www.apache.org/licenses/LICENSE-2.0 |
| |
| Unless required by applicable law or agreed to in writing, software |
| distributed under the License is distributed on an "AS IS" BASIS, |
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| See the License for the specific language governing permissions and |
| limitations under the License. |
| */ |
| |
| package priorities |
| |
| import ( |
| "reflect" |
| "testing" |
| |
| "k8s.io/api/core/v1" |
| "k8s.io/apimachinery/pkg/api/resource" |
| metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" |
| schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" |
| schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" |
| ) |
| |
// TestMostRequested exercises MostRequestedPriorityMap, the priority that
// favors nodes with the HIGHEST fraction of requested resources (the inverse
// of least-requested spreading, useful for bin-packing). Each case's comment
// shows the expected per-resource math on the 0-10 scale; the per-node score
// is the average of the CPU and memory scores, truncated to an integer.
func TestMostRequested(t *testing.T) {
	labels1 := map[string]string{
		"foo": "bar",
		"baz": "blah",
	}
	labels2 := map[string]string{
		"bar": "foo",
		"baz": "blah",
	}
	// Pod spec with no containers, hence no resource requests at all.
	noResources := v1.PodSpec{
		Containers: []v1.Container{},
	}
	// Requests 3000m CPU total (1000m + 2000m) and zero memory on machine1.
	cpuOnly := v1.PodSpec{
		NodeName: "machine1",
		Containers: []v1.Container{
			{
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceCPU:    resource.MustParse("1000m"),
						v1.ResourceMemory: resource.MustParse("0"),
					},
				},
			},
			{
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceCPU:    resource.MustParse("2000m"),
						v1.ResourceMemory: resource.MustParse("0"),
					},
				},
			},
		},
	}
	// Shallow copy of cpuOnly rehomed to machine2; the Containers slice is
	// shared, which is safe here because the specs are never mutated.
	cpuOnly2 := cpuOnly
	cpuOnly2.NodeName = "machine2"
	// Requests 3000m CPU and 5000 memory total on machine2.
	cpuAndMemory := v1.PodSpec{
		NodeName: "machine2",
		Containers: []v1.Container{
			{
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceCPU:    resource.MustParse("1000m"),
						v1.ResourceMemory: resource.MustParse("2000"),
					},
				},
			},
			{
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceCPU:    resource.MustParse("2000m"),
						v1.ResourceMemory: resource.MustParse("3000"),
					},
				},
			},
		},
	}
	// Requests 5000m CPU and 9000 memory total; used to exceed node capacity.
	bigCPUAndMemory := v1.PodSpec{
		NodeName: "machine1",
		Containers: []v1.Container{
			{
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceCPU:    resource.MustParse("2000m"),
						v1.ResourceMemory: resource.MustParse("4000"),
					},
				},
			},
			{
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceCPU:    resource.MustParse("3000m"),
						v1.ResourceMemory: resource.MustParse("5000"),
					},
				},
			},
		},
	}
	tests := []struct {
		pod          *v1.Pod
		pods         []*v1.Pod
		nodes        []*v1.Node
		expectedList schedulerapi.HostPriorityList
		name         string
	}{
		{
			/*
				Node1 scores (used resources) on 0-10 scale
				CPU Score: (0 * 10) / 4000 = 0
				Memory Score: (0 * 10) / 10000 = 0
				Node1 Score: (0 + 0) / 2 = 0

				Node2 scores (used resources) on 0-10 scale
				CPU Score: (0 * 10) / 4000 = 0
				Memory Score: (0 * 10) / 10000 = 0
				Node2 Score: (0 + 0) / 2 = 0
			*/
			pod:          &v1.Pod{Spec: noResources},
			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
			name:         "nothing scheduled, nothing requested",
		},
		{
			/*
				Node1 scores on 0-10 scale
				CPU Score: (3000 * 10) / 4000 = 7.5
				Memory Score: (5000 * 10) / 10000 = 5
				Node1 Score: (7.5 + 5) / 2 = 6.25 -> 6 (integer truncation)

				Node2 scores on 0-10 scale
				CPU Score: (3000 * 10) / 6000 = 5
				Memory Score: (5000 * 10) / 10000 = 5
				Node2 Score: (5 + 5) / 2 = 5
			*/
			pod:          &v1.Pod{Spec: cpuAndMemory},
			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 6}, {Host: "machine2", Score: 5}},
			name:         "nothing scheduled, resources requested, differently sized machines",
		},
		{
			/*
				Existing pods: two cpuOnly on machine1 (6000m CPU, 0 mem);
				cpuOnly2 + cpuAndMemory on machine2 (6000m CPU, 5000 mem).

				Node1 scores on 0-10 scale
				CPU Score: (6000 * 10) / 10000 = 6
				Memory Score: (0 * 10) / 20000 = 0
				Node1 Score: (6 + 0) / 2 = 3

				Node2 scores on 0-10 scale
				CPU Score: (6000 * 10) / 10000 = 6
				Memory Score: (5000 * 10) / 20000 = 2.5
				Node2 Score: (6 + 2.5) / 2 = 4.25 -> 4 (integer truncation)
			*/
			pod:          &v1.Pod{Spec: noResources},
			nodes:        []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 3}, {Host: "machine2", Score: 4}},
			name:         "no resources requested, pods scheduled with resources",
			pods: []*v1.Pod{
				{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: cpuOnly2, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: cpuAndMemory, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			},
		},
		{
			/*
				Incoming cpuAndMemory pod (3000m, 5000) added on top of the
				existing pods on each node.

				Node1 scores on 0-10 scale
				CPU Score: (6000 * 10) / 10000 = 6
				Memory Score: (5000 * 10) / 20000 = 2.5
				Node1 Score: (6 + 2.5) / 2 = 4.25 -> 4 (integer truncation)

				Node2 scores on 0-10 scale
				CPU Score: (6000 * 10) / 10000 = 6
				Memory Score: (10000 * 10) / 20000 = 5
				Node2 Score: (6 + 5) / 2 = 5.5 -> 5 (integer truncation)
			*/
			pod:          &v1.Pod{Spec: cpuAndMemory},
			nodes:        []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 4}, {Host: "machine2", Score: 5}},
			name:         "resources requested, pods scheduled with resources",
			pods: []*v1.Pod{
				{Spec: cpuOnly},
				{Spec: cpuAndMemory},
			},
		},
		{
			/*
				Requests exceeding a node's capacity score 0 for that resource.

				Node1 scores on 0-10 scale
				CPU Score: 5000 > 4000 return 0
				Memory Score: (9000 * 10) / 10000 = 9
				Node1 Score: (0 + 9) / 2 = 4.5 -> 4 (integer truncation)

				Node2 scores on 0-10 scale
				CPU Score: (5000 * 10) / 10000 = 5
				Memory Score: 9000 > 8000 return 0
				Node2 Score: (5 + 0) / 2 = 2.5 -> 2 (integer truncation)
			*/
			pod:          &v1.Pod{Spec: bigCPUAndMemory},
			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 10000, 8000)},
			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 4}, {Host: "machine2", Score: 2}},
			name:         "resources requested with more than the node, pods scheduled with resources",
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes)
			list, err := priorityFunction(MostRequestedPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes)
			if err != nil {
				t.Errorf("unexpected error: %v", err)
			}
			if !reflect.DeepEqual(test.expectedList, list) {
				t.Errorf("expected %#v, got %#v", test.expectedList, list)
			}
		})
	}
}