Skip to content

Commit 2d6ad79

Browse files
authored
koord-scheduler: support Reservation reserved CPU Cores (#1140)
Signed-off-by: Joseph <joseph.t.lee@outlook.com>
1 parent 832ecd0 commit 2d6ad79

File tree

10 files changed

+530
-87
lines changed

10 files changed

+530
-87
lines changed

apis/extension/resource.go

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@ import (
2020
"encoding/json"
2121

2222
corev1 "k8s.io/api/core/v1"
23+
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
2324
)
2425

2526
const (
@@ -155,18 +156,20 @@ func GetResourceStatus(annotations map[string]string) (*ResourceStatus, error) {
155156
return resourceStatus, nil
156157
}
157158

158-
func SetResourceStatus(pod *corev1.Pod, status *ResourceStatus) error {
159-
if pod == nil {
159+
func SetResourceStatus(obj metav1.Object, status *ResourceStatus) error {
160+
if obj == nil {
160161
return nil
161162
}
162-
if pod.Annotations == nil {
163-
pod.Annotations = map[string]string{}
163+
annotations := obj.GetAnnotations()
164+
if annotations == nil {
165+
annotations = map[string]string{}
164166
}
165167
data, err := json.Marshal(status)
166168
if err != nil {
167169
return err
168170
}
169-
pod.Annotations[AnnotationResourceStatus] = string(data)
171+
annotations[AnnotationResourceStatus] = string(data)
172+
obj.SetAnnotations(annotations)
170173
return nil
171174
}
172175

pkg/scheduler/plugins/nodenumaresource/cpu_accumulator.go

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,29 @@ import (
2626
"github.com/koordinator-sh/koordinator/pkg/util/cpuset"
2727
)
2828

29+
func takePreferredCPUs(
30+
topology *CPUTopology,
31+
maxRefCount int,
32+
availableCPUs cpuset.CPUSet,
33+
preferredCPUs cpuset.CPUSet,
34+
allocatedCPUs CPUDetails,
35+
numCPUsNeeded int,
36+
cpuBindPolicy schedulingconfig.CPUBindPolicy,
37+
cpuExclusivePolicy schedulingconfig.CPUExclusivePolicy,
38+
numaAllocatedStrategy schedulingconfig.NUMAAllocateStrategy,
39+
) (cpuset.CPUSet, error) {
40+
preferredCPUs = availableCPUs.Intersection(preferredCPUs)
41+
size := preferredCPUs.Size()
42+
if size == 0 {
43+
return cpuset.CPUSet{}, nil
44+
}
45+
if numCPUsNeeded > size {
46+
numCPUsNeeded = size
47+
}
48+
49+
return takeCPUs(topology, maxRefCount, preferredCPUs, allocatedCPUs, numCPUsNeeded, cpuBindPolicy, cpuExclusivePolicy, numaAllocatedStrategy)
50+
}
51+
2952
func takeCPUs(
3053
topology *CPUTopology,
3154
maxRefCount int,

pkg/scheduler/plugins/nodenumaresource/cpu_accumulator_test.go

Lines changed: 25 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -569,7 +569,7 @@ func TestTakeCPUsWithMaxRefCount(t *testing.T) {
569569

570570
// first pod request 4 CPUs
571571
podUID := uuid.NewUUID()
572-
availableCPUs, allocatedCPUsDetails := allocationState.getAvailableCPUs(cpuTopology, 2, cpuset.NewCPUSet())
572+
availableCPUs, allocatedCPUsDetails := allocationState.getAvailableCPUs(cpuTopology, 2, cpuset.NewCPUSet(), cpuset.NewCPUSet())
573573
result, err := takeCPUs(
574574
cpuTopology, 2, availableCPUs, allocatedCPUsDetails,
575575
4, schedulingconfig.CPUBindPolicyFullPCPUs, schedulingconfig.CPUExclusivePolicyNone, schedulingconfig.NUMAMostAllocated)
@@ -579,7 +579,7 @@ func TestTakeCPUsWithMaxRefCount(t *testing.T) {
579579

580580
// second pod request 5 CPUs
581581
podUID = uuid.NewUUID()
582-
availableCPUs, allocatedCPUsDetails = allocationState.getAvailableCPUs(cpuTopology, 2, cpuset.NewCPUSet())
582+
availableCPUs, allocatedCPUsDetails = allocationState.getAvailableCPUs(cpuTopology, 2, cpuset.NewCPUSet(), cpuset.NewCPUSet())
583583
result, err = takeCPUs(
584584
cpuTopology, 2, availableCPUs, allocatedCPUsDetails,
585585
5, schedulingconfig.CPUBindPolicyFullPCPUs, schedulingconfig.CPUExclusivePolicyNone, schedulingconfig.NUMAMostAllocated)
@@ -589,7 +589,7 @@ func TestTakeCPUsWithMaxRefCount(t *testing.T) {
589589

590590
// third pod request 4 CPUs
591591
podUID = uuid.NewUUID()
592-
availableCPUs, allocatedCPUsDetails = allocationState.getAvailableCPUs(cpuTopology, 2, cpuset.NewCPUSet())
592+
availableCPUs, allocatedCPUsDetails = allocationState.getAvailableCPUs(cpuTopology, 2, cpuset.NewCPUSet(), cpuset.NewCPUSet())
593593
result, err = takeCPUs(
594594
cpuTopology, 2, availableCPUs, allocatedCPUsDetails,
595595
4, schedulingconfig.CPUBindPolicyFullPCPUs, schedulingconfig.CPUExclusivePolicyNone, schedulingconfig.NUMAMostAllocated)
@@ -610,7 +610,7 @@ func TestTakeCPUsSortByRefCount(t *testing.T) {
610610

611611
// first pod request 16 CPUs
612612
podUID := uuid.NewUUID()
613-
availableCPUs, allocatedCPUsDetails := allocationState.getAvailableCPUs(cpuTopology, 2, cpuset.NewCPUSet())
613+
availableCPUs, allocatedCPUsDetails := allocationState.getAvailableCPUs(cpuTopology, 2, cpuset.NewCPUSet(), cpuset.NewCPUSet())
614614
result, err := takeCPUs(
615615
cpuTopology, 2, availableCPUs, allocatedCPUsDetails,
616616
16, schedulingconfig.CPUBindPolicySpreadByPCPUs, schedulingconfig.CPUExclusivePolicyNone, schedulingconfig.NUMAMostAllocated)
@@ -620,7 +620,7 @@ func TestTakeCPUsSortByRefCount(t *testing.T) {
620620

621621
// second pod request 16 CPUs
622622
podUID = uuid.NewUUID()
623-
availableCPUs, allocatedCPUsDetails = allocationState.getAvailableCPUs(cpuTopology, 2, cpuset.NewCPUSet())
623+
availableCPUs, allocatedCPUsDetails = allocationState.getAvailableCPUs(cpuTopology, 2, cpuset.NewCPUSet(), cpuset.NewCPUSet())
624624
result, err = takeCPUs(
625625
cpuTopology, 2, availableCPUs, allocatedCPUsDetails,
626626
16, schedulingconfig.CPUBindPolicyFullPCPUs, schedulingconfig.CPUExclusivePolicyNone, schedulingconfig.NUMAMostAllocated)
@@ -630,7 +630,7 @@ func TestTakeCPUsSortByRefCount(t *testing.T) {
630630

631631
// third pod request 16 CPUs
632632
podUID = uuid.NewUUID()
633-
availableCPUs, allocatedCPUsDetails = allocationState.getAvailableCPUs(cpuTopology, 2, cpuset.NewCPUSet())
633+
availableCPUs, allocatedCPUsDetails = allocationState.getAvailableCPUs(cpuTopology, 2, cpuset.NewCPUSet(), cpuset.NewCPUSet())
634634
result, err = takeCPUs(
635635
cpuTopology, 2, availableCPUs, allocatedCPUsDetails,
636636
16, schedulingconfig.CPUBindPolicySpreadByPCPUs, schedulingconfig.CPUExclusivePolicyNone, schedulingconfig.NUMAMostAllocated)
@@ -640,15 +640,15 @@ func TestTakeCPUsSortByRefCount(t *testing.T) {
640640

641641
// fourth pod request 16 CPUs
642642
podUID = uuid.NewUUID()
643-
availableCPUs, allocatedCPUsDetails = allocationState.getAvailableCPUs(cpuTopology, 2, cpuset.NewCPUSet())
643+
availableCPUs, allocatedCPUsDetails = allocationState.getAvailableCPUs(cpuTopology, 2, cpuset.NewCPUSet(), cpuset.NewCPUSet())
644644
result, err = takeCPUs(
645645
cpuTopology, 2, availableCPUs, allocatedCPUsDetails,
646646
16, schedulingconfig.CPUBindPolicyFullPCPUs, schedulingconfig.CPUExclusivePolicyNone, schedulingconfig.NUMAMostAllocated)
647647
assert.True(t, result.Equals(cpuset.MustParse("16-31")))
648648
assert.NoError(t, err)
649649
allocationState.addCPUs(cpuTopology, podUID, result, schedulingconfig.CPUExclusivePolicyPCPULevel)
650650

651-
availableCPUs, _ = allocationState.getAvailableCPUs(cpuTopology, 2, cpuset.NewCPUSet())
651+
availableCPUs, _ = allocationState.getAvailableCPUs(cpuTopology, 2, cpuset.NewCPUSet(), cpuset.NewCPUSet())
652652
assert.Equal(t, cpuset.MustParse(""), availableCPUs)
653653
}
654654

@@ -754,3 +754,20 @@ func BenchmarkTakeCPUsWithSpread(b *testing.B) {
754754
})
755755
}
756756
}
757+
758+
func TestTakePreferredCPUs(t *testing.T) {
759+
topology := buildCPUTopologyForTest(2, 1, 16, 2)
760+
cpus := topology.CPUDetails.CPUs()
761+
result, err := takeCPUs(topology, 1, cpus, nil, 2, schedulingconfig.CPUBindPolicySpreadByPCPUs, schedulingconfig.CPUExclusivePolicyNone, schedulingconfig.NUMAMostAllocated)
762+
assert.NoError(t, err)
763+
assert.Equal(t, []int{0, 2}, result.ToSlice())
764+
765+
result, err = takePreferredCPUs(topology, 1, cpus, cpuset.NewCPUSet(), nil, 2, schedulingconfig.CPUBindPolicySpreadByPCPUs, schedulingconfig.CPUExclusivePolicyNone, schedulingconfig.NUMAMostAllocated)
766+
assert.NoError(t, err)
767+
assert.Empty(t, result.ToSlice())
768+
769+
preferredCPUs := cpuset.NewCPUSet(11, 13, 15, 17)
770+
result, err = takePreferredCPUs(topology, 1, cpus, preferredCPUs, nil, 2, schedulingconfig.CPUBindPolicySpreadByPCPUs, schedulingconfig.CPUExclusivePolicyNone, schedulingconfig.NUMAMostAllocated)
771+
assert.NoError(t, err)
772+
assert.Equal(t, []int{11, 13}, result.ToSlice())
773+
}

pkg/scheduler/plugins/nodenumaresource/cpu_allocation.go

Lines changed: 19 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,11 @@ func (n *cpuAllocation) updateAllocatedCPUSet(cpuTopology *CPUTopology, podUID t
4545
n.addCPUs(cpuTopology, podUID, cpuset, cpuExclusivePolicy)
4646
}
4747

48+
func (n *cpuAllocation) getCPUs(podUID types.UID) (cpuset.CPUSet, bool) {
49+
cpuset, ok := n.allocatedPods[podUID]
50+
return cpuset, ok
51+
}
52+
4853
func (n *cpuAllocation) addCPUs(cpuTopology *CPUTopology, podUID types.UID, cpuset cpuset.CPUSet, exclusivePolicy schedulingconfig.CPUExclusivePolicy) {
4954
if _, ok := n.allocatedPods[podUID]; ok {
5055
return
@@ -83,8 +88,21 @@ func (n *cpuAllocation) releaseCPUs(podUID types.UID) {
8388
}
8489
}
8590

86-
func (n *cpuAllocation) getAvailableCPUs(cpuTopology *CPUTopology, maxRefCount int, reservedCPUs cpuset.CPUSet) (availableCPUs cpuset.CPUSet, allocateInfo CPUDetails) {
91+
func (n *cpuAllocation) getAvailableCPUs(cpuTopology *CPUTopology, maxRefCount int, reservedCPUs, preferredCPUs cpuset.CPUSet) (availableCPUs cpuset.CPUSet, allocateInfo CPUDetails) {
8792
allocateInfo = n.allocatedCPUs.Clone()
93+
if !preferredCPUs.IsEmpty() {
94+
for _, cpuID := range preferredCPUs.ToSliceNoSort() {
95+
cpuInfo, ok := allocateInfo[cpuID]
96+
if ok {
97+
cpuInfo.RefCount--
98+
if cpuInfo.RefCount == 0 {
99+
delete(allocateInfo, cpuID)
100+
} else {
101+
allocateInfo[cpuID] = cpuInfo
102+
}
103+
}
104+
}
105+
}
88106
allocated := allocateInfo.CPUs().Filter(func(cpuID int) bool {
89107
return allocateInfo[cpuID].RefCount >= maxRefCount
90108
})

pkg/scheduler/plugins/nodenumaresource/cpu_allocation_test.go

Lines changed: 25 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -54,7 +54,7 @@ func TestNodeAllocationStateAddCPUs(t *testing.T) {
5454
assert.Equal(t, expectAllocatedPods, allocationState.allocatedPods)
5555
assert.Equal(t, expectAllocatedCPUs, allocationState.allocatedCPUs)
5656

57-
availableCPUs, _ := allocationState.getAvailableCPUs(cpuTopology, 2, cpuset.NewCPUSet())
57+
availableCPUs, _ := allocationState.getAvailableCPUs(cpuTopology, 2, cpuset.NewCPUSet(), cpuset.NewCPUSet())
5858
expectAvailableCPUs := cpuset.MustParse("0-15")
5959
assert.Equal(t, expectAvailableCPUs, availableCPUs)
6060

@@ -63,7 +63,7 @@ func TestNodeAllocationStateAddCPUs(t *testing.T) {
6363
assert.Equal(t, expectAllocatedPods, allocationState.allocatedPods)
6464
assert.Equal(t, expectAllocatedCPUs, allocationState.allocatedCPUs)
6565

66-
availableCPUs, _ = allocationState.getAvailableCPUs(cpuTopology, 2, cpuset.NewCPUSet())
66+
availableCPUs, _ = allocationState.getAvailableCPUs(cpuTopology, 2, cpuset.NewCPUSet(), cpuset.NewCPUSet())
6767
cpuset.MustParse("0-15")
6868
assert.Equal(t, expectAvailableCPUs, availableCPUs)
6969

@@ -120,19 +120,39 @@ func Test_cpuAllocation_getAvailableCPUs(t *testing.T) {
120120
podUID := uuid.NewUUID()
121121
allocationState.addCPUs(cpuTopology, podUID, cpuset.MustParse("1-4"), schedulingconfig.CPUExclusivePolicyPCPULevel)
122122

123-
availableCPUs, _ := allocationState.getAvailableCPUs(cpuTopology, 2, cpuset.NewCPUSet())
123+
availableCPUs, _ := allocationState.getAvailableCPUs(cpuTopology, 2, cpuset.NewCPUSet(), cpuset.NewCPUSet())
124124
expectAvailableCPUs := cpuset.MustParse("0-15")
125125
assert.Equal(t, expectAvailableCPUs, availableCPUs)
126126

127127
// test with add already allocated cpu(refCount > 1 but less than maxRefCount) and another pod
128128
anotherPodUID := uuid.NewUUID()
129129
allocationState.addCPUs(cpuTopology, anotherPodUID, cpuset.MustParse("2-5"), schedulingconfig.CPUExclusivePolicyPCPULevel)
130-
availableCPUs, _ = allocationState.getAvailableCPUs(cpuTopology, 2, cpuset.NewCPUSet())
130+
availableCPUs, _ = allocationState.getAvailableCPUs(cpuTopology, 2, cpuset.NewCPUSet(), cpuset.NewCPUSet())
131131
expectAvailableCPUs = cpuset.MustParse("0-1,5-15")
132132
assert.Equal(t, expectAvailableCPUs, availableCPUs)
133133

134134
allocationState.releaseCPUs(podUID)
135-
availableCPUs, _ = allocationState.getAvailableCPUs(cpuTopology, 1, cpuset.NewCPUSet())
135+
availableCPUs, _ = allocationState.getAvailableCPUs(cpuTopology, 1, cpuset.NewCPUSet(), cpuset.NewCPUSet())
136136
expectAvailableCPUs = cpuset.MustParse("0-1,6-15")
137137
assert.Equal(t, expectAvailableCPUs, availableCPUs)
138138
}
139+
140+
func Test_cpuAllocation_getAvailableCPUs_with_preferred_cpus(t *testing.T) {
141+
cpuTopology := buildCPUTopologyForTest(2, 1, 4, 2)
142+
for _, v := range cpuTopology.CPUDetails {
143+
v.CoreID = v.SocketID<<16 | v.CoreID
144+
cpuTopology.CPUDetails[v.CPUID] = v
145+
}
146+
147+
allocationState := newCPUAllocation("test-node-1")
148+
assert.NotNil(t, allocationState)
149+
podUID := uuid.NewUUID()
150+
allocationState.addCPUs(cpuTopology, podUID, cpuset.MustParse("0-4"), schedulingconfig.CPUExclusivePolicyPCPULevel)
151+
availableCPUs, _ := allocationState.getAvailableCPUs(cpuTopology, 1, cpuset.NewCPUSet(), cpuset.NewCPUSet())
152+
expectAvailableCPUs := cpuset.MustParse("5-15")
153+
assert.Equal(t, expectAvailableCPUs, availableCPUs)
154+
155+
availableCPUs, _ = allocationState.getAvailableCPUs(cpuTopology, 1, cpuset.NewCPUSet(), cpuset.NewCPUSet(1, 2))
156+
expectAvailableCPUs = cpuset.MustParse("1-2,5-15")
157+
assert.Equal(t, expectAvailableCPUs, availableCPUs)
158+
}

pkg/scheduler/plugins/nodenumaresource/cpu_manager.go

Lines changed: 63 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -37,17 +37,23 @@ type CPUManager interface {
3737
node *corev1.Node,
3838
numCPUsNeeded int,
3939
cpuBindPolicy schedulingconfig.CPUBindPolicy,
40-
cpuExclusivePolicy schedulingconfig.CPUExclusivePolicy) (cpuset.CPUSet, error)
40+
cpuExclusivePolicy schedulingconfig.CPUExclusivePolicy,
41+
preferredCPUs cpuset.CPUSet,
42+
) (cpuset.CPUSet, error)
4143

4244
UpdateAllocatedCPUSet(nodeName string, podUID types.UID, cpuset cpuset.CPUSet, cpuExclusivePolicy schedulingconfig.CPUExclusivePolicy)
4345

46+
GetAllocatedCPUSet(nodeName string, podUID types.UID) (cpuset.CPUSet, bool)
47+
4448
Free(nodeName string, podUID types.UID)
4549

4650
Score(
4751
node *corev1.Node,
4852
numCPUsNeeded int,
4953
cpuBindPolicy schedulingconfig.CPUBindPolicy,
50-
cpuExclusivePolicy schedulingconfig.CPUExclusivePolicy) int64
54+
cpuExclusivePolicy schedulingconfig.CPUExclusivePolicy,
55+
preferredCPUs cpuset.CPUSet,
56+
) int64
5157

5258
GetAvailableCPUs(nodeName string) (availableCPUs cpuset.CPUSet, allocated CPUDetails, err error)
5359
}
@@ -112,6 +118,7 @@ func (c *cpuManagerImpl) Allocate(
112118
numCPUsNeeded int,
113119
cpuBindPolicy schedulingconfig.CPUBindPolicy,
114120
cpuExclusivePolicy schedulingconfig.CPUExclusivePolicy,
121+
preferredCPUs cpuset.CPUSet,
115122
) (cpuset.CPUSet, error) {
116123
result := cpuset.CPUSet{}
117124
// The Pod requires the CPU to be allocated according to CPUBindPolicy,
@@ -131,19 +138,47 @@ func (c *cpuManagerImpl) Allocate(
131138
allocation.lock.Lock()
132139
defer allocation.lock.Unlock()
133140

134-
availableCPUs, allocated := allocation.getAvailableCPUs(cpuTopologyOptions.CPUTopology, cpuTopologyOptions.MaxRefCount, reservedCPUs)
141+
availableCPUs, allocated := allocation.getAvailableCPUs(cpuTopologyOptions.CPUTopology, cpuTopologyOptions.MaxRefCount, reservedCPUs, preferredCPUs)
135142
numaAllocateStrategy := c.getNUMAAllocateStrategy(node)
136-
result, err := takeCPUs(
137-
cpuTopologyOptions.CPUTopology,
138-
cpuTopologyOptions.MaxRefCount,
139-
availableCPUs,
140-
allocated,
141-
numCPUsNeeded,
142-
cpuBindPolicy,
143-
cpuExclusivePolicy,
144-
numaAllocateStrategy,
145-
)
146-
return result, err
143+
if !preferredCPUs.IsEmpty() {
144+
var err error
145+
result, err = takePreferredCPUs(
146+
cpuTopologyOptions.CPUTopology,
147+
cpuTopologyOptions.MaxRefCount,
148+
availableCPUs,
149+
preferredCPUs,
150+
allocated,
151+
numCPUsNeeded,
152+
cpuBindPolicy,
153+
cpuExclusivePolicy,
154+
numaAllocateStrategy)
155+
if err != nil {
156+
return result, err
157+
}
158+
numCPUsNeeded -= result.Size()
159+
availableCPUs = availableCPUs.Difference(preferredCPUs)
160+
}
161+
if numCPUsNeeded > 0 {
162+
cpus, err := takeCPUs(
163+
cpuTopologyOptions.CPUTopology,
164+
cpuTopologyOptions.MaxRefCount,
165+
availableCPUs,
166+
allocated,
167+
numCPUsNeeded,
168+
cpuBindPolicy,
169+
cpuExclusivePolicy,
170+
numaAllocateStrategy,
171+
)
172+
if err != nil {
173+
return cpuset.CPUSet{}, err
174+
}
175+
if result.IsEmpty() {
176+
result = cpus
177+
} else {
178+
result = result.Union(cpus)
179+
}
180+
}
181+
return result, nil
147182
}
148183

149184
func (c *cpuManagerImpl) UpdateAllocatedCPUSet(nodeName string, podUID types.UID, cpuset cpuset.CPUSet, cpuExclusivePolicy schedulingconfig.CPUExclusivePolicy) {
@@ -159,19 +194,22 @@ func (c *cpuManagerImpl) UpdateAllocatedCPUSet(nodeName string, podUID types.UID
159194
allocation.updateAllocatedCPUSet(cpuTopologyOptions.CPUTopology, podUID, cpuset, cpuExclusivePolicy)
160195
}
161196

197+
func (c *cpuManagerImpl) GetAllocatedCPUSet(nodeName string, podUID types.UID) (cpuset.CPUSet, bool) {
198+
allocation := c.getOrCreateAllocation(nodeName)
199+
allocation.lock.Lock()
200+
defer allocation.lock.Unlock()
201+
202+
return allocation.getCPUs(podUID)
203+
}
204+
162205
func (c *cpuManagerImpl) Free(nodeName string, podUID types.UID) {
163206
allocation := c.getOrCreateAllocation(nodeName)
164207
allocation.lock.Lock()
165208
defer allocation.lock.Unlock()
166209
allocation.releaseCPUs(podUID)
167210
}
168211

169-
func (c *cpuManagerImpl) Score(
170-
node *corev1.Node,
171-
numCPUsNeeded int,
172-
cpuBindPolicy schedulingconfig.CPUBindPolicy,
173-
cpuExclusivePolicy schedulingconfig.CPUExclusivePolicy,
174-
) int64 {
212+
func (c *cpuManagerImpl) Score(node *corev1.Node, numCPUsNeeded int, cpuBindPolicy schedulingconfig.CPUBindPolicy, cpuExclusivePolicy schedulingconfig.CPUExclusivePolicy, preferredCPUs cpuset.CPUSet) int64 {
175213
cpuTopologyOptions := c.topologyManager.GetCPUTopologyOptions(node.Name)
176214
if cpuTopologyOptions.CPUTopology == nil || !cpuTopologyOptions.CPUTopology.IsValid() {
177215
return 0
@@ -184,8 +222,10 @@ func (c *cpuManagerImpl) Score(
184222
allocation.lock.Lock()
185223
defer allocation.lock.Unlock()
186224

225+
// TODO(joseph): should support score with preferredCPUs.
226+
187227
cpuTopology := cpuTopologyOptions.CPUTopology
188-
availableCPUs, allocated := allocation.getAvailableCPUs(cpuTopology, cpuTopologyOptions.MaxRefCount, reservedCPUs)
228+
availableCPUs, allocated := allocation.getAvailableCPUs(cpuTopology, cpuTopologyOptions.MaxRefCount, reservedCPUs, preferredCPUs)
189229
acc := newCPUAccumulator(
190230
cpuTopology,
191231
cpuTopologyOptions.MaxRefCount,
@@ -289,6 +329,7 @@ func (c *cpuManagerImpl) GetAvailableCPUs(nodeName string) (availableCPUs cpuset
289329
allocation := c.getOrCreateAllocation(nodeName)
290330
allocation.lock.Lock()
291331
defer allocation.lock.Unlock()
292-
availableCPUs, allocated = allocation.getAvailableCPUs(cpuTopologyOptions.CPUTopology, cpuTopologyOptions.MaxRefCount, cpuTopologyOptions.ReservedCPUs)
332+
emptyCPUs := cpuset.NewCPUSet()
333+
availableCPUs, allocated = allocation.getAvailableCPUs(cpuTopologyOptions.CPUTopology, cpuTopologyOptions.MaxRefCount, cpuTopologyOptions.ReservedCPUs, emptyCPUs)
293334
return availableCPUs, allocated, nil
294335
}

0 commit comments

Comments
 (0)