Skip to content

Commit

Permalink
k8s 122 upgrade (volcano-sh#27)
Browse files Browse the repository at this point in the history
Cherry-picked from commits:

127309d
fd2e496
  • Loading branch information
yolgun committed Sep 14, 2022
1 parent 817d830 commit dc69aa1
Show file tree
Hide file tree
Showing 7 changed files with 320 additions and 97 deletions.
6 changes: 3 additions & 3 deletions pkg/controllers/job/job_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,11 +26,11 @@ import (
"k8s.io/api/scheduling/v1beta1"
"k8s.io/apimachinery/pkg/util/wait"
coreinformers "k8s.io/client-go/informers/core/v1"
kubeschedulinginformers "k8s.io/client-go/informers/scheduling/v1beta1"
kubeschedulinginformers "k8s.io/client-go/informers/scheduling/v1"
"k8s.io/client-go/kubernetes"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
corelisters "k8s.io/client-go/listers/core/v1"
kubeschedulinglisters "k8s.io/client-go/listers/scheduling/v1beta1"
kubeschedulinglisters "k8s.io/client-go/listers/scheduling/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
Expand Down Expand Up @@ -200,7 +200,7 @@ func (cc *jobcontroller) Initialize(opt *framework.ControllerOption) error {
cc.pgLister = cc.pgInformer.Lister()
cc.pgSynced = cc.pgInformer.Informer().HasSynced

cc.pcInformer = sharedInformers.Scheduling().V1beta1().PriorityClasses()
cc.pcInformer = sharedInformers.Scheduling().V1().PriorityClasses()
cc.pcLister = cc.pcInformer.Lister()
cc.pcSynced = cc.pcInformer.Informer().HasSynced

Expand Down
73 changes: 47 additions & 26 deletions pkg/scheduler/actions/preempt/preempt_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,11 +21,11 @@ import (
"time"

v1 "k8s.io/api/core/v1"
"k8s.io/api/scheduling/v1beta1"
schedulingv1 "k8s.io/api/scheduling/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/record"

schedulingv1 "volcano.sh/apis/pkg/apis/scheduling/v1beta1"
schedulingv1beta1 "volcano.sh/apis/pkg/apis/scheduling/v1beta1"
"volcano.sh/volcano/cmd/scheduler/app/options"
"volcano.sh/volcano/pkg/scheduler/api"
"volcano.sh/volcano/pkg/scheduler/cache"
Expand All @@ -48,24 +48,27 @@ func TestPreempt(t *testing.T) {

tests := []struct {
name string
podGroups []*schedulingv1.PodGroup
podGroups []*schedulingv1beta1.PodGroup
pods []*v1.Pod
nodes []*v1.Node
queues []*schedulingv1.Queue
queues []*schedulingv1beta1.Queue
expected int
}{
{
name: "do not preempt if there are enough idle resources",
podGroups: []*schedulingv1.PodGroup{
podGroups: []*schedulingv1beta1.PodGroup{
{
ObjectMeta: metav1.ObjectMeta{
Name: "pg1",
Namespace: "c1",
},
Spec: schedulingv1.PodGroupSpec{
Spec: schedulingv1beta1.PodGroupSpec{
MinMember: 3,
Queue: "q1",
},
Status: schedulingv1beta1.PodGroupStatus{
Phase: schedulingv1beta1.PodGroupInqueue,
},
},
},
pods: []*v1.Pod{
Expand All @@ -77,12 +80,12 @@ func TestPreempt(t *testing.T) {
nodes: []*v1.Node{
util.BuildNode("n1", util.BuildResourceList("10", "10G"), make(map[string]string)),
},
queues: []*schedulingv1.Queue{
queues: []*schedulingv1beta1.Queue{
{
ObjectMeta: metav1.ObjectMeta{
Name: "q1",
},
Spec: schedulingv1.QueueSpec{
Spec: schedulingv1beta1.QueueSpec{
Weight: 1,
},
},
Expand All @@ -91,26 +94,32 @@ func TestPreempt(t *testing.T) {
},
{
name: "do not preempt if job is pipelined",
podGroups: []*schedulingv1.PodGroup{
podGroups: []*schedulingv1beta1.PodGroup{
{
ObjectMeta: metav1.ObjectMeta{
Name: "pg1",
Namespace: "c1",
},
Spec: schedulingv1.PodGroupSpec{
Spec: schedulingv1beta1.PodGroupSpec{
MinMember: 1,
Queue: "q1",
},
Status: schedulingv1beta1.PodGroupStatus{
Phase: schedulingv1beta1.PodGroupInqueue,
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "pg2",
Namespace: "c1",
},
Spec: schedulingv1.PodGroupSpec{
Spec: schedulingv1beta1.PodGroupSpec{
MinMember: 1,
Queue: "q1",
},
Status: schedulingv1beta1.PodGroupStatus{
Phase: schedulingv1beta1.PodGroupInqueue,
},
},
},
// Both pg1 and pg2 jobs are pipelined, because enough pods are already running.
Expand All @@ -124,12 +133,12 @@ func TestPreempt(t *testing.T) {
nodes: []*v1.Node{
util.BuildNode("n1", util.BuildResourceList("3", "3G"), make(map[string]string)),
},
queues: []*schedulingv1.Queue{
queues: []*schedulingv1beta1.Queue{
{
ObjectMeta: metav1.ObjectMeta{
Name: "q1",
},
Spec: schedulingv1.QueueSpec{
Spec: schedulingv1beta1.QueueSpec{
Weight: 1,
},
},
Expand All @@ -138,28 +147,34 @@ func TestPreempt(t *testing.T) {
},
{
name: "preempt one task of different job to fit both jobs on one node",
podGroups: []*schedulingv1.PodGroup{
podGroups: []*schedulingv1beta1.PodGroup{
{
ObjectMeta: metav1.ObjectMeta{
Name: "pg1",
Namespace: "c1",
},
Spec: schedulingv1.PodGroupSpec{
Spec: schedulingv1beta1.PodGroupSpec{
MinMember: 1,
Queue: "q1",
PriorityClassName: "low-priority",
},
Status: schedulingv1beta1.PodGroupStatus{
Phase: schedulingv1beta1.PodGroupInqueue,
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "pg2",
Namespace: "c1",
},
Spec: schedulingv1.PodGroupSpec{
Spec: schedulingv1beta1.PodGroupSpec{
MinMember: 1,
Queue: "q1",
PriorityClassName: "high-priority",
},
Status: schedulingv1beta1.PodGroupStatus{
Phase: schedulingv1beta1.PodGroupInqueue,
},
},
},

Expand All @@ -172,12 +187,12 @@ func TestPreempt(t *testing.T) {
nodes: []*v1.Node{
util.BuildNode("n1", util.BuildResourceList("2", "2G"), make(map[string]string)),
},
queues: []*schedulingv1.Queue{
queues: []*schedulingv1beta1.Queue{
{
ObjectMeta: metav1.ObjectMeta{
Name: "q1",
},
Spec: schedulingv1.QueueSpec{
Spec: schedulingv1beta1.QueueSpec{
Weight: 1,
},
},
Expand All @@ -186,28 +201,34 @@ func TestPreempt(t *testing.T) {
},
{
name: "preempt enough tasks to fit large task of different job",
podGroups: []*schedulingv1.PodGroup{
podGroups: []*schedulingv1beta1.PodGroup{
{
ObjectMeta: metav1.ObjectMeta{
Name: "pg1",
Namespace: "c1",
},
Spec: schedulingv1.PodGroupSpec{
Spec: schedulingv1beta1.PodGroupSpec{
MinMember: 1,
Queue: "q1",
PriorityClassName: "low-priority",
},
Status: schedulingv1beta1.PodGroupStatus{
Phase: schedulingv1beta1.PodGroupInqueue,
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "pg2",
Namespace: "c1",
},
Spec: schedulingv1.PodGroupSpec{
Spec: schedulingv1beta1.PodGroupSpec{
MinMember: 1,
Queue: "q1",
PriorityClassName: "high-priority",
},
Status: schedulingv1beta1.PodGroupStatus{
Phase: schedulingv1beta1.PodGroupInqueue,
},
},
},
// There are 3 cpus and 3G of memory idle and 3 tasks running each consuming 1 cpu and 1G of memory.
Expand All @@ -221,12 +242,12 @@ func TestPreempt(t *testing.T) {
nodes: []*v1.Node{
util.BuildNode("n1", util.BuildResourceList("6", "6G"), make(map[string]string)),
},
queues: []*schedulingv1.Queue{
queues: []*schedulingv1beta1.Queue{
{
ObjectMeta: metav1.ObjectMeta{
Name: "q1",
},
Spec: schedulingv1.QueueSpec{
Spec: schedulingv1beta1.QueueSpec{
Weight: 1,
},
},
Expand Down Expand Up @@ -254,14 +275,14 @@ func TestPreempt(t *testing.T) {
Evictor: evictor,
StatusUpdater: &util.FakeStatusUpdater{},
VolumeBinder: &util.FakeVolumeBinder{},
PriorityClasses: make(map[string]*v1beta1.PriorityClass),
PriorityClasses: make(map[string]*schedulingv1.PriorityClass),

Recorder: record.NewFakeRecorder(100),
}
schedulerCache.PriorityClasses["high-priority"] = &v1beta1.PriorityClass{
schedulerCache.PriorityClasses["high-priority"] = &schedulingv1.PriorityClass{
Value: 100000,
}
schedulerCache.PriorityClasses["low-priority"] = &v1beta1.PriorityClass{
schedulerCache.PriorityClasses["low-priority"] = &schedulingv1.PriorityClass{
Value: 10,
}
for _, node := range test.nodes {
Expand Down
32 changes: 19 additions & 13 deletions pkg/scheduler/actions/reclaim/reclaim_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,11 +21,11 @@ import (
"time"

v1 "k8s.io/api/core/v1"
"k8s.io/api/scheduling/v1beta1"
schedulingv1 "k8s.io/api/scheduling/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/record"

schedulingv1 "volcano.sh/apis/pkg/apis/scheduling/v1beta1"
schedulingv1beta1 "volcano.sh/apis/pkg/apis/scheduling/v1beta1"
"volcano.sh/volcano/pkg/scheduler/api"
"volcano.sh/volcano/pkg/scheduler/cache"
"volcano.sh/volcano/pkg/scheduler/conf"
Expand All @@ -42,34 +42,40 @@ func TestReclaim(t *testing.T) {

tests := []struct {
name string
podGroups []*schedulingv1.PodGroup
podGroups []*schedulingv1beta1.PodGroup
pods []*v1.Pod
nodes []*v1.Node
queues []*schedulingv1.Queue
queues []*schedulingv1beta1.Queue
expected int
}{
{
name: "Two Queue with one Queue overusing resource, should reclaim",
podGroups: []*schedulingv1.PodGroup{
podGroups: []*schedulingv1beta1.PodGroup{
{
ObjectMeta: metav1.ObjectMeta{
Name: "pg1",
Namespace: "c1",
},
Spec: schedulingv1.PodGroupSpec{
Spec: schedulingv1beta1.PodGroupSpec{
Queue: "q1",
PriorityClassName: "low-priority",
},
Status: schedulingv1beta1.PodGroupStatus{
Phase: schedulingv1beta1.PodGroupInqueue,
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "pg2",
Namespace: "c1",
},
Spec: schedulingv1.PodGroupSpec{
Spec: schedulingv1beta1.PodGroupSpec{
Queue: "q2",
PriorityClassName: "high-priority",
},
Status: schedulingv1beta1.PodGroupStatus{
Phase: schedulingv1beta1.PodGroupInqueue,
},
},
},
pods: []*v1.Pod{
Expand All @@ -81,20 +87,20 @@ func TestReclaim(t *testing.T) {
nodes: []*v1.Node{
util.BuildNode("n1", util.BuildResourceList("3", "3Gi"), make(map[string]string)),
},
queues: []*schedulingv1.Queue{
queues: []*schedulingv1beta1.Queue{
{
ObjectMeta: metav1.ObjectMeta{
Name: "q1",
},
Spec: schedulingv1.QueueSpec{
Spec: schedulingv1beta1.QueueSpec{
Weight: 1,
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "q2",
},
Spec: schedulingv1.QueueSpec{
Spec: schedulingv1beta1.QueueSpec{
Weight: 1,
},
},
Expand All @@ -121,14 +127,14 @@ func TestReclaim(t *testing.T) {
Evictor: evictor,
StatusUpdater: &util.FakeStatusUpdater{},
VolumeBinder: &util.FakeVolumeBinder{},
PriorityClasses: make(map[string]*v1beta1.PriorityClass),
PriorityClasses: make(map[string]*schedulingv1.PriorityClass),

Recorder: record.NewFakeRecorder(100),
}
schedulerCache.PriorityClasses["high-priority"] = &v1beta1.PriorityClass{
schedulerCache.PriorityClasses["high-priority"] = &schedulingv1.PriorityClass{
Value: 100000,
}
schedulerCache.PriorityClasses["low-priority"] = &v1beta1.PriorityClass{
schedulerCache.PriorityClasses["low-priority"] = &schedulingv1.PriorityClass{
Value: 10,
}
for _, node := range test.nodes {
Expand Down
Loading

0 comments on commit dc69aa1

Please sign in to comment.