Skip to content
This repository was archived by the owner on Sep 2, 2024. It is now read-only.

Commit 6cdf2d0

Browse files
upgrade to latest dependencies (#1416)
bumping knative.dev/reconciler-test 03cc77c...785e0bd: > 785e0bd Improve error message when deleting resources (#618) > 7d36fe9 Copy pull secrets to SA for eventshub (#615) > e52650f upgrade to latest dependencies (#606) bumping knative.dev/eventing 6a695cb...0dadfd9: > 0dadfd9 [release-1.11] Scheduler: fix reserved replicas handling, blocking autoscaler and overcommitted pods (#7374) > c1626f1 [release-1.11] Update dependencies (#7362) > 46cc775 [release-1.11] TLS certificate rotation tests (#7103) (#7346) bumping knative.dev/pkg bd99f2f...56bfe0d: > 56bfe0d [release-1.11] [CVE-2023-44487] Disable http2 for webhooks (#2875) Signed-off-by: Knative Automation <automation@knative.team>
1 parent 0df291c commit 6cdf2d0

File tree

15 files changed

+175
-42
lines changed

15 files changed

+175
-42
lines changed

go.mod

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -24,10 +24,10 @@ require (
2424
k8s.io/apimachinery v0.26.5
2525
k8s.io/client-go v0.26.5
2626
k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2
27-
knative.dev/eventing v0.38.4
27+
knative.dev/eventing v0.38.5
2828
knative.dev/hack v0.0.0-20230712131415-ddae80293c43
29-
knative.dev/pkg v0.0.0-20231011193800-bd99f2f98be7
30-
knative.dev/reconciler-test v0.0.0-20231010075208-03cc77c11831
29+
knative.dev/pkg v0.0.0-20231023150739-56bfe0dd9626
30+
knative.dev/reconciler-test v0.0.0-20231023114057-785e0bd2d9a2
3131
sigs.k8s.io/yaml v1.3.0
3232
)
3333

go.sum

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -967,14 +967,14 @@ k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+O
967967
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4=
968968
k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2 h1:GfD9OzL11kvZN5iArC6oTS7RTj7oJOIfnislxYlqTj8=
969969
k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
970-
knative.dev/eventing v0.38.4 h1:eH059bfeLilj2xAN6V7XXOh3wqzz5ssoMS/CIJpJfmk=
971-
knative.dev/eventing v0.38.4/go.mod h1:ct8t+v6nmp1kFCy6ngkDWIEvnjJDNDoKptrfnQVh+z8=
970+
knative.dev/eventing v0.38.5 h1:NvSy3lek9IbLLWEot36NyAfNv7VkJNl38F1ItVL0D6s=
971+
knative.dev/eventing v0.38.5/go.mod h1:g+iAS+KBRSKULEPqoVnseMkObDeq3SJhqefbuIu8zY8=
972972
knative.dev/hack v0.0.0-20230712131415-ddae80293c43 h1:3SE06uNfSFGm/5XS+0trbyCUpgsOaBeyhPQU8FPNFz8=
973973
knative.dev/hack v0.0.0-20230712131415-ddae80293c43/go.mod h1:yk2OjGDsbEnQjfxdm0/HJKS2WqTLEFg/N6nUs6Rqx3Q=
974-
knative.dev/pkg v0.0.0-20231011193800-bd99f2f98be7 h1:y3qbfYX1SuSr/1ysXvKfpV8q/kCwWLWieCUgAhBUHmQ=
975-
knative.dev/pkg v0.0.0-20231011193800-bd99f2f98be7/go.mod h1:g+UCgSKQ2f15kHYu/V3CPtoKo5F1x/2Y1ot0NSK7gA0=
976-
knative.dev/reconciler-test v0.0.0-20231010075208-03cc77c11831 h1:rOisVvTe0yuJNImgOex1Z4vdqXRPP1FAg5xPxbLOSlU=
977-
knative.dev/reconciler-test v0.0.0-20231010075208-03cc77c11831/go.mod h1:i+/PWK/n3HPgjXMoj5U7CA6WRW/C3c3EfHCQ0FmrhNM=
974+
knative.dev/pkg v0.0.0-20231023150739-56bfe0dd9626 h1:qFE+UDBRg6cpF5LbA0sv1XK4XZ36Z7aTRCa+HcuxnNQ=
975+
knative.dev/pkg v0.0.0-20231023150739-56bfe0dd9626/go.mod h1:g+UCgSKQ2f15kHYu/V3CPtoKo5F1x/2Y1ot0NSK7gA0=
976+
knative.dev/reconciler-test v0.0.0-20231023114057-785e0bd2d9a2 h1:Lenj/sGhPYZoCdl4bvoeZzA4Y1VS4LNEIWH1/HTU+6I=
977+
knative.dev/reconciler-test v0.0.0-20231023114057-785e0bd2d9a2/go.mod h1:HgugJUOhHZ3F6Tbhte92ecL0sBqJtCeJtd7K8jX+IJk=
978978
pgregory.net/rapid v0.3.3 h1:jCjBsY4ln4Atz78QoBWxUEvAHaFyNDQg9+WU62aCn1U=
979979
pgregory.net/rapid v0.3.3/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU=
980980
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=

vendor/knative.dev/eventing/pkg/scheduler/state/state.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -361,7 +361,7 @@ func (s *stateBuilder) updateFreeCapacity(free []int32, last int32, podName stri
361361
// Assert the pod is not overcommitted
362362
if free[ordinal] < 0 {
363363
// This should not happen anymore. Log as an error but do not interrupt the current scheduling.
364-
s.logger.Errorw("pod is overcommitted", zap.String("podName", podName), zap.Int32("free", free[ordinal]))
364+
s.logger.Warnw("pod is overcommitted", zap.String("podName", podName), zap.Int32("free", free[ordinal]))
365365
}
366366

367367
if ordinal > last {

vendor/knative.dev/eventing/pkg/scheduler/statefulset/autoscaler.go

Lines changed: 9 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -106,7 +106,7 @@ func (a *autoscaler) Demote(b reconciler.Bucket) {
106106

107107
func newAutoscaler(ctx context.Context, cfg *Config, stateAccessor st.StateAccessor) *autoscaler {
108108
return &autoscaler{
109-
logger: logging.FromContext(ctx),
109+
logger: logging.FromContext(ctx).With(zap.String("component", "autoscaler")),
110110
statefulSetClient: kubeclient.Get(ctx).AppsV1().StatefulSets(cfg.StatefulSetNamespace),
111111
statefulSetName: cfg.StatefulSetName,
112112
vpodLister: cfg.VPodLister,
@@ -133,8 +133,10 @@ func (a *autoscaler) Start(ctx context.Context) {
133133
case <-ctx.Done():
134134
return
135135
case <-time.After(a.refreshPeriod):
136+
a.logger.Infow("Triggering scale down", zap.Bool("isLeader", a.isLeader.Load()))
136137
attemptScaleDown = true
137138
case <-a.trigger:
139+
a.logger.Infow("Triggering scale up", zap.Bool("isLeader", a.isLeader.Load()))
138140
attemptScaleDown = false
139141
}
140142

@@ -145,9 +147,14 @@ func (a *autoscaler) Start(ctx context.Context) {
145147
}
146148

147149
func (a *autoscaler) Autoscale(ctx context.Context) {
150+
select {
148151
// We trigger the autoscaler asynchronously by using the channel so that the scale down refresh
149152
// period is reset.
150-
a.trigger <- struct{}{}
153+
case a.trigger <- struct{}{}:
154+
default:
155+
// We don't want to block if the channel's buffer is full, it will be triggered eventually.
156+
157+
}
151158
}
152159

153160
func (a *autoscaler) syncAutoscale(ctx context.Context, attemptScaleDown bool) error {

vendor/knative.dev/eventing/pkg/scheduler/statefulset/scheduler.go

Lines changed: 50 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -228,15 +228,6 @@ func (s *StatefulSetScheduler) Schedule(vpod scheduler.VPod) ([]duckv1alpha1.Pla
228228
s.reservedMu.Lock()
229229
defer s.reservedMu.Unlock()
230230

231-
vpods, err := s.vpodLister()
232-
if err != nil {
233-
return nil, err
234-
}
235-
vpodFromLister := st.GetVPod(vpod.GetKey(), vpods)
236-
if vpodFromLister != nil && vpod.GetResourceVersion() != vpodFromLister.GetResourceVersion() {
237-
return nil, fmt.Errorf("vpod to schedule has resource version different from one in indexer")
238-
}
239-
240231
placements, err := s.scheduleVPod(vpod)
241232
if placements == nil {
242233
return placements, err
@@ -253,7 +244,7 @@ func (s *StatefulSetScheduler) Schedule(vpod scheduler.VPod) ([]duckv1alpha1.Pla
253244
}
254245

255246
func (s *StatefulSetScheduler) scheduleVPod(vpod scheduler.VPod) ([]duckv1alpha1.Placement, error) {
256-
logger := s.logger.With("key", vpod.GetKey())
247+
logger := s.logger.With("key", vpod.GetKey(), zap.String("component", "scheduler"))
257248
// Get the current placements state
258249
// Quite an expensive operation but safe and simple.
259250
state, err := s.stateAccessor.State(s.reserved)
@@ -262,18 +253,60 @@ func (s *StatefulSetScheduler) scheduleVPod(vpod scheduler.VPod) ([]duckv1alpha1
262253
return nil, err
263254
}
264255

256+
// Clean up reserved from removed resources that don't appear in the vpod list anymore and have
257+
// no pending resources.
258+
reserved := make(map[types.NamespacedName]map[string]int32)
259+
for k, v := range s.reserved {
260+
if pendings, ok := state.Pending[k]; ok {
261+
if pendings == 0 {
262+
reserved[k] = map[string]int32{}
263+
} else {
264+
reserved[k] = v
265+
}
266+
}
267+
}
268+
s.reserved = reserved
269+
265270
logger.Debugw("scheduling", zap.Any("state", state))
266271

267272
existingPlacements := vpod.GetPlacements()
268273
var left int32
269274

270-
// Remove unschedulable pods from placements
275+
// Remove unschedulable or adjust overcommitted pods from placements
271276
var placements []duckv1alpha1.Placement
272277
if len(existingPlacements) > 0 {
273278
placements = make([]duckv1alpha1.Placement, 0, len(existingPlacements))
274279
for _, p := range existingPlacements {
275-
if state.IsSchedulablePod(st.OrdinalFromPodName(p.PodName)) {
276-
placements = append(placements, *p.DeepCopy())
280+
p := p.DeepCopy()
281+
ordinal := st.OrdinalFromPodName(p.PodName)
282+
283+
if !state.IsSchedulablePod(ordinal) {
284+
continue
285+
}
286+
287+
// Handle overcommitted pods.
288+
if state.FreeCap[ordinal] < 0 {
289+
// vr > free => vr: 9, overcommit 4 -> free: 0, vr: 5, pending: +4
290+
// vr = free => vr: 4, overcommit 4 -> free: 0, vr: 0, pending: +4
291+
// vr < free => vr: 3, overcommit 4 -> free: -1, vr: 0, pending: +3
292+
293+
overcommit := -state.FreeCap[ordinal]
294+
295+
if p.VReplicas >= overcommit {
296+
state.SetFree(ordinal, 0)
297+
state.Pending[vpod.GetKey()] += overcommit
298+
299+
p.VReplicas = p.VReplicas - overcommit
300+
} else {
301+
state.SetFree(ordinal, p.VReplicas-overcommit)
302+
state.Pending[vpod.GetKey()] += p.VReplicas
303+
304+
p.VReplicas = 0
305+
}
306+
}
307+
308+
if p.VReplicas > 0 {
309+
placements = append(placements, *p)
277310
}
278311
}
279312
}
@@ -312,7 +345,7 @@ func (s *StatefulSetScheduler) scheduleVPod(vpod scheduler.VPod) ([]duckv1alpha1
312345
} else { //Predicates and priorities must be used for scheduling
313346
// Need less => scale down
314347
if tr > vpod.GetVReplicas() && state.DeschedPolicy != nil {
315-
logger.Debugw("scaling down", zap.Int32("vreplicas", tr), zap.Int32("new vreplicas", vpod.GetVReplicas()))
348+
logger.Infow("scaling down", zap.Int32("vreplicas", tr), zap.Int32("new vreplicas", vpod.GetVReplicas()))
316349
placements = s.removeReplicasWithPolicy(vpod, tr-vpod.GetVReplicas(), placements)
317350

318351
// Do not trigger the autoscaler to avoid unnecessary churn
@@ -325,17 +358,18 @@ func (s *StatefulSetScheduler) scheduleVPod(vpod scheduler.VPod) ([]duckv1alpha1
325358
// Need more => scale up
326359
// rebalancing needed for all vreps most likely since there are pending vreps from previous reconciliation
327360
// can fall here when vreps scaled up or after eviction
328-
logger.Debugw("scaling up with a rebalance (if needed)", zap.Int32("vreplicas", tr), zap.Int32("new vreplicas", vpod.GetVReplicas()))
361+
logger.Infow("scaling up with a rebalance (if needed)", zap.Int32("vreplicas", tr), zap.Int32("new vreplicas", vpod.GetVReplicas()))
329362
placements, left = s.rebalanceReplicasWithPolicy(vpod, vpod.GetVReplicas(), placements)
330363
}
331364
}
332365

333366
if left > 0 {
334367
// Give time for the autoscaler to do its job
335-
logger.Info("not enough pod replicas to schedule. Awaiting autoscaler", zap.Any("placement", placements), zap.Int32("left", left))
368+
logger.Infow("not enough pod replicas to schedule")
336369

337370
// Trigger the autoscaler
338371
if s.autoscaler != nil {
372+
logger.Infow("Awaiting autoscaler", zap.Any("placement", placements), zap.Int32("left", left))
339373
s.autoscaler.Autoscale(s.ctx)
340374
}
341375

vendor/knative.dev/eventing/test/e2e-common.sh

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,7 @@ readonly CONFIG_TRACING_CONFIG="test/config/config-tracing.yaml"
4949
readonly KNATIVE_EVENTING_MONITORING_YAML="test/config/monitoring.yaml"
5050

5151
# The number of controlplane replicas to run.
52-
readonly REPLICAS=3
52+
readonly REPLICAS=${REPLICAS:-3}
5353

5454
# Should deploy a Knative Monitoring as well
5555
readonly DEPLOY_KNATIVE_MONITORING="${DEPLOY_KNATIVE_MONITORING:-1}"
@@ -76,15 +76,15 @@ UNINSTALL_LIST=()
7676

7777
# Setup the Knative environment for running tests.
7878
function knative_setup() {
79+
install_cert_manager || fail_test "Could not install Cert Manager"
80+
7981
install_knative_eventing "HEAD"
8082

8183
install_mt_broker || fail_test "Could not install MT Channel Based Broker"
8284

8385
enable_sugar || fail_test "Could not enable Sugar Controller Injection"
8486

8587
unleash_duck || fail_test "Could not unleash the chaos duck"
86-
87-
install_cert_manager || fail_test "Could not install Cert Manager"
8888
}
8989

9090
function scale_controlplane() {
@@ -147,6 +147,12 @@ function install_knative_eventing() {
147147
-f "${EVENTING_CORE_NAME}" || return 1
148148
UNINSTALL_LIST+=( "${EVENTING_CORE_NAME}" )
149149

150+
local EVENTING_TLS_NAME=${TMP_DIR}/${EVENTING_TLS_YAML##*/}
151+
sed "s/namespace: ${KNATIVE_DEFAULT_NAMESPACE}/namespace: ${SYSTEM_NAMESPACE}/g" ${EVENTING_TLS_YAML} > ${EVENTING_TLS_NAME}
152+
kubectl apply \
153+
-f "${EVENTING_TLS_NAME}" || return 1
154+
UNINSTALL_LIST+=( "${EVENTING_TLS_NAME}" )
155+
150156
kubectl patch horizontalpodautoscalers.autoscaling -n ${SYSTEM_NAMESPACE} eventing-webhook -p '{"spec": {"minReplicas": '${REPLICAS}'}}' || return 1
151157

152158
else

vendor/knative.dev/eventing/test/e2e-rekt-tests.sh

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -38,4 +38,10 @@ echo "Running E2E Reconciler Tests"
3838

3939
go_test_e2e -timeout=1h ./test/rekt || fail_test
4040

41+
echo "Running E2E Reconciler Tests with strict transport encryption"
42+
43+
kubectl apply -Rf "$(dirname "$0")/config-transport-encryption"
44+
45+
go_test_e2e -timeout=1h ./test/rekt -run TLS || fail_test
46+
4147
success

vendor/knative.dev/eventing/test/e2e-upgrade-tests.sh

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -26,8 +26,7 @@ source "$(dirname "${BASH_SOURCE[0]}")/e2e-common.sh"
2626
# Overrides
2727

2828
function knative_setup {
29-
# Nothing to do at setup
30-
true
29+
install_cert_manager || return $?
3130
}
3231

3332
function install_test_resources {

vendor/knative.dev/pkg/webhook/webhook.go

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -74,6 +74,17 @@ type Options struct {
7474
// ControllerOptions encapsulates options for creating a new controller,
7575
// including throttling and stats behavior.
7676
ControllerOptions *controller.ControllerOptions
77+
78+
// EnableHTTP2 enables HTTP2 for webhooks.
79+
// Mitigate CVE-2023-44487 by disabling HTTP2 by default until the Go
80+
// standard library and golang.org/x/net are fully fixed.
81+
// Right now, it is possible for authenticated and unauthenticated users to
82+
// hold open HTTP2 connections and consume huge amounts of memory.
83+
// See:
84+
// * https://github.com/kubernetes/kubernetes/pull/121120
85+
// * https://github.com/kubernetes/kubernetes/issues/121197
86+
// * https://github.com/golang/go/issues/63417#issuecomment-1758858612
87+
EnableHTTP2 bool
7788
}
7889

7990
// Operation is the verb being operated on
@@ -237,12 +248,19 @@ func (wh *Webhook) Run(stop <-chan struct{}) error {
237248
QuietPeriod: wh.Options.GracePeriod,
238249
}
239250

251+
// If TLSNextProto is not nil, HTTP/2 support is not enabled automatically.
252+
nextProto := map[string]func(*http.Server, *tls.Conn, http.Handler){}
253+
if wh.Options.EnableHTTP2 {
254+
nextProto = nil
255+
}
256+
240257
server := &http.Server{
241258
ErrorLog: log.New(&zapWrapper{logger}, "", 0),
242259
Handler: drainer,
243260
Addr: fmt.Sprint(":", wh.Options.Port),
244261
TLSConfig: wh.tlsConfig,
245262
ReadHeaderTimeout: time.Minute, //https://medium.com/a-journey-with-go/go-understand-and-mitigate-slowloris-attack-711c1b1403f6
263+
TLSNextProto: nextProto,
246264
}
247265

248266
var serve = server.ListenAndServe

vendor/knative.dev/reconciler-test/pkg/environment/namespace.go

Lines changed: 17 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -122,12 +122,26 @@ func (mr *MagicEnvironment) CreateNamespaceIfNeeded() error {
122122
return fmt.Errorf("error copying the image pull Secret: %s", err)
123123
}
124124

125-
_, err = c.CoreV1().ServiceAccounts(mr.namespace).Patch(context.Background(), sa.Name, types.StrategicMergePatchType,
126-
[]byte(`{"imagePullSecrets":[{"name":"`+mr.imagePullSecretName+`"}]}`), metav1.PatchOptions{})
125+
for _, secret := range sa.ImagePullSecrets {
126+
if secret.Name == mr.imagePullSecretName {
127+
return nil
128+
}
129+
}
130+
131+
// Prevent overwriting existing imagePullSecrets
132+
patch := `[{"op":"add","path":"/imagePullSecrets/-","value":{"name":"` + mr.imagePullSecretName + `"}}]`
133+
if len(sa.ImagePullSecrets) == 0 {
134+
patch = `[{"op":"add","path":"/imagePullSecrets","value":[{"name":"` + mr.imagePullSecretName + `"}]}]`
135+
}
136+
137+
_, err = c.CoreV1().ServiceAccounts(mr.namespace).Patch(context.Background(), sa.Name, types.JSONPatchType,
138+
[]byte(patch), metav1.PatchOptions{})
127139
if err != nil {
128-
return fmt.Errorf("patch failed on NS/SA (%s/%s): %s", mr.namespace, sa.Name, err)
140+
return fmt.Errorf("patch failed on NS/SA (%s/%s): %w",
141+
mr.namespace, sa.Name, err)
129142
}
130143
}
144+
131145
return nil
132146
}
133147

0 commit comments

Comments (0)