Use correct output format for CNI Add in networkPolicyOnly mode (#2037)
As per the CNI specification for the Add command:
> If the plugin was supplied a prevResult as part of its input
configuration, it MUST handle prevResult by either passing it through,
or modifying it appropriately.

In networkPolicyOnly mode, CNI chaining is used. Antrea receives a
prevResult from the container runtime and must output it on success. We
do not modify the result, but we may convert it to a different CNI
version format (the default one for Antrea).

Previously, Antrea output the entire input configuration (which
includes prevResult), causing containerd to return an error.

The integration test was incorrect and had to be fixed.

Fixes #2002
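
To illustrate the conversion the commit message describes, here is a minimal standalone sketch, assuming the containernetworking/cni v0.8.x library that Antrea vendored at the time; the sample prevResult JSON and all variable names are illustrative, not taken from the Antrea code base:

package main

import (
	"bytes"
	"fmt"

	current "github.com/containernetworking/cni/pkg/types/current"
	"github.com/containernetworking/cni/pkg/version"
)

func main() {
	// A made-up prevResult, as a runtime might supply it in CNI 0.3.0 format.
	prevJSON := []byte(`{
		"cniVersion": "0.3.0",
		"interfaces": [{"name": "eth0"}],
		"ips": [{"version": "4", "address": "10.0.0.2/24", "gateway": "10.0.0.1"}]
	}`)

	// Decode the result using the CNI version it was produced under.
	prev, err := version.NewResult("0.3.0", prevJSON)
	if err != nil {
		panic(err)
	}
	// Convert it into the library's current Result type, the same conversion
	// that parsePrevResultFromRequest performs via NewResultFromResult.
	result, err := current.NewResultFromResult(prev)
	if err != nil {
		panic(err)
	}
	// Serialize only the result, not the whole input config, which is what
	// the new resultToResponse helper does.
	var buf bytes.Buffer
	if err := result.PrintTo(&buf); err != nil {
		panic(err)
	}
	fmt.Println(buf.String())
}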
antoninbas committed May 3, 2021
1 parent fd9fb65 commit a775287
Showing 2 changed files with 57 additions and 49 deletions.
pkg/agent/cniserver/server.go: 32 changes (19 additions, 13 deletions)
@@ -188,6 +188,12 @@ func updateResultIfaceConfig(result *current.Result, defaultIPv4Gateway net.IP,
 	}
 }
 
+func resultToResponse(result *current.Result) *cnipb.CniCmdResponse {
+	var resultBytes bytes.Buffer
+	_ = result.PrintTo(&resultBytes)
+	return &cnipb.CniCmdResponse{CniResult: resultBytes.Bytes()}
+}
+
 func (s *CNIServer) loadNetworkConfig(request *cnipb.CniCmdRequest) (*CNIConfig, error) {
 	cniConfig := &CNIConfig{}
 	cniConfig.CniCmdArgs = request.CniArgs
@@ -333,7 +339,7 @@ func (s *CNIServer) parsePrevResultFromRequest(networkConfig *NetworkConfig) (*c
 		klog.Errorf("Failed to parse previous network configuration")
 		return nil, s.decodingFailureResponse("prevResult")
 	}
-	// Convert whatever the IPAM result was into the current Result type (for the current CNI
+	// Convert whatever the result was into the current Result type (for the current CNI
 	// version)
 	prevResult, err := current.NewResultFromResult(networkConfig.PrevResult)
 	if err != nil {
@@ -456,12 +462,10 @@ func (s *CNIServer) CmdAdd(ctx context.Context, request *cnipb.CniCmdRequest) (*
 	// Notify the Pod update event to required components.
 	s.podUpdates <- v1beta2.PodReference{Name: podName, Namespace: podNamespace}
 
-	var resultBytes bytes.Buffer
-	_ = result.PrintTo(&resultBytes)
 	klog.Infof("CmdAdd for container %v succeeded", cniConfig.ContainerId)
 	// mark success as true to avoid rollback
 	success = true
-	return &cnipb.CniCmdResponse{CniResult: resultBytes.Bytes()}, nil
+	return resultToResponse(result), nil
 }
 
 func (s *CNIServer) CmdDel(_ context.Context, request *cnipb.CniCmdRequest) (
@@ -596,42 +600,44 @@ func (s *CNIServer) Run(stopCh <-chan struct{}) {
 // be called prior to Antrea CNI to allocate IP and ports. Antrea takes allocated port
 // and hooks it to OVS br-int.
 func (s *CNIServer) interceptAdd(cniConfig *CNIConfig) (*cnipb.CniCmdResponse, error) {
-	klog.Infof("CNI Chaining: add")
+	klog.Infof("CNI Chaining: add for container %s", cniConfig.ContainerId)
 	prevResult, response := s.parsePrevResultFromRequest(cniConfig.NetworkConfig)
 	if response != nil {
+		klog.Infof("Failed to parse prev result for container %s", cniConfig.ContainerId)
 		return response, nil
 	}
 	podName := string(cniConfig.K8S_POD_NAME)
 	podNamespace := string(cniConfig.K8S_POD_NAMESPACE)
-	result := make([]byte, 0, 0)
 	if err := s.podConfigurator.connectInterceptedInterface(
 		podName,
 		podNamespace,
 		cniConfig.ContainerId,
 		s.hostNetNsPath(cniConfig.Netns),
 		cniConfig.Ifname,
 		prevResult.IPs); err != nil {
-		return &cnipb.CniCmdResponse{CniResult: result}, fmt.Errorf("failed to connect container %s to ovs: %w", cniConfig.ContainerId, err)
+		return &cnipb.CniCmdResponse{CniResult: []byte("")}, fmt.Errorf("failed to connect container %s to ovs: %w", cniConfig.ContainerId, err)
 	}
 	// Notify the Pod update event to required components.
 	s.podUpdates <- v1beta2.PodReference{Name: podName, Namespace: podNamespace}
 
-	return &cnipb.CniCmdResponse{CniResult: cniConfig.NetworkConfiguration}, nil
+	// we return prevResult, which should be exactly what we received from
+	// the runtime, potentially converted to the current CNI version used by
+	// Antrea.
+	return resultToResponse(prevResult), nil
 }
 
 func (s *CNIServer) interceptDel(cniConfig *CNIConfig) (*cnipb.CniCmdResponse, error) {
-	klog.Infof("CNI Chaining: delete")
-	return &cnipb.CniCmdResponse{CniResult: make([]byte, 0, 0)}, s.podConfigurator.disconnectInterceptedInterface(
+	klog.Infof("CNI Chaining: delete for container %s", cniConfig.ContainerId)
+	return &cnipb.CniCmdResponse{CniResult: []byte("")}, s.podConfigurator.disconnectInterceptedInterface(
 		string(cniConfig.K8S_POD_NAME),
 		string(cniConfig.K8S_POD_NAMESPACE),
 		cniConfig.ContainerId)
 }
 
-func (s *CNIServer) interceptCheck(_ *CNIConfig) (*cnipb.CniCmdResponse, error) {
-	klog.Infof("CNI Chaining: check")
+func (s *CNIServer) interceptCheck(cniConfig *CNIConfig) (*cnipb.CniCmdResponse, error) {
+	klog.Infof("CNI Chaining: check for container %s", cniConfig.ContainerId)
 	// TODO, check for host interface setup later
-	return &cnipb.CniCmdResponse{CniResult: make([]byte, 0, 0)}, nil
+	return &cnipb.CniCmdResponse{CniResult: []byte("")}, nil
 }
 
 // reconcile performs startup reconciliation for the CNI server. The CNI server is in charge of
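For context on the interceptAdd change: a chained CNI plugin is expected to parse prevResult from its input configuration, do its work, and print that result back on success. Below is a minimal sketch of the pattern, assuming the containernetworking/cni v0.8.x skel, types, and version packages; the plugin itself (netConf, cmdAdd, etc.) is a hypothetical example, not Antrea code:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/containernetworking/cni/pkg/skel"
	"github.com/containernetworking/cni/pkg/types"
	current "github.com/containernetworking/cni/pkg/types/current"
	"github.com/containernetworking/cni/pkg/version"
)

// netConf embeds types.NetConf, which carries cniVersion, name, type, and
// the raw prevResult supplied by the runtime.
type netConf struct {
	types.NetConf
}

func cmdAdd(args *skel.CmdArgs) error {
	conf := &netConf{}
	if err := json.Unmarshal(args.StdinData, conf); err != nil {
		return fmt.Errorf("failed to parse network configuration: %w", err)
	}
	// Decode conf.RawPrevResult into conf.PrevResult.
	if err := version.ParsePrevResult(&conf.NetConf); err != nil {
		return err
	}
	if conf.PrevResult == nil {
		return fmt.Errorf("must be called as a chained plugin with a prevResult")
	}
	prevResult, err := current.NewResultFromResult(conf.PrevResult)
	if err != nil {
		return err
	}

	// ... plugin-specific work goes here (for Antrea, hooking the
	// already-configured interface up to OVS) ...

	// Per the spec: print prevResult, not the entire input configuration.
	return types.PrintResult(prevResult, conf.CNIVersion)
}

func cmdCheck(args *skel.CmdArgs) error { return nil }
func cmdDel(args *skel.CmdArgs) error   { return nil }

func main() {
	skel.PluginMain(cmdAdd, cmdCheck, cmdDel, version.All, "prevResult pass-through example")
}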
test/integration/agent/cniserver_test.go: 74 changes (38 additions, 36 deletions)
@@ -108,39 +108,40 @@ const (
 	ipamEndStr = `
 }`
 
-	chainCNINetworkConfigTemplate = `{
-    "cniVersion":"{{.CNIVersion}}",
-    "name":"azure",
-    "prevResult":
-        {
-            "cniVersion":"{{.CNIVersion}}",
-            "interfaces":
-                [
-                    {"name":"eth0"},
-                    {"name":"eth0"}
-                ],
-            "ips":
-                [
-                    {
-                        "version":"4",
-                        "address":"{{.Subnet}}",
-                        "gateway":"{{.Gateway}}"}
-                ],
-            "routes":
-                [
-                    {
-                        "dst":"{{ (index .Routes 0) }}",
-                        "gw":"{{.Gateway}}"
-                    }
-                ],
-            "dns":
-                {
-                    "nameservers":
-                        [
-                            "{{.DNS}}"
-                        ]
-                }
-        }
-}`
+	chainCNIPrevResultTemplate = `{
+    "cniVersion":"{{.CNIVersion}}",
+    "interfaces":
+        [
+            {"name":"eth0"},
+            {"name":"eth0"}
+        ],
+    "ips":
+        [
+            {
+                "version":"4",
+                "address":"{{.Subnet}}",
+                "gateway":"{{.Gateway}}"}
+        ],
+    "routes":
+        [
+            {
+                "dst":"{{ (index .Routes 0) }}",
+                "gw":"{{.Gateway}}"
+            }
+        ],
+    "dns":
+        {
+            "nameservers":
+                [
+                    "{{.DNS}}"
+                ]
+        }
+}`
+
+	chainCNIConfStr = `{
+    "cniVersion":"%s",
+    "name":"azure",
+    "prevResult":%s
+}`
 )

@@ -799,12 +800,12 @@ func TestCNIServerChaining(t *testing.T) {
 	}
 
 	// create request
-	confParser := template.Must(template.New("").Parse(chainCNINetworkConfigTemplate))
-	conf := bytes.NewBuffer(nil)
-	if err := confParser.Execute(conf, tc); err != nil {
+	prevResultParser := template.Must(template.New("").Parse(chainCNIPrevResultTemplate))
+	prevResult := bytes.NewBuffer(nil)
+	if err := prevResultParser.Execute(prevResult, tc); err != nil {
 		t.Errorf("parse template failed: %v", err)
 	}
-	tc.networkConfig = conf.String()
+	tc.networkConfig = fmt.Sprintf(chainCNIConfStr, tc.CNIVersion, prevResult.String())
 	cniReq := tc.createCmdArgs(netNS, "")
 
 	// test cmdAdd
@@ -828,7 +829,8 @@
 	mock.InOrder(orderedCalls...)
 	cniResp, err := server.CmdAdd(ctx, cniReq)
 	testRequire.Nil(err)
-	testRequire.Equal(tc.networkConfig, string(cniResp.CniResult))
+	// in chaining mode, we do not modify the result
+	testRequire.JSONEq(prevResult.String(), string(cniResp.CniResult))
 
 	// test cmdDel
 	containterHostRt := &net.IPNet{IP: podIP, Mask: net.CIDRMask(32, 32)}
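
A note on the assertion change above: unlike Equal, testify's JSONEq compares JSON documents semantically, so the key-order and whitespace differences introduced by the CNI version conversion do not fail the test. A tiny self-contained illustration (hypothetical, not from the Antrea repository):

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestJSONEqIgnoresFormatting(t *testing.T) {
	// Passes: same JSON content, different key order and whitespace.
	require.JSONEq(t,
		`{"cniVersion":"0.3.0","ips":[]}`,
		`{ "ips": [], "cniVersion": "0.3.0" }`)
}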