Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion ci/kind/test-secondary-network-kind.sh
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ printf -v IMAGES "%s " "${IMAGE_LIST[@]}"
function setup_cluster {
args=$1
echo "creating test bed with args $args"
eval "timeout 600 $TESTBED_CMD create kind $args"
eval "timeout 600 $TESTBED_CMD create kind --ip-family dual $args"
}

function run_test {
Expand Down
109 changes: 107 additions & 2 deletions docs/antrea-ipam.md
Original file line number Diff line number Diff line change
Expand Up @@ -322,7 +322,9 @@ will get same IP after recreated.

With the AntreaIPAM feature, Antrea can allocate IPs for Pod secondary networks,
including both [secondary networks managed by Antrea](secondary-network.md) and
secondary networks managed by [Multus](cookbooks/multus).
secondary networks managed by [Multus](cookbooks/multus). IPv4, IPv6, and
dual-stack (IPv4 + IPv6) configurations are all supported for secondary
networks.

### Prerequisites

Expand All @@ -333,7 +335,7 @@ is not enabled by default.

### CNI IPAM configuration

To configure Antrea IPAM, `antrea` should be specified as the IPAM plugin in the
To configure Antrea IPAM, `antrea` must be specified as the IPAM plugin in
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

should -> must

the CNI IPAM configuration, and at least one Antrea IPPool should be specified
in the `ippools` field. IPs will be allocated from the specified IPPool(s) for
the secondary network.
Expand All @@ -352,6 +354,22 @@ the secondary network.
}
```

To allocate an IPv6 address from an IPv6 IPPool:

```json
{
"cniVersion": "0.3.0",
"name": "ipv6-net-1",
"type": "macvlan",
"master": "eth0",
"mode": "bridge",
"ipam": {
"type": "antrea",
"ippools": [ "ipv6-pool-1" ]
}
}
```

Multiple IPPools can be specified to allocate multiple IPs from each IPPool for
the secondary network. For example, you can specify one IPPool to allocate an
IPv4 address and another IPPool to allocate an IPv6 address in the dual-stack
Expand Down Expand Up @@ -436,6 +454,46 @@ spec:
}'
```

For an IPv6-only secondary network:

```yaml
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: ipv6-net-1
spec:
config: '{
"cniVersion": "0.3.0",
"type": "macvlan",
"master": "eth0",
"mode": "bridge",
"ipam": {
"type": "antrea",
"ippools": [ "ipv6-pool-1" ]
}
}'
```

For a dual-stack secondary network with both IPv4 and IPv6:

```yaml
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: dual-stack-net-1
spec:
config: '{
"cniVersion": "0.3.0",
"type": "macvlan",
"master": "eth0",
"mode": "bridge",
"ipam": {
"type": "antrea",
"ippools": [ "ipv4-pool-1", "ipv6-pool-1" ]
}
}'
```

## `IPPool` CRD

Antrea IP pools are defined with the `IPPool` CRD. The following two examples
Expand Down Expand Up @@ -513,6 +571,53 @@ spec:
}'
```

VLAN secondary networks also support IPv6 and dual-stack. The following example
shows a dual-stack VLAN configuration with both an IPv4 and an IPv6 IPPool:

```yaml
apiVersion: "crd.antrea.io/v1beta1"
kind: IPPool
metadata:
name: ipv4-vlan-pool
spec:
ipRanges:
- cidr: "10.10.1.0/26"
subnetInfo:
gateway: "10.10.1.1"
prefixLength: 24
vlan: 100

---
apiVersion: "crd.antrea.io/v1beta1"
kind: IPPool
metadata:
name: ipv6-vlan-pool
spec:
ipRanges:
- start: "3ffe:ffff:1:01ff::0100"
end: "3ffe:ffff:1:01ff::0200"
subnetInfo:
gateway: "3ffe:ffff:1:01ff::1"
prefixLength: 64
vlan: 100

---
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: dual-stack-vlan-net
spec:
config: '{
"cniVersion": "0.3.0",
"type": "antrea",
"networkType": "vlan",
"ipam": {
"type": "antrea",
"ippools": [ "ipv4-vlan-pool", "ipv6-vlan-pool" ]
}
}'
```

You can refer to the [Antrea secondary network document](secondary-network.md)
for more information about Antrea secondary VLAN network configuration.

Expand Down
163 changes: 129 additions & 34 deletions pkg/agent/cniserver/ipam/antrea_ipam.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@ import (
current "github.com/containernetworking/cni/pkg/types/100"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
utilnet "k8s.io/utils/net"

"antrea.io/antrea/pkg/agent/cniserver/types"
crdv1b1 "antrea.io/antrea/pkg/apis/crd/v1beta1"
Expand Down Expand Up @@ -135,10 +136,29 @@ func (d *AntreaIPAM) setController(controller *AntreaIPAMController) {
d.controller = controller
}

// Add allocates the next available IP address from the associated IP Pool. The
// allocated IP and associated resource will be stored in the IP Pool status.
// findMatchingIP returns the first non-nil IP from ips that belongs to the
// given IP family, together with a new slice holding the remaining IPs. The
// caller's slice is never mutated: the full slice expression ips[:i:i] caps
// the prefix so append must allocate a fresh backing array. If no IP of the
// requested family is found, it returns nil and the input slice unchanged.
func findMatchingIP(ips []net.IP, ipVersion utilnet.IPFamily) (net.IP, []net.IP) {
	for i, ip := range ips {
		// Skip invalid entries defensively; a nil net.IP has no family.
		if ip == nil {
			continue
		}
		// IPFamilyOf operates on net.IP directly, avoiding the string
		// round-trip of IPFamilyOfString(ip.String()).
		if utilnet.IPFamilyOf(ip) == ipVersion {
			remaining := append(ips[:i:i], ips[i+1:]...)
			return ip, remaining
		}
	}
	return nil, ips
}

// Add allocates IP addresses from the associated IP Pools. It supports IPv4,
// IPv6, and dual-stack configurations. For dual-stack, at most one IP per IP
// family will be allocated even if multiple Pools exist for the same family.
// The allocated IPs and associated resources will be stored in the IP Pool
// status.
func (d *AntreaIPAM) Add(args *invoke.Args, k8sArgs *types.K8sArgs, networkConfig []byte) (bool, *IPAMResult, error) {
mine, allocator, ips, reservedOwner, err := d.owns(k8sArgs)
mine, allocators, ips, reservedOwner, err := d.owns(k8sArgs)
if err != nil {
return true, nil, err
}
Expand All @@ -148,29 +168,92 @@ func (d *AntreaIPAM) Add(args *invoke.Args, k8sArgs *types.K8sArgs, networkConfi
}

owner := *getAllocationOwner(args, k8sArgs, reservedOwner, false)
var ip net.IP
var subnetInfo *crdv1b1.SubnetInfo
if reservedOwner != nil {
ip, subnetInfo, err = allocator.AllocateReservedOrNext(crdv1b1.IPAddressPhaseAllocated, owner)
} else if len(ips) == 0 {
ip, subnetInfo, err = allocator.AllocateNext(crdv1b1.IPAddressPhaseAllocated, owner)
} else {
ip = ips[0]
subnetInfo, err = allocator.AllocateIP(ip, crdv1b1.IPAddressPhaseAllocated, owner)
}
if err != nil {
return true, nil, err
}
result := IPAMResult{Result: current.Result{CNIVersion: current.ImplementedSpecVersion}}

var allocatedAllocators []*poolallocator.IPPoolAllocator
defer func() {
if err != nil {
// Release already allocated IPs on error.
podOwner := getAllocationPodOwner(args, k8sArgs, nil, false)
for _, a := range allocatedAllocators {
a.ReleaseContainer(podOwner.ContainerID, podOwner.IFName)
}
}
}()

remainingIPs := ips
var hasIPv4Pool, hasIPv6Pool bool
var allocatedIPv4, allocatedIPv6 bool
for _, allocator := range allocators {
switch allocator.IPVersion {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

if / else may be simpler?

case utilnet.IPv4:
hasIPv4Pool = true
if allocatedIPv4 {
continue
}
case utilnet.IPv6:
hasIPv6Pool = true
if allocatedIPv6 {
continue
}
}

var ip net.IP
var subnetInfo *crdv1b1.SubnetInfo
if reservedOwner != nil {
ip, subnetInfo, err = allocator.AllocateReservedOrNext(crdv1b1.IPAddressPhaseAllocated, owner)
} else if len(remainingIPs) == 0 {
ip, subnetInfo, err = allocator.AllocateNext(crdv1b1.IPAddressPhaseAllocated, owner)
} else {
// Match the IP to the allocator's IP version.
var matchedIP net.IP
matchedIP, remainingIPs = findMatchingIP(remainingIPs, allocator.IPVersion)
if matchedIP != nil {
ip = matchedIP
subnetInfo, err = allocator.AllocateIP(ip, crdv1b1.IPAddressPhaseAllocated, owner)
} else {
ip, subnetInfo, err = allocator.AllocateNext(crdv1b1.IPAddressPhaseAllocated, owner)
}
}
if err != nil {
return true, nil, err
}
allocatedAllocators = append(allocatedAllocators, allocator)

klog.V(4).InfoS("IP allocation successful", "IP", ip.String(), "Pod", string(k8sArgs.K8S_POD_NAME))
switch allocator.IPVersion {
case utilnet.IPv4:
allocatedIPv4 = true
case utilnet.IPv6:
allocatedIPv6 = true
}

result := IPAMResult{Result: current.Result{CNIVersion: current.ImplementedSpecVersion}, VLANID: uint16(subnetInfo.VLAN)}
gwIP := net.ParseIP(subnetInfo.Gateway)
klog.V(4).InfoS("IP allocation successful", "IP", ip.String(), "Pod", string(k8sArgs.K8S_POD_NAME))

ipConfig, defaultRoute := generateIPConfig(ip, int(subnetInfo.PrefixLength), gwIP)
gwIP := net.ParseIP(subnetInfo.Gateway)
ipConfig, defaultRoute := generateIPConfig(ip, int(subnetInfo.PrefixLength), gwIP)

result.Routes = append(result.Routes, defaultRoute)
result.IPs = append(result.IPs, ipConfig)
result.Routes = append(result.Routes, defaultRoute)
result.IPs = append(result.IPs, ipConfig)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Cannot we get >1 v4 or v6 IPs?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, I think it’s possible. Current code returns allocators for all valid pools from the annotation (getPoolAllocatorsByPod), and we append one IPConfig per allocator. It’s intended for dual-stack (one v4 + one v6) but it doesn’t enforce ‘one pool per family’, so multiple v4/v6 can happen.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think we should allocate at most 1 v4 and 1 v6 IP.

And probably we should report an error if no v4 / v6 IP is available, but the Pod does have a v4 / v6 pool specified.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Updated. Please help check it again. Thanks.

vlanID := uint16(subnetInfo.VLAN)
if result.VLANID == 0 {
result.VLANID = vlanID
} else if vlanID != 0 && result.VLANID != vlanID {
err = fmt.Errorf("IPPools have conflicting VLAN IDs %d and %d for dual-stack allocation", result.VLANID, vlanID)
return true, nil, err
}
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Good to break if both v4 and v6 are already allocated.

}

if hasIPv4Pool && !allocatedIPv4 {
err = fmt.Errorf("failed to allocate IPv4 address for Pod %s/%s", string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_NAME))
return true, nil, err
}
if hasIPv6Pool && !allocatedIPv6 {
err = fmt.Errorf("failed to allocate IPv6 address for Pod %s/%s", string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_NAME))
return true, nil, err
}

// All allocations successful, clear the deferred release.
allocatedAllocators = nil
return true, &result, nil
}

Expand All @@ -189,7 +272,7 @@ func (d *AntreaIPAM) Del(args *invoke.Args, k8sArgs *types.K8sArgs, networkConfi

// Check verifies the IP associated with the resource is tracked in the IP Pool status.
func (d *AntreaIPAM) Check(args *invoke.Args, k8sArgs *types.K8sArgs, networkConfig []byte) (bool, error) {
mine, allocator, _, _, err := d.owns(k8sArgs)
mine, allocators, _, _, err := d.owns(k8sArgs)
if err != nil {
return true, err
}
Expand All @@ -198,15 +281,25 @@ func (d *AntreaIPAM) Check(args *invoke.Args, k8sArgs *types.K8sArgs, networkCon
return false, nil
}

ip, err := allocator.GetContainerIP(args.ContainerID, "")
if err != nil {
return true, err
found := false
var lastErr error
for _, allocator := range allocators {
ip, err := allocator.GetContainerIP(args.ContainerID, "")
if err != nil {
lastErr = err
continue
}
if ip != nil {
found = true
}
}

if ip == nil {
return true, fmt.Errorf("no IP Address association found for container %s", string(k8sArgs.K8S_POD_NAME))
if found {
return true, nil
}
return true, nil
if lastErr != nil {
return true, lastErr
}
return true, fmt.Errorf("no IP Address association found for container %s", string(k8sArgs.K8S_POD_NAME))
}

// SecondaryNetworkAllocate allocates IP addresses for a Pod secondary network interface, based on
Expand Down Expand Up @@ -262,9 +355,11 @@ func (d *AntreaIPAM) SecondaryNetworkAllocate(podOwner *crdv1b1.PodOwner, networ
// assume the CNI version >= 0.3.0, and so do not check the number of
// addresses.
result.IPs = append(result.IPs, ipConfig)
vlanID := uint16(subnetInfo.VLAN)
if result.VLANID == 0 {
// Return the first non-zero VLAN.
result.VLANID = uint16(subnetInfo.VLAN)
result.VLANID = vlanID
} else if vlanID != 0 && result.VLANID != vlanID {
return nil, fmt.Errorf("IPPools have conflicting VLAN IDs %d and %d for dual-stack allocation", result.VLANID, vlanID)
}
}
// No failed allocation, so do not release allocated IPs.
Expand Down Expand Up @@ -340,7 +435,7 @@ func (d *AntreaIPAM) del(podOwner *crdv1b1.PodOwner) (foundAllocation bool, err
// mineTrue + timeout error
// mineTrue + IPPoolNotFound error
// mineTrue + nil error
func (d *AntreaIPAM) owns(k8sArgs *types.K8sArgs) (mineType, *poolallocator.IPPoolAllocator, []net.IP, *crdv1b1.IPAddressOwner, error) {
func (d *AntreaIPAM) owns(k8sArgs *types.K8sArgs) (mineType, []*poolallocator.IPPoolAllocator, []net.IP, *crdv1b1.IPAddressOwner, error) {
// Wait controller ready to avoid inappropriate behaviors on the CNI request.
if err := d.waitForControllerReady(); err != nil {
// Return mineTrue to make this request fail and kubelet will retry.
Expand All @@ -350,7 +445,7 @@ func (d *AntreaIPAM) owns(k8sArgs *types.K8sArgs) (mineType, *poolallocator.IPPo
namespace := string(k8sArgs.K8S_POD_NAMESPACE)
podName := string(k8sArgs.K8S_POD_NAME)
klog.V(2).InfoS("Inspecting IPAM annotation", "Namespace", namespace, "Pod", podName)
return d.controller.getPoolAllocatorByPod(namespace, podName)
return d.controller.getPoolAllocatorsByPod(namespace, podName)
}

func (d *AntreaIPAM) waitForControllerReady() error {
Expand Down
Loading
Loading