
Commit dae320c

Merge pull request #633 from robbrockbank/handle-pool-deletion
Release pool affinities, check for overlapping pools and check CIDR cannot be altered
2 parents 05786be + 33c143c commit dae320c
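
The caller-visible effect of the new validation (an illustrative sketch, not part of this commit): an Update that alters an existing pool's CIDR, and a Create whose CIDR overlaps another pool, now fail with a validation error. The interface below merely mirrors the Get/Update signatures from lib/clientv2/ippool.go in this diff; any concrete client handle is assumed.

package example

import (
	"context"

	apiv2 "github.com/projectcalico/libcalico-go/lib/apis/v2"
	"github.com/projectcalico/libcalico-go/lib/options"
)

// poolClient mirrors the subset of the clientv2 IPPoolInterface used here
// (signatures copied from the diff below); a real client satisfies it.
type poolClient interface {
	Get(ctx context.Context, name string, opts options.GetOptions) (*apiv2.IPPool, error)
	Update(ctx context.Context, res *apiv2.IPPool, opts options.SetOptions) (*apiv2.IPPool, error)
}

// tryChangeCIDR fetches a pool and attempts to change its CIDR. With this
// change the Update call is rejected with a validation error ("IPPool CIDR
// cannot be modified"), which is passed straight back to the caller.
func tryChangeCIDR(ctx context.Context, pools poolClient, name, newCIDR string) error {
	pool, err := pools.Get(ctx, name, options.GetOptions{})
	if err != nil {
		return err
	}
	pool.Spec.CIDR = newCIDR
	_, err = pools.Update(ctx, pool, options.SetOptions{})
	return err
}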

6 files changed: 334 additions & 74 deletions


lib/backend/model/resource.go

Lines changed: 4 additions & 0 deletions
@@ -254,3 +254,7 @@ func (options ResourceListOptions) defaultPathRoot() string {
 	}
 	return k + "/" + options.Name
 }
+
+func (options ResourceListOptions) String() string {
+	return options.Kind
+}

lib/backend/syncersv1/bgpsyncer/bgpsyncer_e2e_test.go

Lines changed: 37 additions & 6 deletions
@@ -204,10 +204,11 @@ var _ = testutils.E2eDatastoreDescribe("BGP syncer tests", testutils.DatastoreAl
 			)
 			Expect(err).NotTo(HaveOccurred())
 			// The pool will add as single entry ( +1 )
+			poolKeyV1 := model.IPPoolKey{CIDR: net.MustParseCIDR("192.124.0.0/21")}
 			expectedCacheSize += 1
 			syncTester.ExpectCacheSize(expectedCacheSize)
 			syncTester.ExpectData(model.KVPair{
-				Key: model.IPPoolKey{CIDR: net.MustParseCIDR("192.124.0.0/21")},
+				Key: poolKeyV1,
 				Value: &model.IPPool{
 					CIDR:          poolCIDRNet,
 					IPIPInterface: "tunl0",
@@ -293,9 +294,10 @@ var _ = testutils.E2eDatastoreDescribe("BGP syncer tests", testutils.DatastoreAl
 
 			// For non-kubernetes, check that we can allocate an IP address and get a syncer update
 			// for the allocation block.
+			var blockAffinityKeyV1 model.BlockAffinityKey
 			if config.Spec.DatastoreType != apiconfig.Kubernetes {
 				By("Allocating an IP address and checking that we get an allocation block")
-				_, _, err = c.IPAM().AutoAssign(ctx, ipam.AutoAssignArgs{
+				ips1, _, err := c.IPAM().AutoAssign(ctx, ipam.AutoAssignArgs{
 					Num4:     1,
 					Hostname: "127.0.0.1",
 				})
@@ -306,16 +308,45 @@ var _ = testutils.E2eDatastoreDescribe("BGP syncer tests", testutils.DatastoreAl
 				expectedCacheSize += 1
 				syncTester.ExpectCacheSize(expectedCacheSize)
 				current := syncTester.GetCacheEntries()
-				found := false
 				for _, kvp := range current {
 					if kab, ok := kvp.Key.(model.BlockAffinityKey); ok {
 						if kab.Host == "127.0.0.1" && poolCIDRNet.Contains(kab.CIDR.IP) {
-							found = true
+							blockAffinityKeyV1 = kab
 							break
 						}
 					}
 				}
-				Expect(found).To(BeTrue(), "Did not find affinity block in sync data")
+				Expect(blockAffinityKeyV1).NotTo(BeNil(), "Did not find affinity block in sync data")
+
+				By("Allocating an IP address on a different host and checking for no updates")
+				// The syncer only monitors affine blocks for one host, so IP allocations for a different
+				// host should not result in updates.
+				ips2, _, err := c.IPAM().AutoAssign(ctx, ipam.AutoAssignArgs{
+					Num4:     1,
+					Hostname: "not-this-host",
+				})
+				Expect(err).NotTo(HaveOccurred())
+				syncTester.ExpectCacheSize(expectedCacheSize)
+
+				By("Releasing the IP addresses and checking for no updates")
+				// Releasing IPs should leave the affine blocks assigned, so releasing the IPs
+				// should result in no updates.
+				_, err = c.IPAM().ReleaseIPs(ctx, ips1)
+				Expect(err).NotTo(HaveOccurred())
+				_, err = c.IPAM().ReleaseIPs(ctx, ips2)
+				Expect(err).NotTo(HaveOccurred())
+				syncTester.ExpectCacheSize(expectedCacheSize)
+
+				By("Deleting the IPPool and checking for pool and affine block deletion")
+				// Deleting the pool will also release all affine blocks associated with the pool.
+				_, err = c.IPPools().Delete(ctx, "mypool", options.DeleteOptions{})
+				Expect(err).NotTo(HaveOccurred())
+
+				// The pool and the affine block for 127.0.0.1 should have deletion events.
+				expectedCacheSize -= 2
+				syncTester.ExpectCacheSize(expectedCacheSize)
+				syncTester.ExpectNoData(blockAffinityKeyV1)
+				syncTester.ExpectNoData(poolKeyV1)
 			}
 
 			By("Starting a new syncer and verifying that all current entries are returned before sync status")
@@ -329,7 +360,7 @@ var _ = testutils.E2eDatastoreDescribe("BGP syncer tests", testutils.DatastoreAl
 			// step.
 			syncTester.ExpectStatusUpdate(api.WaitForDatastore)
 			syncTester.ExpectStatusUpdate(api.ResyncInProgress)
-			syncTester.ExpectCacheSize(len(current))
+			syncTester.ExpectCacheSize(expectedCacheSize)
 			for _, e := range current {
 				if config.Spec.DatastoreType == apiconfig.Kubernetes {
 					// Don't check revisions for K8s since the node data gets updated constantly.

lib/backend/syncersv1/felixsyncer/felixsyncer_e2e_test.go

Lines changed: 1 addition & 1 deletion
@@ -182,7 +182,7 @@ var _ = testutils.E2eDatastoreDescribe("Felix syncer tests", testutils.Datastore
 				Revision: pool.ResourceVersion,
 			})
 			syncTester.ExpectData(model.KVPair{
-				Key: model.GlobalConfigKey{"IpInIpEnabled"},
+				Key:   model.GlobalConfigKey{"IpInIpEnabled"},
 				Value: "true",
 			})
 

lib/clientv2/ippool.go

Lines changed: 137 additions & 17 deletions
@@ -16,14 +16,17 @@ package clientv2
 
 import (
 	"context"
+	"fmt"
 	"net"
+	"time"
+
+	log "github.com/sirupsen/logrus"
 
 	apiv2 "github.com/projectcalico/libcalico-go/lib/apis/v2"
 	cerrors "github.com/projectcalico/libcalico-go/lib/errors"
 	cnet "github.com/projectcalico/libcalico-go/lib/net"
 	"github.com/projectcalico/libcalico-go/lib/options"
 	"github.com/projectcalico/libcalico-go/lib/watch"
-	log "github.com/sirupsen/logrus"
 )
 
 // IPPoolInterface has methods to work with IPPool resources.
@@ -45,7 +48,7 @@ type ipPools struct {
 // representation of the IPPool, and an error, if there is any.
 func (r ipPools) Create(ctx context.Context, res *apiv2.IPPool, opts options.SetOptions) (*apiv2.IPPool, error) {
 	// Validate the IPPool before creating the resource.
-	if err := r.validateAndSetDefaults(res); err != nil {
+	if err := r.validateAndSetDefaults(ctx, res, nil); err != nil {
 		return nil, err
 	}
 
@@ -67,14 +70,20 @@ func (r ipPools) Create(ctx context.Context, res *apiv2.IPPool, opts options.Set
 // Update takes the representation of a IPPool and updates it. Returns the stored
 // representation of the IPPool, and an error, if there is any.
 func (r ipPools) Update(ctx context.Context, res *apiv2.IPPool, opts options.SetOptions) (*apiv2.IPPool, error) {
+	// Get the existing settings, so that we can validate the CIDR has not changed.
+	old, err := r.Get(ctx, res.Name, options.GetOptions{})
+	if err != nil {
+		return nil, err
+	}
+
 	// Validate the IPPool updating the resource.
-	if err := r.validateAndSetDefaults(res); err != nil {
+	if err := r.validateAndSetDefaults(ctx, res, old); err != nil {
 		return nil, err
 	}
 
 	// Enable IPIP globally if required. Do this before the Update so if it fails the user
 	// can retry the same command.
-	err := r.maybeEnableIPIP(ctx, res)
+	err = r.maybeEnableIPIP(ctx, res)
 	if err != nil {
 		return nil, err
 	}
@@ -88,6 +97,65 @@ func (r ipPools) Update(ctx context.Context, res *apiv2.IPPool, opts options.Set
 
 // Delete takes name of the IPPool and deletes it. Returns an error if one occurs.
 func (r ipPools) Delete(ctx context.Context, name string, opts options.DeleteOptions) (*apiv2.IPPool, error) {
+	// Deleting a pool requires a little care because of existing endpoints
+	// using IP addresses allocated in the pool. We do the deletion in
+	// the following steps:
+	// - disable the pool so no more IPs are assigned from it
+	// - remove all affinities associated with the pool
+	// - delete the pool
+
+	// Get the pool so that we can find the CIDR associated with it.
+	pool, err := r.Get(ctx, name, options.GetOptions{})
+	if err != nil {
+		return nil, err
+	}
+
+	logCxt := log.WithFields(log.Fields{
+		"CIDR": pool.Spec.CIDR,
+		"Name": name,
+	})
+
+	// If the pool is active, set the disabled flag to ensure we stop allocating from this pool.
+	if !pool.Spec.Disabled {
+		logCxt.Info("Disabling pool to release affinities")
+		pool.Spec.Disabled = true
+
+		// If the Delete has been called with a ResourceVersion then use that to perform the
+		// update - that way we'll catch update conflicts (we could actually check here, but
+		// the most likely scenario is there isn't one - so just pass it through and let the
+		// Update handle any conflicts).
+		if opts.ResourceVersion != "" {
+			pool.ResourceVersion = opts.ResourceVersion
+		}
+		if _, err := r.Update(ctx, pool, options.SetOptions{}); err != nil {
+			return nil, err
+		}
+
+		// Reset the resource version before the actual delete since the version of that resource
+		// will now have been updated.
+		opts.ResourceVersion = ""
+	}
+
+	// Release affinities associated with this pool. We do this even if the pool was disabled
+	// (since it may have been enabled at one time, and if there are no affine blocks created
+	// then this will be a no-op). We've already validated the CIDR so we know it will parse.
+	if _, cidrNet, err := cnet.ParseCIDR(pool.Spec.CIDR); err == nil {
+		logCxt.Info("Releasing pool affinities")
+
+		// Pause for a short period before releasing the affinities - this gives any in-progress
+		// allocations an opportunity to finish.
+		time.Sleep(500 * time.Millisecond)
+		err = r.client.IPAM().ReleasePoolAffinities(ctx, *cidrNet)
+
+		// Depending on the datastore, IPAM may not be supported. If we get a not supported
+		// error, then continue. Any other error, fail.
+		if _, ok := err.(cerrors.ErrorOperationNotSupported); !ok && err != nil {
+			return nil, err
+		}
+	}
+
+	// And finally, delete the pool.
+	logCxt.Info("Deleting pool")
 	out, err := r.client.resources.Delete(ctx, opts, apiv2.KindIPPool, noNamespace, name)
 	if out != nil {
 		return out.(*apiv2.IPPool), err
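
For callers, the upshot of the Delete flow above is that a single Delete call now performs the whole sequence: disable the pool, release its affine blocks (skipped on datastores without IPAM support), then remove the resource. A minimal caller-side sketch; the interface below only restates the Delete signature from this diff, and a concrete client handle is assumed.

package example

import (
	"context"

	apiv2 "github.com/projectcalico/libcalico-go/lib/apis/v2"
	"github.com/projectcalico/libcalico-go/lib/options"
)

// poolDeleter restates the Delete signature shown above; the clientv2
// IPPoolInterface satisfies it.
type poolDeleter interface {
	Delete(ctx context.Context, name string, opts options.DeleteOptions) (*apiv2.IPPool, error)
}

// removePool relies on the new behaviour: one Delete call disables the pool,
// releases its block affinities and then deletes the resource.
func removePool(ctx context.Context, pools poolDeleter, name string) (*apiv2.IPPool, error) {
	return pools.Delete(ctx, name, options.DeleteOptions{})
}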
@@ -120,12 +188,14 @@ func (r ipPools) Watch(ctx context.Context, opts options.ListOptions) (watch.Int
 	return r.client.resources.Watch(ctx, opts, apiv2.KindIPPool)
 }
 
-// validateAndSetDefaults validates IPPool fields.
-func (_ ipPools) validateAndSetDefaults(pool *apiv2.IPPool) error {
+// validateAndSetDefaults validates IPPool fields and sets default values that are
+// not assigned.
+// The old pool will be unassigned for a Create.
+func (r ipPools) validateAndSetDefaults(ctx context.Context, new, old *apiv2.IPPool) error {
 	errFields := []cerrors.ErroredField{}
 
 	// Spec.CIDR field must not be empty.
-	if pool.Spec.CIDR == "" {
+	if new.Spec.CIDR == "" {
 		return cerrors.ErrorValidation{
 			ErroredFields: []cerrors.ErroredField{{
 				Name:   "IPPool.Spec.CIDR",
@@ -135,55 +205,103 @@ func (_ ipPools) validateAndSetDefaults(pool *apiv2.IPPool) error {
 	}
 
 	// Make sure the CIDR is parsable.
-	ipAddr, cidr, err := cnet.ParseCIDR(pool.Spec.CIDR)
+	ipAddr, cidr, err := cnet.ParseCIDR(new.Spec.CIDR)
 	if err != nil {
 		return cerrors.ErrorValidation{
 			ErroredFields: []cerrors.ErroredField{{
 				Name:   "IPPool.Spec.CIDR",
 				Reason: "IPPool CIDR must be a valid subnet",
+				Value:  new.Spec.CIDR,
 			}},
 		}
 	}
 
+	// Normalize the CIDR before persisting.
+	new.Spec.CIDR = cidr.String()
+
+	// If there was a previous pool then this must be an Update, validate that the
+	// CIDR has not changed. Since we are using normalized CIDRs we can just do a
+	// simple string comparison.
+	if old != nil && old.Spec.CIDR != new.Spec.CIDR {
+		errFields = append(errFields, cerrors.ErroredField{
+			Name:   "IPPool.Spec.CIDR",
+			Reason: "IPPool CIDR cannot be modified",
+			Value:  new.Spec.CIDR,
+		})
+	}
+
+	// If there was no previous pool then this must be a Create. Check that the CIDR
+	// does not overlap with any other pool CIDRs.
+	if old == nil {
+		allPools, err := r.List(ctx, options.ListOptions{})
+		if err != nil {
+			return err
+		}
+
+		for _, otherPool := range allPools.Items {
+			// It's possible that Create is called for a pre-existing pool, so skip our own
+			// pool and let the generic processing handle the pre-existing resource error case.
+			if otherPool.Name == new.Name {
+				continue
+			}
+			_, otherCIDR, err := cnet.ParseCIDR(otherPool.Spec.CIDR)
+			if err != nil {
+				log.WithField("Name", otherPool.Name).WithError(err).Error("IPPool is configured with an invalid CIDR")
+				continue
+			}
+			if otherCIDR.IsNetOverlap(cidr.IPNet) {
+				errFields = append(errFields, cerrors.ErroredField{
+					Name:   "IPPool.Spec.CIDR",
+					Reason: fmt.Sprintf("IPPool(%s) CIDR overlaps with IPPool(%s) CIDR %s", new.Name, otherPool.Name, otherPool.Spec.CIDR),
+					Value:  new.Spec.CIDR,
+				})
+			}
+		}
+	}
+
 	// Make sure IPIPMode is defaulted to "Never".
-	if len(pool.Spec.IPIPMode) == 0 {
-		pool.Spec.IPIPMode = apiv2.IPIPModeNever
+	if len(new.Spec.IPIPMode) == 0 {
+		new.Spec.IPIPMode = apiv2.IPIPModeNever
 	}
 
 	// IPIP cannot be enabled for IPv6.
-	if cidr.Version() == 6 && pool.Spec.IPIPMode != apiv2.IPIPModeNever {
+	if cidr.Version() == 6 && new.Spec.IPIPMode != apiv2.IPIPModeNever {
 		errFields = append(errFields, cerrors.ErroredField{
-			Name:   "IPPool.Spec.IPIP.Mode",
+			Name:   "IPPool.Spec.IPIPMode",
 			Reason: "IPIP is not supported on an IPv6 IP pool",
+			Value:  new.Spec.IPIPMode,
 		})
 	}
 
 	// The Calico IPAM places restrictions on the minimum IP pool size. If
 	// the ippool is enabled, check that the pool is at least the minimum size.
-	if !pool.Spec.Disabled {
+	if !new.Spec.Disabled {
 		ones, bits := cidr.Mask.Size()
 		log.Debugf("Pool CIDR: %s, num bits: %d", cidr.String(), bits-ones)
 		if bits-ones < 6 {
 			if cidr.Version() == 4 {
 				errFields = append(errFields, cerrors.ErroredField{
 					Name:   "IPPool.Spec.CIDR",
 					Reason: "IPv4 pool size is too small (min /26) for use with Calico IPAM",
+					Value:  new.Spec.CIDR,
 				})
 			} else {
 				errFields = append(errFields, cerrors.ErroredField{
 					Name:   "IPPool.Spec.CIDR",
 					Reason: "IPv6 pool size is too small (min /122) for use with Calico IPAM",
+					Value:  new.Spec.CIDR,
 				})
 			}
 		}
 	}
 
 	// The Calico CIDR should be strictly masked
-	log.Debugf("IPPool CIDR: %s, Masked IP: %d", pool.Spec.CIDR, cidr.IP)
+	log.Debugf("IPPool CIDR: %s, Masked IP: %d", new.Spec.CIDR, cidr.IP)
 	if cidr.IP.String() != ipAddr.String() {
 		errFields = append(errFields, cerrors.ErroredField{
 			Name:   "IPPool.Spec.CIDR",
-			Reason: "IP pool CIDR is not strictly masked",
+			Reason: "IPPool CIDR is not strictly masked",
+			Value:  new.Spec.CIDR,
 		})
 	}
 
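The overlap check above only needs one property from cnet's IsNetOverlap: two CIDR blocks overlap exactly when one contains the other's (masked) base address, since CIDR blocks are either nested or disjoint. A standalone sketch of that test using only the standard library net package, for illustration:

package example

import "net"

// cidrsOverlap reports whether two CIDR blocks overlap. CIDR blocks are
// either nested or disjoint, so it is enough to check whether either network
// contains the other's masked base address.
func cidrsOverlap(a, b string) (bool, error) {
	_, netA, err := net.ParseCIDR(a)
	if err != nil {
		return false, err
	}
	_, netB, err := net.ParseCIDR(b)
	if err != nil {
		return false, err
	}
	return netA.Contains(netB.IP) || netB.Contains(netA.IP), nil
}

// e.g. cidrsOverlap("192.168.0.0/16", "192.168.10.0/24") // true: nested
//      cidrsOverlap("10.0.0.0/24", "10.0.1.0/24")        // false: disjoint
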
@@ -202,14 +320,16 @@ func (_ ipPools) validateAndSetDefaults(pool *apiv2.IPPool) error {
 	if cidr.Version() == 4 && cidr.IsNetOverlap(ipv4LinkLocalNet) {
 		errFields = append(errFields, cerrors.ErroredField{
 			Name:   "IPPool.Spec.CIDR",
-			Reason: "IP pool range overlaps with IPv4 Link Local range 169.254.0.0/16",
+			Reason: "IPPool CIDR overlaps with IPv4 Link Local range 169.254.0.0/16",
+			Value:  new.Spec.CIDR,
 		})
 	}
 
 	if cidr.Version() == 6 && cidr.IsNetOverlap(ipv6LinkLocalNet) {
 		errFields = append(errFields, cerrors.ErroredField{
 			Name:   "IPPool.Spec.CIDR",
-			Reason: "IP pool range overlaps with IPv6 Link Local range fe80::/10",
+			Reason: "IPPool CIDR overlaps with IPv6 Link Local range fe80::/10",
+			Value:  new.Spec.CIDR,
 		})
 	}
 