|
// This differs from `TestSwarmRotateUnlockKey` because that one rotates a single node, which is the leader. |
|
// This one keeps the leader up, and asserts that other manager nodes in the cluster also have their unlock |
|
// key rotated. |
|
func (s *DockerSwarmSuite) TestSwarmClusterRotateUnlockKey(c *check.C) { |
|
if runtime.GOARCH == "s390x" { |
|
c.Skip("Disabled on s390x") |
|
} |
|
if runtime.GOARCH == "ppc64le" { |
|
c.Skip("Disabled on ppc64le") |
|
} |
|
|
|
d1 := s.AddDaemon(c, true, true) // leader - don't restart this one, we don't want leader election delays |
|
d2 := s.AddDaemon(c, true, true) |
|
d3 := s.AddDaemon(c, true, true) |
|
|
|
outs, err := d1.Cmd("swarm", "update", "--autolock") |
|
c.Assert(err, checker.IsNil, check.Commentf("%s", outs)) |
|
unlockKey := getUnlockKey(d1, c, outs) |
|
|
|
// Rotate multiple times |
|
for i := 0; i != 3; i++ { |
|
outs, err = d1.Cmd("swarm", "unlock-key", "-q", "--rotate") |
|
c.Assert(err, checker.IsNil, check.Commentf("%s", outs)) |
|
// Strip \n |
|
newUnlockKey := outs[:len(outs)-1] |
|
c.Assert(newUnlockKey, checker.Not(checker.Equals), "") |
|
c.Assert(newUnlockKey, checker.Not(checker.Equals), unlockKey) |
|
|
|
d2.RestartNode(c) |
|
d3.RestartNode(c) |
|
|
|
for _, d := range []*daemon.Daemon{d2, d3} { |
|
c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateLocked) |
|
|
|
outs, _ := d.Cmd("node", "ls") |
|
c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") |
|
|
|
cmd := d.Command("swarm", "unlock") |
|
cmd.Stdin = bytes.NewBufferString(unlockKey) |
|
result := icmd.RunCmd(cmd) |
|
|
|
if result.Error == nil { |
|
// On occasion, the daemon may not have finished |
|
// rotating the KEK before restarting. The test is |
|
// intentionally written to explore this behavior. |
|
// When this happens, unlocking with the old key will |
|
// succeed. If we wait for the rotation to happen and |
|
// restart again, the new key should be required this |
|
// time. |
|
|
|
time.Sleep(3 * time.Second) |
|
|
|
d.RestartNode(c) |
|
|
|
cmd = d.Command("swarm", "unlock") |
|
cmd.Stdin = bytes.NewBufferString(unlockKey) |
|
result = icmd.RunCmd(cmd) |
|
} |
|
result.Assert(c, icmd.Expected{ |
|
ExitCode: 1, |
|
Err: "invalid key", |
|
}) |
|
|
|
outs, _ = d.Cmd("node", "ls") |
|
c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") |
|
|
|
cmd = d.Command("swarm", "unlock") |
|
cmd.Stdin = bytes.NewBufferString(newUnlockKey) |
|
icmd.RunCmd(cmd).Assert(c, icmd.Success) |
|
|
|
c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive) |
|
|
|
outs, err = d.Cmd("node", "ls") |
|
c.Assert(err, checker.IsNil, check.Commentf("%s", outs)) |
|
c.Assert(outs, checker.Not(checker.Contains), "Swarm is encrypted and needs to be unlocked") |
|
} |
|
|
|
unlockKey = newUnlockKey |
|
} |
|
} |
Let's create a separate issue for this one (also tracked in #33041 and #37306).
Seen failing in https://jenkins.dockerproject.org/job/Docker-PRs-experimental/44501/console (and many other times)
This is the test:
moby/integration-cli/docker_cli_swarm_test.go
Lines 1313 to 1392 in 0e54f5e
d1=dcd909916369d, d2=de6869a9c7827, d3=d899b634e4c28

Create 3 daemons;
Daemon 1 (d1=dcd909916369d) — moby/integration-cli/docker_cli_swarm_test.go, Line 1324 in 0e54f5e
Daemon 2 (d2=de6869a9c7827) — moby/integration-cli/docker_cli_swarm_test.go, Line 1325 in 0e54f5e
Daemon 3 (d3=d899b634e4c28) — moby/integration-cli/docker_cli_swarm_test.go, Line 1326 in 0e54f5e
In a loop (3 times);
Iteration 1:
Restart daemon d2 — moby/integration-cli/docker_cli_swarm_test.go, Line 1341 in 0e54f5e
Restart daemon d3 — moby/integration-cli/docker_cli_swarm_test.go, Line 1342 in 0e54f5e
Iteration 2:
Restart daemon d2 — moby/integration-cli/docker_cli_swarm_test.go, Line 1341 in 0e54f5e
Restart daemon d3 — moby/integration-cli/docker_cli_swarm_test.go, Line 1342 in 0e54f5e
Failing here:
moby/integration-cli/docker_cli_swarm_test.go, Lines 1385 to 1386 in 0e54f5e
Teardown:
Logs: