/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cluster

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"regexp"
	"strings"
	"time"

	"github.com/pkg/errors"
	log "github.com/sirupsen/logrus"

	"sigs.k8s.io/kind/pkg/cluster/config"
	"sigs.k8s.io/kind/pkg/cluster/kubeadm"
	"sigs.k8s.io/kind/pkg/docker"
	"sigs.k8s.io/kind/pkg/exec"
	"sigs.k8s.io/kind/pkg/kustomize"
	logutil "sigs.k8s.io/kind/pkg/log"
)

// ClusterLabelKey is applied to each "node" docker container for identification
const ClusterLabelKey = "io.k8s.sigs.kind.cluster"

// Context is used to create / manipulate kubernetes-in-docker clusters
type Context struct {
	name   string
	status *logutil.Status
}

// similar to valid docker container names, but since we will prefix
// and suffix this name, we can relax it a little
// see NewContext() for usage
// https://godoc.org/github.com/docker/docker/daemon/names#pkg-constants
var validNameRE = regexp.MustCompile(`^[a-zA-Z0-9_.-]+$`)

// NewContext returns a new cluster management context
// if name is "" the default ("1") will be used
func NewContext(name string) (ctx *Context, err error) {
	if name == "" {
		name = "1"
	}
	// validate the name
	if !validNameRE.MatchString(name) {
		return nil, fmt.Errorf(
			"'%s' is not a valid cluster name, cluster names must match `%s`",
			name, validNameRE.String(),
		)
	}
	return &Context{
		name: name,
	}, nil
}
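
// Illustrative caller flow (a sketch only; it assumes cfg is a *config.Config
// obtained elsewhere, and uses this package's exported methods defined below):
//
//	ctx, err := cluster.NewContext("example")
//	if err != nil {
//		// handle the invalid name
//	}
//	if err := ctx.Create(cfg); err != nil {
//		// handle provisioning failure
//	}
//	fmt.Println("export KUBECONFIG=" + ctx.KubeConfigPath())
//	// ... and eventually tear the cluster down:
//	_ = ctx.Delete()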

// ClusterLabel returns the docker object label that will be applied
// to cluster "node" containers
func (c *Context) ClusterLabel() string {
	return fmt.Sprintf("%s=%s", ClusterLabelKey, c.name)
}
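
// For example, with the default context name ("1") this evaluates to
// "io.k8s.sigs.kind.cluster=1".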

// Name returns the context's name
func (c *Context) Name() string {
	return c.name
}

// ClusterName returns the Kubernetes cluster name based on the context name
// currently this is .Name prefixed with "kind-"
func (c *Context) ClusterName() string {
	return fmt.Sprintf("kind-%s", c.name)
}

// KubeConfigPath returns the path to where the Kubeconfig would be placed
// by kind based on the configuration.
func (c *Context) KubeConfigPath() string {
	// TODO(bentheelder): Windows?
	// configDir matches the standard directory expected by kubectl etc.
	configDir := filepath.Join(os.Getenv("HOME"), ".kube")
	// note that the file name however does not; we do not want to overwrite
	// the standard config, though in the future we may (?) merge them
	fileName := fmt.Sprintf("kind-config-%s", c.name)
	return filepath.Join(configDir, fileName)
}
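
// For example, with the default context name ("1") this resolves to
// $HOME/.kube/kind-config-1.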

// Create provisions and starts a kubernetes-in-docker cluster
func (c *Context) Create(cfg *config.Config) error {
	// validate config first
	if err := cfg.Validate(); err != nil {
		return err
	}

	fmt.Printf("Creating cluster '%s' ...\n", c.ClusterName())
	c.status = logutil.NewStatus(os.Stdout)
	c.status.MaybeWrapLogrus(log.StandardLogger())

	defer c.status.End(false)
	image := cfg.Image
	if strings.Contains(image, "@sha256:") {
		image = strings.Split(image, "@sha256:")[0]
	}
	c.status.Start(fmt.Sprintf("Ensuring node image (%s) 🖼", image))

	// attempt to explicitly pull the image if it doesn't exist locally
	// we don't care if this errors, we'll still try to run which also pulls
	_, _ = docker.PullIfNotPresent(cfg.Image, 4)

	// TODO(bentheelder): multiple nodes ...
	kubeadmConfig, err := c.provisionControlPlane(
		fmt.Sprintf("kind-%s-control-plane", c.name),
		cfg,
	)

	// clean up the kubeadm config file
	// NOTE: in the future we will use this for other nodes first
	if kubeadmConfig != "" {
		defer os.Remove(kubeadmConfig)
	}
	if err != nil {
		return err
	}

	c.status.End(true)
	fmt.Printf(
		"Cluster creation complete. You can now use the cluster with:\n\nexport KUBECONFIG=\"$(kind get kubeconfig-path)\"\nkubectl cluster-info\n",
	)
	return nil
}

// Delete tears down a kubernetes-in-docker cluster
func (c *Context) Delete() error {
	nodes, err := c.ListNodes(true)
	if err != nil {
		return fmt.Errorf("error listing nodes: %v", err)
	}
	return c.deleteNodes(nodes...)
}

// provisionControlPlane provisions the control plane node
// and the cluster kubeadm config
func (c *Context) provisionControlPlane(
	nodeName string,
	cfg *config.Config,
) (kubeadmConfigPath string, err error) {
	c.status.Start(fmt.Sprintf("[%s] Creating node container 📦", nodeName))
	// create the "node" container (docker run, but it is paused, see createNode)
	node, port, err := createControlPlaneNode(nodeName, cfg.Image, c.ClusterLabel())
	if err != nil {
		return "", err
	}

	c.status.Start(fmt.Sprintf("[%s] Fixing mounts 🗻", nodeName))
	// we need to change a few mounts once we have the container
	// we'd do this ahead of time if we could, but --privileged implies things
	// that don't seem to be configurable, and we need that flag
	if err := node.FixMounts(); err != nil {
		// TODO(bentheelder): logging here
		// TODO(bentheelder): add a flag to retain the broken nodes for debugging
		c.deleteNodes(node.nameOrID)
		return "", err
	}

	// run any pre-boot hooks
	if cfg.ControlPlane != nil && cfg.ControlPlane.NodeLifecycle != nil {
		for _, hook := range cfg.ControlPlane.NodeLifecycle.PreBoot {
			if err := node.RunHook(&hook, "preBoot"); err != nil {
				return "", err
			}
		}
	}

	c.status.Start(fmt.Sprintf("[%s] Starting systemd 🖥", nodeName))
	// signal the node entrypoint to continue booting into systemd
	if err := node.SignalStart(); err != nil {
		// TODO(bentheelder): logging here
		// TODO(bentheelder): add a flag to retain the broken nodes for debugging
		c.deleteNodes(node.nameOrID)
		return "", err
	}

	c.status.Start(fmt.Sprintf("[%s] Waiting for docker to be ready 🐋", nodeName))
	// wait for docker to be ready
	if !node.WaitForDocker(time.Now().Add(time.Second * 30)) {
		// TODO(bentheelder): logging here
		// TODO(bentheelder): add a flag to retain the broken nodes for debugging
		c.deleteNodes(node.nameOrID)
		return "", fmt.Errorf("timed out waiting for docker to be ready on node")
	}

	// load the docker image artifacts into the docker daemon
	node.LoadImages()

	// get installed kubernetes version from the node image
	kubeVersion, err := node.KubeVersion()
	if err != nil {
		// TODO(bentheelder): logging here
		// TODO(bentheelder): add a flag to retain the broken nodes for debugging
		c.deleteNodes(node.nameOrID)
		return "", fmt.Errorf("failed to get kubernetes version from node: %v", err)
	}

	// create kubeadm config file
	kubeadmConfig, err := c.createKubeadmConfig(
		cfg,
		kubeadm.ConfigData{
			ClusterName:       c.ClusterName(),
			KubernetesVersion: kubeVersion,
			APIBindPort:       port,
		},
	)
	if err != nil {
		c.deleteNodes(node.nameOrID)
		return "", fmt.Errorf("failed to create kubeadm config: %v", err)
	}

	// copy the config to the node
	if err := node.CopyTo(kubeadmConfig, "/kind/kubeadm.conf"); err != nil {
		// TODO(bentheelder): logging here
		// TODO(bentheelder): add a flag to retain the broken nodes for debugging
		c.deleteNodes(node.nameOrID)
		return kubeadmConfig, errors.Wrap(err, "failed to copy kubeadm config to node")
	}

	// run any pre-kubeadm hooks
	if cfg.ControlPlane != nil && cfg.ControlPlane.NodeLifecycle != nil {
		for _, hook := range cfg.ControlPlane.NodeLifecycle.PreKubeadm {
			if err := node.RunHook(&hook, "preKubeadm"); err != nil {
				return kubeadmConfig, err
			}
		}
	}

	// run kubeadm
	c.status.Start(
		fmt.Sprintf(
			"[%s] Starting Kubernetes (this may take a minute) ☸",
			nodeName,
		))
	if err := node.RunQ(
		// init because this is the control plane node
		"kubeadm", "init",
		// preflight errors are expected, in particular for swap being enabled
		// TODO(bentheelder): limit the set of acceptable errors
		"--ignore-preflight-errors=all",
		// specify our generated config file
		"--config=/kind/kubeadm.conf",
	); err != nil {
		// TODO(bentheelder): logging here
		// TODO(bentheelder): add a flag to retain the broken nodes for debugging
		return kubeadmConfig, errors.Wrap(err, "failed to init node with kubeadm")
	}

	// run any post-kubeadm hooks
	if cfg.ControlPlane != nil && cfg.ControlPlane.NodeLifecycle != nil {
		for _, hook := range cfg.ControlPlane.NodeLifecycle.PostKubeadm {
			if err := node.RunHook(&hook, "postKubeadm"); err != nil {
				return kubeadmConfig, err
			}
		}
	}

	// set up the $KUBECONFIG
	kubeConfigPath := c.KubeConfigPath()
	if err = node.WriteKubeConfig(kubeConfigPath); err != nil {
		// TODO(bentheelder): logging here
		// TODO(bentheelder): add a flag to retain the broken nodes for debugging
		return kubeadmConfig, errors.Wrap(err, "failed to get kubeconfig from node")
	}

	// TODO(bentheelder): support other overlay networks
	if err = node.RunQ(
		"/bin/sh", "-c",
		`kubectl apply --kubeconfig=/etc/kubernetes/admin.conf -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version --kubeconfig=/etc/kubernetes/admin.conf | base64 | tr -d '\n')"`,
	); err != nil {
		return kubeadmConfig, errors.Wrap(err, "failed to apply overlay network")
	}

	// if we are only provisioning one node, remove the master taint
	// https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/#master-isolation
	// TODO(bentheelder): put this back when we have multi-node
	//if cfg.NumNodes == 1 {
	if err = node.RunQ(
		"kubectl", "--kubeconfig=/etc/kubernetes/admin.conf",
		"taint", "nodes", "--all", "node-role.kubernetes.io/master-",
	); err != nil {
		return kubeadmConfig, errors.Wrap(err, "failed to remove master taint")
	}
	//}

	// add the default storage class
	if err := node.RunQWithInput(
		strings.NewReader(defaultStorageClassManifest),
		"kubectl", "--kubeconfig=/etc/kubernetes/admin.conf", "apply", "-f", "-",
	); err != nil {
		return kubeadmConfig, errors.Wrap(err, "failed to add default storage class")
	}

	// run any post-overlay hooks
	if cfg.ControlPlane != nil && cfg.ControlPlane.NodeLifecycle != nil {
		for _, hook := range cfg.ControlPlane.NodeLifecycle.PostSetup {
			if err := node.RunHook(&hook, "postSetup"); err != nil {
				return kubeadmConfig, err
			}
		}
	}

	return kubeadmConfig, nil
}

// createKubeadmConfig creates the kubeadm config file for the cluster
// by running data through the template and writing it to a temp file
// the config file path is returned, this file should be removed later
func (c *Context) createKubeadmConfig(cfg *config.Config, data kubeadm.ConfigData) (path string, err error) {
	// create kubeadm config file
	f, err := ioutil.TempFile("", "")
	if err != nil {
		return "", errors.Wrap(err, "failed to create kubeadm config")
	}
	path = f.Name()
	// generate the config contents
	config, err := kubeadm.Config(data)
	if err != nil {
		os.Remove(path)
		return "", err
	}
	// apply patches
	patchedConfig, err := kustomize.Build(
		[]string{config},
		cfg.KubeadmConfigPatches,
		cfg.KubeadmConfigPatchesJSON6902,
	)
	if err != nil {
		os.Remove(path)
		return "", err
	}
	// write to the file
	log.Infof("Using KubeadmConfig:\n\n%s\n", patchedConfig)
	_, err = f.WriteString(patchedConfig)
	if err != nil {
		os.Remove(path)
		return "", err
	}
	return path, nil
}
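
// Condensed caller sketch (as done by Create and provisionControlPlane above):
// the returned temp-file path is copied onto the node and removed afterwards.
//
//	kubeadmConfig, err := c.createKubeadmConfig(cfg, kubeadm.ConfigData{ /* ... */ })
//	if err != nil {
//		return err
//	}
//	defer os.Remove(kubeadmConfig)
//	if err := node.CopyTo(kubeadmConfig, "/kind/kubeadm.conf"); err != nil {
//		return err
//	}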

// config has slices of string, but we want bytes for kustomize
func stringSliceToByteSliceSlice(ss []string) [][]byte {
	bss := [][]byte{}
	for _, s := range ss {
		bss = append(bss, []byte(s))
	}
	return bss
}

func (c *Context) deleteNodes(names ...string) error {
	cmd := exec.Command("docker", "rm")
	cmd.Args = append(cmd.Args,
		"-f", // force the container to be deleted now
		"-v", // delete volumes
	)
	cmd.Args = append(cmd.Args, names...)
	return cmd.Run()
}
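
// Roughly equivalent to running:
//
//	docker rm -f -v <node-name-or-id> [<node-name-or-id> ...]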

// ListNodes returns the list of container IDs for the "nodes" in the cluster
func (c *Context) ListNodes(alsoStopped bool) (containerIDs []string, err error) {
	cmd := exec.Command("docker", "ps")
	cmd.Args = append(cmd.Args,
		// quiet output for parsing
		"-q",
		// filter for nodes with the cluster label
		"--filter", "label="+c.ClusterLabel(),
	)
	// optionally list nodes that are stopped
	if alsoStopped {
		cmd.Args = append(cmd.Args, "-a")
	}
	return cmd.CombinedOutputLines()
}
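
// Roughly equivalent to running (the -a flag is added only when alsoStopped is true):
//
//	docker ps -q --filter "label=io.k8s.sigs.kind.cluster=<name>" [-a]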