enable name as a config field
@@ -20,6 +20,10 @@ package v1alpha4
 type Cluster struct {
 	TypeMeta `yaml:",inline"`
 
+	// The cluster name.
+	// Optional, this will be overridden by --name / KIND_CLUSTER_NAME
+	Name string `yaml:"name,omitempty"`
+
 	// Nodes contains the list of nodes defined in the `kind` Cluster
 	// If unset this will default to a single control-plane node
 	// Note that if more than one control plane is specified, an external
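For context, a minimal sketch (not part of this commit) of what the new field looks like from the public v1alpha4 API; the YAML equivalent is simply a top-level `name: example` entry in a kind config file:

package main

import (
	"fmt"

	"sigs.k8s.io/kind/pkg/apis/config/v1alpha4"
)

func main() {
	// Name is the new optional field; --name / KIND_CLUSTER_NAME still override it.
	cfg := &v1alpha4.Cluster{
		Name: "example",
		Nodes: []v1alpha4.Node{
			{Role: v1alpha4.ControlPlaneRole},
		},
	}
	fmt.Println(cfg.Name) // "example"
}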
@@ -1,116 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package context contains the internal cluster context shared by various
-// packages that implement the user face pkg/cluster.Context
-package context
-
-import (
-	"sigs.k8s.io/kind/pkg/cluster/constants"
-	"sigs.k8s.io/kind/pkg/cluster/nodes"
-	"sigs.k8s.io/kind/pkg/log"
-
-	"sigs.k8s.io/kind/pkg/cluster/internal/providers/docker"
-	"sigs.k8s.io/kind/pkg/cluster/internal/providers/provider"
-)
-
-// Context is the private shared context underlying pkg/cluster.Context
-//
-// NOTE: this is the internal one, it should contain reasonably trivial
-// methods that are safe to share between various user facing methods
-// pkg/cluster.Context is a superset of this, packages like create and delete
-// consume this
-type Context struct {
-	name string
-	// cluster backend (docker, ...)
-	provider provider.Provider
-}
-
-// NewContext returns a new internal cluster management context
-// if name is "" the default name will be used
-func NewContext(logger log.Logger, name string) *Context {
-	if name == "" {
-		name = constants.DefaultClusterName
-	}
-	return &Context{
-		name:     name,
-		provider: docker.NewProvider(logger),
-	}
-}
-
-// NewProviderContext returns a new context with given provider and name
-func NewProviderContext(p provider.Provider, name string) *Context {
-	return &Context{
-		name:     name,
-		provider: p,
-	}
-}
-
-// Name returns the cluster's name
-func (c *Context) Name() string {
-	return c.name
-}
-
-// Provider returns the provider of the context
-func (c *Context) Provider() provider.Provider {
-	return c.provider
-}
-
-// GetAPIServerEndpoint returns the cluster's API Server endpoint
-func (c *Context) GetAPIServerEndpoint() (string, error) {
-	return c.provider.GetAPIServerEndpoint(c.Name())
-}
-
-// GetAPIServerInternalEndpoint returns the cluster's internal API Server endpoint
-func (c *Context) GetAPIServerInternalEndpoint() (string, error) {
-	return c.provider.GetAPIServerInternalEndpoint(c.Name())
-}
-
-// ListNodes returns the list of container IDs for the "nodes" in the cluster
-func (c *Context) ListNodes() ([]nodes.Node, error) {
-	return c.provider.ListNodes(c.name)
-}
-
-// ListInternalNodes returns the list of container IDs for the "nodes" in the cluster
-// that are not external
-func (c *Context) ListInternalNodes() ([]nodes.Node, error) {
-	clusterNodes, err := c.ListNodes()
-	if err != nil {
-		return nil, err
-	}
-	selectedNodes := []nodes.Node{}
-	for _, node := range clusterNodes {
-		nodeRole, err := node.Role()
-		if err != nil {
-			return nil, err
-		}
-		if nodeRole == constants.WorkerNodeRoleValue || nodeRole == constants.ControlPlaneNodeRoleValue {
-			selectedNodes = append(selectedNodes, node)
-		}
-	}
-	return selectedNodes, nil
-}
-
-// CollectLogs will populate dir with cluster logs and other debug files
-func (c *Context) CollectLogs(dir string) error {
-	// TODO: should use ListNodes and Collect should handle nodes differently
-	// based on role ...
-	n, err := c.ListInternalNodes()
-	if err != nil {
-		return err
-	}
-	return c.provider.CollectLogs(dir, n)
-}
@@ -19,11 +19,12 @@ package actions
 import (
 	"sync"
 
-	"sigs.k8s.io/kind/pkg/cluster/internal/context"
 	"sigs.k8s.io/kind/pkg/cluster/nodes"
 	"sigs.k8s.io/kind/pkg/internal/apis/config"
 	"sigs.k8s.io/kind/pkg/internal/cli"
 	"sigs.k8s.io/kind/pkg/log"
+
+	"sigs.k8s.io/kind/pkg/cluster/internal/providers/provider"
 )
 
 // Action defines a step of bringing up a kind cluster after initial node
@@ -34,26 +35,26 @@ type Action interface {
 
 // ActionContext is data supplied to all actions
 type ActionContext struct {
-	Logger         log.Logger
-	Status         *cli.Status
-	Config         *config.Cluster
-	ClusterContext *context.Context
-	cache          *cachedData
+	Logger   log.Logger
+	Status   *cli.Status
+	Config   *config.Cluster
+	Provider provider.Provider
+	cache    *cachedData
 }
 
 // NewActionContext returns a new ActionContext
 func NewActionContext(
 	logger log.Logger,
-	cfg *config.Cluster,
-	ctx *context.Context,
 	status *cli.Status,
+	provider provider.Provider,
+	cfg *config.Cluster,
 ) *ActionContext {
 	return &ActionContext{
-		Logger:         logger,
-		Status:         status,
-		Config:         cfg,
-		ClusterContext: ctx,
-		cache:          &cachedData{},
+		Logger:   logger,
+		Status:   status,
+		Provider: provider,
+		Config:   cfg,
+		cache:    &cachedData{},
 	}
 }
@@ -80,7 +81,7 @@ func (ac *ActionContext) Nodes() ([]nodes.Node, error) {
 	if cachedNodes != nil {
 		return cachedNodes, nil
 	}
-	n, err := ac.ClusterContext.ListNodes()
+	n, err := ac.Provider.ListNodes(ac.Config.Name)
 	if err != nil {
 		return nil, err
 	}
@@ -51,7 +51,7 @@ func (a *Action) Execute(ctx *actions.ActionContext) error {
 		return err
 	}
 
-	controlPlaneEndpoint, err := ctx.ClusterContext.GetAPIServerInternalEndpoint()
+	controlPlaneEndpoint, err := ctx.Provider.GetAPIServerInternalEndpoint(ctx.Config.Name)
 	if err != nil {
 		return err
 	}
@@ -60,7 +60,7 @@ func (a *Action) Execute(ctx *actions.ActionContext) error {
 	fns := []func() error{}
 
 	configData := kubeadm.ConfigData{
-		ClusterName:          ctx.ClusterContext.Name(),
+		ClusterName:          ctx.Config.Name,
 		ControlPlaneEndpoint: controlPlaneEndpoint,
 		APIBindPort:          common.APIServerInternalPort,
 		APIServerAddress:     ctx.Config.Networking.APIServerAddress,
@@ -24,8 +24,8 @@ import (
 
 	"github.com/alessio/shellescape"
 
-	"sigs.k8s.io/kind/pkg/cluster/internal/context"
 	"sigs.k8s.io/kind/pkg/cluster/internal/delete"
+	"sigs.k8s.io/kind/pkg/cluster/internal/providers/provider"
 	"sigs.k8s.io/kind/pkg/errors"
 	"sigs.k8s.io/kind/pkg/internal/apis/config"
 	"sigs.k8s.io/kind/pkg/internal/apis/config/encoding"
@@ -57,7 +57,8 @@ var validNameRE = regexp.MustCompile(`^[a-zA-Z0-9_.-]+$`)
 
 // ClusterOptions holds cluster creation options
 type ClusterOptions struct {
-	Config *config.Cluster
+	Config       *config.Cluster
+	NameOverride string // overrides config.Name
 	// NodeImage overrides the nodes' images in Config if non-zero
 	NodeImage string
 	Retain    bool
@@ -71,22 +72,28 @@ type ClusterOptions struct {
 }
 
 // Cluster creates a cluster
-func Cluster(logger log.Logger, ctx *context.Context, opts *ClusterOptions) error {
+func Cluster(logger log.Logger, p provider.Provider, opts *ClusterOptions) error {
 	// default / process options (namely config)
 	if err := fixupOptions(opts); err != nil {
 		return err
 	}
 
+	// Check if the cluster name already exists
+	if err := alreadyExists(p, opts.Config.Name); err != nil {
+		return err
+	}
+
 	// TODO: move to config validation
 	// validate the name
-	if !validNameRE.MatchString(ctx.Name()) {
+	if !validNameRE.MatchString(opts.Config.Name) {
 		return errors.Errorf(
 			"'%s' is not a valid cluster name, cluster names must match `%s`",
-			ctx.Name(), validNameRE.String(),
+			opts.Config.Name, validNameRE.String(),
 		)
 	}
 	// warn if cluster name might typically be too long
-	if len(ctx.Name()) > clusterNameMax {
-		logger.Warnf("cluster name %q is probably too long, this might not work properly on some systems", ctx.Name())
+	if len(opts.Config.Name) > clusterNameMax {
+		logger.Warnf("cluster name %q is probably too long, this might not work properly on some systems", opts.Config.Name)
 	}
 
 	// then validate
@@ -97,11 +104,14 @@ func Cluster(logger log.Logger, ctx *context.Context, opts *ClusterOptions) error {
 	// setup a status object to show progress to the user
 	status := cli.StatusForLogger(logger)
 
+	// we're going to start creating now, tell the user
+	logger.V(0).Infof("Creating cluster %q ...\n", opts.Config.Name)
+
 	// Create node containers implementing defined config Nodes
-	if err := ctx.Provider().Provision(status, ctx.Name(), opts.Config); err != nil {
+	if err := p.Provision(status, opts.Config); err != nil {
 		// In case of errors nodes are deleted (except if retain is explicitly set)
 		if !opts.Retain {
-			_ = delete.Cluster(logger, ctx, opts.KubeconfigPath)
+			_ = delete.Cluster(logger, p, opts.Config.Name, opts.KubeconfigPath)
 		}
 		return err
 	}
@@ -130,11 +140,11 @@ func Cluster(logger log.Logger, ctx *context.Context, opts *ClusterOptions) error {
 	}
 
 	// run all actions
-	actionsContext := actions.NewActionContext(logger, opts.Config, ctx, status)
+	actionsContext := actions.NewActionContext(logger, status, p, opts.Config)
 	for _, action := range actionsToRun {
 		if err := action.Execute(actionsContext); err != nil {
 			if !opts.Retain {
-				_ = delete.Cluster(logger, ctx, opts.KubeconfigPath)
+				_ = delete.Cluster(logger, p, opts.Config.Name, opts.KubeconfigPath)
 			}
 			return err
 		}
@@ -145,13 +155,13 @@ func Cluster(logger log.Logger, ctx *context.Context, opts *ClusterOptions) error {
 		return nil
 	}
 
-	if err := kubeconfig.Export(ctx, opts.KubeconfigPath); err != nil {
+	if err := kubeconfig.Export(p, opts.Config.Name, opts.KubeconfigPath); err != nil {
 		return err
 	}
 
 	// optionally display usage
 	if opts.DisplayUsage {
-		logUsage(logger, ctx, opts.KubeconfigPath)
+		logUsage(logger, opts.Config.Name, opts.KubeconfigPath)
 	}
 	// optionally give the user a friendly salutation
 	if opts.DisplaySalutation {
@@ -161,9 +171,22 @@ func Cluster(logger log.Logger, ctx *context.Context, opts *ClusterOptions) error {
 	return nil
 }
 
-func logUsage(logger log.Logger, ctx *context.Context, explicitKubeconfigPath string) {
+// alreadyExists returns an error if the cluster name already exists
+// or if we had an error checking
+func alreadyExists(p provider.Provider, name string) error {
+	n, err := p.ListNodes(name)
+	if err != nil {
+		return err
+	}
+	if len(n) != 0 {
+		return errors.Errorf("node(s) already exist for a cluster with the name %q", name)
+	}
+	return nil
+}
+
+func logUsage(logger log.Logger, name, explicitKubeconfigPath string) {
 	// construct a sample command for interacting with the cluster
-	kctx := kubeconfig.ContextForCluster(ctx.Name())
+	kctx := kubeconfig.ContextForCluster(name)
 	sampleCommand := fmt.Sprintf("kubectl cluster-info --context %s", kctx)
 	if explicitKubeconfigPath != "" {
 		// explicit path, include this
@@ -196,6 +219,10 @@ func fixupOptions(opts *ClusterOptions) error {
 		opts.Config = cfg
 	}
 
+	if opts.NameOverride != "" {
+		opts.Config.Name = opts.NameOverride
+	}
+
 	// if NodeImage was set, override the image on all nodes
 	if opts.NodeImage != "" {
 		// Apply image override to all the Nodes defined in Config
@@ -20,26 +20,25 @@ import (
 	"sigs.k8s.io/kind/pkg/errors"
 	"sigs.k8s.io/kind/pkg/log"
 
-	"sigs.k8s.io/kind/pkg/cluster/internal/context"
 	// TODO: we shouldn't need to import this here
 	"sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig"
+	"sigs.k8s.io/kind/pkg/cluster/internal/providers/provider"
 )
 
 // Cluster deletes the cluster identified by ctx
 // explicitKubeconfigPath is --kubeconfig, following the rules from
 // https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands
-func Cluster(logger log.Logger, c *context.Context, explicitKubeconfigPath string) error {
-	n, err := c.ListNodes()
+func Cluster(logger log.Logger, p provider.Provider, name, explicitKubeconfigPath string) error {
+	n, err := p.ListNodes(name)
 	if err != nil {
 		return errors.Wrap(err, "error listing nodes")
 	}
 
-	kerr := kubeconfig.Remove(c.Name(), explicitKubeconfigPath)
+	kerr := kubeconfig.Remove(name, explicitKubeconfigPath)
 	if kerr != nil {
 		logger.Errorf("failed to update kubeconfig: %v", kerr)
 	}
 
-	err = c.Provider().DeleteNodes(n)
+	err = p.DeleteNodes(n)
 	if err != nil {
 		return err
 	}
@@ -21,19 +21,19 @@ package kubeconfig
 import (
 	"bytes"
 
-	"sigs.k8s.io/kind/pkg/cluster/internal/context"
 	"sigs.k8s.io/kind/pkg/cluster/nodeutils"
 	"sigs.k8s.io/kind/pkg/errors"
 
 	// this package has slightly more generic kubeconfig helpers
 	// and minimal dependencies on the rest of kind
 	"sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig"
+	"sigs.k8s.io/kind/pkg/cluster/internal/providers/provider"
 )
 
 // Export exports the kubeconfig given the cluster context and a path to write it to
 // This will always be an external kubeconfig
-func Export(ctx *context.Context, explicitPath string) error {
-	cfg, err := get(ctx, true)
+func Export(p provider.Provider, name, explicitPath string) error {
+	cfg, err := get(p, name, true)
 	if err != nil {
 		return err
 	}
@@ -50,8 +50,8 @@ func Remove(clusterName, explicitPath string) error {
 
 // Get returns the kubeconfig for the cluster
 // external controls if the internal IP address is used or the host endpoint
-func Get(ctx *context.Context, external bool) (string, error) {
-	cfg, err := get(ctx, external)
+func Get(p provider.Provider, name string, external bool) (string, error) {
+	cfg, err := get(p, name, external)
 	if err != nil {
 		return "", err
 	}
@@ -68,9 +68,9 @@ func ContextForCluster(kindClusterName string) string {
 	return kubeconfig.KINDClusterKey(kindClusterName)
 }
 
-func get(ctx *context.Context, external bool) (*kubeconfig.Config, error) {
+func get(p provider.Provider, name string, external bool) (*kubeconfig.Config, error) {
 	// find a control plane node to get the kubeadm config from
-	n, err := ctx.ListNodes()
+	n, err := p.ListNodes(name)
 	if err != nil {
 		return nil, err
 	}
@@ -92,7 +92,7 @@ func get(ctx *context.Context, external bool) (*kubeconfig.Config, error) {
 	// if we're doing external we need to override the server endpoint
 	server := ""
 	if external {
-		endpoint, err := ctx.GetAPIServerEndpoint()
+		endpoint, err := p.GetAPIServerEndpoint(name)
 		if err != nil {
 			return nil, err
 		}
@@ -100,5 +100,5 @@ func get(ctx *context.Context, external bool) (*kubeconfig.Config, error) {
 	}
 
 	// actually encode
-	return kubeconfig.KINDFromRawKubeadm(buff.String(), ctx.Name(), server)
+	return kubeconfig.KINDFromRawKubeadm(buff.String(), name, server)
 }
@@ -52,7 +52,7 @@ type Provider struct {
 }
 
 // Provision is part of the providers.Provider interface
-func (p *Provider) Provision(status *cli.Status, cluster string, cfg *config.Cluster) (err error) {
+func (p *Provider) Provision(status *cli.Status, cfg *config.Cluster) (err error) {
 	// TODO: validate cfg
 	// ensure node images are pulled before actually provisioning
 	if err := ensureNodeImages(p.logger, status, cfg); err != nil {
@@ -76,7 +76,7 @@ func (p *Provider) Provision(status *cli.Status, cluster string, cfg *config.Cluster) (err error) {
 	defer func() { status.End(err == nil) }()
 
 	// plan creating the containers
-	createContainerFuncs, err := planCreation(cluster, cfg, networkName)
+	createContainerFuncs, err := planCreation(cfg, networkName)
 	if err != nil {
 		return err
 	}
@@ -33,10 +33,10 @@ import (
 )
 
 // planCreation creates a slice of funcs that will create the containers
-func planCreation(cluster string, cfg *config.Cluster, networkName string) (createContainerFuncs []func() error, err error) {
+func planCreation(cfg *config.Cluster, networkName string) (createContainerFuncs []func() error, err error) {
 	// we need to know all the names for NO_PROXY
 	// compute the names first before any actual node details
-	nodeNamer := common.MakeNodeNamer(cluster)
+	nodeNamer := common.MakeNodeNamer(cfg.Name)
 	names := make([]string, len(cfg.Nodes))
 	for i, node := range cfg.Nodes {
 		name := nodeNamer(string(node.Role)) // name the node
@@ -48,7 +48,7 @@ func planCreation(cluster string, cfg *config.Cluster, networkName string) (createContainerFuncs []func() error, err error) {
 	}
 
 	// these apply to all container creation
-	genericArgs, err := commonArgs(cluster, cfg, networkName, names)
+	genericArgs, err := commonArgs(cfg.Name, cfg, networkName, names)
 	if err != nil {
 		return nil, err
 	}
@@ -55,7 +55,7 @@ type Provider struct {
 }
 
 // Provision is part of the providers.Provider interface
-func (p *Provider) Provision(status *cli.Status, cluster string, cfg *config.Cluster) (err error) {
+func (p *Provider) Provision(status *cli.Status, cfg *config.Cluster) (err error) {
 	if err := ensureMinVersion(); err != nil {
 		return err
 	}
@@ -77,7 +77,7 @@ func (p *Provider) Provision(status *cli.Status, cluster string, cfg *config.Cluster) (err error) {
 	defer func() { status.End(err == nil) }()
 
 	// plan creating the containers
-	createContainerFuncs, err := planCreation(cluster, cfg)
+	createContainerFuncs, err := planCreation(cfg)
 	if err != nil {
 		return err
 	}
@@ -32,10 +32,10 @@ import (
 )
 
 // planCreation creates a slice of funcs that will create the containers
-func planCreation(cluster string, cfg *config.Cluster) (createContainerFuncs []func() error, err error) {
+func planCreation(cfg *config.Cluster) (createContainerFuncs []func() error, err error) {
 	// these apply to all container creation
-	nodeNamer := common.MakeNodeNamer(cluster)
-	genericArgs, err := commonArgs(cluster, cfg)
+	nodeNamer := common.MakeNodeNamer(cfg.Name)
+	genericArgs, err := commonArgs(cfg)
 	if err != nil {
 		return nil, err
 	}
@@ -135,13 +135,13 @@ func clusterHasImplicitLoadBalancer(cfg *config.Cluster) bool {
 }
 
 // commonArgs computes static arguments that apply to all containers
-func commonArgs(cluster string, cfg *config.Cluster) ([]string, error) {
+func commonArgs(cfg *config.Cluster) ([]string, error) {
 	// standard arguments all nodes containers need, computed once
 	args := []string{
 		"--detach", // run the container detached
 		"--tty",    // allocate a tty for entrypoint logs
 		// label the node with the cluster ID
-		"--label", fmt.Sprintf("%s=%s", clusterLabelKey, cluster),
+		"--label", fmt.Sprintf("%s=%s", clusterLabelKey, cfg.Name),
 	}
 
 	// enable IPv6 if necessary
@@ -28,7 +28,7 @@ import (
 type Provider interface {
 	// Provision should create and start the nodes, just short of
 	// actually starting up Kubernetes, based on the given cluster config
-	Provision(status *cli.Status, cluster string, cfg *config.Cluster) error
+	Provision(status *cli.Status, cfg *config.Cluster) error
 	// ListClusters discovers the clusters that currently have resources
 	// under this providers
 	ListClusters() ([]string, error)
@@ -42,6 +42,22 @@ func SelectNodesByRole(allNodes []nodes.Node, role string) ([]nodes.Node, error) {
 	return out, nil
 }
 
+// InternalNodes returns the list of container IDs for the "nodes" in the cluster
+// that are ~Kubernetes nodes, as opposed to e.g. the external loadbalancer for HA
+func InternalNodes(allNodes []nodes.Node) ([]nodes.Node, error) {
+	selectedNodes := []nodes.Node{}
+	for _, node := range allNodes {
+		nodeRole, err := node.Role()
+		if err != nil {
+			return nil, err
+		}
+		if nodeRole == constants.WorkerNodeRoleValue || nodeRole == constants.ControlPlaneNodeRoleValue {
+			selectedNodes = append(selectedNodes, node)
+		}
+	}
+	return selectedNodes, nil
+}
+
 // ExternalLoadBalancerNode returns a node handle for the external control plane
 // loadbalancer node or nil if there isn't one
 func ExternalLoadBalancerNode(allNodes []nodes.Node) (nodes.Node, error) {
@@ -22,9 +22,9 @@ import (
 
 	"sigs.k8s.io/kind/pkg/cluster/constants"
 	"sigs.k8s.io/kind/pkg/cluster/nodes"
+	"sigs.k8s.io/kind/pkg/cluster/nodeutils"
 	"sigs.k8s.io/kind/pkg/log"
 
-	internalcontext "sigs.k8s.io/kind/pkg/cluster/internal/context"
 	internalcreate "sigs.k8s.io/kind/pkg/cluster/internal/create"
 	internaldelete "sigs.k8s.io/kind/pkg/cluster/internal/delete"
 	"sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig"
@@ -36,6 +36,14 @@ import (
 // DefaultName is the default cluster name
 const DefaultName = constants.DefaultClusterName
 
+// defaultName is a helper that given a name defaults it if unset
+func defaultName(name string) string {
+	if name == "" {
+		name = DefaultName
+	}
+	return name
+}
+
 // Provider is used to perform cluster operations
 type Provider struct {
 	provider internalprovider.Provider
@@ -121,26 +129,24 @@ func ProviderWithPodman() ProviderOption {
 	})
 }
 
-// TODO: remove this, rename internal context to something else
-func (p *Provider) ic(name string) *internalcontext.Context {
-	return internalcontext.NewProviderContext(p.provider, name)
-}
-
 // Create provisions and starts a kubernetes-in-docker cluster
+// TODO: move name to an option to override config
 func (p *Provider) Create(name string, options ...CreateOption) error {
 	// apply options
-	opts := &internalcreate.ClusterOptions{}
+	opts := &internalcreate.ClusterOptions{
+		NameOverride: name,
+	}
 	for _, o := range options {
 		if err := o.apply(opts); err != nil {
 			return err
 		}
 	}
-	return internalcreate.Cluster(p.logger, p.ic(name), opts)
+	return internalcreate.Cluster(p.logger, p.provider, opts)
 }
 
 // Delete tears down a kubernetes-in-docker cluster
 func (p *Provider) Delete(name, explicitKubeconfigPath string) error {
-	return internaldelete.Cluster(p.logger, p.ic(name), explicitKubeconfigPath)
+	return internaldelete.Cluster(p.logger, p.provider, defaultName(name), explicitKubeconfigPath)
 }
// List returns a list of clusters for which nodes exist
|
||||
@@ -152,7 +158,7 @@ func (p *Provider) List() ([]string, error) {
|
||||
// If internal is true, this will contain the internal IP etc.
|
||||
// If internal is false, this will contain the host IP etc.
|
||||
func (p *Provider) KubeConfig(name string, internal bool) (string, error) {
|
||||
return kubeconfig.Get(p.ic(name), !internal)
|
||||
return kubeconfig.Get(p.provider, defaultName(name), !internal)
|
||||
}
|
||||
|
||||
// ExportKubeConfig exports the KUBECONFIG for the cluster, merging
|
||||
@@ -160,21 +166,31 @@ func (p *Provider) KubeConfig(name string, internal bool) (string, error) {
 // https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#config
 // where explicitPath is the --kubeconfig value.
 func (p *Provider) ExportKubeConfig(name string, explicitPath string) error {
-	return kubeconfig.Export(p.ic(name), explicitPath)
+	return kubeconfig.Export(p.provider, defaultName(name), explicitPath)
 }
 
 // ListNodes returns the list of container IDs for the "nodes" in the cluster
 func (p *Provider) ListNodes(name string) ([]nodes.Node, error) {
-	return p.ic(name).ListNodes()
+	return p.provider.ListNodes(defaultName(name))
 }
 
 // ListInternalNodes returns the list of container IDs for the "nodes" in the cluster
 // that are not external
 func (p *Provider) ListInternalNodes(name string) ([]nodes.Node, error) {
-	return p.ic(name).ListInternalNodes()
+	n, err := p.provider.ListNodes(name)
+	if err != nil {
+		return nil, err
+	}
+	return nodeutils.InternalNodes(n)
 }
 
 // CollectLogs will populate dir with cluster logs and other debug files
 func (p *Provider) CollectLogs(name, dir string) error {
-	return p.ic(name).CollectLogs(dir)
+	// TODO: should use ListNodes and Collect should handle nodes differently
+	// based on role ...
+	n, err := p.ListInternalNodes(name)
+	if err != nil {
+		return err
+	}
+	return p.provider.CollectLogs(dir, n)
 }
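A hedged usage sketch of the public API after this refactor; the helpers used here (cluster.NewProvider, cluster.ProviderWithLogger, cmd.NewLogger, cluster.CreateWithConfigFile) are existing kind public API, not introduced by this commit, and "kind.yaml" is a hypothetical config path. With the internal context gone, an empty name flows through NameOverride and defaultName, so "" resolves to the config's name, then to "kind":

package main

import (
	"sigs.k8s.io/kind/pkg/cluster"
	"sigs.k8s.io/kind/pkg/cmd"
)

func main() {
	provider := cluster.NewProvider(cluster.ProviderWithLogger(cmd.NewLogger()))
	// "" is no override: the name: field from the config file wins, else "kind".
	if err := provider.Create("", cluster.CreateWithConfigFile("kind.yaml")); err != nil {
		panic(err)
	}
}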
@@ -18,7 +18,6 @@ limitations under the License.
 package cluster
 
 import (
-	"fmt"
 	"io"
 	"io/ioutil"
 	"time"
@@ -56,7 +55,7 @@ func NewCommand(logger log.Logger, streams cmd.IOStreams) *cobra.Command {
 			return runE(logger, streams, flags)
 		},
 	}
-	cmd.Flags().StringVar(&flags.Name, "name", cluster.DefaultName, "cluster context name")
+	cmd.Flags().StringVar(&flags.Name, "name", "", "cluster name, overrides KIND_CLUSTER_NAME, config (default kind)")
 	cmd.Flags().StringVar(&flags.Config, "config", "", "path to a kind config file")
 	cmd.Flags().StringVar(&flags.ImageName, "image", "", "node docker image to use for booting the cluster")
 	cmd.Flags().BoolVar(&flags.Retain, "retain", false, "retain nodes for debugging when cluster creation fails")
@@ -71,15 +70,6 @@ func runE(logger log.Logger, streams cmd.IOStreams, flags *flagpole) error {
 		runtime.GetDefault(logger),
 	)
 
-	// Check if the cluster name already exists
-	n, err := provider.ListNodes(flags.Name)
-	if err != nil {
-		return err
-	}
-	if len(n) != 0 {
-		return fmt.Errorf("node(s) already exist for a cluster with the name %q", flags.Name)
-	}
-
 	// handle config flag, we might need to read from stdin
 	withConfig, err := configOption(flags.Config, streams.In)
 	if err != nil {
@@ -87,7 +77,6 @@ func runE(logger log.Logger, streams cmd.IOStreams, flags *flagpole) error {
 	}
 
 	// create the cluster
-	logger.V(0).Infof("Creating cluster %q ...\n", flags.Name)
 	if err = provider.Create(
 		flags.Name,
 		withConfig,
@@ -24,6 +24,7 @@ import (
 func Convertv1alpha4(in *v1alpha4.Cluster) *Cluster {
 	in = in.DeepCopy() // deep copy first to avoid touching the original
 	out := &Cluster{
+		Name:                 in.Name,
 		Nodes:                make([]Node, len(in.Nodes)),
 		FeatureGates:         in.FeatureGates,
 		KubeadmConfigPatches: in.KubeadmConfigPatches,
@@ -22,10 +22,16 @@ package config
 
 import (
 	"sigs.k8s.io/kind/pkg/apis/config/defaults"
+	"sigs.k8s.io/kind/pkg/cluster/constants"
 )
 
 // SetDefaultsCluster sets uninitialized fields to their default value.
 func SetDefaultsCluster(obj *Cluster) {
+	// default cluster name
+	if obj.Name == "" {
+		obj.Name = constants.DefaultClusterName
+	}
+
 	// default to a one node cluster
 	if len(obj.Nodes) == 0 {
 		obj.Nodes = []Node{
@@ -35,6 +41,7 @@ func SetDefaultsCluster(obj *Cluster) {
 			},
 		}
 	}
+
 	// default nodes
 	for i := range obj.Nodes {
 		a := &obj.Nodes[i]
@@ -43,6 +50,7 @@ func SetDefaultsCluster(obj *Cluster) {
 	if obj.Networking.IPFamily == "" {
 		obj.Networking.IPFamily = "ipv4"
 	}
+
 	// default to listening on 127.0.0.1:randomPort on ipv4
 	// and [::1]:randomPort on ipv6
 	if obj.Networking.APIServerAddress == "" {
@@ -51,6 +59,7 @@ func SetDefaultsCluster(obj *Cluster) {
 			obj.Networking.APIServerAddress = "::1"
 		}
 	}
+
 	// default the pod CIDR
 	if obj.Networking.PodSubnet == "" {
 		obj.Networking.PodSubnet = "10.244.0.0/16"
@@ -58,6 +67,7 @@ func SetDefaultsCluster(obj *Cluster) {
 			obj.Networking.PodSubnet = "fd00:10:244::/64"
 		}
 	}
+
 	// default the service CIDR using the kubeadm default
 	// https://github.com/kubernetes/kubernetes/blob/746404f82a28e55e0b76ffa7e40306fb88eb3317/cmd/kubeadm/app/apis/kubeadm/v1beta2/defaults.go#L32
 	// Note: kubeadm is doing it already but this simplifies kind's logic
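Taken together with the NameOverride handling in fixupOptions and the --name flag change above, the defaulting here gives the name a clear precedence chain. The helper below is hypothetical, written only to illustrate that chain; it is not code from the commit:

package main

import "fmt"

// resolveName mirrors the precedence the commit implements:
// --name / KIND_CLUSTER_NAME override > config "name:" field > "kind".
func resolveName(override, configName string) string {
	if override != "" {
		return override // fixupOptions: NameOverride wins
	}
	if configName != "" {
		return configName // Cluster.Name from the config file
	}
	return "kind" // SetDefaultsCluster: constants.DefaultClusterName
}

func main() {
	fmt.Println(resolveName("", ""))               // "kind"
	fmt.Println(resolveName("", "from-config"))    // "from-config"
	fmt.Println(resolveName("cli", "from-config")) // "cli"
}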
@@ -18,6 +18,10 @@ package config
 
 // Cluster contains kind cluster configuration
 type Cluster struct {
+	// The cluster name.
+	// Optional, this will be overridden by --name / KIND_CLUSTER_NAME
+	Name string
+
 	// Nodes contains the list of nodes defined in the `kind` Cluster
 	// If unset this will default to a single control-plane node
 	// Note that if more than one control plane is specified, an external