RKE up - Failed at /etcd-fix-perm

Fails every time here …

DEBU[1727] FIXME: Got an status-code for which error does not match any expected type!!!: -1 module=api status_code=-1
WARN[1727] Failed to create Docker container [etcd-fix-perm] on host [10.10.76.208]: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?
WARN[1727] Failed to create Docker container [etcd-fix-perm] on host [10.10.76.208]: Error response from daemon: Conflict. The container name "/etcd-fix-perm" is already in use by container "ff0e685abdb04e31027614bc55649ebede1257230f3248e83084067d92f01831". You have to remove (or rename) that container to be able to reuse that name.
WARN[1727] Failed to create Docker container [etcd-fix-perm] on host [10.10.76.208]: Error response from daemon: Conflict. The container name "/etcd-fix-perm" is already in use by container "ff0e685abdb04e31027614bc55649ebede1257230f3248e83084067d92f01831". You have to remove (or rename) that container to be able to reuse that name.
FATA[1727] [etcd] Failed to bring up Etcd Plane: Failed to create [etcd-fix-perm] container on host [10.10.76.208]: Failed to create Docker container [etcd-fix-perm] on host [10.10.76.208]: Error response from daemon: Conflict. The container name "/etcd-fix-perm" is already in use by container "ff0e685abdb04e31027614bc55649ebede1257230f3248e83084067d92f01831". You have to remove (or rename) that container to be able to reuse that name.

My cluster.yml is here:

cat cluster.yml

# If you intended to deploy Kubernetes in an air-gapped environment,
# please consult the documentation on how to configure custom RKE images.
nodes:
- address: 10.10.76.208
  port: "5522"
  internal_address: ""
  role:
  - controlplane
  - etcd
  hostname_override: Master
  user: parandhaman
  docker_socket: /var/run/docker.sock
  ssh_key: ""
  ssh_key_path: /home/parandhaman/.ssh/id_rsa
  ssh_cert: ""
  ssh_cert_path: ""
  labels: {}
  taints:
- address: 10.10.76.209
  port: "5522"
  internal_address: ""
  role:
  - worker
  hostname_override: node1
  user: parandhaman
  docker_socket: /var/run/docker.sock
  ssh_key: ""
  ssh_key_path: /home/parandhaman/.ssh/id_rsa
  ssh_cert: ""
  ssh_cert_path: ""
  labels: {}
  taints:
services:
  etcd:
    image: ""
    extra_args: {}
    extra_binds:
    extra_env:
    win_extra_args: {}
    win_extra_binds:
    win_extra_env:
    external_urls:
    ca_cert: ""
    cert: ""
    key: ""
    path: ""
    uid: 0
    gid: 0
    snapshot: null
    retention: ""
    creation: ""
    backup_config: null
  kube-api:
    image: ""
    extra_args: {}
    extra_binds:
    extra_env:
    win_extra_args: {}
    win_extra_binds:
    win_extra_env:
    service_cluster_ip_range: 10.43.0.0/16
    service_node_port_range: ""
    pod_security_policy: false
    always_pull_images: false
    secrets_encryption_config: null
    audit_log: null
    admission_configuration: null
    event_rate_limit: null
  kube-controller:
    image: ""
    extra_args: {}
    extra_binds:
    extra_env:
    win_extra_args: {}
    win_extra_binds:
    win_extra_env:
    cluster_cidr: 10.42.0.0/16
    service_cluster_ip_range: 10.43.0.0/16
  scheduler:
    image: ""
    extra_args: {}
    extra_binds:
    extra_env:
    win_extra_args: {}
    win_extra_binds:
    win_extra_env:
  kubelet:
    image: ""
    extra_args: {}
    extra_binds:
    extra_env:
    win_extra_args: {}
    win_extra_binds:
    win_extra_env:
    cluster_domain: oasys_rancher
    infra_container_image: ""
    cluster_dns_server: 10.43.0.10
    fail_swap_on: false
    generate_serving_certificate: false
  kubeproxy:
    image: ""
    extra_args: {}
    extra_binds:
    extra_env:
    win_extra_args: {}
    win_extra_binds:
    win_extra_env:
network:
  plugin: weave
  options: {}
  mtu: 0
  node_selector: {}
  update_strategy: null
  tolerations:
authentication:
  strategy: x509
  sans:
  webhook: null
addons: ""
addons_include:
system_images:
  etcd: rancher/mirrored-coreos-etcd:v3.5.3
  alpine: rancher/rke-tools:v0.1.80
  nginx_proxy: rancher/rke-tools:v0.1.80
  cert_downloader: rancher/rke-tools:v0.1.80
  kubernetes_services_sidecar: rancher/rke-tools:v0.1.80
  kubedns: rancher/mirrored-k8s-dns-kube-dns:1.17.4
  dnsmasq: rancher/mirrored-k8s-dns-dnsmasq-nanny:1.17.4
  kubedns_sidecar: rancher/mirrored-k8s-dns-sidecar:1.17.4
  kubedns_autoscaler: rancher/mirrored-cluster-proportional-autoscaler:1.8.3
  coredns: rancher/mirrored-coredns-coredns:1.8.6
  coredns_autoscaler: rancher/mirrored-cluster-proportional-autoscaler:1.8.5
  nodelocal: rancher/mirrored-k8s-dns-node-cache:1.21.1
  kubernetes: rancher/hyperkube:v1.22.9-rancher1
  flannel: rancher/mirrored-coreos-flannel:v0.15.1
  flannel_cni: rancher/flannel-cni:v0.3.0-rancher6
  calico_node: rancher/mirrored-calico-node:v3.21.1
  calico_cni: rancher/mirrored-calico-cni:v3.21.1
  calico_controllers: rancher/mirrored-calico-kube-controllers:v3.21.1
  calico_ctl: rancher/mirrored-calico-ctl:v3.21.1
  calico_flexvol: rancher/mirrored-calico-pod2daemon-flexvol:v3.21.1
  canal_node: rancher/mirrored-calico-node:v3.21.1
  canal_cni: rancher/mirrored-calico-cni:v3.21.1
  canal_controllers: rancher/mirrored-calico-kube-controllers:v3.21.1
  canal_flannel: rancher/mirrored-flannelcni-flannel:v0.17.0
  canal_flexvol: rancher/mirrored-calico-pod2daemon-flexvol:v3.21.1
  weave_node: weaveworks/weave-kube:2.8.1
  weave_cni: weaveworks/weave-npc:2.8.1
  pod_infra_container: rancher/mirrored-pause:3.6
  ingress: rancher/nginx-ingress-controller:nginx-1.2.0-rancher1
  ingress_backend: rancher/mirrored-nginx-ingress-controller-defaultbackend:1.5-rancher1
  ingress_webhook: rancher/mirrored-ingress-nginx-kube-webhook-certgen:v1.1.1
  metrics_server: rancher/mirrored-metrics-server:v0.5.1
  windows_pod_infra_container: rancher/mirrored-pause:3.6
  aci_cni_deploy_container: noiro/cnideploy:5.1.1.0.1ae238a
  aci_host_container: noiro/aci-containers-host:5.1.1.0.1ae238a
  aci_opflex_container: noiro/opflex:5.1.1.0.1ae238a
  aci_mcast_container: noiro/opflex:5.1.1.0.1ae238a
  aci_ovs_container: noiro/openvswitch:5.1.1.0.1ae238a
  aci_controller_container: noiro/aci-containers-controller:5.1.1.0.1ae238a
  aci_gbp_server_container: noiro/gbp-server:5.1.1.0.1ae238a
  aci_opflex_server_container: noiro/opflex-server:5.1.1.0.1ae238a
ssh_key_path: /home/parandhaman/.ssh/id_rsa
ssh_cert_path: ""
ssh_agent_auth: false
authorization:
  mode: rbac
  options: {}
ignore_docker_version: null
enable_cri_dockerd: null
kubernetes_version: ""
private_registries:
ingress:
  provider: none
  options: {}
  node_selector: {}
  extra_args: {}
  dns_policy: ""
  extra_envs:
  extra_volumes:
  extra_volume_mounts:
  update_strategy: null
  http_port: 0
  https_port: 0
  network_mode: ""
  tolerations:
  default_backend: null
  default_http_backend_priority_class_name: ""
  nginx_ingress_controller_priority_class_name: ""
  default_ingress_class: null
cluster_name: ""
cloud_provider:
  name: ""
prefix_path: ""
win_prefix_path: ""
addon_job_timeout: 0
bastion_host:
  address: ""
  port: ""
  user: ""
  ssh_key: ""
  ssh_key_path: ""
  ssh_cert: ""
  ssh_cert_path: ""
  ignore_proxy_env_vars: false
monitoring:
  provider: ""
  options: {}
  node_selector: {}
  update_strategy: null
  replicas: null
  tolerations:
  metrics_server_priority_class_name: ""
restore:
  restore: false
  snapshot_name: ""
rotate_encryption_key: false
dns: null

Hello Parandhaman,

Please check whether Docker is running on all nodes. The logs also show that a leftover container named etcd-fix-perm already exists on host 10.10.76.208 and conflicts with the one RKE is trying to create. You can either delete that particular container or remove the RKE cluster entirely and reinstall it; see the commands below.
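A minimal sketch of that cleanup, assuming SSH access to the affected node (10.10.76.208) and a systemd-managed Docker installation:

# on the node: confirm the Docker daemon is running and reachable
sudo systemctl status docker

# list the leftover container reported in the error
docker ps -a --filter name=etcd-fix-perm

# remove it so RKE can recreate it on the next run
docker rm -f etcd-fix-perm

Alternatively, running rke remove from your workstation against the same cluster.yml tears down all RKE-created containers and state on every node, after which rke up starts from a clean slate.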
Also note that the SSH port of the host should be 22.
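If you do move the hosts back to the default SSH port, the port field of each node entry in cluster.yml changes accordingly (shown here for the first node only):

nodes:
- address: 10.10.76.208
  port: "22"   # default SSH port instead of the custom 5522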
