$ ls -la | grep kube
drwx------+ 1 Kone None 0 Feb 22 11:56 .kube
drwxrwx---+ 1 Administrators None 0 Feb 22 11:12 .minikube
$ tree .kube /F
Folder PATH listing
Volume serial number is 36D7-3774
C:\USERS\KONE\.KUBE
│ config
│
└───http-cache
└───.diskv-temp
$ cat .kube/config
apiVersion: v1
clusters:
- cluster:
certificate-authority: C:\Users\Kone\.minikube\ca.crt
server: https://192.168.99.101:8443
name: minikube
contexts:
- context:
cluster: minikube
user: minikube
name: minikube
current-context: minikube
kind: Config
preferences: {}
users:
- name: minikube
user:
client-certificate: C:\Users\Kone\.minikube\client.crt
client-key: C:\Users\Kone\.minikube\client.key
$ kubectl config view
apiVersion: v1
clusters:
- cluster:
certificate-authority: C:\Users\Kone\.minikube\ca.crt
server: https://192.168.99.101:8443
name: minikube
contexts:
- context:
cluster: minikube
user: minikube
name: minikube
current-context: minikube
kind: Config
preferences: {}
users:
- name: minikube
user:
client-certificate: C:\Users\Kone\.minikube\client.crt
client-key: C:\Users\Kone\.minikube\client.key
$ kubectl cluster-info
Kubernetes master is running at https://192.168.99.101:8443
KubeDNS is running at https://192.168.99.101:8443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
$ minikube dashboard
* Enabling dashboard ...
* Verifying dashboard health ...
* Launching proxy ...
* Verifying proxy health ...
* Opening http://127.0.0.1:57996/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ in your default browser...
By issuing the kubectl proxy command, kubectl authenticates with the API server on the master node
and makes the Dashboard available on a slightly different URL than the earlier one.
$ kubectl --help | grep proxy
proxy Run a proxy to the Kubernetes API server
$ kubectl proxy
Starting to serve on 127.0.0.1:8001
Then we can access the Dashboard via:
http://127.0.0.1:8001/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/#/cluster?namespace=default
Once we stop the proxy (CTRL + C), the Dashboard is no longer accessible.
While kubectl proxy is running, we can send requests to the API
over localhost on proxy port 8001:
$ curl http://localhost:8001/
{
"paths": [
"/api",
"/api/v1",
"/apis",
"/apis/",
"/apis/admissionregistration.k8s.io",
"/apis/admissionregistration.k8s.io/v1",
"/apis/admissionregistration.k8s.io/v1beta1",
"/apis/apiextensions.k8s.io",
"/apis/apiextensions.k8s.io/v1",
"/apis/apiextensions.k8s.io/v1beta1",
"/apis/apiregistration.k8s.io",
"/apis/apiregistration.k8s.io/v1",
"/apis/apiregistration.k8s.io/v1beta1",
"/apis/apps",
"/apis/apps/v1",
"/apis/authentication.k8s.io",
"/apis/authentication.k8s.io/v1",
"/apis/authentication.k8s.io/v1beta1",
"/apis/authorization.k8s.io",
"/apis/authorization.k8s.io/v1",
"/apis/authorization.k8s.io/v1beta1",
"/apis/autoscaling",
"/apis/autoscaling/v1",
"/apis/autoscaling/v2beta1",
"/apis/autoscaling/v2beta2",
"/apis/batch",
"/apis/batch/v1",
"/apis/batch/v1beta1",
"/apis/certificates.k8s.io",
"/apis/certificates.k8s.io/v1beta1",
"/apis/coordination.k8s.io",
"/apis/coordination.k8s.io/v1",
"/apis/coordination.k8s.io/v1beta1",
"/apis/discovery.k8s.io",
"/apis/discovery.k8s.io/v1beta1",
"/apis/events.k8s.io",
"/apis/events.k8s.io/v1beta1",
"/apis/extensions",
"/apis/extensions/v1beta1",
"/apis/networking.k8s.io",
"/apis/networking.k8s.io/v1",
"/apis/networking.k8s.io/v1beta1",
"/apis/node.k8s.io",
"/apis/node.k8s.io/v1beta1",
"/apis/policy",
"/apis/policy/v1beta1",
"/apis/rbac.authorization.k8s.io",
"/apis/rbac.authorization.k8s.io/v1",
"/apis/rbac.authorization.k8s.io/v1beta1",
"/apis/scheduling.k8s.io",
"/apis/scheduling.k8s.io/v1",
"/apis/scheduling.k8s.io/v1beta1",
"/apis/storage.k8s.io",
"/apis/storage.k8s.io/v1",
"/apis/storage.k8s.io/v1beta1",
"/healthz",
"/healthz/autoregister-completion",
"/healthz/etcd",
"/healthz/log",
"/healthz/ping",
"/healthz/poststarthook/apiservice-openapi-controller",
"/healthz/poststarthook/apiservice-registration-controller",
"/healthz/poststarthook/apiservice-status-available-controller",
"/healthz/poststarthook/bootstrap-controller",
"/healthz/poststarthook/crd-informer-synced",
"/healthz/poststarthook/generic-apiserver-start-informers",
"/healthz/poststarthook/kube-apiserver-autoregistration",
"/healthz/poststarthook/rbac/bootstrap-roles",
"/healthz/poststarthook/scheduling/bootstrap-system-priority-classes",
"/healthz/poststarthook/start-apiextensions-controllers",
"/healthz/poststarthook/start-apiextensions-informers",
"/healthz/poststarthook/start-cluster-authentication-info-controller",
"/healthz/poststarthook/start-kube-aggregator-informers",
"/healthz/poststarthook/start-kube-apiserver-admission-initializer",
"/livez",
"/livez/autoregister-completion",
"/livez/etcd",
"/livez/log",
"/livez/ping",
"/livez/poststarthook/apiservice-openapi-controller",
"/livez/poststarthook/apiservice-registration-controller",
"/livez/poststarthook/apiservice-status-available-controller",
"/livez/poststarthook/bootstrap-controller",
"/livez/poststarthook/crd-informer-synced",
"/livez/poststarthook/generic-apiserver-start-informers",
"/livez/poststarthook/kube-apiserver-autoregistration",
"/livez/poststarthook/rbac/bootstrap-roles",
"/livez/poststarthook/scheduling/bootstrap-system-priority-classes",
"/livez/poststarthook/start-apiextensions-controllers",
"/livez/poststarthook/start-apiextensions-informers",
"/livez/poststarthook/start-cluster-authentication-info-controller",
"/livez/poststarthook/start-kube-aggregator-informers",
"/livez/poststarthook/start-kube-apiserver-admission-initializer",
"/logs",
"/metrics",
"/openapi/v2",
"/readyz",
"/readyz/autoregister-completion",
"/readyz/etcd",
"/readyz/log",
"/readyz/ping",
"/readyz/poststarthook/apiservice-openapi-controller",
"/readyz/poststarthook/apiservice-registration-controller",
"/readyz/poststarthook/apiservice-status-available-controller",
"/readyz/poststarthook/bootstrap-controller",
"/readyz/poststarthook/crd-informer-synced",
"/readyz/poststarthook/generic-apiserver-start-informers",
"/readyz/poststarthook/kube-apiserver-autoregistration",
"/readyz/poststarthook/rbac/bootstrap-roles",
"/readyz/poststarthook/scheduling/bootstrap-system-priority-classes",
"/readyz/poststarthook/start-apiextensions-controllers",
"/readyz/poststarthook/start-apiextensions-informers",
"/readyz/poststarthook/start-cluster-authentication-info-controller",
"/readyz/poststarthook/start-kube-aggregator-informers",
"/readyz/poststarthook/start-kube-apiserver-admission-initializer",
"/readyz/shutdown",
"/version"
]
}
Without kubectl proxy, we need to authenticate to the API server when sending
API requests. We can authenticate by providing a Bearer Token when issuing curl,
or by providing a set of keys and certificates.
First, get the default ServiceAccount token secret:
$ kubectl get secrets -n kube-system | grep default
default-token-bdmlz kubernetes.io/service-account-token 3 131m
Then describe the secret to extract the token:
$ kubectl describe secret -n kube-system default-token-bdmlz
Name: default-token-bdmlz
Namespace: kube-system
Labels: <none>
Annotations: kubernetes.io/service-account.name: default
kubernetes.io/service-account.uid: e104bbfa-7908-4495-bae5-14c95d0246cd
Type: kubernetes.io/service-account-token
Data
====
ca.crt: 1066 bytes
namespace: 11 bytes
token: eyJhbGciOiJSUzI1NiIsImtpZCI6IlNaZ1dUVUZ6Q01oTG05VUh3SEVpa2YzMkNvbXQ2YjdLdzdfb3RFdm1RTEEifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkZWZhdWx0LXRva2VuLWJkbWx6Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImRlZmF1bHQiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJlMTA0YmJmYS03OTA4LTQ0OTUtYmFlNS0xNGM5NWQwMjQ2Y2QiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06ZGVmYXVsdCJ9.pjyLX4mGRafkIFqJko9VHMkhFTYU-Db6wWpADQWcAFMyVOATOyawY7dMCQ4O3xB-QKCoN3sckegB_NqWO-5GYe8eixwi66SkqyHnduuyaASymGqJaESQvrF7KuvMh8gLLGgjFDZ3shUKa3qYdasK8rxyYRjkcPo55aVBA6cVZ0cL_TxAfe0_zlaIqFDJ0aNR4L-G5x0XvotJ6P9-xMADaC2GXFkgw7PXHLu0XTlxaWCe6ORDb0kpJSiJ4QRCbUABXfUU2k_6vEZRDswc32XbHQvmE0OF3LSg-Fu7ZpA8AZ3NHtwqit0XkZDJYm6HIfVEPlp6xRLn_Hart8QyW9VdCg
Now we have the token. Next, check the API server address:
$ kubectl cluster-info
Kubernetes master is running at https://192.168.99.101:8443
KubeDNS is running at https://192.168.99.101:8443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
Set variables in the terminal (in Windows; it would be easier on a unix system :p):
$ set TOKEN=eyJhbGciOiJSUzI1NiIsImt....8QyW9VdCg
$ set APISERVER=https://192.168.99.101:8443
$ echo %TOKEN%
eyJhbGciOiJSUzI1NiIsImtpZ...
...
...8QyW9VdCg
$ echo %APISERVER%
https://192.168.99.101:8443
Then curl it.
$ curl -k %APISERVER% --header "Authorization: Bearer %TOKEN%"
{
"paths": [
"/api",
"/api/v1",
"/apis",
...
...
"/readyz/shutdown",
"/version"
]
}
Check where the certificates are located:
$ kubectl config view
apiVersion: v1
clusters:
- cluster:
certificate-authority: C:\Users\Kone\.minikube\ca.crt
server: https://192.168.99.101:8443
name: minikube
contexts:
- context:
cluster: minikube
user: minikube
name: minikube
current-context: minikube
kind: Config
preferences: {}
users:
- name: minikube
user:
client-certificate: C:\Users\Kone\.minikube\client.crt
client-key: C:\Users\Kone\.minikube\client.key
We need to base64-encode ca.crt, client.crt and client.key.
Kone@KONE C:\Users\Kone\.minikube
// TODO
// will figure out how to curl it with certificates