diff --git a/Gopkg.lock b/Gopkg.lock
index 44b600235..449482759 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -61,7 +61,7 @@
 
 [[projects]]
   branch = "master"
-  digest = "1:881917f48fa5cc7ea3365b03710c7526a006100fece1ab7a9abb981e3eb15a18"
+  digest = "1:ca1c3f8d5fb822ad8aa3b93f499be3501a1559ebc70207e908474cc8c33bd9b7"
   name = "github.com/gluster/glusterd2"
   packages = [
     "pkg/api",
@@ -75,7 +75,7 @@
     "plugins/glustershd/api",
   ]
   pruneopts = ""
-  revision = "73c66caf16cea5bea76a899d9d4eec82ab7bbf56"
+  revision = "428c623b4d5618a0e1bcbc4a4c317f5b526f29b5"
 
 [[projects]]
   digest = "1:6e73003ecd35f4487a5e88270d3ca0a81bc80dc88053ac7e4dcfec5fba30d918"
@@ -394,7 +394,7 @@
 
 [[projects]]
   branch = "master"
-  digest = "1:d1b5970f2a453e7c4be08117fb683b5d096bad9d17f119a6e58d4c561ca205dd"
+  digest = "1:117e1e4f1ed83191a4a225d23488e14802ff8f91b3ed4ff0d229e8ea0faf0a88"
   name = "github.com/prometheus/common"
   packages = [
     "expfmt",
@@ -402,7 +402,7 @@
     "model",
   ]
   pruneopts = ""
-  revision = "bcb74de08d37a417cb6789eec1d6c810040f0470"
+  revision = "7e9e6cabbd393fc208072eedef99188d0ce788b6"
 
 [[projects]]
   branch = "master"
@@ -511,25 +511,25 @@
 
 [[projects]]
   branch = "master"
-  digest = "1:235cb00e80dcf85b78a24be4bbe6c827fb28613b84037a9d524084308a849d91"
+  digest = "1:77478892c6d9d841c7997858d6287884c65b760000f29a25d7b1a6b6ada5f308"
   name = "golang.org/x/oauth2"
   packages = [
     ".",
     "internal",
   ]
   pruneopts = ""
-  revision = "c57b0facaced709681d9f90397429b9430a74754"
+  revision = "9dcd33a902f40452422c2367fefcb95b54f9f8f8"
 
 [[projects]]
   branch = "master"
-  digest = "1:2ed0bf267e44950120acd95570227e28184573ffb099bd85b529ee148e004ddb"
+  digest = "1:03a1b1f2bfb0a6b2291c4161032429b1e8a1f23cc68530ad64358f8b493ead5f"
   name = "golang.org/x/sys"
   packages = [
     "unix",
     "windows",
   ]
   pruneopts = ""
-  revision = "fa43e7bc11baaae89f3f902b2b4d832b68234844"
+  revision = "8a28ead16f52c8aaeffbf79239b251dfdf6c4f96"
 
 [[projects]]
   digest = "1:5acd3512b047305d49e8763eef7ba423901e85d5dd2fd1e71778a0ea8de10bd4"
@@ -721,11 +721,11 @@
 
 [[projects]]
   branch = "master"
-  digest = "1:e52a7c580216d9bec31302d43c129e82eff62ab3790501b6553d55897ad23c6f"
+  digest = "1:5b61011f0e2204d6ced06cb2ee555973280e62c840edd88dec95263c31fffaaa"
   name = "k8s.io/apiextensions-apiserver"
   packages = ["pkg/features"]
   pruneopts = ""
-  revision = "5ca6f4b7a8c88a0318b42e3c75f1610f0d5b67b4"
+  revision = "2867844605ff2cb7086eb25e19ad86c02e3319ad"
 
 [[projects]]
   digest = "1:7aa037a4df5432be2820d164f378d7c22335e5cbba124e90e42114757ebd11ac"
@@ -784,7 +784,7 @@
 
 [[projects]]
   branch = "master"
-  digest = "1:8be8812f85a0baaf9608ccbe4a4b1a6ef261d086d04f3d4304602006fe1fd295"
+  digest = "1:4f24c53b785bbb0ed94d7168546454658281c3cbe070d84e319cae373f1f5e44"
   name = "k8s.io/apiserver"
   packages = [
     "pkg/authentication/authenticator",
@@ -794,7 +794,7 @@
     "pkg/util/feature",
   ]
   pruneopts = ""
-  revision = "0deca6c827f580ebbd9cd8c2fe6fca0858313386"
+  revision = "7f6c545c2e4ff0af96d8aac792ca499ea87a92e1"
 
 [[projects]]
   digest = "1:5d4153d12c3aed2c90a94262520d2498d5afa4d692554af55e65a7c5af0bc399"
@@ -933,7 +933,7 @@
 
 [[projects]]
   branch = "master"
-  digest = "1:ed3b93f161228f7530bc7e8d6ffb99e4b0133268805121bebce80e24694abec5"
+  digest = "1:0c8eddb131234172e9eaf51ca81c3e9d8400ad08164f8ab2c08b963a0921bd13"
   name = "k8s.io/csi-api"
   packages = [
     "pkg/apis/csi/v1alpha1",
@@ -942,15 +942,15 @@
     "pkg/client/clientset/versioned/typed/csi/v1alpha1",
   ]
   pruneopts = ""
-  revision = "e68653451c29a6582db1f2839d5d8dda0c0a5aea"
+  revision = "ec830fab2ec0b36d37a043713f87d76e72486004"
 
 [[projects]]
   branch = "master"
-  digest = "1:7b06ff480fd71dead51f0f243b573c448c372ec086b790ec7ed4f8a78f2c1cbf"
+  digest = "1:9dbe3db0f0ba4af2e0611683abab22dfd049b96294de40ca97cda2e39e2b78c6"
   name = "k8s.io/kube-openapi"
   packages = ["pkg/util/proto"]
   pruneopts = ""
-  revision = "9dfdf9be683f61f82cda12362c44c784e0778b56"
+  revision = "90b54e673cf4f8ba61a75ad6ef90a69e8da13568"
 
 [[projects]]
   digest = "1:c8b66f8046163fd757f9fb87602e3bb181191512d31281c50c2c900046470877"
@@ -1009,14 +1009,14 @@
 
 [[projects]]
   branch = "master"
-  digest = "1:551fd880288345a6652a98712012b56e21fed473b7c7bd16dc32695e2bd6537a"
+  digest = "1:946ab9d88af6d6e8fff0b083bbd39b53af5d019261c828eed4bcbc8d58f0a812"
   name = "k8s.io/utils"
   packages = [
     "exec",
     "pointer",
  ]
   pruneopts = ""
-  revision = "14b61cea4a067d0af104cea47bbf51cf100c0253"
+  revision = "4c3feeb576b06ef8fea769809bd3db5e5e78dc23"
 
 [solve-meta]
   analyzer-name = "dep"
@@ -1024,6 +1024,7 @@
   input-imports = [
     "github.com/container-storage-interface/spec/lib/go/csi/v0",
     "github.com/gluster/glusterd2/pkg/api",
+    "github.com/gluster/glusterd2/pkg/errors",
     "github.com/gluster/glusterd2/pkg/restclient",
     "github.com/golang/glog",
     "github.com/kubernetes-csi/csi-test/pkg/sanity",
diff --git a/README.md b/README.md
index 220af67d1..33e9bb135 100644
--- a/README.md
+++ b/README.md
@@ -47,7 +47,7 @@ To build, ensure docker is installed, and run:
 ### Deploy CSI driver
 
 ```
-[root@localhost cluster]#kubectl create -f csi-deployment.yaml
+[root@localhost]# kubectl create -f csi-deployment.yaml
 service/csi-attacher-glusterfsplugin created
 statefulset.apps/csi-attacher-glusterfsplugin created
 daemonset.apps/csi-nodeplugin-glusterfsplugin created
@@ -58,10 +58,28 @@
 clusterrole.rbac.authorization.k8s.io/glusterfs-csi created
 clusterrolebinding.rbac.authorization.k8s.io/glusterfs-csi-role created
 ```
 
+The feature gates listed below need to be enabled in Kubernetes v1.12.1:
+
+```
+--feature-gates=CSIPersistentVolume=true,MountPropagation=true,VolumeSnapshotDataSource=true,KubeletPluginsWatcher=true,CSINodeInfo=true,CSIDriverRegistry=true
+```
+
+### Create the CSIDriverRegistry and CSINodeInfo CRDs in Kubernetes v1.12.1
+
+```
+[root@localhost]# kubectl create -f driver-registry-crd.yaml --validate=false
+customresourcedefinition.apiextensions.k8s.io/csidrivers.csi.storage.k8s.io created
+```
+
+```
+[root@localhost]# kubectl apply -f node-info-crd.yaml --validate=false
+customresourcedefinition.apiextensions.k8s.io/csinodeinfos.csi.storage.k8s.io created
+```
+
 ### Create a storage class
 
 ```
-[root@localhost cluster]# cat sc.yaml
+[root@localhost]# cat storage-class.yaml
 apiVersion: storage.k8s.io/v1
 kind: StorageClass
 metadata:
@@ -71,10 +89,34 @@
 provisioner: org.gluster.glusterfs
 ```
 
+```
+[root@localhost]# kubectl create -f storage-class.yaml
+storageclass.storage.k8s.io/glusterfs-csi created
+```
+
+Verify the storage class
+
+```
+[root@localhost]# kubectl get storageclass
+NAME                      PROVISIONER             AGE
+glusterfs-csi (default)   org.gluster.glusterfs   105s
+[root@localhost]# kubectl describe storageclass/glusterfs-csi
+Name:                  glusterfs-csi
+IsDefaultClass:        Yes
+Annotations:           storageclass.beta.kubernetes.io/is-default-class=true
+Provisioner:           org.gluster.glusterfs
+Parameters:            <none>
+AllowVolumeExpansion:  <unset>
+MountOptions:          <none>
+ReclaimPolicy:         Delete
+VolumeBindingMode:     Immediate
+Events:                <none>
+```
+
 ### Create PersistentVolumeClaim
 
 ```
-[root@localhost cluster]# cat pvc.yaml
+[root@localhost]# cat pvc.yaml
 ---
 kind: PersistentVolumeClaim
 apiVersion: v1
@@ -96,13 +138,13 @@
 persistentvolumeclaim/glusterfs-csi-pv created
 ```
 
 Validate the claim creation
 
 ```
-[root@localhost cluster]# kubectl get pvc
+[root@localhost]# kubectl get pvc
 NAME               STATUS   VOLUME                 CAPACITY   ACCESS MODES   STORAGECLASS    AGE
 glusterfs-csi-pv   Bound    pvc-953d21f5a51311e8   5Gi        RWX            glusterfs-csi   3s
 ```
 
 ```
-[root@localhost cluster]# kubectl describe pvc
+[root@localhost]# kubectl describe pvc
 Name:          glusterfs-csi-pv
 Namespace:     default
 StorageClass:  glusterfs-csi
@@ -128,7 +170,7 @@ Events:
 Verify PV details:
 
 ```
-[root@localhost cluster]# kubectl describe pv
+[root@localhost]# kubectl describe pv
 Name:            pvc-953d21f5a51311e8
 Labels:          <none>
 Annotations:     pv.kubernetes.io/provisioned-by=org.gluster.glusterfs
@@ -152,7 +194,7 @@ Events:
 ### Create a pod with this claim
 
 ```
-[root@master vagrant]# cat app.yaml
+[root@localhost]# cat app.yaml
 ---
 apiVersion: v1
 kind: Pod
@@ -179,11 +221,233 @@ spec:
 Check mount output and validate.
 
 ```
-[root@localhost cluster]# mount |grep glusterfs
+[root@localhost]# mount |grep glusterfs
 192.168.121.158:pvc-953d21f5a51311e8 on /var/lib/kubelet/pods/2a563343-a514-11e8-a324-525400a04cb4/volumes/kubernetes.io~csi/pvc-953d21f5a51311e8/mount type fuse.glusterfs (rw,relatime,user_id=0,group_id=0,default_permissions,allow_other,max_read=131072)
 
-[root@localhost cluster]# kubectl delete pod gluster
+[root@localhost]# kubectl delete pod gluster
 pod "gluster" deleted
-[root@localhost cluster]# mount |grep glusterfs
-[root@localhost cluster]#
+[root@localhost]# mount |grep glusterfs
+[root@localhost]#
+```
+
+### Snapshot support
+
+Kubernetes v1.12 introduces alpha support for volume snapshotting. This
+feature allows creating and deleting volume snapshots, and creating new
+volumes from a snapshot, natively through the Kubernetes API.
+
+### Write data into the already-created application's PVC to verify volume cloning
+
+```
+[root@localhost]# kubectl exec -it redis /bin/bash
+root@redis:/data# cd /mnt/gluster/
+root@redis:/mnt/gluster# echo "glusterfs csi clone test" > clone_data
+```
+
+### Create a snapshot class
+
+```
+[root@localhost]# cat snapshot-class.yaml
+---
+apiVersion: snapshot.storage.k8s.io/v1alpha1
+kind: VolumeSnapshotClass
+metadata:
+  name: glusterfs-csi-snap
+snapshotter: org.gluster.glusterfs
+```
+
+```
+[root@localhost]# kubectl create -f snapshot-class.yaml
+volumesnapshotclass.snapshot.storage.k8s.io/glusterfs-csi-snap created
+```
+
+Verify the snapshot class
+
+```
+[root@localhost]# kubectl get volumesnapshotclass
+NAME                 AGE
+glusterfs-csi-snap   1h
+[root@localhost]# kubectl describe volumesnapshotclass/glusterfs-csi-snap
+Name:         glusterfs-csi-snap
+Namespace:
+Labels:       <none>
+Annotations:  <none>
+API Version:  snapshot.storage.k8s.io/v1alpha1
+Kind:         VolumeSnapshotClass
+Metadata:
+  Creation Timestamp:  2018-10-24T04:57:34Z
+  Generation:          1
+  Resource Version:    3215
+  Self Link:           /apis/snapshot.storage.k8s.io/v1alpha1/volumesnapshotclasses/glusterfs-csi-snap
+  UID:                 51de83df-d749-11e8-892a-525400d84c47
+Snapshotter:  org.gluster.glusterfs
+Events:       <none>
+```
+
+### Create a snapshot from the PVC
+
+```
+[root@localhost]# cat volume-snapshot.yaml
+---
+apiVersion: snapshot.storage.k8s.io/v1alpha1
+kind: VolumeSnapshot
+metadata:
+  name: glusterfs-csi-ss
+spec:
+  snapshotClassName: glusterfs-csi-snap
+  source:
+    name: glusterfs-csi-pv
+    kind: PersistentVolumeClaim
+```
+
+```
+[root@localhost]# kubectl create -f volume-snapshot.yaml
+volumesnapshot.snapshot.storage.k8s.io/glusterfs-csi-ss created
+```
+
+Verify the volume snapshot
+
+```
+[root@localhost]# kubectl get volumesnapshot
+NAME               AGE
+glusterfs-csi-ss   13s
+[root@localhost]# kubectl describe volumesnapshot/glusterfs-csi-ss
+Name:         glusterfs-csi-ss
+Namespace:    default
+Labels:       <none>
+Annotations:  <none>
+API Version:  snapshot.storage.k8s.io/v1alpha1
+Kind:         VolumeSnapshot
+Metadata:
+  Creation Timestamp:  2018-10-24T06:39:35Z
+  Generation:          1
+  Resource Version:    12567
+  Self Link:           /apis/snapshot.storage.k8s.io/v1alpha1/namespaces/default/volumesnapshots/glusterfs-csi-ss
+  UID:                 929722b7-d757-11e8-892a-525400d84c47
+Spec:
+  Snapshot Class Name:    glusterfs-csi-snap
+  Snapshot Content Name:  snapcontent-929722b7-d757-11e8-892a-525400d84c47
+  Source:
+    Kind:  PersistentVolumeClaim
+    Name:  glusterfs-csi-pv
+Status:
+  Creation Time:  1970-01-01T00:00:01Z
+  Ready:          true
+  Restore Size:
+Events:           <none>
+```
+
+### Provision a new volume from the snapshot
+
+```
+[root@localhost]# cat pvc-restore.yaml
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: glusterfs-pv-restore
+spec:
+  storageClassName: glusterfs-csi
+  dataSource:
+    name: glusterfs-csi-ss
+    kind: VolumeSnapshot
+    apiGroup: snapshot.storage.k8s.io
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 5Gi
+```
+
+```
+[root@localhost]# kubectl create -f pvc-restore.yaml
+persistentvolumeclaim/glusterfs-pv-restore created
+```
+
+Verify the newly created claim
+
+```
+[root@localhost]# kubectl get pvc
+NAME                   STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS    AGE
+glusterfs-csi-pv       Bound    pvc-712278b0-d749-11e8-892a-525400d84c47   5Gi        RWX            glusterfs-csi   103m
+glusterfs-pv-restore   Bound    pvc-dfcc36f0-d757-11e8-892a-525400d84c47   5Gi        RWO            glusterfs-csi   14s
+```
+
+```
+[root@localhost]# kubectl describe pvc/glusterfs-pv-restore
+Name:          glusterfs-pv-restore
+Namespace:     default
+StorageClass:  glusterfs-csi
+Status:        Bound
+Volume:        pvc-dfcc36f0-d757-11e8-892a-525400d84c47
+Labels:        <none>
+Annotations:   pv.kubernetes.io/bind-completed: yes
+               pv.kubernetes.io/bound-by-controller: yes
+               volume.beta.kubernetes.io/storage-provisioner: org.gluster.glusterfs
+Finalizers:    [kubernetes.io/pvc-protection]
+Capacity:      5Gi
+Access Modes:  RWO
+Events:
+  Type    Reason                 Age   From                                                                                          Message
+  ----    ------                 ----  ----                                                                                          -------
+  Normal  ExternalProvisioning   41s   persistentvolume-controller                                                                   waiting for a volume to be created, either by external provisioner "org.gluster.glusterfs" or manually created by system administrator
+  Normal  Provisioning           41s   org.gluster.glusterfs_csi-provisioner-glusterfsplugin-0_1e7821cb-d749-11e8-9935-0a580af40303  External provisioner is provisioning volume for claim "default/glusterfs-pv-restore"
+  Normal  ProvisioningSucceeded  41s   org.gluster.glusterfs_csi-provisioner-glusterfsplugin-0_1e7821cb-d749-11e8-9935-0a580af40303  Successfully provisioned volume pvc-dfcc36f0-d757-11e8-892a-525400d84c47
+Mounted By:    <none>
+```
+
+### Create an app with the new claim
+
+```
+[root@localhost]# cat app-with-clone.yaml
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: redis-pvc-restore
+  labels:
+    name: redis-pvc-restore
+spec:
+  containers:
+    - name: redis-pvc-restore
+      image: redis:latest
+      imagePullPolicy: IfNotPresent
+      volumeMounts:
+        - mountPath: "/mnt/gluster"
+          name: glusterfscsivol
+  volumes:
+    - name: glusterfscsivol
+      persistentVolumeClaim:
+        claimName: glusterfs-pv-restore
+```
+
+```
+[root@localhost]# kubectl create -f app-with-clone.yaml
+pod/redis-pvc-restore created
+```
+
+Verify the cloned data is present in the newly created application
+
+```
+[root@localhost]# kubectl get po
+NAME                                   READY   STATUS    RESTARTS   AGE
+csi-attacher-glusterfsplugin-0         2/2     Running   0          112m
+csi-nodeplugin-glusterfsplugin-dl7pp   2/2     Running   0          112m
+csi-nodeplugin-glusterfsplugin-khrtd   2/2     Running   0          112m
+csi-nodeplugin-glusterfsplugin-kqcsw   2/2     Running   0          112m
+csi-provisioner-glusterfsplugin-0      3/3     Running   0          112m
+glusterfs-55v7v                        1/1     Running   0          128m
+glusterfs-qbvgv                        1/1     Running   0          128m
+glusterfs-vclr4                        1/1     Running   0          128m
+redis                                  1/1     Running   0          109m
+redis-pvc-restore                      1/1     Running   0          26s
+[root@localhost]# kubectl exec -it redis-pvc-restore /bin/bash
+root@redis-pvc-restore:/data# cd /mnt/gluster/
+root@redis-pvc-restore:/mnt/gluster# ls
+clone_data
+root@redis-pvc-restore:/mnt/gluster# cat clone_data
+glusterfs csi clone test
+```
diff --git a/examples/kubernetes/app-with-clone-pvc.yaml b/examples/kubernetes/app-with-clone-pvc.yaml
new file mode 100644
index 000000000..0269d2f18
--- /dev/null
+++ b/examples/kubernetes/app-with-clone-pvc.yaml
@@ -0,0 +1,19 @@
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: redis-pvc-restore
+  labels:
+    name: redis-pvc-restore
+spec:
+  containers:
+    - name: redis-pvc-restore
+      image: redis:latest
+      imagePullPolicy: IfNotPresent
+      volumeMounts:
+        - mountPath: "/mnt/gluster"
+          name: glusterfscsivol
+  volumes:
+    - name: glusterfscsivol
+      persistentVolumeClaim:
+        claimName: glusterfs-pv-restore
diff --git a/examples/kubernetes/csi-deployment.yaml b/examples/kubernetes/csi-deployment.yaml
index fc8830572..9785f43e6 100644
--- a/examples/kubernetes/csi-deployment.yaml
+++ b/examples/kubernetes/csi-deployment.yaml
@@ -43,7 +43,7 @@ spec:
       serviceAccount: glusterfs-csi
       containers:
         - name: csi-attacher
-          image: quay.io/k8scsi/csi-attacher:v0.3.0
+          image: quay.io/k8scsi/csi-attacher:v0.4.0
           args:
             - "--v=5"
             - "--csi-address=$(ADDRESS)"
@@ -99,7 +99,7 @@ spec:
       hostNetwork: true
       containers:
         - name: driver-registrar
-          image: quay.io/k8scsi/driver-registrar:v0.3.0
+          image: quay.io/k8scsi/driver-registrar:v0.4.0
           args:
             - "--v=5"
             - "--csi-address=$(ADDRESS)"
@@ -116,6 +116,8 @@ spec:
           volumeMounts:
             - name: plugin-dir
               mountPath: /plugin
+            - name: registration-dir
+              mountPath: /registration
         - name: glusterfs
           securityContext:
             privileged: true
@@ -143,8 +145,6 @@ spec:
           volumeMounts:
             - name: plugin-dir
               mountPath: /plugin
-            - name: registration-dir
-              mountPath: /registration
             - name: pods-mount-dir
               mountPath: /var/lib/kubelet/pods
               mountPropagation: "Bidirectional"
@@ -192,13 +192,25 @@ spec:
       serviceAccount: glusterfs-csi
       containers:
         - name: csi-provisioner
-          image: quay.io/k8scsi/csi-provisioner:v0.3.0
+          image: quay.io/k8scsi/csi-provisioner:v0.4.0
           args:
             - "--provisioner=org.gluster.glusterfs"
             - "--csi-address=$(ADDRESS)"
           env:
             - name: ADDRESS
               value: /var/lib/csi/sockets/pluginproxy/csi.sock
+          volumeMounts:
+            - name: socket-dir
+              mountPath: /var/lib/csi/sockets/pluginproxy/
+          imagePullPolicy: "IfNotPresent"
+        - name: csi-snapshotter
+          image: quay.io/k8scsi/csi-snapshotter:v0.4.0
+          args:
+            - "--csi-address=$(ADDRESS)"
+            - "--connection-timeout=15s"
+          env:
+            - name: ADDRESS
+              value: /var/lib/csi/sockets/pluginproxy/csi.sock
           imagePullPolicy: "IfNotPresent"
           volumeMounts:
             - name: socket-dir
@@ -243,7 +255,7 @@ metadata:
 rules:
   - apiGroups: [""]
     resources: ["persistentvolumes"]
-    verbs: ["get", "list", "watch", "update", "create", "patch", "delete"]
+    verbs: ["get", "list", "watch", "update", "create", "delete"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
@@ -258,7 +270,28 @@ rules:
     verbs: ["get", "list", "watch", "update"]
   - apiGroups: [""]
     resources: ["events"]
-    verbs: ["list", "watch", "create", "update", "patch"]
+    verbs: ["list", "watch", "create", "update", "patch", "delete", "get"]
+  - apiGroups: ["csi.storage.k8s.io"]
+    resources: ["csinodeinfos"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["secrets"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["endpoints"]
+    verbs: ["get", "list", "watch", "create", "update"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshotclasses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshotcontents"]
+    verbs: ["create", "get", "list", "watch", "update", "delete"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshots"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["apiextensions.k8s.io"]
+    resources: ["customresourcedefinitions"]
+    verbs: ["create", "list", "watch", "delete"]
 
 ---
 kind: ClusterRoleBinding
diff --git a/examples/kubernetes/driver-registry-crd.yaml b/examples/kubernetes/driver-registry-crd.yaml
new file mode 100644
index 000000000..cd2cddf86
--- /dev/null
+++ b/examples/kubernetes/driver-registry-crd.yaml
@@ -0,0 +1,35 @@
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  creationTimestamp: null
+  name: csidrivers.csi.storage.k8s.io
+spec:
+  group: csi.storage.k8s.io
+  names:
+    kind: CSIDriver
+    plural: csidrivers
+  scope: Cluster
+  validation:
+    openAPIV3Schema:
+      properties:
+        spec:
+          description: Specification of the CSI Driver.
+          properties:
+            attachRequired:
+              description: "Indicates this CSI volume driver requires an attach
+                operation, and that Kubernetes should call attach and wait for any
+                attach operation to complete before proceeding to mount."
+              type: boolean
+            podInfoOnMountVersion:
+              description: "Indicates this CSI volume driver requires additional
+                pod information (like podName, podUID, etc.) during mount
+                operations."
+              type: string
+  version: v1alpha1
+status:
+  acceptedNames:
+    kind: ""
+    plural: ""
+  conditions: null
+  storedVersions: null
diff --git a/examples/kubernetes/node-info-crd.yaml b/examples/kubernetes/node-info-crd.yaml
new file mode 100644
index 000000000..664a63fec
--- /dev/null
+++ b/examples/kubernetes/node-info-crd.yaml
@@ -0,0 +1,39 @@
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  creationTimestamp: null
+  name: csinodeinfos.csi.storage.k8s.io
+spec:
+  group: csi.storage.k8s.io
+  names:
+    kind: CSINodeInfo
+    plural: csinodeinfos
+  scope: Cluster
+  validation:
+    openAPIV3Schema:
+      properties:
+        csiDrivers:
+          description: "List of CSI drivers running on the node and
+            their properties."
+          items:
+            properties:
+              driver:
+                description: The CSI driver that this object refers to.
+                type: string
+              nodeID:
+                description: The node from the driver point of view.
+                type: string
+              topologyKeys:
+                description: List of keys supported by the driver.
+                items:
+                  type: string
+                type: array
+          type: array
+  version: v1alpha1
+status:
+  acceptedNames:
+    kind: ""
+    plural: ""
+  conditions: null
+  storedVersions: null
diff --git a/examples/kubernetes/pvc-restore.yaml b/examples/kubernetes/pvc-restore.yaml
new file mode 100644
index 000000000..e8bfafc48
--- /dev/null
+++ b/examples/kubernetes/pvc-restore.yaml
@@ -0,0 +1,16 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: glusterfs-pv-restore
+spec:
+  storageClassName: glusterfs-csi
+  dataSource:
+    name: glusterfs-csi-ss
+    kind: VolumeSnapshot
+    apiGroup: snapshot.storage.k8s.io
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 5Gi
diff --git a/examples/kubernetes/snapshot-class.yaml b/examples/kubernetes/snapshot-class.yaml
new file mode 100644
index 000000000..9268da263
--- /dev/null
+++ b/examples/kubernetes/snapshot-class.yaml
@@ -0,0 +1,6 @@
+---
+apiVersion: snapshot.storage.k8s.io/v1alpha1
+kind: VolumeSnapshotClass
+metadata:
+  name: glusterfs-csi-snap
+snapshotter: org.gluster.glusterfs
diff --git a/examples/kubernetes/sc.yaml b/examples/kubernetes/storage-class.yaml
similarity index 99%
rename from examples/kubernetes/sc.yaml
rename to examples/kubernetes/storage-class.yaml
index 5c1827137..edde5e70e 100644
--- a/examples/kubernetes/sc.yaml
+++ b/examples/kubernetes/storage-class.yaml
@@ -1,5 +1,4 @@
 ---
-
 apiVersion: storage.k8s.io/v1
 kind: StorageClass
 metadata:
diff --git a/examples/kubernetes/volume-snapshot.yaml b/examples/kubernetes/volume-snapshot.yaml
new file mode 100644
index 000000000..7b9aed6ca
--- /dev/null
+++ b/examples/kubernetes/volume-snapshot.yaml
@@ -0,0 +1,10 @@
+---
+apiVersion: snapshot.storage.k8s.io/v1alpha1
+kind: VolumeSnapshot
+metadata:
+  name: glusterfs-csi-ss
+spec:
+  snapshotClassName: glusterfs-csi-snap
+  source:
+    name: glusterfs-csi-pv
+    kind: PersistentVolumeClaim
diff --git a/pkg/glusterfs/controllerserver.go b/pkg/glusterfs/controllerserver.go
index 78b4a3e34..0592b43f1 100644
--- a/pkg/glusterfs/controllerserver.go
+++ b/pkg/glusterfs/controllerserver.go
@@ -3,13 +3,16 @@ package glusterfs
 import (
     "context"
     "errors"
+    "fmt"
     "net/http"
+    "strconv"
     "strings"
 
     "github.com/gluster/gluster-csi-driver/pkg/glusterfs/utils"
 
     csi "github.com/container-storage-interface/spec/lib/go/csi/v0"
     "github.com/gluster/glusterd2/pkg/api"
+    gd2Error "github.com/gluster/glusterd2/pkg/errors"
     "github.com/golang/glog"
     "google.golang.org/grpc/codes"
     "google.golang.org/grpc/status"
@@ -77,16 +80,7 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
 
     glog.V(1).Infof("creating volume with name %s", req.Name)
 
-    if reqCaps := req.GetVolumeCapabilities(); reqCaps == nil {
-        return nil, status.Error(codes.InvalidArgument, "volume capabilities is a required field")
-    }
-
-    // If capacity mentioned, pick that or use default size 1 GB
-    volSizeBytes := defaultVolumeSize
-    if capRange := req.GetCapacityRange(); capRange != nil {
-        volSizeBytes = capRange.GetRequiredBytes()
-    }
-
+    volSizeBytes := cs.getVolumeSize(req)
     volSizeMB := int(utils.RoundUpSize(volSizeBytes, 1024*1024))
 
     // parse the request.
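For context, the `getVolumeSize` refactor above feeds `utils.RoundUpSize`, which converts the requested byte count into 1 MB allocation units while rounding up, so a claim never ends up smaller than requested. A minimal sketch of the assumed semantics (the helper itself lives in `pkg/glusterfs/utils` and is not shown in this diff; `roundUpSize` below is a hypothetical stand-in, not the driver's code):

```go
package main

import "fmt"

// roundUpSize converts a byte count into allocation units, rounding up,
// so integer truncation never discards requested capacity.
func roundUpSize(volumeSizeBytes, allocationUnitBytes int64) int64 {
	return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
}

func main() {
	// A 5 GiB request in MiB units: exactly 5120.
	fmt.Println(roundUpSize(5*1024*1024*1024, 1024*1024))
	// One extra byte rounds up to 5121 MiB rather than truncating to 5120.
	fmt.Println(roundUpSize(5*1024*1024*1024+1, 1024*1024))
}
```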
@@ -103,12 +97,30 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
         glog.Errorf("error checking for pre-existing volume: %v", err)
         return nil, err
     }
-    // If volume does not exist, provision volume
-    err = cs.doVolumeCreate(volumeName, volSizeMB)
-    if err != nil {
-        return nil, err
+
+    if req.GetVolumeContentSource().GetSnapshot().GetId() != "" {
+        snapName := req.GetVolumeContentSource().GetSnapshot().GetId()
+        glog.V(2).Infof("creating volume from snapshot %s", snapName)
+        err = cs.checkExistingSnapshot(snapName, req.GetName())
+        if err != nil {
+            return nil, err
+        }
+    } else {
+        // If volume does not exist, provision volume
+        err = cs.doVolumeCreate(volumeName, volSizeMB)
+        if err != nil {
+            return nil, err
+        }
     }
     }
+    err = cs.client.VolumeStart(volumeName, true)
+    if err != nil {
+        // we don't need to delete the volume if volume start fails,
+        // as we list the volumes and start them again
+        // before sending back the response
+        glog.Errorf("failed to start volume: %v", err)
+        return nil, status.Errorf(codes.Internal, "failed to start volume: %v", err)
+    }
 
     glusterServer, bkpServers, err := cs.getClusterNodes()
     if err != nil {
@@ -132,6 +144,53 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
     return resp, nil
 }
 
+func (cs *ControllerServer) getVolumeSize(req *csi.CreateVolumeRequest) int64 {
+    // If a capacity is mentioned, use it; otherwise default to 1 GB
+    volSizeBytes := defaultVolumeSize
+    if capRange := req.GetCapacityRange(); capRange != nil {
+        volSizeBytes = capRange.GetRequiredBytes()
+    }
+    return volSizeBytes
+}
+
+func (cs *ControllerServer) checkExistingSnapshot(snapName, volName string) error {
+    snapInfo, err := cs.GfDriver.client.SnapshotInfo(snapName)
+    if err != nil {
+        errResp := cs.client.LastErrorResponse()
+        // errResp will be nil in case of a "no route to host" error
+        if errResp != nil && errResp.StatusCode == http.StatusNotFound {
+            return status.Errorf(codes.NotFound, "failed to get snapshot info %s", err.Error())
+        }
+        return status.Error(codes.Internal, err.Error())
+    }
+
+    if snapInfo.VolInfo.State != api.VolStarted {
+        actReq := api.SnapActivateReq{
+            Force: true,
+        }
+        err = cs.client.SnapshotActivate(actReq, snapName)
+        if err != nil {
+            glog.Errorf("failed to activate snapshot: %v", err)
+            return status.Errorf(codes.Internal, "failed to activate snapshot %s", err.Error())
+        }
+    }
+    // create a clone of the snapshot
+    err = cs.createSnapshotClone(snapName, volName)
+    return err
+}
+
+func (cs *ControllerServer) createSnapshotClone(snapName, volName string) error {
+    var snapreq api.SnapCloneReq
+    snapreq.CloneName = volName
+    snapResp, err := cs.client.SnapshotClone(snapName, snapreq)
+    if err != nil {
+        glog.Errorf("failed to create volume clone: %v", err)
+        return status.Errorf(codes.Internal, "failed to create volume clone: %s", err.Error())
+    }
+    glog.V(1).Infof("snapshot clone response : %+v", snapResp)
+    return nil
+}
+
 func (cs *ControllerServer) validateCreateVolumeReq(req *csi.CreateVolumeRequest) error {
     if req == nil {
         return status.Errorf(codes.InvalidArgument, "request cannot be empty")
     }
@@ -167,15 +226,6 @@ func (cs *ControllerServer) doVolumeCreate(volumeName string, volSizeMB int) err
     }
     glog.V(3).Infof("volume create response : %+v", volumeCreateResp)
 
-    err = cs.client.VolumeStart(volumeName, true)
-    if err != nil {
-        //we dont need to delete the volume if volume start fails
-        //as we are listing the volumes and starting it again
-        //before sending back the response
-        glog.Errorf("failed to start volume: %v", err)
-        return status.Errorf(codes.Internal, "failed to start volume: %v", err)
-    }
-
     return nil
 }
@@ -264,8 +314,10 @@ func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
         if errResp != nil && errResp.StatusCode == http.StatusNotFound {
             return &csi.DeleteVolumeResponse{}, nil
         }
-        glog.Errorf("failed to stop volume %s: %v", volumeID, err)
-        return nil, status.Errorf(codes.Internal, "failed to stop volume %s: %v", volumeID, err)
+        if err.Error() != gd2Error.ErrVolAlreadyStopped.Error() {
+            glog.Errorf("failed to stop volume %s: %v", volumeID, err)
+            return nil, status.Errorf(codes.Internal, "failed to stop volume %s: %v", volumeID, err)
+        }
     }
 
     // Delete volume
@@ -392,6 +444,8 @@ func (cs *ControllerServer) ControllerGetCapabilities(ctx context.Context, req *
     for _, cap := range []csi.ControllerServiceCapability_RPC_Type{
         csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,
         csi.ControllerServiceCapability_RPC_LIST_VOLUMES,
+        csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT,
+        csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS,
     } {
         caps = append(caps, newCap(cap))
     }
@@ -405,15 +459,244 @@ func (cs *ControllerServer) ControllerGetCapabilities(ctx context.Context, req *
 
 // CreateSnapshot create snapshot of an existing PV
 func (cs *ControllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {
-    return nil, status.Error(codes.Unimplemented, "")
+    if err := cs.validateCreateSnapshotReq(req); err != nil {
+        return nil, err
+    }
+    glog.V(2).Infof("received request to create snapshot %v from volume %v", req.GetName(), req.GetSourceVolumeId())
+
+    snapInfo, err := cs.GfDriver.client.SnapshotInfo(req.Name)
+    if err != nil {
+        glog.Errorf("failed to get snapshot info for %v with error %v", req.GetName(), err.Error())
+        errResp := cs.client.LastErrorResponse()
+        // errResp will be nil in case of a "no route to host" error
+        if errResp != nil && errResp.StatusCode != http.StatusNotFound {
+            return nil, status.Errorf(codes.Internal, "CreateSnapshot - failed to get snapshot info %s", err.Error())
+        }
+        if errResp == nil {
+            return nil, status.Error(codes.Internal, err.Error())
+        }
+    } else {
+        if snapInfo.ParentVolName != req.GetSourceVolumeId() {
+            glog.Errorf("snapshot %v belongs to a different volume %v", req.GetName(), snapInfo.ParentVolName)
+            return nil, status.Errorf(codes.AlreadyExists, "CreateSnapshot - snapshot %s belongs to a different volume %s", req.GetName(), snapInfo.ParentVolName)
+        }
+
+        return &csi.CreateSnapshotResponse{
+            Snapshot: &csi.Snapshot{
+                Id:             snapInfo.VolInfo.Name,
+                SourceVolumeId: snapInfo.ParentVolName,
+                CreatedAt:      snapInfo.CreatedAt.Unix(),
+                SizeBytes:      (int64(snapInfo.VolInfo.Capacity)) * utils.MB,
+                Status: &csi.SnapshotStatus{
+                    Type: csi.SnapshotStatus_READY,
+                },
+            },
+        }, nil
+    }
+
+    snapReq := api.SnapCreateReq{
+        VolName:  req.SourceVolumeId,
+        SnapName: req.Name,
+        Force:    true,
+    }
+    glog.V(2).Infof("snapshot request: %+v", snapReq)
+    snapResp, err := cs.client.SnapshotCreate(snapReq)
+    if err != nil {
+        glog.Errorf("failed to create snapshot %v", err)
+        return nil, status.Errorf(codes.Internal, "CreateSnapshot - snapshot create failed %s", err.Error())
+    }
+
+    actReq := api.SnapActivateReq{
+        Force: true,
+    }
+    err = cs.client.SnapshotActivate(actReq, req.Name)
+    if err != nil {
+        glog.Errorf("failed to activate snapshot %v", err)
+        return nil, status.Errorf(codes.Internal, "failed to activate snapshot %s", err.Error())
+    }
+    return &csi.CreateSnapshotResponse{
+        Snapshot: &csi.Snapshot{
+            Id:             snapResp.VolInfo.Name,
+            SourceVolumeId: snapResp.ParentVolName,
+            CreatedAt:      snapResp.CreatedAt.Unix(),
+            SizeBytes:      (int64(snapResp.VolInfo.Capacity)) * utils.MB,
+            Status: &csi.SnapshotStatus{
+                Type: csi.SnapshotStatus_READY,
+            },
+        },
+    }, nil
+}
+
+func (cs *ControllerServer) validateCreateSnapshotReq(req *csi.CreateSnapshotRequest) error {
+    if req == nil {
+        return status.Errorf(codes.InvalidArgument, "CreateSnapshot request is nil")
+    }
+    if req.GetName() == "" {
+        return status.Error(codes.InvalidArgument, "CreateSnapshot - name cannot be empty")
+    }
+    if req.GetSourceVolumeId() == "" {
+        return status.Error(codes.InvalidArgument, "CreateSnapshot - sourceVolumeId cannot be empty")
+    }
+    if req.GetName() == req.GetSourceVolumeId() {
+        // glusterd2 does not allow a snapshot to have the same name as its parent volume
+        return status.Error(codes.InvalidArgument, "CreateSnapshot - sourceVolumeId and snapshot name cannot be the same")
+    }
+    return nil
 }
 
 // DeleteSnapshot delete provided snapshot of a PV
 func (cs *ControllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) {
-    return nil, status.Error(codes.Unimplemented, "")
+    if req == nil {
+        return nil, status.Errorf(codes.InvalidArgument, "DeleteSnapshot request is nil")
+    }
+    if req.GetSnapshotId() == "" {
+        return nil, status.Error(codes.InvalidArgument, "DeleteSnapshot - snapshotId is empty")
+    }
+    glog.V(4).Infof("deleting snapshot %s", req.GetSnapshotId())
+
+    err := cs.client.SnapshotDeactivate(req.GetSnapshotId())
+    if err != nil {
+        errResp := cs.client.LastErrorResponse()
+        if errResp != nil && errResp.StatusCode == http.StatusNotFound {
+            return &csi.DeleteSnapshotResponse{}, nil
+        }
+        if err.Error() != gd2Error.ErrSnapDeactivated.Error() {
+            glog.Errorf("failed to deactivate snapshot %v", err)
+            return nil, status.Errorf(codes.Internal, "DeleteSnapshot - failed to deactivate snapshot %s", err.Error())
+        }
+    }
+    err = cs.client.SnapshotDelete(req.SnapshotId)
+    if err != nil {
+        errResp := cs.client.LastErrorResponse()
+        if errResp != nil && errResp.StatusCode == http.StatusNotFound {
+            return &csi.DeleteSnapshotResponse{}, nil
+        }
+        glog.Errorf("failed to delete snapshot %v", err)
+        return nil, status.Errorf(codes.Internal, "DeleteSnapshot - failed to delete snapshot %s", err.Error())
+    }
+    return &csi.DeleteSnapshotResponse{}, nil
 }
 
 // ListSnapshots list the snapshots of a PV
 func (cs *ControllerServer) ListSnapshots(ctx context.Context, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) {
-    return nil, status.Error(codes.Unimplemented, "")
+    var (
+        snaplist   api.SnapListResp
+        err        error
+        startToken int32
+    )
+    if req.GetStartingToken() != "" {
+        i, parseErr := strconv.ParseUint(req.StartingToken, 10, 32)
+        if parseErr != nil {
+            return nil, status.Errorf(codes.Aborted, "invalid starting token %s", parseErr.Error())
+        }
+        startToken = int32(i)
+    }
+
+    if len(req.GetSnapshotId()) != 0 {
+        return cs.listSnapshotFromID(req.GetSnapshotId())
+    }
+
+    // If a volume id is sent
+    if len(req.GetSourceVolumeId()) != 0 {
+        snaplist, err = cs.client.SnapshotList(req.SourceVolumeId)
+        if err != nil {
+            errResp := cs.client.LastErrorResponse()
+            if errResp != nil && errResp.StatusCode == http.StatusNotFound {
+                resp := csi.ListSnapshotsResponse{}
+                return &resp, nil
+            }
+            glog.Errorf("failed to list snapshots %v", err)
+            return nil, status.Errorf(codes.Internal, "ListSnapshot - failed to get snapshots %s", err.Error())
+        }
+    } else {
+        // Get all snapshots
+        snaplist, err = cs.client.SnapshotList("")
+        if err != nil {
+            glog.Errorf("failed to list snapshots %v", err)
+            return nil, status.Errorf(codes.Internal, "failed to get snapshots %s", err.Error())
+        }
+    }
+
+    return cs.doPagination(req, snaplist, startToken)
+}
+
+func (cs *ControllerServer) listSnapshotFromID(snapID string) (*csi.ListSnapshotsResponse, error) {
+    var entries []*csi.ListSnapshotsResponse_Entry
+    snap, err := cs.GfDriver.client.SnapshotInfo(snapID)
+    if err != nil {
+        errResp := cs.client.LastErrorResponse()
+        if errResp != nil && errResp.StatusCode == http.StatusNotFound {
+            resp := csi.ListSnapshotsResponse{}
+            return &resp, nil
+        }
+        glog.Errorf("failed to get snapshot info %v", err)
+        return nil, status.Errorf(codes.NotFound, "ListSnapshot - failed to get snapshot info %s", err.Error())
+    }
+    entries = append(entries, &csi.ListSnapshotsResponse_Entry{
+        Snapshot: &csi.Snapshot{
+            Id:             snap.VolInfo.Name,
+            SourceVolumeId: snap.ParentVolName,
+            CreatedAt:      snap.CreatedAt.Unix(),
+            SizeBytes:      (int64(snap.VolInfo.Capacity)) * utils.MB,
+            Status: &csi.SnapshotStatus{
+                Type: csi.SnapshotStatus_READY,
+            },
+        },
+    })
+
+    resp := csi.ListSnapshotsResponse{}
+    resp.Entries = entries
+    return &resp, nil
+}
+
+func (cs *ControllerServer) doPagination(req *csi.ListSnapshotsRequest, snapList api.SnapListResp, startToken int32) (*csi.ListSnapshotsResponse, error) {
+    var entries []*csi.ListSnapshotsResponse_Entry
+    for _, snap := range snapList {
+        for _, s := range snap.SnapList {
+            entries = append(entries, &csi.ListSnapshotsResponse_Entry{
+                Snapshot: &csi.Snapshot{
+                    Id:             s.VolInfo.Name,
+                    SourceVolumeId: snap.ParentName,
+                    CreatedAt:      s.CreatedAt.Unix(),
+                    SizeBytes:      (int64(s.VolInfo.Capacity)) * utils.MB,
+                    Status: &csi.SnapshotStatus{
+                        Type: csi.SnapshotStatus_READY,
+                    },
+                },
+            })
+        }
+    }
+
+    // the token must index into the flattened entries, not the per-volume list
+    if req.GetStartingToken() != "" && int(startToken) > len(entries) {
+        return nil, status.Error(codes.Aborted, "invalid starting token")
+    }
+
+    // TODO: remove this pagination code once glusterd2 issue
+    // https://github.com/gluster/glusterd2/issues/372 is fixed
+    var (
+        maximumEntries   = req.MaxEntries
+        nextToken        int32
+        remainingEntries = int32(len(entries)) - startToken
+        resp             csi.ListSnapshotsResponse
+    )
+
+    if maximumEntries == 0 || maximumEntries > remainingEntries {
+        maximumEntries = remainingEntries
+    }
+
+    resp.Entries = entries[startToken : startToken+maximumEntries]
+
+    if nextToken = startToken + maximumEntries; nextToken < int32(len(entries)) {
+        resp.NextToken = fmt.Sprintf("%d", nextToken)
+    }
+    return &resp, nil
 }
diff --git a/pkg/glusterfs/driver_test.go b/pkg/glusterfs/driver_test.go
index 9c8b93c5d..cc689a5c0 100644
--- a/pkg/glusterfs/driver_test.go
+++ b/pkg/glusterfs/driver_test.go
@@ -8,6 +8,7 @@ import (
     "os"
     "strings"
     "testing"
+    "time"
 
     "github.com/gluster/gluster-csi-driver/pkg/glusterfs/utils"
 
@@ -18,7 +19,12 @@ import (
     "k8s.io/kubernetes/pkg/util/mount"
 )
 
-var volumeCache = make(map[string]uint64)
+type volume struct {
+    Size     uint64
+    snapList []string
+}
+
+var volumeCache = make(map[string]volume)
 
 func TestDriverSuite(t *testing.T) {
     glusterMounter = &mount.FakeMounter{}
@@ -38,15 +44,12 @@ func TestDriverSuite(t *testing.T) {
         switch r.Method {
         case "GET":
             handleGETRequest(w, r, t)
-            return
         case "DELETE":
-            w.WriteHeader(http.StatusNoContent)
-            return
+            handleDeleteRequest(w, r, t)
         case "POST":
             handlePOSTRequest(w, r, t)
-            return
         }
     }))
@@ -127,7 +130,12 @@ func handleGETRequest(w http.ResponseWriter, r *http.Request, t *testing.T) {
             Capacity: 1000,
         }
         writeResp(w, http.StatusOK, resp, t)
-        volumeCache["test1"] = 1000
+        volumeCache["test1"] = volume{Size: 1000}
+        return
+    }
+
+    if strings.HasPrefix(r.URL.String(), "/v1/snapshots") {
+        getSnapShots(w, r, t)
         return
     }
@@ -138,7 +146,7 @@ func handleGETRequest(w http.ResponseWriter, r *http.Request, t *testing.T) {
             Name:     vol[2],
             Metadata: map[string]string{volumeOwnerAnn: glusterfsCSIDriverName},
             State:    api.VolStarted,
-            Capacity: volumeCache[vol[2]],
+            Capacity: volumeCache[vol[2]].Size,
         }
         writeResp(w, http.StatusOK, resp, t)
         return
@@ -150,12 +158,107 @@ func handleGETRequest(w http.ResponseWriter, r *http.Request, t *testing.T) {
     writeResp(w, http.StatusNotFound, resp, t)
 }
 
+func getSnapShots(w http.ResponseWriter, r *http.Request, t *testing.T) {
+    if strings.Contains(r.URL.String(), "/v1/snapshots/") {
+        vol := strings.Split(strings.Trim(r.URL.String(), "/"), "/")
+        if getVolumeNameFromSnap(vol[2]) != "" {
+            var res api.SnapInfo
+            res.VolInfo.Name = vol[2]
+            res.CreatedAt = time.Now()
+            res.ParentVolName = getVolumeNameFromSnap(vol[2])
+            writeResp(w, http.StatusOK, res, t)
+            return
+        }
+        resp := api.ErrorResp{}
+        resp.Errors = append(resp.Errors, api.HTTPError{
+            Code:    1,
+            Message: "failed to get snapshot",
+            Fields: map[string]string{
+                "failed": "failed",
+            },
+        })
+        writeResp(w, http.StatusNotFound, resp, t)
+        return
+    }
+
+    if v, ok := r.URL.Query()["volume"]; ok {
+        if getSnapNameFromVol(v[0]) == "" {
+            writeResp(w, http.StatusOK, api.SnapListResp{}, t)
+            return
+        }
+        res := make(api.SnapListResp, 0)
+        snapList := api.SnapList{}
+        for _, snap := range volumeCache[v[0]].snapList {
+            listresp := api.SnapInfo{}
+            listresp.VolInfo.Name = snap
+            listresp.ParentVolName = v[0]
+            listresp.CreatedAt = time.Now()
+            snapList.ParentName = v[0]
+            snapList.SnapList = append(snapList.SnapList, listresp)
+        }
+        res = append(res, snapList)
+        writeResp(w, http.StatusOK, res, t)
+        return
+    }
+
+    if isSnapsPresent() {
+        res := make(api.SnapListResp, 0)
+        for vol, snap := range volumeCache {
+            snapList := api.SnapList{}
+            for _, s := range snap.snapList {
+                listresp := api.SnapInfo{}
+                listresp.VolInfo.Name = s
+                listresp.ParentVolName = vol
+                listresp.CreatedAt = time.Now()
+                snapList.ParentName = vol
+                snapList.SnapList = append(snapList.SnapList, listresp)
+            }
+            if snapList.ParentName != "" {
+                res = append(res, snapList)
+            }
+        }
+        writeResp(w, http.StatusOK, res, t)
+        return
+    }
+
+    res := make(api.SnapListResp, 1)
+    listresp := api.SnapInfo{}
+    listresp.VolInfo.Name = "snaptest1"
+    listresp.ParentVolName = "volTest"
+    listresp.CreatedAt = time.Now()
+    res[0].ParentName = "volTest"
+    res[0].SnapList = append(res[0].SnapList, listresp)
+    volumeCache["volTest"] = volume{
+        snapList: []string{"snaptest1"},
+    }
+    writeResp(w, http.StatusOK, res, t)
+}
+
 func handlePOSTRequest(w http.ResponseWriter, r *http.Request, t *testing.T) {
     if strings.HasSuffix(r.URL.String(), "start") || strings.HasSuffix(r.URL.String(), "stop") {
         w.WriteHeader(http.StatusOK)
         return
     }
 
+    if strings.HasSuffix(r.URL.String(), "activate") || strings.HasSuffix(r.URL.String(), "deactivate") {
+        w.WriteHeader(http.StatusOK)
+        return
+    }
+
+    if strings.HasPrefix(r.URL.String(), "/v1/snapshots") {
+        var resp api.SnapCreateResp
+        var req api.SnapCreateReq
+        defer r.Body.Close()
+        if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+            w.WriteHeader(http.StatusBadRequest)
+            return
+        }
+        resp.VolInfo.Name = req.SnapName
+        resp.ParentVolName = req.VolName
+        resp.CreatedAt = time.Now()
+        volResp := volumeCache[req.VolName]
+        volResp.snapList = append(volResp.snapList, req.SnapName)
+        volumeCache[req.VolName] = volResp
+        writeResp(w, http.StatusCreated, resp, t)
+    }
 
     if strings.HasPrefix(r.URL.String(), "/v1/volumes") {
         var resp api.VolumeCreateResp
         var req api.VolumeCreateReq
@@ -163,16 +266,61 @@ func handlePOSTRequest(w http.ResponseWriter, r *http.Request, t *testing.T) {
         defer r.Body.Close()
         json.NewDecoder(r.Body).Decode(&req)
         resp.Name = req.Name
-        volumeCache[req.Name] = req.Size
+        volumeCache[req.Name] = volume{Size: req.Size}
         writeResp(w, http.StatusCreated, resp, t)
     }
 }
-
+
+func handleDeleteRequest(w http.ResponseWriter, r *http.Request, t *testing.T) {
+    if strings.HasPrefix(r.URL.String(), "/v1/snapshots") {
+        key := strings.Split(strings.Trim(r.URL.String(), "/"), "/")
+        deleteSnap(key[2])
+    }
+    w.WriteHeader(http.StatusNoContent)
+}
+
 func checkVolume(vol string) bool {
     _, ok := volumeCache[vol]
     return ok
 }
 
+func isSnapsPresent() bool {
+    found := false
+    for _, value := range volumeCache {
+        if len(value.snapList) > 0 {
+            found = true
+        }
+    }
+    return found
+}
+
+func deleteSnap(snapname string) {
+    for key, value := range volumeCache {
+        for i, s := range value.snapList {
+            if s == snapname {
+                resp := volumeCache[key]
+                resp.snapList = append(resp.snapList[:i], resp.snapList[i+1:]...)
+                volumeCache[key] = resp
+                break
+            }
+        }
+    }
+}
+
+func getVolumeNameFromSnap(snap string) string {
+    for key, value := range volumeCache {
+        for _, s := range value.snapList {
+            if snap == s {
+                return key
+            }
+        }
+    }
+    return ""
+}
+
+func getSnapNameFromVol(vol string) string {
+    if len(volumeCache[vol].snapList) > 0 {
+        return volumeCache[vol].snapList[0]
+    }
+    return ""
+}
+
 func writeResp(w http.ResponseWriter, status int, resp interface{}, t *testing.T) {
     w.WriteHeader(status)
     err := json.NewEncoder(w).Encode(&resp)
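The `doPagination` helper added in controllerserver.go above is plain slice windowing driven by a numeric token, following the CSI `ListSnapshots` contract. A self-contained sketch of the same token arithmetic, with hypothetical names (`paginate`, `entries`) that are not part of the driver:

```go
package main

import (
	"fmt"
	"strconv"
)

// paginate returns the window of entries selected by a CSI-style starting
// token and max-entries hint, plus the next token ("" once the listing is
// exhausted). Names here are illustrative only.
func paginate(entries []string, startingToken string, maxEntries int32) ([]string, string, error) {
	var start int32
	if startingToken != "" {
		i, err := strconv.ParseUint(startingToken, 10, 32)
		if err != nil {
			return nil, "", fmt.Errorf("invalid starting token %q: %v", startingToken, err)
		}
		start = int32(i)
	}
	if int(start) > len(entries) {
		return nil, "", fmt.Errorf("starting token %d out of range", start)
	}
	remaining := int32(len(entries)) - start
	if maxEntries == 0 || maxEntries > remaining {
		maxEntries = remaining
	}
	window := entries[start : start+maxEntries]
	next := ""
	if end := start + maxEntries; end < int32(len(entries)) {
		next = strconv.Itoa(int(end))
	}
	return window, next, nil
}

func main() {
	snaps := []string{"s1", "s2", "s3", "s4", "s5"}
	page, next, _ := paginate(snaps, "", 2)
	fmt.Println(page, next) // [s1 s2] 2
	page, next, _ = paginate(snaps, next, 2)
	fmt.Println(page, next) // [s3 s4] 4
	page, next, _ = paginate(snaps, next, 2)
	fmt.Println(page, next) // [s5]
}
```

Feeding each returned token back in walks the whole listing without overlap, which is the behavior the CSI sanity tests exercise against `ListSnapshots`.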