
Commit

Merge pull request #247 from linkernetworks/phstsai/VX-264
[Task] VX-264: Need a PV for NFS (#246)
John-Lin authored Aug 16, 2018
2 parents a50bec7 + a1ff08b commit 1298156
Showing 8 changed files with 85 additions and 32 deletions.
2 changes: 1 addition & 1 deletion .travis.yml
@@ -48,7 +48,7 @@ before_script:
# # Enable rbac.
- kubectl create clusterrolebinding add-on-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:default
# Launch apps (mongodb, prometheus)
- make apps.launch
- make apps.launch-testing
# Check if prometheus is ready
- until curl --connect-timeout 1 -sL -w "%{http_code}\\n" http://`kubectl get service -n vortex prometheus -o jsonpath="{.spec.clusterIP}"`:9090/api/v1/query?query=prometheus_build_info -o /dev/null | grep 200; do sleep 1; echo "wait the prometheus to be available"; kubectl get pods --all-namespaces; done
- until [ `kubectl -n vortex get --no-headers pods | awk '{c[$3]++}END{ print NR-c["Running"]}'` -eq 0 ]; do sleep 1; echo "wait all pod running"; kubectl -n vortex get pods; done
61 changes: 44 additions & 17 deletions Makefile
@@ -106,23 +106,50 @@ apps.init-helm:
kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
kubectl patch deploy --namespace kube-system tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}'

.PHONY: apps.launch
apps.launch:
yq -y .services deploy/helm/config/development.yaml | helm install --name vortex-services --debug --wait -f - deploy/helm/services
yq -y .apps deploy/helm/config/development.yaml | helm install --name vortex-apps --debug --wait -f - --set vortex-server.controller.apiserverImageTag=$(SERVER_VERSION) deploy/helm/apps

.PHONY: apps.upgrade
apps.upgrade:
yq -y .services deploy/helm/config/development.yaml | helm upgrade vortex-services --debug -f - deploy/helm/services
yq -y .apps deploy/helm/config/development.yaml | helm upgrade vortex-apps --debug -f - --set vortex-server.controller.apiserverImageTag=$(SERVER_VERSION) deploy/helm/apps

.PHONY: apps.teardown
apps.teardown:
ifeq ($(UNAME), Linux)
helm ls --short | xargs -i helm delete --purge {}
else ifeq ($(UNAME), Darwin)
helm ls --short | xargs helm delete --purge
endif
.PHONY: apps.launch-dev
apps.launch-dev:
yq -y .services deploy/helm/config/development.yaml | helm install --name vortex-services-dev --debug --wait -f - deploy/helm/services
yq -y .apps deploy/helm/config/development.yaml | helm install --name vortex-apps-dev --debug --wait -f - --set vortex-server.controller.apiserverImageTag=$(SERVER_VERSION) deploy/helm/apps

.PHONY: apps.launch-prod
apps.launch-prod:
yq -y .services deploy/helm/config/production.yaml | helm install --name vortex-services-prod --debug --wait -f - deploy/helm/services
yq -y .apps deploy/helm/config/production.yaml | helm install --name vortex-apps-prod --debug --wait -f - --set vortex-server.controller.apiserverImageTag=$(SERVER_VERSION) deploy/helm/apps

.PHONY: apps.launch-testing
apps.launch-testing:
yq -y .services deploy/helm/config/testing.yaml | helm install --name vortex-services-testing --debug --wait -f - deploy/helm/services
yq -y .apps deploy/helm/config/testing.yaml | helm install --name vortex-apps-testing --debug --wait -f - --set vortex-server.controller.apiserverImageTag=$(SERVER_VERSION) deploy/helm/apps

.PHONY: apps.upgrade-dev
apps.upgrade-dev:
yq -y .services deploy/helm/config/development.yaml | helm upgrade vortex-services-dev --debug -f - deploy/helm/services
yq -y .apps deploy/helm/config/development.yaml | helm upgrade vortex-apps-dev --debug -f - --set vortex-server.controller.apiserverImageTag=$(SERVER_VERSION) deploy/helm/apps

.PHONY: apps.upgrade-prod
apps.upgrade-prod:
yq -y .services deploy/helm/config/production.yaml | helm upgrade vortex-services-prod --debug -f - deploy/helm/services
yq -y .apps deploy/helm/config/production.yaml | helm upgrade vortex-apps-prod --debug -f - --set vortex-server.controller.apiserverImageTag=$(SERVER_VERSION) deploy/helm/apps

.PHONY: apps.upgrade-testing
apps.upgrade-testing:
yq -y .services deploy/helm/config/testing.yaml | helm upgrade vortex-services-testing --debug -f - deploy/helm/services
yq -y .apps deploy/helm/config/testing.yaml | helm upgrade vortex-apps-testing --debug -f - --set vortex-server.controller.apiserverImageTag=$(SERVER_VERSION) deploy/helm/apps

.PHONY: apps.teardown-dev
apps.teardown-dev:
helm delete --purge vortex-services-dev
helm delete --purge vortex-apps-dev

.PHONY: apps.teardown-prod
apps.teardown-prod:
helm delete --purge vortex-services-prod
helm delete --purge vortex-apps-prod

.PHONY: apps.teardown-testing
apps.teardown-testing:
helm delete --purge vortex-services-testing
helm delete --purge vortex-apps-testing

## dockerfiles/ ########################################

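For reference, every `apps.*` target above follows the same pattern: `yq` extracts the `.services` or `.apps` subtree from the per-environment config file and pipes it to `helm` as a values file via `-f -`. Below is a minimal sketch of how such a config file could be laid out; apart from the `pvc` block, which mirrors the values changed in this commit, and the `vortex-server.controller.apiserverImageTag` path used by the Makefile, the nesting and chart keys are assumptions, not copied from the repository.

```
# Sketch of deploy/helm/config/<env>.yaml (abridged, nesting partly assumed).
# `make apps.launch-dev` effectively runs:
#   yq -y .services deploy/helm/config/development.yaml | helm install --name vortex-services-dev -f - deploy/helm/services
#   yq -y .apps     deploy/helm/config/development.yaml | helm install --name vortex-apps-dev     -f - deploy/helm/apps
services:
  mongodb:                        # assumed subchart key
    controller:
      pvc:                        # values consumed by templates/pv.yaml below
        volumeType: nfs
        nfsPath: /nfsshare
        nfsServer: 172.17.8.100
        reclaimPolicy: Delete
        accessModes: ReadWriteOnce
        storage: 1Gi
apps:
  vortex-server:
    controller:
      apiserverImageTag: latest   # overridden per build via --set on the command line
```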
14 changes: 10 additions & 4 deletions deploy/helm/README.md
@@ -22,10 +22,11 @@ make apps.init-helm
```

### Using helm to deploy apps
This will deploy mongodb and prometheus in your cluster
This will deploy the services and apps in your cluster. Use the make target that matches your environment (development, production, or testing).
```
make apps.launch
make apps.launch-dev
```

If you want to deploy a certain chart, you can type
```
helm install --debug --wait --set global.environment=<environmentSetting> <chart path>
@@ -34,7 +35,12 @@ helm install --debug --wait --set global.environment=<environmentSetting> <chart path>
helm install --debug --wait --set global.environment=local deploy/helm/apps/prometheus/charts/cadvisor
```

### Delete all release
### Upgrade the releases
```
make apps.upgrade-dev
```

### Delete the releases
```
make apps.teardown
make apps.teardown-dev
```
6 changes: 4 additions & 2 deletions deploy/helm/config/development.yaml
@@ -59,10 +59,12 @@ services:
image:
tag: 4.1.1-xenial
pvc:
localVolume: true
volumeType: nfs
nfsPath: /nfsshare
nfsServer: 172.17.8.100
reclaimPolicy: Delete
accessModes: ReadWriteOnce
storageClass: mongo
storageClass:
storage: 1Gi
service:
nodePort: true
6 changes: 4 additions & 2 deletions deploy/helm/config/production.yaml
@@ -59,8 +59,10 @@ services:
image:
tag: 4.1.1-xenial
pvc:
localVolume: false
reclaimPolicy: Retain
volumeType: nfs
nfsPath: /nfsshare
nfsServer: 172.17.8.100
reclaimPolicy: Recycle
accessModes: ReadWriteMany
storageClass: mongo
storage: 100Gi
2 changes: 1 addition & 1 deletion deploy/helm/config/testing.yaml
@@ -59,7 +59,7 @@ services:
image:
tag: 4.1.1-xenial
pvc:
localVolume: true
volumeType: local
reclaimPolicy: Delete
accessModes: ReadWriteOnce
storageClass: mongo
13 changes: 8 additions & 5 deletions deploy/helm/services/charts/mongodb/templates/pv.yaml
@@ -1,16 +1,19 @@
{{- if .Values.controller.pvc.localVolume }}
---
kind: PersistentVolume
apiVersion: v1
kind: PersistentVolume
metadata:
name: mongo
name: mongo-{{- .Values.controller.pvc.volumeType }}
spec:
storageClassName: {{ .Values.controller.pvc.storageClass }}
persistentVolumeReclaimPolicy: {{ .Values.controller.pvc.reclaimPolicy }}
capacity:
storage: {{ .Values.controller.pvc.storage }}
accessModes:
- {{ .Values.controller.pvc.accessModes }}
{{- if eq .Values.controller.pvc.volumeType "local"}}
hostPath:
path: "/data"
{{- end }}
{{- else if eq .Values.controller.pvc.volumeType "nfs" }}
nfs:
path: {{ .Values.controller.pvc.nfsPath }}
server: {{ .Values.controller.pvc.nfsServer }}
{{- end }}
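For illustration, with the development values above (volumeType nfs, reclaimPolicy Delete, accessModes ReadWriteOnce, storage 1Gi, empty storageClass), the new template should render roughly as the PersistentVolume below. This is a sketch of the expected output, not a file from the commit.

```
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mongo-nfs                       # "mongo-" + volumeType
spec:
  storageClassName:                     # empty, as in development.yaml
  persistentVolumeReclaimPolicy: Delete
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    path: /nfsshare
    server: 172.17.8.100
```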
13 changes: 13 additions & 0 deletions deploy/kubernetes/base/nfs/nfs-pv.yaml
@@ -0,0 +1,13 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: mongo-storage
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /nfsshare
server: 10.14.1.100
persistentVolumeReclaimPolicy: Recycle
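Not part of this commit, but for context: a PersistentVolume alone does nothing until a claim binds to it. A minimal sketch of a PersistentVolumeClaim that could bind to mongo-storage is below; the claim name is hypothetical.

```
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mongo-storage-claim        # hypothetical name
spec:
  accessModes:
    - ReadWriteMany                # must be offered by the PV
  resources:
    requests:
      storage: 1Gi                 # no more than the PV's capacity
  storageClassName: ""             # match PVs that have no storage class
```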
