如果需要使用rbd作为后端存储的话,需要先安装ceph-common
需要提前在ceph集群上创建pool,然后创建image
[root@ceph01 ~]# ceph osd pool create pool01[root@ceph01 ~]# ceph osd pool application enable pool01 rbd[root@ceph01 ~]# rbd pool init pool01[root@ceph01 ~]# rbd create pool01/test --size 10G --image-format 2 --image-feature layering[root@ceph01 ~]# rbd info pool01/test
[root@ceph01 ~]# ceph osd pool create pool01
[root@ceph01 ~]# ceph osd pool application enable pool01 rbd
[root@ceph01 ~]# rbd pool init pool01
[root@ceph01 ~]# rbd create pool01/test --size 10G --image-format 2 --image-feature layering
[root@ceph01 ~]# rbd info pool01/test
apiVersion: apps/v1kind: Deploymentmetadata: creationTimestamp: null labels: app: rbd name: rbdspec: replicas: 1 selector: matchLabels: app: rbd strategy: {} template: metadata: creationTimestamp: null labels: app: rbd spec: volumes: - name: test rbd: fsType: xfs keyring: /root/admin.keyring monitors: - 192.168.200.230:6789 pool: pool01 image: test user: admin readOnly: false containers: - image: nginx imagePullPolicy: IfNotPresent volumeMounts: - mountPath: /usr/share/nginx/html name: test name: nginx resources: {}status: {}
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: rbd
  name: rbd
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rbd
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: rbd
    spec:
      volumes:
      - name: test
        rbd:
          fsType: xfs
          keyring: /root/admin.keyring
          monitors:
          - 192.168.200.230:6789
          pool: pool01
          image: test
          user: admin
          readOnly: false
      containers:
      - image: nginx
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /usr/share/nginx/html
          name: test
        name: nginx
        resources: {}
status: {}
[root@master ~]# kubectl get podsNAME READY STATUS RESTARTS AGErbd-888b8b747-n56wr 1/1 Running 0 26m
[root@master ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
rbd-888b8b747-n56wr 1/1 Running 0 26m
这个时候k8s就使用了rbd作为存储
如果这个地方一直显示ContainerCreating的话,可能是没有安装ceph-common,也可能是你的keyring或者ceph.conf没有发放到node节点,具体可以使用describe来看
[root@master euler]# kubectl exec -it rbd-5db4759c-nj2b4 -- bashroot@rbd-5db4759c-nj2b4:/# df -hT |grep /dev/rbd0/dev/rbd0 xfs 10G 105M 9.9G 2% /usr/share/nginx/html
[root@master euler]# kubectl exec -it rbd-5db4759c-nj2b4 -- bash
root@rbd-5db4759c-nj2b4:/# df -hT |grep /dev/rbd0
/dev/rbd0 xfs 10G 105M 9.9G 2% /usr/share/nginx/html
可以看到,/dev/rbd0已经被格式化成xfs并且挂载到了/usr/share/nginx/html
root@rbd-5db4759c-nj2b4:/usr/share/nginx# cd html/root@rbd-5db4759c-nj2b4:/usr/share/nginx/html# lsroot@rbd-5db4759c-nj2b4:/usr/share/nginx/html# echo 123 > index.htmlroot@rbd-5db4759c-nj2b4:/usr/share/nginx/html# chmod 644 index.htmlroot@rbd-5db4759c-nj2b4:/usr/share/nginx/html# exit[root@master euler]# kubectl get pods -o wideNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATESrbd-5db4759c-nj2b4 1/1 Running 0 8m5s 192.168.166.131 node1 <none> <none>
root@rbd-5db4759c-nj2b4:/usr/share/nginx# cd html/
root@rbd-5db4759c-nj2b4:/usr/share/nginx/html# ls
root@rbd-5db4759c-nj2b4:/usr/share/nginx/html# echo 123 > index.html
root@rbd-5db4759c-nj2b4:/usr/share/nginx/html# chmod 644 index.html
root@rbd-5db4759c-nj2b4:/usr/share/nginx/html# exit
[root@master euler]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
rbd-5db4759c-nj2b4 1/1 Running 0 8m5s 192.168.166.131 node1 <none> <none>
访问容器查看内容
[root@master euler]# curl 192.168.166.131123
[root@master euler]# curl 192.168.166.131
123
内容可以正常被访问到,我们将容器删除,然后让他自己重新启动一个来看看文件是否还存在
[root@master euler]# kubectl delete pods rbd-5db4759c-nj2b4 pod "rbd-5db4759c-nj2b4" deleted[root@master euler]# kubectl get podsNAME READY STATUS RESTARTS AGErbd-5db4759c-v9cgm 0/1 ContainerCreating 0 2s[root@master euler]# kubectl get pods -owideNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATESrbd-5db4759c-v9cgm 1/1 Running 0 40s 192.168.166.132 node1 <none> <none>[root@master euler]# curl 192.168.166.132123
[root@master euler]# kubectl delete pods rbd-5db4759c-nj2b4
pod "rbd-5db4759c-nj2b4" deleted
[root@master euler]# kubectl get pods
NAME                 READY   STATUS              RESTARTS   AGE
rbd-5db4759c-v9cgm   0/1     ContainerCreating   0          2s
[root@master euler]# kubectl get pods -owide
NAME                 READY   STATUS    RESTARTS   AGE   IP                NODE    NOMINATED NODE   READINESS GATES
rbd-5db4759c-v9cgm   1/1     Running   0          40s   192.168.166.132   node1   <none>           <none>
[root@master euler]# curl 192.168.166.132
123
可以看到,也是没有问题的,这样k8s就正常的使用了rbd存储
有一个问题,那就是开发人员他们并不是很了解yaml文件里面该怎么去写挂载,每种类型的存储都是不同的写法,那有没有一种方式屏蔽底层的写法,直接告诉k8s集群我想要一个什么样的存储呢?
有的,那就是pv
[root@master euler]# vim pvc.yaml apiVersion: v1kind: PersistentVolumeClaimmetadata: name: myclaimspec: accessModes: - ReadWriteOnce volumeMode: Block resources: requests: storage: 8Gi
[root@master euler]# vim pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: myclaim
spec:
  accessModes:
  - ReadWriteOnce
  volumeMode: Block
  resources:
    requests:
      storage: 8Gi
这里的pvc使用的是块设备,8个G,目前还没有这个pv可以给到他
具体的这里不细说,CKA里面有写
注意,这里是pvc,并不是pv,pvc就是开发人员定义想要的存储类型,大小,然后我可以根据你的pvc去给你创建pv,或者提前创建好pv你直接申领
[root@master euler]# vim pv.yaml apiVersion: v1kind: PersistentVolumemetadata: name: rbdpvspec: capacity: storage: 8Gi volumeMode: Block accessModes: - ReadWriteOnce persistentVolumeReclaimPolicy: Recycle mountOptions: - hard - nfsvers=4.1 rbd: fsType: xfs image: test keyring: /etc/ceph/ceph.client.admin.keyring monitors: - 172.16.1.33 pool: rbd readOnly: false user: admin
[root@master euler]# vim pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: rbdpv
spec:
  capacity:
    storage: 8Gi
  volumeMode: Block
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Recycle
  mountOptions:
  - hard
  - nfsvers=4.1
  rbd:
    fsType: xfs
    image: test
    keyring: /etc/ceph/ceph.client.admin.keyring
    monitors:
    - 172.16.1.33
    pool: rbd
    readOnly: false
    user: admin
[root@master euler]# kubectl get pvcNAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGEmyclaim Bound rbdpv 8Gi RWO 11s
[root@master euler]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
myclaim Bound rbdpv 8Gi RWO 11s
这个时候pv和就和pvc绑定上了,一个pv只能绑定一个pvc,同样,一个pvc也只能绑定一个pv
[root@master euler]# vim pod-pvc.yaml apiVersion: v1kind: Podmetadata: creationTimestamp: null labels: run: pvc-pod name: pvc-podspec: volumes: - name: rbd persistentVolumeClaim: claimName: myclaim readOnly: false containers: - image: nginx imagePullPolicy: IfNotPresent name: pvc-pod volumeDevices: # 因为是使用的块设备,所以这里是volumeDevices - devicePath: /dev/rbd0 name: rbd resources: {} dnsPolicy: ClusterFirst restartPolicy: Alwaysstatus: {}~
[root@master euler]# vim pod-pvc.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: pvc-pod
  name: pvc-pod
spec:
  volumes:
  - name: rbd
    persistentVolumeClaim:
      claimName: myclaim
      readOnly: false
  containers:
  - image: nginx
    imagePullPolicy: IfNotPresent
    name: pvc-pod
    volumeDevices: # 因为是使用的块设备,所以这里是volumeDevices
    - devicePath: /dev/rbd0
      name: rbd
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
[root@master euler]# kubectl get podsNAME READY STATUS RESTARTS AGEpvc-pod 1/1 Running 0 2m5srbd-5db4759c-v9cgm 1/1 Running 0 39m
[root@master euler]# kubectl get pods
NAME                 READY   STATUS    RESTARTS   AGE
pvc-pod              1/1     Running   0          2m5s
rbd-5db4759c-v9cgm   1/1     Running   0          39m
root@pvc-pod:/# ls /dev/rbd0/dev/rbd0
root@pvc-pod:/# ls /dev/rbd0
/dev/rbd0
可以看到,现在rbd0已经存在于容器内部了
这样做我们每次创建pvc都需要创建对应的pv,我们可以使用动态制备
使用storageClass,但是目前欧拉使用的k8s太老了,所以需要下载欧拉fork的一个storageClass
[root@master ~]# git clone https://gitee.com/yftyxa/ceph-csi.git[root@master ~]# cd ceph-csi/deploy/[root@master deploy]# lsceph-conf.yaml csi-config-map-sample.yaml rbdcephcsi Makefile scc.yamlcephfs nfs service-monitor.yaml
[root@master ~]# git clone https://gitee.com/yftyxa/ceph-csi.git
[root@master ~]# cd ceph-csi/deploy/
[root@master deploy]# ls
ceph-conf.yaml csi-config-map-sample.yaml rbd
cephcsi Makefile scc.yaml
cephfs nfs service-monitor.yaml
我们需要修改/root/ceph-csi/deploy/rbd/kubernetes/csi-config-map.yaml
# 先创建一个csi命名空间[root@master ~]# kubectl create ns csi
# 先创建一个csi命名空间
[root@master ~]# kubectl create ns csi
修改文件内容
[root@master kubernetes]# vim csi-rbdplugin-provisioner.yaml# 将第63行的内容改为false63 - "--extra-create-metadata=false"# 修改第二个文件[root@master kubernetes]# vim csi-config-map.yaml apiVersion: v1kind: ConfigMapmetadata: name: "ceph-csi-config"data: config.json: |- [ { "clusterID": "c1f213ae-2de3-11ef-ae15-00163e179ce3", "monitors": ["172.16.1.33","172.16.1.32","172.16.1.31"] }]
[root@master kubernetes]# vim csi-rbdplugin-provisioner.yaml
# 将第63行的内容改为false
63 - "--extra-create-metadata=false"
# 修改第二个文件
[root@master kubernetes]# vim csi-config-map.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: "ceph-csi-config"
data:
  config.json: |-
    [
      {
        "clusterID": "c1f213ae-2de3-11ef-ae15-00163e179ce3",
        "monitors": ["172.16.1.33","172.16.1.32","172.16.1.31"]
      }
    ]
修改第三个文件
[root@master kubernetes]# vim csidriver.yaml ---apiVersion: storage.k8s.io/v1kind: CSIDrivermetadata: name: "rbd.csi.ceph.com"spec: attachRequired: true podInfoOnMount: false # seLinuxMount: true # 将这一行注释 fsGroupPolicy: File
[root@master kubernetes]# vim csidriver.yaml
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
  name: "rbd.csi.ceph.com"
spec:
  attachRequired: true
  podInfoOnMount: false
  # seLinuxMount: true  # 将这一行注释
  fsGroupPolicy: File
自行编写一个文件
[root@master kubernetes]# vim csi-kms-config-map.yaml ---apiVersion: v1kind: ConfigMapmetadata: name: ceph-csi-encryption-kms-configdata: config-json: |- {}
[root@master kubernetes]# vim csi-kms-config-map.yaml
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: ceph-csi-encryption-kms-config
data:
  config-json: |-
    {}
[root@ceph001 ~]# cat /etc/ceph/ceph.client.admin.keyring [client.admin] key = AQC4QnJmng4HIhAA42s27yOflqOBNtEWDgEmkg== caps mds = "allow *" caps mgr = "allow *" caps mon = "allow *" caps osd = "allow *"
[root@ceph001 ~]# cat /etc/ceph/ceph.client.admin.keyring
[client.admin]
key = AQC4QnJmng4HIhAA42s27yOflqOBNtEWDgEmkg==
caps mds = "allow *"
caps mgr = "allow *"
caps mon = "allow *"
caps osd = "allow *"
然后自行编写一个csi-secret.yaml的文件
[root@master kubernetes]# vim csi-secret.yamlapiVersion: v1kind: Secretmetadata: name: csi-secretstringData: userID: admin userKey: AQC4QnJmng4HIhAA42s27yOflqOBNtEWDgEmkg== adminID: admin adminKey: AQC4QnJmng4HIhAA42s27yOflqOBNtEWDgEmkg== [root@master kubernetes]# kubectl apply -f csi-secret.yaml -n csisecret/csi-secret created[root@master kubernetes]# cd ../../[root@master deploy]# kubectl apply -f ceph-conf.yaml -n csiconfigmap/ceph-config created[root@master deploy]# cd -/root/ceph-csi/deploy/rbd/kubernetes
[root@master kubernetes]# vim csi-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: csi-secret
stringData:
  userID: admin
  userKey: AQC4QnJmng4HIhAA42s27yOflqOBNtEWDgEmkg==
  adminID: admin
  adminKey: AQC4QnJmng4HIhAA42s27yOflqOBNtEWDgEmkg==
[root@master kubernetes]# kubectl apply -f csi-secret.yaml -n csi
secret/csi-secret created
[root@master kubernetes]# cd ../../
[root@master deploy]# kubectl apply -f ceph-conf.yaml -n csi
configmap/ceph-config created
[root@master deploy]# cd -
/root/ceph-csi/deploy/rbd/kubernetes
[root@master kubernetes]# sed -i "s/namespace: default/namespace: csi/g" *.yaml
[root@master kubernetes]# sed -i "s/namespace: default/namespace: csi/g" *.yaml
[root@master kubernetes]# kubectl apply -f . -n csi
注意:如果你的worker节点数量少于3个的话,是需要将 csi-rbdplugin-provisioner.yaml这个文件里面的replicas改小一点的。
[root@master kubernetes]# kubectl get pods -n csiNAME READY STATUS RESTARTS AGEcsi-rbdplugin-cv455 3/3 Running 1 (2m14s ago) 2m46scsi-rbdplugin-pf5ld 3/3 Running 0 4m36scsi-rbdplugin-provisioner-6846c4df5f-dvqqk 7/7 Running 0 4m36scsi-rbdplugin-provisioner-6846c4df5f-nmcxf 7/7 Running 1 (2m11s ago) 4m36s
[root@master kubernetes]# kubectl get pods -n csi
NAME                                         READY   STATUS    RESTARTS        AGE
csi-rbdplugin-cv455                          3/3     Running   1 (2m14s ago)   2m46s
csi-rbdplugin-pf5ld                          3/3     Running   0               4m36s
csi-rbdplugin-provisioner-6846c4df5f-dvqqk   7/7     Running   0               4m36s
csi-rbdplugin-provisioner-6846c4df5f-nmcxf   7/7     Running   1 (2m11s ago)   4m36s
[root@master rbd]# /root/ceph-csi/examples/rbd[root@master rbd]# grep -Ev "\s*#|^$" storageclass.yaml ---apiVersion: storage.k8s.io/v1kind: StorageClassmetadata: name: csi-rbd-scprovisioner: rbd.csi.ceph.comparameters: clusterID: <cluster-id> pool: <rbd-pool-name> imageFeatures: "layering" csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret csi.storage.k8s.io/provisioner-secret-namespace: default csi.storage.k8s.io/controller-expand-secret-name: csi-rbd-secret csi.storage.k8s.io/controller-expand-secret-namespace: default csi.storage.k8s.io/node-stage-secret-name: csi-rbd-secret csi.storage.k8s.io/node-stage-secret-namespace: default csi.storage.k8s.io/fstype: ext4reclaimPolicy: DeleteallowVolumeExpansion: truemountOptions: - discard
[root@master rbd]# cd /root/ceph-csi/examples/rbd
[root@master rbd]# grep -Ev "\s*#|^$" storageclass.yaml
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-rbd-sc
provisioner: rbd.csi.ceph.com
parameters:
  clusterID: <cluster-id>
  pool: <rbd-pool-name>
  imageFeatures: "layering"
  csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
  csi.storage.k8s.io/provisioner-secret-namespace: default
  csi.storage.k8s.io/controller-expand-secret-name: csi-rbd-secret
  csi.storage.k8s.io/controller-expand-secret-namespace: default
  csi.storage.k8s.io/node-stage-secret-name: csi-rbd-secret
  csi.storage.k8s.io/node-stage-secret-namespace: default
  csi.storage.k8s.io/fstype: ext4
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions:
  - discard
将这里的内容复制出来
---apiVersion: storage.k8s.io/v1kind: StorageClassmetadata: name: csi-rbd-scprovisioner: rbd.csi.ceph.comparameters: clusterID: c1f213ae-2de3-11ef-ae15-00163e179ce3 pool: rbd imageFeatures: "layering" csi.storage.k8s.io/provisioner-secret-name: csi-secret csi.storage.k8s.io/provisioner-secret-namespace: csi csi.storage.k8s.io/controller-expand-secret-name: csi-secret csi.storage.k8s.io/controller-expand-secret-namespace: csi csi.storage.k8s.io/node-stage-secret-name: csi-secret csi.storage.k8s.io/node-stage-secret-namespace: csi csi.storage.k8s.io/fstype: ext4reclaimPolicy: RetainallowVolumeExpansion: truemountOptions: - discard
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-rbd-sc
provisioner: rbd.csi.ceph.com
parameters:
  clusterID: c1f213ae-2de3-11ef-ae15-00163e179ce3
  pool: rbd
  imageFeatures: "layering"
  csi.storage.k8s.io/provisioner-secret-name: csi-secret
  csi.storage.k8s.io/provisioner-secret-namespace: csi
  csi.storage.k8s.io/controller-expand-secret-name: csi-secret
  csi.storage.k8s.io/controller-expand-secret-namespace: csi
  csi.storage.k8s.io/node-stage-secret-name: csi-secret
  csi.storage.k8s.io/node-stage-secret-namespace: csi
  csi.storage.k8s.io/fstype: ext4
reclaimPolicy: Retain
allowVolumeExpansion: true
mountOptions:
  - discard
修改成这个样子,这里面的clusterID改成自己的,secret-name自己查一下
[root@master euler]# cp pvc.yaml sc-pvc.yaml[root@master euler]# vim sc-pvc.yamlapiVersion: v1kind: PersistentVolumeClaimmetadata: name: sc-pvcspec: accessModes: - ReadWriteOnce volumeMode: Block storageClassName: "csi-rbd-sc" resources: requests: storage: 15Gi
[root@master euler]# cp pvc.yaml sc-pvc.yaml
[root@master euler]# vim sc-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: sc-pvc
spec:
  accessModes:
  - ReadWriteOnce
  volumeMode: Block
  storageClassName: "csi-rbd-sc"
  resources:
    requests:
      storage: 15Gi
现在我们只需要创建pvc,他就自己可以创建pv了
[root@master euler]# kubectl apply -f sc-pvc.yaml persistentvolumeclaim/sc-pvc created[root@master euler]# kubectl get pvcNAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGEmyclaim Bound rbdpv 8Gi RWO 111msc-pvc Bound pvc-dfe3497f-9ed7-4961-9265-9e7242073c28 15Gi RWO csi-rbd-sc 2s
[root@master euler]# kubectl apply -f sc-pvc.yaml
persistentvolumeclaim/sc-pvc created
[root@master euler]# kubectl get pvc
NAME      STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
myclaim   Bound    rbdpv                                      8Gi        RWO                           111m
sc-pvc    Bound    pvc-dfe3497f-9ed7-4961-9265-9e7242073c28   15Gi       RWO            csi-rbd-sc     2s
回到ceph集群查看rbd
[root@ceph001 ~]# rbd lscsi-vol-56e37046-b9d7-4ef1-a534-970a766744f3test[root@ceph001 ~]# rbd info csi-vol-56e37046-b9d7-4ef1-a534-970a766744f3rbd image 'csi-vol-56e37046-b9d7-4ef1-a534-970a766744f3': size 15 GiB in 3840 objects order 22 (4 MiB objects) snapshot_count: 0 id: 38019ee708da block_name_prefix: rbd_data.38019ee708da format: 2 features: layering op_features: flags: create_timestamp: Wed Jun 19 04:55:35 2024 access_timestamp: Wed Jun 19 04:55:35 2024 modify_timestamp: Wed Jun 19 04:55:35 2024
[root@ceph001 ~]# rbd ls
csi-vol-56e37046-b9d7-4ef1-a534-970a766744f3
test
[root@ceph001 ~]# rbd info csi-vol-56e37046-b9d7-4ef1-a534-970a766744f3
rbd image 'csi-vol-56e37046-b9d7-4ef1-a534-970a766744f3':
size 15 GiB in 3840 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: 38019ee708da
block_name_prefix: rbd_data.38019ee708da
format: 2
features: layering
op_features:
flags:
create_timestamp: Wed Jun 19 04:55:35 2024
access_timestamp: Wed Jun 19 04:55:35 2024
modify_timestamp: Wed Jun 19 04:55:35 2024
如果不设置为默认的话,每次写yaml文件都需要指定sc,将sc设为默认的话就不用每次都指定了
[root@master euler]# kubectl edit sc csi-rbd-sc# 在注释里面写入这一行 annotations: storageclass.kubernetes.io/is-default-class: "true"
[root@master euler]# kubectl edit sc csi-rbd-sc
# 在注释里面写入这一行
annotations:
storageclass.kubernetes.io/is-default-class: "true"
[root@master euler]# kubectl get scNAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGEcsi-rbd-sc (default) rbd.csi.ceph.com Retain Immediate true 29m
[root@master euler]# kubectl get sc
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
csi-rbd-sc (default) rbd.csi.ceph.com Retain Immediate true 29m
再去查看sc就会有一个default的显示
[root@master euler]# cp sc-pvc.yaml sc-pvc1.yaml [root@master euler]# cat sc-pvc1.yaml apiVersion: v1kind: PersistentVolumeClaimmetadata: name: sc-pvc1spec: accessModes: - ReadWriteOnce volumeMode: Block resources: requests: storage: 20Gi
[root@master euler]# cp sc-pvc.yaml sc-pvc1.yaml
[root@master euler]# cat sc-pvc1.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: sc-pvc1
spec:
  accessModes:
  - ReadWriteOnce
  volumeMode: Block
  resources:
    requests:
      storage: 20Gi
这个文件里面是没有指定storageClassName的
[root@master euler]# kubectl apply -f sc-pvc1.yaml persistentvolumeclaim/sc-pvc1 created[root@master euler]# kubectl get pvcNAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGEmyclaim Bound rbdpv 8Gi RWO 138msc-pvc Bound pvc-dfe3497f-9ed7-4961-9265-9e7242073c28 15Gi RWO csi-rbd-sc 27msc-pvc1 Bound pvc-167cf73b-4983-4c28-aa98-bb65bb966649 20Gi RWO csi-rbd-sc 6s
[root@master euler]# kubectl apply -f sc-pvc1.yaml
persistentvolumeclaim/sc-pvc1 created
[root@master euler]# kubectl get pvc
NAME      STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
myclaim   Bound    rbdpv                                      8Gi        RWO                           138m
sc-pvc    Bound    pvc-dfe3497f-9ed7-4961-9265-9e7242073c28   15Gi       RWO            csi-rbd-sc     27m
sc-pvc1   Bound    pvc-167cf73b-4983-4c28-aa98-bb65bb966649   20Gi       RWO            csi-rbd-sc     6s
这样就好了
本文来自博客园,作者:FuShudi,转载请注明原文链接:https://www.cnblogs.com/fsdstudy/p/18254695
原文链接:https://www.cnblogs.com/fsdstudy/p/18254695
本站QQ群:前端 618073944 | Java 606181507 | Python 626812652 | C/C++ 612253063 | 微信 634508462 | 苹果 692586424 | C#/.net 182808419 | PHP 305140648 | 运维 608723728