Skip to content
This repository was archived by the owner on Oct 21, 2020. It is now read-only.

Commit 559ed29

Browse files
author
Matthew Wong
authored
Merge pull request #770 from silenceshell/silenceshell-patch-1
cephfs support capacity
2 parents 2d6258a + c16153f commit 559ed29

File tree

2 files changed

+33
-14
lines changed

2 files changed

+33
-14
lines changed

ceph/cephfs/cephfs-provisioner.go

Lines changed: 17 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -61,13 +61,16 @@ type cephFSProvisioner struct {
6161
identity string
6262
// Namespace secrets will be created in. If empty, secrets will be created in each PVC's namespace.
6363
secretNamespace string
64+
// enable PVC quota
65+
enableQuota bool
6466
}
6567

66-
func newCephFSProvisioner(client kubernetes.Interface, id string, secretNamespace string) controller.Provisioner {
68+
func newCephFSProvisioner(client kubernetes.Interface, id string, secretNamespace string, enableQuota bool) controller.Provisioner {
6769
return &cephFSProvisioner{
6870
client: client,
6971
identity: id,
7072
secretNamespace: secretNamespace,
73+
enableQuota: enableQuota,
7174
}
7275
}
7376

@@ -129,7 +132,13 @@ func (p *cephFSProvisioner) Provision(options controller.VolumeOptions) (*v1.Per
129132
}
130133
// provision share
131134
// create cmd
132-
cmd := exec.Command(provisionCmd, "-n", share, "-u", user)
135+
args := []string{"-n", share, "-u", user}
136+
if p.enableQuota {
137+
capacity := options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
138+
requestBytes := strconv.FormatInt(capacity.Value(), 10)
139+
args = append(args, "-s", requestBytes)
140+
}
141+
cmd := exec.Command(provisionCmd, args...)
133142
// set env
134143
cmd.Env = []string{
135144
"CEPH_CLUSTER_NAME=" + cluster,
@@ -186,7 +195,10 @@ func (p *cephFSProvisioner) Provision(options controller.VolumeOptions) (*v1.Per
186195
Spec: v1.PersistentVolumeSpec{
187196
PersistentVolumeReclaimPolicy: options.PersistentVolumeReclaimPolicy,
188197
AccessModes: options.PVC.Spec.AccessModes,
189-
Capacity: v1.ResourceList{ //FIXME: kernel cephfs doesn't enforce quota, capacity is not meaningless here.
198+
Capacity: v1.ResourceList{
199+
// Quotas are supported by the userspace client(ceph-fuse, libcephfs), or kernel client >= 4.17 but only on mimic clusters.
200+
// In other cases capacity is meaningless here.
201+
// If quota is enabled, provisioner will set ceph.quota.max_bytes on volume path.
190202
v1.ResourceName(v1.ResourceStorage): options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)],
191203
},
192204
PersistentVolumeSource: v1.PersistentVolumeSource{
@@ -337,6 +349,7 @@ var (
337349
kubeconfig = flag.String("kubeconfig", "", "Absolute path to the kubeconfig")
338350
id = flag.String("id", "", "Unique provisioner identity")
339351
secretNamespace = flag.String("secret-namespace", "", "Namespace secrets will be created in (default: '', created in each PVC's namespace)")
352+
enableQuota = flag.Bool("enable-quota", false, "Enable PVC quota")
340353
)
341354

342355
func main() {
@@ -387,7 +400,7 @@ func main() {
387400
// Create the provisioner: it implements the Provisioner interface expected by
388401
// the controller
389402
glog.Infof("Creating CephFS provisioner %s with identity: %s, secret namespace: %s", prName, prID, *secretNamespace)
390-
cephFSProvisioner := newCephFSProvisioner(clientset, prID, *secretNamespace)
403+
cephFSProvisioner := newCephFSProvisioner(clientset, prID, *secretNamespace, *enableQuota)
391404

392405
// Start the provision controller which will dynamically provision cephFS
393406
// PVs

ceph/cephfs/cephfs_provisioner/cephfs_provisioner.py

Lines changed: 16 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -84,15 +84,15 @@ def volume_client(self):
8484
cluster_name = os.environ["CEPH_CLUSTER_NAME"]
8585
except KeyError:
8686
cluster_name = "ceph"
87-
try:
87+
try:
8888
mons = os.environ["CEPH_MON"]
8989
except KeyError:
9090
raise ValueError("Missing CEPH_MON env")
9191
try:
9292
auth_id = os.environ["CEPH_AUTH_ID"]
9393
except KeyError:
9494
raise ValueError("Missing CEPH_AUTH_ID")
95-
try:
95+
try:
9696
auth_key = os.environ["CEPH_AUTH_KEY"]
9797
except:
9898
raise ValueError("Missing CEPH_AUTH_KEY")
@@ -297,34 +297,40 @@ def __del__(self):
297297
self._volume_client.disconnect()
298298
self._volume_client = None
299299

300+
def usage():
301+
print "Usage: " + sys.argv[0] + " --remove -n share_name -u ceph_user_id -s size"
302+
300303
def main():
301304
create = True
302305
share = ""
303306
user = ""
307+
size = None
304308
cephfs = CephFSNativeDriver()
305309
try:
306-
opts, args = getopt.getopt(sys.argv[1:], "rn:u:", ["remove"])
310+
opts, args = getopt.getopt(sys.argv[1:], "rn:u:s:", ["remove"])
307311
except getopt.GetoptError:
308-
print "Usage: " + sys.argv[0] + " --remove -n share_name -u ceph_user_id"
312+
usage()
309313
sys.exit(1)
310314

311315
for opt, arg in opts:
312316
if opt == '-n':
313317
share = arg
314318
elif opt == '-u':
315319
user = arg
320+
elif opt == '-s':
321+
size = arg
316322
elif opt in ("-r", "--remove"):
317323
create = False
318324

319325
if share == "" or user == "":
320-
print "Usage: " + sys.argv[0] + " --remove -n share_name -u ceph_user_id"
326+
usage()
321327
sys.exit(1)
322328

323-
if create == True:
324-
print cephfs.create_share(share, user)
329+
if create:
330+
print cephfs.create_share(share, user, size=size)
325331
else:
326-
cephfs.delete_share(share, user)
327-
328-
332+
cephfs.delete_share(share, user)
333+
334+
329335
if __name__ == "__main__":
330336
main()

0 commit comments

Comments (0)