/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space. Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file. This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously. So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota check
 * program to be run after a node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness. "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file. (The default is
 * 60 seconds.) Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit. The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one. This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes, each with infinite bandwidth) to twice
 * the user's limit. (In practice, the maximum overrun you see should be much
 * less.) A "quota_scale" number greater than one makes quota syncs more
 * frequent and reduces the maximum overrun. Numbers less than one (but
 * greater than zero) make quota syncs less frequent.
 *
 * GFS2 quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents
 * of the quota file, so it is not constantly read.
 */

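/*
 * A worked example of the scaling above (illustrative numbers only, not
 * taken from the code): with quota_scale = 1 on a cluster with 4 journals,
 * a hard limit of 1000 blocks and a last-synced value of 900, need_sync()
 * below fires once this node's unsynced change c satisfies
 * 900 + c * 4 * 1 >= 1000, i.e. at c = 25 blocks. With quota_scale = 2 the
 * same sync already fires at c = 13, trading more quota-file contention
 * for tighter enforcement.
 */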

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>
#include <linux/lockref.h>
#include <linux/list_lru.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "util.h"

struct gfs2_quota_change_host {
        u64 qc_change;
        u32 qc_flags; /* GFS2_QCF_... */
        struct kqid qc_id;
};

/* Lock order: qd_lock -> qd->lockref.lock -> lru lock */
static DEFINE_SPINLOCK(qd_lock);
struct list_lru gfs2_qd_lru;

static void gfs2_qd_dispose(struct list_head *list)
{
        struct gfs2_quota_data *qd;
        struct gfs2_sbd *sdp;

        while (!list_empty(list)) {
                qd = list_entry(list->next, struct gfs2_quota_data, qd_lru);
                sdp = qd->qd_gl->gl_sbd;

                list_del(&qd->qd_lru);

                /* Free from the filesystem-specific list */
                spin_lock(&qd_lock);
                list_del(&qd->qd_list);
                spin_unlock(&qd_lock);

                gfs2_assert_warn(sdp, !qd->qd_change);
                gfs2_assert_warn(sdp, !qd->qd_slot_count);
                gfs2_assert_warn(sdp, !qd->qd_bh_count);

                gfs2_glock_put(qd->qd_gl);
                atomic_dec(&sdp->sd_quota_count);

                /* Finally, free the quota data itself */
                kmem_cache_free(gfs2_quotad_cachep, qd);
        }
}

static enum lru_status gfs2_qd_isolate(struct list_head *item, spinlock_t *lock, void *arg)
{
        struct list_head *dispose = arg;
        struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);

        if (!spin_trylock(&qd->qd_lockref.lock))
                return LRU_SKIP;

        if (qd->qd_lockref.count == 0) {
                lockref_mark_dead(&qd->qd_lockref);
                list_move(&qd->qd_lru, dispose);
        }

        spin_unlock(&qd->qd_lockref.lock);
        return LRU_REMOVED;
}

static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
                                         struct shrink_control *sc)
{
        LIST_HEAD(dispose);
        unsigned long freed;

        if (!(sc->gfp_mask & __GFP_FS))
                return SHRINK_STOP;

        freed = list_lru_walk_node(&gfs2_qd_lru, sc->nid, gfs2_qd_isolate,
                                   &dispose, &sc->nr_to_scan);

        gfs2_qd_dispose(&dispose);

        return freed;
}

static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
                                          struct shrink_control *sc)
{
        return vfs_pressure_ratio(list_lru_count_node(&gfs2_qd_lru, sc->nid));
}

struct shrinker gfs2_qd_shrinker = {
        .count_objects = gfs2_qd_shrink_count,
        .scan_objects = gfs2_qd_shrink_scan,
        .seeks = DEFAULT_SEEKS,
        .flags = SHRINKER_NUMA_AWARE,
};

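/*
 * Layout of the shared quota file: user and group quotas for ID n are
 * interleaved, with the user entry at index 2n and the group entry at
 * index 2n + 1. qd2offset() scales that index by the on-disk entry size
 * to get a byte offset into the file.
 */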
static u64 qd2index(struct gfs2_quota_data *qd)
{
        struct kqid qid = qd->qd_id;
        return (2 * (u64)from_kqid(&init_user_ns, qid)) +
                ((qid.type == USRQUOTA) ? 0 : 1);
}

static u64 qd2offset(struct gfs2_quota_data *qd)
{
        u64 offset;

        offset = qd2index(qd);
        offset *= sizeof(struct gfs2_quota);

        return offset;
}

static int qd_alloc(struct gfs2_sbd *sdp, struct kqid qid,
                    struct gfs2_quota_data **qdp)
{
        struct gfs2_quota_data *qd;
        int error;

        qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
        if (!qd)
                return -ENOMEM;

        qd->qd_lockref.count = 1;
        spin_lock_init(&qd->qd_lockref.lock);
        qd->qd_id = qid;
        qd->qd_slot = -1;
        INIT_LIST_HEAD(&qd->qd_lru);

        error = gfs2_glock_get(sdp, qd2index(qd),
                               &gfs2_quota_glops, CREATE, &qd->qd_gl);
        if (error)
                goto fail;

        *qdp = qd;

        return 0;

fail:
        kmem_cache_free(gfs2_quotad_cachep, qd);
        return error;
}

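/*
 * Find the quota data for @qid on @sdp, creating it if it does not exist
 * yet. The loop below drops qd_lock around the allocation and then
 * re-searches the list, so an entry inserted by a racing caller wins and
 * the spare allocation is freed.
 */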
static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
                  struct gfs2_quota_data **qdp)
{
        struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
        int error, found;

        *qdp = NULL;

        for (;;) {
                found = 0;
                spin_lock(&qd_lock);
                list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
                        if (qid_eq(qd->qd_id, qid) &&
                            lockref_get_not_dead(&qd->qd_lockref)) {
                                list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
                                found = 1;
                                break;
                        }
                }

                if (!found)
                        qd = NULL;

                if (!qd && new_qd) {
                        qd = new_qd;
                        list_add(&qd->qd_list, &sdp->sd_quota_list);
                        atomic_inc(&sdp->sd_quota_count);
                        new_qd = NULL;
                }

                spin_unlock(&qd_lock);

                if (qd) {
                        if (new_qd) {
                                gfs2_glock_put(new_qd->qd_gl);
                                kmem_cache_free(gfs2_quotad_cachep, new_qd);
                        }
                        *qdp = qd;
                        return 0;
                }

                error = qd_alloc(sdp, qid, &new_qd);
                if (error)
                        return error;
        }
}

static void qd_hold(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
        lockref_get(&qd->qd_lockref);
}

static void qd_put(struct gfs2_quota_data *qd)
{
        if (lockref_put_or_lock(&qd->qd_lockref))
                return;

        qd->qd_lockref.count = 0;
        list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
        spin_unlock(&qd->qd_lockref.lock);
}

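/*
 * Each quota data with a pending change owns a slot in the per-node quota
 * change file. slot_get() claims the first clear bit in the in-core slot
 * bitmap (one page of bits per chunk); nested holders just bump
 * qd_slot_count.
 */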
static int slot_get(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        unsigned int c, o = 0, b;
        unsigned char byte = 0;

        spin_lock(&qd_lock);

        if (qd->qd_slot_count++) {
                spin_unlock(&qd_lock);
                return 0;
        }

        for (c = 0; c < sdp->sd_quota_chunks; c++)
                for (o = 0; o < PAGE_SIZE; o++) {
                        byte = sdp->sd_quota_bitmap[c][o];
                        if (byte != 0xFF)
                                goto found;
                }

        goto fail;

found:
        for (b = 0; b < 8; b++)
                if (!(byte & (1 << b)))
                        break;
        qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

        if (qd->qd_slot >= sdp->sd_quota_slots)
                goto fail;

        sdp->sd_quota_bitmap[c][o] |= 1 << b;

        spin_unlock(&qd_lock);

        return 0;

fail:
        qd->qd_slot_count--;
        spin_unlock(&qd_lock);
        return -ENOSPC;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        spin_lock(&qd_lock);
        gfs2_assert(sdp, qd->qd_slot_count);
        qd->qd_slot_count++;
        spin_unlock(&qd_lock);
}

static void gfs2_icbit_munge(struct gfs2_sbd *sdp, unsigned char **bitmap,
                             unsigned int bit, int new_value)
{
        unsigned int c, o, b = bit;
        int old_value;

        c = b / (8 * PAGE_SIZE);
        b %= 8 * PAGE_SIZE;
        o = b / 8;
        b %= 8;

        old_value = (bitmap[c][o] & (1 << b));
        gfs2_assert_withdraw(sdp, !old_value != !new_value);

        if (new_value)
                bitmap[c][o] |= 1 << b;
        else
                bitmap[c][o] &= ~(1 << b);
}

static void slot_put(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        spin_lock(&qd_lock);
        gfs2_assert(sdp, qd->qd_slot_count);
        if (!--qd->qd_slot_count) {
                gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
                qd->qd_slot = -1;
        }
        spin_unlock(&qd_lock);
}

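/*
 * bh_get()/bh_put() pin the buffer holding this quota data's slot in the
 * quota change file: the slot number is split into a block number and an
 * offset within that block, the block is mapped and read, and qd_bh_qc is
 * pointed at the gfs2_quota_change entry inside it.
 */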
static int bh_get(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
        unsigned int block, offset;
        struct buffer_head *bh;
        int error;
        struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

        mutex_lock(&sdp->sd_quota_mutex);

        if (qd->qd_bh_count++) {
                mutex_unlock(&sdp->sd_quota_mutex);
                return 0;
        }

        block = qd->qd_slot / sdp->sd_qc_per_block;
        offset = qd->qd_slot % sdp->sd_qc_per_block;

        bh_map.b_size = 1 << ip->i_inode.i_blkbits;
        error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
        if (error)
                goto fail;
        error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
        if (error)
                goto fail;
        error = -EIO;
        if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
                goto fail_brelse;

        qd->qd_bh = bh;
        qd->qd_bh_qc = (struct gfs2_quota_change *)
                (bh->b_data + sizeof(struct gfs2_meta_header) +
                 offset * sizeof(struct gfs2_quota_change));

        mutex_unlock(&sdp->sd_quota_mutex);

        return 0;

fail_brelse:
        brelse(bh);
fail:
        qd->qd_bh_count--;
        mutex_unlock(&sdp->sd_quota_mutex);
        return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        mutex_lock(&sdp->sd_quota_mutex);
        gfs2_assert(sdp, qd->qd_bh_count);
        if (!--qd->qd_bh_count) {
                brelse(qd->qd_bh);
                qd->qd_bh = NULL;
                qd->qd_bh_qc = NULL;
        }
        mutex_unlock(&sdp->sd_quota_mutex);
}

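/*
 * Decide, under qd_lock, whether @qd needs writing back: it must carry a
 * pending change, not already be locked for sync, and not have been synced
 * in the current sync generation. On success the entry is marked
 * QDF_LOCKED, its pending change is snapshotted into qd_change_sync, and
 * extra lockref and slot references are taken, all of which qd_unlock()
 * undoes.
 */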
static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
                         u64 *sync_gen)
{
        if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
            !test_bit(QDF_CHANGE, &qd->qd_flags) ||
            (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
                return 0;

        if (!lockref_get_not_dead(&qd->qd_lockref))
                return 0;

        list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
        set_bit(QDF_LOCKED, &qd->qd_flags);
        qd->qd_change_sync = qd->qd_change;
        gfs2_assert_warn(sdp, qd->qd_slot_count);
        qd->qd_slot_count++;
        return 1;
}

static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
        struct gfs2_quota_data *qd = NULL;
        int error;
        int found = 0;

        *qdp = NULL;

        if (sdp->sd_vfs->s_flags & MS_RDONLY)
                return 0;

        spin_lock(&qd_lock);

        list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
                found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen);
                if (found)
                        break;
        }

        if (!found)
                qd = NULL;

        spin_unlock(&qd_lock);

        if (qd) {
                gfs2_assert_warn(sdp, qd->qd_change_sync);
                error = bh_get(qd);
                if (error) {
                        clear_bit(QDF_LOCKED, &qd->qd_flags);
                        slot_put(qd);
                        qd_put(qd);
                        return error;
                }
        }

        *qdp = qd;

        return 0;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
        gfs2_assert_warn(qd->qd_gl->gl_sbd,
                         test_bit(QDF_LOCKED, &qd->qd_flags));
        clear_bit(QDF_LOCKED, &qd->qd_flags);
        bh_put(qd);
        slot_put(qd);
        qd_put(qd);
}

static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
                    struct gfs2_quota_data **qdp)
{
        int error;

        error = qd_get(sdp, qid, qdp);
        if (error)
                return error;

        error = slot_get(*qdp);
        if (error)
                goto fail;

        error = bh_get(*qdp);
        if (error)
                goto fail_slot;

        return 0;

fail_slot:
        slot_put(*qdp);
fail:
        qd_put(*qdp);
        return error;
}

static void qdsb_put(struct gfs2_quota_data *qd)
{
        bh_put(qd);
        slot_put(qd);
        qd_put(qd);
}

int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_quota_data **qd;
        int error;

        if (ip->i_res == NULL) {
                error = gfs2_rs_alloc(ip);
                if (error)
                        return error;
        }

        qd = ip->i_res->rs_qa_qd;

        if (gfs2_assert_warn(sdp, !ip->i_res->rs_qa_qd_num) ||
            gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
                return -EIO;

        if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
                return 0;

        error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
        if (error)
                goto out;
        ip->i_res->rs_qa_qd_num++;
        qd++;

        error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
        if (error)
                goto out;
        ip->i_res->rs_qa_qd_num++;
        qd++;

        if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
            !uid_eq(uid, ip->i_inode.i_uid)) {
                error = qdsb_get(sdp, make_kqid_uid(uid), qd);
                if (error)
                        goto out;
                ip->i_res->rs_qa_qd_num++;
                qd++;
        }

        if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
            !gid_eq(gid, ip->i_inode.i_gid)) {
                error = qdsb_get(sdp, make_kqid_gid(gid), qd);
                if (error)
                        goto out;
                ip->i_res->rs_qa_qd_num++;
                qd++;
        }

out:
        if (error)
                gfs2_quota_unhold(ip);
        return error;
}

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        unsigned int x;

        if (ip->i_res == NULL)
                return;
        gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

        for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
                qdsb_put(ip->i_res->rs_qa_qd[x]);
                ip->i_res->rs_qa_qd[x] = NULL;
        }
        ip->i_res->rs_qa_qd_num = 0;
}

static int sort_qd(const void *a, const void *b)
{
        const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
        const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

        if (qid_lt(qd_a->qd_id, qd_b->qd_id))
                return -1;
        if (qid_lt(qd_b->qd_id, qd_a->qd_id))
                return 1;
        return 0;
}

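/*
 * Fold a local usage change into this node's quota change file. The change
 * is added to the on-disk gfs2_quota_change entry (initialising the entry
 * on first use) and mirrored into qd_change. While the accumulated change
 * is nonzero, QDF_CHANGE holds an extra lockref and slot reference; both
 * are dropped again once the change returns to zero.
 */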
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
        struct gfs2_quota_change *qc = qd->qd_bh_qc;
        s64 x;

        mutex_lock(&sdp->sd_quota_mutex);
        gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);

        if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
                qc->qc_change = 0;
                qc->qc_flags = 0;
                if (qd->qd_id.type == USRQUOTA)
                        qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
                qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
        }

        x = be64_to_cpu(qc->qc_change) + change;
        qc->qc_change = cpu_to_be64(x);

        spin_lock(&qd_lock);
        qd->qd_change = x;
        spin_unlock(&qd_lock);

        if (!x) {
                gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
                clear_bit(QDF_CHANGE, &qd->qd_flags);
                qc->qc_flags = 0;
                qc->qc_id = 0;
                slot_put(qd);
                qd_put(qd);
        } else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
                qd_hold(qd);
                slot_hold(qd);
        }

        mutex_unlock(&sdp->sd_quota_mutex);
}

/**
 * gfs2_adjust_quota - adjust record of current block usage
 * @ip: The quota inode
 * @loc: Offset of the entry in the quota file
 * @change: The amount of usage change to record
 * @qd: The quota data
 * @fdq: The updated limits to record
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 *
 * Returns: 0 or -ve on error
 */

static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
                             s64 change, struct gfs2_quota_data *qd,
                             struct fs_disk_quota *fdq)
{
        struct inode *inode = &ip->i_inode;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct address_space *mapping = inode->i_mapping;
        unsigned long index = loc >> PAGE_CACHE_SHIFT;
        unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
        unsigned blocksize, iblock, pos;
        struct buffer_head *bh;
        struct page *page;
        void *kaddr, *ptr;
        struct gfs2_quota q, *qp;
        int err, nbytes;
        u64 size;

        if (gfs2_is_stuffed(ip)) {
                err = gfs2_unstuff_dinode(ip, NULL);
                if (err)
                        return err;
        }

        memset(&q, 0, sizeof(struct gfs2_quota));
        err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
        if (err < 0)
                return err;

        err = -EIO;
        qp = &q;
        qp->qu_value = be64_to_cpu(qp->qu_value);
        qp->qu_value += change;
        qp->qu_value = cpu_to_be64(qp->qu_value);
        qd->qd_qb.qb_value = qp->qu_value;
        if (fdq) {
                if (fdq->d_fieldmask & FS_DQ_BSOFT) {
                        qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
                        qd->qd_qb.qb_warn = qp->qu_warn;
                }
                if (fdq->d_fieldmask & FS_DQ_BHARD) {
                        qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
                        qd->qd_qb.qb_limit = qp->qu_limit;
                }
                if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
                        qp->qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
                        qd->qd_qb.qb_value = qp->qu_value;
                }
        }

        /* Write the quota into the quota file on disk */
        ptr = qp;
        nbytes = sizeof(struct gfs2_quota);
get_a_page:
        page = find_or_create_page(mapping, index, GFP_NOFS);
        if (!page)
                return -ENOMEM;

        blocksize = inode->i_sb->s_blocksize;
        iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

        if (!page_has_buffers(page))
                create_empty_buffers(page, blocksize, 0);

        bh = page_buffers(page);
        pos = blocksize;
        while (offset >= pos) {
                bh = bh->b_this_page;
                iblock++;
                pos += blocksize;
        }

        if (!buffer_mapped(bh)) {
                gfs2_block_map(inode, iblock, bh, 1);
                if (!buffer_mapped(bh))
                        goto unlock_out;
                /* If it's a newly allocated disk block for quota, zero it */
                if (buffer_new(bh))
                        zero_user(page, pos - blocksize, bh->b_size);
        }

        if (PageUptodate(page))
                set_buffer_uptodate(bh);

        if (!buffer_uptodate(bh)) {
                ll_rw_block(READ | REQ_META, 1, &bh);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        goto unlock_out;
        }

        gfs2_trans_add_data(ip->i_gl, bh);

        kaddr = kmap_atomic(page);
        if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
                nbytes = PAGE_CACHE_SIZE - offset;
        memcpy(kaddr + offset, ptr, nbytes);
        flush_dcache_page(page);
        kunmap_atomic(kaddr);
        unlock_page(page);
        page_cache_release(page);

        /* If the quota straddles a page boundary, we need to update the rest
         * of the quota at the beginning of the next page */
        if ((offset + sizeof(struct gfs2_quota)) > PAGE_CACHE_SIZE) {
                ptr = ptr + nbytes;
                nbytes = sizeof(struct gfs2_quota) - nbytes;
                offset = 0;
                index++;
                goto get_a_page;
        }

        size = loc + sizeof(struct gfs2_quota);
        if (size > inode->i_size)
                i_size_write(inode, size);
        inode->i_mtime = inode->i_atime = CURRENT_TIME;
        mark_inode_dirty(inode);
        return 0;

unlock_out:
        unlock_page(page);
        page_cache_release(page);
        return err;
}

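/*
 * Write a batch of quota changes back into the shared quota file. The
 * per-ID quota glocks are acquired in sorted order (hence sort_qd) so that
 * nodes syncing overlapping sets of IDs cannot deadlock; each on-disk
 * value is then adjusted by qd_change_sync, and do_qc() subtracts the same
 * amount from this node's quota change file.
 */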
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
        struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
        struct gfs2_alloc_parms ap = { .aflags = 0, };
        unsigned int data_blocks, ind_blocks;
        struct gfs2_holder *ghs, i_gh;
        unsigned int qx, x;
        struct gfs2_quota_data *qd;
        unsigned reserved;
        loff_t offset;
        unsigned int nalloc = 0, blocks;
        int error;

        error = gfs2_rs_alloc(ip);
        if (error)
                return error;

        gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
                               &data_blocks, &ind_blocks);

        ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
        if (!ghs)
                return -ENOMEM;

        sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
        mutex_lock(&ip->i_inode.i_mutex);
        for (qx = 0; qx < num_qd; qx++) {
                error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
                                           GL_NOCACHE, &ghs[qx]);
                if (error)
                        goto out;
        }

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
        if (error)
                goto out;

        for (x = 0; x < num_qd; x++) {
                offset = qd2offset(qda[x]);
                if (gfs2_write_alloc_required(ip, offset,
                                              sizeof(struct gfs2_quota)))
                        nalloc++;
        }

        /*
         * 1 blk for unstuffing inode if stuffed. We add this extra
         * block to the reservation unconditionally. If the inode
         * doesn't need unstuffing, the block will be released to the
         * rgrp since it won't be allocated during the transaction
         */
        /* +3 in the end for unstuffing block, inode size update block
         * and another block in case quota straddles page boundary and
         * two blocks need to be updated instead of 1 */
        blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;

        reserved = 1 + (nalloc * (data_blocks + ind_blocks));
        ap.target = reserved;
        error = gfs2_inplace_reserve(ip, &ap);
        if (error)
                goto out_alloc;

        if (nalloc)
                blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;

        error = gfs2_trans_begin(sdp, blocks, 0);
        if (error)
                goto out_ipres;

        for (x = 0; x < num_qd; x++) {
                qd = qda[x];
                offset = qd2offset(qd);
                error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
                if (error)
                        goto out_end_trans;

                do_qc(qd, -qd->qd_change_sync);
                set_bit(QDF_REFRESH, &qd->qd_flags);
        }

        error = 0;

out_end_trans:
        gfs2_trans_end(sdp);
out_ipres:
        gfs2_inplace_release(ip);
out_alloc:
        gfs2_glock_dq_uninit(&i_gh);
out:
        while (qx--)
                gfs2_glock_dq_uninit(&ghs[qx]);
        mutex_unlock(&ip->i_inode.i_mutex);
        kfree(ghs);
        gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
        return error;
}

static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
{
        struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
        struct gfs2_quota q;
        struct gfs2_quota_lvb *qlvb;
        loff_t pos;
        int error;

        memset(&q, 0, sizeof(struct gfs2_quota));
        pos = qd2offset(qd);
        error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
        if (error < 0)
                return error;

        qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
        qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
        qlvb->__pad = 0;
        qlvb->qb_limit = q.qu_limit;
        qlvb->qb_warn = q.qu_warn;
        qlvb->qb_value = q.qu_value;
        qd->qd_qb = *qlvb;

        return 0;
}

static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
                    struct gfs2_holder *q_gh)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
        struct gfs2_holder i_gh;
        int error;

restart:
        error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
        if (error)
                return error;

        qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;

        if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
                gfs2_glock_dq_uninit(q_gh);
                error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
                                           GL_NOCACHE, q_gh);
                if (error)
                        return error;

                error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
                if (error)
                        goto fail;

                error = update_qd(sdp, qd);
                if (error)
                        goto fail_gunlock;

                gfs2_glock_dq_uninit(&i_gh);
                gfs2_glock_dq_uninit(q_gh);
                force_refresh = 0;
                goto restart;
        }

        return 0;

fail_gunlock:
        gfs2_glock_dq_uninit(&i_gh);
fail:
        gfs2_glock_dq_uninit(q_gh);
        return error;
}

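/*
 * Acquire shared glocks on all quota data affecting this inode, refreshing
 * the cached LVB copy of the limits where needed, so that subsequent calls
 * to gfs2_quota_check() can test against qd_qb without further cluster
 * locking. Root (CAP_SYS_RESOURCE) and non-enforcing mounts skip the
 * glocks entirely.
 */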
int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_quota_data *qd;
        unsigned int x;
        int error = 0;

        error = gfs2_quota_hold(ip, uid, gid);
        if (error)
                return error;

        if (capable(CAP_SYS_RESOURCE) ||
            sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
                return 0;

        sort(ip->i_res->rs_qa_qd, ip->i_res->rs_qa_qd_num,
             sizeof(struct gfs2_quota_data *), sort_qd, NULL);

        for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
                int force = NO_FORCE;
                qd = ip->i_res->rs_qa_qd[x];
                if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
                        force = FORCE;
                error = do_glock(qd, force, &ip->i_res->rs_qa_qd_ghs[x]);
                if (error)
                        break;
        }

        if (!error)
                set_bit(GIF_QD_LOCKED, &ip->i_flags);
        else {
                while (x--)
                        gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);
                gfs2_quota_unhold(ip);
        }

        return error;
}

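/*
 * Decide whether this node's pending change is large enough to be worth
 * syncing to the quota file. A sync is requested once
 *
 *      synced_value + change * n_journals * scale_num / scale_den
 *
 * reaches the hard limit, where the journal count stands in for the number
 * of nodes that may be accumulating changes in parallel. Negative changes
 * (deallocations) and IDs already at or over their limit are left to the
 * periodic sync.
 */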
static int need_sync(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_tune *gt = &sdp->sd_tune;
        s64 value;
        unsigned int num, den;
        int do_sync = 1;

        if (!qd->qd_qb.qb_limit)
                return 0;

        spin_lock(&qd_lock);
        value = qd->qd_change;
        spin_unlock(&qd_lock);

        spin_lock(&gt->gt_spin);
        num = gt->gt_quota_scale_num;
        den = gt->gt_quota_scale_den;
        spin_unlock(&gt->gt_spin);

        if (value < 0)
                do_sync = 0;
        else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
                 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
                do_sync = 0;
        else {
                value *= gfs2_jindex_size(sdp) * num;
                value = div_s64(value, den);
                value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
                if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
                        do_sync = 0;
        }

        return do_sync;
}

void gfs2_quota_unlock(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_quota_data *qda[4];
        unsigned int count = 0;
        unsigned int x;
        int found;

        if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
                goto out;

        for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
                struct gfs2_quota_data *qd;
                int sync;

                qd = ip->i_res->rs_qa_qd[x];
                sync = need_sync(qd);

                gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);
                if (!sync)
                        continue;

                spin_lock(&qd_lock);
                found = qd_check_sync(sdp, qd, NULL);
                spin_unlock(&qd_lock);

                if (!found)
                        continue;

                gfs2_assert_warn(sdp, qd->qd_change_sync);
                if (bh_get(qd)) {
                        clear_bit(QDF_LOCKED, &qd->qd_flags);
                        slot_put(qd);
                        qd_put(qd);
                        continue;
                }

                qda[count++] = qd;
        }

        if (count) {
                do_sync(count, qda);
                for (x = 0; x < count; x++)
                        qd_unlock(qda[x]);
        }

out:
        gfs2_quota_unhold(ip);
}

#define MAX_LINE 256

static int print_message(struct gfs2_quota_data *qd, char *type)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
               sdp->sd_fsname, type,
               (qd->qd_id.type == USRQUOTA) ? "user" : "group",
               from_kqid(&init_user_ns, qd->qd_id));

        return 0;
}

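/*
 * Check whether an allocation against @uid/@gid would overrun a limit. The
 * test uses the cached LVB value plus this node's own pending change, so
 * other nodes' unsynced activity is invisible here. A hard-limit overrun
 * returns -EDQUOT; crossing the soft (warn) limit only logs and sends a
 * netlink warning, rate-limited by gt_quota_warn_period.
 */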
int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_quota_data *qd;
        s64 value;
        unsigned int x;
        int error = 0;

        if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
                return 0;

        if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
                return 0;

        for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
                qd = ip->i_res->rs_qa_qd[x];

                if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
                      qid_eq(qd->qd_id, make_kqid_gid(gid))))
                        continue;

                value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
                spin_lock(&qd_lock);
                value += qd->qd_change;
                spin_unlock(&qd_lock);

                if (be64_to_cpu(qd->qd_qb.qb_limit) &&
                    (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
                        print_message(qd, "exceeded");
                        quota_send_warning(qd->qd_id,
                                           sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);

                        error = -EDQUOT;
                        break;
                } else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
                           (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
                           time_after_eq(jiffies, qd->qd_last_warn +
                                         gfs2_tune_get(sdp, gt_quota_warn_period) * HZ)) {
                        quota_send_warning(qd->qd_id,
                                           sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
                        error = print_message(qd, "warning");
                        qd->qd_last_warn = jiffies;
                }
        }

        return error;
}

void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
                       kuid_t uid, kgid_t gid)
{
        struct gfs2_quota_data *qd;
        unsigned int x;

        if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
                return;
        if (ip->i_diskflags & GFS2_DIF_SYSTEM)
                return;

        for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
                qd = ip->i_res->rs_qa_qd[x];

                if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
                    qid_eq(qd->qd_id, make_kqid_gid(gid))) {
                        do_qc(qd, change);
                }
        }
}

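/*
 * Write back every quota data with unsynced changes, in batches of up to
 * max_qd. Bumping sd_quota_sync_gen first lets qd_fish() skip entries that
 * have already been synced in this generation.
 */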
int gfs2_quota_sync(struct super_block *sb, int type)
{
        struct gfs2_sbd *sdp = sb->s_fs_info;
        struct gfs2_quota_data **qda;
        unsigned int max_qd = PAGE_SIZE / sizeof(struct gfs2_holder);
        unsigned int num_qd;
        unsigned int x;
        int error = 0;

        qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
        if (!qda)
                return -ENOMEM;

        mutex_lock(&sdp->sd_quota_sync_mutex);
        sdp->sd_quota_sync_gen++;

        do {
                num_qd = 0;

                for (;;) {
                        error = qd_fish(sdp, qda + num_qd);
                        if (error || !qda[num_qd])
                                break;
                        if (++num_qd == max_qd)
                                break;
                }

                if (num_qd) {
                        if (!error)
                                error = do_sync(num_qd, qda);
                        if (!error)
                                for (x = 0; x < num_qd; x++)
                                        qda[x]->qd_sync_gen =
                                                sdp->sd_quota_sync_gen;

                        for (x = 0; x < num_qd; x++)
                                qd_unlock(qda[x]);
                }
        } while (!error && num_qd == max_qd);

        mutex_unlock(&sdp->sd_quota_sync_mutex);
        kfree(qda);

        return error;
}

int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
{
        struct gfs2_quota_data *qd;
        struct gfs2_holder q_gh;
        int error;

        error = qd_get(sdp, qid, &qd);
        if (error)
                return error;

        error = do_glock(qd, FORCE, &q_gh);
        if (!error)
                gfs2_glock_dq_uninit(&q_gh);

        qd_put(qd);
        return error;
}

static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf)
{
        const struct gfs2_quota_change *str = buf;

        qc->qc_change = be64_to_cpu(str->qc_change);
        qc->qc_flags = be32_to_cpu(str->qc_flags);
        qc->qc_id = make_kqid(&init_user_ns,
                              (qc->qc_flags & GFS2_QCF_USER) ? USRQUOTA : GRPQUOTA,
                              be32_to_cpu(str->qc_id));
}

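/*
 * Rebuild in-core quota state at mount time: size the slot bitmap from the
 * quota change file, then scan every gfs2_quota_change entry in it and
 * recreate a gfs2_quota_data (with QDF_CHANGE set and its slot claimed)
 * for each nonzero change left over from before the last unmount or crash.
 */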
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
        u64 size = i_size_read(sdp->sd_qc_inode);
        unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
        unsigned int x, slot = 0;
        unsigned int found = 0;
        u64 dblock;
        u32 extlen = 0;
        int error;

        if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
                return -EIO;

        sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
        sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);

        error = -ENOMEM;

        sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
                                       sizeof(unsigned char *), GFP_NOFS);
        if (!sdp->sd_quota_bitmap)
                return error;

        for (x = 0; x < sdp->sd_quota_chunks; x++) {
                sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_NOFS);
                if (!sdp->sd_quota_bitmap[x])
                        goto fail;
        }

        for (x = 0; x < blocks; x++) {
                struct buffer_head *bh;
                unsigned int y;

                if (!extlen) {
                        int new = 0;
                        error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
                        if (error)
                                goto fail;
                }
                error = -EIO;
                bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
                if (!bh)
                        goto fail;
                if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
                        brelse(bh);
                        goto fail;
                }

                for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
                     y++, slot++) {
                        struct gfs2_quota_change_host qc;
                        struct gfs2_quota_data *qd;

                        gfs2_quota_change_in(&qc, bh->b_data +
                                             sizeof(struct gfs2_meta_header) +
                                             y * sizeof(struct gfs2_quota_change));
                        if (!qc.qc_change)
                                continue;

                        error = qd_alloc(sdp, qc.qc_id, &qd);
                        if (error) {
                                brelse(bh);
                                goto fail;
                        }

                        set_bit(QDF_CHANGE, &qd->qd_flags);
                        qd->qd_change = qc.qc_change;
                        qd->qd_slot = slot;
                        qd->qd_slot_count = 1;

                        spin_lock(&qd_lock);
                        gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
                        list_add(&qd->qd_list, &sdp->sd_quota_list);
                        atomic_inc(&sdp->sd_quota_count);
                        spin_unlock(&qd_lock);

                        found++;
                }

                brelse(bh);
                dblock++;
                extlen--;
        }

        if (found)
                fs_info(sdp, "found %u quota changes\n", found);

        return 0;

fail:
        gfs2_quota_cleanup(sdp);
        return error;
}

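/*
 * Tear down all in-core quota state at unmount. Entries that are still
 * referenced are put back on the list and retried after a schedule(); the
 * comment inside notes that this test should eventually become
 * unnecessary.
 */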
void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
        struct list_head *head = &sdp->sd_quota_list;
        struct gfs2_quota_data *qd;
        unsigned int x;

        spin_lock(&qd_lock);
        while (!list_empty(head)) {
                qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

                /*
                 * To be removed in due course... we should be able to
                 * ensure that all refs to the qd have gone by this point
                 * so that this rather odd test is not required
                 */
                spin_lock(&qd->qd_lockref.lock);
                if (qd->qd_lockref.count > 1 ||
                    (qd->qd_lockref.count && !test_bit(QDF_CHANGE, &qd->qd_flags))) {
                        spin_unlock(&qd->qd_lockref.lock);
                        list_move(&qd->qd_list, head);
                        spin_unlock(&qd_lock);
                        schedule();
                        spin_lock(&qd_lock);
                        continue;
                }
                spin_unlock(&qd->qd_lockref.lock);

                list_del(&qd->qd_list);
                /* Also remove it from the reclaim list if it is there */
                list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
                atomic_dec(&sdp->sd_quota_count);
                spin_unlock(&qd_lock);

                if (!qd->qd_lockref.count) {
                        gfs2_assert_warn(sdp, !qd->qd_change);
                        gfs2_assert_warn(sdp, !qd->qd_slot_count);
                } else
                        gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
                gfs2_assert_warn(sdp, !qd->qd_bh_count);

                gfs2_glock_put(qd->qd_gl);
                kmem_cache_free(gfs2_quotad_cachep, qd);

                spin_lock(&qd_lock);
        }
        spin_unlock(&qd_lock);

        gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

        if (sdp->sd_quota_bitmap) {
                for (x = 0; x < sdp->sd_quota_chunks; x++)
                        kfree(sdp->sd_quota_bitmap[x]);
                kfree(sdp->sd_quota_bitmap);
        }
}

static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
        if (error == 0 || error == -EROFS)
                return;
        if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
                fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
}

static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
                               int (*fxn)(struct super_block *sb, int type),
                               unsigned long t, unsigned long *timeo,
                               unsigned int *new_timeo)
{
        if (t >= *timeo) {
                int error = fxn(sdp->sd_vfs, 0);
                quotad_error(sdp, msg, error);
                *timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
        } else {
                *timeo -= t;
        }
}

static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *ip;

        while (1) {
                ip = NULL;
                spin_lock(&sdp->sd_trunc_lock);
                if (!list_empty(&sdp->sd_trunc_list)) {
                        ip = list_entry(sdp->sd_trunc_list.next,
                                        struct gfs2_inode, i_trunc_list);
                        list_del_init(&ip->i_trunc_list);
                }
                spin_unlock(&sdp->sd_trunc_lock);
                if (ip == NULL)
                        return;
                gfs2_glock_finish_truncate(ip);
        }
}

void gfs2_wake_up_statfs(struct gfs2_sbd *sdp)
{
        if (!sdp->sd_statfs_force_sync) {
                sdp->sd_statfs_force_sync = 1;
                wake_up(&sdp->sd_quota_wait);
        }
}

/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @sdp: Pointer to GFS2 superblock
 *
 */

int gfs2_quotad(void *data)
{
        struct gfs2_sbd *sdp = data;
        struct gfs2_tune *tune = &sdp->sd_tune;
        unsigned long statfs_timeo = 0;
        unsigned long quotad_timeo = 0;
        unsigned long t = 0;
        DEFINE_WAIT(wait);
        int empty;

        while (!kthread_should_stop()) {

                /* Update the master statfs file */
                if (sdp->sd_statfs_force_sync) {
                        int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
                        quotad_error(sdp, "statfs", error);
                        statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
                } else {
                        quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
                                           &statfs_timeo,
                                           &tune->gt_statfs_quantum);
                }

                /* Update quota file */
                quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
                                   &quotad_timeo, &tune->gt_quota_quantum);

                /* Check for & recover partially truncated inodes */
                quotad_check_trunc_list(sdp);

                try_to_freeze();

                t = min(quotad_timeo, statfs_timeo);

                prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
                spin_lock(&sdp->sd_trunc_lock);
                empty = list_empty(&sdp->sd_trunc_list);
                spin_unlock(&sdp->sd_trunc_lock);
                if (empty && !sdp->sd_statfs_force_sync)
                        t -= schedule_timeout(t);
                else
                        t = 0;
                finish_wait(&sdp->sd_quota_wait, &wait);
        }

        return 0;
}

static int gfs2_quota_get_xstate(struct super_block *sb,
                                 struct fs_quota_stat *fqs)
{
        struct gfs2_sbd *sdp = sb->s_fs_info;

        memset(fqs, 0, sizeof(struct fs_quota_stat));
        fqs->qs_version = FS_QSTAT_VERSION;

        switch (sdp->sd_args.ar_quota) {
        case GFS2_QUOTA_ON:
                fqs->qs_flags |= (FS_QUOTA_UDQ_ENFD | FS_QUOTA_GDQ_ENFD);
                /*FALLTHRU*/
        case GFS2_QUOTA_ACCOUNT:
                fqs->qs_flags |= (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT);
                break;
        case GFS2_QUOTA_OFF:
                break;
        }

        if (sdp->sd_quota_inode) {
                fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
                fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
        }
        fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
        fqs->qs_gquota = fqs->qs_uquota; /* it's the same inode in both cases */
        fqs->qs_incoredqs = list_lru_count(&gfs2_qd_lru);
        return 0;
}

static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
                          struct fs_disk_quota *fdq)
{
        struct gfs2_sbd *sdp = sb->s_fs_info;
        struct gfs2_quota_lvb *qlvb;
        struct gfs2_quota_data *qd;
        struct gfs2_holder q_gh;
        int error;

        memset(fdq, 0, sizeof(struct fs_disk_quota));

        if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
                return -ESRCH; /* Crazy XFS error code */

        if ((qid.type != USRQUOTA) &&
            (qid.type != GRPQUOTA))
                return -EINVAL;

        error = qd_get(sdp, qid, &qd);
        if (error)
                return error;
        error = do_glock(qd, FORCE, &q_gh);
        if (error)
                goto out;

        qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
        fdq->d_version = FS_DQUOT_VERSION;
        fdq->d_flags = (qid.type == USRQUOTA) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
        fdq->d_id = from_kqid_munged(current_user_ns(), qid);
        fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
        fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
        fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;

        gfs2_glock_dq_uninit(&q_gh);
out:
        qd_put(qd);
        return error;
}

/* GFS2 only supports a subset of the XFS fields */
#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)

static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
                          struct fs_disk_quota *fdq)
{
        struct gfs2_sbd *sdp = sb->s_fs_info;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
        struct gfs2_quota_data *qd;
        struct gfs2_holder q_gh, i_gh;
        unsigned int data_blocks, ind_blocks;
        unsigned int blocks = 0;
        int alloc_required;
        loff_t offset;
        int error;

        if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
                return -ESRCH; /* Crazy XFS error code */

        if ((qid.type != USRQUOTA) &&
            (qid.type != GRPQUOTA))
                return -EINVAL;

        if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
                return -EINVAL;

        error = qd_get(sdp, qid, &qd);
        if (error)
                return error;

        error = gfs2_rs_alloc(ip);
        if (error)
                goto out_put;

        mutex_lock(&ip->i_inode.i_mutex);
        error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
        if (error)
                goto out_unlockput;
        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
        if (error)
                goto out_q;

        /* Check for existing entry, if none then alloc new blocks */
        error = update_qd(sdp, qd);
        if (error)
                goto out_i;

        /* If nothing has changed, this is a no-op */
        if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
            ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
                fdq->d_fieldmask ^= FS_DQ_BSOFT;

        if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
            ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
                fdq->d_fieldmask ^= FS_DQ_BHARD;

        if ((fdq->d_fieldmask & FS_DQ_BCOUNT) &&
            ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
                fdq->d_fieldmask ^= FS_DQ_BCOUNT;

        if (fdq->d_fieldmask == 0)
                goto out_i;

        offset = qd2offset(qd);
        alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
        if (gfs2_is_stuffed(ip))
                alloc_required = 1;
        if (alloc_required) {
                struct gfs2_alloc_parms ap = { .aflags = 0, };
                gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
                                       &data_blocks, &ind_blocks);
                blocks = 1 + data_blocks + ind_blocks;
                ap.target = blocks;
                error = gfs2_inplace_reserve(ip, &ap);
                if (error)
                        goto out_i;
                blocks += gfs2_rg_blocks(ip, blocks);
        }

        /* Some quotas span block boundaries and can update two blocks,
           adding an extra block to the transaction to handle such quotas */
        error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
        if (error)
                goto out_release;

        /* Apply changes */
        error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);

        gfs2_trans_end(sdp);
out_release:
        if (alloc_required)
                gfs2_inplace_release(ip);
out_i:
        gfs2_glock_dq_uninit(&i_gh);
out_q:
        gfs2_glock_dq_uninit(&q_gh);
out_unlockput:
        mutex_unlock(&ip->i_inode.i_mutex);
out_put:
        qd_put(qd);
        return error;
}

const struct quotactl_ops gfs2_quotactl_ops = {
        .quota_sync = gfs2_quota_sync,
        .get_xstate = gfs2_quota_get_xstate,
        .get_dqblk = gfs2_get_dqblk,
        .set_dqblk = gfs2_set_dqblk,
};