fs/gfs2/unlinked.c
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/kthread.h>
#include <linux/gfs2_ondisk.h>
#include <asm/semaphore.h>

#include "gfs2.h"
#include "lm_interface.h"
#include "incore.h"
#include "bmap.h"
#include "inode.h"
#include "meta_io.h"
#include "trans.h"
#include "unlinked.h"
#include "util.h"

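/**
 * munge_ondisk - write a tag into its slot in the unlinked-tag file
 * @sdp: the filesystem
 * @slot: the slot the tag occupies
 * @ut: the tag to write
 *
 * Maps the slot to a block of the hidden unlinked-tag inode, reads and
 * verifies that block, then copies the tag into place under
 * sd_unlinked_mutex as part of the current transaction.
 *
 * Returns: errno
 */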
static int munge_ondisk(struct gfs2_sbd *sdp, unsigned int slot,
                        struct gfs2_unlinked_tag *ut)
{
        struct gfs2_inode *ip = sdp->sd_ut_inode->u.generic_ip;
        unsigned int block, offset;
        uint64_t dblock;
        int new = 0;
        struct buffer_head *bh;
        int error;

        block = slot / sdp->sd_ut_per_block;
        offset = slot % sdp->sd_ut_per_block;

        error = gfs2_block_map(ip, block, &new, &dblock, NULL);
        if (error)
                return error;
        error = gfs2_meta_read(ip->i_gl, dblock, DIO_START | DIO_WAIT, &bh);
        if (error)
                return error;
        if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_UT)) {
                error = -EIO;
                goto out;
        }

        mutex_lock(&sdp->sd_unlinked_mutex);
        gfs2_trans_add_bh(ip->i_gl, bh, 1);
        gfs2_unlinked_tag_out(ut, bh->b_data +
                              sizeof(struct gfs2_meta_header) +
                              offset * sizeof(struct gfs2_unlinked_tag));
        mutex_unlock(&sdp->sd_unlinked_mutex);

 out:
        brelse(bh);

        return error;
}

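/* Put a tag on sd_unlinked_list, taking an extra reference on it */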
static void ul_hash(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
{
        spin_lock(&sdp->sd_unlinked_spin);
        list_add(&ul->ul_list, &sdp->sd_unlinked_list);
        gfs2_assert(sdp, ul->ul_count);
        ul->ul_count++;
        atomic_inc(&sdp->sd_unlinked_count);
        spin_unlock(&sdp->sd_unlinked_spin);
}

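/* Remove a tag from sd_unlinked_list and drop the list's reference */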
static void ul_unhash(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
{
        spin_lock(&sdp->sd_unlinked_spin);
        list_del_init(&ul->ul_list);
        gfs2_assert(sdp, ul->ul_count > 1);
        ul->ul_count--;
        gfs2_assert_warn(sdp, atomic_read(&sdp->sd_unlinked_count) > 0);
        atomic_dec(&sdp->sd_unlinked_count);
        spin_unlock(&sdp->sd_unlinked_spin);
}

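/**
 * ul_fish - find an unlocked tag to work on
 * @sdp: the filesystem
 *
 * Scans sd_unlinked_list for a tag that is not already locked, locks it,
 * takes a reference, and moves it to the tail of the list.
 *
 * Returns: the tag, or NULL if the filesystem is read-only or no
 *          unlocked tag was found
 */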
static struct gfs2_unlinked *ul_fish(struct gfs2_sbd *sdp)
{
        struct list_head *head;
        struct gfs2_unlinked *ul;
        int found = 0;

        if (sdp->sd_vfs->s_flags & MS_RDONLY)
                return NULL;

        spin_lock(&sdp->sd_unlinked_spin);

        head = &sdp->sd_unlinked_list;

        list_for_each_entry(ul, head, ul_list) {
                if (test_bit(ULF_LOCKED, &ul->ul_flags))
                        continue;

                list_move_tail(&ul->ul_list, head);
                ul->ul_count++;
                set_bit(ULF_LOCKED, &ul->ul_flags);
                found = 1;

                break;
        }

        if (!found)
                ul = NULL;

        spin_unlock(&sdp->sd_unlinked_spin);

        return ul;
}

/**
 * enforce_limit - limit the number of inodes waiting to be deallocated
 * @sdp: the filesystem
 */

static void enforce_limit(struct gfs2_sbd *sdp)
{
        unsigned int tries = 0, min = 0;
        int error;

        if (atomic_read(&sdp->sd_unlinked_count) >=
            gfs2_tune_get(sdp, gt_ilimit)) {
                tries = gfs2_tune_get(sdp, gt_ilimit_tries);
                min = gfs2_tune_get(sdp, gt_ilimit_min);
        }

        while (tries--) {
                struct gfs2_unlinked *ul = ul_fish(sdp);
                if (!ul)
                        break;
                error = gfs2_inode_dealloc(sdp, ul);
                gfs2_unlinked_put(sdp, ul);

                if (!error) {
                        if (!--min)
                                break;
                } else if (error != 1)
                        break;
        }
}

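/* Allocate a new tag, already locked and holding one reference */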
static struct gfs2_unlinked *ul_alloc(struct gfs2_sbd *sdp)
{
        struct gfs2_unlinked *ul;

        ul = kzalloc(sizeof(struct gfs2_unlinked), GFP_KERNEL);
        if (ul) {
                INIT_LIST_HEAD(&ul->ul_list);
                ul->ul_count = 1;
                set_bit(ULF_LOCKED, &ul->ul_flags);
        }

        return ul;
}

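/**
 * gfs2_unlinked_get - allocate a tag and reserve a free slot in the bitmap
 * @sdp: the filesystem
 * @ul: returns the new tag with its slot number filled in
 *
 * Returns: errno
 */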
int gfs2_unlinked_get(struct gfs2_sbd *sdp, struct gfs2_unlinked **ul)
{
        unsigned int c, o = 0, b;
        unsigned char byte = 0;

        enforce_limit(sdp);

        *ul = ul_alloc(sdp);
        if (!*ul)
                return -ENOMEM;

        spin_lock(&sdp->sd_unlinked_spin);

        for (c = 0; c < sdp->sd_unlinked_chunks; c++)
                for (o = 0; o < PAGE_SIZE; o++) {
                        byte = sdp->sd_unlinked_bitmap[c][o];
                        if (byte != 0xFF)
                                goto found;
                }

        goto fail;

 found:
        for (b = 0; b < 8; b++)
                if (!(byte & (1 << b)))
                        break;
        (*ul)->ul_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

        if ((*ul)->ul_slot >= sdp->sd_unlinked_slots)
                goto fail;

        sdp->sd_unlinked_bitmap[c][o] |= 1 << b;

        spin_unlock(&sdp->sd_unlinked_spin);

        return 0;

 fail:
        spin_unlock(&sdp->sd_unlinked_spin);
        kfree(*ul);
        return -ENOSPC;
}

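/**
 * gfs2_unlinked_put - unlock a tag and drop a reference to it
 * @sdp: the filesystem
 * @ul: the tag
 *
 * When the last reference is dropped, the tag's slot is cleared in the
 * bitmap and the structure is freed.
 */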
void gfs2_unlinked_put(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
{
        gfs2_assert_warn(sdp, test_and_clear_bit(ULF_LOCKED, &ul->ul_flags));

        spin_lock(&sdp->sd_unlinked_spin);
        gfs2_assert(sdp, ul->ul_count);
        ul->ul_count--;
        if (!ul->ul_count) {
                gfs2_icbit_munge(sdp, sdp->sd_unlinked_bitmap, ul->ul_slot, 0);
                spin_unlock(&sdp->sd_unlinked_spin);
                kfree(ul);
        } else
                spin_unlock(&sdp->sd_unlinked_spin);
}

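/**
 * gfs2_unlinked_ondisk_add - write a new tag to disk and hash it in memory
 * @sdp: the filesystem
 * @ul: the tag
 *
 * Returns: errno
 */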
int gfs2_unlinked_ondisk_add(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
{
        int error;

        gfs2_assert_warn(sdp, test_bit(ULF_LOCKED, &ul->ul_flags));
        gfs2_assert_warn(sdp, list_empty(&ul->ul_list));

        error = munge_ondisk(sdp, ul->ul_slot, &ul->ul_ut);
        if (!error)
                ul_hash(sdp, ul);

        return error;
}

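/**
 * gfs2_unlinked_ondisk_munge - rewrite an already-hashed tag on disk
 * @sdp: the filesystem
 * @ul: the tag
 *
 * Returns: errno
 */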
int gfs2_unlinked_ondisk_munge(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
{
        int error;

        gfs2_assert_warn(sdp, test_bit(ULF_LOCKED, &ul->ul_flags));
        gfs2_assert_warn(sdp, !list_empty(&ul->ul_list));

        error = munge_ondisk(sdp, ul->ul_slot, &ul->ul_ut);

        return error;
}

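/**
 * gfs2_unlinked_ondisk_rm - clear a tag's slot on disk and unhash it
 * @sdp: the filesystem
 * @ul: the tag
 *
 * Returns: errno
 */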
int gfs2_unlinked_ondisk_rm(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
{
        struct gfs2_unlinked_tag ut;
        int error;

        gfs2_assert_warn(sdp, test_bit(ULF_LOCKED, &ul->ul_flags));
        gfs2_assert_warn(sdp, !list_empty(&ul->ul_list));

        memset(&ut, 0, sizeof(struct gfs2_unlinked_tag));

        error = munge_ondisk(sdp, ul->ul_slot, &ut);
        if (error)
                return error;

        ul_unhash(sdp, ul);

        return 0;
}

/**
 * gfs2_unlinked_dealloc - Go through the list of inodes to be deallocated
 * @sdp: the filesystem
 *
 * Returns: errno
 */

int gfs2_unlinked_dealloc(struct gfs2_sbd *sdp)
{
        unsigned int hits, strikes;
        int error;

        for (;;) {
                hits = 0;
                strikes = 0;

                for (;;) {
                        struct gfs2_unlinked *ul = ul_fish(sdp);
                        if (!ul)
                                return 0;
                        error = gfs2_inode_dealloc(sdp, ul);
                        gfs2_unlinked_put(sdp, ul);

                        if (!error) {
                                hits++;
                                if (strikes)
                                        strikes--;
                        } else if (error == 1) {
                                strikes++;
                                if (strikes >=
                                    atomic_read(&sdp->sd_unlinked_count)) {
                                        error = 0;
                                        break;
                                }
                        } else
                                return error;
                }

                if (!hits || kthread_should_stop())
                        break;

                cond_resched();
        }

        return 0;
}

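/**
 * gfs2_unlinked_init - read the unlinked-tag file and set up in-core state
 * @sdp: the filesystem
 *
 * Builds the slot bitmap and hashes a struct gfs2_unlinked for every tag
 * that is still in use on disk.
 *
 * Returns: errno
 */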
int gfs2_unlinked_init(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *ip = sdp->sd_ut_inode->u.generic_ip;
        unsigned int blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift;
        unsigned int x, slot = 0;
        unsigned int found = 0;
        uint64_t dblock;
        uint32_t extlen = 0;
        int error;

        if (!ip->i_di.di_size ||
            ip->i_di.di_size > (64 << 20) ||
            ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1)) {
                gfs2_consist_inode(ip);
                return -EIO;
        }
        sdp->sd_unlinked_slots = blocks * sdp->sd_ut_per_block;
        sdp->sd_unlinked_chunks = DIV_ROUND_UP(sdp->sd_unlinked_slots,
                                               8 * PAGE_SIZE);

        error = -ENOMEM;

        sdp->sd_unlinked_bitmap = kcalloc(sdp->sd_unlinked_chunks,
                                          sizeof(unsigned char *),
                                          GFP_KERNEL);
        if (!sdp->sd_unlinked_bitmap)
                return error;

        for (x = 0; x < sdp->sd_unlinked_chunks; x++) {
                sdp->sd_unlinked_bitmap[x] = kzalloc(PAGE_SIZE, GFP_KERNEL);
                if (!sdp->sd_unlinked_bitmap[x])
                        goto fail;
        }

        for (x = 0; x < blocks; x++) {
                struct buffer_head *bh;
                unsigned int y;

                if (!extlen) {
                        int new = 0;
                        error = gfs2_block_map(ip, x, &new, &dblock, &extlen);
                        if (error)
                                goto fail;
                }
                gfs2_meta_ra(ip->i_gl, dblock, extlen);
                error = gfs2_meta_read(ip->i_gl, dblock, DIO_START | DIO_WAIT,
                                       &bh);
                if (error)
                        goto fail;
                error = -EIO;
                if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_UT)) {
                        brelse(bh);
                        goto fail;
                }

                for (y = 0;
                     y < sdp->sd_ut_per_block && slot < sdp->sd_unlinked_slots;
                     y++, slot++) {
                        struct gfs2_unlinked_tag ut;
                        struct gfs2_unlinked *ul;

                        gfs2_unlinked_tag_in(&ut, bh->b_data +
                                sizeof(struct gfs2_meta_header) +
                                y * sizeof(struct gfs2_unlinked_tag));
                        if (!ut.ut_inum.no_addr)
                                continue;

                        error = -ENOMEM;
                        ul = ul_alloc(sdp);
                        if (!ul) {
                                brelse(bh);
                                goto fail;
                        }
                        ul->ul_ut = ut;
                        ul->ul_slot = slot;

                        spin_lock(&sdp->sd_unlinked_spin);
                        gfs2_icbit_munge(sdp, sdp->sd_unlinked_bitmap, slot, 1);
                        spin_unlock(&sdp->sd_unlinked_spin);
                        ul_hash(sdp, ul);

                        gfs2_unlinked_put(sdp, ul);
                        found++;
                }

                brelse(bh);
                dblock++;
                extlen--;
        }

        if (found)
                fs_info(sdp, "found %u unlinked inodes\n", found);

        return 0;

 fail:
        gfs2_unlinked_cleanup(sdp);
        return error;
}

/**
 * gfs2_unlinked_cleanup - get rid of any extra struct gfs2_unlinked structures
 * @sdp: the filesystem
 *
 */

void gfs2_unlinked_cleanup(struct gfs2_sbd *sdp)
{
        struct list_head *head = &sdp->sd_unlinked_list;
        struct gfs2_unlinked *ul;
        unsigned int x;

        spin_lock(&sdp->sd_unlinked_spin);
        while (!list_empty(head)) {
                ul = list_entry(head->next, struct gfs2_unlinked, ul_list);

                if (ul->ul_count > 1) {
                        list_move_tail(&ul->ul_list, head);
                        spin_unlock(&sdp->sd_unlinked_spin);
                        schedule();
                        spin_lock(&sdp->sd_unlinked_spin);
                        continue;
                }

                list_del_init(&ul->ul_list);
                atomic_dec(&sdp->sd_unlinked_count);

                gfs2_assert_warn(sdp, ul->ul_count == 1);
                gfs2_assert_warn(sdp, !test_bit(ULF_LOCKED, &ul->ul_flags));
                kfree(ul);
        }
        spin_unlock(&sdp->sd_unlinked_spin);

        gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_unlinked_count));

        if (sdp->sd_unlinked_bitmap) {
                for (x = 0; x < sdp->sd_unlinked_chunks; x++)
                        kfree(sdp->sd_unlinked_bitmap[x]);
                kfree(sdp->sd_unlinked_bitmap);
        }
}