[PATCH] move capable() to capability.h
[deliverable/linux.git] / fs / ufs / balloc.c
1/*
2 * linux/fs/ufs/balloc.c
3 *
4 * Copyright (C) 1998
5 * Daniel Pirkl <daniel.pirkl@email.cz>
6 * Charles University, Faculty of Mathematics and Physics
7 */
8
9#include <linux/fs.h>
10#include <linux/ufs_fs.h>
11#include <linux/stat.h>
12#include <linux/time.h>
13#include <linux/string.h>
14#include <linux/quotaops.h>
15#include <linux/buffer_head.h>
16#include <linux/sched.h>
17#include <linux/bitops.h>
18#include <asm/byteorder.h>
19
20#include "swab.h"
21#include "util.h"
22
23#undef UFS_BALLOC_DEBUG
24
25#ifdef UFS_BALLOC_DEBUG
26#define UFSD(x) printk("(%s, %d), %s:", __FILE__, __LINE__, __FUNCTION__); printk x;
27#else
28#define UFSD(x)
29#endif
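/*
 * UFSD takes a doubly-parenthesised argument so that a single macro
 * parameter can carry a whole printf-style argument list, e.g.
 * UFSD(("ENTER, fragment %u, count %u\n", fragment, count)).
 * With UFS_BALLOC_DEBUG undefined the call expands to nothing.
 */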
30
31static unsigned ufs_add_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
32static unsigned ufs_alloc_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
33static unsigned ufs_alloccg_block (struct inode *, struct ufs_cg_private_info *, unsigned, int *);
34static unsigned ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, unsigned, unsigned);
35static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
36static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
37
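/*
 * Terminology used below: the smallest allocation unit is a fragment,
 * uspi->s_fpb fragments make up one block, and uspi->s_fpg fragments
 * make up one cylinder group.  ufs_dtog() and ufs_dtogd() split an
 * absolute fragment number into a cylinder-group index and an offset
 * inside that group.  As an illustration only (the real values come
 * from the superblock), with s_fpg == 4096 and s_fpb == 8, fragment
 * 10000 lives in group ufs_dtog(10000) == 2 at in-group offset
 * ufs_dtogd(10000) == 1808, i.e. fragment 0 of in-group block 226.
 */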
38/*
39 * Free 'count' fragments from fragment number 'fragment'
40 */
41void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count) {
42 struct super_block * sb;
43 struct ufs_sb_private_info * uspi;
44 struct ufs_super_block_first * usb1;
45 struct ufs_cg_private_info * ucpi;
46 struct ufs_cylinder_group * ucg;
47 unsigned cgno, bit, end_bit, bbase, blkmap, i, blkno, cylno;
48
49 sb = inode->i_sb;
50 uspi = UFS_SB(sb)->s_uspi;
51 usb1 = ubh_get_usb_first(USPI_UBH);
52
53 UFSD(("ENTER, fragment %u, count %u\n", fragment, count))
54
55 if (ufs_fragnum(fragment) + count > uspi->s_fpg)
56 ufs_error (sb, "ufs_free_fragments", "internal error");
57
58 lock_super(sb);
59
60 cgno = ufs_dtog(fragment);
61 bit = ufs_dtogd(fragment);
62 if (cgno >= uspi->s_ncg) {
63 ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
64 goto failed;
65 }
66
67 ucpi = ufs_load_cylinder (sb, cgno);
68 if (!ucpi)
69 goto failed;
70 ucg = ubh_get_ucg (UCPI_UBH);
71 if (!ufs_cg_chkmagic(sb, ucg)) {
72 ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
73 goto failed;
74 }
75
76 end_bit = bit + count;
77 bbase = ufs_blknum (bit);
78 blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
79 ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
80 for (i = bit; i < end_bit; i++) {
81 if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, i))
82 ubh_setbit (UCPI_UBH, ucpi->c_freeoff, i);
83 else ufs_error (sb, "ufs_free_fragments",
84 "bit already cleared for fragment %u", i);
85 }
86
87 DQUOT_FREE_BLOCK (inode, count);
88
89
90 fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
91 fs32_add(sb, &usb1->fs_cstotal.cs_nffree, count);
92 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
93 blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
94 ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
95
96 /*
 97 * Try to reassemble the free fragments into a whole block
98 */
99 blkno = ufs_fragstoblks (bbase);
100 if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
101 fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
102 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, uspi->s_fpb);
103 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
104 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
105 ufs_clusteracct (sb, ucpi, blkno, 1);
106 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
107 fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1);
108 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
109 cylno = ufs_cbtocylno (bbase);
110 fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(bbase)), 1);
111 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
112 }
113
114 ubh_mark_buffer_dirty (USPI_UBH);
115 ubh_mark_buffer_dirty (UCPI_UBH);
116 if (sb->s_flags & MS_SYNCHRONOUS) {
 117 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
118 ubh_wait_on_buffer (UCPI_UBH);
119 }
120 sb->s_dirt = 1;
121
122 unlock_super (sb);
123 UFSD(("EXIT\n"))
124 return;
125
126failed:
127 unlock_super (sb);
128 UFSD(("EXIT (FAILED)\n"))
129 return;
130}
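/*
 * Note on the reassembly step above: the freed fragments are first
 * returned to the per-group fragment counters (cs_nffree); if the
 * surrounding block then has all of its s_fpb fragment bits set in the
 * free map, the whole block is promoted to the block counters
 * (cs_nbfree) and the cylinder summaries, so cs_nffree is decreased by
 * s_fpb again and cs_nbfree is increased by one.
 */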
131
132/*
133 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
134 */
135void ufs_free_blocks (struct inode * inode, unsigned fragment, unsigned count) {
136 struct super_block * sb;
137 struct ufs_sb_private_info * uspi;
138 struct ufs_super_block_first * usb1;
139 struct ufs_cg_private_info * ucpi;
140 struct ufs_cylinder_group * ucg;
141 unsigned overflow, cgno, bit, end_bit, blkno, i, cylno;
142
143 sb = inode->i_sb;
144 uspi = UFS_SB(sb)->s_uspi;
145 usb1 = ubh_get_usb_first(USPI_UBH);
146
147 UFSD(("ENTER, fragment %u, count %u\n", fragment, count))
148
149 if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
150 ufs_error (sb, "ufs_free_blocks", "internal error, "
151 "fragment %u, count %u\n", fragment, count);
152 goto failed;
153 }
154
155 lock_super(sb);
156
157do_more:
158 overflow = 0;
159 cgno = ufs_dtog (fragment);
160 bit = ufs_dtogd (fragment);
161 if (cgno >= uspi->s_ncg) {
162 ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
163 goto failed;
164 }
165 end_bit = bit + count;
166 if (end_bit > uspi->s_fpg) {
167 overflow = bit + count - uspi->s_fpg;
168 count -= overflow;
169 end_bit -= overflow;
170 }
171
172 ucpi = ufs_load_cylinder (sb, cgno);
173 if (!ucpi)
174 goto failed;
175 ucg = ubh_get_ucg (UCPI_UBH);
176 if (!ufs_cg_chkmagic(sb, ucg)) {
177 ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
178 goto failed;
179 }
180
181 for (i = bit; i < end_bit; i += uspi->s_fpb) {
182 blkno = ufs_fragstoblks(i);
183 if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
184 ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
185 }
186 ubh_setblock(UCPI_UBH, ucpi->c_freeoff, blkno);
187 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
188 ufs_clusteracct (sb, ucpi, blkno, 1);
189 DQUOT_FREE_BLOCK(inode, uspi->s_fpb);
190
191 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
192 fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1);
193 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
194 cylno = ufs_cbtocylno(i);
195 fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(i)), 1);
196 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
197 }
198
199 ubh_mark_buffer_dirty (USPI_UBH);
200 ubh_mark_buffer_dirty (UCPI_UBH);
201 if (sb->s_flags & MS_SYNCHRONOUS) {
 202 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
203 ubh_wait_on_buffer (UCPI_UBH);
204 }
205
206 if (overflow) {
207 fragment += count;
208 count = overflow;
209 goto do_more;
210 }
211
212 sb->s_dirt = 1;
213 unlock_super (sb);
214 UFSD(("EXIT\n"))
215 return;
216
217failed:
218 unlock_super (sb);
219 UFSD(("EXIT (FAILED)\n"))
220 return;
221}
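/*
 * The do_more loop above handles a range that crosses a cylinder-group
 * boundary: only the part of [fragment, fragment + count) that fits in
 * the current group is freed, the remainder is kept in 'overflow' and
 * the loop restarts with 'fragment' advanced past the freed part.  For
 * illustration only, with s_fpg == 4096, freeing 16 fragments starting
 * at fragment 4088 frees 8 fragments from group 0 on the first pass
 * and 8 fragments from group 1 on the second.
 */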
222
223
224
225#define NULLIFY_FRAGMENTS \
226 for (i = oldcount; i < newcount; i++) { \
227 bh = sb_getblk(sb, result + i); \
228 memset (bh->b_data, 0, sb->s_blocksize); \
229 set_buffer_uptodate(bh); \
230 mark_buffer_dirty (bh); \
231 if (IS_SYNC(inode)) \
232 sync_dirty_buffer(bh); \
233 brelse (bh); \
234 }
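/*
 * NULLIFY_FRAGMENTS zeroes the newly allocated fragments on disk so
 * stale data never becomes visible in the file.  It is written as a
 * macro rather than a function because it relies on the caller's local
 * variables (sb, inode, bh, result, oldcount, newcount, i) being in
 * scope.
 */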
235
236unsigned ufs_new_fragments (struct inode * inode, __fs32 * p, unsigned fragment,
237 unsigned goal, unsigned count, int * err )
238{
239 struct super_block * sb;
240 struct ufs_sb_private_info * uspi;
241 struct ufs_super_block_first * usb1;
242 struct buffer_head * bh;
243 unsigned cgno, oldcount, newcount, tmp, request, i, result;
244
245 UFSD(("ENTER, ino %lu, fragment %u, goal %u, count %u\n", inode->i_ino, fragment, goal, count))
246
247 sb = inode->i_sb;
248 uspi = UFS_SB(sb)->s_uspi;
249 usb1 = ubh_get_usb_first(USPI_UBH);
250 *err = -ENOSPC;
251
252 lock_super (sb);
253
254 tmp = fs32_to_cpu(sb, *p);
255 if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
256 ufs_warning (sb, "ufs_new_fragments", "internal warning"
257 " fragment %u, count %u", fragment, count);
258 count = uspi->s_fpb - ufs_fragnum(fragment);
259 }
260 oldcount = ufs_fragnum (fragment);
261 newcount = oldcount + count;
262
263 /*
264 * Somebody else has just allocated our fragments
265 */
266 if (oldcount) {
267 if (!tmp) {
268 ufs_error (sb, "ufs_new_fragments", "internal error, "
269 "fragment %u, tmp %u\n", fragment, tmp);
270 unlock_super (sb);
271 return (unsigned)-1;
272 }
273 if (fragment < UFS_I(inode)->i_lastfrag) {
274 UFSD(("EXIT (ALREADY ALLOCATED)\n"))
275 unlock_super (sb);
276 return 0;
277 }
278 }
279 else {
280 if (tmp) {
281 UFSD(("EXIT (ALREADY ALLOCATED)\n"))
282 unlock_super(sb);
283 return 0;
284 }
285 }
286
287 /*
 288 * There is not enough space for the user on the device
289 */
290 if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(usb1, UFS_MINFREE) <= 0) {
291 unlock_super (sb);
292 UFSD(("EXIT (FAILED)\n"))
293 return 0;
294 }
295
296 if (goal >= uspi->s_size)
297 goal = 0;
298 if (goal == 0)
299 cgno = ufs_inotocg (inode->i_ino);
300 else
301 cgno = ufs_dtog (goal);
302
303 /*
304 * allocate new fragment
305 */
306 if (oldcount == 0) {
307 result = ufs_alloc_fragments (inode, cgno, goal, count, err);
308 if (result) {
309 *p = cpu_to_fs32(sb, result);
310 *err = 0;
311 inode->i_blocks += count << uspi->s_nspfshift;
312 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
313 NULLIFY_FRAGMENTS
314 }
315 unlock_super(sb);
316 UFSD(("EXIT, result %u\n", result))
317 return result;
318 }
319
320 /*
321 * resize block
322 */
323 result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
324 if (result) {
325 *err = 0;
326 inode->i_blocks += count << uspi->s_nspfshift;
327 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
328 NULLIFY_FRAGMENTS
329 unlock_super(sb);
330 UFSD(("EXIT, result %u\n", result))
331 return result;
332 }
333
334 /*
335 * allocate new block and move data
336 */
337 switch (fs32_to_cpu(sb, usb1->fs_optim)) {
338 case UFS_OPTSPACE:
339 request = newcount;
340 if (uspi->s_minfree < 5 || fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree)
341 > uspi->s_dsize * uspi->s_minfree / (2 * 100) )
342 break;
343 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
344 break;
345 default:
346 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
347
348 case UFS_OPTTIME:
349 request = uspi->s_fpb;
350 if (fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree) < uspi->s_dsize *
351 (uspi->s_minfree - 2) / 100)
352 break;
353 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
354 break;
355 }
356 result = ufs_alloc_fragments (inode, cgno, goal, request, err);
357 if (result) {
358 for (i = 0; i < oldcount; i++) {
359 bh = sb_bread(sb, tmp + i);
360 if(bh)
361 {
362 clear_buffer_dirty(bh);
363 bh->b_blocknr = result + i;
364 mark_buffer_dirty (bh);
365 if (IS_SYNC(inode))
366 sync_dirty_buffer(bh);
367 brelse (bh);
368 }
369 else
370 {
371 printk(KERN_ERR "ufs_new_fragments: bread fail\n");
372 unlock_super(sb);
373 return 0;
374 }
375 }
376 *p = cpu_to_fs32(sb, result);
377 *err = 0;
378 inode->i_blocks += count << uspi->s_nspfshift;
379 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
380 NULLIFY_FRAGMENTS
381 unlock_super(sb);
382 if (newcount < request)
383 ufs_free_fragments (inode, result + newcount, request - newcount);
384 ufs_free_fragments (inode, tmp, oldcount);
385 UFSD(("EXIT, result %u\n", result))
386 return result;
387 }
388
389 unlock_super(sb);
390 UFSD(("EXIT (FAILED)\n"))
391 return 0;
392}
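/*
 * Summary of the three paths through ufs_new_fragments():
 *  1. oldcount == 0: a brand new area of 'count' fragments is taken
 *     from ufs_alloc_fragments() and *p is pointed at it.
 *  2. the existing area can be grown in place: ufs_add_fragments()
 *     simply clears the extra free-map bits after the old fragments.
 *  3. otherwise a larger area ('request' fragments, sized according to
 *     the fs_optim policy) is allocated, the old buffers are remapped
 *     to the new location, and the unused tail and the old area are
 *     freed again.
 */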
393
394static unsigned
395ufs_add_fragments (struct inode * inode, unsigned fragment,
396 unsigned oldcount, unsigned newcount, int * err)
397{
398 struct super_block * sb;
399 struct ufs_sb_private_info * uspi;
400 struct ufs_super_block_first * usb1;
401 struct ufs_cg_private_info * ucpi;
402 struct ufs_cylinder_group * ucg;
403 unsigned cgno, fragno, fragoff, count, fragsize, i;
404
405 UFSD(("ENTER, fragment %u, oldcount %u, newcount %u\n", fragment, oldcount, newcount))
406
407 sb = inode->i_sb;
408 uspi = UFS_SB(sb)->s_uspi;
409 usb1 = ubh_get_usb_first (USPI_UBH);
410 count = newcount - oldcount;
411
412 cgno = ufs_dtog(fragment);
413 if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
414 return 0;
415 if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
416 return 0;
417 ucpi = ufs_load_cylinder (sb, cgno);
418 if (!ucpi)
419 return 0;
420 ucg = ubh_get_ucg (UCPI_UBH);
421 if (!ufs_cg_chkmagic(sb, ucg)) {
422 ufs_panic (sb, "ufs_add_fragments",
423 "internal error, bad magic number on cg %u", cgno);
424 return 0;
425 }
426
427 fragno = ufs_dtogd (fragment);
428 fragoff = ufs_fragnum (fragno);
429 for (i = oldcount; i < newcount; i++)
430 if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, fragno + i))
431 return 0;
432 /*
433 * Block can be extended
434 */
435 ucg->cg_time = cpu_to_fs32(sb, get_seconds());
436 for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
437 if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, fragno + i))
438 break;
439 fragsize = i - oldcount;
440 if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
441 ufs_panic (sb, "ufs_add_fragments",
442 "internal error or corrupted bitmap on cg %u", cgno);
443 fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
444 if (fragsize != count)
445 fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
446 for (i = oldcount; i < newcount; i++)
447 ubh_clrbit (UCPI_UBH, ucpi->c_freeoff, fragno + i);
448 if(DQUOT_ALLOC_BLOCK(inode, count)) {
449 *err = -EDQUOT;
450 return 0;
451 }
452
453 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
454 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
455 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);
456
457 ubh_mark_buffer_dirty (USPI_UBH);
458 ubh_mark_buffer_dirty (UCPI_UBH);
459 if (sb->s_flags & MS_SYNCHRONOUS) {
 460 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
461 ubh_wait_on_buffer (UCPI_UBH);
462 }
463 sb->s_dirt = 1;
464
465 UFSD(("EXIT, fragment %u\n", fragment))
466
467 return fragment;
468}
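/*
 * ufs_add_fragments() only ever grows an allocation in place: it
 * requires that the fragments immediately following the current ones,
 * inside the same block, are still free.  cg_frsum[] is updated as if
 * a free run of 'fragsize' fragments had been shortened by 'count',
 * leaving a run of 'fragsize - count' behind when the whole run was
 * not consumed.
 */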
469
470#define UFS_TEST_FREE_SPACE_CG \
471 ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
472 if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
473 goto cg_found; \
474 for (k = count; k < uspi->s_fpb; k++) \
475 if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
476 goto cg_found;
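/*
 * UFS_TEST_FREE_SPACE_CG jumps to cg_found when cylinder group 'cgno'
 * has either a completely free block or, for some k >= count, a free
 * fragment run of length k recorded in cg_frsum[].  Like
 * NULLIFY_FRAGMENTS it deliberately uses the caller's locals
 * (sb, uspi, ucg, cgno, count, k).
 */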
477
478static unsigned ufs_alloc_fragments (struct inode * inode, unsigned cgno,
479 unsigned goal, unsigned count, int * err)
480{
481 struct super_block * sb;
482 struct ufs_sb_private_info * uspi;
483 struct ufs_super_block_first * usb1;
484 struct ufs_cg_private_info * ucpi;
485 struct ufs_cylinder_group * ucg;
486 unsigned oldcg, i, j, k, result, allocsize;
487
488 UFSD(("ENTER, ino %lu, cgno %u, goal %u, count %u\n", inode->i_ino, cgno, goal, count))
489
490 sb = inode->i_sb;
491 uspi = UFS_SB(sb)->s_uspi;
492 usb1 = ubh_get_usb_first(USPI_UBH);
493 oldcg = cgno;
494
495 /*
496 * 1. searching on preferred cylinder group
497 */
498 UFS_TEST_FREE_SPACE_CG
499
500 /*
501 * 2. quadratic rehash
502 */
503 for (j = 1; j < uspi->s_ncg; j *= 2) {
504 cgno += j;
505 if (cgno >= uspi->s_ncg)
506 cgno -= uspi->s_ncg;
507 UFS_TEST_FREE_SPACE_CG
508 }
509
510 /*
511 * 3. brute force search
 512 * We start at j = 2 (offset 0 was checked in step 1, offset 1 in step 2)
513 */
514 cgno = (oldcg + 1) % uspi->s_ncg;
515 for (j = 2; j < uspi->s_ncg; j++) {
516 cgno++;
517 if (cgno >= uspi->s_ncg)
518 cgno = 0;
519 UFS_TEST_FREE_SPACE_CG
520 }
521
522 UFSD(("EXIT (FAILED)\n"))
523 return 0;
524
525cg_found:
526 ucpi = ufs_load_cylinder (sb, cgno);
527 if (!ucpi)
528 return 0;
529 ucg = ubh_get_ucg (UCPI_UBH);
530 if (!ufs_cg_chkmagic(sb, ucg))
531 ufs_panic (sb, "ufs_alloc_fragments",
532 "internal error, bad magic number on cg %u", cgno);
533 ucg->cg_time = cpu_to_fs32(sb, get_seconds());
534
535 if (count == uspi->s_fpb) {
536 result = ufs_alloccg_block (inode, ucpi, goal, err);
537 if (result == (unsigned)-1)
538 return 0;
 539 goto succeed;
540 }
541
542 for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
543 if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
544 break;
545
546 if (allocsize == uspi->s_fpb) {
547 result = ufs_alloccg_block (inode, ucpi, goal, err);
548 if (result == (unsigned)-1)
549 return 0;
550 goal = ufs_dtogd (result);
551 for (i = count; i < uspi->s_fpb; i++)
552 ubh_setbit (UCPI_UBH, ucpi->c_freeoff, goal + i);
553 i = uspi->s_fpb - count;
554 DQUOT_FREE_BLOCK(inode, i);
555
556 fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
557 fs32_add(sb, &usb1->fs_cstotal.cs_nffree, i);
558 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
559 fs32_add(sb, &ucg->cg_frsum[i], 1);
 560 goto succeed;
561 }
562
563 result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
564 if (result == (unsigned)-1)
565 return 0;
566 if(DQUOT_ALLOC_BLOCK(inode, count)) {
567 *err = -EDQUOT;
568 return 0;
569 }
570 for (i = 0; i < count; i++)
571 ubh_clrbit (UCPI_UBH, ucpi->c_freeoff, result + i);
572
573 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
574 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);
575 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
576 fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);
577
578 if (count != allocsize)
579 fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);
580
 581succeed:
582 ubh_mark_buffer_dirty (USPI_UBH);
583 ubh_mark_buffer_dirty (UCPI_UBH);
584 if (sb->s_flags & MS_SYNCHRONOUS) {
 585 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
586 ubh_wait_on_buffer (UCPI_UBH);
587 }
588 sb->s_dirt = 1;
589
590 result += cgno * uspi->s_fpg;
591 UFSD(("EXIT3, result %u\n", result))
592 return result;
593}
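/*
 * The cylinder-group search above is the classic FFS three-step scan:
 * the preferred group first, then a quadratic rehash (offsets 1, 3, 7,
 * 15, ... from the preferred group), then a linear walk over the
 * remaining groups.  For illustration only, with s_ncg == 16 and a
 * preferred group of 5 the rehash probes groups 6, 8, 12 and 4 before
 * the linear walk takes over.
 */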
594
595static unsigned ufs_alloccg_block (struct inode * inode,
596 struct ufs_cg_private_info * ucpi, unsigned goal, int * err)
597{
598 struct super_block * sb;
599 struct ufs_sb_private_info * uspi;
600 struct ufs_super_block_first * usb1;
601 struct ufs_cylinder_group * ucg;
602 unsigned result, cylno, blkno;
603
604 UFSD(("ENTER, goal %u\n", goal))
605
606 sb = inode->i_sb;
607 uspi = UFS_SB(sb)->s_uspi;
608 usb1 = ubh_get_usb_first(USPI_UBH);
609 ucg = ubh_get_ucg(UCPI_UBH);
610
611 if (goal == 0) {
612 goal = ucpi->c_rotor;
613 goto norot;
614 }
615 goal = ufs_blknum (goal);
616 goal = ufs_dtogd (goal);
617
618 /*
619 * If the requested block is available, use it.
620 */
621 if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, ufs_fragstoblks(goal))) {
622 result = goal;
623 goto gotit;
624 }
625
626norot:
627 result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
628 if (result == (unsigned)-1)
629 return (unsigned)-1;
630 ucpi->c_rotor = result;
631gotit:
632 blkno = ufs_fragstoblks(result);
633 ubh_clrblock (UCPI_UBH, ucpi->c_freeoff, blkno);
634 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
635 ufs_clusteracct (sb, ucpi, blkno, -1);
636 if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
637 *err = -EDQUOT;
638 return (unsigned)-1;
639 }
640
641 fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
642 fs32_sub(sb, &usb1->fs_cstotal.cs_nbfree, 1);
643 fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
644 cylno = ufs_cbtocylno(result);
645 fs16_sub(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(result)), 1);
646 fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
647
648 UFSD(("EXIT, result %u\n", result))
649
650 return result;
651}
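/*
 * ufs_alloccg_block() allocates one whole block inside the given
 * cylinder group.  A non-zero goal is rounded down to a block boundary
 * and converted to an in-group offset; if that exact block is still
 * free it is used, otherwise ufs_bitmap_search() scans for a free
 * block starting from that offset (or from the group's rotor when the
 * goal is 0), and the rotor is updated to the result.
 */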
652
653static unsigned ufs_bitmap_search (struct super_block * sb,
654 struct ufs_cg_private_info * ucpi, unsigned goal, unsigned count)
655{
656 struct ufs_sb_private_info * uspi;
657 struct ufs_super_block_first * usb1;
658 struct ufs_cylinder_group * ucg;
659 unsigned start, length, location, result;
 660 unsigned position, fragsize, blockmap, mask;
661
662 UFSD(("ENTER, cg %u, goal %u, count %u\n", ucpi->c_cgx, goal, count))
663
664 uspi = UFS_SB(sb)->s_uspi;
665 usb1 = ubh_get_usb_first (USPI_UBH);
666 ucg = ubh_get_ucg(UCPI_UBH);
667
668 if (goal)
669 start = ufs_dtogd(goal) >> 3;
670 else
671 start = ucpi->c_frotor >> 3;
672
673 length = ((uspi->s_fpg + 7) >> 3) - start;
674 location = ubh_scanc(UCPI_UBH, ucpi->c_freeoff + start, length,
675 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
676 1 << (count - 1 + (uspi->s_fpb & 7)));
677 if (location == 0) {
678 length = start + 1;
679 location = ubh_scanc(UCPI_UBH, ucpi->c_freeoff, length,
680 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
681 1 << (count - 1 + (uspi->s_fpb & 7)));
682 if (location == 0) {
683 ufs_error (sb, "ufs_bitmap_search",
684 "bitmap corrupted on cg %u, start %u, length %u, count %u, freeoff %u\n",
685 ucpi->c_cgx, start, length, count, ucpi->c_freeoff);
686 return (unsigned)-1;
687 }
688 start = 0;
689 }
690 result = (start + length - location) << 3;
691 ucpi->c_frotor = result;
692
693 /*
694 * found the byte in the map
695 */
696 blockmap = ubh_blkmap(UCPI_UBH, ucpi->c_freeoff, result);
697 fragsize = 0;
 698 for (position = 0, mask = 1; position < 8; position++, mask <<= 1) {
 699 if (blockmap & mask) {
 700 if (!(position & uspi->s_fpbmask))
701 fragsize = 1;
702 else
703 fragsize++;
704 }
705 else {
706 if (fragsize == count) {
 707 result += position - count;
708 UFSD(("EXIT, result %u\n", result))
709 return result;
710 }
711 fragsize = 0;
712 }
713 }
714 if (fragsize == count) {
 715 result += position - count;
716 UFSD(("EXIT, result %u\n", result))
717 return result;
718 }
719 ufs_error (sb, "ufs_bitmap_search", "block not in map on cg %u\n", ucpi->c_cgx);
720 UFSD(("EXIT (FAILED)\n"))
721 return (unsigned)-1;
722}
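/*
 * ufs_bitmap_search() works at two granularities: ubh_scanc() first
 * scans whole bytes of the free map, from the goal/rotor byte to the
 * end of the group and then, wrapping, from the start of the group,
 * using the per-byte fragment tables to find a byte known to contain a
 * suitable free run; the final loop then walks that byte bit by bit to
 * locate the exact starting fragment of a run of 'count' free
 * fragments.
 */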
723
724static void ufs_clusteracct(struct super_block * sb,
725 struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
726{
727 struct ufs_sb_private_info * uspi;
728 int i, start, end, forw, back;
729
730 uspi = UFS_SB(sb)->s_uspi;
731 if (uspi->s_contigsumsize <= 0)
732 return;
733
734 if (cnt > 0)
735 ubh_setbit(UCPI_UBH, ucpi->c_clusteroff, blkno);
736 else
737 ubh_clrbit(UCPI_UBH, ucpi->c_clusteroff, blkno);
738
739 /*
740 * Find the size of the cluster going forward.
741 */
742 start = blkno + 1;
743 end = start + uspi->s_contigsumsize;
744 if ( end >= ucpi->c_nclusterblks)
745 end = ucpi->c_nclusterblks;
746 i = ubh_find_next_zero_bit (UCPI_UBH, ucpi->c_clusteroff, end, start);
747 if (i > end)
748 i = end;
749 forw = i - start;
750
751 /*
752 * Find the size of the cluster going backward.
753 */
754 start = blkno - 1;
755 end = start - uspi->s_contigsumsize;
756 if (end < 0 )
757 end = -1;
758 i = ubh_find_last_zero_bit (UCPI_UBH, ucpi->c_clusteroff, start, end);
759 if ( i < end)
760 i = end;
761 back = start - i;
762
763 /*
764 * Account for old cluster and the possibly new forward and
765 * back clusters.
766 */
767 i = back + forw + 1;
768 if (i > uspi->s_contigsumsize)
769 i = uspi->s_contigsumsize;
770 fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (i << 2)), cnt);
771 if (back > 0)
772 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (back << 2)), cnt);
773 if (forw > 0)
774 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (forw << 2)), cnt);
775}
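/*
 * ufs_clusteracct() keeps the 44BSD cluster summary up to date: each
 * cylinder group records, per run length up to s_contigsumsize, how
 * many runs of that many contiguous free blocks it contains.  When a
 * block changes state the free runs immediately before and after it
 * are measured (back/forw), the counter for the merged run length is
 * adjusted by 'cnt', and the counters for the two old runs are
 * adjusted the other way.
 */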
776
777
778static unsigned char ufs_fragtable_8fpb[] = {
779 0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
780 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
781 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
782 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
783 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
784 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
785 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
786 0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
787 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
788 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
789 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
790 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
791 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
792 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
793 0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
794 0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
795};
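/*
 * ufs_fragtable_8fpb is indexed by one byte of the free-fragment map
 * (a set bit meaning "free") and is used when a block is exactly 8
 * fragments, so one map byte covers one block.  Bit n-1 of a table
 * entry is set when the byte contains a run of exactly n free
 * fragments: for example 0x03 (two adjacent free fragments) maps to
 * 0x02, 0x0F maps to 0x08 (a run of four) and 0xFF maps to 0x80 (a
 * whole free block).  ufs_fragtable_other serves the same purpose for
 * layouts with fewer than 8 fragments per block, where one map byte
 * covers several blocks; ufs_bitmap_search() picks between the two.
 */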
796
797static unsigned char ufs_fragtable_other[] = {
798 0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
799 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
800 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
801 0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
802 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
803 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
804 0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
805 0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
806 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
807 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
808 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
809 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
810 0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
811 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
812 0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
813 0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
814};