drbd: Moved the mdev member into drbd_work (from drbd_request and drbd_peer_request)
drivers/block/drbd/drbd_int.h
1 /*
2 drbd_int.h
3
4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10 drbd is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2, or (at your option)
13 any later version.
14
15 drbd is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with drbd; see the file COPYING. If not, write to
22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23
24 */
25
26 #ifndef _DRBD_INT_H
27 #define _DRBD_INT_H
28
29 #include <linux/compiler.h>
30 #include <linux/types.h>
31 #include <linux/version.h>
32 #include <linux/list.h>
33 #include <linux/sched.h>
34 #include <linux/bitops.h>
35 #include <linux/slab.h>
36 #include <linux/crypto.h>
37 #include <linux/ratelimit.h>
38 #include <linux/tcp.h>
39 #include <linux/mutex.h>
40 #include <linux/major.h>
41 #include <linux/blkdev.h>
42 #include <linux/genhd.h>
43 #include <linux/idr.h>
44 #include <net/tcp.h>
45 #include <linux/lru_cache.h>
46 #include <linux/prefetch.h>
47 #include <linux/drbd.h>
48 #include "drbd_state.h"
49
50 #ifdef __CHECKER__
51 # define __protected_by(x) __attribute__((require_context(x,1,999,"rdwr")))
52 # define __protected_read_by(x) __attribute__((require_context(x,1,999,"read")))
53 # define __protected_write_by(x) __attribute__((require_context(x,1,999,"write")))
54 # define __must_hold(x) __attribute__((context(x,1,1), require_context(x,1,999,"call")))
55 #else
56 # define __protected_by(x)
57 # define __protected_read_by(x)
58 # define __protected_write_by(x)
59 # define __must_hold(x)
60 #endif
61
62 #define __no_warn(lock, stmt) do { __acquire(lock); stmt; __release(lock); } while (0)
63
64 /* module parameter, defined in drbd_main.c */
65 extern unsigned int minor_count;
66 extern int disable_sendpage;
67 extern int allow_oos;
68 extern unsigned int cn_idx;
69
70 #ifdef CONFIG_DRBD_FAULT_INJECTION
71 extern int enable_faults;
72 extern int fault_rate;
73 extern int fault_devs;
74 #endif
75
76 extern char usermode_helper[];
77
78
79 /* I don't remember why XCPU ...
80 * This is used to wake the asender,
81	 * and to interrupt the sending task
82 * on disconnect.
83 */
84 #define DRBD_SIG SIGXCPU
85
86 /* This is used to stop/restart our threads.
87 * Cannot use SIGTERM nor SIGKILL, since these
88	 * are sent out by init on runlevel changes.
89 * I choose SIGHUP for now.
90 */
91 #define DRBD_SIGKILL SIGHUP
92
93 #define ID_IN_SYNC (4711ULL)
94 #define ID_OUT_OF_SYNC (4712ULL)
95 #define ID_SYNCER (-1ULL)
96
97 #define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)
98
99 struct drbd_conf;
100 struct drbd_tconn;
101
102
103	/* to shorten dev_warn(DEV, "msg"); and related statements */
104 #define DEV (disk_to_dev(mdev->vdisk))
105
106 #define conn_printk(LEVEL, TCONN, FMT, ARGS...) \
107 printk(LEVEL "d-con %s: " FMT, TCONN->name , ## ARGS)
108 #define conn_alert(TCONN, FMT, ARGS...) conn_printk(KERN_ALERT, TCONN, FMT, ## ARGS)
109 #define conn_crit(TCONN, FMT, ARGS...) conn_printk(KERN_CRIT, TCONN, FMT, ## ARGS)
110 #define conn_err(TCONN, FMT, ARGS...) conn_printk(KERN_ERR, TCONN, FMT, ## ARGS)
111 #define conn_warn(TCONN, FMT, ARGS...) conn_printk(KERN_WARNING, TCONN, FMT, ## ARGS)
112 #define conn_notice(TCONN, FMT, ARGS...) conn_printk(KERN_NOTICE, TCONN, FMT, ## ARGS)
113 #define conn_info(TCONN, FMT, ARGS...) conn_printk(KERN_INFO, TCONN, FMT, ## ARGS)
114 #define conn_dbg(TCONN, FMT, ARGS...) conn_printk(KERN_DEBUG, TCONN, FMT, ## ARGS)
115
116 #define D_ASSERT(exp) if (!(exp)) \
117 dev_err(DEV, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__)
118
119 /**
120 * expect - Make an assertion
121 *
122 * Unlike the assert macro, this macro returns a boolean result.
123 */
124 #define expect(exp) ({ \
125 bool _bool = (exp); \
126 if (!_bool) \
127 dev_err(DEV, "ASSERTION %s FAILED in %s\n", \
128 #exp, __func__); \
129 _bool; \
130 })
131
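/* A minimal usage sketch (not from this file; "size" is a placeholder):
 * because expect() yields the tested value, it can guard a branch while
 * still logging the broken assumption:
 *
 *	if (!expect(size <= DRBD_MAX_BIO_SIZE))
 *		return -EINVAL;
 */
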
132 /* Defines to control fault insertion */
133 enum {
134 DRBD_FAULT_MD_WR = 0, /* meta data write */
135 DRBD_FAULT_MD_RD = 1, /* read */
136 DRBD_FAULT_RS_WR = 2, /* resync */
137 DRBD_FAULT_RS_RD = 3,
138 DRBD_FAULT_DT_WR = 4, /* data */
139 DRBD_FAULT_DT_RD = 5,
140 DRBD_FAULT_DT_RA = 6, /* data read ahead */
141 DRBD_FAULT_BM_ALLOC = 7, /* bitmap allocation */
142 DRBD_FAULT_AL_EE = 8, /* alloc ee */
143 DRBD_FAULT_RECEIVE = 9, /* Changes some bytes upon receiving a [rs]data block */
144
145 DRBD_FAULT_MAX,
146 };
147
148 extern unsigned int
149 _drbd_insert_fault(struct drbd_conf *mdev, unsigned int type);
150
151 static inline int
152 drbd_insert_fault(struct drbd_conf *mdev, unsigned int type) {
153 #ifdef CONFIG_DRBD_FAULT_INJECTION
154 return fault_rate &&
155 (enable_faults & (1<<type)) &&
156 _drbd_insert_fault(mdev, type);
157 #else
158 return 0;
159 #endif
160 }
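
/* Typical call site (an illustrative sketch, assuming a bio about to be
 * submitted; the real fault hooks live in the .c files): fail the I/O
 * locally instead of issuing it, when the matching fault type is armed
 * via the fault_rate/enable_faults module parameters:
 *
 *	if (drbd_insert_fault(mdev, DRBD_FAULT_MD_WR))
 *		bio_endio(bio, -EIO);
 *	else
 *		submit_bio(rw, bio);
 */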
161
162 /* integer division, round _UP_ to the next integer */
163 #define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0))
164 /* usual integer division */
165 #define div_floor(A, B) ((A)/(B))
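/* e.g. div_ceil(7, 4) == 2 while div_floor(7, 4) == 1. Note that
 * div_ceil() evaluates its arguments more than once, so the arguments
 * must not have side effects. */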
166
167 /* drbd_meta-data.c (still in drbd_main.c) */
168 /* 4th incarnation of the disk layout. */
169 #define DRBD_MD_MAGIC (DRBD_MAGIC+4)
170
171 extern struct drbd_conf **minor_table;
172 extern struct ratelimit_state drbd_ratelimit_state;
173
174 /* on the wire */
175 enum drbd_packet {
176 /* receiver (data socket) */
177 P_DATA = 0x00,
178 P_DATA_REPLY = 0x01, /* Response to P_DATA_REQUEST */
179 P_RS_DATA_REPLY = 0x02, /* Response to P_RS_DATA_REQUEST */
180 P_BARRIER = 0x03,
181 P_BITMAP = 0x04,
182 P_BECOME_SYNC_TARGET = 0x05,
183 P_BECOME_SYNC_SOURCE = 0x06,
184 P_UNPLUG_REMOTE = 0x07, /* Used at various times to hint the peer */
185 P_DATA_REQUEST = 0x08, /* Used to ask for a data block */
186 P_RS_DATA_REQUEST = 0x09, /* Used to ask for a data block for resync */
187 P_SYNC_PARAM = 0x0a,
188 P_PROTOCOL = 0x0b,
189 P_UUIDS = 0x0c,
190 P_SIZES = 0x0d,
191 P_STATE = 0x0e,
192 P_SYNC_UUID = 0x0f,
193 P_AUTH_CHALLENGE = 0x10,
194 P_AUTH_RESPONSE = 0x11,
195 P_STATE_CHG_REQ = 0x12,
196
197	/* asender (meta socket) */
198 P_PING = 0x13,
199 P_PING_ACK = 0x14,
200 P_RECV_ACK = 0x15, /* Used in protocol B */
201 P_WRITE_ACK = 0x16, /* Used in protocol C */
202 P_RS_WRITE_ACK = 0x17, /* Is a P_WRITE_ACK, additionally call set_in_sync(). */
203 P_DISCARD_ACK = 0x18, /* Used in proto C, two-primaries conflict detection */
204 P_NEG_ACK = 0x19, /* Sent if local disk is unusable */
205 P_NEG_DREPLY = 0x1a, /* Local disk is broken... */
206 P_NEG_RS_DREPLY = 0x1b, /* Local disk is broken... */
207 P_BARRIER_ACK = 0x1c,
208 P_STATE_CHG_REPLY = 0x1d,
209
210 /* "new" commands, no longer fitting into the ordering scheme above */
211
212 P_OV_REQUEST = 0x1e, /* data socket */
213 P_OV_REPLY = 0x1f,
214 P_OV_RESULT = 0x20, /* meta socket */
215 P_CSUM_RS_REQUEST = 0x21, /* data socket */
216 P_RS_IS_IN_SYNC = 0x22, /* meta socket */
217 P_SYNC_PARAM89 = 0x23, /* data socket, protocol version 89 replacement for P_SYNC_PARAM */
218 P_COMPRESSED_BITMAP = 0x24, /* compressed or otherwise encoded bitmap transfer */
219 /* P_CKPT_FENCE_REQ = 0x25, * currently reserved for protocol D */
220 /* P_CKPT_DISABLE_REQ = 0x26, * currently reserved for protocol D */
221 P_DELAY_PROBE = 0x27, /* is used on BOTH sockets */
222 P_OUT_OF_SYNC = 0x28, /* Mark as out of sync (Outrunning), data socket */
223 P_RS_CANCEL = 0x29, /* meta: Used to cancel RS_DATA_REQUEST packet by SyncSource */
224
225 P_MAX_CMD = 0x2A,
226 P_MAY_IGNORE = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */
227 P_MAX_OPT_CMD = 0x101,
228
229 /* special command ids for handshake */
230
231 P_HAND_SHAKE_M = 0xfff1, /* First Packet on the MetaSock */
232 P_HAND_SHAKE_S = 0xfff2, /* First Packet on the Socket */
233
234 P_HAND_SHAKE = 0xfffe /* FIXED for the next century! */
235 };
236
237 extern const char *cmdname(enum drbd_packet cmd);
238
239 /* for sending/receiving the bitmap,
240 * possibly in some encoding scheme */
241 struct bm_xfer_ctx {
242 /* "const"
243 * stores total bits and long words
244 * of the bitmap, so we don't need to
245 * call the accessor functions over and again. */
246 unsigned long bm_bits;
247 unsigned long bm_words;
248 /* during xfer, current position within the bitmap */
249 unsigned long bit_offset;
250 unsigned long word_offset;
251
252 /* statistics; index: (h->command == P_BITMAP) */
253 unsigned packets[2];
254 unsigned bytes[2];
255 };
256
257 extern void INFO_bm_xfer_stats(struct drbd_conf *mdev,
258 const char *direction, struct bm_xfer_ctx *c);
259
260 static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
261 {
262 /* word_offset counts "native long words" (32 or 64 bit),
263 * aligned at 64 bit.
264 * Encoded packet may end at an unaligned bit offset.
265 * In case a fallback clear text packet is transmitted in
266 * between, we adjust this offset back to the last 64bit
267 * aligned "native long word", which makes coding and decoding
268 * the plain text bitmap much more convenient. */
269 #if BITS_PER_LONG == 64
270 c->word_offset = c->bit_offset >> 6;
271 #elif BITS_PER_LONG == 32
272 c->word_offset = c->bit_offset >> 5;
273 c->word_offset &= ~(1UL);
274 #else
275 # error "unsupported BITS_PER_LONG"
276 #endif
277 }
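
/* Worked example (illustrative): with bit_offset == 70, a 64-bit host
 * computes word_offset = 70 >> 6 = 1, while a 32-bit host computes
 * 70 >> 5 = 2 and then clears bit 0, staying at word 2 -- both land on
 * the same 64-bit aligned boundary, bit 64. */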
278
279 #ifndef __packed
280 #define __packed __attribute__((packed))
281 #endif
282
283 /* This is the layout for a packet on the wire.
284 * The byteorder is the network byte order.
285	 * (except block_id and barrier fields;
286	 * these are pointers to local structs
287 * and have no relevance for the partner,
288 * which just echoes them as received.)
289 *
290 * NOTE that the payload starts at a long aligned offset,
291 * regardless of 32 or 64 bit arch!
292 */
293 struct p_header80 {
294 u32 magic;
295 u16 command;
296 u16 length; /* bytes of data after this header */
297 } __packed;
298
299	/* Header for big packets, used for data packets exceeding 64 KiB */
300 struct p_header95 {
301 u16 magic; /* use DRBD_MAGIC_BIG here */
302 u16 command;
303 u32 length; /* Use only 24 bits of that. Ignore the highest 8 bit. */
304 } __packed;
305
306 struct p_header {
307 union {
308 struct p_header80 h80;
309 struct p_header95 h95;
310 };
311 u8 payload[0];
312 };
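
/* How a receiver could tell the two header variants apart -- a sketch
 * only, assuming the DRBD_MAGIC_BIG constant from linux/drbd.h; the
 * authoritative decoding lives in drbd_receiver.c:
 */
static inline int header_is_95(struct p_header *h)
{
	/* h95 reuses the first 16 bits of the 32-bit h80 magic */
	return h->h95.magic == cpu_to_be16(DRBD_MAGIC_BIG);
}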
313
314 /*
315 * short commands, packets without payload, plain p_header:
316 * P_PING
317 * P_PING_ACK
318 * P_BECOME_SYNC_TARGET
319 * P_BECOME_SYNC_SOURCE
320 * P_UNPLUG_REMOTE
321 */
322
323 /*
324 * commands with out-of-struct payload:
325 * P_BITMAP (no additional fields)
326 * P_DATA, P_DATA_REPLY (see p_data)
327 * P_COMPRESSED_BITMAP (see receive_compressed_bitmap)
328 */
329
330 /* these defines must not be changed without changing the protocol version */
331	#define DP_HARDBARRIER	      1 /* deprecated */
332 #define DP_RW_SYNC 2 /* equals REQ_SYNC */
333 #define DP_MAY_SET_IN_SYNC 4
334 #define DP_UNPLUG 8 /* not used anymore */
335 #define DP_FUA 16 /* equals REQ_FUA */
336 #define DP_FLUSH 32 /* equals REQ_FLUSH */
337 #define DP_DISCARD 64 /* equals REQ_DISCARD */
338
339 struct p_data {
340 struct p_header head;
341 u64 sector; /* 64 bits sector number */
342 u64 block_id; /* to identify the request in protocol B&C */
343 u32 seq_num;
344 u32 dp_flags;
345 } __packed;
346
347 /*
348 * commands which share a struct:
349 * p_block_ack:
350 * P_RECV_ACK (proto B), P_WRITE_ACK (proto C),
351 * P_DISCARD_ACK (proto C, two-primaries conflict detection)
352 * p_block_req:
353 * P_DATA_REQUEST, P_RS_DATA_REQUEST
354 */
355 struct p_block_ack {
356 struct p_header head;
357 u64 sector;
358 u64 block_id;
359 u32 blksize;
360 u32 seq_num;
361 } __packed;
362
363
364 struct p_block_req {
365 struct p_header head;
366 u64 sector;
367 u64 block_id;
368 u32 blksize;
369 u32 pad; /* to multiple of 8 Byte */
370 } __packed;
371
372 /*
373 * commands with their own struct for additional fields:
374 * P_HAND_SHAKE
375 * P_BARRIER
376 * P_BARRIER_ACK
377 * P_SYNC_PARAM
378 * ReportParams
379 */
380
381 struct p_handshake {
382 struct p_header head; /* Note: vnr will be ignored */
383 u32 protocol_min;
384 u32 feature_flags;
385 u32 protocol_max;
386
387 /* should be more than enough for future enhancements
388	 * for now, feature_flags and the reserved array shall be zero.
389 */
390
391 u32 _pad;
392 u64 reserverd[7];
393 } __packed;
394 /* 80 bytes, FIXED for the next century */
395
396 struct p_barrier {
397 struct p_header head;
398 u32 barrier; /* barrier number _handle_ only */
399 u32 pad; /* to multiple of 8 Byte */
400 } __packed;
401
402 struct p_barrier_ack {
403 struct p_header head;
404 u32 barrier;
405 u32 set_size;
406 } __packed;
407
408 struct p_rs_param {
409 struct p_header head;
410 u32 rate;
411
412	/* Protocol version 88 and higher. */
413 char verify_alg[0];
414 } __packed;
415
416 struct p_rs_param_89 {
417 struct p_header head;
418 u32 rate;
419 /* protocol version 89: */
420 char verify_alg[SHARED_SECRET_MAX];
421 char csums_alg[SHARED_SECRET_MAX];
422 } __packed;
423
424 struct p_rs_param_95 {
425 struct p_header head;
426 u32 rate;
427 char verify_alg[SHARED_SECRET_MAX];
428 char csums_alg[SHARED_SECRET_MAX];
429 u32 c_plan_ahead;
430 u32 c_delay_target;
431 u32 c_fill_target;
432 u32 c_max_rate;
433 } __packed;
434
435 enum drbd_conn_flags {
436 CF_WANT_LOSE = 1,
437 CF_DRY_RUN = 2,
438 };
439
440 struct p_protocol {
441 struct p_header head;
442 u32 protocol;
443 u32 after_sb_0p;
444 u32 after_sb_1p;
445 u32 after_sb_2p;
446 u32 conn_flags;
447 u32 two_primaries;
448
449	/* Protocol version 87 and higher. */
450 char integrity_alg[0];
451
452 } __packed;
453
454 struct p_uuids {
455 struct p_header head;
456 u64 uuid[UI_EXTENDED_SIZE];
457 } __packed;
458
459 struct p_rs_uuid {
460 struct p_header head;
461 u64 uuid;
462 } __packed;
463
464 struct p_sizes {
465 struct p_header head;
466 u64 d_size; /* size of disk */
467 u64 u_size; /* user requested size */
468 u64 c_size; /* current exported size */
469 u32 max_bio_size; /* Maximal size of a BIO */
470	u16	    queue_order_type; /* not yet implemented in DRBD */
471 u16 dds_flags; /* use enum dds_flags here. */
472 } __packed;
473
474 struct p_state {
475 struct p_header head;
476 u32 state;
477 } __packed;
478
479 struct p_req_state {
480 struct p_header head;
481 u32 mask;
482 u32 val;
483 } __packed;
484
485 struct p_req_state_reply {
486 struct p_header head;
487 u32 retcode;
488 } __packed;
489
490 struct p_drbd06_param {
491 u64 size;
492 u32 state;
493 u32 blksize;
494 u32 protocol;
495 u32 version;
496 u32 gen_cnt[5];
497 u32 bit_map_gen[5];
498 } __packed;
499
500 struct p_discard {
501 struct p_header head;
502 u64 block_id;
503 u32 seq_num;
504 u32 pad;
505 } __packed;
506
507 struct p_block_desc {
508 struct p_header head;
509 u64 sector;
510 u32 blksize;
511 u32 pad; /* to multiple of 8 Byte */
512 } __packed;
513
514 /* Valid values for the encoding field.
515 * Bump proto version when changing this. */
516 enum drbd_bitmap_code {
517 /* RLE_VLI_Bytes = 0,
518 * and other bit variants had been defined during
519 * algorithm evaluation. */
520 RLE_VLI_Bits = 2,
521 };
522
523 struct p_compressed_bm {
524 struct p_header head;
525 /* (encoding & 0x0f): actual encoding, see enum drbd_bitmap_code
526 * (encoding & 0x80): polarity (set/unset) of first runlength
527 * ((encoding >> 4) & 0x07): pad_bits, number of trailing zero bits
528 * used to pad up to head.length bytes
529 */
530 u8 encoding;
531
532 u8 code[0];
533 } __packed;
534
535 struct p_delay_probe93 {
536 struct p_header head;
537 u32 seq_num; /* sequence number to match the two probe packets */
538 u32 offset; /* usecs the probe got sent after the reference time point */
539 } __packed;
540
541 /* DCBP: Drbd Compressed Bitmap Packet ... */
542 static inline enum drbd_bitmap_code
543 DCBP_get_code(struct p_compressed_bm *p)
544 {
545 return (enum drbd_bitmap_code)(p->encoding & 0x0f);
546 }
547
548 static inline void
549 DCBP_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
550 {
551 BUG_ON(code & ~0xf);
552 p->encoding = (p->encoding & ~0xf) | code;
553 }
554
555 static inline int
556 DCBP_get_start(struct p_compressed_bm *p)
557 {
558 return (p->encoding & 0x80) != 0;
559 }
560
561 static inline void
562 DCBP_set_start(struct p_compressed_bm *p, int set)
563 {
564 p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
565 }
566
567 static inline int
568 DCBP_get_pad_bits(struct p_compressed_bm *p)
569 {
570 return (p->encoding >> 4) & 0x7;
571 }
572
573 static inline void
574 DCBP_set_pad_bits(struct p_compressed_bm *p, int n)
575 {
576 BUG_ON(n & ~0x7);
577	p->encoding = (p->encoding & ~(0x7 << 4)) | (n << 4);
578 }
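
/* Putting the DCBP accessors together -- a minimal sender-side sketch
 * ("p", "first_run_set" and "pad" are placeholders; the real encoder is
 * fill_bitmap_rle_bits() in drbd_main.c):
 *
 *	p->encoding = 0;
 *	DCBP_set_start(p, first_run_set);
 *	DCBP_set_pad_bits(p, pad);
 *	DCBP_set_code(p, RLE_VLI_Bits);
 */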
579
580 /* one bitmap packet, including the p_header,
581	 * should fit within one _architecture independent_ page.
582 * so we need to use the fixed size 4KiB page size
583 * most architectures have used for a long time.
584 */
585 #define BM_PACKET_PAYLOAD_BYTES (4096 - sizeof(struct p_header))
586 #define BM_PACKET_WORDS (BM_PACKET_PAYLOAD_BYTES/sizeof(long))
587 #define BM_PACKET_VLI_BYTES_MAX (4096 - sizeof(struct p_compressed_bm))
588 #if (PAGE_SIZE < 4096)
589 /* drbd_send_bitmap / receive_bitmap would break horribly */
590 #error "PAGE_SIZE too small"
591 #endif
592
593 union p_polymorph {
594 struct p_header header;
595 struct p_handshake handshake;
596 struct p_data data;
597 struct p_block_ack block_ack;
598 struct p_barrier barrier;
599 struct p_barrier_ack barrier_ack;
600 struct p_rs_param_89 rs_param_89;
601 struct p_rs_param_95 rs_param_95;
602 struct p_protocol protocol;
603 struct p_sizes sizes;
604 struct p_uuids uuids;
605 struct p_state state;
606 struct p_req_state req_state;
607 struct p_req_state_reply req_state_reply;
608 struct p_block_req block_req;
609 struct p_delay_probe93 delay_probe93;
610 struct p_rs_uuid rs_uuid;
611 struct p_block_desc block_desc;
612 } __packed;
613
614 /**********************************************************************/
615 enum drbd_thread_state {
616 NONE,
617 RUNNING,
618 EXITING,
619 RESTARTING
620 };
621
622 struct drbd_thread {
623 spinlock_t t_lock;
624 struct task_struct *task;
625 struct completion stop;
626 enum drbd_thread_state t_state;
627 int (*function) (struct drbd_thread *);
628 struct drbd_conf *mdev;
629 int reset_cpu_mask;
630 char name[9];
631 };
632
633 static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
634 {
635 /* THINK testing the t_state seems to be uncritical in all cases
636 * (but thread_{start,stop}), so we can read it *without* the lock.
637 * --lge */
638
639 smp_rmb();
640 return thi->t_state;
641 }
642
643 struct drbd_work;
644 typedef int (*drbd_work_cb)(struct drbd_conf *, struct drbd_work *, int cancel);
645 struct drbd_work {
646 struct list_head list;
647 drbd_work_cb cb;
648 struct drbd_conf *mdev;
649 };
650
651 #include "drbd_interval.h"
652
653 struct drbd_request {
654 struct drbd_work w;
655
656 /* if local IO is not allowed, will be NULL.
657 * if local IO _is_ allowed, holds the locally submitted bio clone,
658 * or, after local IO completion, the ERR_PTR(error).
659 * see drbd_endio_pri(). */
660 struct bio *private_bio;
661
662 struct drbd_interval i;
663 unsigned int epoch; /* barrier_nr */
664
665 /* barrier_nr: used to check on "completion" whether this req was in
666 * the current epoch, and we therefore have to close it,
667 * starting a new epoch...
668 */
669
670 struct list_head tl_requests; /* ring list in the transfer log */
671 struct bio *master_bio; /* master bio pointer */
672 unsigned long rq_state; /* see comments above _req_mod() */
673 int seq_num;
674 unsigned long start_time;
675 };
676
677 struct drbd_tl_epoch {
678 struct drbd_work w;
679	struct list_head requests; /* requests before this barrier */
680 struct drbd_tl_epoch *next; /* pointer to the next barrier */
681	unsigned int br_number;  /* the barrier's identifier. */
682 int n_writes; /* number of requests attached before this barrier */
683 };
684
685 struct drbd_epoch {
686 struct list_head list;
687 unsigned int barrier_nr;
688 atomic_t epoch_size; /* increased on every request added. */
689 atomic_t active; /* increased on every req. added, and dec on every finished. */
690 unsigned long flags;
691 };
692
693 /* drbd_epoch flag bits */
694 enum {
695 DE_HAVE_BARRIER_NUMBER,
696 };
697
698 enum epoch_event {
699 EV_PUT,
700 EV_GOT_BARRIER_NR,
701 EV_BECAME_LAST,
702 EV_CLEANUP = 32, /* used as flag */
703 };
704
705 struct drbd_wq_barrier {
706 struct drbd_work w;
707 struct completion done;
708 };
709
710 struct digest_info {
711 int digest_size;
712 void *digest;
713 };
714
715 struct drbd_peer_request {
716 struct drbd_work w;
717 struct drbd_epoch *epoch; /* for writes */
718 struct page *pages;
719 atomic_t pending_bios;
720 struct drbd_interval i;
721 /* see comments on ee flag bits below */
722 unsigned long flags;
723 union {
724 u64 block_id;
725 struct digest_info *digest;
726 };
727 };
728
729 /* ee flag bits.
730 * While corresponding bios are in flight, the only modification will be
731 * set_bit WAS_ERROR, which has to be atomic.
732 * If no bios are in flight yet, or all have been completed,
733 * non-atomic modification to ee->flags is ok.
734 */
735 enum {
736 __EE_CALL_AL_COMPLETE_IO,
737 __EE_MAY_SET_IN_SYNC,
738
739 /* In case a barrier failed,
740 * we need to resubmit without the barrier flag. */
741 __EE_RESUBMITTED,
742
743 /* we may have several bios per peer request.
744 * if any of those fail, we set this flag atomically
745 * from the endio callback */
746 __EE_WAS_ERROR,
747
748 /* This ee has a pointer to a digest instead of a block id */
749 __EE_HAS_DIGEST,
750 };
751 #define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
752 #define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC)
753 #define EE_RESUBMITTED (1<<__EE_RESUBMITTED)
754 #define EE_WAS_ERROR (1<<__EE_WAS_ERROR)
755 #define EE_HAS_DIGEST (1<<__EE_HAS_DIGEST)
756
757 /* flag bits per mdev */
758 enum {
759 CREATE_BARRIER, /* next P_DATA is preceded by a P_BARRIER */
760 UNPLUG_QUEUED, /* only relevant with kernel 2.4 */
761 UNPLUG_REMOTE, /* sending a "UnplugRemote" could help */
762 MD_DIRTY, /* current uuids and flags not yet on disk */
763 USE_DEGR_WFC_T, /* degr-wfc-timeout instead of wfc-timeout. */
764 CLUSTER_ST_CHANGE, /* Cluster wide state change going on... */
765 CL_ST_CHG_SUCCESS,
766 CL_ST_CHG_FAIL,
767 CRASHED_PRIMARY, /* This node was a crashed primary.
768 * Gets cleared when the state.conn
769 * goes into C_CONNECTED state. */
770 NO_BARRIER_SUPP, /* underlying block device doesn't implement barriers */
771 CONSIDER_RESYNC,
772
773 MD_NO_FUA, /* Users wants us to not use FUA/FLUSH on meta data dev */
774 SUSPEND_IO, /* suspend application io */
775 BITMAP_IO, /* suspend application io;
776 once no more io in flight, start bitmap io */
777 BITMAP_IO_QUEUED, /* Started bitmap IO */
778 GO_DISKLESS, /* Disk is being detached, on io-error or admin request. */
779	WAS_IO_ERROR,		/* Local disk failed, returned IO error */
780 RESYNC_AFTER_NEG, /* Resync after online grow after the attach&negotiate finished. */
781 CONFIG_PENDING, /* serialization of (re)configuration requests.
782 * if set, also prevents the device from dying */
783 DEVICE_DYING, /* device became unconfigured,
784 * but worker thread is still handling the cleanup.
785	 * reconfiguring (nl_disk_conf, nl_net_conf) is disallowed,
786 * while this is set. */
787 RESIZE_PENDING, /* Size change detected locally, waiting for the response from
788 * the peer, if it changed there as well. */
789 CONN_DRY_RUN, /* Expect disconnect after resync handshake. */
790 GOT_PING_ACK, /* set when we receive a ping_ack packet, misc wait gets woken */
791 NEW_CUR_UUID, /* Create new current UUID when thawing IO */
792 AL_SUSPENDED, /* Activity logging is currently suspended. */
793 AHEAD_TO_SYNC_SOURCE, /* Ahead -> SyncSource queued */
794 B_RS_H_DONE, /* Before resync handler done (already executed) */
795 };
796
797 struct drbd_bitmap; /* opaque for drbd_conf */
798
799 /* definition of bits in bm_flags to be used in drbd_bm_lock
800 * and drbd_bitmap_io and friends. */
801 enum bm_flag {
802 /* do we need to kfree, or vfree bm_pages? */
803 BM_P_VMALLOCED = 0x10000, /* internal use only, will be masked out */
804
805 /* currently locked for bulk operation */
806 BM_LOCKED_MASK = 0x7,
807
808 /* in detail, that is: */
809 BM_DONT_CLEAR = 0x1,
810 BM_DONT_SET = 0x2,
811 BM_DONT_TEST = 0x4,
812
813 /* (test bit, count bit) allowed (common case) */
814 BM_LOCKED_TEST_ALLOWED = 0x3,
815
816 /* testing bits, as well as setting new bits allowed, but clearing bits
817 * would be unexpected. Used during bitmap receive. Setting new bits
818 * requires sending of "out-of-sync" information, though. */
819 BM_LOCKED_SET_ALLOWED = 0x1,
820
821 /* clear is not expected while bitmap is locked for bulk operation */
822 };
823
824
825 /* TODO sort members for performance
826 * MAYBE group them further */
827
828 /* THINK maybe we actually want to use the default "event/%s" worker threads
829 * or similar in linux 2.6, which uses per cpu data and threads.
830 */
831 struct drbd_work_queue {
832 struct list_head q;
833 struct semaphore s; /* producers up it, worker down()s it */
834 spinlock_t q_lock; /* to protect the list. */
835 };
836
837 struct drbd_socket {
838 struct drbd_work_queue work;
839 struct mutex mutex;
840 struct socket *socket;
841 /* this way we get our
842 * send/receive buffers off the stack */
843 union p_polymorph sbuf;
844 union p_polymorph rbuf;
845 };
846
847 struct drbd_md {
848 u64 md_offset; /* sector offset to 'super' block */
849
850 u64 la_size_sect; /* last agreed size, unit sectors */
851 u64 uuid[UI_SIZE];
852 u64 device_uuid;
853 u32 flags;
854 u32 md_size_sect;
855
856 s32 al_offset; /* signed relative sector offset to al area */
857 s32 bm_offset; /* signed relative sector offset to bitmap */
858
859 /* u32 al_nr_extents; important for restoring the AL
860 * is stored into sync_conf.al_extents, which in turn
861 * gets applied to act_log->nr_elements
862 */
863 };
864
865 /* for sync_conf and other types... */
866 #define NL_PACKET(name, number, fields) struct name { fields };
867 #define NL_INTEGER(pn,pr,member) int member;
868 #define NL_INT64(pn,pr,member) __u64 member;
869 #define NL_BIT(pn,pr,member) unsigned member:1;
870 #define NL_STRING(pn,pr,member,len) unsigned char member[len]; int member ## _len;
871 #include "linux/drbd_nl.h"
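
/* Illustrative expansion (hypothetical packet "foo"; the tag numbers
 * and the T_MAY_IGNORE-style flags are placeholders):
 *
 *	NL_PACKET(foo, 1,
 *		NL_INTEGER(1, T_MAY_IGNORE, bar)
 *		NL_STRING(2, T_MAY_IGNORE, baz, 32)
 *	)
 * becomes
 *	struct foo { int bar; unsigned char baz[32]; int baz_len; };
 */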
872
873 struct drbd_backing_dev {
874 struct block_device *backing_bdev;
875 struct block_device *md_bdev;
876 struct drbd_md md;
877 struct disk_conf dc; /* The user provided config... */
878 sector_t known_size; /* last known size of that backing device */
879 };
880
881 struct drbd_md_io {
882 struct drbd_conf *mdev;
883 struct completion event;
884 int error;
885 };
886
887 struct bm_io_work {
888 struct drbd_work w;
889 char *why;
890 enum bm_flag flags;
891 int (*io_fn)(struct drbd_conf *mdev);
892 void (*done)(struct drbd_conf *mdev, int rv);
893 };
894
895 enum write_ordering_e {
896 WO_none,
897 WO_drain_io,
898 WO_bdev_flush,
899 };
900
901 struct fifo_buffer {
902 int *values;
903 unsigned int head_index;
904 unsigned int size;
905 };
906
907 /* flag bits per tconn */
908 enum {
909 NET_CONGESTED, /* The data socket is congested */
910 DISCARD_CONCURRENT, /* Set on one node, cleared on the peer! */
911 SEND_PING, /* whether asender should send a ping asap */
912 SIGNAL_ASENDER, /* whether asender wants to be interrupted */
913 };
914
915 struct drbd_tconn { /* is a resource from the config file */
916 char *name; /* Resource name */
917	struct list_head all_tconn;	      /* List of all drbd_tconn, protected by global_state_lock */
918 struct drbd_conf *volume0; /* TODO: Remove me again */
919 struct idr volumes; /* <tconn, vnr> to mdev mapping */
920
921 unsigned long flags;
922 struct net_conf *net_conf; /* protected by get_net_conf() and put_net_conf() */
923 atomic_t net_cnt; /* Users of net_conf */
924 wait_queue_head_t net_cnt_wait;
925
926 struct drbd_socket data; /* data/barrier/cstate/parameter packets */
927 struct drbd_socket meta; /* ping/ack (metadata) packets */
928 int agreed_pro_version; /* actually used protocol version */
929 unsigned long last_received; /* in jiffies, either socket */
930 unsigned int ko_count;
931
932 spinlock_t req_lock;
933 struct drbd_tl_epoch *unused_spare_tle; /* for pre-allocation */
934 struct drbd_tl_epoch *newest_tle;
935 struct drbd_tl_epoch *oldest_tle;
936 struct list_head out_of_sequence_requests;
937
938 struct crypto_hash *cram_hmac_tfm;
939 struct crypto_hash *integrity_w_tfm; /* to be used by the worker thread */
940 struct crypto_hash *integrity_r_tfm; /* to be used by the receiver thread */
941 void *int_dig_out;
942 void *int_dig_in;
943 void *int_dig_vv;
944
945 struct drbd_thread receiver;
946 struct drbd_thread worker;
947 struct drbd_thread asender;
948 cpumask_var_t cpu_mask;
949 };
950
951 struct drbd_conf {
952 struct drbd_tconn *tconn;
953 int vnr; /* volume number within the connection */
954
955 /* things that are stored as / read from meta data on disk */
956 unsigned long flags;
957
958 /* configured by drbdsetup */
959 struct syncer_conf sync_conf;
960 struct drbd_backing_dev *ldev __protected_by(local);
961
962 sector_t p_size; /* partner's disk size */
963 struct request_queue *rq_queue;
964 struct block_device *this_bdev;
965 struct gendisk *vdisk;
966
967 struct drbd_work resync_work,
968 unplug_work,
969 go_diskless,
970 md_sync_work,
971 start_resync_work;
972 struct timer_list resync_timer;
973 struct timer_list md_sync_timer;
974 struct timer_list start_resync_timer;
975 struct timer_list request_timer;
976 #ifdef DRBD_DEBUG_MD_SYNC
977 struct {
978 unsigned int line;
979 const char* func;
980 } last_md_mark_dirty;
981 #endif
982
983 /* Used after attach while negotiating new disk state. */
984 union drbd_state new_state_tmp;
985
986 union drbd_state state;
987 wait_queue_head_t misc_wait;
988 wait_queue_head_t state_wait; /* upon each state change. */
989 unsigned int send_cnt;
990 unsigned int recv_cnt;
991 unsigned int read_cnt;
992 unsigned int writ_cnt;
993 unsigned int al_writ_cnt;
994 unsigned int bm_writ_cnt;
995 atomic_t ap_bio_cnt; /* Requests we need to complete */
996 atomic_t ap_pending_cnt; /* AP data packets on the wire, ack expected */
997 atomic_t rs_pending_cnt; /* RS request/data packets on the wire */
998	atomic_t unacked_cnt;	 /* Need to send replies for */
999 atomic_t local_cnt; /* Waiting for local completion */
1000
1001 /* Interval tree of pending local requests */
1002 struct rb_root read_requests;
1003 struct rb_root write_requests;
1004
1005 /* blocks to resync in this run [unit BM_BLOCK_SIZE] */
1006 unsigned long rs_total;
1007 /* number of resync blocks that failed in this run */
1008 unsigned long rs_failed;
1009 /* Syncer's start time [unit jiffies] */
1010 unsigned long rs_start;
1011 /* cumulated time in PausedSyncX state [unit jiffies] */
1012 unsigned long rs_paused;
1013 /* skipped because csum was equal [unit BM_BLOCK_SIZE] */
1014 unsigned long rs_same_csum;
1015 #define DRBD_SYNC_MARKS 8
1016 #define DRBD_SYNC_MARK_STEP (3*HZ)
1017 /* block not up-to-date at mark [unit BM_BLOCK_SIZE] */
1018 unsigned long rs_mark_left[DRBD_SYNC_MARKS];
1019	/* marks' time [unit jiffies] */
1020 unsigned long rs_mark_time[DRBD_SYNC_MARKS];
1021 /* current index into rs_mark_{left,time} */
1022 int rs_last_mark;
1023
1024 /* where does the admin want us to start? (sector) */
1025 sector_t ov_start_sector;
1026 /* where are we now? (sector) */
1027 sector_t ov_position;
1028 /* Start sector of out of sync range (to merge printk reporting). */
1029 sector_t ov_last_oos_start;
1030 /* size of out-of-sync range in sectors. */
1031 sector_t ov_last_oos_size;
1032 unsigned long ov_left; /* in bits */
1033 struct crypto_hash *csums_tfm;
1034 struct crypto_hash *verify_tfm;
1035
1036 struct drbd_bitmap *bitmap;
1037 unsigned long bm_resync_fo; /* bit offset for drbd_bm_find_next */
1038
1039 /* Used to track operations of resync... */
1040 struct lru_cache *resync;
1041 /* Number of locked elements in resync LRU */
1042 unsigned int resync_locked;
1043 /* resync extent number waiting for application requests */
1044 unsigned int resync_wenr;
1045
1046 int open_cnt;
1047 u64 *p_uuid;
1048 struct drbd_epoch *current_epoch;
1049 spinlock_t epoch_lock;
1050 unsigned int epochs;
1051 enum write_ordering_e write_ordering;
1052 struct list_head active_ee; /* IO in progress (P_DATA gets written to disk) */
1053 struct list_head sync_ee; /* IO in progress (P_RS_DATA_REPLY gets written to disk) */
1054 struct list_head done_ee; /* need to send P_WRITE_ACK */
1055 struct list_head read_ee; /* [RS]P_DATA_REQUEST being read */
1056 struct list_head net_ee; /* zero-copy network send in progress */
1057
1058 int next_barrier_nr;
1059 struct list_head resync_reads;
1060 atomic_t pp_in_use; /* allocated from page pool */
1061 atomic_t pp_in_use_by_net; /* sendpage()d, still referenced by tcp */
1062 wait_queue_head_t ee_wait;
1063 struct page *md_io_page; /* one page buffer for md_io */
1064 struct page *md_io_tmpp; /* for logical_block_size != 512 */
1065 struct mutex md_io_mutex; /* protects the md_io_buffer */
1066 spinlock_t al_lock;
1067 wait_queue_head_t al_wait;
1068 struct lru_cache *act_log; /* activity log */
1069 unsigned int al_tr_number;
1070 int al_tr_cycle;
1071 int al_tr_pos; /* position of the next transaction in the journal */
1072 wait_queue_head_t seq_wait;
1073 atomic_t packet_seq;
1074 unsigned int peer_seq;
1075 spinlock_t peer_seq_lock;
1076 unsigned int minor;
1077 unsigned long comm_bm_set; /* communicated number of set bits. */
1078 struct bm_io_work bm_io_work;
1079 u64 ed_uuid; /* UUID of the exposed data */
1080 struct mutex state_mutex;
1081	char congestion_reason;  /* Why we were congested... */
1082 atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
1083 atomic_t rs_sect_ev; /* for submitted resync data rate, both */
1084 int rs_last_sect_ev; /* counter to compare with */
1085 int rs_last_events; /* counter of read or write "events" (unit sectors)
1086 * on the lower level device when we last looked. */
1087 int c_sync_rate; /* current resync rate after syncer throttle magic */
1088	struct fifo_buffer rs_plan_s; /* correction values of resync planner */
1089 int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
1090 int rs_planed; /* resync sectors already planned */
1091 atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
1092 int peer_max_bio_size;
1093 int local_max_bio_size;
1094 };
1095
1096 static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
1097 {
1098 struct drbd_conf *mdev;
1099
1100 mdev = minor < minor_count ? minor_table[minor] : NULL;
1101
1102 return mdev;
1103 }
1104
1105 static inline unsigned int mdev_to_minor(struct drbd_conf *mdev)
1106 {
1107 return mdev->minor;
1108 }
1109
1110 static inline struct drbd_conf *vnr_to_mdev(struct drbd_tconn *tconn, int vnr)
1111 {
1112 return (struct drbd_conf *)idr_find(&tconn->volumes, vnr);
1113 }
1114
1115	/* returns 1 if it was successful,
1116	 * returns 0 if there was no data socket.
1117	 * so wherever you are going to use the data.socket, e.g. do
1118	 * if (!drbd_get_data_sock(mdev->tconn))
1119	 *	return 0;
1120	 * CODE();
1121	 * drbd_put_data_sock(mdev->tconn);
1122	 */
1123 static inline int drbd_get_data_sock(struct drbd_tconn *tconn)
1124 {
1125 mutex_lock(&tconn->data.mutex);
1126 /* drbd_disconnect() could have called drbd_free_sock()
1127 * while we were waiting in down()... */
1128 if (unlikely(tconn->data.socket == NULL)) {
1129 mutex_unlock(&tconn->data.mutex);
1130 return 0;
1131 }
1132 return 1;
1133 }
1134
1135 static inline void drbd_put_data_sock(struct drbd_tconn *tconn)
1136 {
1137 mutex_unlock(&tconn->data.mutex);
1138 }
1139
1140 /*
1141 * function declarations
1142 *************************/
1143
1144 /* drbd_main.c */
1145
1146 enum dds_flags {
1147 DDSF_FORCED = 1,
1148 DDSF_NO_RESYNC = 2, /* Do not run a resync for the new space */
1149 };
1150
1151 extern void drbd_init_set_defaults(struct drbd_conf *mdev);
1152 extern int drbd_thread_start(struct drbd_thread *thi);
1153 extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
1154 extern char *drbd_task_to_thread_name(struct drbd_conf *mdev, struct task_struct *task);
1155 #ifdef CONFIG_SMP
1156 extern void drbd_thread_current_set_cpu(struct drbd_thread *thi);
1157 extern void drbd_calc_cpu_mask(struct drbd_tconn *tconn);
1158 #else
1159 #define drbd_thread_current_set_cpu(A) ({})
1160 #define drbd_calc_cpu_mask(A) ({})
1161 #endif
1162 extern void drbd_free_resources(struct drbd_conf *mdev);
1163 extern void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
1164 unsigned int set_size);
1165 extern void tl_clear(struct drbd_conf *mdev);
1166 extern void _tl_add_barrier(struct drbd_conf *, struct drbd_tl_epoch *);
1167 extern void drbd_free_sock(struct drbd_tconn *tconn);
1168 extern int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
1169 void *buf, size_t size, unsigned msg_flags);
1170 extern int drbd_send_protocol(struct drbd_tconn *tconn);
1171 extern int drbd_send_uuids(struct drbd_conf *mdev);
1172 extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev);
1173 extern int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev);
1174 extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags);
1175 extern int _drbd_send_state(struct drbd_conf *mdev);
1176 extern int drbd_send_state(struct drbd_conf *mdev);
1177 extern int _conn_send_cmd(struct drbd_tconn *tconn, int vnr, struct socket *sock,
1178 enum drbd_packet cmd, struct p_header *h, size_t size,
1179 unsigned msg_flags);
1180 extern int conn_send_cmd2(struct drbd_tconn *tconn, enum drbd_packet cmd,
1181 char *data, size_t size);
1182 #define USE_DATA_SOCKET 1
1183 #define USE_META_SOCKET 0
1184 extern int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
1185 enum drbd_packet cmd, struct p_header *h, size_t size);
1186 extern int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc);
1187 extern int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr,
1188 u32 set_size);
1189 extern int drbd_send_ack(struct drbd_conf *, enum drbd_packet,
1190 struct drbd_peer_request *);
1191 extern int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
1192 struct p_block_req *rp);
1193 extern int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd,
1194 struct p_data *dp, int data_size);
1195 extern int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd,
1196 sector_t sector, int blksize, u64 block_id);
1197 extern int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req);
1198 extern int drbd_send_block(struct drbd_conf *, enum drbd_packet,
1199 struct drbd_peer_request *);
1200 extern int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req);
1201 extern int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
1202 sector_t sector, int size, u64 block_id);
1203 extern int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector,
1204 int size, void *digest, int digest_size,
1205 enum drbd_packet cmd);
1206	extern int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size);
1207
1208 extern int drbd_send_bitmap(struct drbd_conf *mdev);
1209 extern int _drbd_send_bitmap(struct drbd_conf *mdev);
1210 extern int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode);
1211 extern void drbd_free_bc(struct drbd_backing_dev *ldev);
1212 extern void drbd_mdev_cleanup(struct drbd_conf *mdev);
1213 void drbd_print_uuids(struct drbd_conf *mdev, const char *text);
1214
1215 extern void drbd_md_sync(struct drbd_conf *mdev);
1216 extern int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev);
1217 extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
1218 extern void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
1219 extern void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local);
1220 extern void _drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local);
1221 extern void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local);
1222 extern void drbd_md_set_flag(struct drbd_conf *mdev, int flags) __must_hold(local);
1223	extern void drbd_md_clear_flag(struct drbd_conf *mdev, int flags) __must_hold(local);
1224 extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
1225 #ifndef DRBD_DEBUG_MD_SYNC
1226 extern void drbd_md_mark_dirty(struct drbd_conf *mdev);
1227 #else
1228 #define drbd_md_mark_dirty(m) drbd_md_mark_dirty_(m, __LINE__ , __func__ )
1229 extern void drbd_md_mark_dirty_(struct drbd_conf *mdev,
1230 unsigned int line, const char *func);
1231 #endif
1232 extern void drbd_queue_bitmap_io(struct drbd_conf *mdev,
1233 int (*io_fn)(struct drbd_conf *),
1234 void (*done)(struct drbd_conf *, int),
1235 char *why, enum bm_flag flags);
1236 extern int drbd_bitmap_io(struct drbd_conf *mdev,
1237 int (*io_fn)(struct drbd_conf *),
1238 char *why, enum bm_flag flags);
1239 extern int drbd_bmio_set_n_write(struct drbd_conf *mdev);
1240 extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev);
1241 extern void drbd_go_diskless(struct drbd_conf *mdev);
1242 extern void drbd_ldev_destroy(struct drbd_conf *mdev);
1243
1244
1245 /* Meta data layout
1246 We reserve a 128MB Block (4k aligned)
1247 * either at the end of the backing device
1248 * or on a separate meta data device. */
1249
1250 #define MD_RESERVED_SECT (128LU << 11) /* 128 MB, unit sectors */
1251 /* The following numbers are sectors */
1252 #define MD_AL_OFFSET 8 /* 8 Sectors after start of meta area */
1253 #define MD_AL_MAX_SIZE 64 /* = 32 kb LOG ~ 3776 extents ~ 14 GB Storage */
1254 /* Allows up to about 3.8TB */
1255 #define MD_BM_OFFSET (MD_AL_OFFSET + MD_AL_MAX_SIZE)
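
/* Resulting layout within the reserved area, in 512-byte sectors
 * (derived from the constants above):
 *	sector 0       the 'super' block
 *	sectors 8..71  activity log (MD_AL_OFFSET, MD_AL_MAX_SIZE)
 *	sector 72..    bitmap (MD_BM_OFFSET)
 * with MD_RESERVED_SECT = 128 << 11 = 262144 sectors = 128 MB total. */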
1256
1257	/* Since the smallest IO unit is usually 512 bytes */
1258 #define MD_SECTOR_SHIFT 9
1259 #define MD_SECTOR_SIZE (1<<MD_SECTOR_SHIFT)
1260
1261 /* activity log */
1262 #define AL_EXTENTS_PT ((MD_SECTOR_SIZE-12)/8-1) /* 61 ; Extents per 512B sector */
1263 #define AL_EXTENT_SHIFT 22 /* One extent represents 4M Storage */
1264 #define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT)
1265
1266 #if BITS_PER_LONG == 32
1267 #define LN2_BPL 5
1268 #define cpu_to_lel(A) cpu_to_le32(A)
1269 #define lel_to_cpu(A) le32_to_cpu(A)
1270 #elif BITS_PER_LONG == 64
1271 #define LN2_BPL 6
1272 #define cpu_to_lel(A) cpu_to_le64(A)
1273 #define lel_to_cpu(A) le64_to_cpu(A)
1274 #else
1275 #error "LN2 of BITS_PER_LONG unknown!"
1276 #endif
1277
1278 /* resync bitmap */
1279 /* 16MB sized 'bitmap extent' to track syncer usage */
1280 struct bm_extent {
1281 int rs_left; /* number of bits set (out of sync) in this extent. */
1282 int rs_failed; /* number of failed resync requests in this extent. */
1283 unsigned long flags;
1284 struct lc_element lce;
1285 };
1286
1287 #define BME_NO_WRITES 0 /* bm_extent.flags: no more requests on this one! */
1288 #define BME_LOCKED 1 /* bm_extent.flags: syncer active on this one. */
1289 #define BME_PRIORITY 2 /* finish resync IO on this extent ASAP! App IO waiting! */
1290
1291 /* drbd_bitmap.c */
1292 /*
1293 * We need to store one bit for a block.
1294 * Example: 1GB disk @ 4096 byte blocks ==> we need 32 KB bitmap.
1295 * Bit 0 ==> local node thinks this block is binary identical on both nodes
1296 * Bit 1 ==> local node thinks this block needs to be synced.
1297 */
1298
1299 #define SLEEP_TIME (HZ/10)
1300
1301 #define BM_BLOCK_SHIFT 12 /* 4k per bit */
1302 #define BM_BLOCK_SIZE (1<<BM_BLOCK_SHIFT)
1303 /* (9+3) : 512 bytes @ 8 bits; representing 16M storage
1304 * per sector of on disk bitmap */
1305 #define BM_EXT_SHIFT (BM_BLOCK_SHIFT + MD_SECTOR_SHIFT + 3) /* = 24 */
1306 #define BM_EXT_SIZE (1<<BM_EXT_SHIFT)
1307
1308 #if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12)
1309 #error "HAVE YOU FIXED drbdmeta AS WELL??"
1310 #endif
1311
1312 /* thus many _storage_ sectors are described by one bit */
1313 #define BM_SECT_TO_BIT(x) ((x)>>(BM_BLOCK_SHIFT-9))
1314 #define BM_BIT_TO_SECT(x) ((sector_t)(x)<<(BM_BLOCK_SHIFT-9))
1315 #define BM_SECT_PER_BIT BM_BIT_TO_SECT(1)
1316
1317 /* bit to represented kilo byte conversion */
1318 #define Bit2KB(bits) ((bits)<<(BM_BLOCK_SHIFT-10))
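
/* Worked example (illustrative): a 1 TiB device has 2^31 512-byte
 * sectors; BM_SECT_TO_BIT(2^31) = 2^28 bits, i.e. a 32 MiB bitmap --
 * the same 1 GiB ==> 32 KiB ratio quoted above. Bit2KB() converts a
 * bit count (e.g. from drbd_bm_total_weight()) back into KiB of
 * storage: Bit2KB(2^28) = 2^30 KiB = 1 TiB. */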
1319
1320 /* in which _bitmap_ extent (resp. sector) the bit for a certain
1321 * _storage_ sector is located in */
1322 #define BM_SECT_TO_EXT(x) ((x)>>(BM_EXT_SHIFT-9))
1323
1324	/* how many _storage_ sectors we have per bitmap sector */
1325 #define BM_EXT_TO_SECT(x) ((sector_t)(x) << (BM_EXT_SHIFT-9))
1326 #define BM_SECT_PER_EXT BM_EXT_TO_SECT(1)
1327
1328 /* in one sector of the bitmap, we have this many activity_log extents. */
1329 #define AL_EXT_PER_BM_SECT (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT))
1330 #define BM_WORDS_PER_AL_EXT (1 << (AL_EXTENT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
1331
1332 #define BM_BLOCKS_PER_BM_EXT_B (BM_EXT_SHIFT - BM_BLOCK_SHIFT)
1333 #define BM_BLOCKS_PER_BM_EXT_MASK ((1<<BM_BLOCKS_PER_BM_EXT_B) - 1)
1334
1335 /* the extent in "PER_EXTENT" below is an activity log extent
1336 * we need that many (long words/bytes) to store the bitmap
1337 * of one AL_EXTENT_SIZE chunk of storage.
1338 * we can store the bitmap for that many AL_EXTENTS within
1339 * one sector of the _on_disk_ bitmap:
1340 * bit 0 bit 37 bit 38 bit (512*8)-1
1341 * ...|........|........|.. // ..|........|
1342 * sect. 0 `296 `304 ^(512*8*8)-1
1343 *
1344 #define BM_WORDS_PER_EXT ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / BITS_PER_LONG )
1345 #define BM_BYTES_PER_EXT ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / 8 ) // 128
1346 #define BM_EXT_PER_SECT ( 512 / BM_BYTES_PER_EXTENT ) // 4
1347 */
1348
1349 #define DRBD_MAX_SECTORS_32 (0xffffffffLU)
1350 #define DRBD_MAX_SECTORS_BM \
1351 ((MD_RESERVED_SECT - MD_BM_OFFSET) * (1LL<<(BM_EXT_SHIFT-9)))
1352 #if DRBD_MAX_SECTORS_BM < DRBD_MAX_SECTORS_32
1353 #define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_BM
1354 #define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_BM
1355 #elif !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
1356 #define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_32
1357 #define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32
1358 #else
1359 #define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_BM
1360 /* 16 TB in units of sectors */
1361 #if BITS_PER_LONG == 32
1362 /* adjust by one page worth of bitmap,
1363 * so we won't wrap around in drbd_bm_find_next_bit.
1364 * you should use 64bit OS for that much storage, anyways. */
1365 #define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff)
1366 #else
1367 /* we allow up to 1 PiB now on 64bit architecture with "flexible" meta data */
1368 #define DRBD_MAX_SECTORS_FLEX (1UL << 51)
1369 /* corresponds to (1UL << 38) bits right now. */
1370 #endif
1371 #endif
1372
1373 #define HT_SHIFT 8
1374 #define DRBD_MAX_BIO_SIZE (1U<<(9+HT_SHIFT))
1375 #define DRBD_MAX_BIO_SIZE_SAFE (1 << 12) /* Works always = 4k */
1376
1377	#define DRBD_MAX_SIZE_H80_PACKET (1 << 15) /* The old header only allows packets up to 32 KiB of data */
1378
1379 extern int drbd_bm_init(struct drbd_conf *mdev);
1380 extern int drbd_bm_resize(struct drbd_conf *mdev, sector_t sectors, int set_new_bits);
1381 extern void drbd_bm_cleanup(struct drbd_conf *mdev);
1382 extern void drbd_bm_set_all(struct drbd_conf *mdev);
1383 extern void drbd_bm_clear_all(struct drbd_conf *mdev);
1384 /* set/clear/test only a few bits at a time */
1385 extern int drbd_bm_set_bits(
1386 struct drbd_conf *mdev, unsigned long s, unsigned long e);
1387 extern int drbd_bm_clear_bits(
1388 struct drbd_conf *mdev, unsigned long s, unsigned long e);
1389 extern int drbd_bm_count_bits(
1390 struct drbd_conf *mdev, const unsigned long s, const unsigned long e);
1391 /* bm_set_bits variant for use while holding drbd_bm_lock,
1392 * may process the whole bitmap in one go */
1393 extern void _drbd_bm_set_bits(struct drbd_conf *mdev,
1394 const unsigned long s, const unsigned long e);
1395 extern int drbd_bm_test_bit(struct drbd_conf *mdev, unsigned long bitnr);
1396 extern int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr);
1397 extern int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local);
1398 extern int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local);
1399 extern int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local);
1400 extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev,
1401 unsigned long al_enr);
1402 extern size_t drbd_bm_words(struct drbd_conf *mdev);
1403 extern unsigned long drbd_bm_bits(struct drbd_conf *mdev);
1404 extern sector_t drbd_bm_capacity(struct drbd_conf *mdev);
1405
1406 #define DRBD_END_OF_BITMAP (~(unsigned long)0)
1407 extern unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo);
1408 /* bm_find_next variants for use while you hold drbd_bm_lock() */
1409 extern unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo);
1410 extern unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo);
1411 extern unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev);
1412 extern unsigned long drbd_bm_total_weight(struct drbd_conf *mdev);
1413 extern int drbd_bm_rs_done(struct drbd_conf *mdev);
1414 /* for receive_bitmap */
1415 extern void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset,
1416 size_t number, unsigned long *buffer);
1417 /* for _drbd_send_bitmap */
1418 extern void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset,
1419 size_t number, unsigned long *buffer);
1420
1421 extern void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags);
1422 extern void drbd_bm_unlock(struct drbd_conf *mdev);
1423 /* drbd_main.c */
1424
1425 extern struct kmem_cache *drbd_request_cache;
1426 extern struct kmem_cache *drbd_ee_cache; /* peer requests */
1427 extern struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */
1428 extern struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
1429 extern mempool_t *drbd_request_mempool;
1430 extern mempool_t *drbd_ee_mempool;
1431
1432 extern struct page *drbd_pp_pool; /* drbd's page pool */
1433 extern spinlock_t drbd_pp_lock;
1434 extern int drbd_pp_vacant;
1435 extern wait_queue_head_t drbd_pp_wait;
1436
1437 extern rwlock_t global_state_lock;
1438
1439 extern struct drbd_conf *drbd_new_device(unsigned int minor);
1440 extern void drbd_free_mdev(struct drbd_conf *mdev);
1441
1442 struct drbd_tconn *drbd_new_tconn(char *name);
1443 extern void drbd_free_tconn(struct drbd_tconn *tconn);
1444
1445 extern int proc_details;
1446
1447 /* drbd_req */
1448 extern int drbd_make_request(struct request_queue *q, struct bio *bio);
1449 extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req);
1450 extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec);
1451 extern int is_valid_ar_handle(struct drbd_request *, sector_t);
1452
1453
1454 /* drbd_nl.c */
1455 extern void drbd_suspend_io(struct drbd_conf *mdev);
1456 extern void drbd_resume_io(struct drbd_conf *mdev);
1457 extern char *ppsize(char *buf, unsigned long long size);
1458 extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, int);
1459 enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 };
1460 extern enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local);
1461 extern void resync_after_online_grow(struct drbd_conf *);
1462 extern void drbd_reconsider_max_bio_size(struct drbd_conf *mdev);
1463 extern enum drbd_state_rv drbd_set_role(struct drbd_conf *mdev,
1464 enum drbd_role new_role,
1465 int force);
1466 extern enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev);
1467 extern void drbd_try_outdate_peer_async(struct drbd_conf *mdev);
1468 extern int drbd_khelper(struct drbd_conf *mdev, char *cmd);
1469
1470 /* drbd_worker.c */
1471 extern int drbd_worker(struct drbd_thread *thi);
1472 extern int drbd_alter_sa(struct drbd_conf *mdev, int na);
1473 extern void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side);
1474 extern void resume_next_sg(struct drbd_conf *mdev);
1475 extern void suspend_other_sg(struct drbd_conf *mdev);
1476 extern int drbd_resync_finished(struct drbd_conf *mdev);
1477 /* maybe rather drbd_main.c ? */
1478 extern int drbd_md_sync_page_io(struct drbd_conf *mdev,
1479 struct drbd_backing_dev *bdev, sector_t sector, int rw);
1480 extern void drbd_ov_oos_found(struct drbd_conf*, sector_t, int);
1481 extern void drbd_rs_controller_reset(struct drbd_conf *mdev);
1482
1483 static inline void ov_oos_print(struct drbd_conf *mdev)
1484 {
1485 if (mdev->ov_last_oos_size) {
1486 dev_err(DEV, "Out of sync: start=%llu, size=%lu (sectors)\n",
1487 (unsigned long long)mdev->ov_last_oos_start,
1488 (unsigned long)mdev->ov_last_oos_size);
1489 }
1490	mdev->ov_last_oos_size = 0;
1491 }
1492
1493
1494 extern void drbd_csum_bio(struct drbd_conf *, struct crypto_hash *, struct bio *, void *);
1495 extern void drbd_csum_ee(struct drbd_conf *, struct crypto_hash *,
1496 struct drbd_peer_request *, void *);
1497 /* worker callbacks */
1498 extern int w_req_cancel_conflict(struct drbd_conf *, struct drbd_work *, int);
1499 extern int w_read_retry_remote(struct drbd_conf *, struct drbd_work *, int);
1500 extern int w_e_end_data_req(struct drbd_conf *, struct drbd_work *, int);
1501 extern int w_e_end_rsdata_req(struct drbd_conf *, struct drbd_work *, int);
1502 extern int w_e_end_csum_rs_req(struct drbd_conf *, struct drbd_work *, int);
1503 extern int w_e_end_ov_reply(struct drbd_conf *, struct drbd_work *, int);
1504 extern int w_e_end_ov_req(struct drbd_conf *, struct drbd_work *, int);
1505 extern int w_ov_finished(struct drbd_conf *, struct drbd_work *, int);
1506 extern int w_resync_timer(struct drbd_conf *, struct drbd_work *, int);
1507 extern int w_resume_next_sg(struct drbd_conf *, struct drbd_work *, int);
1508 extern int w_send_write_hint(struct drbd_conf *, struct drbd_work *, int);
1509 extern int w_send_dblock(struct drbd_conf *, struct drbd_work *, int);
1510 extern int w_send_barrier(struct drbd_conf *, struct drbd_work *, int);
1511 extern int w_send_read_req(struct drbd_conf *, struct drbd_work *, int);
1512 extern int w_prev_work_done(struct drbd_conf *, struct drbd_work *, int);
1513 extern int w_e_reissue(struct drbd_conf *, struct drbd_work *, int);
1514 extern int w_restart_disk_io(struct drbd_conf *, struct drbd_work *, int);
1515 extern int w_send_oos(struct drbd_conf *, struct drbd_work *, int);
1516 extern int w_start_resync(struct drbd_conf *, struct drbd_work *, int);
1517
1518 extern void resync_timer_fn(unsigned long data);
1519 extern void start_resync_timer_fn(unsigned long data);
1520
1521 /* drbd_receiver.c */
1522 extern int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector);
1523 extern int drbd_submit_ee(struct drbd_conf *, struct drbd_peer_request *,
1524 const unsigned, const int);
1525 extern int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list);
1526 extern struct drbd_peer_request *drbd_alloc_ee(struct drbd_conf *,
1527 u64, sector_t, unsigned int,
1528 gfp_t) __must_hold(local);
1529 extern void drbd_free_some_ee(struct drbd_conf *, struct drbd_peer_request *,
1530 int);
1531 #define drbd_free_ee(m,e) drbd_free_some_ee(m, e, 0)
1532 #define drbd_free_net_ee(m,e) drbd_free_some_ee(m, e, 1)
1533 extern void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
1534 struct list_head *head);
1535 extern void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
1536 struct list_head *head);
1537 extern void drbd_set_recv_tcq(struct drbd_conf *mdev, int tcq_enabled);
1538 extern void _drbd_clear_done_ee(struct drbd_conf *mdev, struct list_head *to_be_freed);
1539 extern void drbd_flush_workqueue(struct drbd_conf *mdev);
1540
1541 /* yes, there is kernel_setsockopt, but only since 2.6.18. we don't need to
1542 * mess with get_fs/set_fs, we know we are KERNEL_DS always. */
1543 static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
1544 char __user *optval, int optlen)
1545 {
1546 int err;
1547 if (level == SOL_SOCKET)
1548 err = sock_setsockopt(sock, level, optname, optval, optlen);
1549 else
1550 err = sock->ops->setsockopt(sock, level, optname, optval,
1551 optlen);
1552 return err;
1553 }
1554
1555 static inline void drbd_tcp_cork(struct socket *sock)
1556 {
1557 int __user val = 1;
1558 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
1559 (char __user *)&val, sizeof(val));
1560 }
1561
1562 static inline void drbd_tcp_uncork(struct socket *sock)
1563 {
1564 int __user val = 0;
1565 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
1566 (char __user *)&val, sizeof(val));
1567 }
1568
1569 static inline void drbd_tcp_nodelay(struct socket *sock)
1570 {
1571 int __user val = 1;
1572 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
1573 (char __user *)&val, sizeof(val));
1574 }
1575
1576 static inline void drbd_tcp_quickack(struct socket *sock)
1577 {
1578 int __user val = 2;
1579 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
1580 (char __user *)&val, sizeof(val));
1581 }
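
/* Hedged usage sketch (illustrative only, not a call site in this file):
 * cork before a batch of related packets, uncork to flush them as one
 * burst; TCP_NODELAY and TCP_QUICKACK tune latency on the ack paths.
 *
 *	drbd_tcp_cork(sock);
 *	send_first_packet(sock);	hypothetical helpers, for shape only
 *	send_second_packet(sock);
 *	drbd_tcp_uncork(sock);		kernel may now coalesce and transmit
 */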
1582
1583 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
1584
1585 /* drbd_proc.c */
1586 extern struct proc_dir_entry *drbd_proc;
1587 extern const struct file_operations drbd_proc_fops;
1588 extern const char *drbd_conn_str(enum drbd_conns s);
1589 extern const char *drbd_role_str(enum drbd_role s);
1590
1591 /* drbd_actlog.c */
1592 extern void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector);
1593 extern void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector);
1594 extern void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector);
1595 extern int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector);
1596 extern int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector);
1597 extern void drbd_rs_cancel_all(struct drbd_conf *mdev);
1598 extern int drbd_rs_del_all(struct drbd_conf *mdev);
1599 extern void drbd_rs_failed_io(struct drbd_conf *mdev,
1600 sector_t sector, int size);
1601 extern int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *);
1602 extern void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go);
1603 extern void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector,
1604 int size, const char *file, const unsigned int line);
1605 #define drbd_set_in_sync(mdev, sector, size) \
1606 __drbd_set_in_sync(mdev, sector, size, __FILE__, __LINE__)
1607 extern int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector,
1608 int size, const char *file, const unsigned int line);
1609 #define drbd_set_out_of_sync(mdev, sector, size) \
1610 __drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__)
1611 extern void drbd_al_apply_to_bm(struct drbd_conf *mdev);
1612 extern void drbd_al_shrink(struct drbd_conf *mdev);
1613
1614
1615 /* drbd_nl.c */
1616
1617 void drbd_nl_cleanup(void);
1618 int __init drbd_nl_init(void);
1619 void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state);
1620 void drbd_bcast_sync_progress(struct drbd_conf *mdev);
1621 void drbd_bcast_ee(struct drbd_conf *, const char *, const int, const char *,
1622 const char *, const struct drbd_peer_request *);
1623
1624
1625 /*
1626 * inline helper functions
1627 *************************/
1628
1629 /* see also page_chain_add and friends in drbd_receiver.c */
1630 static inline struct page *page_chain_next(struct page *page)
1631 {
1632 return (struct page *)page_private(page);
1633 }
1634 #define page_chain_for_each(page) \
1635 for (; page && ({ prefetch(page_chain_next(page)); 1; }); \
1636 page = page_chain_next(page))
1637 #define page_chain_for_each_safe(page, n) \
1638 for (; page && ({ n = page_chain_next(page); 1; }); page = n)
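
/* Illustrative sketch of the iterators above: counting the pages of a
 * chain built by page_chain_add and friends. page_chain_count() is a
 * hypothetical helper, not part of this header.
 *
 *	static inline unsigned int page_chain_count(struct page *page)
 *	{
 *		unsigned int n = 0;
 *		page_chain_for_each(page)
 *			n++;
 *		return n;
 *	}
 */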
1639
1640 static inline int drbd_bio_has_active_page(struct bio *bio)
1641 {
1642 struct bio_vec *bvec;
1643 int i;
1644
1645 __bio_for_each_segment(bvec, bio, i, 0) {
1646 if (page_count(bvec->bv_page) > 1)
1647 return 1;
1648 }
1649
1650 return 0;
1651 }
1652
1653 static inline int drbd_ee_has_active_page(struct drbd_peer_request *peer_req)
1654 {
1655 struct page *page = peer_req->pages;
1656 page_chain_for_each(page) {
1657 if (page_count(page) > 1)
1658 return 1;
1659 }
1660 return 0;
1661 }
1662
1663
1664
1665
1666
1667
1668 static inline void drbd_state_lock(struct drbd_conf *mdev)
1669 {
1670 wait_event(mdev->misc_wait,
1671 !test_and_set_bit(CLUSTER_ST_CHANGE, &mdev->flags));
1672 }
1673
1674 static inline void drbd_state_unlock(struct drbd_conf *mdev)
1675 {
1676 clear_bit(CLUSTER_ST_CHANGE, &mdev->flags);
1677 wake_up(&mdev->misc_wait);
1678 }
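
/* Hedged usage sketch: CLUSTER_ST_CHANGE serializes cluster-wide state
 * changes; the pairing is the usual lock/change/unlock pattern, e.g.
 *
 *	drbd_state_lock(mdev);
 *	rv = drbd_request_state(mdev, NS(conn, C_DISCONNECTING));
 *	drbd_state_unlock(mdev);
 *
 * drbd_request_state() and NS() come from drbd_state.h; check the real
 * users of this lock before copying the pattern.
 */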
1679
1680 static inline enum drbd_state_rv
1681 _drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
1682 enum chg_state_flags flags, struct completion *done)
1683 {
1684 enum drbd_state_rv rv;
1685
1686 read_lock(&global_state_lock);
1687 rv = __drbd_set_state(mdev, ns, flags, done);
1688 read_unlock(&global_state_lock);
1689
1690 return rv;
1691 }
1692
1693 #define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
1694 static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach, const char *where)
1695 {
1696 switch (mdev->ldev->dc.on_io_error) {
1697 case EP_PASS_ON:
1698 if (!forcedetach) {
1699 if (__ratelimit(&drbd_ratelimit_state))
1700 dev_err(DEV, "Local IO failed in %s.\n", where);
1701 if (mdev->state.disk > D_INCONSISTENT)
1702 _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_HARD, NULL);
1703 break;
1704 }
1705 /* NOTE fall through to detach case if forcedetach set */
1706 case EP_DETACH:
1707 case EP_CALL_HELPER:
1708 set_bit(WAS_IO_ERROR, &mdev->flags);
1709 if (mdev->state.disk > D_FAILED) {
1710 _drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
1711 dev_err(DEV,
1712 "Local IO failed in %s. Detaching...\n", where);
1713 }
1714 break;
1715 }
1716 }
1717
1718 /**
1719  * drbd_chk_io_error() - Handle the on_io_error setting; should be called from all io completion handlers
1720 * @mdev: DRBD device.
1721 * @error: Error code passed to the IO completion callback
1722 * @forcedetach: Force detach. I.e. the error happened while accessing the meta data
1723 *
1724 * See also drbd_main.c:after_state_ch() if (os.disk > D_FAILED && ns.disk == D_FAILED)
1725 */
1726 #define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
1727 static inline void drbd_chk_io_error_(struct drbd_conf *mdev,
1728 int error, int forcedetach, const char *where)
1729 {
1730 if (error) {
1731 unsigned long flags;
1732 spin_lock_irqsave(&mdev->tconn->req_lock, flags);
1733 __drbd_chk_io_error_(mdev, forcedetach, where);
1734 spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
1735 }
1736 }
1737
1738
1739 /**
1740 * drbd_md_first_sector() - Returns the first sector number of the meta data area
1741 * @bdev: Meta data block device.
1742 *
1743 * BTW, for internal meta data, this happens to be the maximum capacity
1744 * we could agree upon with our peer node.
1745 */
1746 static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
1747 {
1748 switch (bdev->dc.meta_dev_idx) {
1749 case DRBD_MD_INDEX_INTERNAL:
1750 case DRBD_MD_INDEX_FLEX_INT:
1751 return bdev->md.md_offset + bdev->md.bm_offset;
1752 case DRBD_MD_INDEX_FLEX_EXT:
1753 default:
1754 return bdev->md.md_offset;
1755 }
1756 }
1757
1758 /**
1759 * drbd_md_last_sector() - Return the last sector number of the meta data area
1760 * @bdev: Meta data block device.
1761 */
1762 static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
1763 {
1764 switch (bdev->dc.meta_dev_idx) {
1765 case DRBD_MD_INDEX_INTERNAL:
1766 case DRBD_MD_INDEX_FLEX_INT:
1767 return bdev->md.md_offset + MD_AL_OFFSET - 1;
1768 case DRBD_MD_INDEX_FLEX_EXT:
1769 default:
1770 return bdev->md.md_offset + bdev->md.md_size_sect;
1771 }
1772 }
1773
1774 /* Returns the number of 512 byte sectors of the device */
1775 static inline sector_t drbd_get_capacity(struct block_device *bdev)
1776 {
1777 /* return bdev ? get_capacity(bdev->bd_disk) : 0; */
1778 return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
1779 }
1780
1781 /**
1782  * drbd_get_max_capacity() - Returns the capacity we announce to our peer
1783 * @bdev: Meta data block device.
1784 *
1785  * Returns the capacity we announce to our peer. We clip ourselves at the
1786  * various MAX_SECTORS, because if we don't, the current implementation will
1787  * oops sooner or later.
1788 */
1789 static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
1790 {
1791 sector_t s;
1792 switch (bdev->dc.meta_dev_idx) {
1793 case DRBD_MD_INDEX_INTERNAL:
1794 case DRBD_MD_INDEX_FLEX_INT:
1795 s = drbd_get_capacity(bdev->backing_bdev)
1796 ? min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
1797 drbd_md_first_sector(bdev))
1798 : 0;
1799 break;
1800 case DRBD_MD_INDEX_FLEX_EXT:
1801 s = min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
1802 drbd_get_capacity(bdev->backing_bdev));
1803 /* clip at maximum size the meta device can support */
1804 s = min_t(sector_t, s,
1805 BM_EXT_TO_SECT(bdev->md.md_size_sect
1806 - bdev->md.bm_offset));
1807 break;
1808 default:
1809 s = min_t(sector_t, DRBD_MAX_SECTORS,
1810 drbd_get_capacity(bdev->backing_bdev));
1811 }
1812 return s;
1813 }
1814
1815 /**
1816 * drbd_md_ss__() - Return the sector number of our meta data super block
1817 * @mdev: DRBD device.
1818 * @bdev: Meta data block device.
1819 */
1820 static inline sector_t drbd_md_ss__(struct drbd_conf *mdev,
1821 struct drbd_backing_dev *bdev)
1822 {
1823 switch (bdev->dc.meta_dev_idx) {
1824 default: /* external, some index */
1825 return MD_RESERVED_SECT * bdev->dc.meta_dev_idx;
1826 case DRBD_MD_INDEX_INTERNAL:
1827 /* with drbd08, internal meta data is always "flexible" */
1828 case DRBD_MD_INDEX_FLEX_INT:
1829 /* sizeof(struct md_on_disk_07) == 4k
1830 * position: last 4k aligned block of 4k size */
1831 if (!bdev->backing_bdev) {
1832 if (__ratelimit(&drbd_ratelimit_state)) {
1833 dev_err(DEV, "bdev->backing_bdev==NULL\n");
1834 dump_stack();
1835 }
1836 return 0;
1837 }
1838 return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL)
1839 - MD_AL_OFFSET;
1840 case DRBD_MD_INDEX_FLEX_EXT:
1841 return 0;
1842 }
1843 }
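
/* Worked example for the flexible internal case above, assuming
 * MD_AL_OFFSET is the 8-sector (4k) super block slot defined earlier in
 * this header: with a 1 GiB backing device, capacity = 2097152 sectors,
 * so the super block lands at (2097152 & ~7) - 8 = 2097144, i.e. the
 * last 4k-aligned block of 4k size.
 */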
1844
1845 static inline void
1846 drbd_queue_work_front(struct drbd_work_queue *q, struct drbd_work *w)
1847 {
1848 unsigned long flags;
1849 spin_lock_irqsave(&q->q_lock, flags);
1850 list_add(&w->list, &q->q);
1851 up(&q->s); /* within the spinlock,
1852 see comment near end of drbd_worker() */
1853 spin_unlock_irqrestore(&q->q_lock, flags);
1854 }
1855
1856 static inline void
1857 drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
1858 {
1859 unsigned long flags;
1860 spin_lock_irqsave(&q->q_lock, flags);
1861 list_add_tail(&w->list, &q->q);
1862 up(&q->s); /* within the spinlock,
1863 see comment near end of drbd_worker() */
1864 spin_unlock_irqrestore(&q->q_lock, flags);
1865 }
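
/* Hedged usage sketch: a work item is an embedded struct drbd_work whose
 * callback the worker thread invokes; queueing it is just list + semaphore.
 *
 *	w->cb = w_send_write_hint;	any of the worker callbacks above
 *	drbd_queue_work(&mdev->tconn->data.work, w);
 *
 * drbd_queue_work_front() is the same, but jumps ahead of work already
 * queued, for work that must run before anything else in the queue.
 */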
1866
1867 static inline void wake_asender(struct drbd_tconn *tconn)
1868 {
1869 if (test_bit(SIGNAL_ASENDER, &tconn->flags))
1870 force_sig(DRBD_SIG, tconn->asender.task);
1871 }
1872
1873 static inline void request_ping(struct drbd_tconn *tconn)
1874 {
1875 set_bit(SEND_PING, &tconn->flags);
1876 wake_asender(tconn);
1877 }
1878
1879 static inline int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
1880 enum drbd_packet cmd, struct p_header *h, size_t size,
1881 unsigned msg_flags)
1882 {
1883 return _conn_send_cmd(mdev->tconn, mdev->vnr, sock, cmd, h, size, msg_flags);
1884 }
1885
1886 static inline int drbd_send_short_cmd(struct drbd_conf *mdev,
1887 enum drbd_packet cmd)
1888 {
1889 struct p_header h;
1890 return drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd, &h, sizeof(h));
1891 }
1892
1893 static inline int drbd_send_ping(struct drbd_conf *mdev)
1894 {
1895 struct p_header h;
1896 return drbd_send_cmd(mdev, USE_META_SOCKET, P_PING, &h, sizeof(h));
1897 }
1898
1899 static inline int drbd_send_ping_ack(struct drbd_conf *mdev)
1900 {
1901 struct p_header h;
1902 return drbd_send_cmd(mdev, USE_META_SOCKET, P_PING_ACK, &h, sizeof(h));
1903 }
1904
1905 static inline void drbd_thread_stop(struct drbd_thread *thi)
1906 {
1907 _drbd_thread_stop(thi, false, true);
1908 }
1909
1910 static inline void drbd_thread_stop_nowait(struct drbd_thread *thi)
1911 {
1912 _drbd_thread_stop(thi, false, false);
1913 }
1914
1915 static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
1916 {
1917 _drbd_thread_stop(thi, true, false);
1918 }
1919
1920 /* counts how many answer packets we expect from our peer,
1921 * for either explicit application requests,
1922 * or implicit barrier packets as necessary.
1923 * increased:
1924 * w_send_barrier
1925 * _req_mod(req, QUEUE_FOR_NET_WRITE or QUEUE_FOR_NET_READ);
1926 * it is much easier and equally valid to count what we queue for the
1927 * worker, even before it actually was queued or sent.
1928 * (drbd_make_request_common; recovery path on read io-error)
1929 * decreased:
1930 * got_BarrierAck (respective tl_clear, tl_clear_barrier)
1931 * _req_mod(req, DATA_RECEIVED)
1932 * [from receive_DataReply]
1933 * _req_mod(req, WRITE_ACKED_BY_PEER or RECV_ACKED_BY_PEER or NEG_ACKED)
1934 * [from got_BlockAck (P_WRITE_ACK, P_RECV_ACK)]
1935 * for some reason it is NOT decreased in got_NegAck,
1936 * but in the resulting cleanup code from report_params.
1937 * we should try to remember the reason for that...
1938 * _req_mod(req, SEND_FAILED or SEND_CANCELED)
1939 * _req_mod(req, CONNECTION_LOST_WHILE_PENDING)
1940 * [from tl_clear_barrier]
1941 */
1942 static inline void inc_ap_pending(struct drbd_conf *mdev)
1943 {
1944 atomic_inc(&mdev->ap_pending_cnt);
1945 }
1946
1947 #define ERR_IF_CNT_IS_NEGATIVE(which) \
1948 if (atomic_read(&mdev->which) < 0) \
1949 dev_err(DEV, "in %s:%d: " #which " = %d < 0 !\n", \
1950 __func__ , __LINE__ , \
1951 atomic_read(&mdev->which))
1952
1953 #define dec_ap_pending(mdev) do { \
1954 typecheck(struct drbd_conf *, mdev); \
1955 if (atomic_dec_and_test(&mdev->ap_pending_cnt)) \
1956 wake_up(&mdev->misc_wait); \
1957 ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt); } while (0)
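
/* Hedged sketch of the symmetry described above: whoever queues a request
 * that expects an answer bumps the counter, the ack path drops it.
 *
 *	inc_ap_pending(mdev);	e.g. with QUEUE_FOR_NET_WRITE
 *	...
 *	dec_ap_pending(mdev);	e.g. on WRITE_ACKED_BY_PEER
 *
 * The authoritative inc/dec sites are the ones listed in the comment
 * above; this only shows the intended pairing.
 */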
1958
1959 /* counts how many resync-related answers we still expect from the peer
1960 * increase decrease
1961 * C_SYNC_TARGET sends P_RS_DATA_REQUEST (and expects P_RS_DATA_REPLY)
1962 * C_SYNC_SOURCE sends P_RS_DATA_REPLY (and expects P_WRITE_ACK with ID_SYNCER)
1963 * (or P_NEG_ACK with ID_SYNCER)
1964 */
1965 static inline void inc_rs_pending(struct drbd_conf *mdev)
1966 {
1967 atomic_inc(&mdev->rs_pending_cnt);
1968 }
1969
1970 #define dec_rs_pending(mdev) do { \
1971 typecheck(struct drbd_conf *, mdev); \
1972 atomic_dec(&mdev->rs_pending_cnt); \
1973 ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt); } while (0)
1974
1975 /* counts how many answers we still need to send to the peer.
1976 * increased on
1977 * receive_Data unless protocol A;
1978 * we need to send a P_RECV_ACK (proto B)
1979 * or P_WRITE_ACK (proto C)
1980 * receive_RSDataReply (recv_resync_read) we need to send a P_WRITE_ACK
1981 * receive_DataRequest (receive_RSDataRequest) we need to send back P_DATA
1982 * receive_Barrier_* we need to send a P_BARRIER_ACK
1983 */
1984 static inline void inc_unacked(struct drbd_conf *mdev)
1985 {
1986 atomic_inc(&mdev->unacked_cnt);
1987 }
1988
1989 #define dec_unacked(mdev) do { \
1990 typecheck(struct drbd_conf *, mdev); \
1991 atomic_dec(&mdev->unacked_cnt); \
1992 ERR_IF_CNT_IS_NEGATIVE(unacked_cnt); } while (0)
1993
1994 #define sub_unacked(mdev, n) do { \
1995 typecheck(struct drbd_conf *, mdev); \
1996 atomic_sub(n, &mdev->unacked_cnt); \
1997 ERR_IF_CNT_IS_NEGATIVE(unacked_cnt); } while (0)
1998
1999
2000 static inline void put_net_conf(struct drbd_tconn *tconn)
2001 {
2002 if (atomic_dec_and_test(&tconn->net_cnt))
2003 wake_up(&tconn->net_cnt_wait);
2004 }
2005
2006 /**
2007 * get_net_conf() - Increase ref count on tconn->net_conf; Returns 0 if nothing there
2008 * @tconn: DRBD connection.
2009 *
2010 * You have to call put_net_conf() when finished working with tconn->net_conf.
2011 */
2012 static inline int get_net_conf(struct drbd_tconn *tconn)
2013 {
2014 int have_net_conf;
2015
2016 atomic_inc(&tconn->net_cnt);
2017 have_net_conf = tconn->volume0->state.conn >= C_UNCONNECTED;
2018 if (!have_net_conf)
2019 put_net_conf(tconn);
2020 return have_net_conf;
2021 }
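
/* Hedged usage sketch of the conditional-reference pattern:
 *
 *	if (get_net_conf(mdev->tconn)) {
 *		int timeo = mdev->tconn->net_conf->ping_timeo;	example field
 *		...
 *		put_net_conf(mdev->tconn);
 *	}
 *
 * net_conf may be torn down on disconnect, so it must only be dereferenced
 * between a successful get_net_conf() and the matching put_net_conf().
 */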
2022
2023 /**
2024 * get_ldev() - Increase the ref count on mdev->ldev. Returns 0 if there is no ldev
2025 * @M: DRBD device.
2026 *
2027 * You have to call put_ldev() when finished working with mdev->ldev.
2028 */
2029 #define get_ldev(M) __cond_lock(local, _get_ldev_if_state(M,D_INCONSISTENT))
2030 #define get_ldev_if_state(M,MINS) __cond_lock(local, _get_ldev_if_state(M,MINS))
2031
2032 static inline void put_ldev(struct drbd_conf *mdev)
2033 {
2034 int i = atomic_dec_return(&mdev->local_cnt);
2035
2036 /* This may be called from some endio handler,
2037 * so we must not sleep here. */
2038
2039 __release(local);
2040 D_ASSERT(i >= 0);
2041 if (i == 0) {
2042 if (mdev->state.disk == D_DISKLESS)
2043 /* even internal references gone, safe to destroy */
2044 drbd_ldev_destroy(mdev);
2045 if (mdev->state.disk == D_FAILED)
2046 /* all application IO references gone. */
2047 drbd_go_diskless(mdev);
2048 wake_up(&mdev->misc_wait);
2049 }
2050 }
2051
2052 #ifndef __CHECKER__
2053 static inline int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
2054 {
2055 int io_allowed;
2056
2057 /* never get a reference while D_DISKLESS */
2058 if (mdev->state.disk == D_DISKLESS)
2059 return 0;
2060
2061 atomic_inc(&mdev->local_cnt);
2062 io_allowed = (mdev->state.disk >= mins);
2063 if (!io_allowed)
2064 put_ldev(mdev);
2065 return io_allowed;
2066 }
2067 #else
2068 extern int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins);
2069 #endif
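
/* Hedged usage sketch for the local-disk reference counting above:
 *
 *	if (get_ldev_if_state(mdev, D_UP_TO_DATE)) {
 *		sector_t cap = drbd_get_capacity(mdev->ldev->backing_bdev);
 *		...
 *		put_ldev(mdev);
 *	}
 *
 * mdev->ldev must not be dereferenced without such a reference: once the
 * count drops to zero, put_ldev() may destroy it via drbd_ldev_destroy().
 */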
2070
2071 /* you must have a "get_ldev" reference */
2072 static inline void drbd_get_syncer_progress(struct drbd_conf *mdev,
2073 unsigned long *bits_left, unsigned int *per_mil_done)
2074 {
2075 /* this is to break it at compile time when we change that, in case we
2076 * want to support more than (1<<32) bits on a 32bit arch. */
2077 typecheck(unsigned long, mdev->rs_total);
2078
2079 /* note: both rs_total and rs_left are in bits, i.e. in
2080 * units of BM_BLOCK_SIZE.
2081 * for the percentage, we don't care. */
2082
2083 if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
2084 *bits_left = mdev->ov_left;
2085 else
2086 *bits_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
2087 /* >> 10 to prevent overflow,
2088 * +1 to prevent division by zero */
2089 if (*bits_left > mdev->rs_total) {
2090 /* doh. maybe a logic bug somewhere.
2091 * may also be just a race condition
2092 * between this and a disconnect during sync.
2093 * for now, just prevent in-kernel buffer overflow.
2094 */
2095 smp_rmb();
2096 dev_warn(DEV, "cs:%s rs_left=%lu > rs_total=%lu (rs_failed %lu)\n",
2097 drbd_conn_str(mdev->state.conn),
2098 *bits_left, mdev->rs_total, mdev->rs_failed);
2099 *per_mil_done = 0;
2100 } else {
2101 /* Make sure the division happens in long context.
2102 * We allow up to one petabyte storage right now,
2103 * at a granularity of 4k per bit that is 2**38 bits.
2104 * After shift right and multiplication by 1000,
2105 * this should still fit easily into a 32bit long,
2106 * so we don't need a 64bit division on 32bit arch.
2107 * Note: currently we don't support such large bitmaps on 32bit
2108 * arch anyways, but no harm done to be prepared for it here.
2109 */
2110 unsigned int shift = mdev->rs_total >= (1ULL << 32) ? 16 : 10;
2111 unsigned long left = *bits_left >> shift;
2112 unsigned long total = 1UL + (mdev->rs_total >> shift);
2113 unsigned long tmp = 1000UL - left * 1000UL/total;
2114 *per_mil_done = tmp;
2115 }
2116 }
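
/* Worked example of the per-mil math above (hypothetical numbers):
 * rs_total = 2^20 bits (4 GiB at 4k per bit), bits_left = 2^18.
 * shift = 10, left = 256, total = 1 + 1024 = 1025, so
 * per_mil_done = 1000 - 256 * 1000 / 1025 = 1000 - 249 = 751, ~75.1%.
 */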
2117
2118
2119 /* this throttles on-the-fly application requests
2120 * according to max_buffers settings;
2121 * maybe re-implement using semaphores? */
2122 static inline int drbd_get_max_buffers(struct drbd_conf *mdev)
2123 {
2124 int mxb = 1000000; /* arbitrary limit on open requests */
2125 if (get_net_conf(mdev->tconn)) {
2126 mxb = mdev->tconn->net_conf->max_buffers;
2127 put_net_conf(mdev->tconn);
2128 }
2129 return mxb;
2130 }
2131
2132 static inline int drbd_state_is_stable(struct drbd_conf *mdev)
2133 {
2134 union drbd_state s = mdev->state;
2135
2136 /* DO NOT add a default clause, we want the compiler to warn us
2137 * for any newly introduced state we may have forgotten to add here */
2138
2139 switch ((enum drbd_conns)s.conn) {
2140 /* new io only accepted when there is no connection, ... */
2141 case C_STANDALONE:
2142 case C_WF_CONNECTION:
2143 /* ... or there is a well established connection. */
2144 case C_CONNECTED:
2145 case C_SYNC_SOURCE:
2146 case C_SYNC_TARGET:
2147 case C_VERIFY_S:
2148 case C_VERIFY_T:
2149 case C_PAUSED_SYNC_S:
2150 case C_PAUSED_SYNC_T:
2151 case C_AHEAD:
2152 case C_BEHIND:
2153 /* transitional states, IO allowed */
2154 case C_DISCONNECTING:
2155 case C_UNCONNECTED:
2156 case C_TIMEOUT:
2157 case C_BROKEN_PIPE:
2158 case C_NETWORK_FAILURE:
2159 case C_PROTOCOL_ERROR:
2160 case C_TEAR_DOWN:
2161 case C_WF_REPORT_PARAMS:
2162 case C_STARTING_SYNC_S:
2163 case C_STARTING_SYNC_T:
2164 break;
2165
2166 /* Allow IO in BM exchange states with new protocols */
2167 case C_WF_BITMAP_S:
2168 if (mdev->tconn->agreed_pro_version < 96)
2169 return 0;
2170 break;
2171
2172 /* no new io accepted in these states */
2173 case C_WF_BITMAP_T:
2174 case C_WF_SYNC_UUID:
2175 case C_MASK:
2176 /* not "stable" */
2177 return 0;
2178 }
2179
2180 switch ((enum drbd_disk_state)s.disk) {
2181 case D_DISKLESS:
2182 case D_INCONSISTENT:
2183 case D_OUTDATED:
2184 case D_CONSISTENT:
2185 case D_UP_TO_DATE:
2186 /* disk state is stable as well. */
2187 break;
2188
2189 	/* no new io accepted during transitional states */
2190 case D_ATTACHING:
2191 case D_FAILED:
2192 case D_NEGOTIATING:
2193 case D_UNKNOWN:
2194 case D_MASK:
2195 /* not "stable" */
2196 return 0;
2197 }
2198
2199 return 1;
2200 }
2201
2202 static inline int is_susp(union drbd_state s)
2203 {
2204 return s.susp || s.susp_nod || s.susp_fen;
2205 }
2206
2207 static inline bool may_inc_ap_bio(struct drbd_conf *mdev)
2208 {
2209 int mxb = drbd_get_max_buffers(mdev);
2210
2211 if (is_susp(mdev->state))
2212 return false;
2213 if (test_bit(SUSPEND_IO, &mdev->flags))
2214 return false;
2215
2216 /* to avoid potential deadlock or bitmap corruption,
2217 * in various places, we only allow new application io
2218 * to start during "stable" states. */
2219
2220 /* no new io accepted when attaching or detaching the disk */
2221 if (!drbd_state_is_stable(mdev))
2222 return false;
2223
2224 	/* since some older kernels don't have atomic_add_unless,
2225 	 * and we are within the spinlock anyway, we have this workaround. */
2226 if (atomic_read(&mdev->ap_bio_cnt) > mxb)
2227 return false;
2228 if (test_bit(BITMAP_IO, &mdev->flags))
2229 return false;
2230 return true;
2231 }
2232
2233 static inline bool inc_ap_bio_cond(struct drbd_conf *mdev, int count)
2234 {
2235 bool rv = false;
2236
2237 spin_lock_irq(&mdev->tconn->req_lock);
2238 rv = may_inc_ap_bio(mdev);
2239 if (rv)
2240 atomic_add(count, &mdev->ap_bio_cnt);
2241 spin_unlock_irq(&mdev->tconn->req_lock);
2242
2243 return rv;
2244 }
2245
2246 static inline void inc_ap_bio(struct drbd_conf *mdev, int count)
2247 {
2248 	/* we wait here
2249 	 *    as long as the device is suspended,
2250 	 *    until the bitmap is no longer on the fly during connection handshake,
2251 	 *    and as long as we would exceed the max_buffer limit.
2252 *
2253 * to avoid races with the reconnect code,
2254 * we need to atomic_inc within the spinlock. */
2255
2256 wait_event(mdev->misc_wait, inc_ap_bio_cond(mdev, count));
2257 }
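
/* Hedged sketch: application IO is bracketed by this throttle, roughly
 * (submission path)
 *
 *	inc_ap_bio(mdev, 1);		may sleep until IO is allowed again
 *	submit_the_bio(mdev, bio);	hypothetical submission step
 *
 * with the completion path eventually calling dec_ap_bio(mdev).
 */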
2258
2259 static inline void dec_ap_bio(struct drbd_conf *mdev)
2260 {
2261 int mxb = drbd_get_max_buffers(mdev);
2262 int ap_bio = atomic_dec_return(&mdev->ap_bio_cnt);
2263
2264 D_ASSERT(ap_bio >= 0);
2265 /* this currently does wake_up for every dec_ap_bio!
2266 * maybe rather introduce some type of hysteresis?
2267 * e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */
2268 if (ap_bio < mxb)
2269 wake_up(&mdev->misc_wait);
2270 if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
2271 if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
2272 drbd_queue_work(&mdev->tconn->data.work, &mdev->bm_io_work.w);
2273 }
2274 }
2275
2276 static inline int drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val)
2277 {
2278 int changed = mdev->ed_uuid != val;
2279 mdev->ed_uuid = val;
2280 return changed;
2281 }
2282
2283 static inline int drbd_queue_order_type(struct drbd_conf *mdev)
2284 {
2285 /* sorry, we currently have no working implementation
2286 * of distributed TCQ stuff */
2287 #ifndef QUEUE_ORDERED_NONE
2288 #define QUEUE_ORDERED_NONE 0
2289 #endif
2290 return QUEUE_ORDERED_NONE;
2291 }
2292
2293 static inline void drbd_md_flush(struct drbd_conf *mdev)
2294 {
2295 int r;
2296
2297 if (test_bit(MD_NO_FUA, &mdev->flags))
2298 return;
2299
2300 r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL);
2301 if (r) {
2302 set_bit(MD_NO_FUA, &mdev->flags);
2303 dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
2304 }
2305 }
2306
2307 #endif