/*
 * Ceph osdmap definitions -- include/linux/ceph/osdmap.h
 */
#ifndef _FS_CEPH_OSDMAP_H
#define _FS_CEPH_OSDMAP_H

#include <linux/rbtree.h>
#include <linux/ceph/types.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/ceph_fs.h>
#include <linux/crush/crush.h>

/*
 * The osd map describes the current membership of the osd cluster and
 * specifies the mapping of objects to placement groups and placement
 * groups to (sets of) osds.  That is, it completely specifies the
 * (desired) distribution of all data objects in the system at some
 * point in time.
 *
 * Each map version is identified by an epoch, which increases monotonically.
 *
 * The map can be updated either via an incremental map (diff) describing
 * the change between two successive epochs, or as a fully encoded map.
 */
5b191d99
SW
22struct ceph_pg {
23 uint64_t pool;
24 uint32_t seed;
25};
26
f984cb76
ID
27int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs);
28
04812acf
ID
29#define CEPH_POOL_FLAG_HASHPSPOOL (1ULL << 0) /* hash pg seed and pool id
30 together */
63244fa1 31#define CEPH_POOL_FLAG_FULL (1ULL << 1) /* pool is full */
83ca14fd 32
f24e9980 33struct ceph_pg_pool_info {
4fc51be8 34 struct rb_node node;
4f6a7e5e 35 s64 id;
04812acf 36 u8 type; /* CEPH_POOL_TYPE_* */
4f6a7e5e 37 u8 size;
04812acf 38 u8 min_size;
4f6a7e5e
SW
39 u8 crush_ruleset;
40 u8 object_hash;
04812acf 41 u32 last_force_request_resend;
4f6a7e5e
SW
42 u32 pg_num, pgp_num;
43 int pg_num_mask, pgp_num_mask;
17a13e40
ID
44 s64 read_tier;
45 s64 write_tier; /* wins for read+write ops */
04812acf 46 u64 flags; /* CEPH_POOL_FLAG_* */
2844a76a 47 char *name;
f24e9980
SW
48};
49
2abebdbc
ID
50static inline bool ceph_can_shift_osds(struct ceph_pg_pool_info *pool)
51{
52 switch (pool->type) {
53 case CEPH_POOL_TYPE_REP:
54 return true;
55 case CEPH_POOL_TYPE_EC:
56 return false;
57 default:
58 BUG_ON(1);
59 }
60}
61
4f6a7e5e 62struct ceph_object_locator {
22116525 63 s64 pool;
4f6a7e5e
SW
64};
65
63244fa1
ID
66static inline void ceph_oloc_init(struct ceph_object_locator *oloc)
67{
68 oloc->pool = -1;
69}
70
71static inline bool ceph_oloc_empty(const struct ceph_object_locator *oloc)
72{
73 return oloc->pool == -1;
74}
75
76static inline void ceph_oloc_copy(struct ceph_object_locator *dest,
77 const struct ceph_object_locator *src)
78{
79 dest->pool = src->pool;
80}
81
4295f221
ID
82/*
83 * Maximum supported by kernel client object name length
84 *
85 * (probably outdated: must be >= RBD_MAX_MD_NAME_LEN -- currently 100)
86 */
87#define CEPH_MAX_OID_NAME_LEN 100
88
d30291b9
ID
89/*
90 * 51-char inline_name is long enough for all cephfs and all but one
91 * rbd requests: <imgname> in "<imgname>.rbd"/"rbd_id.<imgname>" can be
92 * arbitrarily long (~PAGE_SIZE). It's done once during rbd map; all
93 * other rbd requests fit into inline_name.
94 *
95 * Makes ceph_object_id 64 bytes on 64-bit.
96 */
97#define CEPH_OID_INLINE_LEN 52
98
99/*
100 * Both inline and external buffers have space for a NUL-terminator,
101 * which is carried around. It's not required though - RADOS object
102 * names don't have to be NUL-terminated and may contain NULs.
103 */
4295f221 104struct ceph_object_id {
d30291b9
ID
105 char *name;
106 char inline_name[CEPH_OID_INLINE_LEN];
4295f221
ID
107 int name_len;
108};
109
d30291b9
ID
110static inline void ceph_oid_init(struct ceph_object_id *oid)
111{
112 oid->name = oid->inline_name;
113 oid->name_len = 0;
114}
115
116static inline bool ceph_oid_empty(const struct ceph_object_id *oid)
117{
118 return oid->name == oid->inline_name && !oid->name_len;
119}
120
121void ceph_oid_copy(struct ceph_object_id *dest,
122 const struct ceph_object_id *src);
123__printf(2, 3)
124void ceph_oid_printf(struct ceph_object_id *oid, const char *fmt, ...);
125__printf(3, 4)
126int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp,
127 const char *fmt, ...);
128void ceph_oid_destroy(struct ceph_object_id *oid);
129
f24e9980
SW
130struct ceph_pg_mapping {
131 struct rb_node node;
5b191d99 132 struct ceph_pg pgid;
35a935d7
ID
133
134 union {
135 struct {
136 int len;
137 int osds[];
138 } pg_temp;
9686f94c
ID
139 struct {
140 int osd;
141 } primary_temp;
35a935d7 142 };
f24e9980
SW
143};
144
145struct ceph_osdmap {
146 struct ceph_fsid fsid;
147 u32 epoch;
f24e9980
SW
148 struct ceph_timespec created, modified;
149
150 u32 flags; /* CEPH_OSDMAP_* */
151
152 u32 max_osd; /* size of osd_state, _offload, _addr arrays */
153 u8 *osd_state; /* CEPH_OSD_* */
154 u32 *osd_weight; /* 0 = failed, 0x10000 = 100% normal */
155 struct ceph_entity_addr *osd_addr;
156
157 struct rb_root pg_temp;
9686f94c
ID
158 struct rb_root primary_temp;
159
2cfa34f2
ID
160 u32 *osd_primary_affinity;
161
4fc51be8
SW
162 struct rb_root pg_pools;
163 u32 pool_max;
f24e9980
SW
164
165 /* the CRUSH map specifies the mapping of placement groups to
166 * the list of osds that store+replicate them. */
167 struct crush_map *crush;
9d521470
ID
168
169 struct mutex crush_scratch_mutex;
170 int crush_scratch_ary[CEPH_PG_MAX_SIZE * 3];
f24e9980
SW
171};
172
246138fa
ID
173static inline int ceph_osd_exists(struct ceph_osdmap *map, int osd)
174{
175 return osd >= 0 && osd < map->max_osd &&
176 (map->osd_state[osd] & CEPH_OSD_EXISTS);
177}
178
f24e9980
SW
179static inline int ceph_osd_is_up(struct ceph_osdmap *map, int osd)
180{
246138fa
ID
181 return ceph_osd_exists(map, osd) &&
182 (map->osd_state[osd] & CEPH_OSD_UP);
183}
184
185static inline int ceph_osd_is_down(struct ceph_osdmap *map, int osd)
186{
187 return !ceph_osd_is_up(map, osd);
f24e9980
SW
188}
189
190static inline bool ceph_osdmap_flag(struct ceph_osdmap *map, int flag)
191{
192 return map && (map->flags & flag);
193}
194
195extern char *ceph_osdmap_state_str(char *str, int len, int state);
2cfa34f2 196extern u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd);
f24e9980
SW
197
198static inline struct ceph_entity_addr *ceph_osd_addr(struct ceph_osdmap *map,
199 int osd)
200{
201 if (osd >= map->max_osd)
202 return NULL;
203 return &map->osd_addr[osd];
204}
205
ef4859d6
AE
206static inline int ceph_decode_pgid(void **p, void *end, struct ceph_pg *pgid)
207{
208 __u8 version;
209
210 if (!ceph_has_room(p, end, 1 + 8 + 4 + 4)) {
3ef650d3 211 pr_warn("incomplete pg encoding\n");
ef4859d6
AE
212 return -EINVAL;
213 }
214 version = ceph_decode_8(p);
215 if (version > 1) {
3ef650d3 216 pr_warn("do not understand pg encoding %d > 1\n",
ef4859d6
AE
217 (int)version);
218 return -EINVAL;
219 }
220
221 pgid->pool = ceph_decode_64(p);
222 pgid->seed = ceph_decode_32(p);
223 *p += 4; /* skip deprecated preferred value */
224
225 return 0;
226}
227
e5253a7b 228struct ceph_osdmap *ceph_osdmap_alloc(void);
a2505d63 229extern struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end);
0c0a8de1
ID
230struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
231 struct ceph_osdmap *map);
f24e9980
SW
232extern void ceph_osdmap_destroy(struct ceph_osdmap *map);
233
6f3bfd45
ID
234struct ceph_osds {
235 int osds[CEPH_PG_MAX_SIZE];
236 int size;
237 int primary; /* id, NOT index */
238};
239
240static inline void ceph_osds_init(struct ceph_osds *set)
241{
242 set->size = 0;
243 set->primary = -1;
244}
245
246void ceph_osds_copy(struct ceph_osds *dest, const struct ceph_osds *src);
247
63244fa1
ID
248bool ceph_is_new_interval(const struct ceph_osds *old_acting,
249 const struct ceph_osds *new_acting,
250 const struct ceph_osds *old_up,
251 const struct ceph_osds *new_up,
252 int old_size,
253 int new_size,
254 int old_min_size,
255 int new_min_size,
256 u32 old_pg_num,
257 u32 new_pg_num,
258 bool old_sort_bitwise,
259 bool new_sort_bitwise,
260 const struct ceph_pg *pgid);
261bool ceph_osds_changed(const struct ceph_osds *old_acting,
262 const struct ceph_osds *new_acting,
263 bool any_change);
264
f24e9980 265/* calculate mapping of a file extent to an object */
d63b77f4 266extern int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
e8afad65 267 u64 off, u64 len,
d63b77f4 268 u64 *bno, u64 *oxoff, u64 *oxlen);
f24e9980 269
d9591f5e
ID
270int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap,
271 struct ceph_object_id *oid,
272 struct ceph_object_locator *oloc,
273 struct ceph_pg *raw_pgid);
7c13cb64 274
6f3bfd45
ID
275void ceph_pg_to_up_acting_osds(struct ceph_osdmap *osdmap,
276 const struct ceph_pg *raw_pgid,
277 struct ceph_osds *up,
278 struct ceph_osds *acting);
f81f1633
ID
279int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap,
280 const struct ceph_pg *raw_pgid);
f24e9980 281
ce7f6a27
ID
282extern struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map,
283 u64 id);
284
72afc71f 285extern const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id);
7669a2c9
YS
286extern int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name);
287
#endif /* _FS_CEPH_OSDMAP_H */