libceph: add support for HASHPSPOOL pool flag
author     Sage Weil <sage@inktank.com>
           Tue, 26 Feb 2013 18:39:09 +0000 (10:39 -0800)
committer  Sage Weil <sage@inktank.com>
           Tue, 26 Feb 2013 23:03:06 +0000 (15:03 -0800)
The legacy behavior adds the pgid seed and pool together as the input for
CRUSH.  That is problematic because each pool's PGs end up mapping to the
same OSDs: 1.5 == 2.4 == 3.3 == ...
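For example, assuming pgp_num is large enough that the seed survives the
stable mod unchanged, pg 1.5 yields 1 + 5 = 6 and pg 2.4 yields 2 + 4 = 6,
so both pools feed the identical value into CRUSH.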

Instead, if the HASHPSPOOL flag is set, we hash the ps and pool together and
feed that into CRUSH.  This ensures that adjacent pools will each map to an
independent pseudorandom set of OSDs.
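A minimal sketch (not part of the patch) of how the CRUSH input, pps, is
derived under the two schemes; pps_for_pg() is a hypothetical helper name,
the other identifiers come from the headers touched below:

	#include <linux/crush/hash.h>
	#include <linux/ceph/osdmap.h>

	/* Sketch only: derive the CRUSH placement seed for pg <pool>.<seed>. */
	static u32 pps_for_pg(struct ceph_pg_pool_info *pool, struct ceph_pg pgid)
	{
		u32 ps = ceph_stable_mod(pgid.seed, pool->pgp_num,
					 pool->pgp_num_mask);

		if (pool->flags & CEPH_POOL_FLAG_HASHPSPOOL)
			/* mix pool id and seed so adjacent pools diverge */
			return crush_hash32_2(CRUSH_HASH_RJENKINS1, ps,
					      (u32)pgid.pool);

		/* legacy: plain addition, so 1.5 and 2.4 both become 6 */
		return ps + (unsigned)pgid.pool;
	}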

Advertise our support for this via a protocol feature flag.

Signed-off-by: Sage Weil <sage@inktank.com>
Reviewed-by: Alex Elder <elder@inktank.com>
include/linux/ceph/ceph_features.h
include/linux/ceph/osdmap.h
net/ceph/osdmap.c

index ab0a54286e0d6663e8ec5b9ecf8b4339b1c75ca1..76554cecaab24f71fd327db30e3af51143928008 100644
@@ -34,6 +34,7 @@
 #define CEPH_FEATURE_REPLY_CREATE_INODE   (1<<27)
 #define CEPH_FEATURE_OSD_HBMSGS     (1<<28)
 #define CEPH_FEATURE_MDSENC         (1<<29)
+#define CEPH_FEATURE_OSDHASHPSPOOL  (1<<30)
 
 /*
  * Features supported.
@@ -45,7 +46,8 @@
         CEPH_FEATURE_OSDENC |                  \
         CEPH_FEATURE_CRUSH_TUNABLES |          \
         CEPH_FEATURE_CRUSH_TUNABLES2 |         \
-        CEPH_FEATURE_REPLY_CREATE_INODE)
+        CEPH_FEATURE_REPLY_CREATE_INODE |      \
+        CEPH_FEATURE_OSDHASHPSPOOL)
 
 #define CEPH_FEATURES_REQUIRED_DEFAULT   \
        (CEPH_FEATURE_NOSRCADDR |        \
index 35985125f11840f0455e3fdd1302cfd0743c15d8..c819190d16423c4b82a619871a1e68babbd765ba 100644
@@ -23,6 +23,8 @@ struct ceph_pg {
        uint32_t seed;
 };
 
+#define CEPH_POOL_FLAG_HASHPSPOOL  1
+
 struct ceph_pg_pool_info {
        struct rb_node node;
        s64 id;
index 37847164450136c95200579def0adfa444438867..69bc4bf89e3e79bb47eddcac63367913a8645ef0 100644
@@ -1127,18 +1127,16 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
        struct ceph_pg_mapping *pg;
        struct ceph_pg_pool_info *pool;
        int ruleno;
-       unsigned int poolid, ps, pps, t, r;
+       int r;
+       u32 pps;
 
-       poolid = pgid.pool;
-       ps = pgid.seed;
-
-       pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
+       pool = __lookup_pg_pool(&osdmap->pg_pools, pgid.pool);
        if (!pool)
                return NULL;
 
        /* pg_temp? */
-       t = ceph_stable_mod(ps, pool->pg_num, pool->pgp_num_mask);
-       pgid.seed = t;
+       pgid.seed = ceph_stable_mod(pgid.seed, pool->pg_num,
+                                   pool->pgp_num_mask);
        pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
        if (pg) {
                *num = pg->len;
@@ -1149,20 +1147,35 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
        ruleno = crush_find_rule(osdmap->crush, pool->crush_ruleset,
                                 pool->type, pool->size);
        if (ruleno < 0) {
-               pr_err("no crush rule pool %d ruleset %d type %d size %d\n",
-                      poolid, pool->crush_ruleset, pool->type,
+               pr_err("no crush rule pool %lld ruleset %d type %d size %d\n",
+                      pgid.pool, pool->crush_ruleset, pool->type,
                       pool->size);
                return NULL;
        }
 
-       pps = ceph_stable_mod(ps, pool->pgp_num, pool->pgp_num_mask);
-       pps += poolid;
+       if (pool->flags & CEPH_POOL_FLAG_HASHPSPOOL) {
+               /* hash pool id and seed so that pool PGs do not overlap */
+               pps = crush_hash32_2(CRUSH_HASH_RJENKINS1,
+                                    ceph_stable_mod(pgid.seed, pool->pgp_num,
+                                                    pool->pgp_num_mask),
+                                    pgid.pool);
+       } else {
+               /*
+                * legacy behavior: add ps and pool together.  this is
+                * not a great approach because the PGs from each pool
+                * will overlap on top of each other: 0.5 == 1.4 ==
+                * 2.3 == ...
+                */
+               pps = ceph_stable_mod(pgid.seed, pool->pgp_num,
+                                     pool->pgp_num_mask) +
+                       (unsigned)pgid.pool;
+       }
        r = crush_do_rule(osdmap->crush, ruleno, pps, osds,
                          min_t(int, pool->size, *num),
                          osdmap->osd_weight);
        if (r < 0) {
-               pr_err("error %d from crush rule: pool %d ruleset %d type %d"
-                      " size %d\n", r, poolid, pool->crush_ruleset,
+               pr_err("error %d from crush rule: pool %lld ruleset %d type %d"
+                      " size %d\n", r, pgid.pool, pool->crush_ruleset,
                       pool->type, pool->size);
                return NULL;
        }