Fix: libc internal mutex races with run_as
diff --git a/src/common/hashtable/rculfhash.c b/src/common/hashtable/rculfhash.c
index ebdc4ffaa8ee84842eb18c4c0588042fade9c66c..9baf4079877393a1289fdcca8a518286e530cddb 100644
--- a/src/common/hashtable/rculfhash.c
+++ b/src/common/hashtable/rculfhash.c
 #include "rculfhash-internal.h"
 #include "urcu-flavor.h"
 
-/*
- * We need to lock pthread exit, which deadlocks __nptl_setxid in the runas
- * clone.  This work-around will be allowed to be removed when runas.c gets
- * changed to do an exec() before issuing seteuid/setegid. See
- * http://sourceware.org/bugzilla/show_bug.cgi?id=10184 for details.
- */
-pthread_mutex_t lttng_libc_state_lock = PTHREAD_MUTEX_INITIALIZER;
+#include <common/common.h>
 
 /*
  * Split-counters lazily update the global counter each 1024
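
The mutex deleted above existed solely to serialize against glibc's set*id()
broadcast: __nptl_setxid signals every thread in the process and can deadlock
against libc-internal locks, such as the one taken on the pthread-exit path
(see the sourceware bug referenced in the removed comment). The race
disappears once the credential change happens in a freshly exec()'d,
single-threaded process, which is what the removed comment asks runas.c to
do. Below is a minimal sketch of that pattern; run_as_worker is a
hypothetical helper binary standing in for lttng-tools' actual run_as
machinery, not its real implementation:

    #include <stdlib.h>
    #include <unistd.h>
    #include <sys/types.h>

    /*
     * Parent side: fork, then exec immediately, so the child starts with
     * fresh, single-threaded libc state -- no inherited internal lock can
     * deadlock the upcoming set*id() calls.
     */
    pid_t spawn_worker(char *const argv[])
    {
        pid_t pid = fork();

        if (pid == 0) {
            execv(argv[0], argv);
            _exit(EXIT_FAILURE);    /* reached only if exec fails */
        }
        return pid;    /* parent reaps with waitpid() */
    }

    /*
     * Worker side (the hypothetical run_as_worker binary): with no
     * sibling threads, seteuid()/setegid() need no cross-thread
     * broadcast, so the deadlock cannot occur.
     */
    int main(int argc, char **argv)
    {
        uid_t uid;
        gid_t gid;

        if (argc < 3)
            return EXIT_FAILURE;
        uid = (uid_t) strtoul(argv[1], NULL, 10);
        gid = (gid_t) strtoul(argv[2], NULL, 10);
        if (setegid(gid) < 0 || seteuid(uid) < 0)
            return EXIT_FAILURE;
        /* ... perform the requested operation as uid/gid ... */
        return EXIT_SUCCESS;
    }
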
@@ -570,6 +564,7 @@ void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
 
 static long nr_cpus_mask = -1;
 static long split_count_mask = -1;
+static int split_count_order = -1;
 
 #if defined(HAVE_SYSCONF)
 static void ht_init_nr_cpus_mask(void)
@@ -606,6 +601,8 @@ void alloc_split_items_count(struct cds_lfht *ht)
                        split_count_mask = DEFAULT_SPLIT_COUNT_MASK;
                else
                        split_count_mask = nr_cpus_mask;
+               split_count_order =
+                       cds_lfht_get_count_order_ulong(split_count_mask + 1);
        }
 
        assert(split_count_mask >= 0);
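
split_count_order caches the base-2 log of the number of split counters so
the hot path in check_resize() further down can use it without recomputing.
A minimal model of the computation, assuming
cds_lfht_get_count_order_ulong(x) returns the smallest order such that
(1UL << order) >= x:

    #include <stdio.h>

    /* model of cds_lfht_get_count_order_ulong() */
    static int count_order_ulong(unsigned long x)
    {
        int order = 0;

        while ((1UL << order) < x)
            order++;
        return order;
    }

    int main(void)
    {
        unsigned long split_count_mask = 7;    /* e.g. 8 CPUs */

        /* 8 counters -> order 3 */
        printf("split_count_order = %d\n",
               count_order_ulong(split_count_mask + 1));
        return 0;
    }
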
@@ -624,7 +621,7 @@ void free_split_items_count(struct cds_lfht *ht)
        poison_free(ht->split_count);
 }
 
-#if defined(HAVE_SCHED_GETCPU)
+#if defined(HAVE_SCHED_GETCPU) && !defined(VALGRIND)
 static
 int ht_get_split_count_index(unsigned long hash)
 {
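
The added !defined(VALGRIND) guard drops the sched_getcpu()-based counter
selection for Valgrind builds, presumably because Valgrind's serialized
thread scheduling makes the reported CPU number a poor way to spread
contention. Assuming upstream urcu's #else branch is what takes over (that
branch is not shown in this hunk), the index is derived from the hash
instead, along these lines:

    /*
     * Fallback sketch: pick a split counter from the hash rather than
     * the current CPU; split_count_mask is the file's global,
     * (number of counters - 1), a power of two minus one.
     */
    static int ht_get_split_count_index(unsigned long hash)
    {
        return (int) (hash & split_count_mask);
    }
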
@@ -721,7 +718,7 @@ void check_resize(struct cds_lfht *ht, unsigned long size, uint32_t chain_len)
         * Use bucket-local length for small table expand and for
         * environments lacking per-cpu data support.
         */
-       if (count >= (1UL << COUNT_COMMIT_ORDER))
+       if (count >= (1UL << (COUNT_COMMIT_ORDER + split_count_order)))
                return;
        if (chain_len > 100)
                dbg_printf("WARNING: large chain length: %u.\n",
@@ -735,7 +732,9 @@ void check_resize(struct cds_lfht *ht, unsigned long size, uint32_t chain_len)
                growth = cds_lfht_get_count_order_u32(chain_len
                                - (CHAIN_LEN_TARGET - 1));
                if ((ht->flags & CDS_LFHT_ACCOUNTING)
-                               && (size << growth) >= (1UL << COUNT_COMMIT_ORDER)) {
+                               && (size << growth)
+                                       >= (1UL << (COUNT_COMMIT_ORDER
+                                               + split_count_order))) {
                        /*
                         * If ideal growth expands the hash table size
                         * beyond the "small hash table" sizes, use the
@@ -745,8 +744,8 @@ void check_resize(struct cds_lfht *ht, unsigned long size, uint32_t chain_len)
                         * the chain length is used to expand the hash
                         * table in every case.
                         */
-                       growth = COUNT_COMMIT_ORDER -
-                               cds_lfht_get_count_order_u32(size);
+                       growth = COUNT_COMMIT_ORDER + split_count_order
+                               - cds_lfht_get_count_order_ulong(size);
                        if (growth <= 0)
                                return;
                }
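
The same correction applies to the clamp: when the chain-driven ideal growth
would overshoot the small-table range, growth is cut back so the resize lands
exactly on the boundary where count-based accounting takes over. (The switch
from cds_lfht_get_count_order_u32 to the _ulong variant also matches size's
unsigned long type.) A worked instance, reusing the order helper and the
COUNT_COMMIT_ORDER assumption from the sketches above:

    #include <stdio.h>

    #define COUNT_COMMIT_ORDER 10    /* assumption, as above */

    static int count_order_ulong(unsigned long x)
    {
        int order = 0;

        while ((1UL << order) < x)
            order++;
        return order;
    }

    int main(void)
    {
        int split_count_order = 3;     /* e.g. 8 CPUs */
        unsigned long size = 256;      /* current bucket count */
        int growth;

        /* clamp to the small/large boundary: 2^(10 + 3) = 8192 buckets */
        growth = COUNT_COMMIT_ORDER + split_count_order
                - count_order_ulong(size);
        if (growth > 0)
            printf("resize %lu -> %lu\n", size, size << growth);
        return 0;
    }
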
@@ -1776,7 +1775,7 @@ int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr)
        }
 #endif
        while (uatomic_read(&ht->in_progress_resize))
-               poll(NULL, 0, 100);     /* wait for 100ms */
+               (void) poll(NULL, 0, 100); /* wait for 100ms */
        ret = cds_lfht_delete_bucket(ht);
        if (ret)
                return ret;
@@ -1976,7 +1975,7 @@ void __cds_lfht_resize_lazy_launch(struct cds_lfht *ht)
                        uatomic_dec(&ht->in_progress_resize);
                        return;
                }
-               work = malloc(sizeof(*work));
+               work = zmalloc(sizeof(*work));
                if (work == NULL) {
                        dbg_printf("error allocating resize work, bailing out\n");
                        uatomic_dec(&ht->in_progress_resize);