X-Git-Url: http://git.efficios.com/?p=lttng-tools.git;a=blobdiff_plain;f=src%2Fcommon%2Fhashtable%2Frculfhash.c;h=9baf4079877393a1289fdcca8a518286e530cddb;hp=ebdc4ffaa8ee84842eb18c4c0588042fade9c66c;hb=7567352fb68f5c3f49f549c579f5bd27c883bed2;hpb=9d8ad8e29d1f50e6a33c0e9a644c50b5e90364d2

diff --git a/src/common/hashtable/rculfhash.c b/src/common/hashtable/rculfhash.c
index ebdc4ffaa..9baf40798 100644
--- a/src/common/hashtable/rculfhash.c
+++ b/src/common/hashtable/rculfhash.c
@@ -278,13 +278,7 @@
 #include "rculfhash-internal.h"
 #include "urcu-flavor.h"
 
-/*
- * We need to lock pthread exit, which deadlocks __nptl_setxid in the runas
- * clone. This work-around will be allowed to be removed when runas.c gets
- * changed to do an exec() before issuing seteuid/setegid. See
- * http://sourceware.org/bugzilla/show_bug.cgi?id=10184 for details.
- */
-pthread_mutex_t lttng_libc_state_lock = PTHREAD_MUTEX_INITIALIZER;
+#include
 
 /*
  * Split-counters lazily update the global counter each 1024
@@ -570,6 +564,7 @@ void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
 
 static long nr_cpus_mask = -1;
 static long split_count_mask = -1;
+static int split_count_order = -1;
 
 #if defined(HAVE_SYSCONF)
 static void ht_init_nr_cpus_mask(void)
@@ -606,6 +601,8 @@ void alloc_split_items_count(struct cds_lfht *ht)
 			split_count_mask = DEFAULT_SPLIT_COUNT_MASK;
 		else
 			split_count_mask = nr_cpus_mask;
+		split_count_order =
+			cds_lfht_get_count_order_ulong(split_count_mask + 1);
 	}
 
 	assert(split_count_mask >= 0);
@@ -624,7 +621,7 @@ void free_split_items_count(struct cds_lfht *ht)
 	poison_free(ht->split_count);
 }
 
-#if defined(HAVE_SCHED_GETCPU)
+#if defined(HAVE_SCHED_GETCPU) && !defined(VALGRIND)
 static
 int ht_get_split_count_index(unsigned long hash)
 {
@@ -721,7 +718,7 @@ void check_resize(struct cds_lfht *ht, unsigned long size, uint32_t chain_len)
 	 * Use bucket-local length for small table expand and for
	 * environments lacking per-cpu data support.
	 */
-	if (count >= (1UL << COUNT_COMMIT_ORDER))
+	if (count >= (1UL << (COUNT_COMMIT_ORDER + split_count_order)))
 		return;
 	if (chain_len > 100)
 		dbg_printf("WARNING: large chain length: %u.\n",
@@ -735,7 +732,9 @@
 		growth = cds_lfht_get_count_order_u32(chain_len
 				- (CHAIN_LEN_TARGET - 1));
 		if ((ht->flags & CDS_LFHT_ACCOUNTING)
-				&& (size << growth) >= (1UL << COUNT_COMMIT_ORDER)) {
+				&& (size << growth)
+					>= (1UL << (COUNT_COMMIT_ORDER
+						+ split_count_order))) {
 			/*
 			 * If ideal growth expands the hash table size
 			 * beyond the "small hash table" sizes, use the
@@ -745,8 +744,8 @@
 			 * the chain length is used to expand the hash
 			 * table in every case.
 			 */
-			growth = COUNT_COMMIT_ORDER
-				- cds_lfht_get_count_order_u32(size);
+			growth = COUNT_COMMIT_ORDER + split_count_order
+				- cds_lfht_get_count_order_ulong(size);
 			if (growth <= 0)
 				return;
 		}
@@ -1776,7 +1775,7 @@ int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr)
 	}
 #endif
 	while (uatomic_read(&ht->in_progress_resize))
-		poll(NULL, 0, 100);	/* wait for 100ms */
+		(void) poll(NULL, 0, 100);	/* wait for 100ms */
 	ret = cds_lfht_delete_bucket(ht);
 	if (ret)
 		return ret;
@@ -1976,7 +1975,7 @@ void __cds_lfht_resize_lazy_launch(struct cds_lfht *ht)
 		uatomic_dec(&ht->in_progress_resize);
 		return;
 	}
-	work = malloc(sizeof(*work));
+	work = zmalloc(sizeof(*work));
 	if (work == NULL) {
 		dbg_printf("error allocating resize work, bailing out\n");
 		uatomic_dec(&ht->in_progress_resize);
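
The thread running through the check_resize() hunks is split-counter granularity: each of the 2^split_count_order per-CPU split-counters only folds its local count into the global approximation every 2^COUNT_COMMIT_ORDER updates (the "each 1024" in the retained comment), so the global node count can lag by roughly 2^(COUNT_COMMIT_ORDER + split_count_order) items. Below that size the count is too coarse to drive resizes, which is why both the early return and the "small hash table" growth cap now add split_count_order to the threshold. Below is a minimal sketch of the capped-growth computation; the constant values are illustrative, and get_count_order_ulong() is a simplified stand-in for cds_lfht_get_count_order_ulong().

#include <stdio.h>

/* Illustrative values; the real constants live in rculfhash.c. */
#define COUNT_COMMIT_ORDER	10	/* each split-counter syncs every 2^10 ops */
#define CHAIN_LEN_TARGET	1

/* Simplified stand-in for cds_lfht_get_count_order_ulong(): ceil(log2(x)). */
static int get_count_order_ulong(unsigned long x)
{
	int order = 0;

	while ((1UL << order) < x)
		order++;
	return order;
}

int main(void)
{
	int split_count_order = get_count_order_ulong(8);	/* e.g. 8 counters */
	unsigned long size = 1UL << 10;		/* current number of buckets */
	unsigned int chain_len = 12;		/* observed bucket chain length */

	/* Ideal growth derived from the chain length, as in check_resize(). */
	int growth = get_count_order_ulong(chain_len - (CHAIN_LEN_TARGET - 1));

	/*
	 * The split-counter approximation of the node count can lag by up to
	 * about 2^(COUNT_COMMIT_ORDER + split_count_order) items, so tables
	 * below that size are "small": cap chain-driven growth at that
	 * boundary instead of trusting the ideal growth.
	 */
	if ((size << growth) >= (1UL << (COUNT_COMMIT_ORDER + split_count_order)))
		growth = COUNT_COMMIT_ORDER + split_count_order
			- get_count_order_ulong(size);

	printf("growth capped to %d orders (new size 2^%d)\n",
		growth, get_count_order_ulong(size) + growth);
	return 0;
}

With 8 counters and a 2^10-bucket table, an observed chain of 12 asks for 4 orders of growth but gets capped to 3, landing exactly on the 2^13 small-table boundary.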
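The HAVE_SCHED_GETCPU guard now also excludes Valgrind builds, so those fall back to distributing counter updates by hash bits; the evident motivation is that sched_getcpu() is unreliable or unsupported under Valgrind's serialized scheduling, though the diff itself does not say. A sketch of the selection idea under that assumption; the real ht_get_split_count_index() differs in detail.

/* sched_getcpu() is a GNU extension. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static long split_count_mask = 7;	/* e.g. 8 split-counters */

/*
 * Prefer the current CPU's counter when sched_getcpu() is usable and
 * we are not running under Valgrind; otherwise spread updates across
 * counters using low-order hash bits.
 */
static int ht_get_split_count_index_sketch(unsigned long hash)
{
#if defined(HAVE_SCHED_GETCPU) && !defined(VALGRIND)
	int cpu = sched_getcpu();

	if (cpu >= 0)
		return (int) (cpu & split_count_mask);
#endif
	return (int) (hash & split_count_mask);	/* hash-based fallback */
}

int main(void)
{
	printf("counter index for hash 0x2a: %d\n",
		ht_get_split_count_index_sketch(0x2a));
	return 0;
}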
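Finally, the deferred resize work item is now allocated with zmalloc(), lttng-tools' zero-initializing malloc() wrapper, so any field not explicitly set after allocation reads as 0/NULL rather than heap garbage (the (void) cast on poll() is the same kind of tidy-up for a deliberately ignored return value). A compilable stand-in for zmalloc() follows; the work-item layout is hypothetical, for illustration only.

#include <stdio.h>
#include <stdlib.h>

/*
 * Minimal stand-in for lttng-tools' zmalloc(): like malloc(), but the
 * returned memory is zero-filled.
 */
static void *zmalloc(size_t len)
{
	return calloc(1, len);
}

/* Hypothetical work-item layout, for illustration only. */
struct rcu_resize_work_sketch {
	void (*func)(void *work);
	void *ht;
};

int main(void)
{
	struct rcu_resize_work_sketch *work = zmalloc(sizeof(*work));

	if (work == NULL)
		return 1;
	/* Every field starts out zero-initialized. */
	printf("ht=%p\n", work->ht);
	free(work);
	return 0;
}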