X-Git-Url: http://git.efficios.com/?p=lttng-tools.git;a=blobdiff_plain;f=src%2Fcommon%2Fhashtable%2Frculfhash.c;h=9baf4079877393a1289fdcca8a518286e530cddb;hp=b430a3dec76e681ebf561f33bb9fcc767c713037;hb=7567352fb68f5c3f49f549c579f5bd27c883bed2;hpb=3a1aff7a271016fa0104e45492ca94e7e06b2492

diff --git a/src/common/hashtable/rculfhash.c b/src/common/hashtable/rculfhash.c
index b430a3dec..9baf40798 100644
--- a/src/common/hashtable/rculfhash.c
+++ b/src/common/hashtable/rculfhash.c
@@ -278,13 +278,7 @@
 #include "rculfhash-internal.h"
 #include "urcu-flavor.h"
 
-/*
- * We need to lock pthread exit, which deadlocks __nptl_setxid in the runas
- * clone. This work-around will be allowed to be removed when runas.c gets
- * changed to do an exec() before issuing seteuid/setegid. See
- * http://sourceware.org/bugzilla/show_bug.cgi?id=10184 for details.
- */
-pthread_mutex_t lttng_libc_state_lock = PTHREAD_MUTEX_INITIALIZER;
+#include 
 
 /*
  * Split-counters lazily update the global counter each 1024
@@ -570,6 +564,7 @@ void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
 
 static long nr_cpus_mask = -1;
 static long split_count_mask = -1;
+static int split_count_order = -1;
 
 #if defined(HAVE_SYSCONF)
 static void ht_init_nr_cpus_mask(void)
@@ -606,6 +601,8 @@ void alloc_split_items_count(struct cds_lfht *ht)
 			split_count_mask = DEFAULT_SPLIT_COUNT_MASK;
 		else
 			split_count_mask = nr_cpus_mask;
+		split_count_order =
+			cds_lfht_get_count_order_ulong(split_count_mask + 1);
 	}
 
 	assert(split_count_mask >= 0);
@@ -624,7 +621,7 @@ void free_split_items_count(struct cds_lfht *ht)
 	poison_free(ht->split_count);
 }
 
-#if defined(HAVE_SCHED_GETCPU)
+#if defined(HAVE_SCHED_GETCPU) && !defined(VALGRIND)
 static
 int ht_get_split_count_index(unsigned long hash)
 {
@@ -721,14 +718,39 @@ void check_resize(struct cds_lfht *ht, unsigned long size, uint32_t chain_len)
 	 * Use bucket-local length for small table expand and for
 	 * environments lacking per-cpu data support.
 	 */
-	if (count >= (1UL << COUNT_COMMIT_ORDER))
+	if (count >= (1UL << (COUNT_COMMIT_ORDER + split_count_order)))
 		return;
 	if (chain_len > 100)
 		dbg_printf("WARNING: large chain length: %u.\n",
 			   chain_len);
-	if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
-		cds_lfht_resize_lazy_grow(ht, size,
-			cds_lfht_get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
+	if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD) {
+		int growth;
+
+		/*
+		 * Ideal growth calculated based on chain length.
+		 */
+		growth = cds_lfht_get_count_order_u32(chain_len
+				- (CHAIN_LEN_TARGET - 1));
+		if ((ht->flags & CDS_LFHT_ACCOUNTING)
+				&& (size << growth)
+				>= (1UL << (COUNT_COMMIT_ORDER
+					+ split_count_order))) {
+			/*
+			 * If ideal growth expands the hash table size
+			 * beyond the "small hash table" sizes, use the
+			 * maximum small hash table size to attempt
+			 * expanding the hash table. This only applies
+			 * when node accounting is available; otherwise
+			 * the chain length is used to expand the hash
+			 * table in every case.
+			 */
+			growth = COUNT_COMMIT_ORDER + split_count_order
+				- cds_lfht_get_count_order_ulong(size);
+			if (growth <= 0)
+				return;
+		}
+		cds_lfht_resize_lazy_grow(ht, size, growth);
+	}
 }
 
 static
@@ -1753,7 +1775,7 @@ int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr)
 	}
 #endif
 	while (uatomic_read(&ht->in_progress_resize))
-		poll(NULL, 0, 100);	/* wait for 100ms */
+		(void) poll(NULL, 0, 100);	/* wait for 100ms */
 	ret = cds_lfht_delete_bucket(ht);
 	if (ret)
 		return ret;
@@ -1953,7 +1975,7 @@ void __cds_lfht_resize_lazy_launch(struct cds_lfht *ht)
 		uatomic_dec(&ht->in_progress_resize);
 		return;
 	}
-	work = malloc(sizeof(*work));
+	work = zmalloc(sizeof(*work));
 	if (work == NULL) {
 		dbg_printf("error allocating resize work, bailing out\n");
 		uatomic_dec(&ht->in_progress_resize);
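
The new split_count_order caches the power-of-two order of the number of
split counters, so check_resize() does not have to recompute it on every
call. A minimal standalone sketch of the derivation, assuming
ht_init_nr_cpus_mask() rounds the CPU count up to the next power of two
as rculfhash.c does; count_order_ulong() below is a stand-in for
cds_lfht_get_count_order_ulong():

#include <stdio.h>

/* Stand-in for cds_lfht_get_count_order_ulong(): smallest order
 * such that (1UL << order) >= x. */
static int count_order_ulong(unsigned long x)
{
	int order = 0;

	while ((1UL << order) < x)
		order++;
	return order;
}

int main(void)
{
	long nr_cpus = 6;	/* e.g. from sysconf(_SC_NPROCESSORS_CONF) */
	long nr_cpus_mask, split_count_mask;
	int split_count_order;

	/* ht_init_nr_cpus_mask(): round up to a power of two, minus one. */
	nr_cpus_mask = (1UL << count_order_ulong(nr_cpus)) - 1;
	split_count_mask = nr_cpus_mask;	/* 7 for 6 CPUs */
	split_count_order = count_order_ulong(split_count_mask + 1);
	printf("split_count_order = %d\n", split_count_order);	/* prints 3 */
	return 0;
}

Each split counter only commits to the global counter periodically, so the
global approximation scales with the number of counters; assuming
COUNT_COMMIT_ORDER is 10 (its value in rculfhash.c), eight split counters
mean the global count is only trusted above 1UL << 13 == 8192 nodes, and
below that check_resize() falls back to the bucket-local chain length.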
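The bulk of the change caps the growth requested by a long chain once the
table would leave the "small hash table" range. The sketch below mirrors
that decision in isolation; the CDS_LFHT_ACCOUNTING flag test is elided,
and the constant values are assumptions taken from rculfhash.c:

#include <stdio.h>
#include <stdint.h>

/* Assumed constants, taken from rculfhash.c. */
#define COUNT_COMMIT_ORDER		10
#define CHAIN_LEN_TARGET		1
#define CHAIN_LEN_RESIZE_THRESHOLD	3

/* Stand-in for cds_lfht_get_count_order_u32(): smallest order
 * such that (1U << order) >= x. */
static int count_order_u32(uint32_t x)
{
	int order = 0;

	while ((1U << order) < x)
		order++;
	return order;
}

/*
 * Mirrors the capped-growth decision added to check_resize():
 * returns the number of orders to grow by, or 0 to skip resizing.
 * split_count_order is the order of the number of split counters,
 * e.g. 2 on a 4-CPU machine.
 */
static int capped_growth(unsigned long size, uint32_t chain_len,
		int split_count_order)
{
	int growth;

	if (chain_len < CHAIN_LEN_RESIZE_THRESHOLD)
		return 0;
	/* Ideal growth, calculated from the observed chain length. */
	growth = count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1));
	if ((size << growth)
			>= (1UL << (COUNT_COMMIT_ORDER + split_count_order))) {
		/* Cap expansion at the largest "small hash table" size. */
		growth = COUNT_COMMIT_ORDER + split_count_order
			- count_order_u32((uint32_t) size);
		if (growth <= 0)
			return 0;
	}
	return growth;
}

int main(void)
{
	/*
	 * 4 CPUs -> split_count_order == 2, so the "small table" limit
	 * is 1UL << 12 == 4096 buckets.  A chain of 8 in a 1024-bucket
	 * table suggests growing by 3 orders (to 8192), which overshoots
	 * the limit, so growth is capped at 2 (1024 -> 4096).
	 */
	printf("growth = %d\n", capped_growth(1024, 8, 2));
	return 0;
}

Once the table reaches the maximum small size, further expansion is driven
by the count-based check at the top of check_resize() rather than by
individual chain lengths, which avoids over-expanding on one unlucky bucket.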
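The last hunk swaps malloc() for zmalloc() when allocating the resize work
item, so the structure starts out zeroed rather than holding whatever the
allocator returns. zmalloc() is lttng-tools' allocation wrapper; a minimal
sketch, assuming it simply zero-initializes like calloc():

#include <stdlib.h>

/*
 * Sketch of a zero-initializing allocator in the spirit of
 * lttng-tools' zmalloc() (assumed to behave like calloc(1, len)):
 * the resize work item comes back fully cleared, so no stale
 * field survives from a previous allocation.
 */
static inline void *zmalloc(size_t len)
{
	return calloc(1, len);
}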