From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Date: Fri, 12 Apr 2024 19:43:26 +0000 (-0400)
Subject: Revert "mempool: Track allocated items per range"
X-Git-Url: https://git.efficios.com/?a=commitdiff_plain;h=2d9f5025ed0ee15cbb708921c3581b983b60da32;p=librseq.git

Revert "mempool: Track allocated items per range"

This reverts commit ffea0dea60059c855cb0b4e1f784352012ae0b37.

Counting allocated items per range has no inherent use and adds a
counter to the allocation/free fast path: we could not easily unmap
ranges when the allocated items count reaches 0 anyway, because the
pool free list spans items from all ranges.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
---

diff --git a/src/rseq-mempool.c b/src/rseq-mempool.c
index 3249570..061d38f 100644
--- a/src/rseq-mempool.c
+++ b/src/rseq-mempool.c
@@ -138,8 +138,6 @@ struct rseq_mempool_range {
 	void *mmap_addr;
 	size_t mmap_len;
 
-	size_t allocated_items;
-
 	/* Track alloc/free. */
 	unsigned long *alloc_bitmap;
 };
@@ -1107,10 +1105,8 @@ room_left:
 	addr = (void __rseq_percpu *) (range->base + item_offset);
 	range->next_unused += pool->item_len;
 end:
-	if (addr) {
-		range->allocated_items++;
+	if (addr)
 		set_alloc_slot(pool, range, item_offset);
-	}
 	pthread_mutex_unlock(&pool->lock);
 	if (addr) {
 		if (zeroed)
@@ -1175,13 +1171,6 @@ void librseq_mempool_percpu_free(void __rseq_percpu *_ptr, size_t stride)
 
 	pthread_mutex_lock(&pool->lock);
 	clear_alloc_slot(pool, range, item_offset);
-	if (!range->allocated_items) {
-		fprintf(stderr, "%s: Trying to free an item from an empty pool range within pool \"%s\" (%p), item offset: %zu, caller: %p.\n",
-			__func__, get_pool_name(pool), pool, item_offset,
-			(void *) __builtin_return_address(0));
-		abort();
-	}
-	range->allocated_items--;
 	/* Add ptr to head of free list */
 	head = pool->free_list_head;
 	if (pool->attr.poison_set)
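
To make the rationale concrete: the pool's free list threads its "next"
pointers through the freed items themselves, and items from every range
are pushed onto that single list in free order. Below is a minimal
standalone sketch of that layout (hypothetical code; the struct and
variable names are illustrative, not taken from rseq-mempool.c) showing
why a range whose allocated count drops to 0 still cannot be unmapped
cheaply:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Hypothetical model, not librseq code: one free list per pool, with
 * the "next" pointer stored inside each freed item, shared by all
 * ranges.
 */
struct free_list_node {
	struct free_list_node *next;
};

struct range {
	char items[4][64];	/* 4 items of 64 bytes each. */
};

int main(void)
{
	struct range *r0 = malloc(sizeof(*r0)), *r1 = malloc(sizeof(*r1));
	struct free_list_node *head = NULL, *node;
	size_t i;

	if (!r0 || !r1)
		abort();

	/* Free items in an order that interleaves the two ranges. */
	char *free_order[] = {
		r0->items[0], r1->items[0], r0->items[1], r1->items[1],
	};
	for (i = 0; i < 4; i++) {
		node = (struct free_list_node *) free_order[i];
		node->next = head;	/* Push on the shared list head. */
		head = node;
	}

	/*
	 * Even if a per-range allocated_items counter for r0 dropped to
	 * 0, r0's items sit in the middle of the shared list: unmapping
	 * r0 would leave dangling "next" pointers, and unlinking its
	 * items first would require walking the whole list.
	 */
	for (node = head; node; node = node->next) {
		uintptr_t a = (uintptr_t) node;
		int in_r0 = a >= (uintptr_t) r0 && a < (uintptr_t) (r0 + 1);

		printf("free item %p belongs to range %s\n",
		       (void *) node, in_r0 ? "r0" : "r1");
	}
	free(r0);
	free(r1);
	return 0;
}

Reclaiming a range would thus require either per-range free lists or a
list walk to unlink that range's items, neither of which fits the O(1)
alloc/free fast path, hence the revert.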