* Tag pointers returned by:
* - rseq_mempool_percpu_malloc(),
* - rseq_mempool_percpu_zmalloc(),
+ * - rseq_mempool_percpu_malloc_init(),
* - rseq_mempool_set_percpu_malloc(),
- * - rseq_mempool_set_percpu_zmalloc().
+ * - rseq_mempool_set_percpu_zmalloc(),
+ * - rseq_mempool_set_percpu_malloc_init().
*
* and passed as parameter to:
* - rseq_percpu_ptr(),
void __rseq_percpu *rseq_mempool_percpu_malloc(struct rseq_mempool *pool);
/*
- * rseq_mempool_percpu_zmalloc: Allocated zero-initialized memory from a per-cpu pool.
+ * rseq_mempool_percpu_zmalloc: Allocate zero-initialized memory from a per-cpu pool.
*
* Allocate memory for an item within the pool, and zero-initialize its
* memory on all CPUs. See rseq_mempool_percpu_malloc for details.
*/
void __rseq_percpu *rseq_mempool_percpu_zmalloc(struct rseq_mempool *pool);
+/*
+ * rseq_mempool_percpu_malloc_init: Allocate initialized memory from a per-cpu pool.
+ *
+ * Allocate memory for an item within the pool, and initialize its
+ * memory on all CPUs with content from @init_ptr of length @init_len.
+ * See rseq_mempool_percpu_malloc for details.
+ *
+ * Return NULL (errno=ENOMEM) if there is not enough space left in the
+ * pool to allocate an item. Return NULL (errno=EINVAL) if init_len is
+ * larger than the pool item_len.
+ *
+ * This API is MT-safe.
+ */
+void __rseq_percpu *rseq_mempool_percpu_malloc_init(struct rseq_mempool *pool,
+ void *init_ptr, size_t init_len);
+
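+/*
+ * Hypothetical usage sketch; the "pool" variable and the struct below
+ * are illustrative assumptions, not part of this API:
+ *
+ *	struct counter { intptr_t count; };
+ *	struct counter init = { .count = 1 };
+ *	struct counter __rseq_percpu *c;
+ *
+ *	c = (struct counter __rseq_percpu *)
+ *		rseq_mempool_percpu_malloc_init(pool, &init, sizeof(init));
+ *	if (!c)
+ *		abort();
+ *
+ * Each CPU then observes rseq_percpu_ptr(c, cpu)->count == 1.
+ */
+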
/*
* rseq_mempool_malloc: Allocate memory from a global pool.
*
return (void *) rseq_mempool_percpu_zmalloc(pool);
}
+/*
+ * rseq_mempool_malloc_init: Allocate initialized memory from a global pool.
+ *
+ * Wrapper to allocate memory from a global pool, which can be
+ * used directly without per-cpu indexing. Would normally be used
+ * with pools created with max_nr_cpus=1.
+ */
+static inline
+void *rseq_mempool_malloc_init(struct rseq_mempool *pool,
+ void *init_ptr, size_t init_len)
+{
+ return (void *) rseq_mempool_percpu_malloc_init(pool, init_ptr, init_len);
+}
+
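+/*
+ * Illustrative sketch, assuming @pool was created with max_nr_cpus=1;
+ * the returned pointer can be dereferenced without per-cpu indexing:
+ *
+ *	uint64_t seed = 42;
+ *	uint64_t *v = (uint64_t *)
+ *		rseq_mempool_malloc_init(pool, &seed, sizeof(seed));
+ */
+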
/*
* rseq_mempool_percpu_free: Free memory from a per-cpu pool.
*
*
* - rseq_mempool_percpu_malloc(),
* - rseq_mempool_percpu_zmalloc(),
+ * - rseq_mempool_percpu_malloc_init(),
* - rseq_mempool_set_percpu_malloc(),
- * - rseq_mempool_set_percpu_zmalloc().
+ * - rseq_mempool_set_percpu_zmalloc(),
+ * - rseq_mempool_set_percpu_malloc_init().
*
* The @stride optional argument to rseq_percpu_free() is a configurable
* stride, which must match the stride received by pool creation.
*
* - rseq_mempool_malloc(),
* - rseq_mempool_zmalloc(),
+ * - rseq_mempool_malloc_init(),
* - rseq_mempool_set_malloc(),
- * - rseq_mempool_set_zmalloc().
+ * - rseq_mempool_set_zmalloc(),
+ * - rseq_mempool_set_malloc_init().
*
* The @stride optional argument to rseq_free() is a configurable
* stride, which must match the stride received by pool creation. If
*
* - rseq_mempool_percpu_malloc(),
* - rseq_mempool_percpu_zmalloc(),
+ * - rseq_mempool_percpu_malloc_init(),
* - rseq_mempool_set_percpu_malloc(),
- * - rseq_mempool_set_percpu_zmalloc().
+ * - rseq_mempool_set_percpu_zmalloc(),
+ * - rseq_mempool_set_percpu_malloc_init().
*
* The macro rseq_percpu_ptr() preserves the type of the @ptr parameter
* for the returned pointer, but removes the __rseq_percpu annotation.
void __rseq_percpu *rseq_mempool_set_percpu_malloc(struct rseq_mempool_set *pool_set, size_t len);
/*
- * rseq_mempool_set_percpu_zmalloc: Allocated zero-initialized memory from a per-cpu pool set.
+ * rseq_mempool_set_percpu_zmalloc: Allocate zero-initialized memory from a per-cpu pool set.
*
* Allocate memory for an item within the pool, and zero-initialize its
* memory on all CPUs. See rseq_mempool_set_percpu_malloc for details.
*/
void __rseq_percpu *rseq_mempool_set_percpu_zmalloc(struct rseq_mempool_set *pool_set, size_t len);
+/*
+ * rseq_mempool_set_percpu_malloc_init: Allocate initialized memory from a per-cpu pool set.
+ *
+ * Allocate memory for an item within the pool, and initialize its
+ * memory on all CPUs with content from @init_ptr of length @len.
+ * See rseq_mempool_set_percpu_malloc for details.
+ *
+ * This API is MT-safe.
+ */
+void __rseq_percpu *rseq_mempool_set_percpu_malloc_init(struct rseq_mempool_set *pool_set,
+ void *init_ptr, size_t len);
+
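+/*
+ * Hypothetical usage sketch ("pool_set" and the struct are illustrative
+ * assumptions): @len both selects a large-enough pool within the set
+ * and bounds the number of bytes copied from @init_ptr.
+ *
+ *	struct item { int key; int flags; };
+ *	struct item tmpl = { .key = -1, .flags = 0 };
+ *	struct item __rseq_percpu *it;
+ *
+ *	it = (struct item __rseq_percpu *)
+ *		rseq_mempool_set_percpu_malloc_init(pool_set, &tmpl,
+ *			sizeof(tmpl));
+ */
+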
/*
* rseq_mempool_set_malloc: Allocate memory from a global pool set.
*
return (void *) rseq_mempool_set_percpu_zmalloc(pool_set, len);
}
+/*
+ * rseq_mempool_set_malloc_init: Allocate initialized memory from a global pool set.
+ *
+ * Wrapper to allocate memory from a global pool set, which can be
+ * used directly without per-cpu indexing. Would normally be used
+ * with pools created with max_nr_cpus=1.
+ */
+static inline
+void *rseq_mempool_set_malloc_init(struct rseq_mempool_set *pool_set, void *init_ptr, size_t len)
+{
+ return (void *) rseq_mempool_set_percpu_malloc_init(pool_set, init_ptr, len);
+}
+
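+/*
+ * Note: these global-pool wrappers cast the result to (void *), which
+ * intentionally drops the __rseq_percpu annotation: items from a
+ * max_nr_cpus=1 pool are meant to be dereferenced directly rather than
+ * through rseq_percpu_ptr().
+ */
+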
/*
* rseq_mempool_init_numa: Move pages to the NUMA node associated to their CPU topology.
*
}
}
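+/*
+ * Copy @init_len bytes from @init_ptr into the newly allocated item on
+ * each CPU of the range. The caller is responsible for checking
+ * @init_len against the pool item_len.
+ */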
+static
+void rseq_percpu_init_item(struct rseq_mempool *pool,
+ struct rseq_mempool_range *range, uintptr_t item_offset,
+ void *init_ptr, size_t init_len)
+{
+ int i;
+
+ for (i = 0; i < pool->attr.max_nr_cpus; i++) {
+ char *p = __rseq_pool_range_percpu_ptr(range, i,
+ item_offset, pool->attr.stride);
+ memcpy(p, init_ptr, init_len);
+ }
+}
+
static
void rseq_percpu_poison_item(struct rseq_mempool *pool,
struct rseq_mempool_range *range, uintptr_t item_offset)
}
static
-void __rseq_percpu *__rseq_percpu_malloc(struct rseq_mempool *pool, bool zeroed)
+void __rseq_percpu *__rseq_percpu_malloc(struct rseq_mempool *pool,
+ bool zeroed, void *init_ptr, size_t init_len)
{
struct rseq_mempool_range *range;
struct free_list_node *node;
uintptr_t item_offset;
void __rseq_percpu *addr;
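+
+	/* Reject init content larger than the pool's item size. */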
+ if (init_len > pool->item_len) {
+ errno = EINVAL;
+ return NULL;
+ }
pthread_mutex_lock(&pool->lock);
/* Get first entry from free list. */
node = pool->free_list_head;
if (addr)
set_alloc_slot(pool, range, item_offset);
pthread_mutex_unlock(&pool->lock);
- if (zeroed && addr)
- rseq_percpu_zero_item(pool, range, item_offset);
+ if (addr) {
+ if (zeroed)
+ rseq_percpu_zero_item(pool, range, item_offset);
+ else if (init_ptr) {
+ rseq_percpu_init_item(pool, range, item_offset,
+ init_ptr, init_len);
+ }
+ }
return addr;
}
void __rseq_percpu *rseq_mempool_percpu_malloc(struct rseq_mempool *pool)
{
- return __rseq_percpu_malloc(pool, false);
+ return __rseq_percpu_malloc(pool, false, NULL, 0);
}
void __rseq_percpu *rseq_mempool_percpu_zmalloc(struct rseq_mempool *pool)
{
- return __rseq_percpu_malloc(pool, true);
+ return __rseq_percpu_malloc(pool, true, NULL, 0);
+}
+
+void __rseq_percpu *rseq_mempool_percpu_malloc_init(struct rseq_mempool *pool,
+		void *init_ptr, size_t init_len)
+{
+	return __rseq_percpu_malloc(pool, false, init_ptr, init_len);
}
/* Always inline for __builtin_return_address(0). */
}
static
-void __rseq_percpu *__rseq_mempool_set_malloc(struct rseq_mempool_set *pool_set, size_t len, bool zeroed)
+void __rseq_percpu *__rseq_mempool_set_malloc(struct rseq_mempool_set *pool_set,
+ void *init_ptr, size_t len, bool zeroed)
{
int order, min_order = POOL_SET_MIN_ENTRY;
struct rseq_mempool *pool;
found:
pthread_mutex_unlock(&pool_set->lock);
if (pool) {
- addr = __rseq_percpu_malloc(pool, zeroed);
+ addr = __rseq_percpu_malloc(pool, zeroed, init_ptr, len);
if (addr == NULL && errno == ENOMEM) {
/*
* If the allocation failed, try again with a
void __rseq_percpu *rseq_mempool_set_percpu_malloc(struct rseq_mempool_set *pool_set, size_t len)
{
- return __rseq_mempool_set_malloc(pool_set, len, false);
+ return __rseq_mempool_set_malloc(pool_set, NULL, len, false);
}
void __rseq_percpu *rseq_mempool_set_percpu_zmalloc(struct rseq_mempool_set *pool_set, size_t len)
{
- return __rseq_mempool_set_malloc(pool_set, len, true);
+ return __rseq_mempool_set_malloc(pool_set, NULL, len, true);
+}
+
+void __rseq_percpu *rseq_mempool_set_percpu_malloc_init(struct rseq_mempool_set *pool_set,
+ void *init_ptr, size_t len)
+{
+	return __rseq_mempool_set_malloc(pool_set, init_ptr, len, false);
}
struct rseq_mempool_attr *rseq_mempool_attr_create(void)