/*
 * mm/percpu-km.c - kernel memory based chunk allocation
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * Chunks are allocated as contiguous kernel memory using gfp
 * allocation.  This is to be used on nommu architectures.
 *
 * To use percpu-km,
 *
 * - define CONFIG_NEED_PER_CPU_KM from the arch Kconfig.
 *
 * - CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK must not be defined.  It's
 *   not compatible with PER_CPU_KM.  EMBED_FIRST_CHUNK should work
 *   fine.
 *
 * - NUMA is not supported.  When setting up the first chunk,
 *   @cpu_distance_fn should be NULL or report all CPUs to be nearer
 *   than or at LOCAL_DISTANCE.
 *
 * - It's best if the chunk size is a power-of-two multiple of
 *   PAGE_SIZE.  Because each chunk is allocated as a contiguous
 *   kernel memory block using alloc_pages(), memory will be wasted
 *   if the chunk size is not aligned.  The percpu-km code will whine
 *   about it.
 */

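/*
 * A sketch of the kind of Kconfig entry the first bullet above refers
 * to.  The exact location and dependencies of the symbol are up to the
 * architecture, so treat this as illustrative only:
 *
 *	config NEED_PER_CPU_KM
 *		depends on !SMP
 *		bool
 *		default y
 */
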
#if defined(CONFIG_SMP) && defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
#error "contiguous percpu allocation is incompatible with paged first chunk"
#endif

#include <linux/log2.h>

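/*
 * Every chunk is backed in full by the contiguous allocation made in
 * pcpu_create_chunk(), so there is nothing to do when individual page
 * ranges are populated or depopulated.
 */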
static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
			       int page_start, int page_end, gfp_t gfp)
{
	return 0;
}

static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
				  int page_start, int page_end)
{
	/* nada */
}

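/*
 * Allocate a new chunk as one physically contiguous, high-order page
 * block covering the whole group, point every page back at the chunk,
 * and mark the chunk fully populated under pcpu_lock.
 */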
static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
{
	const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;
	struct pcpu_chunk *chunk;
	struct page *pages;
	unsigned long flags;
	int i;

	chunk = pcpu_alloc_chunk(gfp);
	if (!chunk)
		return NULL;

	pages = alloc_pages(gfp, order_base_2(nr_pages));
	if (!pages) {
		pcpu_free_chunk(chunk);
		return NULL;
	}

	for (i = 0; i < nr_pages; i++)
		pcpu_set_page_chunk(nth_page(pages, i), chunk);

	chunk->data = pages;
	chunk->base_addr = page_address(pages) - pcpu_group_offsets[0];

	spin_lock_irqsave(&pcpu_lock, flags);
	pcpu_chunk_populated(chunk, 0, nr_pages, false);
	spin_unlock_irqrestore(&pcpu_lock, flags);

	pcpu_stats_chunk_alloc();
	trace_percpu_create_chunk(chunk->base_addr);

	return chunk;
}

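/*
 * Free the chunk's backing pages (if any were allocated) and the chunk
 * bookkeeping itself.  Tolerates a NULL chunk and a chunk whose page
 * allocation failed.
 */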
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
	const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;

	if (!chunk)
		return;

	pcpu_stats_chunk_dealloc();
	trace_percpu_destroy_chunk(chunk->base_addr);

	if (chunk->data)
		__free_pages(chunk->data, order_base_2(nr_pages));
	pcpu_free_chunk(chunk);
}

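/*
 * Chunks live in the kernel's linear mapping, so translating a percpu
 * address back to its struct page is a plain virt_to_page().
 */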
static struct page *pcpu_addr_to_page(void *addr)
{
	return virt_to_page(addr);
}

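/*
 * Sanity-check the first-chunk layout for percpu-km: all units must sit
 * in a single group, and because each chunk is allocated with a
 * power-of-two number of pages, warn when rounding up wastes memory.
 * For example, a 24-page unit group would be backed by a 32-page
 * (order-5) allocation, wasting 8 pages per chunk.
 */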
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai)
{
	size_t nr_pages, alloc_pages;

	/* all units must be in a single group */
	if (ai->nr_groups != 1) {
		pr_crit("can't handle more than one group\n");
		return -EINVAL;
	}

	nr_pages = (ai->groups[0].nr_units * ai->unit_size) >> PAGE_SHIFT;
	alloc_pages = roundup_pow_of_two(nr_pages);

	if (alloc_pages > nr_pages)
		pr_warn("wasting %zu pages per chunk\n",
			alloc_pages - nr_pages);

	return 0;
}