about summary refs log tree commit diff
path: root/mm/vmalloc.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--  mm/vmalloc.c  30
1 files changed, 22 insertions, 8 deletions
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 766ba2ce06b..15807bf9d28 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -389,12 +389,12 @@ nocache:
addr = ALIGN(first->va_end, align);
if (addr < vstart)
goto nocache;
- if (addr + size - 1 < addr)
+ if (addr + size < addr)
goto overflow;
} else {
addr = ALIGN(vstart, align);
- if (addr + size - 1 < addr)
+ if (addr + size < addr)
goto overflow;
n = vmap_area_root.rb_node;
@@ -421,7 +421,7 @@ nocache:
if (addr + cached_hole_size < first->va_start)
cached_hole_size = first->va_start - addr;
addr = ALIGN(first->va_end, align);
- if (addr + size - 1 < addr)
+ if (addr + size < addr)
goto overflow;
if (list_is_last(&first->list, &vmap_area_list))
@@ -553,13 +553,22 @@ static void vmap_debug_free_range(unsigned long start, unsigned long end)
* code, and it will be simple to change the scale factor if we find that it
* becomes a problem on bigger systems.
*/
+
+int sysctl_lazy_vfree_pages = 32UL * 1024 * 1024 / PAGE_SIZE;
+
+/*
+ * lazy_vfree_tlb_flush_all_threshold is the maximum size of TLB flush by
+ * area. Beyond that the whole TLB will be flushed.
+ */
+int sysctl_lazy_vfree_tlb_flush_all_threshold = SZ_512M;
+
static unsigned long lazy_max_pages(void)
{
unsigned int log;
log = fls(num_online_cpus());
- return log * (32UL * 1024 * 1024 / PAGE_SIZE);
+ return log * sysctl_lazy_vfree_pages;
}
static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
@@ -627,8 +636,13 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
if (nr)
atomic_sub(nr, &vmap_lazy_nr);
- if (nr || force_flush)
- flush_tlb_kernel_range(*start, *end);
+ if (nr || force_flush) {
+ if (nr > (sysctl_lazy_vfree_tlb_flush_all_threshold >> PAGE_SHIFT))
+ flush_tlb_all();
+ else
+ list_for_each_entry(va, &valist, purge_list)
+ flush_tlb_kernel_range(va->va_start, va->va_end);
+ }
if (nr) {
spin_lock(&vmap_area_lock);
@@ -2618,7 +2632,7 @@ static int s_show(struct seq_file *m, void *p)
return 0;
if (!(va->flags & VM_VM_AREA)) {
- seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
+ seq_printf(m, "0x%p-0x%p %7ld vm_map_ram\n",
(void *)va->va_start, (void *)va->va_end,
va->va_end - va->va_start);
return 0;
@@ -2626,7 +2640,7 @@ static int s_show(struct seq_file *m, void *p)
v = va->vm;
- seq_printf(m, "0x%pK-0x%pK %7ld",
+ seq_printf(m, "0x%p-0x%p %7ld",
v->addr, v->addr + v->size, v->size);
if (v->caller)