diff options
| author | Ali Al-Asadi <alasadi56@gmail.com> | 2017-09-16 18:03:39 +0200 |
|---|---|---|
| committer | Ali Al-Asadi <alasadi56@gmail.com> | 2017-09-16 18:03:57 +0200 |
| commit | 6d7a43ef2973fd127bf910a521d4808661fe7174 (patch) | |
| tree | 0d60991bc8253ef8f3cd8dd7db7f3f6b6607a39c /mm/page_alloc.c | |
| parent | 3d01b8790e7e69fb347bacdaafb85f7f1896067b (diff) | |
| parent | 870ce5261ecd42ac7035ad5e345e14ce0c7837fe (diff) | |
msm8960: get all the needed changes for kernel o8.0
Merge branch 'lineage-15.0' of https://github.com/tathanhlam66/android_kernel_htc_msm8960 into o8.0
Change-Id: Ib0698d0a99afbfe73e1bfccddcfb29df5d085045
Diffstat (limited to 'mm/page_alloc.c')
| -rw-r--r-- | mm/page_alloc.c | 31 |
1 file changed, 24 insertions, 7 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5dc2d64f9c9..bfe751e5a27 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -199,9 +199,21 @@ static char * const zone_names[MAX_NR_ZONES] = { "Movable", }; +/* + * Try to keep at least this much lowmem free. Do not allow normal + * allocations below this point, only high priority ones. Automatically + * tuned according to the amount of memory in the system. + */ int min_free_kbytes = 1024; int min_free_order_shift = 1; +/* + * Extra memory for the system to try freeing. Used to temporarily + * free memory, to make space for new workloads. Anyone can allocate + * down to the min watermarks controlled by min_free_kbytes above. + */ +int extra_free_kbytes = 0; + static unsigned long __meminitdata nr_kernel_pages; static unsigned long __meminitdata nr_all_pages; static unsigned long __meminitdata dma_reserve; @@ -5193,6 +5205,7 @@ static void setup_per_zone_lowmem_reserve(void) static void __setup_per_zone_wmarks(void) { unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); + unsigned long pages_low = extra_free_kbytes >> (PAGE_SHIFT - 10); unsigned long lowmem_pages = 0; struct zone *zone; unsigned long flags; @@ -5204,11 +5217,14 @@ static void __setup_per_zone_wmarks(void) } for_each_zone(zone) { - u64 tmp; + u64 min, low; spin_lock_irqsave(&zone->lock, flags); - tmp = (u64)pages_min * zone->present_pages; - do_div(tmp, lowmem_pages); + min = (u64)pages_min * zone->present_pages; + do_div(min, lowmem_pages); + low = (u64)pages_low * zone->present_pages; + do_div(low, vm_total_pages); + if (is_highmem(zone)) { /* * __GFP_HIGH and PF_MEMALLOC allocations usually don't @@ -5232,12 +5248,13 @@ static void __setup_per_zone_wmarks(void) * If it's a lowmem zone, reserve a number of pages * proportionate to the zone's size. 
*/ - zone->watermark[WMARK_MIN] = tmp; + zone->watermark[WMARK_MIN] = min; } - zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2); - zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1); - + zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + + low + (min >> 2); + zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + + low + (min >> 1); setup_zone_migrate_reserve(zone); spin_unlock_irqrestore(&zone->lock, flags); } |
