Diffstat (limited to 'drivers/gpu/msm/kgsl_pool.c')
-rw-r--r--  drivers/gpu/msm/kgsl_pool.c  158
1 file changed, 31 insertions(+), 127 deletions(-)
diff --git a/drivers/gpu/msm/kgsl_pool.c b/drivers/gpu/msm/kgsl_pool.c
index f5402fdc..214bd50a 100644
--- a/drivers/gpu/msm/kgsl_pool.c
+++ b/drivers/gpu/msm/kgsl_pool.c
@@ -21,21 +21,18 @@
#include "kgsl_device.h"
#include "kgsl_pool.h"
-/**
- * struct kgsl_page_pool - Structure to hold information for the pool
- * @pool_order: Page order describing the size of the page
- * @page_count: Number of pages currently present in the pool
- * @reserved_pages: Number of pages reserved at init for the pool
- * @allocation_allowed: Tells if reserved pool gets exhausted, can we allocate
- * from system memory
- * @list_lock: Spinlock for page list in the pool
- * @page_list: List of pages held/reserved in this pool
+/*
+ * Maximum pool size in terms of pages
+ * = (Number of pools * Max size per pool)
*/
+#define KGSL_POOL_MAX_PAGES (2 * 4096)
+
+/* Set the max pool size to 8192 pages */
+static unsigned int kgsl_pool_max_pages = KGSL_POOL_MAX_PAGES;
+
struct kgsl_page_pool {
unsigned int pool_order;
int page_count;
- unsigned int reserved_pages;
- bool allocation_allowed;
spinlock_t list_lock;
struct list_head page_list;
};
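
Note: the new cap is global across the pools and is expressed in 4K-page units, so KGSL_POOL_MAX_PAGES (2 * 4096 = 8192 pages) is 32MB worth of base pages. A minimal sketch of the accounting it is compared against, assuming the driver's existing total is kept in base-page units (illustrative, not part of the patch):

/*
 * Illustrative sketch: a pool total in 4K-page units, where an
 * order-N entry accounts for 2^N base pages. Assumes kgsl_pools[]
 * and KGSL_NUM_POOLS as defined in this file.
 */
static int example_pool_size_total(void)
{
	int i, total = 0;

	for (i = 0; i < KGSL_NUM_POOLS; i++) {
		struct kgsl_page_pool *pool = &kgsl_pools[i];

		spin_lock(&pool->list_lock);
		total += pool->page_count * (1 << pool->pool_order);
		spin_unlock(&pool->list_lock);
	}
	return total;
}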
@@ -43,34 +40,15 @@ struct kgsl_page_pool {
static struct kgsl_page_pool kgsl_pools[] = {
{
.pool_order = 0,
- .reserved_pages = 2048,
- .allocation_allowed = true,
.list_lock = __SPIN_LOCK_UNLOCKED(kgsl_pools[0].list_lock),
.page_list = LIST_HEAD_INIT(kgsl_pools[0].page_list),
},
#ifndef CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS
{
- .pool_order = 1,
- .reserved_pages = 1024,
- .allocation_allowed = true,
+ .pool_order = 4,
.list_lock = __SPIN_LOCK_UNLOCKED(kgsl_pools[1].list_lock),
.page_list = LIST_HEAD_INIT(kgsl_pools[1].page_list),
},
- {
- .pool_order = 4,
- .reserved_pages = 256,
- .allocation_allowed = false,
- .list_lock = __SPIN_LOCK_UNLOCKED(kgsl_pools[2].list_lock),
- .page_list = LIST_HEAD_INIT(kgsl_pools[2].page_list),
- },
- {
- .pool_order = 8,
- .reserved_pages = 32,
- .allocation_allowed = false,
- .list_lock = __SPIN_LOCK_UNLOCKED(kgsl_pools[3].list_lock),
- .page_list = LIST_HEAD_INIT(kgsl_pools[3].page_list),
- },
-
#endif
};
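
Note: after this hunk only two pools remain, order 0 and order 4; the reserved order-1 and order-8 pools are deleted. For orientation, an order-N pool hands out chunks of PAGE_SIZE << N bytes; a trivial hypothetical helper (not in the driver):

/* Hypothetical helper, for orientation only: with 4K pages the two
 * remaining pools serve 4KB (order 0) and 64KB (order 4) chunks. */
static inline size_t pool_chunk_bytes(unsigned int order)
{
	return (size_t)PAGE_SIZE << order;
}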
@@ -90,28 +68,10 @@ _kgsl_get_pool_from_order(unsigned int order)
return NULL;
}
-/* Map the page into kernel and zero it out */
-static void
-_kgsl_pool_zero_page(struct page *p, unsigned int pool_order)
-{
- int i;
-
- for (i = 0; i < (1 << pool_order); i++) {
- struct page *page = nth_page(p, i);
- void *addr = kmap_atomic(page);
-
- memset(addr, 0, PAGE_SIZE);
- dmac_flush_range(addr, addr + PAGE_SIZE);
- kunmap_atomic(addr);
- }
-}
-
/* Add a page to specified pool */
static void
_kgsl_pool_add_page(struct kgsl_page_pool *pool, struct page *p)
{
- _kgsl_pool_zero_page(p, pool->pool_order);
-
spin_lock(&pool->list_lock);
list_add_tail(&p->lru, &pool->page_list);
pool->page_count++;
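
Note: _kgsl_pool_zero_page() and its call in _kgsl_pool_add_page() are removed, so pages are no longer scrubbed on their way into the pool; zeroing presumably has to happen elsewhere (e.g. when a page is handed out). For reference, the removed logic was equivalent to:

/* Equivalent of the removed zeroing, kept here for reference: map
 * each 4K subpage of the compound page, clear it, and flush it out
 * of the cache before it is recycled. */
static void example_zero_page(struct page *p, unsigned int order)
{
	int i;

	for (i = 0; i < (1 << order); i++) {
		void *addr = kmap_atomic(nth_page(p, i));

		memset(addr, 0, PAGE_SIZE);
		dmac_flush_range(addr, addr + PAGE_SIZE);
		kunmap_atomic(addr);
	}
}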
@@ -196,28 +156,20 @@ _kgsl_pool_shrink(struct kgsl_page_pool *pool, int num_pages)
* (current_pool_size - target_pages) pages from pool
* starting from higher order pool.
*/
-static unsigned long
-kgsl_pool_reduce(unsigned int target_pages, bool exit)
+static int
+kgsl_pool_reduce(unsigned int target_pages)
{
int total_pages = 0;
int i;
int nr_removed;
struct kgsl_page_pool *pool;
- unsigned long pcount = 0;
+ unsigned int pcount = 0;
total_pages = kgsl_pool_size_total();
for (i = (KGSL_NUM_POOLS - 1); i >= 0; i--) {
pool = &kgsl_pools[i];
- /*
- * Only reduce the pool sizes for pools which are allowed to
- * allocate memory unless we are at close, in which case the
- * reserved memory for all pools needs to be freed
- */
- if (!pool->allocation_allowed && !exit)
- continue;
-
total_pages -= pcount;
nr_removed = total_pages - target_pages;
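
Note: kgsl_pool_reduce() loses its exit flag: with no reserved pools left there is no "free everything at close" special case, and every pool is now always eligible for shrinking. A self-contained model of the policy, with stand-in names and counts in 4K-page units:

/* Self-contained model of the reduce policy (illustrative only):
 * free from the highest-order pool first until the running total
 * drops to target_pages; returns the number of pages removed. */
static int model_pool_reduce(int *pool_pages, int npools, int target)
{
	int i, total = 0, removed = 0;

	for (i = 0; i < npools; i++)
		total += pool_pages[i];

	for (i = npools - 1; i >= 0 && total > target; i--) {
		int nr = total - target;	/* still to remove */

		if (nr > pool_pages[i])		/* pool may be smaller */
			nr = pool_pages[i];
		pool_pages[i] -= nr;	/* stand-in for _kgsl_pool_shrink() */
		total -= nr;
		removed += nr;
	}
	return removed;
}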
@@ -296,16 +248,6 @@ void kgsl_pool_free_pages(struct page **pages, unsigned int pcount)
kgsl_pool_free_page(p);
}
}
-static int kgsl_pool_idx_lookup(unsigned int order)
-{
- int i;
-
- for (i = 0; i < KGSL_NUM_POOLS; i++)
- if (order == kgsl_pools[i].pool_order)
- return i;
-
- return -ENOMEM;
-}
/**
* kgsl_pool_alloc_page() - Allocate a page of requested size
@@ -316,56 +258,37 @@ static int kgsl_pool_idx_lookup(unsigned int order)
*
* Return total page count on success and negative value on failure
*/
-int kgsl_pool_alloc_page(int *page_size, struct page **pages,
- unsigned int pages_len, unsigned int *align)
+int kgsl_pool_alloc_page(int page_size, struct page **pages,
+ unsigned int pages_len)
{
int j;
int pcount = 0;
struct kgsl_page_pool *pool;
struct page *page = NULL;
struct page *p = NULL;
- int order = get_order(*page_size);
- int pool_idx;
- if ((pages == NULL) || pages_len < (*page_size >> PAGE_SHIFT))
+ if ((pages == NULL) || pages_len < (page_size >> PAGE_SHIFT))
return -EINVAL;
- pool = _kgsl_get_pool_from_order(order);
+ pool = _kgsl_get_pool_from_order(get_order(page_size));
+
if (pool == NULL)
return -EINVAL;
- pool_idx = kgsl_pool_idx_lookup(order);
page = _kgsl_pool_get_page(pool);
/* Allocate a new page if not allocated from pool */
if (page == NULL) {
- gfp_t gfp_mask = kgsl_gfp_mask(order);
-
- /* Only allocate non-reserved memory for certain pools */
- if (!pool->allocation_allowed && pool_idx > 0) {
- *page_size = PAGE_SIZE <<
- kgsl_pools[pool_idx-1].pool_order;
- *align = ilog2(*page_size);
- return -EAGAIN;
- }
+ gfp_t gfp_mask = kgsl_gfp_mask(get_order(page_size));
- page = alloc_pages(gfp_mask, order);
-
- if (!page) {
- if (pool_idx > 0) {
- /* Retry with lower order pages */
- *page_size = PAGE_SIZE <<
- kgsl_pools[pool_idx-1].pool_order;
- *align = ilog2(*page_size);
- return -EAGAIN;
- } else
- return -ENOMEM;
- }
+ page = alloc_pages(gfp_mask,
+ get_order(page_size));
- _kgsl_pool_zero_page(page, order);
+ if (!page)
+ return -ENOMEM;
}
- for (j = 0; j < (*page_size >> PAGE_SHIFT); j++) {
+ for (j = 0; j < (page_size >> PAGE_SHIFT); j++) {
p = nth_page(page, j);
pages[pcount] = p;
pcount++;
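
Note: because page_size is now passed by value, the old -EAGAIN "retry with a lower order" protocol and the *align out-parameter are gone; a failed system allocation simply surfaces as -ENOMEM. A hedged caller sketch under the new signature:

/* Hypothetical caller under the new signature: request one 64KB
 * (order-4) chunk; pages_len must cover page_size >> PAGE_SHIFT
 * entries (16 here). There is no fallback ladder anymore. */
static int example_alloc_64k(struct page **pages, unsigned int pages_len)
{
	/* returns the number of 4K pages filled in (16) on success,
	 * -EINVAL for bad arguments, or -ENOMEM on failure */
	return kgsl_pool_alloc_page(PAGE_SIZE << 4, pages, pages_len);
}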
@@ -384,34 +307,18 @@ void kgsl_pool_free_page(struct page *page)
page_order = compound_order(page);
- pool = _kgsl_get_pool_from_order(page_order);
- if (pool != NULL) {
- _kgsl_pool_add_page(pool, page);
- return;
+ if (kgsl_pool_size_total() < kgsl_pool_max_pages) {
+ pool = _kgsl_get_pool_from_order(page_order);
+ if (pool != NULL) {
+ _kgsl_pool_add_page(pool, page);
+ return;
+ }
}
/* Give back to system as not added to pool */
__free_pages(page, page_order);
}
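
Note: recycling is now gated on the global cap: a freed page goes back to its order's pool only while the aggregate pool size is below kgsl_pool_max_pages; otherwise it is released to the system. The decision reduces to the following sketch, using this file's helpers:

/* Sketch of the new recycling gate (illustrative only): keep the
 * page iff the pools are under the 8192-page cap and a pool of
 * this order exists. */
static bool example_should_recycle(unsigned int page_order)
{
	return (kgsl_pool_size_total() < kgsl_pool_max_pages) &&
	       (_kgsl_get_pool_from_order(page_order) != NULL);
}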
-static void kgsl_pool_reserve_pages(void)
-{
- int i, j;
-
- for (i = 0; i < KGSL_NUM_POOLS; i++) {
- struct page *page;
-
- for (j = 0; j < kgsl_pools[i].reserved_pages; j++) {
- int order = kgsl_pools[i].pool_order;
- gfp_t gfp_mask = kgsl_gfp_mask(order);
-
- page = alloc_pages(gfp_mask, order);
- if (page != NULL)
- _kgsl_pool_add_page(&kgsl_pools[i], page);
- }
- }
-}
-
/* Functions for the shrinker */
static unsigned long
@@ -426,7 +333,7 @@ kgsl_pool_shrink_scan_objects(struct shrinker *shrinker,
int target_pages = (nr > total_pages) ? 0 : (total_pages - nr);
/* Reduce pool size to target_pages */
- return kgsl_pool_reduce(target_pages, false);
+ return kgsl_pool_reduce(target_pages);
}
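
Note: the shrinker path is unchanged apart from the dropped flag; the target arithmetic still works in 4K-page units. An illustrative model of the scan-side calculation:

/* Illustrative model: asked to drop nr pages, the new pool target
 * is clamped at zero. E.g. with 8192 pooled pages and nr = 3000,
 * kgsl_pool_reduce() is asked to shrink the pools to 5192 pages. */
static int model_scan_target(unsigned long nr, int total_pages)
{
	return (nr > (unsigned long)total_pages) ?
			0 : total_pages - (int)nr;
}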
static unsigned long
@@ -447,9 +354,6 @@ static struct shrinker kgsl_pool_shrinker = {
void kgsl_init_page_pools(void)
{
- /* Reserve the appropriate number of pages for each pool */
- kgsl_pool_reserve_pages();
-
/* Initialize shrinker */
register_shrinker(&kgsl_pool_shrinker);
}
@@ -457,7 +361,7 @@ void kgsl_init_page_pools(void)
void kgsl_exit_page_pools(void)
{
/* Release all pages in pools, if any.*/
- kgsl_pool_reduce(0, true);
+ kgsl_pool_reduce(0);
/* Unregister shrinker */
unregister_shrinker(&kgsl_pool_shrinker);