Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/Makefile                        2
-rw-r--r--  drivers/gpu/ion/Kconfig                    18
-rw-r--r--  drivers/gpu/ion/Makefile                    5
-rw-r--r--  drivers/gpu/ion/ion.c                    1900
-rw-r--r--  drivers/gpu/ion/ion_carveout_heap.c       266
-rw-r--r--  drivers/gpu/ion/ion_chunk_heap.c          178
-rw-r--r--  drivers/gpu/ion/ion_cma_heap.c            241
-rw-r--r--  drivers/gpu/ion/ion_cma_secure_heap.c     697
-rw-r--r--  drivers/gpu/ion/ion_cp_heap.c             887
-rw-r--r--  drivers/gpu/ion/ion_heap.c                454
-rw-r--r--  drivers/gpu/ion/ion_page_pool.c           210
-rw-r--r--  drivers/gpu/ion/ion_priv.h                398
-rw-r--r--  drivers/gpu/ion/ion_removed_heap.c        351
-rw-r--r--  drivers/gpu/ion/ion_system_heap.c         546
-rw-r--r--  drivers/gpu/ion/ion_system_mapper.c       114
-rw-r--r--  drivers/gpu/ion/msm/Makefile                1
-rw-r--r--  drivers/gpu/ion/msm/ion_cp_common.h       136
-rw-r--r--  drivers/gpu/ion/msm/ion_iommu_map.c       549
-rw-r--r--  drivers/gpu/ion/msm/msm_ion.c            1061
-rw-r--r--  drivers/gpu/ion/msm/secure_buffer.c       298
-rw-r--r--  drivers/gpu/ion/msm_ion_priv.h            125
-rw-r--r--  drivers/gpu/ion/tegra/Makefile              1
-rw-r--r--  drivers/gpu/ion/tegra/tegra_ion.c          96
-rw-r--r--  drivers/gpu/msm/adreno.c                   12
-rw-r--r--  drivers/gpu/msm/adreno_a4xx_snapshot.c      8
-rw-r--r--  drivers/gpu/msm/adreno_ringbuffer.c         3
-rw-r--r--  drivers/gpu/msm/kgsl_iommu.c              133
-rw-r--r--  drivers/gpu/msm/kgsl_iommu.h               24
-rw-r--r--  drivers/gpu/msm/kgsl_mmu.h                 14
-rw-r--r--  drivers/gpu/msm/kgsl_pwrctrl.c              5
30 files changed, 110 insertions(+), 8623 deletions(-)
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index 41cd73177e1..d025f5eb5db 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -1,3 +1,3 @@
-obj-y += drm/ vga/ ion/
+obj-y += drm/ vga/
obj-$(CONFIG_TEGRA_HOST1X) += host1x/
obj-$(CONFIG_MSM_KGSL) += msm/
diff --git a/drivers/gpu/ion/Kconfig b/drivers/gpu/ion/Kconfig
deleted file mode 100644
index 5bb254b1655..00000000000
--- a/drivers/gpu/ion/Kconfig
+++ /dev/null
@@ -1,18 +0,0 @@
-menuconfig ION
- tristate "Ion Memory Manager"
- select GENERIC_ALLOCATOR
- select DMA_SHARED_BUFFER
- help
-	  Choose this option to enable the ION Memory Manager.
-
-config ION_TEGRA
- tristate "Ion for Tegra"
- depends on ARCH_TEGRA && ION
- help
-	  Choose this option if you wish to use ion on an NVIDIA Tegra.
-
-config ION_MSM
- tristate "Ion for MSM"
- depends on ARCH_MSM && ION
- help
- Choose this option if you wish to use ion on an MSM target.
diff --git a/drivers/gpu/ion/Makefile b/drivers/gpu/ion/Makefile
deleted file mode 100644
index 108abe67b45..00000000000
--- a/drivers/gpu/ion/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-obj-$(CONFIG_ION) += ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \
- ion_carveout_heap.o ion_chunk_heap.o
-obj-$(CONFIG_CMA) += ion_cma_heap.o ion_cma_secure_heap.o
-obj-$(CONFIG_ION_TEGRA) += tegra/
-obj-$(CONFIG_ION_MSM) += ion_cp_heap.o ion_removed_heap.o msm/
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
deleted file mode 100644
index eab7d35a779..00000000000
--- a/drivers/gpu/ion/ion.c
+++ /dev/null
@@ -1,1900 +0,0 @@
-/*
-
- * drivers/gpu/ion/ion.c
- *
- * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/file.h>
-#include <linux/freezer.h>
-#include <linux/fs.h>
-#include <linux/anon_inodes.h>
-#include <linux/ion.h>
-#include <linux/kthread.h>
-#include <linux/list.h>
-#include <linux/list_sort.h>
-#include <linux/memblock.h>
-#include <linux/miscdevice.h>
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/mm_types.h>
-#include <linux/rbtree.h>
-#include <linux/slab.h>
-#include <linux/seq_file.h>
-#include <linux/uaccess.h>
-#include <linux/debugfs.h>
-#include <linux/dma-buf.h>
-#include <linux/idr.h>
-#include <linux/msm_ion.h>
-#include <trace/events/kmem.h>
-
-
-#include "ion_priv.h"
-
-/**
- * struct ion_device - the metadata of the ion device node
- * @dev: the actual misc device
- * @buffers: an rb tree of all the existing buffers
- * @buffer_lock: lock protecting the tree of buffers
- * @lock: rwsem protecting the tree of heaps and clients
- * @heaps: list of all the heaps in the system
- * @clients: rb tree of all the clients attached to the device
- */
-struct ion_device {
- struct miscdevice dev;
- struct rb_root buffers;
- struct mutex buffer_lock;
- struct rw_semaphore lock;
- struct plist_head heaps;
- long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
- unsigned long arg);
- struct rb_root clients;
- struct dentry *debug_root;
- struct dentry *heaps_debug_root;
- struct dentry *clients_debug_root;
-};
-
-/**
- * struct ion_client - a process/hw block local address space
- * @node: node in the tree of all clients
- * @dev: backpointer to ion device
- * @handles: an rb tree of all the handles in this client
- * @idr: an idr space for allocating handle ids
- * @lock: lock protecting the tree of handles
- * @name: used for debugging
- * @task: used for debugging
- *
- * A client represents a list of buffers this client may access.
- * The mutex stored here is used to protect both the handle tree
- * and the handles themselves, and should be held while modifying either.
- */
-struct ion_client {
- struct rb_node node;
- struct ion_device *dev;
- struct rb_root handles;
- struct idr idr;
- struct mutex lock;
- char *name;
- struct task_struct *task;
- pid_t pid;
- struct dentry *debug_root;
-};
-
-/**
- * ion_handle - a client local reference to a buffer
- * @ref: reference count
- * @client: back pointer to the client the buffer resides in
- * @buffer: pointer to the buffer
- * @node: node in the client's handle rbtree
- * @kmap_cnt: count of times this client has mapped to kernel
- * @id: client-unique id allocated by client->idr
- *
- * Modifications to node, kmap_cnt or buffer should be protected by the
- * lock in the client. Other fields are never changed after initialization.
- */
-struct ion_handle {
- struct kref ref;
- struct ion_client *client;
- struct ion_buffer *buffer;
- struct rb_node node;
- unsigned int kmap_cnt;
- int id;
-};
-
-bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
-{
- return ((buffer->flags & ION_FLAG_CACHED) &&
- !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
-}
-
-bool ion_buffer_cached(struct ion_buffer *buffer)
-{
- return !!(buffer->flags & ION_FLAG_CACHED);
-}
-
-/* this function should only be called while dev->lock is held */
-static void ion_buffer_add(struct ion_device *dev,
- struct ion_buffer *buffer)
-{
- struct rb_node **p = &dev->buffers.rb_node;
- struct rb_node *parent = NULL;
- struct ion_buffer *entry;
-
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct ion_buffer, node);
-
- if (buffer < entry) {
- p = &(*p)->rb_left;
- } else if (buffer > entry) {
- p = &(*p)->rb_right;
- } else {
-			pr_err("%s: buffer already found.\n", __func__);
- BUG();
- }
- }
-
- rb_link_node(&buffer->node, parent, p);
- rb_insert_color(&buffer->node, &dev->buffers);
-}
-
-static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);
-
-/* this function should only be called while dev->lock is held */
-static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
- struct ion_device *dev,
- unsigned long len,
- unsigned long align,
- unsigned long flags)
-{
- struct ion_buffer *buffer;
- struct sg_table *table;
- struct scatterlist *sg;
- int i, ret;
-
- buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
- if (!buffer)
- return ERR_PTR(-ENOMEM);
-
- buffer->heap = heap;
- buffer->flags = flags;
- kref_init(&buffer->ref);
-
- ret = heap->ops->allocate(heap, buffer, len, align, flags);
-
- if (ret) {
- if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
- goto err2;
-
- ion_heap_freelist_drain(heap, 0);
- ret = heap->ops->allocate(heap, buffer, len, align,
- flags);
- if (ret)
- goto err2;
- }
-
- buffer->dev = dev;
- buffer->size = len;
- buffer->flags = flags;
- INIT_LIST_HEAD(&buffer->vmas);
-
- table = heap->ops->map_dma(heap, buffer);
- if (WARN_ONCE(table == NULL, "heap->ops->map_dma should return ERR_PTR on error"))
- table = ERR_PTR(-EINVAL);
- if (IS_ERR(table)) {
- heap->ops->free(buffer);
- kfree(buffer);
- return ERR_PTR(PTR_ERR(table));
- }
- buffer->sg_table = table;
- if (ion_buffer_fault_user_mappings(buffer)) {
- for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
- i) {
- if (sg_dma_len(sg) == PAGE_SIZE)
- continue;
- pr_err("%s: cached mappings that will be faulted in "
- "must have pagewise sg_lists\n", __func__);
- ret = -EINVAL;
- goto err;
- }
-
- ret = ion_buffer_alloc_dirty(buffer);
- if (ret)
- goto err;
- }
-
- mutex_init(&buffer->lock);
- /* this will set up dma addresses for the sglist -- it is not
- technically correct as per the dma api -- a specific
- device isn't really taking ownership here. However, in practice on
- our systems the only dma_address space is physical addresses.
- Additionally, we can't afford the overhead of invalidating every
- allocation via dma_map_sg. The implicit contract here is that
-	   memory coming from the heaps is ready for dma, i.e. if it has a
- cached mapping that mapping has been invalidated */
- for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
- if (sg_dma_address(sg) == 0)
- sg_dma_address(sg) = sg_phys(sg);
- }
- mutex_lock(&dev->buffer_lock);
- ion_buffer_add(dev, buffer);
- mutex_unlock(&dev->buffer_lock);
- return buffer;
-
-err:
- heap->ops->unmap_dma(heap, buffer);
- heap->ops->free(buffer);
-err2:
- kfree(buffer);
- return ERR_PTR(ret);
-}
-
-void ion_buffer_destroy(struct ion_buffer *buffer)
-{
- if (WARN_ON(buffer->kmap_cnt > 0))
- buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
- buffer->heap->ops->unmap_dma(buffer->heap, buffer);
-
- buffer->heap->ops->free(buffer);
- if (buffer->flags & ION_FLAG_CACHED)
- kfree(buffer->dirty);
- kfree(buffer);
-}
-
-static void _ion_buffer_destroy(struct kref *kref)
-{
- struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
- struct ion_heap *heap = buffer->heap;
- struct ion_device *dev = buffer->dev;
-
- mutex_lock(&dev->buffer_lock);
- rb_erase(&buffer->node, &dev->buffers);
- mutex_unlock(&dev->buffer_lock);
-
- if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
- ion_heap_freelist_add(heap, buffer);
- else
- ion_buffer_destroy(buffer);
-}
-
-static void ion_buffer_get(struct ion_buffer *buffer)
-{
- kref_get(&buffer->ref);
-}
-
-static int ion_buffer_put(struct ion_buffer *buffer)
-{
- return kref_put(&buffer->ref, _ion_buffer_destroy);
-}
-
-static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
-{
- mutex_lock(&buffer->lock);
- buffer->handle_count++;
- mutex_unlock(&buffer->lock);
-}
-
-static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
-{
- /*
- * when a buffer is removed from a handle, if it is not in
- * any other handles, copy the taskcomm and the pid of the
- * process it's being removed from into the buffer. At this
- * point there will be no way to track what processes this buffer is
- * being used by, it only exists as a dma_buf file descriptor.
- * The taskcomm and pid can provide a debug hint as to where this fd
- * is in the system
- */
- mutex_lock(&buffer->lock);
- buffer->handle_count--;
- BUG_ON(buffer->handle_count < 0);
- if (!buffer->handle_count) {
- struct task_struct *task;
-
- task = current->group_leader;
- get_task_comm(buffer->task_comm, task);
- buffer->pid = task_pid_nr(task);
- }
- mutex_unlock(&buffer->lock);
-}
-
-static struct ion_handle *ion_handle_create(struct ion_client *client,
- struct ion_buffer *buffer)
-{
- struct ion_handle *handle;
-
- handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
- if (!handle)
- return ERR_PTR(-ENOMEM);
- kref_init(&handle->ref);
- RB_CLEAR_NODE(&handle->node);
- handle->client = client;
- ion_buffer_get(buffer);
- ion_buffer_add_to_handle(buffer);
- handle->buffer = buffer;
-
- return handle;
-}
-
-static void ion_handle_kmap_put(struct ion_handle *);
-
-static void ion_handle_destroy(struct kref *kref)
-{
- struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
- struct ion_client *client = handle->client;
- struct ion_buffer *buffer = handle->buffer;
-
- mutex_lock(&buffer->lock);
- while (handle->kmap_cnt)
- ion_handle_kmap_put(handle);
- mutex_unlock(&buffer->lock);
-
- idr_remove(&client->idr, handle->id);
- if (!RB_EMPTY_NODE(&handle->node))
- rb_erase(&handle->node, &client->handles);
-
- ion_buffer_remove_from_handle(buffer);
- ion_buffer_put(buffer);
-
- kfree(handle);
-}
-
-struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
-{
- return handle->buffer;
-}
-
-static void ion_handle_get(struct ion_handle *handle)
-{
- kref_get(&handle->ref);
-}
-
-int ion_handle_put(struct ion_handle *handle)
-{
- struct ion_client *client = handle->client;
- int ret;
-
- mutex_lock(&client->lock);
- ret = kref_put(&handle->ref, ion_handle_destroy);
- mutex_unlock(&client->lock);
-
- return ret;
-}
-
-static struct ion_handle *ion_handle_lookup(struct ion_client *client,
- struct ion_buffer *buffer)
-{
- struct rb_node *n;
-
- for (n = rb_first(&client->handles); n; n = rb_next(n)) {
- struct ion_handle *handle = rb_entry(n, struct ion_handle,
- node);
- if (handle->buffer == buffer)
- return handle;
- }
- return ERR_PTR(-EINVAL);
-}
-
-struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
- int id)
-{
- struct ion_handle *handle;
-
- mutex_lock(&client->lock);
- handle = idr_find(&client->idr, id);
- if (handle)
- ion_handle_get(handle);
- mutex_unlock(&client->lock);
-
- return handle ? handle : ERR_PTR(-EINVAL);
-}
-
-static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
-{
- WARN_ON(!mutex_is_locked(&client->lock));
- return (idr_find(&client->idr, handle->id) == handle);
-}
-
-static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
-{
- int id;
- struct rb_node **p = &client->handles.rb_node;
- struct rb_node *parent = NULL;
- struct ion_handle *entry;
-
- id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
- if (id < 0)
- return id;
-
- handle->id = id;
-
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct ion_handle, node);
-
- if (handle < entry)
- p = &(*p)->rb_left;
- else if (handle > entry)
- p = &(*p)->rb_right;
- else
-			WARN(1, "%s: handle already found.\n", __func__);
- }
-
- rb_link_node(&handle->node, parent, p);
- rb_insert_color(&handle->node, &client->handles);
-
- return 0;
-}
-
-struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
- size_t align, unsigned int heap_id_mask,
- unsigned int flags)
-{
- struct ion_handle *handle;
- struct ion_device *dev = client->dev;
- struct ion_buffer *buffer = NULL;
- struct ion_heap *heap;
- int ret;
- unsigned long secure_allocation = flags & ION_FLAG_SECURE;
- const unsigned int MAX_DBG_STR_LEN = 64;
- char dbg_str[MAX_DBG_STR_LEN];
- unsigned int dbg_str_idx = 0;
-
- dbg_str[0] = '\0';
-
- /*
- * For now, we don't want to fault in pages individually since
- * clients are already doing manual cache maintenance. In
- * other words, the implicit caching infrastructure is in
- * place (in code) but should not be used.
- */
- flags |= ION_FLAG_CACHED_NEEDS_SYNC;
-
- pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
- len, align, heap_id_mask, flags);
- /*
- * traverse the list of heaps available in this system in priority
- * order. If the heap type is supported by the client, and matches the
- * request of the caller allocate from it. Repeat until allocate has
- * succeeded or all heaps have been tried
- */
- if (WARN_ON(!len))
- return ERR_PTR(-EINVAL);
-
- len = PAGE_ALIGN(len);
-
- down_read(&dev->lock);
- plist_for_each_entry(heap, &dev->heaps, node) {
- /* if the caller didn't specify this heap id */
- if (!((1 << heap->id) & heap_id_mask))
- continue;
- /* Do not allow un-secure heap if secure is specified */
- if (secure_allocation &&
- !ion_heap_allow_secure_allocation(heap->type))
- continue;
- trace_ion_alloc_buffer_start(client->name, heap->name, len,
- heap_id_mask, flags);
- buffer = ion_buffer_create(heap, dev, len, align, flags);
- trace_ion_alloc_buffer_end(client->name, heap->name, len,
- heap_id_mask, flags);
- if (!IS_ERR(buffer))
- break;
-
- trace_ion_alloc_buffer_fallback(client->name, heap->name, len,
- heap_id_mask, flags,
- PTR_ERR(buffer));
- if (dbg_str_idx < MAX_DBG_STR_LEN) {
- unsigned int len_left = MAX_DBG_STR_LEN-dbg_str_idx-1;
- int ret_value = snprintf(&dbg_str[dbg_str_idx],
- len_left, "%s ", heap->name);
- if (ret_value >= len_left) {
- /* overflow */
- dbg_str[MAX_DBG_STR_LEN-1] = '\0';
- dbg_str_idx = MAX_DBG_STR_LEN;
- } else if (ret_value >= 0) {
- dbg_str_idx += ret_value;
- } else {
- /* error */
- dbg_str[MAX_DBG_STR_LEN-1] = '\0';
- }
- }
- }
- up_read(&dev->lock);
-
- if (buffer == NULL) {
- trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
- heap_id_mask, flags, -ENODEV);
- return ERR_PTR(-ENODEV);
- }
-
- if (IS_ERR(buffer)) {
- trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
- heap_id_mask, flags,
- PTR_ERR(buffer));
- pr_debug("ION is unable to allocate 0x%zx bytes (alignment: 0x%zx) from heap(s) %sfor client %s\n",
- len, align, dbg_str, client->name);
- return ERR_PTR(PTR_ERR(buffer));
- }
-
- handle = ion_handle_create(client, buffer);
-
- /*
- * ion_buffer_create will create a buffer with a ref_cnt of 1,
- * and ion_handle_create will take a second reference, drop one here
- */
- ion_buffer_put(buffer);
-
- if (IS_ERR(handle))
- return handle;
-
- mutex_lock(&client->lock);
- ret = ion_handle_add(client, handle);
- mutex_unlock(&client->lock);
- if (ret) {
- ion_handle_put(handle);
- handle = ERR_PTR(ret);
- }
-
- return handle;
-}
-EXPORT_SYMBOL(ion_alloc);
-
-void ion_free(struct ion_client *client, struct ion_handle *handle)
-{
- bool valid_handle;
-
- BUG_ON(client != handle->client);
-
- mutex_lock(&client->lock);
- valid_handle = ion_handle_validate(client, handle);
- if (!valid_handle) {
- WARN(1, "%s: invalid handle passed to free.\n", __func__);
- mutex_unlock(&client->lock);
- return;
- }
- mutex_unlock(&client->lock);
- ion_handle_put(handle);
-}
-EXPORT_SYMBOL(ion_free);
-
-int ion_phys(struct ion_client *client, struct ion_handle *handle,
- ion_phys_addr_t *addr, size_t *len)
-{
- struct ion_buffer *buffer;
- int ret;
-
- mutex_lock(&client->lock);
- if (!ion_handle_validate(client, handle)) {
- mutex_unlock(&client->lock);
- return -EINVAL;
- }
-
- buffer = handle->buffer;
-
- if (!buffer->heap->ops->phys) {
- pr_err("%s: ion_phys is not implemented by this heap.\n",
- __func__);
- mutex_unlock(&client->lock);
- return -ENODEV;
- }
- mutex_unlock(&client->lock);
- ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
- return ret;
-}
-EXPORT_SYMBOL(ion_phys);
-
-static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
-{
- void *vaddr;
-
- if (buffer->kmap_cnt) {
- buffer->kmap_cnt++;
- return buffer->vaddr;
- }
- vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
- if (WARN_ONCE(vaddr == NULL, "heap->ops->map_kernel should return ERR_PTR on error"))
- return ERR_PTR(-EINVAL);
- if (IS_ERR(vaddr))
- return vaddr;
- buffer->vaddr = vaddr;
- buffer->kmap_cnt++;
- return vaddr;
-}
-
-static void *ion_handle_kmap_get(struct ion_handle *handle)
-{
- struct ion_buffer *buffer = handle->buffer;
- void *vaddr;
-
- if (handle->kmap_cnt) {
- handle->kmap_cnt++;
- return buffer->vaddr;
- }
- vaddr = ion_buffer_kmap_get(buffer);
- if (IS_ERR(vaddr))
- return vaddr;
- handle->kmap_cnt++;
- return vaddr;
-}
-
-static void ion_buffer_kmap_put(struct ion_buffer *buffer)
-{
- buffer->kmap_cnt--;
- if (!buffer->kmap_cnt) {
- buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
- buffer->vaddr = NULL;
- }
-}
-
-static void ion_handle_kmap_put(struct ion_handle *handle)
-{
- struct ion_buffer *buffer = handle->buffer;
-
- handle->kmap_cnt--;
- if (!handle->kmap_cnt)
- ion_buffer_kmap_put(buffer);
-}
-
-void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
-{
- struct ion_buffer *buffer;
- void *vaddr;
-
- mutex_lock(&client->lock);
- if (!ion_handle_validate(client, handle)) {
- pr_err("%s: invalid handle passed to map_kernel.\n",
- __func__);
- mutex_unlock(&client->lock);
- return ERR_PTR(-EINVAL);
- }
-
- buffer = handle->buffer;
-
- if (!handle->buffer->heap->ops->map_kernel) {
- pr_err("%s: map_kernel is not implemented by this heap.\n",
- __func__);
- mutex_unlock(&client->lock);
- return ERR_PTR(-ENODEV);
- }
-
- mutex_lock(&buffer->lock);
- vaddr = ion_handle_kmap_get(handle);
- mutex_unlock(&buffer->lock);
- mutex_unlock(&client->lock);
- return vaddr;
-}
-EXPORT_SYMBOL(ion_map_kernel);
-
-void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
-{
- struct ion_buffer *buffer;
-
- mutex_lock(&client->lock);
- buffer = handle->buffer;
- mutex_lock(&buffer->lock);
- ion_handle_kmap_put(handle);
- mutex_unlock(&buffer->lock);
- mutex_unlock(&client->lock);
-}
-EXPORT_SYMBOL(ion_unmap_kernel);
-
-static int ion_debug_client_show(struct seq_file *s, void *unused)
-{
- struct ion_client *client = s->private;
- struct rb_node *n;
-
- seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s\n",
- "heap_name", "size_in_bytes", "handle refcount",
- "buffer");
-
- mutex_lock(&client->lock);
- for (n = rb_first(&client->handles); n; n = rb_next(n)) {
- struct ion_handle *handle = rb_entry(n, struct ion_handle,
- node);
-
- seq_printf(s, "%16.16s: %16zx : %16d : %12p",
- handle->buffer->heap->name,
- handle->buffer->size,
- atomic_read(&handle->ref.refcount),
- handle->buffer);
-
- seq_printf(s, "\n");
- }
- mutex_unlock(&client->lock);
- return 0;
-}
-
-static int ion_debug_client_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ion_debug_client_show, inode->i_private);
-}
-
-static const struct file_operations debug_client_fops = {
- .open = ion_debug_client_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static bool startswith(const char *string, const char *prefix)
-{
- size_t l1 = strlen(string);
- size_t l2 = strlen(prefix);
- if (l2 > l1)
- return false;
- return strncmp(string, prefix, min(l1, l2)) == 0;
-}
-
-static int ion_get_client_serial(const struct rb_root *root,
- const unsigned char *name)
-{
- int serial = -1;
- struct rb_node *node;
- for (node = rb_first(root); node; node = rb_next(node)) {
- int n;
- char *serial_string;
- struct ion_client *client = rb_entry(node, struct ion_client,
- node);
- if (!startswith(client->name, name))
- continue;
- serial_string = strrchr(client->name, '-');
- if (!serial_string)
- continue;
- serial_string++;
-		if (sscanf(serial_string, "%d", &n) != 1)
-			continue;
-		serial = max(serial, n);
- }
- return serial + 1;
-}
-
-struct ion_client *ion_client_create(struct ion_device *dev,
- const char *name)
-{
- struct ion_client *client;
- struct task_struct *task;
- struct rb_node **p;
- struct rb_node *parent = NULL;
- struct ion_client *entry;
- pid_t pid;
- int client_serial;
-
- if (!name) {
- pr_err("%s: Name cannot be null\n", __func__);
- return ERR_PTR(-EINVAL);
- }
-
- get_task_struct(current->group_leader);
- task_lock(current->group_leader);
- pid = task_pid_nr(current->group_leader);
- /* don't bother to store task struct for kernel threads,
- they can't be killed anyway */
- if (current->group_leader->flags & PF_KTHREAD) {
- put_task_struct(current->group_leader);
- task = NULL;
- } else {
- task = current->group_leader;
- }
- task_unlock(current->group_leader);
-
- client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
- if (!client) {
- if (task)
- put_task_struct(current->group_leader);
- return ERR_PTR(-ENOMEM);
- }
-
- client->dev = dev;
- client->handles = RB_ROOT;
- idr_init(&client->idr);
- mutex_init(&client->lock);
-
- client->task = task;
- client->pid = pid;
-
- down_write(&dev->lock);
- client_serial = ion_get_client_serial(&dev->clients, name);
- client->name = kasprintf(GFP_KERNEL, "%s-%d", name, client_serial);
- if (!client->name) {
- up_write(&dev->lock);
- put_task_struct(current->group_leader);
- kfree(client);
- return ERR_PTR(-ENOMEM);
- }
- p = &dev->clients.rb_node;
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct ion_client, node);
-
- if (client < entry)
- p = &(*p)->rb_left;
- else if (client > entry)
- p = &(*p)->rb_right;
- }
- rb_link_node(&client->node, parent, p);
- rb_insert_color(&client->node, &dev->clients);
-
-
- client->debug_root = debugfs_create_file(client->name, 0664,
- dev->clients_debug_root,
- client, &debug_client_fops);
- if (!client->debug_root) {
- char buf[256], *path;
- path = dentry_path(dev->clients_debug_root, buf, 256);
-		pr_err("Failed to create client debugfs at %s/%s\n",
- path, client->name);
- }
-
- up_write(&dev->lock);
-
- return client;
-}
-EXPORT_SYMBOL(ion_client_create);
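
For reference, the in-kernel client API removed in this file was used roughly as follows. A minimal sketch, assuming the usual kernel headers, a device pointer obtained from ion_device_create(), and a board-defined heap with id 0; example_use_ion and my_ion_dev are illustrative names, not part of this diff:

/*
 * Minimal sketch of the in-kernel client API (illustrative only).
 * Assumes a heap whose id is 0; error handling is abbreviated.
 */
static int example_use_ion(struct ion_device *my_ion_dev)
{
	struct ion_client *client;
	struct ion_handle *handle;
	void *vaddr;
	int ret = 0;

	client = ion_client_create(my_ion_dev, "example");
	if (IS_ERR(client))
		return PTR_ERR(client);

	/* one page from heap id 0, page aligned, cached */
	handle = ion_alloc(client, PAGE_SIZE, PAGE_SIZE, 1 << 0,
			   ION_FLAG_CACHED);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_client;
	}

	vaddr = ion_map_kernel(client, handle);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
	} else {
		memset(vaddr, 0, PAGE_SIZE);
		ion_unmap_kernel(client, handle);
	}

	ion_free(client, handle);
out_client:
	ion_client_destroy(client);
	return ret;
}
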
-
-void ion_client_destroy(struct ion_client *client)
-{
- struct ion_device *dev = client->dev;
- struct rb_node *n;
-
- pr_debug("%s: %d\n", __func__, __LINE__);
- while ((n = rb_first(&client->handles))) {
- struct ion_handle *handle = rb_entry(n, struct ion_handle,
- node);
- ion_handle_destroy(&handle->ref);
- }
-
- idr_destroy(&client->idr);
-
- down_write(&dev->lock);
- if (client->task)
- put_task_struct(client->task);
- rb_erase(&client->node, &dev->clients);
- debugfs_remove_recursive(client->debug_root);
-
- up_write(&dev->lock);
-
- kfree(client->name);
- kfree(client);
-}
-EXPORT_SYMBOL(ion_client_destroy);
-
-int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
- unsigned long *flags)
-{
- struct ion_buffer *buffer;
-
- mutex_lock(&client->lock);
- if (!ion_handle_validate(client, handle)) {
- pr_err("%s: invalid handle passed to %s.\n",
- __func__, __func__);
- mutex_unlock(&client->lock);
- return -EINVAL;
- }
- buffer = handle->buffer;
- mutex_lock(&buffer->lock);
- *flags = buffer->flags;
- mutex_unlock(&buffer->lock);
- mutex_unlock(&client->lock);
-
- return 0;
-}
-EXPORT_SYMBOL(ion_handle_get_flags);
-
-int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
- unsigned long *size)
-{
- struct ion_buffer *buffer;
-
- mutex_lock(&client->lock);
- if (!ion_handle_validate(client, handle)) {
- pr_err("%s: invalid handle passed to %s.\n",
- __func__, __func__);
- mutex_unlock(&client->lock);
- return -EINVAL;
- }
- buffer = handle->buffer;
- mutex_lock(&buffer->lock);
- *size = buffer->size;
- mutex_unlock(&buffer->lock);
- mutex_unlock(&client->lock);
-
- return 0;
-}
-EXPORT_SYMBOL(ion_handle_get_size);
-
-struct sg_table *ion_sg_table(struct ion_client *client,
- struct ion_handle *handle)
-{
- struct ion_buffer *buffer;
- struct sg_table *table;
-
- mutex_lock(&client->lock);
- if (!ion_handle_validate(client, handle)) {
- pr_err("%s: invalid handle passed to map_dma.\n",
- __func__);
- mutex_unlock(&client->lock);
- return ERR_PTR(-EINVAL);
- }
- buffer = handle->buffer;
- table = buffer->sg_table;
- mutex_unlock(&client->lock);
- return table;
-}
-EXPORT_SYMBOL(ion_sg_table);
-
-struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base,
- size_t chunk_size, size_t total_size)
-{
- struct sg_table *table;
- int i, n_chunks, ret;
- struct scatterlist *sg;
-
- table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!table)
- return ERR_PTR(-ENOMEM);
-
- n_chunks = DIV_ROUND_UP(total_size, chunk_size);
- pr_debug("creating sg_table with %d chunks\n", n_chunks);
-
- ret = sg_alloc_table(table, n_chunks, GFP_KERNEL);
- if (ret)
- goto err0;
-
- for_each_sg(table->sgl, sg, table->nents, i) {
- dma_addr_t addr = buffer_base + i * chunk_size;
- sg_dma_address(sg) = addr;
- sg_dma_len(sg) = chunk_size;
- }
-
- return table;
-err0:
- kfree(table);
- return ERR_PTR(ret);
-}
-
-static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
- struct device *dev,
- enum dma_data_direction direction);
-
-static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
- enum dma_data_direction direction)
-{
- struct dma_buf *dmabuf = attachment->dmabuf;
- struct ion_buffer *buffer = dmabuf->priv;
-
- ion_buffer_sync_for_device(buffer, attachment->dev, direction);
- return buffer->sg_table;
-}
-
-static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
- struct sg_table *table,
- enum dma_data_direction direction)
-{
-}
-
-static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
-{
- unsigned long pages = buffer->sg_table->nents;
- unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;
-
- buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
- if (!buffer->dirty)
- return -ENOMEM;
- return 0;
-}
-
-struct ion_vma_list {
- struct list_head list;
- struct vm_area_struct *vma;
-};
-
-static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
- struct device *dev,
- enum dma_data_direction dir)
-{
- struct scatterlist *sg;
- int i;
- struct ion_vma_list *vma_list;
-
- pr_debug("%s: syncing for device %s\n", __func__,
- dev ? dev_name(dev) : "null");
-
- if (!ion_buffer_fault_user_mappings(buffer))
- return;
-
- mutex_lock(&buffer->lock);
- for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
- if (!test_bit(i, buffer->dirty))
- continue;
- dma_sync_sg_for_device(dev, sg, 1, dir);
- clear_bit(i, buffer->dirty);
- }
- list_for_each_entry(vma_list, &buffer->vmas, list) {
- struct vm_area_struct *vma = vma_list->vma;
-
- zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
- NULL);
- }
- mutex_unlock(&buffer->lock);
-}
-
-int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct ion_buffer *buffer = vma->vm_private_data;
- struct scatterlist *sg;
- int i;
-
- mutex_lock(&buffer->lock);
- set_bit(vmf->pgoff, buffer->dirty);
-
- for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
- if (i != vmf->pgoff)
- continue;
- dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
- vm_insert_page(vma, (unsigned long)vmf->virtual_address,
- sg_page(sg));
- break;
- }
- mutex_unlock(&buffer->lock);
- return VM_FAULT_NOPAGE;
-}
-
-static void ion_vm_open(struct vm_area_struct *vma)
-{
- struct ion_buffer *buffer = vma->vm_private_data;
- struct ion_vma_list *vma_list;
-
- vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
- if (!vma_list)
- return;
- vma_list->vma = vma;
- mutex_lock(&buffer->lock);
- list_add(&vma_list->list, &buffer->vmas);
- mutex_unlock(&buffer->lock);
- pr_debug("%s: adding %p\n", __func__, vma);
-}
-
-static void ion_vm_close(struct vm_area_struct *vma)
-{
- struct ion_buffer *buffer = vma->vm_private_data;
- struct ion_vma_list *vma_list, *tmp;
-
- pr_debug("%s\n", __func__);
- mutex_lock(&buffer->lock);
- list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
- if (vma_list->vma != vma)
- continue;
- list_del(&vma_list->list);
- kfree(vma_list);
- pr_debug("%s: deleting %p\n", __func__, vma);
- break;
- }
- mutex_unlock(&buffer->lock);
-
- if (buffer->heap->ops->unmap_user)
- buffer->heap->ops->unmap_user(buffer->heap, buffer);
-}
-
-struct vm_operations_struct ion_vma_ops = {
- .open = ion_vm_open,
- .close = ion_vm_close,
- .fault = ion_vm_fault,
-};
-
-static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
-{
- struct ion_buffer *buffer = dmabuf->priv;
- int ret = 0;
-
- if (!buffer->heap->ops->map_user) {
- pr_err("%s: this heap does not define a method for mapping "
- "to userspace\n", __func__);
- return -EINVAL;
- }
-
- if (ion_buffer_fault_user_mappings(buffer)) {
- vma->vm_private_data = buffer;
- vma->vm_ops = &ion_vma_ops;
- vma->vm_flags |= VM_MIXEDMAP;
- ion_vm_open(vma);
- return 0;
- }
-
- if (!(buffer->flags & ION_FLAG_CACHED))
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
- mutex_lock(&buffer->lock);
- /* now map it to userspace */
- ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
- mutex_unlock(&buffer->lock);
-
- if (ret)
- pr_err("%s: failure mapping buffer to userspace\n",
- __func__);
-
- return ret;
-}
-
-static void ion_dma_buf_release(struct dma_buf *dmabuf)
-{
- struct ion_buffer *buffer = dmabuf->priv;
- ion_buffer_put(buffer);
-}
-
-static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
-{
- struct ion_buffer *buffer = dmabuf->priv;
- return buffer->vaddr + offset * PAGE_SIZE;
-}
-
-static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
- void *ptr)
-{
- return;
-}
-
-static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
- size_t len,
- enum dma_data_direction direction)
-{
- struct ion_buffer *buffer = dmabuf->priv;
- void *vaddr;
-
- if (!buffer->heap->ops->map_kernel) {
- pr_err("%s: map kernel is not implemented by this heap.\n",
- __func__);
- return -ENODEV;
- }
-
- mutex_lock(&buffer->lock);
- vaddr = ion_buffer_kmap_get(buffer);
- mutex_unlock(&buffer->lock);
- if (IS_ERR(vaddr))
- return PTR_ERR(vaddr);
- return 0;
-}
-
-static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
- size_t len,
- enum dma_data_direction direction)
-{
- struct ion_buffer *buffer = dmabuf->priv;
-
- mutex_lock(&buffer->lock);
- ion_buffer_kmap_put(buffer);
- mutex_unlock(&buffer->lock);
-}
-
-struct dma_buf_ops dma_buf_ops = {
- .map_dma_buf = ion_map_dma_buf,
- .unmap_dma_buf = ion_unmap_dma_buf,
- .mmap = ion_mmap,
- .release = ion_dma_buf_release,
- .begin_cpu_access = ion_dma_buf_begin_cpu_access,
- .end_cpu_access = ion_dma_buf_end_cpu_access,
- .kmap_atomic = ion_dma_buf_kmap,
- .kunmap_atomic = ion_dma_buf_kunmap,
- .kmap = ion_dma_buf_kmap,
- .kunmap = ion_dma_buf_kunmap,
-};
-
-struct dma_buf *ion_share_dma_buf(struct ion_client *client,
- struct ion_handle *handle)
-{
- struct ion_buffer *buffer;
- struct dma_buf *dmabuf;
- bool valid_handle;
-
- mutex_lock(&client->lock);
- valid_handle = ion_handle_validate(client, handle);
- if (!valid_handle) {
- WARN(1, "%s: invalid handle passed to share.\n", __func__);
- mutex_unlock(&client->lock);
- return ERR_PTR(-EINVAL);
- }
- buffer = handle->buffer;
- ion_buffer_get(buffer);
- mutex_unlock(&client->lock);
-
- dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
- if (IS_ERR(dmabuf)) {
- ion_buffer_put(buffer);
- return dmabuf;
- }
-
- return dmabuf;
-}
-EXPORT_SYMBOL(ion_share_dma_buf);
-
-int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
-{
- struct dma_buf *dmabuf;
- int fd;
-
- dmabuf = ion_share_dma_buf(client, handle);
- if (IS_ERR(dmabuf))
- return PTR_ERR(dmabuf);
-
- fd = dma_buf_fd(dmabuf, O_CLOEXEC);
- if (fd < 0)
- dma_buf_put(dmabuf);
- return fd;
-}
-EXPORT_SYMBOL(ion_share_dma_buf_fd);
-
-struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
-{
- struct dma_buf *dmabuf;
- struct ion_buffer *buffer;
- struct ion_handle *handle;
- int ret;
-
- dmabuf = dma_buf_get(fd);
- if (IS_ERR(dmabuf))
- return ERR_PTR(PTR_ERR(dmabuf));
- /* if this memory came from ion */
-
- if (dmabuf->ops != &dma_buf_ops) {
- pr_err("%s: can not import dmabuf from another exporter\n",
- __func__);
- dma_buf_put(dmabuf);
- return ERR_PTR(-EINVAL);
- }
- buffer = dmabuf->priv;
-
- mutex_lock(&client->lock);
- /* if a handle exists for this buffer just take a reference to it */
- handle = ion_handle_lookup(client, buffer);
- if (!IS_ERR(handle)) {
- ion_handle_get(handle);
- mutex_unlock(&client->lock);
- goto end;
- }
- mutex_unlock(&client->lock);
-
- handle = ion_handle_create(client, buffer);
- if (IS_ERR(handle))
- goto end;
-
- mutex_lock(&client->lock);
- ret = ion_handle_add(client, handle);
- mutex_unlock(&client->lock);
- if (ret) {
- ion_handle_put(handle);
- handle = ERR_PTR(ret);
- }
-
-end:
- dma_buf_put(dmabuf);
- return handle;
-}
-EXPORT_SYMBOL(ion_import_dma_buf);
-
-static int ion_sync_for_device(struct ion_client *client, int fd)
-{
- struct dma_buf *dmabuf;
- struct ion_buffer *buffer;
-
- dmabuf = dma_buf_get(fd);
- if (IS_ERR(dmabuf))
- return PTR_ERR(dmabuf);
-
- /* if this memory came from ion */
- if (dmabuf->ops != &dma_buf_ops) {
- pr_err("%s: can not sync dmabuf from another exporter\n",
- __func__);
- dma_buf_put(dmabuf);
- return -EINVAL;
- }
- buffer = dmabuf->priv;
-
- dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_BIDIRECTIONAL);
- dma_buf_put(dmabuf);
- return 0;
-}
-
-static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- struct ion_client *client = filp->private_data;
-
- switch (cmd) {
- case ION_IOC_ALLOC:
- {
- struct ion_allocation_data data;
- struct ion_handle *handle;
-
- if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
- return -EFAULT;
- handle = ion_alloc(client, data.len, data.align,
- data.heap_mask, data.flags);
-
- if (IS_ERR(handle))
- return PTR_ERR(handle);
-
- data.handle = (ion_user_handle_t)handle->id;
-
- if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
- ion_free(client, handle);
- return -EFAULT;
- }
- break;
- }
- case ION_IOC_FREE:
- {
- struct ion_handle_data data;
- struct ion_handle *handle;
-
- if (copy_from_user(&data, (void __user *)arg,
- sizeof(struct ion_handle_data)))
- return -EFAULT;
- handle = ion_handle_get_by_id(client, (int)data.handle);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
- ion_free(client, handle);
- ion_handle_put(handle);
- break;
- }
- case ION_IOC_SHARE:
- case ION_IOC_MAP:
- {
- struct ion_fd_data data;
- struct ion_handle *handle;
- if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
- return -EFAULT;
-
- handle = ion_handle_get_by_id(client, (int)data.handle);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
- data.fd = ion_share_dma_buf_fd(client, handle);
- ion_handle_put(handle);
- if (copy_to_user((void __user *)arg, &data, sizeof(data)))
- return -EFAULT;
- if (data.fd < 0)
- return data.fd;
- break;
- }
- case ION_IOC_IMPORT:
- {
- struct ion_fd_data data;
- struct ion_handle *handle;
- int ret = 0;
- if (copy_from_user(&data, (void __user *)arg,
- sizeof(struct ion_fd_data)))
- return -EFAULT;
- handle = ion_import_dma_buf(client, data.fd);
- if (IS_ERR(handle))
- ret = PTR_ERR(handle);
- else
- data.handle = (ion_user_handle_t)handle->id;
-
- if (copy_to_user((void __user *)arg, &data,
- sizeof(struct ion_fd_data)))
- return -EFAULT;
- if (ret < 0)
- return ret;
- break;
- }
- case ION_IOC_SYNC:
- {
- struct ion_fd_data data;
- if (copy_from_user(&data, (void __user *)arg,
- sizeof(struct ion_fd_data)))
- return -EFAULT;
- ion_sync_for_device(client, data.fd);
- break;
- }
- case ION_IOC_CUSTOM:
- {
- struct ion_device *dev = client->dev;
- struct ion_custom_data data;
-
- if (!dev->custom_ioctl)
- return -ENOTTY;
- if (copy_from_user(&data, (void __user *)arg,
- sizeof(struct ion_custom_data)))
- return -EFAULT;
- return dev->custom_ioctl(client, data.cmd, data.arg);
- }
- case ION_IOC_CLEAN_CACHES:
- return client->dev->custom_ioctl(client,
- ION_IOC_CLEAN_CACHES, arg);
- case ION_IOC_INV_CACHES:
- return client->dev->custom_ioctl(client,
- ION_IOC_INV_CACHES, arg);
- case ION_IOC_CLEAN_INV_CACHES:
- return client->dev->custom_ioctl(client,
- ION_IOC_CLEAN_INV_CACHES, arg);
- default:
- return -ENOTTY;
- }
- return 0;
-}
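
The ioctl handler above pairs with a short userspace sequence: allocate, convert the handle to a dma-buf fd with ION_IOC_SHARE, then mmap the fd. A minimal sketch, assuming the struct layouts and ioctl numbers from this era's uapi <linux/ion.h> (not part of this diff); ion_alloc_and_map is an illustrative name and error cleanup is abbreviated:

/*
 * Hypothetical userspace use of /dev/ion; assumes <linux/ion.h>.
 * Error paths leak ionfd for brevity.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/ion.h>

int ion_alloc_and_map(size_t len, unsigned int heap_mask, void **vaddr)
{
	struct ion_allocation_data alloc_data = {
		.len = len,
		.align = 4096,
		.heap_mask = heap_mask,
		.flags = 0,
	};
	struct ion_fd_data fd_data;
	int ionfd;

	ionfd = open("/dev/ion", O_RDONLY);
	if (ionfd < 0)
		return -1;

	/* ION_IOC_ALLOC fills in alloc_data.handle */
	if (ioctl(ionfd, ION_IOC_ALLOC, &alloc_data) < 0)
		return -1;

	/* ION_IOC_SHARE turns the handle into a dma-buf fd */
	fd_data.handle = alloc_data.handle;
	if (ioctl(ionfd, ION_IOC_SHARE, &fd_data) < 0)
		return -1;

	/* the dma-buf fd mmaps like any other fd */
	*vaddr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
		      fd_data.fd, 0);
	return *vaddr == MAP_FAILED ? -1 : fd_data.fd;
}
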
-
-static int ion_release(struct inode *inode, struct file *file)
-{
- struct ion_client *client = file->private_data;
-
- pr_debug("%s: %d\n", __func__, __LINE__);
- ion_client_destroy(client);
- return 0;
-}
-
-static int ion_open(struct inode *inode, struct file *file)
-{
- struct miscdevice *miscdev = file->private_data;
- struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
- struct ion_client *client;
- char debug_name[64];
-
- pr_debug("%s: %d\n", __func__, __LINE__);
- snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
- client = ion_client_create(dev, debug_name);
- if (IS_ERR(client))
- return PTR_ERR(client);
- file->private_data = client;
-
- return 0;
-}
-
-static const struct file_operations ion_fops = {
- .owner = THIS_MODULE,
- .open = ion_open,
- .release = ion_release,
- .unlocked_ioctl = ion_ioctl,
-};
-
-static size_t ion_debug_heap_total(struct ion_client *client,
- unsigned int id)
-{
- size_t size = 0;
- struct rb_node *n;
-
- mutex_lock(&client->lock);
- for (n = rb_first(&client->handles); n; n = rb_next(n)) {
- struct ion_handle *handle = rb_entry(n,
- struct ion_handle,
- node);
- if (handle->buffer->heap->id == id)
- size += handle->buffer->size;
- }
- mutex_unlock(&client->lock);
- return size;
-}
-
-/**
- * Create a mem_map of the heap.
- * @param s seq_file to log error message to.
- * @param heap The heap to create mem_map for.
- * @param mem_map The mem map to be created.
- */
-void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap,
- struct list_head *mem_map)
-{
- struct ion_device *dev = heap->dev;
- struct rb_node *cnode;
- size_t size;
- struct ion_client *client;
-
- if (!heap->ops->phys)
- return;
-
- down_read(&dev->lock);
- for (cnode = rb_first(&dev->clients); cnode; cnode = rb_next(cnode)) {
- struct rb_node *hnode;
- client = rb_entry(cnode, struct ion_client, node);
-
- mutex_lock(&client->lock);
- for (hnode = rb_first(&client->handles);
- hnode;
- hnode = rb_next(hnode)) {
- struct ion_handle *handle = rb_entry(
- hnode, struct ion_handle, node);
- if (handle->buffer->heap == heap) {
- struct mem_map_data *data =
- kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- goto inner_error;
- heap->ops->phys(heap, handle->buffer,
- &(data->addr), &size);
- data->size = (unsigned long) size;
- data->addr_end = data->addr + data->size - 1;
- data->client_name = kstrdup(client->name,
- GFP_KERNEL);
- if (!data->client_name) {
- kfree(data);
- goto inner_error;
- }
- list_add(&data->node, mem_map);
- }
- }
- mutex_unlock(&client->lock);
- }
- up_read(&dev->lock);
- return;
-
-inner_error:
- seq_puts(s,
- "ERROR: out of memory. Part of memory map will not be logged\n");
- mutex_unlock(&client->lock);
- up_read(&dev->lock);
-}
-
-/**
- * Free the memory allocated by ion_debug_mem_map_create
- * @param mem_map The mem map to free.
- */
-static void ion_debug_mem_map_destroy(struct list_head *mem_map)
-{
- if (mem_map) {
- struct mem_map_data *data, *tmp;
- list_for_each_entry_safe(data, tmp, mem_map, node) {
- list_del(&data->node);
- kfree(data->client_name);
- kfree(data);
- }
- }
-}
-
-static int mem_map_cmp(void *priv, struct list_head *a, struct list_head *b)
-{
- struct mem_map_data *d1, *d2;
- d1 = list_entry(a, struct mem_map_data, node);
- d2 = list_entry(b, struct mem_map_data, node);
- if (d1->addr == d2->addr)
- return d1->size - d2->size;
- return d1->addr - d2->addr;
-}
-
-/**
- * Print heap debug information.
- * @param s seq_file to log message to.
- * @param heap pointer to heap that we will print debug information for.
- */
-static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap)
-{
- if (heap->ops->print_debug) {
- struct list_head mem_map = LIST_HEAD_INIT(mem_map);
- ion_debug_mem_map_create(s, heap, &mem_map);
- list_sort(NULL, &mem_map, mem_map_cmp);
- heap->ops->print_debug(heap, s, &mem_map);
- ion_debug_mem_map_destroy(&mem_map);
- }
-}
-
-static int ion_debug_heap_show(struct seq_file *s, void *unused)
-{
- struct ion_heap *heap = s->private;
- struct ion_device *dev = heap->dev;
- struct rb_node *n;
- size_t total_size = 0;
- size_t total_orphaned_size = 0;
-
- seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
- seq_printf(s, "----------------------------------------------------\n");
-
- down_read(&dev->lock);
- for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
- struct ion_client *client = rb_entry(n, struct ion_client,
- node);
- size_t size = ion_debug_heap_total(client, heap->id);
- if (!size)
- continue;
- if (client->task) {
- char task_comm[TASK_COMM_LEN];
-
- get_task_comm(task_comm, client->task);
- seq_printf(s, "%16.s %16u %16zu\n", task_comm,
- client->pid, size);
- } else {
- seq_printf(s, "%16.s %16u %16zu\n", client->name,
- client->pid, size);
- }
- }
- up_read(&dev->lock);
- seq_printf(s, "----------------------------------------------------\n");
- seq_printf(s, "orphaned allocations (info is from last known client):"
- "\n");
- mutex_lock(&dev->buffer_lock);
- for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
- struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
- node);
- if (buffer->heap->id != heap->id)
- continue;
- total_size += buffer->size;
- if (!buffer->handle_count) {
- seq_printf(s, "%16.s %16u %16zu %d %d\n",
- buffer->task_comm, buffer->pid, buffer->size,
- buffer->kmap_cnt,
- atomic_read(&buffer->ref.refcount));
- total_orphaned_size += buffer->size;
- }
- }
- mutex_unlock(&dev->buffer_lock);
- seq_printf(s, "----------------------------------------------------\n");
- seq_printf(s, "%16.s %16zu\n", "total orphaned",
- total_orphaned_size);
- seq_printf(s, "%16.s %16zu\n", "total ", total_size);
- seq_printf(s, "----------------------------------------------------\n");
-
- if (heap->debug_show)
- heap->debug_show(heap, s, unused);
-
- ion_heap_print_debug(s, heap);
- return 0;
-}
-
-static int ion_debug_heap_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ion_debug_heap_show, inode->i_private);
-}
-
-static const struct file_operations debug_heap_fops = {
- .open = ion_debug_heap_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-#ifdef DEBUG_HEAP_SHRINKER
-static int debug_shrink_set(void *data, u64 val)
-{
- struct ion_heap *heap = data;
- struct shrink_control sc;
- int objs;
-
- sc.gfp_mask = -1;
- sc.nr_to_scan = 0;
-
- if (!val)
- return 0;
-
- objs = heap->shrinker.shrink(&heap->shrinker, &sc);
- sc.nr_to_scan = objs;
-
- heap->shrinker.shrink(&heap->shrinker, &sc);
- return 0;
-}
-
-static int debug_shrink_get(void *data, u64 *val)
-{
- struct ion_heap *heap = data;
- struct shrink_control sc;
- int objs;
-
- sc.gfp_mask = -1;
- sc.nr_to_scan = 0;
-
- objs = heap->shrinker.shrink(&heap->shrinker, &sc);
- *val = objs;
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
- debug_shrink_set, "%llu\n");
-#endif
-
-void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
-{
- struct dentry *debug_file;
-
- if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
- !heap->ops->unmap_dma)
- pr_err("%s: can not add heap with invalid ops struct.\n",
- __func__);
-
- if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
- ion_heap_init_deferred_free(heap);
-
- heap->dev = dev;
- down_write(&dev->lock);
- /* use negative heap->id to reverse the priority -- when traversing
- the list later attempt higher id numbers first */
- plist_node_init(&heap->node, -heap->id);
- plist_add(&heap->node, &dev->heaps);
- debug_file = debugfs_create_file(heap->name, 0664,
- dev->heaps_debug_root, heap,
- &debug_heap_fops);
-
- if (!debug_file) {
- char buf[256], *path;
- path = dentry_path(dev->heaps_debug_root, buf, 256);
-		pr_err("Failed to create heap debugfs at %s/%s\n",
- path, heap->name);
- }
-
-#ifdef DEBUG_HEAP_SHRINKER
- if (heap->shrinker.shrink) {
- char debug_name[64];
-
- snprintf(debug_name, 64, "%s_shrink", heap->name);
- debug_file = debugfs_create_file(
- debug_name, 0644, dev->heaps_debug_root, heap,
- &debug_shrink_fops);
- if (!debug_file) {
- char buf[256], *path;
- path = dentry_path(dev->heaps_debug_root, buf, 256);
-			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
- path, debug_name);
- }
- }
-#endif
- up_write(&dev->lock);
-}
-
-int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
- void *data)
-{
- int ret_val = 0;
- struct ion_heap *heap;
-
- /*
- * traverse the list of heaps available in this system
- * and find the heap that is specified.
- */
- down_write(&dev->lock);
- plist_for_each_entry(heap, &dev->heaps, node) {
- if (!ion_heap_allow_heap_secure(heap->type))
- continue;
- if (ION_HEAP(heap->id) != heap_id)
- continue;
- if (heap->ops->secure_heap)
- ret_val = heap->ops->secure_heap(heap, version, data);
- else
- ret_val = -EINVAL;
- break;
- }
- up_write(&dev->lock);
- return ret_val;
-}
-EXPORT_SYMBOL(ion_secure_heap);
-
-int ion_walk_heaps(struct ion_client *client, int heap_id, void *data,
- int (*f)(struct ion_heap *heap, void *data))
-{
- int ret_val = -EINVAL;
- struct ion_heap *heap;
- struct ion_device *dev = client->dev;
- /*
- * traverse the list of heaps available in this system
- * and find the heap that is specified.
- */
- down_write(&dev->lock);
- plist_for_each_entry(heap, &dev->heaps, node) {
- if (ION_HEAP(heap->id) != heap_id)
- continue;
- ret_val = f(heap, data);
- break;
- }
- up_write(&dev->lock);
- return ret_val;
-}
-EXPORT_SYMBOL(ion_walk_heaps);
-
-int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
- void *data)
-{
- int ret_val = 0;
- struct ion_heap *heap;
-
- /*
- * traverse the list of heaps available in this system
- * and find the heap that is specified.
- */
- down_write(&dev->lock);
- plist_for_each_entry(heap, &dev->heaps, node) {
- if (!ion_heap_allow_heap_secure(heap->type))
- continue;
- if (ION_HEAP(heap->id) != heap_id)
- continue;
-		if (heap->ops->unsecure_heap)
-			ret_val = heap->ops->unsecure_heap(heap, version, data);
- else
- ret_val = -EINVAL;
- break;
- }
- up_write(&dev->lock);
- return ret_val;
-}
-EXPORT_SYMBOL(ion_unsecure_heap);
-
-struct ion_device *ion_device_create(long (*custom_ioctl)
- (struct ion_client *client,
- unsigned int cmd,
- unsigned long arg))
-{
- struct ion_device *idev;
- int ret;
-
- idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
- if (!idev)
- return ERR_PTR(-ENOMEM);
-
- idev->dev.minor = MISC_DYNAMIC_MINOR;
- idev->dev.name = "ion";
- idev->dev.fops = &ion_fops;
- idev->dev.parent = NULL;
-	ret = misc_register(&idev->dev);
-	if (ret) {
-		pr_err("ion: failed to register misc device.\n");
-		kfree(idev);
-		return ERR_PTR(ret);
-	}
-
- idev->debug_root = debugfs_create_dir("ion", NULL);
- if (!idev->debug_root) {
- pr_err("ion: failed to create debugfs root directory.\n");
- goto debugfs_done;
- }
- idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
- if (!idev->heaps_debug_root) {
- pr_err("ion: failed to create debugfs heaps directory.\n");
- goto debugfs_done;
- }
- idev->clients_debug_root = debugfs_create_dir("clients",
- idev->debug_root);
- if (!idev->clients_debug_root)
- pr_err("ion: failed to create debugfs clients directory.\n");
-
-debugfs_done:
-
- idev->custom_ioctl = custom_ioctl;
- idev->buffers = RB_ROOT;
- mutex_init(&idev->buffer_lock);
- init_rwsem(&idev->lock);
- plist_head_init(&idev->heaps);
- idev->clients = RB_ROOT;
- return idev;
-}
-
-void ion_device_destroy(struct ion_device *dev)
-{
- misc_deregister(&dev->dev);
- debugfs_remove_recursive(dev->debug_root);
- /* XXX need to free the heaps and clients ? */
- kfree(dev);
-}
-
-void __init ion_reserve(struct ion_platform_data *data)
-{
- int i;
-
- for (i = 0; i < data->nr; i++) {
- if (data->heaps[i].size == 0)
- continue;
-
- if (data->heaps[i].base == 0) {
- phys_addr_t paddr;
- paddr = memblock_alloc_base(data->heaps[i].size,
- data->heaps[i].align,
- MEMBLOCK_ALLOC_ANYWHERE);
- if (!paddr) {
- pr_err("%s: error allocating memblock for "
- "heap %d\n",
- __func__, i);
- continue;
- }
- data->heaps[i].base = paddr;
- } else {
- int ret = memblock_reserve(data->heaps[i].base,
- data->heaps[i].size);
- if (ret)
- pr_err("memblock reserve of %zx@%pa failed\n",
- data->heaps[i].size,
- &data->heaps[i].base);
- }
- pr_info("%s: %s reserved base %pa size %zu\n", __func__,
- data->heaps[i].name,
- &data->heaps[i].base,
- data->heaps[i].size);
- }
-}
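
ion_reserve() above is driven by board-file platform data. A minimal sketch under the assumption that struct ion_platform_data carries the nr/heaps pair used above; the heap name, sizes, and alignment are made-up placeholder values, and the SZ_* constants are assumed from <linux/sizes.h>:

/*
 * Hypothetical board-file data feeding ion_reserve(); values are
 * placeholders, not taken from any real board.
 */
static struct ion_platform_heap example_heaps[] = {
	{
		.name  = "example-carveout",
		.base  = 0,	/* 0: ion_reserve() allocates via memblock */
		.size  = SZ_8M,
		.align = SZ_1M,
	},
};

static struct ion_platform_data example_ion_pdata = {
	.nr    = ARRAY_SIZE(example_heaps),
	.heaps = example_heaps,
};

/* called from the machine's early memory-reservation hook */
static void __init example_reserve_ion(void)
{
	ion_reserve(&example_ion_pdata);
}
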
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
deleted file mode 100644
index 1f8997c9698..00000000000
--- a/drivers/gpu/ion/ion_carveout_heap.c
+++ /dev/null
@@ -1,266 +0,0 @@
-/*
- * drivers/gpu/ion/ion_carveout_heap.c
- *
- * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-#include <linux/spinlock.h>
-
-#include <linux/err.h>
-#include <linux/genalloc.h>
-#include <linux/io.h>
-#include <linux/ion.h>
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/seq_file.h>
-#include "ion_priv.h"
-
-#include <asm/cacheflush.h>
-#include <linux/io.h>
-#include <linux/msm_ion.h>
-
-struct ion_carveout_heap {
- struct ion_heap heap;
- struct gen_pool *pool;
- ion_phys_addr_t base;
- unsigned long allocated_bytes;
- unsigned long total_size;
-};
-
-ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
- unsigned long size,
- unsigned long align)
-{
- struct ion_carveout_heap *carveout_heap =
- container_of(heap, struct ion_carveout_heap, heap);
- unsigned long offset = gen_pool_alloc_aligned(carveout_heap->pool,
- size, ilog2(align));
-
- if (!offset) {
- if ((carveout_heap->total_size -
- carveout_heap->allocated_bytes) >= size)
-			pr_debug("%s: heap %s has enough memory (%lx) but"
-				" the allocation of size %lx still failed."
-				" Memory is probably fragmented.\n",
- __func__, heap->name,
- carveout_heap->total_size -
- carveout_heap->allocated_bytes, size);
- return ION_CARVEOUT_ALLOCATE_FAIL;
- }
-
- carveout_heap->allocated_bytes += size;
- return offset;
-}
-
-void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
- unsigned long size)
-{
- struct ion_carveout_heap *carveout_heap =
- container_of(heap, struct ion_carveout_heap, heap);
-
- if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
- return;
- gen_pool_free(carveout_heap->pool, addr, size);
- carveout_heap->allocated_bytes -= size;
-}
-
-static int ion_carveout_heap_phys(struct ion_heap *heap,
- struct ion_buffer *buffer,
- ion_phys_addr_t *addr, size_t *len)
-{
- *addr = buffer->priv_phys;
- *len = buffer->size;
- return 0;
-}
-
-static int ion_carveout_heap_allocate(struct ion_heap *heap,
- struct ion_buffer *buffer,
- unsigned long size, unsigned long align,
- unsigned long flags)
-{
- buffer->priv_phys = ion_carveout_allocate(heap, size, align);
- return buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL ? -ENOMEM : 0;
-}
-
-static void ion_carveout_heap_free(struct ion_buffer *buffer)
-{
- struct ion_heap *heap = buffer->heap;
-
- ion_carveout_free(heap, buffer->priv_phys, buffer->size);
- buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL;
-}
-
-struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- size_t chunk_size = buffer->size;
-
- if (ION_IS_CACHED(buffer->flags))
- chunk_size = PAGE_SIZE;
-
- return ion_create_chunked_sg_table(buffer->priv_phys, chunk_size,
- buffer->size);
-}
-
-void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- if (buffer->sg_table)
- sg_free_table(buffer->sg_table);
- kfree(buffer->sg_table);
-	buffer->sg_table = NULL;
-}
-
-void *ion_carveout_heap_map_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- void *ret_value;
-
- if (ION_IS_CACHED(buffer->flags))
- ret_value = ioremap_cached(buffer->priv_phys, buffer->size);
- else
- ret_value = ioremap(buffer->priv_phys, buffer->size);
-
- return ret_value;
-}
-
-void ion_carveout_heap_unmap_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- iounmap(buffer->vaddr);
- buffer->vaddr = NULL;
-
- return;
-}
-
-int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
- struct vm_area_struct *vma)
-{
- int ret_value = 0;
-
- if (!ION_IS_CACHED(buffer->flags))
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
- ret_value = remap_pfn_range(vma, vma->vm_start,
- __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
-
- return ret_value;
-}
-
-static int ion_carveout_print_debug(struct ion_heap *heap, struct seq_file *s,
- const struct list_head *mem_map)
-{
- struct ion_carveout_heap *carveout_heap =
- container_of(heap, struct ion_carveout_heap, heap);
-
- seq_printf(s, "total bytes currently allocated: %lx\n",
- carveout_heap->allocated_bytes);
- seq_printf(s, "total heap size: %lx\n", carveout_heap->total_size);
-
- if (mem_map) {
- unsigned long base = carveout_heap->base;
- unsigned long size = carveout_heap->total_size;
- unsigned long end = base+size;
- unsigned long last_end = base;
- struct mem_map_data *data;
-
- seq_printf(s, "\nMemory Map\n");
-		seq_printf(s, "%16s %14s %14s %14s\n",
- "client", "start address", "end address",
- "size (hex)");
-
- list_for_each_entry(data, mem_map, node) {
- const char *client_name = "(null)";
-
- if (last_end < data->addr) {
- phys_addr_t da;
-
- da = data->addr-1;
-				seq_printf(s, "%16s %14pa %14pa %14lu (%lx)\n",
- "FREE", &last_end, &da,
- (unsigned long)data->addr-last_end,
- (unsigned long)data->addr-last_end);
- }
-
- if (data->client_name)
- client_name = data->client_name;
-
-			seq_printf(s, "%16s %14pa %14pa %14lu (%lx)\n",
- client_name, &data->addr,
- &data->addr_end,
- data->size, data->size);
- last_end = data->addr_end+1;
- }
- if (last_end < end) {
-			seq_printf(s, "%16s %14lx %14lx %14lu (%lx)\n", "FREE",
- last_end, end-1, end-last_end, end-last_end);
- }
- }
- return 0;
-}
-
-static struct ion_heap_ops carveout_heap_ops = {
- .allocate = ion_carveout_heap_allocate,
- .free = ion_carveout_heap_free,
- .phys = ion_carveout_heap_phys,
- .map_user = ion_carveout_heap_map_user,
- .map_kernel = ion_carveout_heap_map_kernel,
- .unmap_kernel = ion_carveout_heap_unmap_kernel,
- .map_dma = ion_carveout_heap_map_dma,
- .unmap_dma = ion_carveout_heap_unmap_dma,
- .print_debug = ion_carveout_print_debug,
-};
-
-struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
-{
- struct ion_carveout_heap *carveout_heap;
- int ret;
-
- carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
- if (!carveout_heap)
- return ERR_PTR(-ENOMEM);
-
- carveout_heap->pool = gen_pool_create(12, -1);
- if (!carveout_heap->pool) {
- kfree(carveout_heap);
- return ERR_PTR(-ENOMEM);
- }
- carveout_heap->base = heap_data->base;
- ret = gen_pool_add(carveout_heap->pool, carveout_heap->base,
- heap_data->size, -1);
- if (ret < 0) {
- gen_pool_destroy(carveout_heap->pool);
- kfree(carveout_heap);
- return ERR_PTR(-EINVAL);
- }
- carveout_heap->heap.ops = &carveout_heap_ops;
- carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
- carveout_heap->allocated_bytes = 0;
- carveout_heap->total_size = heap_data->size;
-
- return &carveout_heap->heap;
-}
-
-void ion_carveout_heap_destroy(struct ion_heap *heap)
-{
- struct ion_carveout_heap *carveout_heap =
- container_of(heap, struct ion_carveout_heap, heap);
-
- gen_pool_destroy(carveout_heap->pool);
- kfree(carveout_heap);
-}
diff --git a/drivers/gpu/ion/ion_chunk_heap.c b/drivers/gpu/ion/ion_chunk_heap.c
deleted file mode 100644
index b2021a1b456..00000000000
--- a/drivers/gpu/ion/ion_chunk_heap.c
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * drivers/gpu/ion/ion_chunk_heap.c
- *
- * Copyright (C) 2012 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/genalloc.h>
-#include <linux/io.h>
-#include <linux/ion.h>
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include "ion_priv.h"
-
-struct ion_chunk_heap {
- struct ion_heap heap;
- struct gen_pool *pool;
- ion_phys_addr_t base;
- unsigned long chunk_size;
- unsigned long size;
- unsigned long allocated;
-};
-
-static int ion_chunk_heap_allocate(struct ion_heap *heap,
- struct ion_buffer *buffer,
- unsigned long size, unsigned long align,
- unsigned long flags)
-{
- struct ion_chunk_heap *chunk_heap =
- container_of(heap, struct ion_chunk_heap, heap);
- struct sg_table *table;
- struct scatterlist *sg;
- int ret, i;
- unsigned long num_chunks;
-
- if (ion_buffer_fault_user_mappings(buffer))
- return -ENOMEM;
-
- num_chunks = ALIGN(size, chunk_heap->chunk_size) /
- chunk_heap->chunk_size;
- buffer->size = num_chunks * chunk_heap->chunk_size;
-
- if (buffer->size > chunk_heap->size - chunk_heap->allocated)
- return -ENOMEM;
-
- table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!table)
- return -ENOMEM;
- ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
- if (ret) {
- kfree(table);
- return ret;
- }
-
- sg = table->sgl;
- for (i = 0; i < num_chunks; i++) {
- unsigned long paddr = gen_pool_alloc(chunk_heap->pool,
- chunk_heap->chunk_size);
- if (!paddr)
- goto err;
- sg_set_page(sg, phys_to_page(paddr), chunk_heap->chunk_size, 0);
- sg = sg_next(sg);
- }
-
- buffer->priv_virt = table;
- chunk_heap->allocated += buffer->size;
- return 0;
-err:
- sg = table->sgl;
- for (i -= 1; i >= 0; i--) {
- gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
- sg_dma_len(sg));
- sg = sg_next(sg);
- }
- sg_free_table(table);
- kfree(table);
- return -ENOMEM;
-}
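
The rounding above means a request is charged in whole chunks, so buffer->size can exceed what the client asked for. A quick standalone illustration of the arithmetic (userspace C, hypothetical sizes):

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))	/* as in the kernel, power-of-two 'a' */

int main(void)
{
	unsigned long chunk_size = 64 * 1024;	/* 64K chunks */
	unsigned long request = 100 * 1024;	/* client asks for 100K */
	unsigned long num_chunks = ALIGN(request, chunk_size) / chunk_size;

	/* 100K rounds up to 2 chunks, so the buffer actually holds 128K */
	printf("chunks=%lu buffer=%luK\n", num_chunks,
	       num_chunks * chunk_size / 1024);
	return 0;
}
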
-
-static void ion_chunk_heap_free(struct ion_buffer *buffer)
-{
- struct ion_heap *heap = buffer->heap;
- struct ion_chunk_heap *chunk_heap =
- container_of(heap, struct ion_chunk_heap, heap);
- struct sg_table *table = buffer->priv_virt;
- struct scatterlist *sg;
- int i;
-
- ion_heap_buffer_zero(buffer);
-
- for_each_sg(table->sgl, sg, table->nents, i) {
- if (ion_buffer_cached(buffer))
- dma_sync_sg_for_device(NULL, sg, 1, DMA_BIDIRECTIONAL);
- gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
- sg_dma_len(sg));
- }
- chunk_heap->allocated -= buffer->size;
- sg_free_table(table);
- kfree(table);
-}
-
-struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- return buffer->priv_virt;
-}
-
-void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- return;
-}
-
-static struct ion_heap_ops chunk_heap_ops = {
- .allocate = ion_chunk_heap_allocate,
- .free = ion_chunk_heap_free,
- .map_dma = ion_chunk_heap_map_dma,
- .unmap_dma = ion_chunk_heap_unmap_dma,
- .map_user = ion_heap_map_user,
- .map_kernel = ion_heap_map_kernel,
- .unmap_kernel = ion_heap_unmap_kernel,
-};
-
-struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
-{
- struct ion_chunk_heap *chunk_heap;
- struct scatterlist sg;
-
- chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
- if (!chunk_heap)
- return ERR_PTR(-ENOMEM);
-
- chunk_heap->chunk_size = (unsigned long)heap_data->priv;
- chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
- PAGE_SHIFT, -1);
- if (!chunk_heap->pool) {
- kfree(chunk_heap);
- return ERR_PTR(-ENOMEM);
- }
- chunk_heap->base = heap_data->base;
- chunk_heap->size = heap_data->size;
- chunk_heap->allocated = 0;
-
- sg_init_table(&sg, 1);
- sg_set_page(&sg, phys_to_page(heap_data->base), heap_data->size, 0);
- dma_sync_sg_for_device(NULL, &sg, 1, DMA_BIDIRECTIONAL);
- gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
- chunk_heap->heap.ops = &chunk_heap_ops;
- chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
- chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
-	pr_info("%s: base %pa size %zu align %pa\n", __func__,
- &chunk_heap->base, heap_data->size, &heap_data->align);
-
- return &chunk_heap->heap;
-}
-
-void ion_chunk_heap_destroy(struct ion_heap *heap)
-{
- struct ion_chunk_heap *chunk_heap =
- container_of(heap, struct ion_chunk_heap, heap);
-
- gen_pool_destroy(chunk_heap->pool);
- kfree(chunk_heap);
-}
diff --git a/drivers/gpu/ion/ion_cma_heap.c b/drivers/gpu/ion/ion_cma_heap.c
deleted file mode 100644
index 7868610e2b6..00000000000
--- a/drivers/gpu/ion/ion_cma_heap.c
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * drivers/gpu/ion/ion_cma_heap.c
- *
- * Copyright (C) Linaro 2012
- * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/device.h>
-#include <linux/ion.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/dma-mapping.h>
-#include <linux/msm_ion.h>
-
-#include <asm/cacheflush.h>
-
-/* for ion_heap_ops structure */
-#include "ion_priv.h"
-
-#define ION_CMA_ALLOCATE_FAILED -1
-
-struct ion_cma_buffer_info {
- void *cpu_addr;
- dma_addr_t handle;
- struct sg_table *table;
- bool is_cached;
-};
-
-static int cma_heap_has_outer_cache;
-/*
- * Create scatter-list for the already allocated DMA buffer.
- * This function could be replaced by dma_common_get_sgtable
- * once that helper becomes available.
- */
-int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t handle, size_t size)
-{
- struct page *page = phys_to_page(handle);
- int ret;
-
- ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
- if (unlikely(ret))
- return ret;
-
- sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
- return 0;
-}
-
-/* ION CMA heap operations functions */
-static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
- unsigned long len, unsigned long align,
- unsigned long flags)
-{
- struct device *dev = heap->priv;
- struct ion_cma_buffer_info *info;
-
- dev_dbg(dev, "Request buffer allocation len %ld\n", len);
-
- info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
- if (!info) {
- dev_err(dev, "Can't allocate buffer info\n");
- return ION_CMA_ALLOCATE_FAILED;
- }
-
- if (!ION_IS_CACHED(flags))
- info->cpu_addr = dma_alloc_writecombine(dev, len,
- &(info->handle), GFP_KERNEL);
- else
- info->cpu_addr = dma_alloc_nonconsistent(dev, len,
- &(info->handle), GFP_KERNEL);
-
- if (!info->cpu_addr) {
- dev_err(dev, "Fail to allocate buffer\n");
- goto err;
- }
-
-	info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
-	if (!info->table) {
-		dev_err(dev, "Failed to allocate sg table\n");
-		goto free_mem;
-	}
-
- info->is_cached = ION_IS_CACHED(flags);
-
- ion_cma_get_sgtable(dev,
- info->table, info->cpu_addr, info->handle, len);
-
- /* keep this for memory release */
- buffer->priv_virt = info;
- dev_dbg(dev, "Allocate buffer %p\n", buffer);
- return 0;
-
-free_mem:
-	dma_free_coherent(dev, len, info->cpu_addr, info->handle);
-err:
-	kfree(info);
-	return ION_CMA_ALLOCATE_FAILED;
-}
-
-static void ion_cma_free(struct ion_buffer *buffer)
-{
- struct device *dev = buffer->heap->priv;
- struct ion_cma_buffer_info *info = buffer->priv_virt;
-
- dev_dbg(dev, "Release buffer %p\n", buffer);
- /* release memory */
- dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
- sg_free_table(info->table);
- /* release sg table */
- kfree(info->table);
- kfree(info);
-}
-
-/* return physical address in addr */
-static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
- ion_phys_addr_t *addr, size_t *len)
-{
- struct device *dev = heap->priv;
- struct ion_cma_buffer_info *info = buffer->priv_virt;
-
- dev_dbg(dev, "Return buffer %p physical address 0x%pa\n", buffer,
- &info->handle);
-
- *addr = info->handle;
- *len = buffer->size;
-
- return 0;
-}
-
-struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- struct ion_cma_buffer_info *info = buffer->priv_virt;
-
- return info->table;
-}
-
-void ion_cma_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- return;
-}
-
-static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
- struct vm_area_struct *vma)
-{
- struct device *dev = buffer->heap->priv;
- struct ion_cma_buffer_info *info = buffer->priv_virt;
-
- if (info->is_cached)
- return dma_mmap_nonconsistent(dev, vma, info->cpu_addr,
- info->handle, buffer->size);
- else
- return dma_mmap_writecombine(dev, vma, info->cpu_addr,
- info->handle, buffer->size);
-}
-
-static void *ion_cma_map_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- struct ion_cma_buffer_info *info = buffer->priv_virt;
-
- return info->cpu_addr;
-}
-
-static void ion_cma_unmap_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- return;
-}
-
-static int ion_cma_print_debug(struct ion_heap *heap, struct seq_file *s,
- const struct list_head *mem_map)
-{
- if (mem_map) {
- struct mem_map_data *data;
-
- seq_printf(s, "\nMemory Map\n");
-		seq_printf(s, "%16s %14s %14s %14s\n",
- "client", "start address", "end address",
- "size (hex)");
-
- list_for_each_entry(data, mem_map, node) {
- const char *client_name = "(null)";
-
-
- if (data->client_name)
- client_name = data->client_name;
-
-			seq_printf(s, "%16s %14pa %14pa %14lu (%lx)\n",
- client_name, &data->addr,
- &data->addr_end,
- data->size, data->size);
- }
- }
- return 0;
-}
-
-static struct ion_heap_ops ion_cma_ops = {
- .allocate = ion_cma_allocate,
- .free = ion_cma_free,
- .map_dma = ion_cma_heap_map_dma,
- .unmap_dma = ion_cma_heap_unmap_dma,
- .phys = ion_cma_phys,
- .map_user = ion_cma_mmap,
- .map_kernel = ion_cma_map_kernel,
- .unmap_kernel = ion_cma_unmap_kernel,
- .print_debug = ion_cma_print_debug,
-};
-
-struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
-{
- struct ion_heap *heap;
-
- heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
-
- if (!heap)
- return ERR_PTR(-ENOMEM);
-
- heap->ops = &ion_cma_ops;
- /* set device as private heaps data, later it will be
- * used to make the link with reserved CMA memory */
- heap->priv = data->priv;
- heap->type = ION_HEAP_TYPE_DMA;
- cma_heap_has_outer_cache = data->has_outer_cache;
- return heap;
-}
-
-void ion_cma_heap_destroy(struct ion_heap *heap)
-{
- kfree(heap);
-}
diff --git a/drivers/gpu/ion/ion_cma_secure_heap.c b/drivers/gpu/ion/ion_cma_secure_heap.c
deleted file mode 100644
index 35b782acdd6..00000000000
--- a/drivers/gpu/ion/ion_cma_secure_heap.c
+++ /dev/null
@@ -1,697 +0,0 @@
-/*
- * drivers/gpu/ion/ion_secure_cma_heap.c
- *
- * Copyright (C) Linaro 2012
- * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
- * Copyright (c) 2013, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/device.h>
-#include <linux/ion.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/dma-mapping.h>
-#include <linux/msm_ion.h>
-
-#include <asm/cacheflush.h>
-
-/* for ion_heap_ops structure */
-#include "ion_priv.h"
-#include "msm/ion_cp_common.h"
-
-#define ION_CMA_ALLOCATE_FAILED NULL
-
-struct ion_secure_cma_buffer_info {
- dma_addr_t phys;
- struct sg_table *table;
- bool is_cached;
-};
-
-struct ion_cma_alloc_chunk {
- void *cpu_addr;
- struct list_head entry;
- dma_addr_t handle;
- unsigned long chunk_size;
- atomic_t cnt;
-};
-
-struct ion_cma_secure_heap {
- struct device *dev;
-	/*
-	 * Protects against races between threads allocating memory and
-	 * adding to the pool at the same time (e.g. thread 1 adds to the
-	 * pool and thread 2 allocates thread 1's memory before thread 1
-	 * knows it needs to allocate more).
-	 * Admittedly this is fairly coarse grained, but contention on this
-	 * lock is unlikely right now. This can be revisited if that ever
-	 * changes.
-	 */
- struct mutex alloc_lock;
- /*
- * protects the list of memory chunks in this pool
- */
- struct mutex chunk_lock;
- struct ion_heap heap;
-	/*
-	 * Bitmap for allocation. This contains the aggregate of all chunks.
-	 */
- unsigned long *bitmap;
- /*
- * List of all allocated chunks
- *
- * This is where things get 'clever'. Individual allocations from
- * dma_alloc_coherent must be freed as a single unit, exactly as obtained.
- * We don't just want to limit the allocations to those confined
- * within a single chunk (if clients allocate n small chunks we would
- * never be able to use the combined size). The bitmap allocator is
- * used to find the contiguous region and the parts of the chunks are
- * marked off as used. The chunks won't be freed in the shrinker until
- * the usage is actually zero.
- */
- struct list_head chunks;
- int npages;
- ion_phys_addr_t base;
- struct work_struct work;
- unsigned long last_alloc;
- struct shrinker shrinker;
- atomic_t total_allocated;
- atomic_t total_pool_size;
- unsigned long heap_size;
- unsigned long default_prefetch_size;
-};
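
The comment above describes the key trick: chunks obtained from the DMA allocator are tracked in one aggregate bitmap, so a client allocation may straddle chunk boundaries. A standalone sketch of the idea (userspace C; the chunk layout and sizes are made up):

#include <stdio.h>
#include <string.h>

#define NPAGES 16

static unsigned char bitmap[NPAGES];	/* 0 = free, 1 = used, one byte per page */

/* first-fit search, the role bitmap_find_next_zero_area() plays above */
static int alloc_pages_range(int npages)
{
	for (int start = 0; start + npages <= NPAGES; start++) {
		int ok = 1;

		for (int i = start; i < start + npages; i++)
			if (bitmap[i])
				ok = 0;
		if (ok) {
			memset(bitmap + start, 1, npages);
			return start;
		}
	}
	return -1;
}

int main(void)
{
	memset(bitmap, 1, NPAGES);	/* everything 'used' until chunks arrive */
	memset(bitmap, 0, 8);		/* chunk A: pages 0-7 */
	memset(bitmap + 8, 0, 8);	/* chunk B: pages 8-15, added later */

	/* 12 pages cannot fit in either 8-page chunk alone, but the
	 * aggregate bitmap lets the allocation span both chunks. */
	printf("got pages starting at %d\n", alloc_pages_range(12));
	return 0;
}
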
-
-static void ion_secure_pool_pages(struct work_struct *work);
-
-/*
- * Create scatter-list for the already allocated DMA buffer.
- * This function could be replaced by dma_common_get_sgtable
- * once that helper becomes available.
- */
-int ion_secure_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
- dma_addr_t handle, size_t size)
-{
- struct page *page = phys_to_page(handle);
- int ret;
-
- ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
- if (unlikely(ret))
- return ret;
-
- sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
- sg_dma_address(sgt->sgl) = handle;
- return 0;
-}
-
-static int ion_secure_cma_add_to_pool(
- struct ion_cma_secure_heap *sheap,
- unsigned long len)
-{
- void *cpu_addr;
- dma_addr_t handle;
- DEFINE_DMA_ATTRS(attrs);
- int ret = 0;
- struct ion_cma_alloc_chunk *chunk;
-
- mutex_lock(&sheap->chunk_lock);
-
- chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
- if (!chunk) {
- ret = -ENOMEM;
- goto out;
- }
-
- dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
-
- cpu_addr = dma_alloc_attrs(sheap->dev, len, &handle, GFP_KERNEL,
- &attrs);
-
- if (!cpu_addr) {
- ret = -ENOMEM;
- goto out_free;
- }
-
- chunk->cpu_addr = cpu_addr;
- chunk->handle = handle;
- chunk->chunk_size = len;
- atomic_set(&chunk->cnt, 0);
- list_add(&chunk->entry, &sheap->chunks);
- atomic_add(len, &sheap->total_pool_size);
- /* clear the bitmap to indicate this region can be allocated from */
- bitmap_clear(sheap->bitmap, (handle - sheap->base) >> PAGE_SHIFT,
- len >> PAGE_SHIFT);
- goto out;
-
-out_free:
- kfree(chunk);
-out:
- mutex_unlock(&sheap->chunk_lock);
- return ret;
-}
-
-static void ion_secure_pool_pages(struct work_struct *work)
-{
- struct ion_cma_secure_heap *sheap = container_of(work,
- struct ion_cma_secure_heap, work);
-
- ion_secure_cma_add_to_pool(sheap, sheap->last_alloc);
-}
-/*
- * @s1: start of the first region
- * @l1: length of the first region
- * @s2: start of the second region
- * @l2: length of the second region
- *
- * Returns the total number of bytes that intersect.
- *
- * s1 is the region we are trying to clear so s2 may be subsumed by s1 but the
- * maximum size to clear should only ever be l1
- *
- */
-static unsigned int intersect(unsigned long s1, unsigned long l1,
- unsigned long s2, unsigned long l2)
-{
- unsigned long base1 = s1;
- unsigned long end1 = s1 + l1;
- unsigned long base2 = s2;
- unsigned long end2 = s2 + l2;
-
- /* Case 0: The regions don't overlap at all */
- if (!(base1 < end2 && base2 < end1))
- return 0;
-
- /* Case 1: region 2 is subsumed by region 1 */
- if (base1 <= base2 && end2 <= end1)
- return l2;
-
- /* case 2: region 1 is subsumed by region 2 */
- if (base2 <= base1 && end1 <= end2)
- return l1;
-
- /* case 3: region1 overlaps region2 on the bottom */
- if (base2 < end1 && base2 > base1)
- return end1 - base2;
-
- /* case 4: region 2 overlaps region1 on the bottom */
- if (base1 < end2 && base1 > base2)
- return end2 - base1;
-
- pr_err("Bad math! Did not detect chunks correctly! %lx %lx %lx %lx\n",
- s1, l1, s2, l2);
- BUG();
-}
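
A tiny self-checking harness for the four overlap cases handled by intersect() (userspace C; paste the function body from above before main(), dropping `static` and the kernel-only BUG()/pr_err calls):

#include <assert.h>

/* intersect() as defined above goes here */

int main(void)
{
	assert(intersect(0, 10, 20, 5) == 0);	/* disjoint */
	assert(intersect(0, 10, 2, 4) == 4);	/* region 2 inside region 1 */
	assert(intersect(2, 4, 0, 10) == 4);	/* region 1 inside region 2 */
	assert(intersect(0, 10, 5, 10) == 5);	/* region 2 overlaps the top of 1 */
	assert(intersect(5, 10, 0, 10) == 5);	/* region 2 overlaps the bottom of 1 */
	return 0;
}
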
-
-int ion_secure_cma_prefetch(struct ion_heap *heap, void *data)
-{
- unsigned long len = (unsigned long)data;
- struct ion_cma_secure_heap *sheap =
- container_of(heap, struct ion_cma_secure_heap, heap);
- unsigned long diff;
-
- if ((int) heap->type != ION_HEAP_TYPE_SECURE_DMA)
- return -EINVAL;
-
- if (len == 0)
- len = sheap->default_prefetch_size;
-
- /*
- * Only prefetch as much space as there is left in the pool so
- * check against the current free size of the heap.
- * This is slightly racy if someone else is allocating at the same
- * time. CMA has a restricted size for the heap so worst case
- * the prefetch doesn't work because the allocation fails.
- */
- diff = sheap->heap_size - atomic_read(&sheap->total_pool_size);
-
- if (len > diff)
- len = diff;
-
- sheap->last_alloc = len;
- schedule_work(&sheap->work);
-
- return 0;
-}
-
-static void bad_math_dump(unsigned long len, int total_overlap,
- struct ion_cma_secure_heap *sheap,
- bool alloc, dma_addr_t paddr)
-{
- struct list_head *entry;
-
- pr_err("Bad math! expected total was %lx actual was %x\n",
- len, total_overlap);
- pr_err("attempted %s address was %pa len %lx\n",
- alloc ? "allocation" : "free", &paddr, len);
- pr_err("chunks:\n");
- list_for_each(entry, &sheap->chunks) {
- struct ion_cma_alloc_chunk *chunk =
- container_of(entry,
- struct ion_cma_alloc_chunk, entry);
- pr_info("--- pa %pa len %lx\n",
- &chunk->handle, chunk->chunk_size);
- }
- BUG();
-
-}
-
-static int ion_secure_cma_alloc_from_pool(
- struct ion_cma_secure_heap *sheap,
- dma_addr_t *phys,
- unsigned long len)
-{
- dma_addr_t paddr;
- unsigned long page_no;
- int ret = 0;
- int total_overlap = 0;
- struct list_head *entry;
-
- mutex_lock(&sheap->chunk_lock);
-
- page_no = bitmap_find_next_zero_area(sheap->bitmap,
- sheap->npages, 0, len >> PAGE_SHIFT, 0);
- if (page_no >= sheap->npages) {
- ret = -ENOMEM;
- goto out;
- }
- bitmap_set(sheap->bitmap, page_no, len >> PAGE_SHIFT);
- paddr = sheap->base + (page_no << PAGE_SHIFT);
-
-
- list_for_each(entry, &sheap->chunks) {
- struct ion_cma_alloc_chunk *chunk = container_of(entry,
- struct ion_cma_alloc_chunk, entry);
- int overlap = intersect(chunk->handle,
- chunk->chunk_size, paddr, len);
-
- atomic_add(overlap, &chunk->cnt);
- total_overlap += overlap;
- }
-
- if (total_overlap != len)
- bad_math_dump(len, total_overlap, sheap, 1, paddr);
-
- *phys = paddr;
-out:
- mutex_unlock(&sheap->chunk_lock);
- return ret;
-}
-
-static void ion_secure_cma_free_chunk(struct ion_cma_secure_heap *sheap,
- struct ion_cma_alloc_chunk *chunk)
-{
- /* This region is 'allocated' and not available to allocate from */
- bitmap_set(sheap->bitmap, (chunk->handle - sheap->base) >> PAGE_SHIFT,
- chunk->chunk_size >> PAGE_SHIFT);
- dma_free_coherent(sheap->dev, chunk->chunk_size, chunk->cpu_addr,
- chunk->handle);
- atomic_sub(chunk->chunk_size, &sheap->total_pool_size);
- list_del(&chunk->entry);
- kfree(chunk);
-
-}
-
-int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused)
-{
- struct ion_cma_secure_heap *sheap =
- container_of(heap, struct ion_cma_secure_heap, heap);
- struct list_head *entry, *_n;
-
- mutex_lock(&sheap->chunk_lock);
- list_for_each_safe(entry, _n, &sheap->chunks) {
- struct ion_cma_alloc_chunk *chunk = container_of(entry,
- struct ion_cma_alloc_chunk, entry);
-
- if (atomic_read(&chunk->cnt) == 0)
- ion_secure_cma_free_chunk(sheap, chunk);
- }
- mutex_unlock(&sheap->chunk_lock);
-
- return 0;
-}
-
-static int ion_secure_cma_shrinker(struct shrinker *shrinker,
- struct shrink_control *sc)
-{
- struct ion_cma_secure_heap *sheap = container_of(shrinker,
- struct ion_cma_secure_heap, shrinker);
- int nr_to_scan = sc->nr_to_scan;
- struct list_head *entry, *_n;
-
- if (nr_to_scan == 0)
- return atomic_read(&sheap->total_pool_size);
-
- /*
- * CMA pages can only be used for movable allocation so don't free if
- * the allocation isn't movable
- */
- if (!(sc->gfp_mask & __GFP_MOVABLE))
- return atomic_read(&sheap->total_pool_size);
-
-	/*
-	 * The allocation path may itself invoke the shrinker, and taking
-	 * chunk_lock again would deadlock, so don't shrink if the lock is
-	 * already held.
-	 */
- if (!mutex_trylock(&sheap->chunk_lock))
- return -1;
-
- list_for_each_safe(entry, _n, &sheap->chunks) {
- struct ion_cma_alloc_chunk *chunk = container_of(entry,
- struct ion_cma_alloc_chunk, entry);
-
- if (nr_to_scan < 0)
- break;
-
- if (atomic_read(&chunk->cnt) == 0) {
- nr_to_scan -= chunk->chunk_size;
- ion_secure_cma_free_chunk(sheap, chunk);
- }
- }
- mutex_unlock(&sheap->chunk_lock);
-
- return atomic_read(&sheap->total_pool_size);
-}
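
This uses the legacy single-callback shrinker interface of this kernel generation: a call with sc->nr_to_scan == 0 is only a query, and the return value reports how many reclaimable units remain (here, pool bytes). A minimal sketch of the same protocol (hypothetical counter, error handling omitted):

#include <linux/kernel.h>
#include <linux/mm.h>

static int demo_pool_size;	/* hypothetical count of reclaimable units */

static int demo_shrink(struct shrinker *s, struct shrink_control *sc)
{
	if (sc->nr_to_scan == 0)	/* query pass: report, don't reclaim */
		return demo_pool_size;

	/* reclaim pass: drop up to nr_to_scan units */
	demo_pool_size -= min_t(int, demo_pool_size, sc->nr_to_scan);
	return demo_pool_size;		/* units remaining */
}

static struct shrinker demo_shrinker = {
	.shrink = demo_shrink,
	.seeks = DEFAULT_SEEKS,
};

/* register_shrinker(&demo_shrinker) at init time and
 * unregister_shrinker(&demo_shrinker) at teardown. */
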
-
-static void ion_secure_cma_free_from_pool(struct ion_cma_secure_heap *sheap,
- dma_addr_t handle,
- unsigned long len)
-{
- struct list_head *entry, *_n;
- int total_overlap = 0;
-
- mutex_lock(&sheap->chunk_lock);
- bitmap_clear(sheap->bitmap, (handle - sheap->base) >> PAGE_SHIFT,
- len >> PAGE_SHIFT);
-
- list_for_each_safe(entry, _n, &sheap->chunks) {
- struct ion_cma_alloc_chunk *chunk = container_of(entry,
- struct ion_cma_alloc_chunk, entry);
- int overlap = intersect(chunk->handle,
- chunk->chunk_size, handle, len);
-
- /*
- * Don't actually free this from the pool list yet, let either
- * an explicit drain call or the shrinkers take care of the
- * pool.
- */
- atomic_sub_return(overlap, &chunk->cnt);
- BUG_ON(atomic_read(&chunk->cnt) < 0);
-
- total_overlap += overlap;
- }
-
- BUG_ON(atomic_read(&sheap->total_pool_size) < 0);
-
- if (total_overlap != len)
- bad_math_dump(len, total_overlap, sheap, 0, handle);
-
- mutex_unlock(&sheap->chunk_lock);
-}
-
-/* ION CMA heap operations functions */
-static struct ion_secure_cma_buffer_info *__ion_secure_cma_allocate(
- struct ion_heap *heap, struct ion_buffer *buffer,
- unsigned long len, unsigned long align,
- unsigned long flags)
-{
- struct ion_cma_secure_heap *sheap =
- container_of(heap, struct ion_cma_secure_heap, heap);
- struct ion_secure_cma_buffer_info *info;
- int ret;
-
- dev_dbg(sheap->dev, "Request buffer allocation len %ld\n", len);
-
- info = kzalloc(sizeof(struct ion_secure_cma_buffer_info), GFP_KERNEL);
- if (!info) {
- dev_err(sheap->dev, "Can't allocate buffer info\n");
- return ION_CMA_ALLOCATE_FAILED;
- }
-
- mutex_lock(&sheap->alloc_lock);
- ret = ion_secure_cma_alloc_from_pool(sheap, &info->phys, len);
-
- if (ret) {
- ret = ion_secure_cma_add_to_pool(sheap, len);
-		if (ret) {
-			mutex_unlock(&sheap->alloc_lock);
-			dev_err(sheap->dev, "Failed to allocate buffer\n");
-			goto err;
-		}
- ret = ion_secure_cma_alloc_from_pool(sheap, &info->phys, len);
- if (ret) {
- /*
- * We just added memory to the pool, we shouldn't be
- * failing to get memory
- */
- BUG();
- }
- }
- mutex_unlock(&sheap->alloc_lock);
-
- atomic_add(len, &sheap->total_allocated);
-	info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
-	if (!info->table) {
-		dev_err(sheap->dev, "Failed to allocate sg table\n");
-		goto err_free_mem;
-	}
-
- ion_secure_cma_get_sgtable(sheap->dev,
- info->table, info->phys, len);
-
- /* keep this for memory release */
- buffer->priv_virt = info;
- dev_dbg(sheap->dev, "Allocate buffer %p\n", buffer);
- return info;
-
-err_free_mem:
-	atomic_sub(len, &sheap->total_allocated);
-	ion_secure_cma_free_from_pool(sheap, info->phys, len);
-err:
-	kfree(info);
-	return ION_CMA_ALLOCATE_FAILED;
-}
-
-static int ion_secure_cma_allocate(struct ion_heap *heap,
- struct ion_buffer *buffer,
- unsigned long len, unsigned long align,
- unsigned long flags)
-{
- unsigned long secure_allocation = flags & ION_FLAG_SECURE;
- struct ion_secure_cma_buffer_info *buf = NULL;
-
- if (!secure_allocation) {
- pr_err("%s: non-secure allocation disallowed from heap %s %lx\n",
- __func__, heap->name, flags);
- return -ENOMEM;
- }
-
- if (ION_IS_CACHED(flags)) {
- pr_err("%s: cannot allocate cached memory from secure heap %s\n",
- __func__, heap->name);
- return -ENOMEM;
- }
-
-
- buf = __ion_secure_cma_allocate(heap, buffer, len, align, flags);
-
- if (buf) {
- int ret;
-
- if (!msm_secure_v2_is_supported()) {
- pr_debug("%s: securing buffers is not supported on this platform\n",
- __func__);
- ret = 1;
- } else {
- ret = msm_ion_secure_table(buf->table, 0, 0);
- }
- if (ret) {
- /*
- * Don't treat the secure buffer failing here as an
- * error for backwards compatibility reasons. If
- * the secure fails, the map will also fail so there
- * is no security risk.
- */
- pr_debug("%s: failed to secure buffer\n", __func__);
- }
- return 0;
- } else {
- return -ENOMEM;
- }
-}
-
-
-static void ion_secure_cma_free(struct ion_buffer *buffer)
-{
- struct ion_cma_secure_heap *sheap =
- container_of(buffer->heap, struct ion_cma_secure_heap, heap);
- struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
-
- dev_dbg(sheap->dev, "Release buffer %p\n", buffer);
- if (msm_secure_v2_is_supported())
- msm_ion_unsecure_table(info->table);
- atomic_sub(buffer->size, &sheap->total_allocated);
- BUG_ON(atomic_read(&sheap->total_allocated) < 0);
- /* release memory */
- ion_secure_cma_free_from_pool(sheap, info->phys, buffer->size);
- /* release sg table */
- sg_free_table(info->table);
- kfree(info->table);
- kfree(info);
-}
-
-static int ion_secure_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
- ion_phys_addr_t *addr, size_t *len)
-{
- struct ion_cma_secure_heap *sheap =
- container_of(heap, struct ion_cma_secure_heap, heap);
- struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
-
- dev_dbg(sheap->dev, "Return buffer %p physical address 0x%pa\n", buffer,
- &info->phys);
-
- *addr = info->phys;
- *len = buffer->size;
-
- return 0;
-}
-
-struct sg_table *ion_secure_cma_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
-
- return info->table;
-}
-
-void ion_secure_cma_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- return;
-}
-
-static int ion_secure_cma_mmap(struct ion_heap *mapper,
- struct ion_buffer *buffer,
- struct vm_area_struct *vma)
-{
- pr_info("%s: mmaping from secure heap %s disallowed\n",
- __func__, mapper->name);
- return -EINVAL;
-}
-
-static void *ion_secure_cma_map_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- pr_info("%s: kernel mapping from secure heap %s disallowed\n",
- __func__, heap->name);
- return NULL;
-}
-
-static void ion_secure_cma_unmap_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- return;
-}
-
-static int ion_secure_cma_print_debug(struct ion_heap *heap, struct seq_file *s,
- const struct list_head *mem_map)
-{
- struct ion_cma_secure_heap *sheap =
- container_of(heap, struct ion_cma_secure_heap, heap);
-
- if (mem_map) {
- struct mem_map_data *data;
-
- seq_printf(s, "\nMemory Map\n");
-		seq_printf(s, "%16s %14s %14s %14s\n",
- "client", "start address", "end address",
- "size (hex)");
-
- list_for_each_entry(data, mem_map, node) {
- const char *client_name = "(null)";
-
-
- if (data->client_name)
- client_name = data->client_name;
-
-			seq_printf(s, "%16s %14pa %14pa %14lu (%lx)\n",
- client_name, &data->addr,
- &data->addr_end,
- data->size, data->size);
- }
- }
- seq_printf(s, "Total allocated: %x\n",
- atomic_read(&sheap->total_allocated));
- seq_printf(s, "Total pool size: %x\n",
- atomic_read(&sheap->total_pool_size));
- return 0;
-}
-
-static struct ion_heap_ops ion_secure_cma_ops = {
- .allocate = ion_secure_cma_allocate,
- .free = ion_secure_cma_free,
- .map_dma = ion_secure_cma_heap_map_dma,
- .unmap_dma = ion_secure_cma_heap_unmap_dma,
- .phys = ion_secure_cma_phys,
- .map_user = ion_secure_cma_mmap,
- .map_kernel = ion_secure_cma_map_kernel,
- .unmap_kernel = ion_secure_cma_unmap_kernel,
- .print_debug = ion_secure_cma_print_debug,
-};
-
-struct ion_heap *ion_secure_cma_heap_create(struct ion_platform_heap *data)
-{
- struct ion_cma_secure_heap *sheap;
- int map_size = BITS_TO_LONGS(data->size >> PAGE_SHIFT) * sizeof(long);
-
- sheap = kzalloc(sizeof(*sheap), GFP_KERNEL);
- if (!sheap)
- return ERR_PTR(-ENOMEM);
-
- sheap->dev = data->priv;
- mutex_init(&sheap->chunk_lock);
- mutex_init(&sheap->alloc_lock);
- sheap->heap.ops = &ion_secure_cma_ops;
- sheap->heap.type = ION_HEAP_TYPE_SECURE_DMA;
- sheap->npages = data->size >> PAGE_SHIFT;
- sheap->base = data->base;
- sheap->heap_size = data->size;
-	sheap->bitmap = kmalloc(map_size, GFP_KERNEL);
-	if (!sheap->bitmap) {
-		kfree(sheap);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	INIT_LIST_HEAD(&sheap->chunks);
-	INIT_WORK(&sheap->work, ion_secure_pool_pages);
-	sheap->shrinker.seeks = DEFAULT_SEEKS;
-	sheap->shrinker.batch = 0;
-	sheap->shrinker.shrink = ion_secure_cma_shrinker;
-	sheap->default_prefetch_size = sheap->heap_size;
-	register_shrinker(&sheap->shrinker);
-
- if (data->extra_data) {
- struct ion_cma_pdata *extra = data->extra_data;
- sheap->default_prefetch_size = extra->default_prefetch_size;
- }
-
- /*
- * we initially mark everything in the allocator as being free so that
- * allocations can come in later
- */
- bitmap_fill(sheap->bitmap, sheap->npages);
-
- return &sheap->heap;
-}
-
-void ion_secure_cma_heap_destroy(struct ion_heap *heap)
-{
- struct ion_cma_secure_heap *sheap =
- container_of(heap, struct ion_cma_secure_heap, heap);
-
-	unregister_shrinker(&sheap->shrinker);
-	kfree(sheap->bitmap);
-	kfree(sheap);
-}
diff --git a/drivers/gpu/ion/ion_cp_heap.c b/drivers/gpu/ion/ion_cp_heap.c
deleted file mode 100644
index 5dd15bf03c2..00000000000
--- a/drivers/gpu/ion/ion_cp_heap.c
+++ /dev/null
@@ -1,887 +0,0 @@
-/*
- * drivers/gpu/ion/ion_cp_heap.c
- *
- * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/genalloc.h>
-#include <linux/io.h>
-#include <linux/msm_ion.h>
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/seq_file.h>
-#include <linux/iommu.h>
-#include <linux/dma-mapping.h>
-#include <trace/events/kmem.h>
-
-#include <mach/scm.h>
-
-#include "ion_priv.h"
-
-#include <asm/cacheflush.h>
-
-#include "msm/ion_cp_common.h"
-/**
- * struct ion_cp_heap - container for the heap and shared heap data
- *
- * @heap: the heap information structure
- * @pool: memory pool to allocate from.
- * @base: the base address of the memory pool.
- * @permission_type: Identifier for the memory used by SCM for protecting
- * and unprotecting memory.
- * @secure_base: Base address used when securing a heap that is shared.
- * @secure_size: Size used when securing a heap that is shared.
- * @lock: mutex to protect shared access.
- * @heap_protected: Indicates whether heap has been protected or not.
- * @allocated_bytes: the total number of allocated bytes from the pool.
- * @total_size: the total size of the memory pool.
- * @request_region: function pointer to call when first mapping of memory
- * occurs.
- * @release_region: function pointer to call when last mapping of memory
- * unmapped.
- * @bus_id: token used with request/release region.
- * @kmap_cached_count: the total number of times this heap has been mapped in
- * kernel space (cached).
- * @kmap_uncached_count:the total number of times this heap has been mapped in
- * kernel space (un-cached).
- * @umap_count: the total number of times this heap has been mapped in
- * user space.
- * @has_outer_cache: set to 1 if outer cache is used, 0 otherwise.
-*/
-struct ion_cp_heap {
- struct ion_heap heap;
- struct gen_pool *pool;
- ion_phys_addr_t base;
- unsigned int permission_type;
- ion_phys_addr_t secure_base;
- size_t secure_size;
- struct mutex lock;
- unsigned int heap_protected;
- unsigned long allocated_bytes;
- unsigned long total_size;
- int (*heap_request_region)(void *);
- int (*heap_release_region)(void *);
- void *bus_id;
- unsigned long kmap_cached_count;
- unsigned long kmap_uncached_count;
- unsigned long umap_count;
- unsigned int has_outer_cache;
- atomic_t protect_cnt;
- void *cpu_addr;
- size_t heap_size;
- dma_addr_t handle;
- int cma;
- int allow_non_secure_allocation;
-};
-
-enum {
- HEAP_NOT_PROTECTED = 0,
- HEAP_PROTECTED = 1,
-};
-
-/* SCM related code for locking down memory for content protection */
-
-#define SCM_CP_LOCK_CMD_ID 0x1
-#define SCM_CP_PROTECT 0x1
-#define SCM_CP_UNPROTECT 0x0
-
-struct cp_lock_msg {
- unsigned int start;
- unsigned int end;
- unsigned int permission_type;
- unsigned char lock;
-} __attribute__ ((__packed__));
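
The __packed__ attribute matters here: the message is parsed byte-for-byte by the secure world, and without it the trailing flag byte would be padded out to a 16-byte struct. A standalone check of the layout (userspace C, same struct, hypothetical test):

#include <stddef.h>
#include <stdio.h>

struct cp_lock_msg {
	unsigned int start;
	unsigned int end;
	unsigned int permission_type;
	unsigned char lock;
} __attribute__((__packed__));

int main(void)
{
	/* 3 x 4-byte words + 1 flag byte = 13 bytes on the wire */
	printf("sizeof = %zu, lock at offset %zu\n",
	       sizeof(struct cp_lock_msg),
	       offsetof(struct cp_lock_msg, lock));
	return 0;
}
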
-
-#define DMA_ALLOC_TRIES 5
-
-static int allocate_heap_memory(struct ion_heap *heap)
-{
- struct device *dev = heap->priv;
- struct ion_cp_heap *cp_heap =
- container_of(heap, struct ion_cp_heap, heap);
- int ret;
- int tries = 0;
- DEFINE_DMA_ATTRS(attrs);
- dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
-
-
- if (cp_heap->cpu_addr)
- return 0;
-
- while (!cp_heap->cpu_addr && (++tries < DMA_ALLOC_TRIES)) {
- cp_heap->cpu_addr = dma_alloc_attrs(dev,
- cp_heap->heap_size,
- &(cp_heap->handle),
- 0,
- &attrs);
- if (!cp_heap->cpu_addr) {
- trace_ion_cp_alloc_retry(tries);
- msleep(20);
- }
- }
-
- if (!cp_heap->cpu_addr)
- goto out;
-
- cp_heap->base = cp_heap->handle;
-
- cp_heap->pool = gen_pool_create(12, -1);
- if (!cp_heap->pool)
- goto out_free;
-
- ret = gen_pool_add(cp_heap->pool, cp_heap->base,
- cp_heap->heap_size, -1);
- if (ret < 0)
- goto out_pool;
-
- return 0;
-
-out_pool:
- gen_pool_destroy(cp_heap->pool);
-out_free:
- dma_free_coherent(dev, cp_heap->heap_size, cp_heap->cpu_addr,
- cp_heap->handle);
-out:
- return ION_CP_ALLOCATE_FAIL;
-}
-
-static void free_heap_memory(struct ion_heap *heap)
-{
- struct device *dev = heap->priv;
- struct ion_cp_heap *cp_heap =
- container_of(heap, struct ion_cp_heap, heap);
-
- /* release memory */
- dma_free_coherent(dev, cp_heap->heap_size, cp_heap->cpu_addr,
- cp_heap->handle);
- gen_pool_destroy(cp_heap->pool);
- cp_heap->pool = NULL;
- cp_heap->cpu_addr = 0;
-}
-
-
-
-/**
- * Get the total number of kernel mappings.
- * Must be called with heap->lock locked.
- */
-static unsigned long ion_cp_get_total_kmap_count(
- const struct ion_cp_heap *cp_heap)
-{
- return cp_heap->kmap_cached_count + cp_heap->kmap_uncached_count;
-}
-
-static int ion_on_first_alloc(struct ion_heap *heap)
-{
- struct ion_cp_heap *cp_heap =
- container_of(heap, struct ion_cp_heap, heap);
- int ret_value;
-
- if (cp_heap->cma) {
- ret_value = allocate_heap_memory(heap);
- if (ret_value)
- return 1;
- }
- return 0;
-}
-
-static void ion_on_last_free(struct ion_heap *heap)
-{
- struct ion_cp_heap *cp_heap =
- container_of(heap, struct ion_cp_heap, heap);
-
- if (cp_heap->cma)
- free_heap_memory(heap);
-}
-
-/**
- * Protects memory if the heap is not already protected.
- * Must be called with heap->lock locked.
- */
-static int ion_cp_protect(struct ion_heap *heap, int version, void *data)
-{
- struct ion_cp_heap *cp_heap =
- container_of(heap, struct ion_cp_heap, heap);
- int ret_value = 0;
-
- if (atomic_inc_return(&cp_heap->protect_cnt) == 1) {
-		/* Make sure the backing memory is allocated before the heap is protected. */
- if (!cp_heap->allocated_bytes)
- if (ion_on_first_alloc(heap))
- goto out;
-
- ret_value = ion_cp_protect_mem(cp_heap->secure_base,
- cp_heap->secure_size, cp_heap->permission_type,
- version, data);
- if (ret_value) {
-			pr_err("Failed to protect memory for heap %s - error code: %d\n",
-				heap->name, ret_value);
-
- if (!cp_heap->allocated_bytes)
- ion_on_last_free(heap);
-
- atomic_dec(&cp_heap->protect_cnt);
- } else {
- cp_heap->heap_protected = HEAP_PROTECTED;
- pr_debug("Protected heap %s @ 0x%pa\n",
- heap->name, &cp_heap->base);
- }
- }
-out:
- pr_debug("%s: protect count is %d\n", __func__,
- atomic_read(&cp_heap->protect_cnt));
- BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0);
- return ret_value;
-}
-
-/**
- * Unprotects memory if the heap is currently protected.
- * Must be called with heap->lock locked.
- */
-static void ion_cp_unprotect(struct ion_heap *heap, int version, void *data)
-{
- struct ion_cp_heap *cp_heap =
- container_of(heap, struct ion_cp_heap, heap);
-
- if (atomic_dec_and_test(&cp_heap->protect_cnt)) {
- int error_code = ion_cp_unprotect_mem(
- cp_heap->secure_base, cp_heap->secure_size,
- cp_heap->permission_type, version, data);
- if (error_code) {
-			pr_err("Failed to un-protect memory for heap %s - error code: %d\n",
-				heap->name, error_code);
- } else {
- cp_heap->heap_protected = HEAP_NOT_PROTECTED;
-			pr_debug("Un-protected heap %s @ 0x%pa\n", heap->name,
-				 &cp_heap->base);
-
- if (!cp_heap->allocated_bytes)
- ion_on_last_free(heap);
- }
- }
- pr_debug("%s: protect count is %d\n", __func__,
- atomic_read(&cp_heap->protect_cnt));
- BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0);
-}
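
ion_cp_protect()/ion_cp_unprotect() implement refcount-gated setup and teardown: only the 0 -> 1 and 1 -> 0 transitions do real work (the SCM call), and nested users just bump the count. A standalone sketch of the pattern (C11 atomics, hypothetical names; the driver additionally serializes these transitions under heap->lock):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int protect_cnt;

static void do_protect(void)   { puts("protect (SCM call)"); }
static void do_unprotect(void) { puts("unprotect (SCM call)"); }

void get_protection(void)
{
	/* only the 0 -> 1 transition performs the expensive operation */
	if (atomic_fetch_add(&protect_cnt, 1) == 0)
		do_protect();
}

void put_protection(void)
{
	/* only the 1 -> 0 transition tears it down */
	if (atomic_fetch_sub(&protect_cnt, 1) == 1)
		do_unprotect();
}

int main(void)
{
	get_protection();	/* protects */
	get_protection();	/* nested: no-op */
	put_protection();	/* still held */
	put_protection();	/* last reference: unprotects */
	return 0;
}
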
-
-ion_phys_addr_t ion_cp_allocate(struct ion_heap *heap,
- unsigned long size,
- unsigned long align,
- unsigned long flags)
-{
- unsigned long offset;
- unsigned long secure_allocation = flags & ION_FLAG_SECURE;
- unsigned long force_contig = flags & ION_FLAG_FORCE_CONTIGUOUS;
-
- struct ion_cp_heap *cp_heap =
- container_of(heap, struct ion_cp_heap, heap);
-
- mutex_lock(&cp_heap->lock);
- if (!secure_allocation && cp_heap->heap_protected == HEAP_PROTECTED) {
- mutex_unlock(&cp_heap->lock);
-		pr_err("ION cannot allocate un-secure memory from protected heap %s\n",
-			heap->name);
- return ION_CP_ALLOCATE_FAIL;
- }
-
- if (!force_contig && !secure_allocation &&
- !cp_heap->allow_non_secure_allocation) {
- mutex_unlock(&cp_heap->lock);
- pr_debug("%s: non-secure allocation disallowed from this heap\n",
- __func__);
- return ION_CP_ALLOCATE_FAIL;
- }
-
- /*
- * The check above already checked for non-secure allocations when the
- * heap is protected. HEAP_PROTECTED implies that this must be a secure
- * allocation. If the heap is protected and there are userspace or
- * cached kernel mappings, something has gone wrong in the security
- * model.
- */
- if (cp_heap->heap_protected == HEAP_PROTECTED) {
- BUG_ON(cp_heap->umap_count != 0);
- BUG_ON(cp_heap->kmap_cached_count != 0);
- }
-
- /*
- * if this is the first reusable allocation, transition
- * the heap
- */
- if (!cp_heap->allocated_bytes)
- if (ion_on_first_alloc(heap)) {
- mutex_unlock(&cp_heap->lock);
- return ION_RESERVED_ALLOCATE_FAIL;
- }
-
- cp_heap->allocated_bytes += size;
- mutex_unlock(&cp_heap->lock);
-
- offset = gen_pool_alloc_aligned(cp_heap->pool,
- size, ilog2(align));
-
- if (!offset) {
- mutex_lock(&cp_heap->lock);
- cp_heap->allocated_bytes -= size;
- if ((cp_heap->total_size -
- cp_heap->allocated_bytes) >= size)
-			pr_debug("%s: heap %s has enough memory (%lx) but the allocation of size %lx still failed. Memory is probably fragmented.\n",
-				__func__, heap->name,
-				cp_heap->total_size -
-				cp_heap->allocated_bytes, size);
- if (!cp_heap->allocated_bytes &&
- cp_heap->heap_protected == HEAP_NOT_PROTECTED)
- ion_on_last_free(heap);
- mutex_unlock(&cp_heap->lock);
-
- return ION_CP_ALLOCATE_FAIL;
- }
-
- return offset;
-}
-
-void ion_cp_free(struct ion_heap *heap, ion_phys_addr_t addr,
- unsigned long size)
-{
- struct ion_cp_heap *cp_heap =
- container_of(heap, struct ion_cp_heap, heap);
-
- if (addr == ION_CP_ALLOCATE_FAIL)
- return;
- gen_pool_free(cp_heap->pool, addr, size);
-
- mutex_lock(&cp_heap->lock);
- cp_heap->allocated_bytes -= size;
-
- if (!cp_heap->allocated_bytes &&
- cp_heap->heap_protected == HEAP_NOT_PROTECTED)
- ion_on_last_free(heap);
-
- mutex_unlock(&cp_heap->lock);
-}
-
-static int ion_cp_heap_phys(struct ion_heap *heap,
- struct ion_buffer *buffer,
- ion_phys_addr_t *addr, size_t *len)
-{
- struct ion_cp_buffer *buf = buffer->priv_virt;
-
- *addr = buf->buffer;
- *len = buffer->size;
- return 0;
-}
-
-static int ion_cp_heap_allocate(struct ion_heap *heap,
- struct ion_buffer *buffer,
- unsigned long size, unsigned long align,
- unsigned long flags)
-{
- struct ion_cp_buffer *buf;
- phys_addr_t addr;
-
- /*
- * we never want Ion to fault pages in for us with this
- * heap. We want to set up the mappings ourselves in .map_user
- */
- flags |= ION_FLAG_CACHED_NEEDS_SYNC;
-
-	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	addr = ion_cp_allocate(heap, size, align, flags);
-	if (addr == ION_CP_ALLOCATE_FAIL) {
-		kfree(buf);
-		return -ENOMEM;
-	}
-
- buf->buffer = addr;
- buf->want_delayed_unsecure = 0;
- atomic_set(&buf->secure_cnt, 0);
- mutex_init(&buf->lock);
- buf->is_secure = flags & ION_FLAG_SECURE ? 1 : 0;
- buffer->priv_virt = buf;
-
- return 0;
-}
-
-static void ion_cp_heap_free(struct ion_buffer *buffer)
-{
- struct ion_heap *heap = buffer->heap;
- struct ion_cp_buffer *buf = buffer->priv_virt;
-
- ion_cp_free(heap, buf->buffer, buffer->size);
- WARN_ON(atomic_read(&buf->secure_cnt));
- WARN_ON(atomic_read(&buf->map_cnt));
- kfree(buf);
-
- buffer->priv_virt = NULL;
-}
-
-struct sg_table *ion_cp_heap_create_sg_table(struct ion_buffer *buffer)
-{
- size_t chunk_size = buffer->size;
- struct ion_cp_buffer *buf = buffer->priv_virt;
-
- if (ION_IS_CACHED(buffer->flags))
- chunk_size = PAGE_SIZE;
- else if (buf->is_secure && IS_ALIGNED(buffer->size, SZ_1M))
- chunk_size = SZ_1M;
-
- return ion_create_chunked_sg_table(buf->buffer, chunk_size,
- buffer->size);
-}
-
-struct sg_table *ion_cp_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- return ion_cp_heap_create_sg_table(buffer);
-}
-
-void ion_cp_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- if (buffer->sg_table)
- sg_free_table(buffer->sg_table);
- kfree(buffer->sg_table);
-	buffer->sg_table = NULL;
-}
-
-/**
- * Call request region for SMI memory if this is the first mapping.
- */
-static int ion_cp_request_region(struct ion_cp_heap *cp_heap)
-{
- int ret_value = 0;
- if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0)
- if (cp_heap->heap_request_region)
- ret_value = cp_heap->heap_request_region(
- cp_heap->bus_id);
- return ret_value;
-}
-
-/**
- * Call release region for SMI memory if this is the last un-mapping.
- */
-static int ion_cp_release_region(struct ion_cp_heap *cp_heap)
-{
- int ret_value = 0;
- if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0)
- if (cp_heap->heap_release_region)
- ret_value = cp_heap->heap_release_region(
- cp_heap->bus_id);
- return ret_value;
-}
-
-void *ion_cp_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer)
-{
- struct ion_cp_heap *cp_heap =
- container_of(heap, struct ion_cp_heap, heap);
- void *ret_value = NULL;
- struct ion_cp_buffer *buf = buffer->priv_virt;
-
- mutex_lock(&cp_heap->lock);
- if ((cp_heap->heap_protected == HEAP_NOT_PROTECTED) ||
- ((cp_heap->heap_protected == HEAP_PROTECTED) &&
- !ION_IS_CACHED(buffer->flags))) {
-
- if (ion_cp_request_region(cp_heap)) {
- mutex_unlock(&cp_heap->lock);
- return NULL;
- }
-
- if (cp_heap->cma) {
- int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
- struct page **pages = vmalloc(
- sizeof(struct page *) * npages);
- int i;
- pgprot_t pgprot;
-
- if (!pages) {
- mutex_unlock(&cp_heap->lock);
- return ERR_PTR(-ENOMEM);
- }
-
- if (ION_IS_CACHED(buffer->flags))
- pgprot = PAGE_KERNEL;
- else
- pgprot = pgprot_writecombine(PAGE_KERNEL);
-
- for (i = 0; i < npages; i++) {
- pages[i] = phys_to_page(buf->buffer +
- i * PAGE_SIZE);
- }
- ret_value = vmap(pages, npages, VM_IOREMAP, pgprot);
- vfree(pages);
- } else {
- if (ION_IS_CACHED(buffer->flags))
- ret_value = ioremap_cached(buf->buffer,
- buffer->size);
- else
- ret_value = ioremap(buf->buffer,
- buffer->size);
- }
-
- if (!ret_value) {
- ion_cp_release_region(cp_heap);
- } else {
- if (ION_IS_CACHED(buffer->flags))
- ++cp_heap->kmap_cached_count;
- else
- ++cp_heap->kmap_uncached_count;
- atomic_inc(&buf->map_cnt);
- }
- }
- mutex_unlock(&cp_heap->lock);
- return ret_value;
-}
-
-void ion_cp_heap_unmap_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- struct ion_cp_heap *cp_heap =
- container_of(heap, struct ion_cp_heap, heap);
- struct ion_cp_buffer *buf = buffer->priv_virt;
-
- if (cp_heap->cma)
- vunmap(buffer->vaddr);
- else
- iounmap(buffer->vaddr);
-
- buffer->vaddr = NULL;
-
- mutex_lock(&cp_heap->lock);
- if (ION_IS_CACHED(buffer->flags))
- --cp_heap->kmap_cached_count;
- else
- --cp_heap->kmap_uncached_count;
-
- atomic_dec(&buf->map_cnt);
- ion_cp_release_region(cp_heap);
- mutex_unlock(&cp_heap->lock);
-
- return;
-}
-
-int ion_cp_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
- struct vm_area_struct *vma)
-{
- int ret_value = -EAGAIN;
- struct ion_cp_heap *cp_heap =
- container_of(heap, struct ion_cp_heap, heap);
- struct ion_cp_buffer *buf = buffer->priv_virt;
-
- mutex_lock(&cp_heap->lock);
- if (cp_heap->heap_protected == HEAP_NOT_PROTECTED && !buf->is_secure) {
- if (ion_cp_request_region(cp_heap)) {
- mutex_unlock(&cp_heap->lock);
- return -EINVAL;
- }
-
- if (!ION_IS_CACHED(buffer->flags))
- vma->vm_page_prot = pgprot_writecombine(
- vma->vm_page_prot);
-
- ret_value = remap_pfn_range(vma, vma->vm_start,
- __phys_to_pfn(buf->buffer) + vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
-
- if (ret_value) {
- ion_cp_release_region(cp_heap);
- } else {
- atomic_inc(&buf->map_cnt);
- ++cp_heap->umap_count;
- }
-
- }
- mutex_unlock(&cp_heap->lock);
- return ret_value;
-}
-
-void ion_cp_heap_unmap_user(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- struct ion_cp_heap *cp_heap =
- container_of(heap, struct ion_cp_heap, heap);
- struct ion_cp_buffer *buf = buffer->priv_virt;
-
- mutex_lock(&cp_heap->lock);
- --cp_heap->umap_count;
- atomic_dec(&buf->map_cnt);
- ion_cp_release_region(cp_heap);
- mutex_unlock(&cp_heap->lock);
-}
-
-static int ion_cp_print_debug(struct ion_heap *heap, struct seq_file *s,
- const struct list_head *mem_map)
-{
- unsigned long total_alloc;
- unsigned long total_size;
- unsigned long umap_count;
- unsigned long kmap_count;
- unsigned long heap_protected;
- struct ion_cp_heap *cp_heap =
- container_of(heap, struct ion_cp_heap, heap);
-
- mutex_lock(&cp_heap->lock);
- total_alloc = cp_heap->allocated_bytes;
- total_size = cp_heap->total_size;
- umap_count = cp_heap->umap_count;
- kmap_count = ion_cp_get_total_kmap_count(cp_heap);
- heap_protected = cp_heap->heap_protected == HEAP_PROTECTED;
- mutex_unlock(&cp_heap->lock);
-
- seq_printf(s, "total bytes currently allocated: %lx\n", total_alloc);
- seq_printf(s, "total heap size: %lx\n", total_size);
- seq_printf(s, "umapping count: %lx\n", umap_count);
- seq_printf(s, "kmapping count: %lx\n", kmap_count);
- seq_printf(s, "heap protected: %s\n", heap_protected ? "Yes" : "No");
-
- if (mem_map) {
- unsigned long base = cp_heap->base;
- unsigned long size = cp_heap->total_size;
- unsigned long end = base+size;
- unsigned long last_end = base;
- struct mem_map_data *data;
-
- seq_printf(s, "\nMemory Map\n");
-		seq_printf(s, "%16s %14s %14s %14s\n",
- "client", "start address", "end address",
- "size (hex)");
-
- list_for_each_entry(data, mem_map, node) {
- const char *client_name = "(null)";
-
- if (last_end < data->addr) {
- phys_addr_t da;
-
- da = data->addr-1;
-				seq_printf(s, "%16s %14pa %14pa %14lu (%lx)\n",
- "FREE", &last_end, &da,
- (unsigned long)data->addr-last_end,
- (unsigned long)data->addr-last_end);
- }
-
- if (data->client_name)
- client_name = data->client_name;
-
-			seq_printf(s, "%16s %14pa %14pa %14lu (%lx)\n",
- client_name, &data->addr,
- &data->addr_end,
- data->size, data->size);
- last_end = data->addr_end+1;
- }
- if (last_end < end) {
-			seq_printf(s, "%16s %14lx %14lx %14lu (%lx)\n", "FREE",
- last_end, end-1, end-last_end, end-last_end);
- }
- }
-
- return 0;
-}
-
-int ion_cp_secure_heap(struct ion_heap *heap, int version, void *data)
-{
- int ret_value;
- struct ion_cp_heap *cp_heap =
- container_of(heap, struct ion_cp_heap, heap);
- mutex_lock(&cp_heap->lock);
- if (cp_heap->umap_count == 0 && cp_heap->kmap_cached_count == 0) {
- ret_value = ion_cp_protect(heap, version, data);
- } else {
-		pr_err("ION cannot secure heap with outstanding mappings: User space: %lu, kernel space (cached): %lu\n",
-			cp_heap->umap_count, cp_heap->kmap_cached_count);
- ret_value = -EINVAL;
- }
-
- mutex_unlock(&cp_heap->lock);
- return ret_value;
-}
-
-int ion_cp_unsecure_heap(struct ion_heap *heap, int version, void *data)
-{
- int ret_value = 0;
- struct ion_cp_heap *cp_heap =
- container_of(heap, struct ion_cp_heap, heap);
- mutex_lock(&cp_heap->lock);
- ion_cp_unprotect(heap, version, data);
- mutex_unlock(&cp_heap->lock);
- return ret_value;
-}
-
-static struct ion_heap_ops cp_heap_ops = {
- .allocate = ion_cp_heap_allocate,
- .free = ion_cp_heap_free,
- .phys = ion_cp_heap_phys,
- .map_user = ion_cp_heap_map_user,
- .unmap_user = ion_cp_heap_unmap_user,
- .map_kernel = ion_cp_heap_map_kernel,
- .unmap_kernel = ion_cp_heap_unmap_kernel,
- .map_dma = ion_cp_heap_map_dma,
- .unmap_dma = ion_cp_heap_unmap_dma,
- .print_debug = ion_cp_print_debug,
- .secure_heap = ion_cp_secure_heap,
- .unsecure_heap = ion_cp_unsecure_heap,
-};
-
-struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *heap_data)
-{
- struct ion_cp_heap *cp_heap;
- int ret;
-
- cp_heap = kzalloc(sizeof(*cp_heap), GFP_KERNEL);
- if (!cp_heap)
- return ERR_PTR(-ENOMEM);
-
- mutex_init(&cp_heap->lock);
-
-
- cp_heap->allocated_bytes = 0;
- cp_heap->umap_count = 0;
- cp_heap->kmap_cached_count = 0;
- cp_heap->kmap_uncached_count = 0;
- cp_heap->total_size = heap_data->size;
- cp_heap->heap.ops = &cp_heap_ops;
- cp_heap->heap.type = (enum ion_heap_type) ION_HEAP_TYPE_CP;
- cp_heap->heap_protected = HEAP_NOT_PROTECTED;
- cp_heap->secure_base = heap_data->base;
- cp_heap->secure_size = heap_data->size;
- cp_heap->has_outer_cache = heap_data->has_outer_cache;
- cp_heap->heap_size = heap_data->size;
-
- atomic_set(&cp_heap->protect_cnt, 0);
- if (heap_data->extra_data) {
- struct ion_cp_heap_pdata *extra_data =
- heap_data->extra_data;
- cp_heap->permission_type = extra_data->permission_type;
- if (extra_data->secure_size) {
- cp_heap->secure_base = extra_data->secure_base;
- cp_heap->secure_size = extra_data->secure_size;
- }
- if (extra_data->setup_region)
- cp_heap->bus_id = extra_data->setup_region();
- if (extra_data->request_region)
- cp_heap->heap_request_region =
- extra_data->request_region;
- if (extra_data->release_region)
- cp_heap->heap_release_region =
- extra_data->release_region;
- cp_heap->cma = extra_data->is_cma;
- cp_heap->allow_non_secure_allocation =
- extra_data->allow_nonsecure_alloc;
-
- }
-
- if (cp_heap->cma) {
- cp_heap->pool = NULL;
- cp_heap->cpu_addr = 0;
- cp_heap->heap.priv = heap_data->priv;
- } else {
- cp_heap->pool = gen_pool_create(12, -1);
- if (!cp_heap->pool)
- goto free_heap;
-
- cp_heap->base = heap_data->base;
- ret = gen_pool_add(cp_heap->pool, cp_heap->base,
- heap_data->size, -1);
- if (ret < 0)
- goto destroy_pool;
-
- }
- return &cp_heap->heap;
-
-destroy_pool:
- gen_pool_destroy(cp_heap->pool);
-
-free_heap:
- kfree(cp_heap);
-
- return ERR_PTR(-ENOMEM);
-}
-
-void ion_cp_heap_destroy(struct ion_heap *heap)
-{
- struct ion_cp_heap *cp_heap =
- container_of(heap, struct ion_cp_heap, heap);
-
- gen_pool_destroy(cp_heap->pool);
- kfree(cp_heap);
-}
-
-void ion_cp_heap_get_base(struct ion_heap *heap, unsigned long *base,
-			unsigned long *size)
-{
- struct ion_cp_heap *cp_heap =
- container_of(heap, struct ion_cp_heap, heap);
- *base = cp_heap->base;
- *size = cp_heap->total_size;
-}
-
-static int ion_cp_protect_mem_v1(unsigned int phy_base, unsigned int size,
- unsigned int permission_type)
-{
- struct cp_lock_msg cmd;
- cmd.start = phy_base;
- cmd.end = phy_base + size;
- cmd.permission_type = permission_type;
- cmd.lock = SCM_CP_PROTECT;
-
- return scm_call(SCM_SVC_MP, SCM_CP_LOCK_CMD_ID,
- &cmd, sizeof(cmd), NULL, 0);
-}
-
-static int ion_cp_unprotect_mem_v1(unsigned int phy_base, unsigned int size,
- unsigned int permission_type)
-{
- struct cp_lock_msg cmd;
- cmd.start = phy_base;
- cmd.end = phy_base + size;
- cmd.permission_type = permission_type;
- cmd.lock = SCM_CP_UNPROTECT;
-
- return scm_call(SCM_SVC_MP, SCM_CP_LOCK_CMD_ID,
- &cmd, sizeof(cmd), NULL, 0);
-}
-
-int ion_cp_protect_mem(unsigned int phy_base, unsigned int size,
- unsigned int permission_type, int version,
- void *data)
-{
- switch (version) {
- case ION_CP_V1:
- return ion_cp_protect_mem_v1(phy_base, size, permission_type);
- default:
- return -EINVAL;
- }
-}
-
-int ion_cp_unprotect_mem(unsigned int phy_base, unsigned int size,
- unsigned int permission_type, int version,
- void *data)
-{
- switch (version) {
- case ION_CP_V1:
- return ion_cp_unprotect_mem_v1(phy_base, size, permission_type);
- default:
- return -EINVAL;
- }
-}
-
diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/gpu/ion/ion_heap.c
deleted file mode 100644
index 7a2308cccf0..00000000000
--- a/drivers/gpu/ion/ion_heap.c
+++ /dev/null
@@ -1,454 +0,0 @@
-/*
- * drivers/gpu/ion/ion_heap.c
- *
- * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/err.h>
-#include <linux/freezer.h>
-#include <linux/ion.h>
-#include <linux/kthread.h>
-#include <linux/mm.h>
-#include <linux/rtmutex.h>
-#include <linux/sched.h>
-#include <linux/scatterlist.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-#include <linux/highmem.h>
-#include "ion_priv.h"
-
-void *ion_heap_map_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- struct scatterlist *sg;
- int i, j;
- void *vaddr;
- pgprot_t pgprot;
- struct sg_table *table = buffer->sg_table;
- int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
- struct page **pages = vmalloc(sizeof(struct page *) * npages);
- struct page **tmp = pages;
-
- if (!pages)
- return NULL;
-
- if (buffer->flags & ION_FLAG_CACHED)
- pgprot = PAGE_KERNEL;
- else
- pgprot = pgprot_writecombine(PAGE_KERNEL);
-
- for_each_sg(table->sgl, sg, table->nents, i) {
- int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
- struct page *page = sg_page(sg);
- BUG_ON(i >= npages);
- for (j = 0; j < npages_this_entry; j++) {
- *(tmp++) = page++;
- }
- }
- vaddr = vmap(pages, npages, VM_MAP, pgprot);
- vfree(pages);
-
- return vaddr;
-}
-
-void ion_heap_unmap_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- vunmap(buffer->vaddr);
-}
-
-int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
- struct vm_area_struct *vma)
-{
- struct sg_table *table = buffer->sg_table;
- unsigned long addr = vma->vm_start;
- unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
- struct scatterlist *sg;
- int i;
-
- for_each_sg(table->sgl, sg, table->nents, i) {
- struct page *page = sg_page(sg);
- unsigned long remainder = vma->vm_end - addr;
- unsigned long len = sg_dma_len(sg);
-
- if (offset >= sg_dma_len(sg)) {
- offset -= sg_dma_len(sg);
- continue;
- } else if (offset) {
- page += offset / PAGE_SIZE;
- len = sg_dma_len(sg) - offset;
- offset = 0;
- }
- len = min(len, remainder);
- remap_pfn_range(vma, addr, page_to_pfn(page), len,
- vma->vm_page_prot);
- addr += len;
- if (addr >= vma->vm_end)
- return 0;
- }
- return 0;
-}
-
-#define MAX_VMAP_RETRIES 10
-
-/**
- * An optimized page-zeroing function. vmaps arrays of pages in large
- * chunks to minimize the number of memsets and vmaps/vunmaps.
- *
- * Note that the `pages' array should be composed of all 4K pages.
- */
-int ion_heap_pages_zero(struct page **pages, int num_pages)
-{
- int i, j, k, npages_to_vmap;
- void *ptr = NULL;
- /*
- * It's cheaper just to use writecombine memory and skip the
- * cache vs. using a cache memory and trying to flush it afterwards
- */
- pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL);
-
- /*
- * As an optimization, we manually zero out all of the pages
- * in one fell swoop here. To safeguard against insufficient
- * vmalloc space, we only vmap `npages_to_vmap' at a time,
- * starting with a conservative estimate of 1/8 of the total
- * number of vmalloc pages available.
- */
- npages_to_vmap = ((VMALLOC_END - VMALLOC_START)/8)
- >> PAGE_SHIFT;
- for (i = 0; i < num_pages; i += npages_to_vmap) {
- npages_to_vmap = min(npages_to_vmap, num_pages - i);
- for (j = 0; j < MAX_VMAP_RETRIES && npages_to_vmap;
- ++j) {
- ptr = vmap(&pages[i], npages_to_vmap,
- VM_IOREMAP, pgprot);
- if (ptr)
- break;
- else
- npages_to_vmap >>= 1;
- }
- if (!ptr)
- return -ENOMEM;
-
- memset(ptr, 0, npages_to_vmap * PAGE_SIZE);
- /*
- * invalidate the cache to pick up the zeroing
- */
- for (k = 0; k < npages_to_vmap; k++) {
- void *p = kmap_atomic(pages[i + k]);
- phys_addr_t phys = page_to_phys(
- pages[i + k]);
-
- dmac_inv_range(p, p + PAGE_SIZE);
- outer_inv_range(phys, phys + PAGE_SIZE);
- kunmap_atomic(p);
- }
- vunmap(ptr);
- }
-
- return 0;
-}
-
-static int ion_heap_alloc_pages_mem(int page_tbl_size,
- struct pages_mem *pages_mem)
-{
- struct page **pages;
- pages_mem->free_fn = kfree;
- if (page_tbl_size > SZ_8K) {
- /*
- * Do fallback to ensure we have a balance between
- * performance and availability.
- */
- pages = kmalloc(page_tbl_size,
- __GFP_COMP | __GFP_NORETRY |
- __GFP_NO_KSWAPD | __GFP_NOWARN);
- if (!pages) {
- pages = vmalloc(page_tbl_size);
- pages_mem->free_fn = vfree;
- }
- } else {
- pages = kmalloc(page_tbl_size, GFP_KERNEL);
- }
-
- if (!pages)
- return -ENOMEM;
-
- pages_mem->pages = pages;
- return 0;
-}
-
-static void ion_heap_free_pages_mem(struct pages_mem *pages_mem)
-{
- pages_mem->free_fn(pages_mem->pages);
-}
-
-int ion_heap_high_order_page_zero(struct page *page, int order)
-{
- int i, ret;
- struct pages_mem pages_mem;
- int npages = 1 << order;
- int page_tbl_size = sizeof(struct page *) * npages;
-
- if (ion_heap_alloc_pages_mem(page_tbl_size, &pages_mem))
- return -ENOMEM;
-
- for (i = 0; i < (1 << order); ++i)
- pages_mem.pages[i] = page + i;
-
- ret = ion_heap_pages_zero(pages_mem.pages, npages);
- ion_heap_free_pages_mem(&pages_mem);
- return ret;
-}
-
-int ion_heap_buffer_zero(struct ion_buffer *buffer)
-{
- struct sg_table *table = buffer->sg_table;
- struct scatterlist *sg;
- int i, j, ret = 0, npages = 0, page_tbl_size = 0;
- struct pages_mem pages_mem;
-
- for_each_sg(table->sgl, sg, table->nents, i) {
- unsigned long len = sg_dma_len(sg);
- int nrpages = len >> PAGE_SHIFT;
- page_tbl_size += sizeof(struct page *) * nrpages;
- }
-
- if (ion_heap_alloc_pages_mem(page_tbl_size, &pages_mem))
- return -ENOMEM;
-
- for_each_sg(table->sgl, sg, table->nents, i) {
- struct page *page = sg_page(sg);
- unsigned long len = sg_dma_len(sg);
-
- for (j = 0; j < len / PAGE_SIZE; j++)
- pages_mem.pages[npages++] = page + j;
- }
-
- ret = ion_heap_pages_zero(pages_mem.pages, npages);
- ion_heap_free_pages_mem(&pages_mem);
- return ret;
-}
-
-int ion_heap_buffer_zero_old(struct ion_buffer *buffer)
-{
- struct sg_table *table = buffer->sg_table;
- pgprot_t pgprot;
- struct scatterlist *sg;
- struct vm_struct *vm_struct;
- int i, j, ret = 0;
-
- if (buffer->flags & ION_FLAG_CACHED)
- pgprot = PAGE_KERNEL;
- else
- pgprot = pgprot_writecombine(PAGE_KERNEL);
-
- vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
- if (!vm_struct)
- return -ENOMEM;
-
- for_each_sg(table->sgl, sg, table->nents, i) {
- struct page *page = sg_page(sg);
- unsigned long len = sg_dma_len(sg);
-
- for (j = 0; j < len / PAGE_SIZE; j++) {
- struct page *sub_page = page + j;
- struct page **pages = &sub_page;
- ret = map_vm_area(vm_struct, pgprot, &pages);
- if (ret)
- goto end;
- memset(vm_struct->addr, 0, PAGE_SIZE);
- unmap_kernel_range((unsigned long)vm_struct->addr,
- PAGE_SIZE);
- }
- }
-end:
- free_vm_area(vm_struct);
- return ret;
-}
-
-void ion_heap_free_page(struct ion_buffer *buffer, struct page *page,
- unsigned int order)
-{
- int i;
-
- if (!ion_buffer_fault_user_mappings(buffer)) {
- __free_pages(page, order);
- return;
- }
- for (i = 0; i < (1 << order); i++)
- __free_page(page + i);
-}
-
-void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
-{
- rt_mutex_lock(&heap->lock);
- list_add(&buffer->list, &heap->free_list);
- heap->free_list_size += buffer->size;
- rt_mutex_unlock(&heap->lock);
- wake_up(&heap->waitqueue);
-}
-
-size_t ion_heap_freelist_size(struct ion_heap *heap)
-{
- size_t size;
-
- rt_mutex_lock(&heap->lock);
- size = heap->free_list_size;
- rt_mutex_unlock(&heap->lock);
-
- return size;
-}
-
-static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
- bool skip_pools)
-{
- struct ion_buffer *buffer, *tmp;
- size_t total_drained = 0;
-
- if (ion_heap_freelist_size(heap) == 0)
- return 0;
-
- rt_mutex_lock(&heap->lock);
- if (size == 0)
- size = heap->free_list_size;
-
- list_for_each_entry_safe(buffer, tmp, &heap->free_list, list) {
- if (total_drained >= size)
- break;
- list_del(&buffer->list);
- heap->free_list_size -= buffer->size;
- if (skip_pools)
- buffer->flags |= ION_FLAG_FREED_FROM_SHRINKER;
- total_drained += buffer->size;
- ion_buffer_destroy(buffer);
- }
- rt_mutex_unlock(&heap->lock);
-
- return total_drained;
-}
-
-size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
-{
- return _ion_heap_freelist_drain(heap, size, false);
-}
-
-size_t ion_heap_freelist_drain_from_shrinker(struct ion_heap *heap, size_t size)
-{
- return _ion_heap_freelist_drain(heap, size, true);
-}
-
-int ion_heap_deferred_free(void *data)
-{
- struct ion_heap *heap = data;
-
- while (true) {
- struct ion_buffer *buffer;
-
- wait_event_freezable(heap->waitqueue,
- ion_heap_freelist_size(heap) > 0);
-
- rt_mutex_lock(&heap->lock);
- if (list_empty(&heap->free_list)) {
- rt_mutex_unlock(&heap->lock);
- continue;
- }
- buffer = list_first_entry(&heap->free_list, struct ion_buffer,
- list);
- list_del(&buffer->list);
- heap->free_list_size -= buffer->size;
- rt_mutex_unlock(&heap->lock);
- ion_buffer_destroy(buffer);
- }
-
- return 0;
-}
-
-int ion_heap_init_deferred_free(struct ion_heap *heap)
-{
- struct sched_param param = { .sched_priority = 0 };
-
- INIT_LIST_HEAD(&heap->free_list);
- heap->free_list_size = 0;
- rt_mutex_init(&heap->lock);
- init_waitqueue_head(&heap->waitqueue);
- heap->task = kthread_run(ion_heap_deferred_free, heap,
- "%s", heap->name);
- if (IS_ERR(heap->task)) {
- pr_err("%s: creating thread for deferred free failed\n",
- __func__);
- return PTR_RET(heap->task);
- }
- sched_setscheduler(heap->task, SCHED_IDLE, &param);
- return 0;
-}
-
-struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
-{
- struct ion_heap *heap = NULL;
-
- switch (heap_data->type) {
- case ION_HEAP_TYPE_SYSTEM_CONTIG:
- heap = ion_system_contig_heap_create(heap_data);
- break;
- case ION_HEAP_TYPE_SYSTEM:
- heap = ion_system_heap_create(heap_data);
- break;
- case ION_HEAP_TYPE_CARVEOUT:
- heap = ion_carveout_heap_create(heap_data);
- break;
- case ION_HEAP_TYPE_CHUNK:
- heap = ion_chunk_heap_create(heap_data);
- break;
- default:
- pr_err("%s: Invalid heap type %d\n", __func__,
- heap_data->type);
- return ERR_PTR(-EINVAL);
- }
-
- if (IS_ERR_OR_NULL(heap)) {
- pr_err("%s: error creating heap %s type %d base %pa size %zu\n",
- __func__, heap_data->name, heap_data->type,
- &heap_data->base, heap_data->size);
- return ERR_PTR(-EINVAL);
- }
-
- heap->name = heap_data->name;
- heap->id = heap_data->id;
- heap->priv = heap_data->priv;
- return heap;
-}
-
-void ion_heap_destroy(struct ion_heap *heap)
-{
- if (!heap)
- return;
-
- switch (heap->type) {
- case ION_HEAP_TYPE_SYSTEM_CONTIG:
- ion_system_contig_heap_destroy(heap);
- break;
- case ION_HEAP_TYPE_SYSTEM:
- ion_system_heap_destroy(heap);
- break;
- case ION_HEAP_TYPE_CARVEOUT:
- ion_carveout_heap_destroy(heap);
- break;
- case ION_HEAP_TYPE_CHUNK:
- ion_chunk_heap_destroy(heap);
- break;
- default:
- pr_err("%s: Invalid heap type %d\n", __func__,
- heap->type);
- }
-}
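Taken together, ion_heap_create()/ion_heap_destroy() and the deferred-free helpers left platform code only a small amount of glue to write. A sketch of that glue, assuming the pre-removal ion_priv.h declarations (the function name is illustrative):

static struct ion_heap *example_add_heap(struct ion_device *idev,
					 struct ion_platform_heap *pdata)
{
	struct ion_heap *heap = ion_heap_create(pdata);

	if (IS_ERR_OR_NULL(heap))
		return heap;

	/* heaps that set ION_HEAP_FLAG_DEFER_FREE need the free thread */
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	ion_device_add_heap(idev, heap);
	return heap;
}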
diff --git a/drivers/gpu/ion/ion_page_pool.c b/drivers/gpu/ion/ion_page_pool.c
deleted file mode 100644
index cc2a36d1ee6..00000000000
--- a/drivers/gpu/ion/ion_page_pool.c
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * drivers/gpu/ion/ion_page_pool.c
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/debugfs.h>
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/fs.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include "ion_priv.h"
-
-struct ion_page_pool_item {
- struct page *page;
- struct list_head list;
-};
-
-static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
-{
- struct page *page;
- struct scatterlist sg;
-
- page = alloc_pages(pool->gfp_mask & ~__GFP_ZERO, pool->order);
-
- if (!page)
- return NULL;
-
- if (pool->gfp_mask & __GFP_ZERO)
- if (ion_heap_high_order_page_zero(page, pool->order))
- goto error_free_pages;
-
- sg_init_table(&sg, 1);
- sg_set_page(&sg, page, PAGE_SIZE << pool->order, 0);
- sg_dma_address(&sg) = sg_phys(&sg);
- dma_sync_sg_for_device(NULL, &sg, 1, DMA_BIDIRECTIONAL);
-
- return page;
-error_free_pages:
- __free_pages(page, pool->order);
- return NULL;
-}
-
-static void ion_page_pool_free_pages(struct ion_page_pool *pool,
- struct page *page)
-{
- __free_pages(page, pool->order);
-}
-
-static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
-{
- struct ion_page_pool_item *item;
-
- item = kmalloc(sizeof(struct ion_page_pool_item), GFP_KERNEL);
- if (!item)
- return -ENOMEM;
-
- mutex_lock(&pool->mutex);
- item->page = page;
- if (PageHighMem(page)) {
- list_add_tail(&item->list, &pool->high_items);
- pool->high_count++;
- } else {
- list_add_tail(&item->list, &pool->low_items);
- pool->low_count++;
- }
- mutex_unlock(&pool->mutex);
- return 0;
-}
-
-static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
-{
- struct ion_page_pool_item *item;
- struct page *page;
-
- if (high) {
- BUG_ON(!pool->high_count);
- item = list_first_entry(&pool->high_items,
- struct ion_page_pool_item, list);
- pool->high_count--;
- } else {
- BUG_ON(!pool->low_count);
- item = list_first_entry(&pool->low_items,
- struct ion_page_pool_item, list);
- pool->low_count--;
- }
-
- list_del(&item->list);
- page = item->page;
- kfree(item);
- return page;
-}
-
-void *ion_page_pool_alloc(struct ion_page_pool *pool)
-{
- struct page *page = NULL;
-
- BUG_ON(!pool);
-
- mutex_lock(&pool->mutex);
- if (pool->high_count)
- page = ion_page_pool_remove(pool, true);
- else if (pool->low_count)
- page = ion_page_pool_remove(pool, false);
- mutex_unlock(&pool->mutex);
-
- if (!page)
- page = ion_page_pool_alloc_pages(pool);
-
- return page;
-}
-
-void ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
-{
- int ret;
-
- ret = ion_page_pool_add(pool, page);
- if (ret)
- ion_page_pool_free_pages(pool, page);
-}
-
-static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
-{
- int total = 0;
-
- total += high ? (pool->high_count + pool->low_count) *
- (1 << pool->order) :
- pool->low_count * (1 << pool->order);
- return total;
-}
-
-int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
- int nr_to_scan)
-{
- int nr_freed = 0;
- int i;
- bool high;
-
- high = gfp_mask & __GFP_HIGHMEM;
-
- if (nr_to_scan == 0)
- return ion_page_pool_total(pool, high);
-
- for (i = 0; i < nr_to_scan; i++) {
- struct page *page;
-
- mutex_lock(&pool->mutex);
- if (high && pool->high_count) {
- page = ion_page_pool_remove(pool, true);
- } else if (pool->low_count) {
- page = ion_page_pool_remove(pool, false);
- } else {
- mutex_unlock(&pool->mutex);
- break;
- }
- mutex_unlock(&pool->mutex);
- ion_page_pool_free_pages(pool, page);
- nr_freed += (1 << pool->order);
- }
-
- return nr_freed;
-}
-
-struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
-{
- struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool),
- GFP_KERNEL);
- if (!pool)
- return NULL;
- pool->high_count = 0;
- pool->low_count = 0;
- INIT_LIST_HEAD(&pool->low_items);
- INIT_LIST_HEAD(&pool->high_items);
- pool->gfp_mask = gfp_mask;
- pool->order = order;
- mutex_init(&pool->mutex);
- plist_node_init(&pool->list, order);
-
- return pool;
-}
-
-void ion_page_pool_destroy(struct ion_page_pool *pool)
-{
- kfree(pool);
-}
-
-static int __init ion_page_pool_init(void)
-{
- return 0;
-}
-
-static void __exit ion_page_pool_exit(void)
-{
-}
-
-module_init(ion_page_pool_init);
-module_exit(ion_page_pool_exit);
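The pool API above is deliberately small: allocation falls back to alloc_pages() when the pool is empty, frees refill the pool, and a shrinker drains it under memory pressure. A minimal sketch of the lifecycle, assuming the old ion_priv.h declarations (the function name is illustrative):

static int example_pool_lifecycle(void)
{
	struct ion_page_pool *pool;
	struct page *page;

	pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO, 0);
	if (!pool)
		return -ENOMEM;

	page = ion_page_pool_alloc(pool);	/* empty pool: falls back to alloc_pages() */
	if (page)
		ion_page_pool_free(pool, page);	/* cached for the next allocation */

	/* GFP_HIGHUSER lets the shrink pass reclaim highmem items too */
	ion_page_pool_shrink(pool, GFP_HIGHUSER, 1);
	ion_page_pool_destroy(pool);
	return 0;
}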
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
deleted file mode 100644
index dadd580d942..00000000000
--- a/drivers/gpu/ion/ion_priv.h
+++ /dev/null
@@ -1,398 +0,0 @@
-/*
- * drivers/gpu/ion/ion_priv.h
- *
- * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _ION_PRIV_H
-#define _ION_PRIV_H
-
-#include <linux/ion.h>
-#include <linux/kref.h>
-#include <linux/mm_types.h>
-#include <linux/mutex.h>
-#include <linux/rbtree.h>
-#include <linux/seq_file.h>
-
-#include "msm_ion_priv.h"
-#include <linux/sched.h>
-#include <linux/shrinker.h>
-#include <linux/types.h>
-
-struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
-
-/**
- * struct ion_buffer - metadata for a particular buffer
- * @ref: reference count
- * @node: node in the ion_device buffers tree
- * @dev: back pointer to the ion_device
- * @heap: back pointer to the heap the buffer came from
- * @flags: buffer specific flags
- * @size: size of the buffer
- * @priv_virt: private data to the buffer representable as
- * a void *
- * @priv_phys: private data to the buffer representable as
- * an ion_phys_addr_t (and someday a phys_addr_t)
- * @lock: protects the buffers cnt fields
- * @kmap_cnt: number of times the buffer is mapped to the kernel
- * @vaddr: the kernel mapping if kmap_cnt is not zero
- * @dmap_cnt: number of times the buffer is mapped for dma
- * @sg_table: the sg table for the buffer if dmap_cnt is not zero
- * @dirty: bitmask representing which pages of this buffer have
- * been dirtied by the cpu and need cache maintenance
- * before dma
- * @vmas: list of vma's mapping this buffer
- * @handle_count: count of handles referencing this buffer
- * @task_comm: taskcomm of last client to reference this buffer in a
- * handle, used for debugging
- * @pid: pid of last client to reference this buffer in a
- * handle, used for debugging
-*/
-struct ion_buffer {
- struct kref ref;
- union {
- struct rb_node node;
- struct list_head list;
- };
- struct ion_device *dev;
- struct ion_heap *heap;
- unsigned long flags;
- size_t size;
- union {
- void *priv_virt;
- ion_phys_addr_t priv_phys;
- };
- struct mutex lock;
- int kmap_cnt;
- void *vaddr;
- int dmap_cnt;
- struct sg_table *sg_table;
- unsigned long *dirty;
- struct list_head vmas;
- /* used to track orphaned buffers */
- int handle_count;
- char task_comm[TASK_COMM_LEN];
- pid_t pid;
-};
-void ion_buffer_destroy(struct ion_buffer *buffer);
-
-/**
- * struct ion_heap_ops - ops to operate on a given heap
- * @allocate: allocate memory
- * @free: free memory. Will be called with
- * ION_FLAG_FREED_FROM_SHRINKER set in buffer flags when
- * called from a shrinker. In that case, the pages being
- * free'd must be truly free'd back to the system, not put
- * in a page pool or otherwise cached.
- * @phys: get physical address of a buffer (only defined on
- * physically contiguous heaps)
- * @map_dma: map the memory for dma to a scatterlist
- * @unmap_dma: unmap the memory for dma
- * @map_kernel: map memory into the kernel
- * @unmap_kernel: unmap memory from the kernel
- * @map_user: map memory into userspace
- * @unmap_user: unmap memory from userspace
- *
- * allocate, phys, and map_user return 0 on success, -errno on error.
- * map_dma and map_kernel return pointer on success, ERR_PTR on error.
- */
-struct ion_heap_ops {
- int (*allocate) (struct ion_heap *heap,
- struct ion_buffer *buffer, unsigned long len,
- unsigned long align, unsigned long flags);
- void (*free) (struct ion_buffer *buffer);
- int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer,
- ion_phys_addr_t *addr, size_t *len);
- struct sg_table *(*map_dma) (struct ion_heap *heap,
- struct ion_buffer *buffer);
- void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer);
- void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
- void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
- int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
- struct vm_area_struct *vma);
- void (*unmap_user) (struct ion_heap *mapper, struct ion_buffer *buffer);
- int (*print_debug)(struct ion_heap *heap, struct seq_file *s,
- const struct list_head *mem_map);
- int (*secure_heap)(struct ion_heap *heap, int version, void *data);
- int (*unsecure_heap)(struct ion_heap *heap, int version, void *data);
-};
-
-/**
- * heap flags - flags between the heaps and core ion code
- */
-#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)
-
-/**
- * struct ion_heap - represents a heap in the system
- * @node: rb node to put the heap on the device's tree of heaps
- * @dev: back pointer to the ion_device
- * @type: type of heap
- * @ops: ops struct as above
- * @flags: flags
- * @id: id of heap, also indicates priority of this heap when
- * allocating. These are specified by platform data and
- * MUST be unique
- * @name: used for debugging
- * @shrinker: a shrinker for the heap, if the heap caches system
- * memory, it must define a shrinker to return it on low
- * memory conditions, this includes system memory cached
- * in the deferred free lists for heaps that support it
- * @priv: private heap data
- * @free_list: free list head if deferred free is used
- * @free_list_size: size of the deferred free list in bytes
- * @lock: protects the free list
- * @waitqueue: queue to wait on from deferred free thread
- * @task: task struct of deferred free thread
- * @debug_show: called when heap debug file is read to add any
- * heap specific debug info to output
- *
- * Represents a pool of memory from which buffers can be made. In some
- * systems the only heap is regular system memory allocated via vmalloc.
- * On others, some blocks might require large physically contiguous buffers
- * that are allocated from a specially reserved heap.
- */
-struct ion_heap {
- struct plist_node node;
- struct ion_device *dev;
- enum ion_heap_type type;
- struct ion_heap_ops *ops;
- unsigned long flags;
- unsigned int id;
- const char *name;
- struct shrinker shrinker;
- void *priv;
- struct list_head free_list;
- size_t free_list_size;
- struct rt_mutex lock;
- wait_queue_head_t waitqueue;
- struct task_struct *task;
- int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
-};
-
-/**
- * ion_buffer_cached - this ion buffer is cached
- * @buffer: buffer
- *
- * indicates whether this ion buffer is cached
- */
-bool ion_buffer_cached(struct ion_buffer *buffer);
-
-/**
- * ion_buffer_fault_user_mappings - fault in user mappings of this buffer
- * @buffer: buffer
- *
- * indicates whether userspace mappings of this buffer will be faulted
- * in, this can affect how buffers are allocated from the heap.
- */
-bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);
-
-/**
- * ion_device_create - allocates and returns an ion device
- * @custom_ioctl: arch specific ioctl function if applicable
- *
- * returns a valid device or an ERR_PTR on failure
- */
-struct ion_device *ion_device_create(long (*custom_ioctl)
- (struct ion_client *client,
- unsigned int cmd,
- unsigned long arg));
-
-/**
- * ion_device_destroy - free the device and its resources
- * @dev: the device
- */
-void ion_device_destroy(struct ion_device *dev);
-
-/**
- * ion_device_add_heap - adds a heap to the ion device
- * @dev: the device
- * @heap: the heap to add
- */
-void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
-
-struct pages_mem {
- struct page **pages;
- void (*free_fn) (const void *);
-};
-
-/**
- * some helpers for common operations on buffers using the sg_table
- * and vaddr fields
- */
-void *ion_heap_map_kernel(struct ion_heap *, struct ion_buffer *);
-void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *);
-int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
- struct vm_area_struct *);
-int ion_heap_pages_zero(struct page **pages, int num_pages);
-int ion_heap_buffer_zero(struct ion_buffer *buffer);
-int ion_heap_high_order_page_zero(struct page *page, int order);
-
-/**
- * ion_heap_init_deferred_free -- initialize deferred free functionality
- * @heap: the heap
- *
- * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will
- * be called to set up deferred freeing. Calls to free a buffer will
- * return immediately and the actual free will occur some time later
- */
-int ion_heap_init_deferred_free(struct ion_heap *heap);
-
-/**
- * ion_heap_freelist_add - add a buffer to the deferred free list
- * @heap: the heap
- * @buffer: the buffer
- *
- * Adds an item to the deferred freelist.
- */
-void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);
-
-/**
- * ion_heap_freelist_drain - drain the deferred free list
- * @heap: the heap
- * @size: amount of memory to drain in bytes
- *
- * Drains the indicated amount of memory from the deferred freelist immediately.
- * Returns the total amount freed. The total freed may be higher depending
- * on the size of the items in the list, or lower if there is insufficient
- * total memory on the freelist.
- */
-size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);
-
-/**
- * ion_heap_freelist_drain_from_shrinker - drain the deferred free
- * list, skipping any heap-specific
- * pooling or caching mechanisms
- *
- * @heap: the heap
- * @size: amount of memory to drain in bytes
- *
- * Drains the indicated amount of memory from the deferred freelist immediately.
- * Returns the total amount freed. The total freed may be higher depending
- * on the size of the items in the list, or lower if there is insufficient
- * total memory on the freelist.
- *
- * Unlike with @ion_heap_freelist_drain, don't put any pages back into
- * page pools or otherwise cache the pages. Everything must be
- * genuinely free'd back to the system. If you're free'ing from a
- * shrinker you probably want to use this. Note that this relies on
- * the heap.ops.free callback honoring the
- * ION_FLAG_FREED_FROM_SHRINKER flag.
- */
-size_t ion_heap_freelist_drain_from_shrinker(struct ion_heap *heap,
- size_t size);
-
-/**
- * ion_heap_freelist_size - returns the size of the freelist in bytes
- * @heap: the heap
- */
-size_t ion_heap_freelist_size(struct ion_heap *heap);
-
-
-/**
- * functions for creating and destroying the built in ion heaps.
- * architectures can add their own custom architecture specific
- * heaps as appropriate.
- */
-
-struct ion_heap *ion_heap_create(struct ion_platform_heap *);
-void ion_heap_destroy(struct ion_heap *);
-struct ion_heap *ion_system_heap_create(struct ion_platform_heap *);
-void ion_system_heap_destroy(struct ion_heap *);
-
-struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *);
-void ion_system_contig_heap_destroy(struct ion_heap *);
-
-struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
-void ion_carveout_heap_destroy(struct ion_heap *);
-
-struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *);
-void ion_chunk_heap_destroy(struct ion_heap *);
-/**
- * kernel api to allocate/free from carveout -- used when carveout is
- * used to back an architecture specific custom heap
- */
-ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
- unsigned long align);
-void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
- unsigned long size);
-/**
- * The carveout heap returns physical addresses, since 0 may be a valid
- * physical address, this is used to indicate allocation failed
- */
-#define ION_CARVEOUT_ALLOCATE_FAIL -1
-
-/**
- * functions for creating and destroying a heap pool -- allows you
- * to keep a pool of preallocated memory to use from your heap. Keeping
- * a pool of memory that is ready for dma, i.e. any cached mappings have
- * been invalidated from the cache, provides a significant performance
- * benefit on many systems
- */
-
-/**
- * struct ion_page_pool - pagepool struct
- * @high_count: number of highmem items in the pool
- * @low_count: number of lowmem items in the pool
- * @high_items: list of highmem items
- * @low_items: list of lowmem items
- * @mutex: lock protecting this struct, especially the counts
- * and item lists
- * @gfp_mask: gfp_mask to use when allocating new pages
- * @order: order of pages in the pool
- * @list: plist node for list of pools
- *
- * Allows you to keep a pool of preallocated pages to use from your heap.
- * Keeping a pool of pages that is ready for dma, i.e. any cached mappings
- * have been invalidated from the cache, provides a significant performance
- * benefit on many systems
- */
-struct ion_page_pool {
- int high_count;
- int low_count;
- struct list_head high_items;
- struct list_head low_items;
- struct mutex mutex;
- gfp_t gfp_mask;
- unsigned int order;
- struct plist_node list;
-};
-
-struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
-void ion_page_pool_destroy(struct ion_page_pool *);
-void *ion_page_pool_alloc(struct ion_page_pool *);
-void ion_page_pool_free(struct ion_page_pool *, struct page *);
-
-/**
- * ion_page_pool_shrink - shrinks the size of the memory cached in the pool
- * @pool: the pool
- * @gfp_mask: the memory type to reclaim
- * @nr_to_scan: number of pages to attempt to free
- *
- * returns the number of pages freed
- */
-int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
- int nr_to_scan);
-
-int ion_walk_heaps(struct ion_client *client, int heap_id, void *data,
- int (*f)(struct ion_heap *heap, void *data));
-
-struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
- int id);
-
-int ion_handle_put(struct ion_handle *handle);
-
-#endif /* _ION_PRIV_H */
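Most heaps only had to implement allocate/free plus the dma mapping callbacks and could reuse the generic helpers declared above for everything else. A sketch of such an ops table, with the heap-specific callbacks left as illustrative stubs (map_dma/unmap_dma omitted for brevity; real heaps had to provide them):

static int example_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
			    unsigned long len, unsigned long align,
			    unsigned long flags)
{
	return -ENOMEM;	/* heap-specific allocation would go here */
}

static void example_free(struct ion_buffer *buffer)
{
	/* heap-specific release would go here */
}

static struct ion_heap_ops example_heap_ops = {
	.allocate     = example_allocate,
	.free         = example_free,
	.map_kernel   = ion_heap_map_kernel,	/* generic vmap-based helper */
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user     = ion_heap_map_user,	/* generic remap_pfn_range helper */
};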
diff --git a/drivers/gpu/ion/ion_removed_heap.c b/drivers/gpu/ion/ion_removed_heap.c
deleted file mode 100644
index 132a32c3514..00000000000
--- a/drivers/gpu/ion/ion_removed_heap.c
+++ /dev/null
@@ -1,351 +0,0 @@
-/*
- * drivers/gpu/ion/ion_removed_heap.c
- *
- * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-#include <linux/spinlock.h>
-
-#include <linux/err.h>
-#include <linux/genalloc.h>
-#include <linux/io.h>
-#include <linux/ion.h>
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/seq_file.h>
-#include "ion_priv.h"
-
-#include <asm/cacheflush.h>
-#include <linux/io.h>
-#include <linux/msm_ion.h>
-
-struct ion_removed_heap {
- struct ion_heap heap;
- struct gen_pool *pool;
- ion_phys_addr_t base;
- unsigned long allocated_bytes;
- unsigned long total_size;
- int (*request_region)(void *);
- int (*release_region)(void *);
- atomic_t map_count;
- void *bus_id;
-};
-
-ion_phys_addr_t ion_removed_allocate(struct ion_heap *heap,
- unsigned long size,
- unsigned long align)
-{
- struct ion_removed_heap *removed_heap =
- container_of(heap, struct ion_removed_heap, heap);
- unsigned long offset = gen_pool_alloc_aligned(removed_heap->pool,
- size, ilog2(align));
-
- if (!offset) {
- if ((removed_heap->total_size -
- removed_heap->allocated_bytes) >= size)
- pr_debug("%s: heap %s has enough memory (%lx) but the allocation of size %lx still failed. Memory is probably fragmented.\n",
- __func__, heap->name,
- removed_heap->total_size -
- removed_heap->allocated_bytes, size);
- return ION_CARVEOUT_ALLOCATE_FAIL;
- }
-
- removed_heap->allocated_bytes += size;
- return offset;
-}
-
-void ion_removed_free(struct ion_heap *heap, ion_phys_addr_t addr,
- unsigned long size)
-{
- struct ion_removed_heap *removed_heap =
- container_of(heap, struct ion_removed_heap, heap);
-
- if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
- return;
- gen_pool_free(removed_heap->pool, addr, size);
- removed_heap->allocated_bytes -= size;
-}
-
-static int ion_removed_heap_phys(struct ion_heap *heap,
- struct ion_buffer *buffer,
- ion_phys_addr_t *addr, size_t *len)
-{
- *addr = buffer->priv_phys;
- *len = buffer->size;
- return 0;
-}
-
-static int ion_removed_heap_allocate(struct ion_heap *heap,
- struct ion_buffer *buffer,
- unsigned long size, unsigned long align,
- unsigned long flags)
-{
- buffer->priv_phys = ion_removed_allocate(heap, size, align);
- return buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL ? -ENOMEM : 0;
-}
-
-static void ion_removed_heap_free(struct ion_buffer *buffer)
-{
- struct ion_heap *heap = buffer->heap;
-
- ion_removed_free(heap, buffer->priv_phys, buffer->size);
- buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL;
-}
-
-struct sg_table *ion_removed_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- struct sg_table *table;
- int ret;
-
- table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!table)
- return ERR_PTR(-ENOMEM);
-
- ret = sg_alloc_table(table, 1, GFP_KERNEL);
- if (ret)
- goto err0;
-
- table->sgl->length = buffer->size;
- table->sgl->offset = 0;
- table->sgl->dma_address = buffer->priv_phys;
-
- return table;
-
-err0:
- kfree(table);
- return ERR_PTR(ret);
-}
-
-void ion_removed_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- if (buffer->sg_table)
- sg_free_table(buffer->sg_table);
- kfree(buffer->sg_table);
- buffer->sg_table = NULL;
-}
-
-static int ion_removed_request_region(struct ion_removed_heap *removed_heap)
-{
- int ret_value = 0;
- if (atomic_inc_return(&removed_heap->map_count) == 1) {
- if (removed_heap->request_region) {
- ret_value = removed_heap->request_region(
- removed_heap->bus_id);
- if (ret_value) {
- pr_err("Unable to request SMI region\n");
- atomic_dec(&removed_heap->map_count);
- }
- }
- }
- return ret_value;
-}
-
-static int ion_removed_release_region(struct ion_removed_heap *removed_heap)
-{
- int ret_value = 0;
- if (atomic_dec_and_test(&removed_heap->map_count)) {
- if (removed_heap->release_region) {
- ret_value = removed_heap->release_region(
- removed_heap->bus_id);
- if (ret_value)
- pr_err("Unable to release SMI region\n");
- }
- }
- return ret_value;
-}
-
-void *ion_removed_heap_map_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- struct ion_removed_heap *removed_heap =
- container_of(heap, struct ion_removed_heap, heap);
- void *ret_value;
-
- if (ion_removed_request_region(removed_heap))
- return NULL;
-
- if (ION_IS_CACHED(buffer->flags))
- ret_value = ioremap_cached(buffer->priv_phys, buffer->size);
- else
- ret_value = ioremap(buffer->priv_phys, buffer->size);
-
- if (!ret_value)
- ion_removed_release_region(removed_heap);
- return ret_value;
-}
-
-void ion_removed_heap_unmap_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- struct ion_removed_heap *removed_heap =
- container_of(heap, struct ion_removed_heap, heap);
-
- iounmap(buffer->vaddr);
- buffer->vaddr = NULL;
-
- ion_removed_release_region(removed_heap);
-}
-
-int ion_removed_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
- struct vm_area_struct *vma)
-{
- struct ion_removed_heap *removed_heap =
- container_of(heap, struct ion_removed_heap, heap);
- int ret_value = 0;
-
- if (ion_removed_request_region(removed_heap))
- return -EINVAL;
-
- if (!ION_IS_CACHED(buffer->flags))
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
- ret_value = remap_pfn_range(vma, vma->vm_start,
- __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
-
- if (ret_value)
- ion_removed_release_region(removed_heap);
- return ret_value;
-}
-
-void ion_removed_heap_unmap_user(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- struct ion_removed_heap *removed_heap =
- container_of(heap, struct ion_removed_heap, heap);
- ion_removed_release_region(removed_heap);
-}
-
-static int ion_removed_print_debug(struct ion_heap *heap, struct seq_file *s,
- const struct list_head *mem_map)
-{
- struct ion_removed_heap *removed_heap =
- container_of(heap, struct ion_removed_heap, heap);
-
- seq_printf(s, "total bytes currently allocated: %lx\n",
- removed_heap->allocated_bytes);
- seq_printf(s, "total heap size: %lx\n", removed_heap->total_size);
-
- if (mem_map) {
- unsigned long base = removed_heap->base;
- unsigned long size = removed_heap->total_size;
- unsigned long end = base+size;
- unsigned long last_end = base;
- struct mem_map_data *data;
-
- seq_printf(s, "\nMemory Map\n");
- seq_printf(s, "%16s %14s %14s %14s\n",
- "client", "start address", "end address",
- "size (hex)");
-
- list_for_each_entry(data, mem_map, node) {
- const char *client_name = "(null)";
-
- if (last_end < data->addr) {
- phys_addr_t da;
-
- da = data->addr-1;
- seq_printf(s, "%16s %14pa %14pa %14lu (%lx)\n",
- "FREE", &last_end, &da,
- (unsigned long)data->addr-last_end,
- (unsigned long)data->addr-last_end);
- }
-
- if (data->client_name)
- client_name = data->client_name;
-
- seq_printf(s, "%16s %14pa %14pa %14lu (%lx)\n",
- client_name, &data->addr,
- &data->addr_end,
- data->size, data->size);
- last_end = data->addr_end+1;
- }
- if (last_end < end) {
- seq_printf(s, "%16s %14lx %14lx %14lu (%lx)\n", "FREE",
- last_end, end-1, end-last_end, end-last_end);
- }
- }
- return 0;
-}
-
-static struct ion_heap_ops removed_heap_ops = {
- .allocate = ion_removed_heap_allocate,
- .free = ion_removed_heap_free,
- .phys = ion_removed_heap_phys,
- .map_user = ion_removed_heap_map_user,
- .map_kernel = ion_removed_heap_map_kernel,
- .unmap_user = ion_removed_heap_unmap_user,
- .unmap_kernel = ion_removed_heap_unmap_kernel,
- .map_dma = ion_removed_heap_map_dma,
- .unmap_dma = ion_removed_heap_unmap_dma,
- .print_debug = ion_removed_print_debug,
-};
-
-struct ion_heap *ion_removed_heap_create(struct ion_platform_heap *heap_data)
-{
- struct ion_removed_heap *removed_heap;
- int ret;
-
- removed_heap = kzalloc(sizeof(struct ion_removed_heap), GFP_KERNEL);
- if (!removed_heap)
- return ERR_PTR(-ENOMEM);
-
- removed_heap->pool = gen_pool_create(12, -1);
- if (!removed_heap->pool) {
- kfree(removed_heap);
- return ERR_PTR(-ENOMEM);
- }
- removed_heap->base = heap_data->base;
- ret = gen_pool_add(removed_heap->pool, removed_heap->base,
- heap_data->size, -1);
- if (ret < 0) {
- gen_pool_destroy(removed_heap->pool);
- kfree(removed_heap);
- return ERR_PTR(-EINVAL);
- }
- removed_heap->heap.ops = &removed_heap_ops;
- removed_heap->heap.type = ION_HEAP_TYPE_REMOVED;
- removed_heap->allocated_bytes = 0;
- removed_heap->total_size = heap_data->size;
-
- if (heap_data->extra_data) {
- struct ion_co_heap_pdata *extra_data =
- heap_data->extra_data;
-
- if (extra_data->setup_region)
- removed_heap->bus_id = extra_data->setup_region();
- if (extra_data->request_region)
- removed_heap->request_region =
- extra_data->request_region;
- if (extra_data->release_region)
- removed_heap->release_region =
- extra_data->release_region;
- }
- return &removed_heap->heap;
-}
-
-void ion_removed_heap_destroy(struct ion_heap *heap)
-{
- struct ion_removed_heap *removed_heap =
- container_of(heap, struct ion_removed_heap, heap);
-
- gen_pool_destroy(removed_heap->pool);
- kfree(removed_heap);
-}
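The setup/request/release hooks consumed in ion_removed_heap_create() arrived from board files through ion_co_heap_pdata. A hypothetical wiring is sketched below; the callback bodies are invented for illustration, and on real targets they voted the SMI/EBI memory fabric on and off:

static void *example_setup(void)
{
	return NULL;	/* would return a bus identifier */
}

static int example_request(void *bus_id)
{
	return 0;	/* would vote the memory fabric on */
}

static int example_release(void *bus_id)
{
	return 0;	/* would drop the vote again */
}

static struct ion_co_heap_pdata example_co_pdata = {
	.setup_region	= example_setup,
	.request_region	= example_request,
	.release_region	= example_release,
};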
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
deleted file mode 100644
index 4d0af374b88..00000000000
--- a/drivers/gpu/ion/ion_system_heap.c
+++ /dev/null
@@ -1,546 +0,0 @@
-/*
- * drivers/gpu/ion/ion_system_heap.c
- *
- * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <asm/page.h>
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/highmem.h>
-#include <linux/ion.h>
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include "ion_priv.h"
-#include <linux/dma-mapping.h>
-#include <trace/events/kmem.h>
-
-static unsigned int high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
- __GFP_NOWARN | __GFP_NORETRY |
- __GFP_NO_KSWAPD) & ~__GFP_WAIT;
-static unsigned int low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
- __GFP_NOWARN);
-static const unsigned int orders[] = {9, 8, 4, 0};
-static const int num_orders = ARRAY_SIZE(orders);
-static int order_to_index(unsigned int order)
-{
- int i;
- for (i = 0; i < num_orders; i++)
- if (order == orders[i])
- return i;
- BUG();
- return -1;
-}
-
-static unsigned int order_to_size(int order)
-{
- return PAGE_SIZE << order;
-}
-
-struct ion_system_heap {
- struct ion_heap heap;
- struct ion_page_pool **uncached_pools;
- struct ion_page_pool **cached_pools;
-};
-
-struct page_info {
- struct page *page;
- unsigned int order;
- struct list_head list;
-};
-
-static struct page *alloc_buffer_page(struct ion_system_heap *heap,
- struct ion_buffer *buffer,
- unsigned long order)
-{
- bool cached = ion_buffer_cached(buffer);
- bool split_pages = ion_buffer_fault_user_mappings(buffer);
- struct page *page;
- struct ion_page_pool *pool;
-
- if (!cached)
- pool = heap->uncached_pools[order_to_index(order)];
- else
- pool = heap->cached_pools[order_to_index(order)];
- page = ion_page_pool_alloc(pool);
- if (!page)
- return NULL;
-
- if (split_pages)
- split_page(page, order);
- return page;
-}
-
-static void free_buffer_page(struct ion_system_heap *heap,
- struct ion_buffer *buffer, struct page *page,
- unsigned int order)
-{
- bool cached = ion_buffer_cached(buffer);
- bool split_pages = ion_buffer_fault_user_mappings(buffer);
- int i;
-
- if ((buffer->flags & ION_FLAG_FREED_FROM_SHRINKER)) {
- if (split_pages) {
- for (i = 0; i < (1 << order); i++)
- __free_page(page + i);
- } else {
- __free_pages(page, order);
- }
- } else {
- struct ion_page_pool *pool;
- if (cached)
- pool = heap->cached_pools[order_to_index(order)];
- else
- pool = heap->uncached_pools[order_to_index(order)];
- ion_page_pool_free(pool, page);
- }
-}
-
-static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
- struct ion_buffer *buffer,
- unsigned long size,
- unsigned int max_order)
-{
- struct page *page;
- struct page_info *info;
- int i;
-
- for (i = 0; i < num_orders; i++) {
- if (size < order_to_size(orders[i]))
- continue;
- if (max_order < orders[i])
- continue;
-
- page = alloc_buffer_page(heap, buffer, orders[i]);
- if (!page)
- continue;
-
- info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
- if (!info) {
- /* don't leak the page if the metadata allocation fails */
- free_buffer_page(heap, buffer, page, orders[i]);
- return NULL;
- }
- info->page = page;
- info->order = orders[i];
- return info;
- }
- return NULL;
-}
-
-static int ion_system_heap_allocate(struct ion_heap *heap,
- struct ion_buffer *buffer,
- unsigned long size, unsigned long align,
- unsigned long flags)
-{
- struct ion_system_heap *sys_heap = container_of(heap,
- struct ion_system_heap,
- heap);
- struct sg_table *table;
- struct scatterlist *sg;
- int ret;
- struct list_head pages;
- struct page_info *info, *tmp_info;
- int i = 0;
- unsigned long size_remaining = PAGE_ALIGN(size);
- unsigned int max_order = orders[0];
- bool split_pages = ion_buffer_fault_user_mappings(buffer);
-
- INIT_LIST_HEAD(&pages);
- while (size_remaining > 0) {
- info = alloc_largest_available(sys_heap, buffer,
- size_remaining, max_order);
- if (!info)
- goto err;
- list_add_tail(&info->list, &pages);
- size_remaining -= (1 << info->order) * PAGE_SIZE;
- max_order = info->order;
- i++;
- }
-
- table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!table)
- goto err;
-
- if (split_pages)
- ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
- GFP_KERNEL);
- else
- ret = sg_alloc_table(table, i, GFP_KERNEL);
-
- if (ret)
- goto err1;
-
- sg = table->sgl;
- list_for_each_entry_safe(info, tmp_info, &pages, list) {
- struct page *page = info->page;
- if (split_pages) {
- for (i = 0; i < (1 << info->order); i++) {
- sg_set_page(sg, page + i, PAGE_SIZE, 0);
- sg = sg_next(sg);
- }
- } else {
- sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE,
- 0);
- sg = sg_next(sg);
- }
- list_del(&info->list);
- kfree(info);
- }
-
- buffer->priv_virt = table;
- return 0;
-err1:
- kfree(table);
-err:
- list_for_each_entry_safe(info, tmp_info, &pages, list) {
- free_buffer_page(sys_heap, buffer, info->page, info->order);
- kfree(info);
- }
- return -ENOMEM;
-}
-
-void ion_system_heap_free(struct ion_buffer *buffer)
-{
- struct ion_heap *heap = buffer->heap;
- struct ion_system_heap *sys_heap = container_of(heap,
- struct ion_system_heap,
- heap);
- struct sg_table *table = buffer->sg_table;
- struct scatterlist *sg;
- LIST_HEAD(pages);
- int i;
-
- if (!(buffer->flags & ION_FLAG_FREED_FROM_SHRINKER))
- ion_heap_buffer_zero(buffer);
-
- for_each_sg(table->sgl, sg, table->nents, i)
- free_buffer_page(sys_heap, buffer, sg_page(sg),
- get_order(sg_dma_len(sg)));
- sg_free_table(table);
- kfree(table);
-}
-
-struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- return buffer->priv_virt;
-}
-
-void ion_system_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
-}
-
-static struct ion_heap_ops system_heap_ops = {
- .allocate = ion_system_heap_allocate,
- .free = ion_system_heap_free,
- .map_dma = ion_system_heap_map_dma,
- .unmap_dma = ion_system_heap_unmap_dma,
- .map_kernel = ion_heap_map_kernel,
- .unmap_kernel = ion_heap_unmap_kernel,
- .map_user = ion_heap_map_user,
-};
-
-static int ion_system_heap_shrink(struct shrinker *shrinker,
- struct shrink_control *sc)
-{
- struct ion_heap *heap = container_of(shrinker, struct ion_heap,
- shrinker);
- struct ion_system_heap *sys_heap = container_of(heap,
- struct ion_system_heap,
- heap);
- int nr_total = 0;
- int nr_freed = 0;
- int i;
-
- if (sc->nr_to_scan == 0)
- goto end;
-
- /*
- * Shrink the free list first; there is no point in zeroing memory
- * we are just going to reclaim. Also skip any page pooling.
- */
- nr_freed += ion_heap_freelist_drain_from_shrinker(
- heap, sc->nr_to_scan * PAGE_SIZE) / PAGE_SIZE;
-
- if (nr_freed >= sc->nr_to_scan)
- goto end;
-
- for (i = 0; i < num_orders; i++) {
- nr_freed += ion_page_pool_shrink(sys_heap->uncached_pools[i],
- sc->gfp_mask, sc->nr_to_scan);
- if (nr_freed >= sc->nr_to_scan)
- goto end;
-
- nr_freed += ion_page_pool_shrink(sys_heap->cached_pools[i],
- sc->gfp_mask, sc->nr_to_scan);
- if (nr_freed >= sc->nr_to_scan)
- goto end;
- }
-
-end:
- /*
- * The total number of items is whatever the page pools are holding
- * plus whatever is on the freelist.
- */
- for (i = 0; i < num_orders; i++) {
- nr_total += ion_page_pool_shrink(
- sys_heap->uncached_pools[i], sc->gfp_mask, 0);
- nr_total += ion_page_pool_shrink(
- sys_heap->cached_pools[i], sc->gfp_mask, 0);
- }
- nr_total += ion_heap_freelist_size(heap) / PAGE_SIZE;
- return nr_total;
-}
-
-static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
- void *unused)
-{
- struct ion_system_heap *sys_heap = container_of(heap,
- struct ion_system_heap,
- heap);
- int i;
- for (i = 0; i < num_orders; i++) {
- struct ion_page_pool *pool = sys_heap->uncached_pools[i];
- seq_printf(s,
- "%d order %u highmem pages in uncached pool = %lu total\n",
- pool->high_count, pool->order,
- (1 << pool->order) * PAGE_SIZE * pool->high_count);
- seq_printf(s,
- "%d order %u lowmem pages in uncached pool = %lu total\n",
- pool->low_count, pool->order,
- (1 << pool->order) * PAGE_SIZE * pool->low_count);
- }
-
- for (i = 0; i < num_orders; i++) {
- struct ion_page_pool *pool = sys_heap->cached_pools[i];
- seq_printf(s,
- "%d order %u highmem pages in cached pool = %lu total\n",
- pool->high_count, pool->order,
- (1 << pool->order) * PAGE_SIZE * pool->high_count);
- seq_printf(s,
- "%d order %u lowmem pages in cached pool = %lu total\n",
- pool->low_count, pool->order,
- (1 << pool->order) * PAGE_SIZE * pool->low_count);
- }
-
- return 0;
-}
-
-static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
-{
- int i;
- for (i = 0; i < num_orders; i++)
- if (pools[i])
- ion_page_pool_destroy(pools[i]);
-}
-
-/**
- * ion_system_heap_create_pools - Creates pools for all orders
- *
- * If this fails you don't need to destroy any pools. It's all or
- * nothing. If it succeeds you'll eventually need to use
- * ion_system_heap_destroy_pools to destroy the pools.
- */
-static int ion_system_heap_create_pools(struct ion_page_pool **pools)
-{
- int i;
- for (i = 0; i < num_orders; i++) {
- struct ion_page_pool *pool;
- gfp_t gfp_flags = low_order_gfp_flags;
-
- if (orders[i] > 4)
- gfp_flags = high_order_gfp_flags;
- pool = ion_page_pool_create(gfp_flags, orders[i]);
- if (!pool)
- goto err_create_pool;
- pools[i] = pool;
- }
- return 0;
-err_create_pool:
- ion_system_heap_destroy_pools(pools);
- return 1;
-}
-
-struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
-{
- struct ion_system_heap *heap;
- int pools_size = sizeof(struct ion_page_pool *) * num_orders;
-
- heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
- if (!heap)
- return ERR_PTR(-ENOMEM);
- heap->heap.ops = &system_heap_ops;
- heap->heap.type = ION_HEAP_TYPE_SYSTEM;
- heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
-
- heap->uncached_pools = kzalloc(pools_size, GFP_KERNEL);
- if (!heap->uncached_pools)
- goto err_alloc_uncached_pools;
-
- heap->cached_pools = kzalloc(pools_size, GFP_KERNEL);
- if (!heap->cached_pools)
- goto err_alloc_cached_pools;
-
- if (ion_system_heap_create_pools(heap->uncached_pools))
- goto err_create_uncached_pools;
-
- if (ion_system_heap_create_pools(heap->cached_pools))
- goto err_create_cached_pools;
-
- heap->heap.shrinker.shrink = ion_system_heap_shrink;
- heap->heap.shrinker.seeks = DEFAULT_SEEKS;
- heap->heap.shrinker.batch = 0;
- register_shrinker(&heap->heap.shrinker);
- heap->heap.debug_show = ion_system_heap_debug_show;
- return &heap->heap;
-
-err_create_cached_pools:
- ion_system_heap_destroy_pools(heap->uncached_pools);
-err_create_uncached_pools:
- kfree(heap->cached_pools);
-err_alloc_cached_pools:
- kfree(heap->uncached_pools);
-err_alloc_uncached_pools:
- kfree(heap);
- return ERR_PTR(-ENOMEM);
-}
-
-void ion_system_heap_destroy(struct ion_heap *heap)
-{
- struct ion_system_heap *sys_heap = container_of(heap,
- struct ion_system_heap,
- heap);
-
- ion_system_heap_destroy_pools(sys_heap->uncached_pools);
- ion_system_heap_destroy_pools(sys_heap->cached_pools);
- kfree(sys_heap->uncached_pools);
- kfree(sys_heap->cached_pools);
- kfree(sys_heap);
-}
-
-struct kmalloc_buffer_info {
- struct sg_table *table;
- void *vaddr;
-};
-
-static int ion_system_contig_heap_allocate(struct ion_heap *heap,
- struct ion_buffer *buffer,
- unsigned long len,
- unsigned long align,
- unsigned long flags)
-{
- int ret;
- struct kmalloc_buffer_info *info;
-
- info = kmalloc(sizeof(struct kmalloc_buffer_info), GFP_KERNEL);
- if (!info) {
- ret = -ENOMEM;
- goto out;
- }
-
- info->table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!info->table) {
- ret = -ENOMEM;
- goto kfree_info;
- }
-
- ret = sg_alloc_table(info->table, 1, GFP_KERNEL);
- if (ret)
- goto kfree_table;
-
- info->vaddr = kzalloc(len, GFP_KERNEL);
- if (!info->vaddr) {
- ret = -ENOMEM;
- goto sg_free_table;
- }
-
- sg_set_page(info->table->sgl, virt_to_page(info->vaddr), len,
- 0);
- sg_dma_address(info->table->sgl) = virt_to_phys(info->vaddr);
- dma_sync_sg_for_device(NULL, info->table->sgl, 1, DMA_BIDIRECTIONAL);
-
- buffer->priv_virt = info;
- return 0;
-
-sg_free_table:
- sg_free_table(info->table);
-kfree_table:
- kfree(info->table);
-kfree_info:
- kfree(info);
-out:
- return ret;
-}
-
-void ion_system_contig_heap_free(struct ion_buffer *buffer)
-{
- struct kmalloc_buffer_info *info = buffer->priv_virt;
- sg_free_table(info->table);
- kfree(info->table);
- kfree(info->vaddr);
-}
-
-static int ion_system_contig_heap_phys(struct ion_heap *heap,
- struct ion_buffer *buffer,
- ion_phys_addr_t *addr, size_t *len)
-{
- struct kmalloc_buffer_info *info = buffer->priv_virt;
- *addr = virt_to_phys(info->vaddr);
- *len = buffer->size;
- return 0;
-}
-
-struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- struct kmalloc_buffer_info *info = buffer->priv_virt;
- return info->table;
-}
-
-void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
-}
-
-static struct ion_heap_ops kmalloc_ops = {
- .allocate = ion_system_contig_heap_allocate,
- .free = ion_system_contig_heap_free,
- .phys = ion_system_contig_heap_phys,
- .map_dma = ion_system_contig_heap_map_dma,
- .unmap_dma = ion_system_contig_heap_unmap_dma,
- .map_kernel = ion_heap_map_kernel,
- .unmap_kernel = ion_heap_unmap_kernel,
- .map_user = ion_heap_map_user,
-};
-
-struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
-{
- struct ion_heap *heap;
-
- heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
- if (!heap)
- return ERR_PTR(-ENOMEM);
- heap->ops = &kmalloc_ops;
- heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
- return heap;
-}
-
-void ion_system_contig_heap_destroy(struct ion_heap *heap)
-{
- kfree(heap);
-}
-
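To see how alloc_largest_available() and the order array interact, consider a hypothetical 1.25 MiB allocation with 4 KiB pages and orders {9, 8, 4, 0}: order 9 (2 MiB) exceeds the remaining size and is skipped; order 8 (1 MiB) fits, leaving 256 KiB; order 4 (64 KiB) then satisfies the rest in four chunks, for five scatterlist entries in total. Because each success caps max_order, the chunk list is built largest-first and the allocator never probes an order bigger than the last one that worked.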
diff --git a/drivers/gpu/ion/ion_system_mapper.c b/drivers/gpu/ion/ion_system_mapper.c
deleted file mode 100644
index 692458e07b5..00000000000
--- a/drivers/gpu/ion/ion_system_mapper.c
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * drivers/gpu/ion/ion_system_mapper.c
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/err.h>
-#include <linux/ion.h>
-#include <linux/memory.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include "ion_priv.h"
-/*
- * This mapper is valid for any heap that allocates memory that already has
- * a kernel mapping, this includes vmalloc'd memory, kmalloc'd memory,
- * pages obtained via io_remap, etc.
- */
-static void *ion_kernel_mapper_map(struct ion_mapper *mapper,
- struct ion_buffer *buffer,
- struct ion_mapping **mapping)
-{
- if (!((1 << buffer->heap->type) & mapper->heap_mask)) {
- pr_err("%s: attempting to map an unsupported heap\n", __func__);
- return ERR_PTR(-EINVAL);
- }
- /* XXX REVISIT ME!!! */
- *((unsigned long *)mapping) = (unsigned long)buffer->priv;
- return buffer->priv;
-}
-
-static void ion_kernel_mapper_unmap(struct ion_mapper *mapper,
- struct ion_buffer *buffer,
- struct ion_mapping *mapping)
-{
- if (!((1 << buffer->heap->type) & mapper->heap_mask))
- pr_err("%s: attempting to unmap an unsupported heap\n",
- __func__);
-}
-
-static void *ion_kernel_mapper_map_kernel(struct ion_mapper *mapper,
- struct ion_buffer *buffer,
- struct ion_mapping *mapping)
-{
- if (!((1 << buffer->heap->type) & mapper->heap_mask)) {
- pr_err("%s: attempting to map an unsupported heap\n",
- __func__);
- return ERR_PTR(-EINVAL);
- }
- return buffer->priv;
-}
-
-static int ion_kernel_mapper_map_user(struct ion_mapper *mapper,
- struct ion_buffer *buffer,
- struct vm_area_struct *vma,
- struct ion_mapping *mapping)
-{
- int ret;
-
- switch (buffer->heap->type) {
- case ION_HEAP_KMALLOC:
- {
- unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv));
- ret = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
- break;
- }
- case ION_HEAP_VMALLOC:
- ret = remap_vmalloc_range(vma, buffer->priv, vma->vm_pgoff);
- break;
- default:
- pr_err("%s: attempting to map unsupported heap to userspace\n",
- __func__);
- return -EINVAL;
- }
-
- return ret;
-}
-
-static struct ion_mapper_ops ops = {
- .map = ion_kernel_mapper_map,
- .map_kernel = ion_kernel_mapper_map_kernel,
- .map_user = ion_kernel_mapper_map_user,
- .unmap = ion_kernel_mapper_unmap,
-};
-
-struct ion_mapper *ion_system_mapper_create(void)
-{
- struct ion_mapper *mapper;
- mapper = kzalloc(sizeof(struct ion_mapper), GFP_KERNEL);
- if (!mapper)
- return ERR_PTR(-ENOMEM);
- mapper->type = ION_SYSTEM_MAPPER;
- mapper->ops = &ops;
- mapper->heap_mask = (1 << ION_HEAP_VMALLOC) | (1 << ION_HEAP_KMALLOC);
- return mapper;
-}
-
-void ion_system_mapper_destroy(struct ion_mapper *mapper)
-{
- kfree(mapper);
-}
-
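The mapper above routes userspace mappings through the two standard kernel helpers: remap_pfn_range() for physically contiguous kmalloc memory and remap_vmalloc_range() for vmalloc memory. A hedged sketch of the kmalloc case as a bare mmap handler, mirroring the ARM __phys_to_pfn() helper used above; demo_buf and demo_buf_len are hypothetical stand-ins for the buffer the original keeps in buffer->priv:

#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>

static void *demo_buf;          /* assumed: kmalloc'd, page-aligned */
static size_t demo_buf_len;

static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long pfn = __phys_to_pfn(virt_to_phys(demo_buf));
        unsigned long size = vma->vm_end - vma->vm_start;

        /* Refuse mappings larger than the backing buffer. */
        if (size > demo_buf_len)
                return -EINVAL;
        return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
                               size, vma->vm_page_prot);
}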
diff --git a/drivers/gpu/ion/msm/Makefile b/drivers/gpu/ion/msm/Makefile
deleted file mode 100644
index 25d515a5306..00000000000
--- a/drivers/gpu/ion/msm/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-y += msm_ion.o ion_iommu_map.o secure_buffer.o
diff --git a/drivers/gpu/ion/msm/ion_cp_common.h b/drivers/gpu/ion/msm/ion_cp_common.h
deleted file mode 100644
index 249f58d0029..00000000000
--- a/drivers/gpu/ion/msm/ion_cp_common.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef ION_CP_COMMON_H
-#define ION_CP_COMMON_H
-
-#include <asm-generic/errno-base.h>
-#include <linux/msm_ion.h>
-
-#define ION_CP_V1 1
-#define ION_CP_V2 2
-
-struct ion_cp_buffer {
- phys_addr_t buffer;
- atomic_t secure_cnt;
- int is_secure;
- int want_delayed_unsecure;
- /*
- * Currently all user/kernel mapping is protected by the heap lock.
- * This is sufficient to protect the map count as well. The lock
- * should be used to protect map_cnt if the whole heap lock is
- * ever removed.
- */
- atomic_t map_cnt;
- /*
- * protects secure_cnt for securing.
- */
- struct mutex lock;
- int version;
- void *data;
- /*
- * secure is happening at allocation time, ignore version/data check
- */
- bool ignore_check;
-};
-
-#if defined(CONFIG_ION_MSM)
-/*
- * ion_cp_change_chunks_state - secures or unsecures memory via trustzone
- *
- * @chunks - physical address of the array containing the chunks to
- * be locked down
- * @nchunks - number of entries in the array
- * @chunk_size - size of each memory chunk
- * @usage - usage hint
- * @lock - 1 for lock, 0 for unlock
- *
- * return value is the result of the scm call
- */
-int ion_cp_change_chunks_state(unsigned long chunks, unsigned int nchunks,
- unsigned int chunk_size, enum cp_mem_usage usage,
- int lock);
-
-int ion_cp_protect_mem(unsigned int phy_base, unsigned int size,
- unsigned int permission_type, int version,
- void *data);
-
-int ion_cp_unprotect_mem(unsigned int phy_base, unsigned int size,
- unsigned int permission_type, int version,
- void *data);
-
-int ion_cp_secure_buffer(struct ion_buffer *buffer, int version, void *data,
- int flags);
-
-int ion_cp_unsecure_buffer(struct ion_buffer *buffer, int force_unsecure);
-
-int msm_ion_secure_table(struct sg_table *table, enum cp_mem_usage usage,
- int flags);
-
-int msm_ion_unsecure_table(struct sg_table *table);
-
-bool msm_secure_v2_is_supported(void);
-
-#else
-static inline int ion_cp_change_chunks_state(unsigned long chunks,
- unsigned int nchunks, unsigned int chunk_size,
- enum cp_mem_usage usage, int lock)
-{
- return -ENODEV;
-}
-
-static inline int ion_cp_protect_mem(unsigned int phy_base, unsigned int size,
- unsigned int permission_type, int version,
- void *data)
-{
- return -ENODEV;
-}
-
-static inline int ion_cp_unprotect_mem(unsigned int phy_base, unsigned int size,
- unsigned int permission_type, int version,
- void *data)
-{
- return -ENODEV;
-}
-
-static inline int ion_cp_secure_buffer(struct ion_buffer *buffer, int version,
- void *data, int flags)
-{
- return -ENODEV;
-}
-
-static inline int ion_cp_unsecure_buffer(struct ion_buffer *buffer,
- int force_unsecure)
-{
- return -ENODEV;
-}
-
-static inline int msm_ion_secure_table(struct sg_table *table,
- enum cp_mem_usage usage, int flags)
-{
- return -ENODEV;
-}
-
-static inline int msm_ion_unsecure_table(struct sg_table *table)
-{
- return -ENODEV;
-}
-
-static inline bool msm_secure_v2_is_supported(void)
-{
- return false;
-}
-
-#endif
-
-#endif
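The header pairs each secure operation with an unsecure counterpart, and the implementation (see secure_buffer.c below) refcounts repeated secures of the same table. A hedged usage sketch built only from the declarations above; the table and usage value are assumed to come from the caller:

/* Every successful msm_ion_secure_table() must be balanced by a
 * matching msm_ion_unsecure_table(). */
static int demo_secure_use(struct sg_table *table, enum cp_mem_usage usage)
{
        int ret = msm_ion_secure_table(table, usage, 0);

        if (ret)
                return ret;
        /* ... hand the buffer to the secure environment here ... */
        return msm_ion_unsecure_table(table);
}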
diff --git a/drivers/gpu/ion/msm/ion_iommu_map.c b/drivers/gpu/ion/msm/ion_iommu_map.c
deleted file mode 100644
index 3e8dfbe3cdf..00000000000
--- a/drivers/gpu/ion/msm/ion_iommu_map.c
+++ /dev/null
@@ -1,549 +0,0 @@
-/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/dma-buf.h>
-#include <linux/export.h>
-#include <linux/iommu.h>
-#include <linux/ion.h>
-#include <linux/kernel.h>
-#include <linux/kref.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-
-#include <linux/msm_iommu_domains.h>
-
-enum {
- DI_PARTITION_NUM = 0,
- DI_DOMAIN_NUM = 1,
- DI_MAX,
-};
-
-#define iommu_map_domain(__m) ((__m)->domain_info[1])
-#define iommu_map_partition(__m) ((__m)->domain_info[0])
-
-/**
- * struct ion_iommu_map - represents a mapping of an ion buffer to an iommu
- * @iova_addr - iommu virtual address
- * @node - rb node to exist in the buffer's tree of iommu mappings
- * @domain_info - contains the partition number and domain number
- * domain_info[1] = domain number
- * domain_info[0] = partition number
- * @ref - for reference counting this mapping
- * @mapped_size - size of the iova space mapped
- * (may not be the same as the buffer size)
- * @flags - iommu domain/partition specific flags.
- *
- * Represents a mapping of one ion buffer to a particular iommu domain
- * and address range. There may exist other mappings of this buffer in
- * different domains or address ranges. All mappings will have the same
- * cacheability and security.
- */
-struct ion_iommu_map {
- unsigned long iova_addr;
- struct rb_node node;
- union {
- int domain_info[DI_MAX];
- uint64_t key;
- };
- struct ion_iommu_meta *meta;
- struct kref ref;
- int mapped_size;
- unsigned long flags;
-};
-
-
-struct ion_iommu_meta {
- struct rb_node node;
- struct ion_handle *handle;
- struct rb_root iommu_maps;
- struct kref ref;
- struct sg_table *table;
- unsigned long size;
- struct mutex lock;
- struct dma_buf *dbuf;
-};
-
-static struct rb_root iommu_root;
-DEFINE_MUTEX(msm_iommu_map_mutex);
-
-static void ion_iommu_meta_add(struct ion_iommu_meta *meta)
-{
- struct rb_root *root = &iommu_root;
- struct rb_node **p = &root->rb_node;
- struct rb_node *parent = NULL;
- struct ion_iommu_meta *entry;
-
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct ion_iommu_meta, node);
-
- if (meta->table < entry->table) {
- p = &(*p)->rb_left;
- } else if (meta->table > entry->table) {
- p = &(*p)->rb_right;
- } else {
- pr_err("%s: handle %p already exists\n", __func__,
- entry->handle);
- BUG();
- }
- }
-
- rb_link_node(&meta->node, parent, p);
- rb_insert_color(&meta->node, root);
-}
-
-
-static struct ion_iommu_meta *ion_iommu_meta_lookup(struct sg_table *table)
-{
- struct rb_root *root = &iommu_root;
- struct rb_node **p = &root->rb_node;
- struct rb_node *parent = NULL;
- struct ion_iommu_meta *entry = NULL;
-
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct ion_iommu_meta, node);
-
- if (table < entry->table)
- p = &(*p)->rb_left;
- else if (table > entry->table)
- p = &(*p)->rb_right;
- else
- return entry;
- }
-
- return NULL;
-}
-
-
-
-static void ion_iommu_add(struct ion_iommu_meta *meta,
- struct ion_iommu_map *iommu)
-{
- struct rb_node **p = &meta->iommu_maps.rb_node;
- struct rb_node *parent = NULL;
- struct ion_iommu_map *entry;
-
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct ion_iommu_map, node);
-
- if (iommu->key < entry->key) {
- p = &(*p)->rb_left;
- } else if (iommu->key > entry->key) {
- p = &(*p)->rb_right;
- } else {
- pr_err("%s: handle %p already has mapping for domain %d and partition %d\n",
- __func__,
- meta->handle,
- iommu_map_domain(iommu),
- iommu_map_partition(iommu));
- BUG();
- }
- }
-
- rb_link_node(&iommu->node, parent, p);
- rb_insert_color(&iommu->node, &meta->iommu_maps);
-}
-
-
-static struct ion_iommu_map *ion_iommu_lookup(
- struct ion_iommu_meta *meta,
- unsigned int domain_no,
- unsigned int partition_no)
-{
- struct rb_node **p = &meta->iommu_maps.rb_node;
- struct rb_node *parent = NULL;
- struct ion_iommu_map *entry;
- uint64_t key = domain_no;
- key = key << 32 | partition_no;
-
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct ion_iommu_map, node);
-
- if (key < entry->key)
- p = &(*p)->rb_left;
- else if (key > entry->key)
- p = &(*p)->rb_right;
- else
- return entry;
- }
-
- return NULL;
-}
-
-static int ion_iommu_map_iommu(struct ion_iommu_meta *meta,
- struct ion_iommu_map *data,
- unsigned int domain_num,
- unsigned int partition_num,
- unsigned long align,
- unsigned long iova_length,
- unsigned long flags)
-{
- struct iommu_domain *domain;
- int ret = 0;
- unsigned long extra, size;
- struct sg_table *table;
- int prot = IOMMU_WRITE | IOMMU_READ;
-
-
- size = meta->size;
- data->mapped_size = iova_length;
- extra = iova_length - size;
- table = meta->table;
-
- /* Use the biggest alignment to allow bigger IOMMU mappings; the
- * first sg entry is always the biggest one. Both the VA and the PA
- * must be aligned to that size for the larger mappings to be used.
- */
- if (sg_dma_len(table->sgl) > align)
- align = sg_dma_len(table->sgl);
-
- ret = msm_allocate_iova_address(domain_num, partition_num,
- data->mapped_size, align,
- &data->iova_addr);
-
- if (ret)
- goto out;
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- ret = -ENOMEM;
- goto out1;
- }
-
- ret = iommu_map_range(domain, data->iova_addr,
- table->sgl,
- size, prot);
- if (ret) {
- pr_err("%s: could not map %lx in domain %p\n",
- __func__, data->iova_addr, domain);
- goto out1;
- }
-
- if (extra) {
- unsigned long extra_iova_addr = data->iova_addr + size;
- unsigned long phys_addr = sg_phys(table->sgl);
- ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
- extra, SZ_4K, prot);
- if (ret)
- goto out2;
- }
- return ret;
-
-out2:
- iommu_unmap_range(domain, data->iova_addr, size);
-out1:
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- size);
-
-out:
-
- return ret;
-}
-
-static void ion_iommu_heap_unmap_iommu(struct ion_iommu_map *data)
-{
- unsigned int domain_num;
- unsigned int partition_num;
- struct iommu_domain *domain;
-
- BUG_ON(!msm_use_iommu());
-
- domain_num = iommu_map_domain(data);
- partition_num = iommu_map_partition(data);
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
- return;
- }
-
- iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-
- return;
-}
-
-
-
-static struct ion_iommu_map *__ion_iommu_map(struct ion_iommu_meta *meta,
- int domain_num, int partition_num, unsigned long align,
- unsigned long iova_length, unsigned long flags,
- ion_phys_addr_t *iova)
-{
- struct ion_iommu_map *data;
- int ret;
-
- data = kmalloc(sizeof(*data), GFP_ATOMIC);
-
- if (!data)
- return ERR_PTR(-ENOMEM);
-
- iommu_map_domain(data) = domain_num;
- iommu_map_partition(data) = partition_num;
-
- ret = ion_iommu_map_iommu(meta, data,
- domain_num,
- partition_num,
- align,
- iova_length,
- flags);
-
- if (ret)
- goto out;
-
- kref_init(&data->ref);
- *iova = data->iova_addr;
- data->meta = meta;
-
- ion_iommu_add(meta, data);
-
- return data;
-
-out:
- kfree(data);
- return ERR_PTR(ret);
-}
-
-static struct ion_iommu_meta *ion_iommu_meta_create(struct ion_client *client,
- struct ion_handle *handle,
- struct sg_table *table,
- unsigned long size)
-{
- struct ion_iommu_meta *meta;
-
- meta = kzalloc(sizeof(*meta), GFP_KERNEL);
-
- if (!meta)
- return ERR_PTR(-ENOMEM);
-
- meta->handle = handle;
- meta->table = table;
- meta->size = size;
- meta->dbuf = ion_share_dma_buf(client, handle);
- kref_init(&meta->ref);
- mutex_init(&meta->lock);
- ion_iommu_meta_add(meta);
-
- return meta;
-}
-
-static void ion_iommu_meta_destroy(struct kref *kref)
-{
- struct ion_iommu_meta *meta = container_of(kref, struct ion_iommu_meta,
- ref);
-
-
- rb_erase(&meta->node, &iommu_root);
- dma_buf_put(meta->dbuf);
- kfree(meta);
-}
-
-static void ion_iommu_meta_put(struct ion_iommu_meta *meta)
-{
- /*
- * Need to lock here to prevent race against map/unmap
- */
- mutex_lock(&msm_iommu_map_mutex);
- kref_put(&meta->ref, ion_iommu_meta_destroy);
- mutex_unlock(&msm_iommu_map_mutex);
-}
-
-int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
- int domain_num, int partition_num, unsigned long align,
- unsigned long iova_length, ion_phys_addr_t *iova,
- unsigned long *buffer_size,
- unsigned long flags, unsigned long iommu_flags)
-{
- struct ion_iommu_map *iommu_map;
- struct ion_iommu_meta *iommu_meta = NULL;
- struct sg_table *table;
- struct scatterlist *sg;
- int ret = 0;
- int i;
- unsigned long size = 0;
-
- if (IS_ERR_OR_NULL(client)) {
- pr_err("%s: client pointer is invalid\n", __func__);
- return -EINVAL;
- }
- if (IS_ERR_OR_NULL(handle)) {
- pr_err("%s: handle pointer is invalid\n", __func__);
- return -EINVAL;
- }
-
- table = ion_sg_table(client, handle);
-
- if (IS_ERR_OR_NULL(table))
- return PTR_ERR(table);
-
- for_each_sg(table->sgl, sg, table->nents, i)
- size += sg_dma_len(sg);
-
- if (!msm_use_iommu()) {
- unsigned long pa = sg_dma_address(table->sgl);
- if (pa == 0)
- pa = sg_phys(table->sgl);
- *iova = pa;
- *buffer_size = size;
- }
- /*
- * If clients don't want a custom iova length, just use whatever
- * the buffer size is
- */
- if (!iova_length)
- iova_length = size;
-
- if (size > iova_length) {
- pr_debug("%s: iova length %lx is not at least buffer size %lx\n",
- __func__, iova_length, size);
- ret = -EINVAL;
- goto out;
- }
-
- if (size & ~PAGE_MASK) {
- pr_debug("%s: buffer size %lx is not aligned to %lx", __func__,
- size, PAGE_SIZE);
- ret = -EINVAL;
- goto out;
- }
-
- if (iova_length & ~PAGE_MASK) {
- pr_debug("%s: iova_length %lx is not aligned to %lx", __func__,
- iova_length, PAGE_SIZE);
- ret = -EINVAL;
- goto out;
- }
-
- mutex_lock(&msm_iommu_map_mutex);
- iommu_meta = ion_iommu_meta_lookup(table);
-
- if (!iommu_meta)
- iommu_meta = ion_iommu_meta_create(client, handle, table, size);
- else
- kref_get(&iommu_meta->ref);
- BUG_ON(iommu_meta->size != size);
- mutex_unlock(&msm_iommu_map_mutex);
-
- mutex_lock(&iommu_meta->lock);
- iommu_map = ion_iommu_lookup(iommu_meta, domain_num, partition_num);
- if (!iommu_map) {
- iommu_map = __ion_iommu_map(iommu_meta, domain_num,
- partition_num, align, iova_length,
- flags, iova);
- if (!IS_ERR_OR_NULL(iommu_map)) {
- iommu_map->flags = iommu_flags;
- ret = 0;
- } else {
- ret = PTR_ERR(iommu_map);
- goto out_unlock;
- }
- } else {
- if (iommu_map->flags != iommu_flags) {
- pr_err("%s: handle %p is already mapped with iommu flags %lx, trying to map with flags %lx\n",
- __func__, handle,
- iommu_map->flags, iommu_flags);
- ret = -EINVAL;
- goto out_unlock;
- } else if (iommu_map->mapped_size != iova_length) {
- pr_err("%s: handle %p is already mapped with length %x, trying to map with length %lx\n",
- __func__, handle, iommu_map->mapped_size,
- iova_length);
- ret = -EINVAL;
- goto out_unlock;
- } else {
- kref_get(&iommu_map->ref);
- *iova = iommu_map->iova_addr;
- }
- }
- mutex_unlock(&iommu_meta->lock);
- *buffer_size = size;
- return ret;
-
-out_unlock:
- mutex_unlock(&iommu_meta->lock);
-out:
-
- ion_iommu_meta_put(iommu_meta);
- return ret;
-}
-EXPORT_SYMBOL(ion_map_iommu);
-
-
-static void ion_iommu_map_release(struct kref *kref)
-{
- struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
- ref);
- struct ion_iommu_meta *meta = map->meta;
-
- rb_erase(&map->node, &meta->iommu_maps);
- ion_iommu_heap_unmap_iommu(map);
- kfree(map);
-}
-
-void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
- int domain_num, int partition_num)
-{
- struct ion_iommu_map *iommu_map;
- struct ion_iommu_meta *meta;
- struct sg_table *table;
-
- if (IS_ERR_OR_NULL(client)) {
- pr_err("%s: client pointer is invalid\n", __func__);
- return;
- }
- if (IS_ERR_OR_NULL(handle)) {
- pr_err("%s: handle pointer is invalid\n", __func__);
- return;
- }
-
- table = ion_sg_table(client, handle);
-
- mutex_lock(&msm_iommu_map_mutex);
- meta = ion_iommu_meta_lookup(table);
- if (!meta) {
- WARN(1, "%s: (%d,%d) was never mapped for %p\n", __func__,
- domain_num, partition_num, handle);
- mutex_unlock(&msm_iommu_map_mutex);
- goto out;
-
- }
- mutex_unlock(&msm_iommu_map_mutex);
-
- mutex_lock(&meta->lock);
- iommu_map = ion_iommu_lookup(meta, domain_num, partition_num);
-
- if (!iommu_map) {
- WARN(1, "%s: (%d,%d) was never mapped for %p\n", __func__,
- domain_num, partition_num, handle);
- mutex_unlock(&meta->lock);
- goto out;
- }
-
- kref_put(&iommu_map->ref, ion_iommu_map_release);
- mutex_unlock(&meta->lock);
-
- ion_iommu_meta_put(meta);
-
-out:
- return;
-}
-EXPORT_SYMBOL(ion_unmap_iommu);
-
-
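ion_iommu_map keys each buffer's rb-tree of mappings on a single u64 built from the (domain, partition) pair, which is why the struct unions domain_info[DI_MAX] with a u64 key: tree comparison reduces to one integer compare. A standalone sketch of the packing ion_iommu_lookup() performs; the helper name is hypothetical:

#include <linux/types.h>

/* Domain number in the high 32 bits, partition number in the low 32. */
static u64 iommu_map_key(unsigned int domain_no, unsigned int partition_no)
{
        u64 key = domain_no;

        return key << 32 | partition_no;
}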
diff --git a/drivers/gpu/ion/msm/msm_ion.c b/drivers/gpu/ion/msm/msm_ion.c
deleted file mode 100644
index 5fa2514628a..00000000000
--- a/drivers/gpu/ion/msm/msm_ion.c
+++ /dev/null
@@ -1,1061 +0,0 @@
-/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/export.h>
-#include <linux/err.h>
-#include <linux/msm_ion.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/of_address.h>
-#include <linux/mm.h>
-#include <linux/mm_types.h>
-#include <linux/sched.h>
-#include <linux/rwsem.h>
-#include <linux/uaccess.h>
-#include <linux/memblock.h>
-#include <linux/dma-mapping.h>
-#include <linux/dma-contiguous.h>
-#include <mach/msm_memtypes.h>
-#include <asm/cacheflush.h>
-#include "../ion_priv.h"
-#include "ion_cp_common.h"
-
-#define ION_COMPAT_STR "qcom,msm-ion"
-#define ION_COMPAT_MEM_RESERVE_STR "qcom,msm-ion-reserve"
-
-static struct ion_device *idev;
-static int num_heaps;
-static struct ion_heap **heaps;
-
-struct ion_heap_desc {
- unsigned int id;
- enum ion_heap_type type;
- const char *name;
- unsigned int permission_type;
-};
-
-
-#ifdef CONFIG_OF
-static struct ion_heap_desc ion_heap_meta[] = {
- {
- .id = ION_SYSTEM_HEAP_ID,
- .name = ION_SYSTEM_HEAP_NAME,
- },
- {
- .id = ION_SYSTEM_CONTIG_HEAP_ID,
- .name = ION_KMALLOC_HEAP_NAME,
- },
- {
- .id = ION_CP_MM_HEAP_ID,
- .name = ION_MM_HEAP_NAME,
- .permission_type = IPT_TYPE_MM_CARVEOUT,
- },
- {
- .id = ION_MM_FIRMWARE_HEAP_ID,
- .name = ION_MM_FIRMWARE_HEAP_NAME,
- },
- {
- .id = ION_CP_MFC_HEAP_ID,
- .name = ION_MFC_HEAP_NAME,
- .permission_type = IPT_TYPE_MFC_SHAREDMEM,
- },
- {
- .id = ION_SF_HEAP_ID,
- .name = ION_SF_HEAP_NAME,
- },
- {
- .id = ION_QSECOM_HEAP_ID,
- .name = ION_QSECOM_HEAP_NAME,
- },
- {
- .id = ION_AUDIO_HEAP_ID,
- .name = ION_AUDIO_HEAP_NAME,
- },
- {
- .id = ION_PIL1_HEAP_ID,
- .name = ION_PIL1_HEAP_NAME,
- },
- {
- .id = ION_PIL2_HEAP_ID,
- .name = ION_PIL2_HEAP_NAME,
- },
- {
- .id = ION_CP_WB_HEAP_ID,
- .name = ION_WB_HEAP_NAME,
- },
- {
- .id = ION_CAMERA_HEAP_ID,
- .name = ION_CAMERA_HEAP_NAME,
- },
- {
- .id = ION_ADSP_HEAP_ID,
- .name = ION_ADSP_HEAP_NAME,
- }
-};
-#endif
-
-struct ion_client *msm_ion_client_create(unsigned int heap_mask,
- const char *name)
-{
- /*
- * The assumption is that if there is a NULL device, the ion
- * driver has not yet probed.
- */
- if (idev == NULL)
- return ERR_PTR(-EPROBE_DEFER);
-
- if (IS_ERR(idev))
- return (struct ion_client *)idev;
-
- return ion_client_create(idev, name);
-}
-EXPORT_SYMBOL(msm_ion_client_create);
-
-int msm_ion_secure_heap(int heap_id)
-{
- return ion_secure_heap(idev, heap_id, ION_CP_V1, NULL);
-}
-EXPORT_SYMBOL(msm_ion_secure_heap);
-
-int msm_ion_unsecure_heap(int heap_id)
-{
- return ion_unsecure_heap(idev, heap_id, ION_CP_V1, NULL);
-}
-EXPORT_SYMBOL(msm_ion_unsecure_heap);
-
-int msm_ion_secure_heap_2_0(int heap_id, enum cp_mem_usage usage)
-{
- return ion_secure_heap(idev, heap_id, ION_CP_V2, (void *)usage);
-}
-EXPORT_SYMBOL(msm_ion_secure_heap_2_0);
-
-int msm_ion_unsecure_heap_2_0(int heap_id, enum cp_mem_usage usage)
-{
- return ion_unsecure_heap(idev, heap_id, ION_CP_V2, (void *)usage);
-}
-EXPORT_SYMBOL(msm_ion_unsecure_heap_2_0);
-
-int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
- void *vaddr, unsigned long len, unsigned int cmd)
-{
- return ion_do_cache_op(client, handle, vaddr, 0, len, cmd);
-}
-EXPORT_SYMBOL(msm_ion_do_cache_op);
-
-static int ion_no_pages_cache_ops(struct ion_client *client,
- struct ion_handle *handle,
- void *vaddr,
- unsigned int offset, unsigned int length,
- unsigned int cmd)
-{
- void (*outer_cache_op)(phys_addr_t, phys_addr_t) = NULL;
- unsigned int size_to_vmap, total_size;
- int i, j, ret;
- void *ptr = NULL;
- ion_phys_addr_t buff_phys = 0;
- ion_phys_addr_t buff_phys_start = 0;
- size_t buf_length = 0;
-
- ret = ion_phys(client, handle, &buff_phys_start, &buf_length);
- if (ret)
- return -EINVAL;
-
- buff_phys = buff_phys_start;
-
- if (!vaddr) {
- /*
- * Split the vmalloc space into smaller regions in
- * order to clean and/or invalidate the cache.
- */
- size_to_vmap = ((VMALLOC_END - VMALLOC_START)/8);
- total_size = buf_length;
-
- for (i = 0; i < total_size; i += size_to_vmap) {
- size_to_vmap = min(size_to_vmap, total_size - i);
- for (j = 0; j < 10 && size_to_vmap; ++j) {
- ptr = ioremap(buff_phys, size_to_vmap);
- if (ptr) {
- switch (cmd) {
- case ION_IOC_CLEAN_CACHES:
- dmac_clean_range(ptr,
- ptr + size_to_vmap);
- outer_cache_op =
- outer_clean_range;
- break;
- case ION_IOC_INV_CACHES:
- dmac_inv_range(ptr,
- ptr + size_to_vmap);
- outer_cache_op =
- outer_inv_range;
- break;
- case ION_IOC_CLEAN_INV_CACHES:
- dmac_flush_range(ptr,
- ptr + size_to_vmap);
- outer_cache_op =
- outer_flush_range;
- break;
- default:
- return -EINVAL;
- }
- buff_phys += size_to_vmap;
- break;
- } else {
- size_to_vmap >>= 1;
- }
- }
- if (!ptr) {
- pr_err("Couldn't io-remap the memory\n");
- return -EINVAL;
- }
- iounmap(ptr);
- }
- } else {
- switch (cmd) {
- case ION_IOC_CLEAN_CACHES:
- dmac_clean_range(vaddr, vaddr + length);
- outer_cache_op = outer_clean_range;
- break;
- case ION_IOC_INV_CACHES:
- dmac_inv_range(vaddr, vaddr + length);
- outer_cache_op = outer_inv_range;
- break;
- case ION_IOC_CLEAN_INV_CACHES:
- dmac_flush_range(vaddr, vaddr + length);
- outer_cache_op = outer_flush_range;
- break;
- default:
- return -EINVAL;
- }
- }
-
- if (!outer_cache_op)
- return -EINVAL;
-
- outer_cache_op(buff_phys_start + offset,
- buff_phys_start + offset + length);
-
- return 0;
-}
-
-#ifdef CONFIG_OUTER_CACHE
-static void ion_pages_outer_cache_op(void (*op)(phys_addr_t, phys_addr_t),
- struct sg_table *table)
-{
- unsigned long pstart;
- struct scatterlist *sg;
- int i;
- for_each_sg(table->sgl, sg, table->nents, i) {
- struct page *page = sg_page(sg);
- pstart = page_to_phys(page);
- /*
- * If page -> phys is returning NULL, something
- * has really gone wrong...
- */
- if (!pstart) {
- WARN(1, "Could not translate virtual address to physical address\n");
- return;
- }
- op(pstart, pstart + PAGE_SIZE);
- }
-}
-#else
-static void ion_pages_outer_cache_op(void (*op)(phys_addr_t, phys_addr_t),
- struct sg_table *table)
-{
-
-}
-#endif
-
-static int ion_pages_cache_ops(struct ion_client *client,
- struct ion_handle *handle,
- void *vaddr, unsigned int offset, unsigned int length,
- unsigned int cmd)
-{
- void (*outer_cache_op)(phys_addr_t, phys_addr_t);
- struct sg_table *table = NULL;
-
- table = ion_sg_table(client, handle);
- if (IS_ERR_OR_NULL(table))
- return PTR_ERR(table);
-
- switch (cmd) {
- case ION_IOC_CLEAN_CACHES:
- if (!vaddr)
- dma_sync_sg_for_device(NULL, table->sgl,
- table->nents, DMA_TO_DEVICE);
- else
- dmac_clean_range(vaddr, vaddr + length);
- outer_cache_op = outer_clean_range;
- break;
- case ION_IOC_INV_CACHES:
- if (!vaddr)
- dma_sync_sg_for_cpu(NULL, table->sgl,
- table->nents, DMA_FROM_DEVICE);
- else
- dmac_inv_range(vaddr, vaddr + length);
- outer_cache_op = outer_inv_range;
- break;
- case ION_IOC_CLEAN_INV_CACHES:
- if (!vaddr) {
- dma_sync_sg_for_device(NULL, table->sgl,
- table->nents, DMA_TO_DEVICE);
- dma_sync_sg_for_cpu(NULL, table->sgl,
- table->nents, DMA_FROM_DEVICE);
- } else {
- dmac_flush_range(vaddr, vaddr + length);
- }
- outer_cache_op = outer_flush_range;
- break;
- default:
- return -EINVAL;
- }
-
- ion_pages_outer_cache_op(outer_cache_op, table);
-
- return 0;
-}
-
-int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
- void *uaddr, unsigned long offset, unsigned long len,
- unsigned int cmd)
-{
- int ret = -EINVAL;
- unsigned long flags;
- struct sg_table *table;
- struct page *page;
-
- ret = ion_handle_get_flags(client, handle, &flags);
- if (ret)
- return -EINVAL;
-
- if (!ION_IS_CACHED(flags))
- return 0;
-
- if (flags & ION_FLAG_SECURE)
- return 0;
-
- table = ion_sg_table(client, handle);
-
- if (IS_ERR_OR_NULL(table))
- return PTR_ERR(table);
-
- page = sg_page(table->sgl);
-
- if (page)
- ret = ion_pages_cache_ops(client, handle, uaddr,
- offset, len, cmd);
- else
- ret = ion_no_pages_cache_ops(client, handle, uaddr,
- offset, len, cmd);
-
- return ret;
-
-}
-
-static void msm_ion_allocate(struct ion_platform_heap *heap)
-{
-
- if (!heap->base && heap->extra_data) {
- WARN(1, "Specifying carveout heaps without a base is deprecated. Convert to the DMA heap type instead");
- return;
- }
-}
-
-static int is_heap_overlapping(const struct ion_platform_heap *heap1,
- const struct ion_platform_heap *heap2)
-{
- ion_phys_addr_t heap1_base = heap1->base;
- ion_phys_addr_t heap2_base = heap2->base;
- ion_phys_addr_t heap1_end = heap1->base + heap1->size - 1;
- ion_phys_addr_t heap2_end = heap2->base + heap2->size - 1;
-
- if (heap1_base == heap2_base)
- return 1;
- if (heap1_base < heap2_base && heap1_end >= heap2_base)
- return 1;
- if (heap2_base < heap1_base && heap2_end >= heap1_base)
- return 1;
- return 0;
-}
-
-static void check_for_heap_overlap(const struct ion_platform_heap heap_list[],
- unsigned long nheaps)
-{
- unsigned long i;
- unsigned long j;
-
- for (i = 0; i < nheaps; ++i) {
- const struct ion_platform_heap *heap1 = &heap_list[i];
- if (!heap1->base)
- continue;
- for (j = i + 1; j < nheaps; ++j) {
- const struct ion_platform_heap *heap2 = &heap_list[j];
- if (!heap2->base)
- continue;
- if (is_heap_overlapping(heap1, heap2)) {
- panic("Memory in heap %s overlaps with heap %s\n",
- heap1->name, heap2->name);
- }
- }
- }
-}
-
-#ifdef CONFIG_OF
-static int msm_init_extra_data(struct device_node *node,
- struct ion_platform_heap *heap,
- const struct ion_heap_desc *heap_desc)
-{
- int ret = 0;
-
- switch ((int) heap->type) {
- case ION_HEAP_TYPE_CP:
- {
- heap->extra_data = kzalloc(sizeof(struct ion_cp_heap_pdata),
- GFP_KERNEL);
- if (!heap->extra_data) {
- ret = -ENOMEM;
- } else {
- struct ion_cp_heap_pdata *extra = heap->extra_data;
- extra->permission_type = heap_desc->permission_type;
- }
- break;
- }
- case ION_HEAP_TYPE_CARVEOUT:
- {
- heap->extra_data = kzalloc(sizeof(struct ion_co_heap_pdata),
- GFP_KERNEL);
- if (!heap->extra_data)
- ret = -ENOMEM;
- break;
- }
- case ION_HEAP_TYPE_SECURE_DMA:
- {
- unsigned int val;
-
- ret = of_property_read_u32(node,
- "qcom,default-prefetch-size", &val);
-
- if (!ret) {
- heap->extra_data = kzalloc(sizeof(struct ion_cma_pdata),
- GFP_KERNEL);
-
- if (!heap->extra_data) {
- ret = -ENOMEM;
- } else {
- struct ion_cma_pdata *extra = heap->extra_data;
- extra->default_prefetch_size = val;
- }
- } else {
- ret = 0;
- }
- break;
- }
- default:
- heap->extra_data = 0;
- break;
- }
- return ret;
-}
-
-#define MAKE_HEAP_TYPE_MAPPING(h) { .name = #h, \
- .heap_type = ION_HEAP_TYPE_##h, }
-
-static struct heap_types_info {
- const char *name;
- int heap_type;
-} heap_types_info[] = {
- MAKE_HEAP_TYPE_MAPPING(SYSTEM),
- MAKE_HEAP_TYPE_MAPPING(SYSTEM_CONTIG),
- MAKE_HEAP_TYPE_MAPPING(CARVEOUT),
- MAKE_HEAP_TYPE_MAPPING(CHUNK),
- MAKE_HEAP_TYPE_MAPPING(DMA),
- MAKE_HEAP_TYPE_MAPPING(CP),
- MAKE_HEAP_TYPE_MAPPING(SECURE_DMA),
- MAKE_HEAP_TYPE_MAPPING(REMOVED),
-};
-
-static int msm_ion_get_heap_type_from_dt_node(struct device_node *node,
- int *heap_type)
-{
- const char *name;
- int i, ret = -EINVAL;
- ret = of_property_read_string(node, "qcom,ion-heap-type", &name);
- if (ret)
- goto out;
- for (i = 0; i < ARRAY_SIZE(heap_types_info); ++i) {
- if (!strcmp(heap_types_info[i].name, name)) {
- *heap_type = heap_types_info[i].heap_type;
- ret = 0;
- goto out;
- }
- }
- WARN(1, "Unknown heap type: %s. You might need to update heap_types_info in %s",
- name, __FILE__);
-out:
- return ret;
-}
-
-static int msm_ion_populate_heap(struct device_node *node,
- struct ion_platform_heap *heap)
-{
- unsigned int i;
- int ret = -EINVAL, heap_type = -1;
- unsigned int len = ARRAY_SIZE(ion_heap_meta);
- for (i = 0; i < len; ++i) {
- if (ion_heap_meta[i].id == heap->id) {
- heap->name = ion_heap_meta[i].name;
- ret = msm_ion_get_heap_type_from_dt_node(node,
- &heap_type);
- if (ret)
- break;
- heap->type = heap_type;
- ret = msm_init_extra_data(node, heap,
- &ion_heap_meta[i]);
- break;
- }
- }
- if (ret)
- pr_err("%s: Unable to populate heap, error: %d", __func__, ret);
- return ret;
-}
-
-static void free_pdata(const struct ion_platform_data *pdata)
-{
- unsigned int i;
- for (i = 0; i < pdata->nr; ++i)
- kfree(pdata->heaps[i].extra_data);
- kfree(pdata->heaps);
- kfree(pdata);
-}
-
-static void msm_ion_get_heap_align(struct device_node *node,
- struct ion_platform_heap *heap)
-{
- unsigned int val;
-
- int ret = of_property_read_u32(node, "qcom,heap-align", &val);
- if (!ret) {
- switch ((int) heap->type) {
- case ION_HEAP_TYPE_CP:
- {
- struct ion_cp_heap_pdata *extra =
- heap->extra_data;
- extra->align = val;
- break;
- }
- case ION_HEAP_TYPE_CARVEOUT:
- {
- struct ion_co_heap_pdata *extra =
- heap->extra_data;
- extra->align = val;
- break;
- }
- default:
- pr_err("ION-heap %s: Cannot specify alignment for this type of heap\n",
- heap->name);
- break;
- }
- }
-}
-
-static int msm_ion_get_heap_size(struct device_node *node,
- struct ion_platform_heap *heap)
-{
- unsigned int val;
- int ret = 0;
- u32 out_values[2];
- struct device_node *pnode;
-
- ret = of_property_read_u32(node, "qcom,memory-reservation-size", &val);
- if (!ret)
- heap->size = val;
-
- ret = of_property_read_u32_array(node, "qcom,memory-fixed",
- out_values, 2);
- if (!ret) {
- heap->size = out_values[1];
- goto out;
- }
-
- pnode = of_parse_phandle(node, "linux,contiguous-region", 0);
- if (pnode != NULL) {
- const u32 *addr;
- u64 size;
-
- addr = of_get_address(pnode, 0, &size, NULL);
- if (!addr) {
- of_node_put(pnode);
- ret = -EINVAL;
- goto out;
- }
- heap->size = (u32) size;
- ret = 0;
- of_node_put(pnode);
- }
-
- ret = 0;
-out:
- return ret;
-}
-
-static void msm_ion_get_heap_base(struct device_node *node,
- struct ion_platform_heap *heap)
-{
- u32 out_values[2];
- int ret = 0;
- struct device_node *pnode;
-
- ret = of_property_read_u32_array(node, "qcom,memory-fixed",
- out_values, 2);
- if (!ret)
- heap->base = out_values[0];
-
- pnode = of_parse_phandle(node, "linux,contiguous-region", 0);
- if (pnode != NULL) {
- heap->base = cma_get_base(heap->priv);
- of_node_put(pnode);
- }
-
- return;
-}
-
-static void msm_ion_get_heap_adjacent(struct device_node *node,
- struct ion_platform_heap *heap)
-{
- unsigned int val;
- int ret = of_property_read_u32(node, "qcom,heap-adjacent", &val);
- if (!ret) {
- switch (heap->type) {
- case ION_HEAP_TYPE_CARVEOUT:
- {
- struct ion_co_heap_pdata *extra = heap->extra_data;
- extra->adjacent_mem_id = val;
- break;
- }
- default:
- pr_err("ION-heap %s: Cannot specify adjcent mem id for this type of heap\n",
- heap->name);
- break;
- }
- } else {
- switch (heap->type) {
- case ION_HEAP_TYPE_CARVEOUT:
- {
- struct ion_co_heap_pdata *extra = heap->extra_data;
- extra->adjacent_mem_id = INVALID_HEAP_ID;
- break;
- }
- default:
- break;
- }
- }
-}
-
-static struct ion_platform_data *msm_ion_parse_dt(struct platform_device *pdev)
-{
- struct ion_platform_data *pdata = 0;
- struct ion_platform_heap *heaps = NULL;
- struct device_node *node;
- struct platform_device *new_dev = NULL;
- const struct device_node *dt_node = pdev->dev.of_node;
- uint32_t val = 0;
- int ret = 0;
- uint32_t num_heaps = 0;
- int idx = 0;
-
- for_each_child_of_node(dt_node, node)
- num_heaps++;
-
- if (!num_heaps)
- return ERR_PTR(-EINVAL);
-
- pdata = kzalloc(sizeof(struct ion_platform_data), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
-
- heaps = kzalloc(sizeof(struct ion_platform_heap)*num_heaps, GFP_KERNEL);
- if (!heaps) {
- kfree(pdata);
- return ERR_PTR(-ENOMEM);
- }
-
- pdata->heaps = heaps;
- pdata->nr = num_heaps;
-
- for_each_child_of_node(dt_node, node) {
- new_dev = of_platform_device_create(node, NULL, &pdev->dev);
- if (!new_dev) {
- pr_err("Failed to create device %s\n", node->name);
- goto free_heaps;
- }
-
- pdata->heaps[idx].priv = &new_dev->dev;
- /**
- * TODO: Replace this with of_get_address() when this patch
- * gets merged: http://
- * permalink.gmane.org/gmane.linux.drivers.devicetree/18614
- */
- ret = of_property_read_u32(node, "reg", &val);
- if (ret) {
- pr_err("%s: Unable to find reg key", __func__);
- goto free_heaps;
- }
- pdata->heaps[idx].id = val;
-
- ret = msm_ion_populate_heap(node, &pdata->heaps[idx]);
- if (ret)
- goto free_heaps;
-
- msm_ion_get_heap_base(node, &pdata->heaps[idx]);
- msm_ion_get_heap_align(node, &pdata->heaps[idx]);
-
- ret = msm_ion_get_heap_size(node, &pdata->heaps[idx]);
- if (ret)
- goto free_heaps;
-
- msm_ion_get_heap_adjacent(node, &pdata->heaps[idx]);
-
- ++idx;
- }
- return pdata;
-
-free_heaps:
- free_pdata(pdata);
- return ERR_PTR(ret);
-}
-#else
-static struct ion_platform_data *msm_ion_parse_dt(struct platform_device *pdev)
-{
- return NULL;
-}
-
-static void free_pdata(const struct ion_platform_data *pdata)
-{
-
-}
-#endif
-
-static int check_vaddr_bounds(unsigned long start, unsigned long end)
-{
- struct mm_struct *mm = current->active_mm;
- struct vm_area_struct *vma;
- int ret = 1;
-
- if (end < start)
- goto out;
-
- vma = find_vma(mm, start);
- if (vma && vma->vm_start < end) {
- if (start < vma->vm_start)
- goto out;
- if (end > vma->vm_end)
- goto out;
- ret = 0;
- }
-
-out:
- return ret;
-}
-
-int ion_heap_allow_secure_allocation(enum ion_heap_type type)
-{
- return type == ((enum ion_heap_type) ION_HEAP_TYPE_CP) ||
- type == ((enum ion_heap_type) ION_HEAP_TYPE_SECURE_DMA);
-}
-
-int ion_heap_allow_handle_secure(enum ion_heap_type type)
-{
- return type == ((enum ion_heap_type) ION_HEAP_TYPE_CP) ||
- type == ((enum ion_heap_type) ION_HEAP_TYPE_SECURE_DMA);
-}
-
-int ion_heap_allow_heap_secure(enum ion_heap_type type)
-{
- return type == ((enum ion_heap_type) ION_HEAP_TYPE_CP);
-}
-
-static long msm_ion_custom_ioctl(struct ion_client *client,
- unsigned int cmd,
- unsigned long arg)
-{
- switch (cmd) {
- case ION_IOC_CLEAN_CACHES:
- case ION_IOC_INV_CACHES:
- case ION_IOC_CLEAN_INV_CACHES:
- {
- struct ion_flush_data data;
- unsigned long start, end;
- struct ion_handle *handle = NULL;
- int ret;
- struct mm_struct *mm = current->active_mm;
-
- if (copy_from_user(&data, (void __user *)arg,
- sizeof(struct ion_flush_data)))
- return -EFAULT;
-
- if (data.handle > 0) {
- handle = ion_handle_get_by_id(client, (int)data.handle);
- if (IS_ERR(handle)) {
- pr_info("%s: Could not find handle: %d\n",
- __func__, (int)data.handle);
- return PTR_ERR(handle);
- }
- } else {
- handle = ion_import_dma_buf(client, data.fd);
- if (IS_ERR(handle)) {
- pr_info("%s: Could not import handle: %p\n",
- __func__, handle);
- return -EINVAL;
- }
- }
-
- down_read(&mm->mmap_sem);
-
- start = (unsigned long) data.vaddr;
- end = (unsigned long) data.vaddr + data.length;
-
- if (start && check_vaddr_bounds(start, end)) {
- pr_err("%s: virtual address %p is out of bounds\n",
- __func__, data.vaddr);
- ret = -EINVAL;
- } else {
- ret = ion_do_cache_op(client, handle, data.vaddr,
- data.offset, data.length, cmd);
- }
- up_read(&mm->mmap_sem);
-
- ion_free(client, handle);
-
- if (ret < 0)
- return ret;
- break;
- }
- case ION_IOC_PREFETCH:
- {
- struct ion_prefetch_data data;
-
- if (copy_from_user(&data, (void __user *)arg,
- sizeof(struct ion_prefetch_data)))
- return -EFAULT;
-
- ion_walk_heaps(client, data.heap_id, (void *)data.len,
- ion_secure_cma_prefetch);
- break;
- }
- case ION_IOC_DRAIN:
- {
- struct ion_prefetch_data data;
-
- if (copy_from_user(&data, (void __user *)arg,
- sizeof(struct ion_prefetch_data)))
- return -EFAULT;
-
- ion_walk_heaps(client, data.heap_id, (void *)data.len,
- ion_secure_cma_drain_pool);
- break;
- }
-
- default:
- return -ENOTTY;
- }
- return 0;
-}
-
-static struct ion_heap *msm_ion_heap_create(struct ion_platform_heap *heap_data)
-{
- struct ion_heap *heap = NULL;
-
- switch ((int)heap_data->type) {
- case ION_HEAP_TYPE_CP:
- heap = ion_cp_heap_create(heap_data);
- break;
-#ifdef CONFIG_CMA
- case ION_HEAP_TYPE_DMA:
- heap = ion_cma_heap_create(heap_data);
- break;
-
- case ION_HEAP_TYPE_SECURE_DMA:
- heap = ion_secure_cma_heap_create(heap_data);
- break;
-#endif
- case ION_HEAP_TYPE_REMOVED:
- heap = ion_removed_heap_create(heap_data);
- break;
-
- default:
- heap = ion_heap_create(heap_data);
- }
-
- if (IS_ERR_OR_NULL(heap)) {
- pr_err("%s: error creating heap %s type %d base %pa size %zu\n",
- __func__, heap_data->name, heap_data->type,
- &heap_data->base, heap_data->size);
- return ERR_PTR(-EINVAL);
- }
-
- heap->name = heap_data->name;
- heap->id = heap_data->id;
- heap->priv = heap_data->priv;
- return heap;
-}
-
-static void msm_ion_heap_destroy(struct ion_heap *heap)
-{
- if (!heap)
- return;
-
- switch ((int)heap->type) {
- case ION_HEAP_TYPE_CP:
- ion_cp_heap_destroy(heap);
- break;
-#ifdef CONFIG_CMA
- case ION_HEAP_TYPE_DMA:
- ion_cma_heap_destroy(heap);
- break;
- case ION_HEAP_TYPE_SECURE_DMA:
- ion_secure_cma_heap_destroy(heap);
- break;
-#endif
- case ION_HEAP_TYPE_REMOVED:
- ion_removed_heap_destroy(heap);
- break;
- default:
- ion_heap_destroy(heap);
- }
-}
-
-static int msm_ion_probe(struct platform_device *pdev)
-{
- static struct ion_device *new_dev;
- struct ion_platform_data *pdata;
- unsigned int pdata_needs_to_be_freed;
- int err = -1;
- int i;
- if (pdev->dev.of_node) {
- pdata = msm_ion_parse_dt(pdev);
- if (IS_ERR(pdata)) {
- err = PTR_ERR(pdata);
- goto out;
- }
- pdata_needs_to_be_freed = 1;
- } else {
- pdata = pdev->dev.platform_data;
- pdata_needs_to_be_freed = 0;
- }
-
- num_heaps = pdata->nr;
-
- heaps = kcalloc(pdata->nr, sizeof(struct ion_heap *), GFP_KERNEL);
-
- if (!heaps) {
- err = -ENOMEM;
- goto out;
- }
-
- new_dev = ion_device_create(msm_ion_custom_ioctl);
- if (IS_ERR_OR_NULL(new_dev)) {
- /*
- * set idev to the error pointer so that clients
- * can see that Ion failed to probe.
- */
- idev = new_dev;
- err = PTR_ERR(new_dev);
- goto freeheaps;
- }
-
- /* create the heaps as specified in the board file */
- for (i = 0; i < num_heaps; i++) {
- struct ion_platform_heap *heap_data = &pdata->heaps[i];
- msm_ion_allocate(heap_data);
-
- heap_data->has_outer_cache = pdata->has_outer_cache;
- heaps[i] = msm_ion_heap_create(heap_data);
- if (IS_ERR_OR_NULL(heaps[i])) {
- heaps[i] = 0;
- continue;
- } else {
- if (heap_data->size)
- pr_info("ION heap %s created at %pa with size %zx\n",
- heap_data->name,
- &heap_data->base,
- heap_data->size);
- else
- pr_info("ION heap %s created\n",
- heap_data->name);
- }
-
- ion_device_add_heap(new_dev, heaps[i]);
- }
- check_for_heap_overlap(pdata->heaps, num_heaps);
- if (pdata_needs_to_be_freed)
- free_pdata(pdata);
-
- platform_set_drvdata(pdev, new_dev);
- /*
- * intentionally set this at the very end so that client probes
- * keep deferring until Ion is fully set up
- */
- idev = new_dev;
- return 0;
-
-freeheaps:
- kfree(heaps);
- if (pdata_needs_to_be_freed)
- free_pdata(pdata);
-out:
- return err;
-}
-
-static int msm_ion_remove(struct platform_device *pdev)
-{
- struct ion_device *idev = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < num_heaps; i++)
- msm_ion_heap_destroy(heaps[i]);
-
- ion_device_destroy(idev);
- kfree(heaps);
- return 0;
-}
-
-static struct of_device_id msm_ion_match_table[] = {
- {.compatible = ION_COMPAT_STR},
- {},
-};
-EXPORT_COMPAT(ION_COMPAT_MEM_RESERVE_STR);
-
-static struct platform_driver msm_ion_driver = {
- .probe = msm_ion_probe,
- .remove = msm_ion_remove,
- .driver = {
- .name = "ion-msm",
- .of_match_table = msm_ion_match_table,
- },
-};
-
-static int __init msm_ion_init(void)
-{
- return platform_driver_register(&msm_ion_driver);
-}
-
-static void __exit msm_ion_exit(void)
-{
- platform_driver_unregister(&msm_ion_driver);
-}
-
-subsys_initcall(msm_ion_init);
-module_exit(msm_ion_exit);
-
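is_heap_overlapping() above spells out three cases, but two closed address ranges overlap exactly when each starts no later than the other ends. A hedged single-expression equivalent, using generic phys_addr_t in place of the driver's ion_phys_addr_t:

/* Closed intervals [base1, end1] and [base2, end2] intersect iff
 * each begins no later than the other ends. */
static int heaps_overlap(phys_addr_t base1, phys_addr_t end1,
                         phys_addr_t base2, phys_addr_t end2)
{
        return base1 <= end2 && base2 <= end1;
}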
diff --git a/drivers/gpu/ion/msm/secure_buffer.c b/drivers/gpu/ion/msm/secure_buffer.c
deleted file mode 100644
index c492ec3245e..00000000000
--- a/drivers/gpu/ion/msm/secure_buffer.c
+++ /dev/null
@@ -1,298 +0,0 @@
-/*
- * Copyright (C) 2011 Google, Inc
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/highmem.h>
-#include <linux/kernel.h>
-#include <linux/kref.h>
-#include <linux/msm_ion.h>
-#include <linux/mutex.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-
-#include <mach/scm.h>
-
-static struct rb_root secure_root;
-DEFINE_MUTEX(secure_buffer_mutex);
-
-struct secure_meta {
- struct rb_node node;
- struct sg_table *table;
- struct kref ref;
- enum cp_mem_usage usage;
-};
-
-struct cp2_mem_chunks {
- unsigned int *chunk_list;
- unsigned int chunk_list_size;
- unsigned int chunk_size;
-} __attribute__ ((__packed__));
-
-struct cp2_lock_req {
- struct cp2_mem_chunks chunks;
- unsigned int mem_usage;
- unsigned int lock;
-} __attribute__ ((__packed__));
-
-#define MEM_PROTECT_LOCK_ID2 0x0A
-#define V2_CHUNK_SIZE SZ_1M
-#define FEATURE_ID_CP 12
-
-static void secure_meta_add(struct secure_meta *meta)
-{
- struct rb_root *root = &secure_root;
- struct rb_node **p = &root->rb_node;
- struct rb_node *parent = NULL;
- struct secure_meta *entry;
-
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct secure_meta, node);
-
- if (meta->table < entry->table) {
- p = &(*p)->rb_left;
- } else if (meta->table > entry->table) {
- p = &(*p)->rb_right;
- } else {
- pr_err("%s: table %p already exists\n", __func__,
- entry->table);
- BUG();
- }
- }
-
- rb_link_node(&meta->node, parent, p);
- rb_insert_color(&meta->node, root);
-}
-
-
-static struct secure_meta *secure_meta_lookup(struct sg_table *table)
-{
- struct rb_root *root = &secure_root;
- struct rb_node **p = &root->rb_node;
- struct rb_node *parent = NULL;
- struct secure_meta *entry = NULL;
-
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct secure_meta, node);
-
- if (table < entry->table)
- p = &(*p)->rb_left;
- else if (table > entry->table)
- p = &(*p)->rb_right;
- else
- return entry;
- }
-
- return NULL;
-}
-
-
-static int secure_buffer_change_chunk(unsigned long chunks,
- unsigned int nchunks,
- unsigned int chunk_size,
- enum cp_mem_usage usage,
- int lock)
-{
- struct cp2_lock_req request;
- u32 resp;
-
- request.mem_usage = usage;
- request.lock = lock;
-
- request.chunks.chunk_list = (unsigned int *)chunks;
- request.chunks.chunk_list_size = nchunks;
- request.chunks.chunk_size = chunk_size;
-
- kmap_flush_unused();
- kmap_atomic_flush_unused();
- return scm_call(SCM_SVC_MP, MEM_PROTECT_LOCK_ID2,
- &request, sizeof(request), &resp, sizeof(resp));
-
-}
-
-
-
-static int secure_buffer_change_table(struct sg_table *table,
- enum cp_mem_usage usage,
- int lock)
-{
- int i, j;
- int ret = -EINVAL;
- unsigned long *chunk_list;
- struct scatterlist *sg;
-
- for_each_sg(table->sgl, sg, table->nents, i) {
- int nchunks;
- int size = sg_dma_len(sg);
- int chunk_list_len;
- phys_addr_t chunk_list_phys;
-
- /*
- * This should theoretically be a phys_addr_t but the protocol
- * indicates this should be an unsigned long.
- */
- unsigned long base = (unsigned long)sg_dma_address(sg);
-
- nchunks = size / V2_CHUNK_SIZE;
- chunk_list_len = sizeof(unsigned long)*nchunks;
-
- chunk_list = kzalloc(chunk_list_len, GFP_KERNEL);
-
- if (!chunk_list)
- return -ENOMEM;
-
- chunk_list_phys = virt_to_phys(chunk_list);
- for (j = 0; j < nchunks; j++)
- chunk_list[j] = base + j * V2_CHUNK_SIZE;
-
- /*
- * Flush the chunk list before sending the memory to the
- * secure environment to ensure the data is actually present
- * in RAM
- */
- dmac_flush_range(chunk_list, (void *)chunk_list + chunk_list_len);
- outer_flush_range(chunk_list_phys,
- chunk_list_phys + chunk_list_len);
-
- ret = secure_buffer_change_chunk(virt_to_phys(chunk_list),
- nchunks, V2_CHUNK_SIZE, usage, lock);
-
- kfree(chunk_list);
- }
-
- return ret;
-}
-
-int msm_ion_secure_table(struct sg_table *table, enum cp_mem_usage usage,
- int flags)
-{
- struct secure_meta *meta;
- int ret;
-
- mutex_lock(&secure_buffer_mutex);
- meta = secure_meta_lookup(table);
-
- if (meta) {
- kref_get(&meta->ref);
- ret = 0;
- } else {
- meta = kzalloc(sizeof(*meta), GFP_KERNEL);
-
- if (!meta) {
- ret = -ENOMEM;
- goto out;
- }
-
- meta->table = table;
- meta->usage = usage;
- kref_init(&meta->ref);
-
- ret = secure_buffer_change_table(table, usage, 1);
- if (!ret)
- secure_meta_add(meta);
- else
- kfree(meta);
- }
-out:
- mutex_unlock(&secure_buffer_mutex);
-
- return ret;
-
-}
-
-int msm_ion_secure_buffer(struct ion_client *client, struct ion_handle *handle,
- enum cp_mem_usage usage, int flags)
-{
- struct sg_table *table;
- int ret;
-
- table = ion_sg_table(client, handle);
-
- if (IS_ERR_OR_NULL(table)) {
- ret = -EINVAL;
- goto out;
- }
-
- ret = msm_ion_secure_table(table, usage, flags);
-out:
- return ret;
-}
-
-
-static void msm_secure_buffer_release(struct kref *kref)
-{
- struct secure_meta *meta = container_of(kref, struct secure_meta,
- ref);
-
- rb_erase(&meta->node, &secure_root);
- secure_buffer_change_table(meta->table, meta->usage, 0);
- kfree(meta);
-}
-
-int msm_ion_unsecure_table(struct sg_table *table)
-{
- struct secure_meta *meta;
- int ret = 0;
-
- mutex_lock(&secure_buffer_mutex);
- meta = secure_meta_lookup(table);
-
- if (!meta) {
- ret = -EINVAL;
- goto out;
- }
-
- kref_put(&meta->ref, msm_secure_buffer_release);
-
-out:
- mutex_unlock(&secure_buffer_mutex);
- return ret;
-
-}
-
-int msm_ion_unsecure_buffer(struct ion_client *client,
- struct ion_handle *handle)
-{
- struct sg_table *table;
- int ret = 0;
-
- table = ion_sg_table(client, handle);
-
- if (IS_ERR_OR_NULL(table)) {
- WARN(1, "Could not get table for handle %p to unsecure\n",
- handle);
- ret = -EINVAL;
- goto out;
- }
-
- msm_ion_unsecure_table(table);
-
-out:
- return ret;
-}
-
-#define MAKE_CP_VERSION(major, minor, patch) \
- (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
-
-bool msm_secure_v2_is_supported(void)
-{
- int version = scm_get_feat_version(FEATURE_ID_CP);
-
- /*
- * if the version is < 1.1.0 then dynamic buffer allocation is
- * not supported
- */
- return version >= MAKE_CP_VERSION(1, 1, 0);
-}
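secure_buffer_change_table() describes each scatterlist segment to the secure world as an array of V2_CHUNK_SIZE (1 MB) physical chunk bases, then flushes that array before the scm call so TrustZone reads current data. A sketch of just the chunk-list fill, assuming one contiguous segment and V2_CHUNK_SIZE as defined above; the helper name is illustrative:

/* Fill chunk_list with the chunk bases covering [base, base + size). */
static int fill_chunk_list(unsigned long *chunk_list, unsigned long base,
                           unsigned int size)
{
        int nchunks = size / V2_CHUNK_SIZE;
        int j;

        for (j = 0; j < nchunks; j++)
                chunk_list[j] = base + j * V2_CHUNK_SIZE;
        return nchunks;
}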
diff --git a/drivers/gpu/ion/msm_ion_priv.h b/drivers/gpu/ion/msm_ion_priv.h
deleted file mode 100644
index 3225f1a6506..00000000000
--- a/drivers/gpu/ion/msm_ion_priv.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * drivers/gpu/ion/msm_ion_priv.h
- *
- * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2013, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _MSM_ION_PRIV_H
-#define _MSM_ION_PRIV_H
-
-#include <linux/kref.h>
-#include <linux/mm_types.h>
-#include <linux/mutex.h>
-#include <linux/types.h>
-#include <linux/ion.h>
-#include <linux/iommu.h>
-#include <linux/seq_file.h>
-
-/**
- * struct mem_map_data - represents information about the memory map for a heap
- * @node: list node used to store in the list of mem_map_data
- * @addr: start address of memory region.
- * @addr_end: end address of memory region.
- * @size: size of memory region
- * @client_name: name of the client who owns this buffer.
- *
- */
-struct mem_map_data {
- struct list_head node;
- ion_phys_addr_t addr;
- ion_phys_addr_t addr_end;
- unsigned long size;
- const char *client_name;
-};
-
-struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *);
-void ion_iommu_heap_destroy(struct ion_heap *);
-
-struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *);
-void ion_cp_heap_destroy(struct ion_heap *);
-
-#ifdef CONFIG_CMA
-struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
-void ion_cma_heap_destroy(struct ion_heap *);
-
-struct ion_heap *ion_secure_cma_heap_create(struct ion_platform_heap *);
-void ion_secure_cma_heap_destroy(struct ion_heap *);
-
-int ion_secure_cma_prefetch(struct ion_heap *heap, void *data);
-
-int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused);
-
-#else
-static inline int ion_secure_cma_prefetch(struct ion_heap *heap, void *data)
-{
- return -ENODEV;
-}
-
-static inline int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused)
-{
- return -ENODEV;
-}
-
-
-
-#endif
-
-struct ion_heap *ion_removed_heap_create(struct ion_platform_heap *);
-void ion_removed_heap_destroy(struct ion_heap *);
-
-#define ION_CP_ALLOCATE_FAIL -1
-#define ION_RESERVED_ALLOCATE_FAIL -1
-
-/**
- * ion_do_cache_op - do cache operations.
- *
- * @client - pointer to ION client.
- * @handle - pointer to buffer handle.
- * @uaddr - virtual address to operate on.
- * @offset - offset from physical address.
- * @len - Length of data to do cache operation on.
- * @cmd - Cache operation to perform:
- * ION_IOC_CLEAN_CACHES
- * ION_IOC_INV_CACHES
- * ION_IOC_CLEAN_INV_CACHES
- *
- * Returns 0 on success
- */
-int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
- void *uaddr, unsigned long offset, unsigned long len,
- unsigned int cmd);
-
-void ion_cp_heap_get_base(struct ion_heap *heap, unsigned long *base,
- unsigned long *size);
-
-void ion_mem_map_show(struct ion_heap *heap);
-
-int ion_heap_allow_secure_allocation(enum ion_heap_type type);
-
-int ion_heap_allow_heap_secure(enum ion_heap_type type);
-
-int ion_heap_allow_handle_secure(enum ion_heap_type type);
-
-/**
- * ion_create_chunked_sg_table - helper function to create sg table
- * with specified chunk size
- * @buffer_base: The starting address used for the sg dma address
- * @chunk_size: The size of each entry in the sg table
- * @total_size: The total size of the sg table (i.e. the sum of the
- * entries). This will be rounded up to the nearest
- * multiple of `chunk_size'
- */
-struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base,
- size_t chunk_size, size_t total_size);
-#endif /* _MSM_ION_PRIV_H */
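Per the comment on ion_create_chunked_sg_table(), total_size rounds up to the next multiple of chunk_size, so the number of sg entries the helper must allocate follows directly. A hedged sketch of that arithmetic; the function name is illustrative, not part of the API:

#include <linux/kernel.h>

/* Entry count implied by the rounding contract documented above. */
static size_t chunked_sg_nents(size_t chunk_size, size_t total_size)
{
        return DIV_ROUND_UP(total_size, chunk_size);
}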
diff --git a/drivers/gpu/ion/tegra/Makefile b/drivers/gpu/ion/tegra/Makefile
deleted file mode 100644
index 11cd003fb08..00000000000
--- a/drivers/gpu/ion/tegra/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-y += tegra_ion.o
diff --git a/drivers/gpu/ion/tegra/tegra_ion.c b/drivers/gpu/ion/tegra/tegra_ion.c
deleted file mode 100644
index 7af6e168ff4..00000000000
--- a/drivers/gpu/ion/tegra/tegra_ion.c
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * drivers/gpu/tegra/tegra_ion.c
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/err.h>
-#include <linux/ion.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include "../ion_priv.h"
-
-struct ion_device *idev;
-struct ion_mapper *tegra_user_mapper;
-int num_heaps;
-struct ion_heap **heaps;
-
-int tegra_ion_probe(struct platform_device *pdev)
-{
- struct ion_platform_data *pdata = pdev->dev.platform_data;
- int err;
- int i;
-
- num_heaps = pdata->nr;
-
- heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL);
- if (!heaps)
- return -ENOMEM;
-
- idev = ion_device_create(NULL);
- if (IS_ERR_OR_NULL(idev)) {
- kfree(heaps);
- return PTR_ERR(idev);
- }
-
- /* create the heaps as specified in the board file */
- for (i = 0; i < num_heaps; i++) {
- struct ion_platform_heap *heap_data = &pdata->heaps[i];
-
- heaps[i] = ion_heap_create(heap_data);
- if (IS_ERR_OR_NULL(heaps[i])) {
- err = PTR_ERR(heaps[i]);
- goto err;
- }
- ion_device_add_heap(idev, heaps[i]);
- }
- platform_set_drvdata(pdev, idev);
- return 0;
-err:
- for (i = 0; i < num_heaps; i++) {
- if (heaps[i])
- ion_heap_destroy(heaps[i]);
- }
- kfree(heaps);
- return err;
-}
-
-int tegra_ion_remove(struct platform_device *pdev)
-{
- struct ion_device *idev = platform_get_drvdata(pdev);
- int i;
-
- ion_device_destroy(idev);
- for (i = 0; i < num_heaps; i++)
- ion_heap_destroy(heaps[i]);
- kfree(heaps);
- return 0;
-}
-
-static struct platform_driver ion_driver = {
- .probe = tegra_ion_probe,
- .remove = tegra_ion_remove,
- .driver = { .name = "ion-tegra" }
-};
-
-static int __init ion_init(void)
-{
- return platform_driver_register(&ion_driver);
-}
-
-static void __exit ion_exit(void)
-{
- platform_driver_unregister(&ion_driver);
-}
-
-module_init(ion_init);
-module_exit(ion_exit);
-
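One caveat in tegra_ion_probe() worth noting: PTR_ERR(NULL) evaluates to 0, so if IS_ERR_OR_NULL(idev) ever trips on a NULL device, the probe returns success with nothing registered. A hedged sketch of a check that maps NULL to a real error code:

#include <linux/err.h>

struct ion_device;      /* opaque here; defined by the ion core */

/* Convert an IS_ERR_OR_NULL()-style result into a usable errno. */
static int dev_err_code(struct ion_device *dev)
{
        if (IS_ERR(dev))
                return PTR_ERR(dev);
        return dev ? 0 : -ENOMEM;
}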
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 46ac589c78b..35ab19c634c 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1162,9 +1162,11 @@ static int adreno_iommu_setstate(struct kgsl_device *device,
* after the command has been retired
*/
if (result)
- kgsl_mmu_disable_clk_on_ts(&device->mmu, 0, false);
+ kgsl_mmu_disable_clk(&device->mmu,
+ KGSL_IOMMU_CONTEXT_USER);
else
- kgsl_mmu_disable_clk_on_ts(&device->mmu, rb->global_ts, true);
+ kgsl_mmu_disable_clk_on_ts(&device->mmu, rb->global_ts,
+ KGSL_IOMMU_CONTEXT_USER);
done:
kgsl_context_put(context);
@@ -1449,6 +1451,12 @@ static int adreno_of_get_pdata(struct platform_device *pdev)
if (ret)
goto err;
+	/* get the pm_qos latency from the target; fall back to the default if not found */
+	if (adreno_of_read_property(pdev->dev.of_node, "qcom,pm_qos_latency",
+		&pdata->pm_qos_latency))
+		pdata->pm_qos_latency = 501;
+
if (adreno_of_read_property(pdev->dev.of_node, "qcom,idle-timeout",
&pdata->idle_timeout))
pdata->idle_timeout = HZ/12;
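Both properties above follow the same read-with-default idiom: adreno_of_read_property() returns nonzero when the node lacks the property, and the caller then installs a fallback. A hedged sketch of the idiom using the stock OF helper (of_property_read_u32() leaves its output untouched on failure, so the default must be assigned explicitly):

#include <linux/of.h>

static u32 read_pm_qos_latency(struct device_node *node)
{
	u32 latency;

	/* returns 0 on success; on failure `latency' is untouched */
	if (of_property_read_u32(node, "qcom,pm_qos_latency", &latency))
		latency = 501;	/* default permits low-latency PC modes */
	return latency;
}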
diff --git a/drivers/gpu/msm/adreno_a4xx_snapshot.c b/drivers/gpu/msm/adreno_a4xx_snapshot.c
index 3655325305f..cc6a37b8c1e 100644
--- a/drivers/gpu/msm/adreno_a4xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a4xx_snapshot.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -326,6 +326,7 @@ void *a4xx_snapshot(struct adreno_device *adreno_dev, void *snapshot,
goto skip_regs;
}
if (kgsl_mmu_enable_clk(&device->mmu, KGSL_IOMMU_CONTEXT_PRIV)) {
+ kgsl_mmu_disable_clk(&device->mmu, KGSL_IOMMU_CONTEXT_USER);
KGSL_CORE_ERR("Failed to turn on iommu priv context clocks\n");
goto skip_regs;
}
@@ -333,6 +334,9 @@ void *a4xx_snapshot(struct adreno_device *adreno_dev, void *snapshot,
snapshot = kgsl_snapshot_add_section(device,
KGSL_SNAPSHOT_SECTION_REGS, snapshot, remain,
kgsl_snapshot_dump_regs, &list);
+
+ kgsl_mmu_disable_clk(&device->mmu, KGSL_IOMMU_CONTEXT_USER);
+ kgsl_mmu_disable_clk(&device->mmu, KGSL_IOMMU_CONTEXT_PRIV);
skip_regs:
snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
remain,
@@ -399,7 +403,5 @@ skip_regs:
kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2,
clock_ctl2);
- /* This will only disable the clock if no one else turned on */
- kgsl_mmu_disable_clk_on_ts(&device->mmu, 0, 0);
return snapshot;
}
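With the timestamp-based bulk disable removed, the snapshot path must now balance every successful enable with exactly one disable per context, including the error path where the USER clocks came up but the PRIV enable failed. The pattern the hunks above implement, reduced to a sketch:

	if (kgsl_mmu_enable_clk(&device->mmu, KGSL_IOMMU_CONTEXT_USER))
		goto skip_regs;
	if (kgsl_mmu_enable_clk(&device->mmu, KGSL_IOMMU_CONTEXT_PRIV)) {
		/* undo the USER vote before bailing out */
		kgsl_mmu_disable_clk(&device->mmu, KGSL_IOMMU_CONTEXT_USER);
		goto skip_regs;
	}
	/* ... dump the IOMMU registers ... */
	kgsl_mmu_disable_clk(&device->mmu, KGSL_IOMMU_CONTEXT_USER);
	kgsl_mmu_disable_clk(&device->mmu, KGSL_IOMMU_CONTEXT_PRIV);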
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 68d766ae716..aac609f3a06 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -652,7 +652,8 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
total_sizedwords += (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE) ? 2 : 0;
/* Add two dwords for the CP_INTERRUPT */
- total_sizedwords += drawctxt ? 2 : 0;
+ total_sizedwords +=
+ (drawctxt || (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) ? 2 : 0;
/* context rollover */
if (adreno_is_a3xx(adreno_dev))
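The sizing fix matters because reservation and emission must test the same condition: if the emit path writes a CP_INTERRUPT for internal-issue commands too, the accounting path has to reserve its two dwords, or the ringbuffer write runs past the space it claimed. A hedged sketch of the matching emit side (cp_type3_packet() and CP_INT_CNTL__RB_INT_MASK as used elsewhere in this driver; treat the exact names as an assumption here):

	if (drawctxt || (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
		/* two dwords: the type-3 header and the interrupt mask */
		*ringcmds++ = cp_type3_packet(CP_INTERRUPT, 1);
		*ringcmds++ = CP_INT_CNTL__RB_INT_MASK;
	}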
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index a791d67c8e0..5454498f6db 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -457,7 +457,7 @@ done:
* Disables iommu clocks
* Return - void
*/
-static void kgsl_iommu_disable_clk(struct kgsl_mmu *mmu)
+static void kgsl_iommu_disable_clk(struct kgsl_mmu *mmu, int ctx_id)
{
struct kgsl_iommu *iommu = mmu->priv;
struct msm_iommu_drvdata *iommu_drvdata;
@@ -466,8 +466,15 @@ static void kgsl_iommu_disable_clk(struct kgsl_mmu *mmu)
for (i = 0; i < iommu->unit_count; i++) {
struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
for (j = 0; j < iommu_unit->dev_count; j++) {
- if (!iommu_unit->dev[j].clk_enabled)
+ if (ctx_id != iommu_unit->dev[j].ctx_id)
continue;
+			BUG_ON(atomic_dec_return(
+				&iommu_unit->dev[j].clk_enable_count) < 0);
+			/*
+			 * the clk framework refcounts prepare/enable, so
+			 * issue the calls on every enable/disable request
+			 */
iommu_drvdata = dev_get_drvdata(
iommu_unit->dev[j].dev->parent);
if (iommu_drvdata->aclk)
@@ -475,7 +482,6 @@ static void kgsl_iommu_disable_clk(struct kgsl_mmu *mmu)
if (iommu_drvdata->clk)
clk_disable_unprepare(iommu_drvdata->clk);
clk_disable_unprepare(iommu_drvdata->pclk);
- iommu_unit->dev[j].clk_enabled = false;
}
}
}
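The rewrite replaces the single clk_enabled flag with a per-context reference count: the clk framework already refcounts prepare/enable internally, so each KGSL-level vote maps onto one framework call, and the atomic counter only checks that enables and disables stay balanced. A minimal single-clock sketch of the same pattern (clk_vote_put() is a hypothetical wrapper, not the driver's API):

#include <linux/atomic.h>
#include <linux/clk.h>

static atomic_t vote_count = ATOMIC_INIT(0);

static void clk_vote_put(struct clk *clk)
{
	/* decrement and underflow check in one atomic step */
	BUG_ON(atomic_dec_return(&vote_count) < 0);
	clk_disable_unprepare(clk);	/* drops one framework reference */
}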
@@ -496,32 +502,14 @@ static void kgsl_iommu_clk_disable_event(struct kgsl_device *device, void *data,
unsigned int id, unsigned int ts,
u32 type)
{
- struct kgsl_mmu *mmu = data;
- struct kgsl_iommu *iommu = mmu->priv;
-
- if (!iommu->clk_event_queued) {
- if (0 > timestamp_cmp(ts, iommu->iommu_last_cmd_ts))
- KGSL_DRV_ERR(device,
- "IOMMU disable clock event being cancelled, "
- "iommu_last_cmd_ts: %x, retired ts: %x\n",
- iommu->iommu_last_cmd_ts, ts);
- return;
- }
+ struct kgsl_iommu_disable_clk_param *param = data;
- if (0 <= timestamp_cmp(ts, iommu->iommu_last_cmd_ts)) {
- kgsl_iommu_disable_clk(mmu);
- iommu->clk_event_queued = false;
- } else {
- /* add new event to fire when ts is reached, this can happen
- * if we queued an event and someone requested the clocks to
- * be disbaled on a later timestamp */
- if (kgsl_add_event(device, id, iommu->iommu_last_cmd_ts,
- kgsl_iommu_clk_disable_event, mmu, mmu)) {
- KGSL_DRV_ERR(device,
- "Failed to add IOMMU disable clk event\n");
- iommu->clk_event_queued = false;
- }
- }
+ if ((0 <= timestamp_cmp(ts, param->ts)) ||
+ (KGSL_EVENT_CANCELLED == type))
+ kgsl_iommu_disable_clk(param->mmu, param->ctx_id);
+ else
+ /* something went wrong with the event handling mechanism */
+		BUG();
}
/*
@@ -531,6 +519,8 @@ static void kgsl_iommu_clk_disable_event(struct kgsl_device *device, void *data,
* @ts_valid - Indicates whether ts parameter is valid, if this parameter
* is false then it means that the calling function wants to disable the
* IOMMU clocks immediately without waiting for any timestamp
+ * @ctx_id: The IOMMU context whose clocks are to be turned off
*
* Creates an event to disable the IOMMU clocks on timestamp and if event
* already exists then updates the timestamp of disabling the IOMMU clocks
@@ -539,28 +529,25 @@ static void kgsl_iommu_clk_disable_event(struct kgsl_device *device, void *data,
* Return - void
*/
static void
-kgsl_iommu_disable_clk_on_ts(struct kgsl_mmu *mmu, unsigned int ts,
- bool ts_valid)
+kgsl_iommu_disable_clk_on_ts(struct kgsl_mmu *mmu,
+ unsigned int ts, int ctx_id)
{
- struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu_disable_clk_param *param;
- if (iommu->clk_event_queued) {
- if (ts_valid && (0 <
- timestamp_cmp(ts, iommu->iommu_last_cmd_ts)))
- iommu->iommu_last_cmd_ts = ts;
- } else {
- if (ts_valid) {
- iommu->iommu_last_cmd_ts = ts;
- iommu->clk_event_queued = true;
- if (kgsl_add_event(mmu->device, KGSL_MEMSTORE_GLOBAL,
- ts, kgsl_iommu_clk_disable_event, mmu, mmu)) {
- KGSL_DRV_ERR(mmu->device,
- "Failed to add IOMMU disable clk event\n");
- iommu->clk_event_queued = false;
- }
- } else {
- kgsl_iommu_disable_clk(mmu);
- }
+ param = kzalloc(sizeof(*param), GFP_KERNEL);
+ if (!param) {
+ KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*param));
+ return;
+ }
+ param->mmu = mmu;
+ param->ctx_id = ctx_id;
+ param->ts = ts;
+
+ if (kgsl_add_event(mmu->device, KGSL_MEMSTORE_GLOBAL,
+ ts, kgsl_iommu_clk_disable_event, param, mmu)) {
+ KGSL_DRV_ERR(mmu->device,
+ "Failed to add IOMMU disable clk event\n");
+ kfree(param);
}
}
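Each queued disable now carries its own heap-allocated parameter block, so any number of contexts and timestamps can be in flight at once; the old iommu_last_cmd_ts/clk_event_queued pair could track only a single pending disable. The ownership rule, sketched generically (struct cb_param, schedule_event() and clk_disable_cb() are hypothetical stand-ins):

	struct cb_param {		/* mirrors the hunk's param block */
		int ctx_id;
		unsigned int ts;
	};

	struct cb_param *p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return;
	p->ctx_id = ctx_id;
	p->ts = ts;
	/* on success, ownership passes to the event path; free only if
	 * the event was never queued */
	if (schedule_event(ts, clk_disable_cb, p))
		kfree(p);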
@@ -583,8 +570,7 @@ static int kgsl_iommu_enable_clk(struct kgsl_mmu *mmu,
for (i = 0; i < iommu->unit_count; i++) {
struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
for (j = 0; j < iommu_unit->dev_count; j++) {
- if (iommu_unit->dev[j].clk_enabled ||
- ctx_id != iommu_unit->dev[j].ctx_id)
+ if (ctx_id != iommu_unit->dev[j].ctx_id)
continue;
iommu_drvdata =
dev_get_drvdata(iommu_unit->dev[j].dev->parent);
@@ -610,12 +596,25 @@ static int kgsl_iommu_enable_clk(struct kgsl_mmu *mmu,
goto done;
}
}
- iommu_unit->dev[j].clk_enabled = true;
+ atomic_inc(&iommu_unit->dev[j].clk_enable_count);
}
}
done:
- if (ret)
- kgsl_iommu_disable_clk(mmu);
+ if (ret) {
+ struct kgsl_iommu_unit *iommu_unit;
+ if (iommu->unit_count == i)
+ i--;
+ iommu_unit = &iommu->iommu_units[i];
+ do {
+ for (j--; j >= 0; j--)
+ kgsl_iommu_disable_clk(mmu, ctx_id);
+ i--;
+ if (i >= 0) {
+ iommu_unit = &iommu->iommu_units[i];
+ j = iommu_unit->dev_count;
+ }
+ } while (i >= 0);
+ }
return ret;
}
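The new error path unwinds in reverse: it re-enters the unit whose device loop failed part-way, drops the references already taken there, then walks back through every earlier unit whose loop completed in full. The shape of that unwind, as a hypothetical sketch (enable_unit_clks()/disable_unit_clks() are illustrative names only):

	for (i = 0; i < unit_count; i++)
		if (enable_unit_clks(&units[i], ctx_id))
			goto unwind;
	return 0;

unwind:
	while (--i >= 0)
		disable_unit_clks(&units[i], ctx_id);	/* newest first */
	return ret;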
@@ -842,6 +841,9 @@ static int _get_iommu_ctxs(struct kgsl_mmu *mmu,
ret = -EINVAL;
goto done;
}
+ atomic_set(
+ &(iommu_unit->dev[iommu_unit->dev_count].clk_enable_count),
+ 0);
iommu_unit->dev[iommu_unit->dev_count].dev =
msm_iommu_get_ctx(data->iommu_ctxs[i].iommu_ctx_name);
@@ -1625,6 +1627,7 @@ static int kgsl_iommu_start(struct kgsl_mmu *mmu)
}
status = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);
if (status) {
+ kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
KGSL_CORE_ERR("clk enable failed\n");
goto done;
}
@@ -1670,14 +1673,11 @@ static int kgsl_iommu_start(struct kgsl_mmu *mmu)
KGSL_IOMMU_SETSTATE_NOP_OFFSET,
cp_nop_packet(1), sizeof(unsigned int));
- kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
+ kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
+ kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);
mmu->flags |= KGSL_FLAGS_STARTED;
done:
- if (status) {
- kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
- kgsl_detach_pagetable_iommu_domain(mmu);
- }
return status;
}
@@ -1800,6 +1800,7 @@ static void kgsl_iommu_pagefault_resume(struct kgsl_mmu *mmu)
iommu_unit,
iommu_unit->dev[j].ctx_id,
FSR, 0);
+				kgsl_iommu_disable_clk(mmu,
+					iommu_unit->dev[j].ctx_id);
_iommu_unlock(iommu);
iommu_unit->dev[j].fault = 0;
}
@@ -1812,7 +1813,6 @@ static void kgsl_iommu_pagefault_resume(struct kgsl_mmu *mmu)
static void kgsl_iommu_stop(struct kgsl_mmu *mmu)
{
- struct kgsl_iommu *iommu = mmu->priv;
/*
* stop device mmu
*
@@ -1828,9 +1828,7 @@ static void kgsl_iommu_stop(struct kgsl_mmu *mmu)
kgsl_iommu_pagefault_resume(mmu);
}
/* switch off MMU clocks and cancel any events it has queued */
- iommu->clk_event_queued = false;
kgsl_cancel_events(mmu->device, mmu);
- kgsl_iommu_disable_clk(mmu);
}
static int kgsl_iommu_close(struct kgsl_mmu *mmu)
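The explicit disable call can go away here because kgsl_cancel_events() delivers every queued disable event with KGSL_EVENT_CANCELLED, and the callback in the kgsl_iommu_clk_disable_event() hunk above releases the pending clock vote on that path too; once the events drain, nothing is left holding a reference.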
@@ -1883,7 +1881,7 @@ kgsl_iommu_get_current_ptbase(struct kgsl_mmu *mmu)
pt_base = KGSL_IOMMU_GET_CTX_REG_TTBR0(iommu,
(&iommu->iommu_units[0]),
KGSL_IOMMU_CONTEXT_USER);
- kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
+ kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
return pt_base & KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
}
@@ -1911,7 +1909,6 @@ static int kgsl_iommu_default_setstate(struct kgsl_mmu *mmu,
phys_addr_t pt_val;
ret = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
-
if (ret) {
KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n");
return ret;
@@ -1992,7 +1989,7 @@ unlock:
_iommu_unlock(iommu);
/* Disable smmu clock */
- kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
+ kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
return ret;
}
@@ -2072,13 +2069,14 @@ static int kgsl_iommu_set_pf_policy(struct kgsl_mmu *mmu,
if (ret) {
KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n");
- kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
+ kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
return ret;
}
/* Need to idle device before changing options */
ret = mmu->device->ftbl->idle(mmu->device);
if (ret) {
- kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
+ kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
+ kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);
return ret;
}
@@ -2101,7 +2099,8 @@ static int kgsl_iommu_set_pf_policy(struct kgsl_mmu *mmu,
SCTLR, sctlr_val);
}
}
- kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
+ kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
+ kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);
return ret;
}
diff --git a/drivers/gpu/msm/kgsl_iommu.h b/drivers/gpu/msm/kgsl_iommu.h
index 3b900dea663..85ab7dbee94 100644
--- a/drivers/gpu/msm/kgsl_iommu.h
+++ b/drivers/gpu/msm/kgsl_iommu.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -155,6 +155,7 @@ struct kgsl_iommu_register_list {
* are on, else the clocks are off
* fault: Flag when set indicates that this iommu device has caused a page
* fault
+ * @clk_enable_count: The ref count of clock enable calls
*/
struct kgsl_iommu_device {
struct device *dev;
@@ -164,6 +165,7 @@ struct kgsl_iommu_device {
bool clk_enabled;
struct kgsl_device *kgsldev;
int fault;
+ atomic_t clk_enable_count;
};
/*
@@ -194,10 +196,6 @@ struct kgsl_iommu_unit {
* iommu contexts owned by graphics cores
* @unit_count: Number of IOMMU units that are available for this
* instance of the IOMMU driver
- * @iommu_last_cmd_ts: The timestamp of last command submitted that
- * aceeses iommu registers
- * @clk_event_queued: Indicates whether an event to disable clocks
- * is already queued or not
* @device: Pointer to kgsl device
* @ctx_offset: The context offset to be added to base address when
* accessing IOMMU registers
@@ -213,8 +211,6 @@ struct kgsl_iommu_unit {
struct kgsl_iommu {
struct kgsl_iommu_unit iommu_units[KGSL_IOMMU_MAX_UNITS];
unsigned int unit_count;
- unsigned int iommu_last_cmd_ts;
- bool clk_event_queued;
struct kgsl_device *device;
unsigned int ctx_offset;
struct kgsl_iommu_register_list *iommu_reg_list;
@@ -234,4 +230,18 @@ struct kgsl_iommu_pt {
struct kgsl_iommu *iommu;
};
+/*
+ * struct kgsl_iommu_disable_clk_param - Parameter struct for the disable clk event
+ * @mmu: The mmu pointer
+ * @rb_level: the ringbuffer level to which the event's timestamp belongs
+ * @ctx_id: The IOMMU context whose clock is to be turned off
+ * @ts: Timestamp at which the clock is to be disabled
+ */
+struct kgsl_iommu_disable_clk_param {
+ struct kgsl_mmu *mmu;
+ int rb_level;
+ int ctx_id;
+ unsigned int ts;
+};
+
#endif
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index 0d5f46454c1..8fb3a23e33f 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -99,11 +99,12 @@ struct kgsl_mmu_ops {
void (*mmu_pagefault_resume)
(struct kgsl_mmu *mmu);
void (*mmu_disable_clk_on_ts)
- (struct kgsl_mmu *mmu, uint32_t ts, bool ts_valid);
+ (struct kgsl_mmu *mmu,
+ uint32_t ts, int ctx_id);
int (*mmu_enable_clk)
(struct kgsl_mmu *mmu, int ctx_id);
void (*mmu_disable_clk)
- (struct kgsl_mmu *mmu);
+ (struct kgsl_mmu *mmu, int ctx_id);
phys_addr_t (*mmu_get_default_ttbr0)(struct kgsl_mmu *mmu,
unsigned int unit_id,
enum kgsl_iommu_context_id ctx_id);
@@ -275,17 +276,18 @@ static inline int kgsl_mmu_enable_clk(struct kgsl_mmu *mmu,
return 0;
}
-static inline void kgsl_mmu_disable_clk(struct kgsl_mmu *mmu)
+static inline void kgsl_mmu_disable_clk(struct kgsl_mmu *mmu, int ctx_id)
{
if (mmu->mmu_ops && mmu->mmu_ops->mmu_disable_clk)
- mmu->mmu_ops->mmu_disable_clk(mmu);
+ mmu->mmu_ops->mmu_disable_clk(mmu, ctx_id);
}
static inline void kgsl_mmu_disable_clk_on_ts(struct kgsl_mmu *mmu,
- unsigned int ts, bool ts_valid)
+ unsigned int ts,
+ int ctx_id)
{
if (mmu->mmu_ops && mmu->mmu_ops->mmu_disable_clk_on_ts)
- mmu->mmu_ops->mmu_disable_clk_on_ts(mmu, ts, ts_valid);
+ mmu->mmu_ops->mmu_disable_clk_on_ts(mmu, ts, ctx_id);
}
static inline unsigned int kgsl_mmu_get_reg_gpuaddr(struct kgsl_mmu *mmu,
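The wrappers null-check mmu_ops so core code can call them unconditionally even when no IOMMU backend is active; with the new signatures a caller votes and unvotes per context. A hedged usage sketch:

	ret = kgsl_mmu_enable_clk(&device->mmu, KGSL_IOMMU_CONTEXT_USER);
	if (ret)
		return ret;
	/* ... touch IOMMU registers ... */
	kgsl_mmu_disable_clk(&device->mmu, KGSL_IOMMU_CONTEXT_USER);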
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 44c0a331430..7271e402e74 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -1085,8 +1085,7 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
pwr->interval_timeout = pdata->idle_timeout;
pwr->strtstp_sleepwake = pdata->strtstp_sleepwake;
- /* Set the CPU latency to 501usec to allow low latency PC modes */
- pwr->pm_qos_latency = 501;
+ pwr->pm_qos_latency = pdata->pm_qos_latency;
pm_runtime_enable(device->parentdev);
@@ -1366,8 +1365,6 @@ _sleep(struct kgsl_device *device)
break;
}
- kgsl_mmu_disable_clk_on_ts(&device->mmu, 0, false);
-
return 0;
}