diff options
Diffstat (limited to 'camera/common')
| -rw-r--r-- | camera/common/Makefile | 2 | ||||
| -rw-r--r-- | camera/common/cam_hw_ops.c | 338 | ||||
| -rw-r--r-- | camera/common/cam_hw_ops.h | 39 | ||||
| -rw-r--r-- | camera/common/cam_smmu_api.c | 1687 | ||||
| -rw-r--r-- | camera/common/cam_smmu_api.h | 166 | ||||
| -rw-r--r-- | camera/common/cam_soc_api.c | 944 | ||||
| -rw-r--r-- | camera/common/cam_soc_api.h | 386 | ||||
| -rw-r--r-- | camera/common/msm_camera_io_util.c | 843 | ||||
| -rw-r--r-- | camera/common/msm_camera_io_util.h | 93 |
9 files changed, 4498 insertions(+), 0 deletions(-)
diff --git a/camera/common/Makefile b/camera/common/Makefile new file mode 100644 index 00000000..51ea004c --- /dev/null +++ b/camera/common/Makefile @@ -0,0 +1,2 @@ +ccflags-y += -Icamera +obj-$(CONFIG_MSMB_CAMERA) += msm_camera_io_util.o cam_smmu_api.o cam_hw_ops.o cam_soc_api.o diff --git a/camera/common/cam_hw_ops.c b/camera/common/cam_hw_ops.c new file mode 100644 index 00000000..e8c5cbf2 --- /dev/null +++ b/camera/common/cam_hw_ops.c @@ -0,0 +1,338 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) "CAM-AHB %s:%d " fmt, __func__, __LINE__ +#define TRUE 1 +#include <linux/module.h> +#include <linux/msm-bus.h> +#include <linux/msm-bus-board.h> +#include <linux/of_platform.h> +#include <linux/pm_opp.h> +#include <linux/regulator/rpm-smd-regulator.h> +#include "cam_hw_ops.h" + +#ifdef CONFIG_CAM_AHB_DBG +#define CDBG(fmt, args...) pr_err(fmt, ##args) +#else +#define CDBG(fmt, args...) 
pr_debug(fmt, ##args) +#endif + +struct cam_ahb_client { + enum cam_ahb_clk_vote vote; +}; + +struct cam_bus_vector { + const char *name; +}; + +struct cam_ahb_client_data { + struct msm_bus_scale_pdata *pbus_data; + u32 ahb_client; + u32 ahb_clk_state; + struct msm_bus_vectors *paths; + struct msm_bus_paths *usecases; + struct cam_bus_vector *vectors; + u32 *votes; + u32 cnt; + u32 probe_done; + struct cam_ahb_client clients[CAM_AHB_CLIENT_MAX]; + struct mutex lock; +}; + +static struct cam_ahb_client_data data; + +int get_vector_index(char *name) +{ + int i = 0, rc = -1; + + for (i = 0; i < data.cnt; i++) { + if (strcmp(name, data.vectors[i].name) == 0) + return i; + } + + return rc; +} + +int cam_ahb_clk_init(struct platform_device *pdev) +{ + int i = 0, cnt = 0, rc = 0, index = 0; + struct device_node *of_node; + + if (!pdev) { + pr_err("invalid pdev argument\n"); + return -EINVAL; + } + + of_node = pdev->dev.of_node; + data.cnt = of_property_count_strings(of_node, "bus-vectors"); + if (data.cnt == 0) { + pr_err("no vectors strings found in device tree, count=%d", + data.cnt); + return 0; + } + + cnt = of_property_count_u32_elems(of_node, "qcom,bus-votes"); + if (cnt == 0) { + pr_err("no vector values found in device tree, count=%d", cnt); + return 0; + } + + if (data.cnt != cnt) { + pr_err("vector mismatch num of strings=%u, num of values %d\n", + data.cnt, cnt); + return -EINVAL; + } + + CDBG("number of bus vectors: %d\n", data.cnt); + + data.vectors = devm_kzalloc(&pdev->dev, + sizeof(struct cam_bus_vector) * cnt, + GFP_KERNEL); + if (!data.vectors) + return -ENOMEM; + + for (i = 0; i < data.cnt; i++) { + rc = of_property_read_string_index(of_node, "bus-vectors", + i, &(data.vectors[i].name)); + CDBG("dbg: names[%d] = %s\n", i, data.vectors[i].name); + if (rc < 0) { + pr_err("failed\n"); + rc = -EINVAL; + goto err1; + } + } + + data.paths = devm_kzalloc(&pdev->dev, + sizeof(struct msm_bus_vectors) * cnt, + GFP_KERNEL); + if (!data.paths) { + rc = -ENOMEM; + 
goto err1; + } + + data.usecases = devm_kzalloc(&pdev->dev, + sizeof(struct msm_bus_paths) * cnt, + GFP_KERNEL); + if (!data.usecases) { + rc = -ENOMEM; + goto err2; + } + + data.pbus_data = devm_kzalloc(&pdev->dev, + sizeof(struct msm_bus_scale_pdata), + GFP_KERNEL); + if (!data.pbus_data) { + rc = -ENOMEM; + goto err3; + } + + data.votes = devm_kzalloc(&pdev->dev, sizeof(u32) * cnt, + GFP_KERNEL); + if (!data.votes) { + rc = -ENOMEM; + goto err4; + } + + rc = of_property_read_u32_array(of_node, "qcom,bus-votes", + data.votes, cnt); + + for (i = 0; i < data.cnt; i++) { + data.paths[i] = (struct msm_bus_vectors) { + MSM_BUS_MASTER_AMPSS_M0, + MSM_BUS_SLAVE_CAMERA_CFG, + 0, + data.votes[i] + }; + data.usecases[i] = (struct msm_bus_paths) { + .num_paths = 1, + .vectors = &data.paths[i], + }; + CDBG("dbg: votes[%d] = %u\n", i, data.votes[i]); + } + + *data.pbus_data = (struct msm_bus_scale_pdata) { + .name = "msm_camera_ahb", + .num_usecases = data.cnt, + .usecase = data.usecases, + }; + + data.ahb_client = + msm_bus_scale_register_client(data.pbus_data); + if (!data.ahb_client) { + pr_err("ahb vote registering failed\n"); + rc = -EINVAL; + goto err5; + } + + index = get_vector_index("suspend"); + if (index < 0) { + pr_err("svs vector not supported\n"); + rc = -EINVAL; + goto err6; + } + + /* request for svs in init */ + msm_bus_scale_client_update_request(data.ahb_client, + index); + data.ahb_clk_state = CAM_AHB_SUSPEND_VOTE; + data.probe_done = TRUE; + mutex_init(&data.lock); + + CDBG("dbg, done registering ahb votes\n"); + CDBG("dbg, clk state :%u, probe :%d\n", + data.ahb_clk_state, data.probe_done); + return rc; + +err6: + msm_bus_scale_unregister_client(data.ahb_client); +err5: + devm_kfree(&pdev->dev, data.votes); + data.votes = NULL; +err4: + devm_kfree(&pdev->dev, data.pbus_data); + data.pbus_data = NULL; +err3: + devm_kfree(&pdev->dev, data.usecases); + data.usecases = NULL; +err2: + devm_kfree(&pdev->dev, data.paths); + data.paths = NULL; +err1: + 
devm_kfree(&pdev->dev, data.vectors); + data.vectors = NULL; + return rc; +} +EXPORT_SYMBOL(cam_ahb_clk_init); + +int cam_consolidate_ahb_vote(enum cam_ahb_clk_client id, + enum cam_ahb_clk_vote vote) +{ + int i = 0; + u32 max = 0; + + CDBG("dbg: id :%u, vote : 0x%x\n", id, vote); + mutex_lock(&data.lock); + data.clients[id].vote = vote; + + if (vote == data.ahb_clk_state) { + CDBG("dbg: already at desired vote\n"); + mutex_unlock(&data.lock); + return 0; + } + + for (i = 0; i < CAM_AHB_CLIENT_MAX; i++) { + if (data.clients[i].vote > max) + max = data.clients[i].vote; + } + + CDBG("dbg: max vote : %u\n", max); + if (max >= 0) { + if (max != data.ahb_clk_state) { + msm_bus_scale_client_update_request(data.ahb_client, + max); + data.ahb_clk_state = max; + CDBG("dbg: state : %u, vector : %d\n", + data.ahb_clk_state, max); + } + } else { + pr_err("err: no bus vector found\n"); + mutex_unlock(&data.lock); + return -EINVAL; + } + mutex_unlock(&data.lock); + return 0; +} + +static int cam_ahb_get_voltage_level(unsigned int corner) +{ + switch (corner) { + case RPM_REGULATOR_CORNER_NONE: + return CAM_AHB_SUSPEND_VOTE; + + case RPM_REGULATOR_CORNER_SVS_KRAIT: + case RPM_REGULATOR_CORNER_SVS_SOC: + return CAM_AHB_SVS_VOTE; + + case RPM_REGULATOR_CORNER_NORMAL: + return CAM_AHB_NOMINAL_VOTE; + + case RPM_REGULATOR_CORNER_SUPER_TURBO: + return CAM_AHB_TURBO_VOTE; + + case RPM_REGULATOR_CORNER_TURBO: + case RPM_REGULATOR_CORNER_RETENTION: + default: + return -EINVAL; + } +} + +int cam_config_ahb_clk(struct device *dev, unsigned long freq, + enum cam_ahb_clk_client id, enum cam_ahb_clk_vote vote) +{ + struct dev_pm_opp *opp; + unsigned int corner; + enum cam_ahb_clk_vote dyn_vote = vote; + int rc = -EINVAL; + + if (id >= CAM_AHB_CLIENT_MAX) { + pr_err("err: invalid argument\n"); + return -EINVAL; + } + + if (data.probe_done != TRUE) { + pr_err("ahb init is not done yet\n"); + return -EINVAL; + } + + CDBG("dbg: id :%u, vote : 0x%x\n", id, vote); + switch (dyn_vote) { + case 
CAM_AHB_SUSPEND_VOTE: + case CAM_AHB_SVS_VOTE: + case CAM_AHB_NOMINAL_VOTE: + case CAM_AHB_TURBO_VOTE: + break; + case CAM_AHB_DYNAMIC_VOTE: + if (!dev) { + pr_err("device is NULL\n"); + return -EINVAL; + } + opp = dev_pm_opp_find_freq_exact(dev, freq, true); + if (IS_ERR(opp)) { + pr_err("Error on OPP freq :%ld\n", freq); + return -EINVAL; + } + corner = dev_pm_opp_get_voltage(opp); + if (corner == 0) { + pr_err("Bad voltage corner for OPP freq :%ld\n", freq); + return -EINVAL; + } + dyn_vote = cam_ahb_get_voltage_level(corner); + if (dyn_vote < 0) { + pr_err("Bad vote requested\n"); + return -EINVAL; + } + break; + default: + pr_err("err: invalid vote argument\n"); + return -EINVAL; + } + + rc = cam_consolidate_ahb_vote(id, dyn_vote); + if (rc < 0) { + pr_err("%s: failed to vote for AHB\n", __func__); + goto end; + } + +end: + return rc; +} +EXPORT_SYMBOL(cam_config_ahb_clk); diff --git a/camera/common/cam_hw_ops.h b/camera/common/cam_hw_ops.h new file mode 100644 index 00000000..015c2099 --- /dev/null +++ b/camera/common/cam_hw_ops.h @@ -0,0 +1,39 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +enum cam_ahb_clk_vote { + /* need to update the voting requests + * according to dtsi entries. 
+ */ + CAM_AHB_SUSPEND_VOTE = 0x0, + CAM_AHB_SVS_VOTE = 0x01, + CAM_AHB_NOMINAL_VOTE = 0x02, + CAM_AHB_TURBO_VOTE = 0x03, + CAM_AHB_DYNAMIC_VOTE = 0xFF, +}; + +enum cam_ahb_clk_client { + CAM_AHB_CLIENT_CSIPHY, + CAM_AHB_CLIENT_CSID, + CAM_AHB_CLIENT_CCI, + CAM_AHB_CLIENT_ISPIF, + CAM_AHB_CLIENT_VFE0, + CAM_AHB_CLIENT_VFE1, + CAM_AHB_CLIENT_CPP, + CAM_AHB_CLIENT_FD, + CAM_AHB_CLIENT_JPEG, + CAM_AHB_CLIENT_MAX +}; + +int cam_config_ahb_clk(struct device *dev, unsigned long freq, + enum cam_ahb_clk_client id, enum cam_ahb_clk_vote vote); +int cam_ahb_clk_init(struct platform_device *pdev); diff --git a/camera/common/cam_smmu_api.c b/camera/common/cam_smmu_api.c new file mode 100644 index 00000000..e703791f --- /dev/null +++ b/camera/common/cam_smmu_api.c @@ -0,0 +1,1687 @@ +/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#define pr_fmt(fmt) "CAM-SMMU %s:%d " fmt, __func__, __LINE__ + +#include <linux/module.h> +#include <linux/dma-buf.h> +#include <asm/dma-iommu.h> +#include <linux/dma-direction.h> +#include <linux/dma-attrs.h> +#include <linux/of_platform.h> +#include <linux/iommu.h> +#include <linux/slab.h> +#include <linux/qcom_iommu.h> +#include <linux/dma-mapping.h> +#include <linux/msm_dma_iommu_mapping.h> +#include <linux/workqueue.h> +#include "cam_smmu_api.h" + +#define SCRATCH_ALLOC_START SZ_128K +#define SCRATCH_ALLOC_END SZ_256M +#define VA_SPACE_END SZ_2G +#define IOMMU_INVALID_DIR -1 +#define BYTE_SIZE 8 +#define COOKIE_NUM_BYTE 2 +#define COOKIE_SIZE (BYTE_SIZE*COOKIE_NUM_BYTE) +#define COOKIE_MASK ((1<<COOKIE_SIZE)-1) +#define HANDLE_INIT (-1) +#define CAM_SMMU_CB_MAX 2 + +#define GET_SMMU_HDL(x, y) (((x) << COOKIE_SIZE) | ((y) & COOKIE_MASK)) +#define GET_SMMU_TABLE_IDX(x) (((x) >> COOKIE_SIZE) & COOKIE_MASK) + +#ifdef CONFIG_CAM_SMMU_DBG +#define CDBG(fmt, args...) pr_err(fmt, ##args) +#else +#define CDBG(fmt, args...) 
pr_debug(fmt, ##args) +#endif + +struct cam_smmu_work_payload { + int idx; + struct iommu_domain *domain; + struct device *dev; + unsigned long iova; + int flags; + void *token; + struct list_head list; +}; + +enum cam_protection_type { + CAM_PROT_INVALID, + CAM_NON_SECURE, + CAM_SECURE, + CAM_PROT_MAX, +}; + +enum cam_iommu_type { + CAM_SMMU_INVALID, + CAM_QSMMU, + CAM_ARM_SMMU, + CAM_SMMU_MAX, +}; + +enum cam_smmu_buf_state { + CAM_SMMU_BUFF_EXIST, + CAM_SMMU_BUFF_NOT_EXIST +}; + +enum cam_smmu_init_dir { + CAM_SMMU_TABLE_INIT, + CAM_SMMU_TABLE_DEINIT, +}; + +struct scratch_mapping { + void *bitmap; + size_t bits; + unsigned int order; + dma_addr_t base; +}; + +struct cam_context_bank_info { + struct device *dev; + struct dma_iommu_mapping *mapping; + dma_addr_t va_start; + size_t va_len; + const char *name; + bool is_secure; + uint8_t scratch_buf_support; + struct scratch_mapping scratch_map; + struct list_head smmu_buf_list; + struct mutex lock; + int handle; + enum cam_smmu_ops_param state; + void (*handler[CAM_SMMU_CB_MAX])(struct iommu_domain *, + struct device *, unsigned long, + int, void*); + void *token[CAM_SMMU_CB_MAX]; + int cb_count; +}; + +struct cam_iommu_cb_set { + struct cam_context_bank_info *cb_info; + u32 cb_num; + u32 cb_init_count; + struct work_struct smmu_work; + struct mutex payload_list_lock; + struct list_head payload_list; +}; + +static struct of_device_id msm_cam_smmu_dt_match[] = { + { .compatible = "qcom,msm-cam-smmu", }, + { .compatible = "qcom,msm-cam-smmu-cb", }, + { .compatible = "qcom,qsmmu-cam-cb", }, + {} +}; + +struct cam_dma_buff_info { + struct dma_buf *buf; + struct dma_buf_attachment *attach; + struct sg_table *table; + enum dma_data_direction dir; + int iommu_dir; + int ref_count; + dma_addr_t paddr; + struct list_head list; + int ion_fd; + size_t len; + size_t phys_len; +}; + +static struct cam_iommu_cb_set iommu_cb_set; + +static enum dma_data_direction cam_smmu_translate_dir( + enum cam_smmu_map_dir dir); + +static 
int cam_smmu_check_handle_unique(int hdl); + +static int cam_smmu_create_iommu_handle(int idx); + +static int cam_smmu_create_add_handle_in_table(char *name, + int *hdl); + +static struct cam_dma_buff_info *cam_smmu_find_mapping_by_ion_index(int idx, + int ion_fd); + +static int cam_smmu_init_scratch_map(struct scratch_mapping *scratch_map, + dma_addr_t base, size_t size, + int order); + +static int cam_smmu_alloc_scratch_va(struct scratch_mapping *mapping, + size_t size, + dma_addr_t *iova); + +static int cam_smmu_free_scratch_va(struct scratch_mapping *mapping, + dma_addr_t addr, size_t size); + +static struct cam_dma_buff_info *cam_smmu_find_mapping_by_virt_address(int idx, + dma_addr_t virt_addr); + +static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd, + enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr, + size_t *len_ptr); + +static int cam_smmu_alloc_scratch_buffer_add_to_list(int idx, + size_t virt_len, + size_t phys_len, + unsigned int iommu_dir, + dma_addr_t *virt_addr); +static int cam_smmu_unmap_buf_and_remove_from_list( + struct cam_dma_buff_info *mapping_info, int idx); + +static int cam_smmu_free_scratch_buffer_remove_from_list( + struct cam_dma_buff_info *mapping_info, + int idx); + +static void cam_smmu_clean_buffer_list(int idx); + +static void cam_smmu_print_list(int idx); + +static void cam_smmu_print_table(void); + +static int cam_smmu_probe(struct platform_device *pdev); + +static void cam_smmu_check_vaddr_in_range(int idx, void *vaddr); + +static void cam_smmu_page_fault_work(struct work_struct *work) +{ + int j; + int idx; + struct cam_smmu_work_payload *payload; + + mutex_lock(&iommu_cb_set.payload_list_lock); + payload = list_first_entry(&iommu_cb_set.payload_list, + struct cam_smmu_work_payload, + list); + list_del(&payload->list); + mutex_unlock(&iommu_cb_set.payload_list_lock); + + /* Dereference the payload to call the handler */ + idx = payload->idx; + mutex_lock(&iommu_cb_set.cb_info[idx].lock); + 
cam_smmu_check_vaddr_in_range(idx, (void *)payload->iova); + for (j = 0; j < CAM_SMMU_CB_MAX; j++) { + if ((iommu_cb_set.cb_info[idx].handler[j])) { + iommu_cb_set.cb_info[idx].handler[j]( + payload->domain, + payload->dev, + payload->iova, + payload->flags, + iommu_cb_set.cb_info[idx].token[j]); + } + } + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + kfree(payload); +} + +static void cam_smmu_print_list(int idx) +{ + struct cam_dma_buff_info *mapping; + + pr_err("index = %d ", idx); + list_for_each_entry(mapping, + &iommu_cb_set.cb_info[idx].smmu_buf_list, list) { + pr_err("ion_fd = %d, paddr= 0x%p, len = %u\n", + mapping->ion_fd, (void *)mapping->paddr, + (unsigned int)mapping->len); + } +} + +static void cam_smmu_print_table(void) +{ + int i; + + for (i = 0; i < iommu_cb_set.cb_num; i++) { + pr_err("i= %d, handle= %d, name_addr=%p\n", i, + (int)iommu_cb_set.cb_info[i].handle, + (void *)iommu_cb_set.cb_info[i].name); + pr_err("dev = %p ", iommu_cb_set.cb_info[i].dev); + } +} + + +int cam_smmu_query_vaddr_in_range(int handle, + unsigned long fault_addr, unsigned long *start_addr, + unsigned long *end_addr, int *fd) +{ + int idx, rc = -EINVAL; + struct cam_dma_buff_info *mapping; + unsigned long sa, ea; + + if (!start_addr || !end_addr || !fd) { + pr_err("Invalid params\n"); + return -EINVAL; + } + + idx = GET_SMMU_TABLE_IDX(handle); + if (handle == HANDLE_INIT || idx < 0 || idx >= iommu_cb_set.cb_num) { + pr_err("Error: handle or index invalid. 
idx = %d hdl = %x\n", + idx, handle); + return -EINVAL; + } + + mutex_lock(&iommu_cb_set.cb_info[idx].lock); + if (iommu_cb_set.cb_info[idx].handle != handle) { + pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n", + iommu_cb_set.cb_info[idx].handle, handle); + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return -EINVAL; + } + + list_for_each_entry(mapping, + &iommu_cb_set.cb_info[idx].smmu_buf_list, list) { + sa = (unsigned long)mapping->paddr; + ea = (unsigned long)mapping->paddr + mapping->len; + + if (sa <= fault_addr && fault_addr < ea) { + *start_addr = sa; + *end_addr = ea; + *fd = mapping->ion_fd; + rc = 0; + break; + } + } + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return rc; +} +EXPORT_SYMBOL(cam_smmu_query_vaddr_in_range); + +static void cam_smmu_check_vaddr_in_range(int idx, void *vaddr) +{ + struct cam_dma_buff_info *mapping; + unsigned long start_addr, end_addr, current_addr; + + current_addr = (unsigned long)vaddr; + list_for_each_entry(mapping, + &iommu_cb_set.cb_info[idx].smmu_buf_list, list) { + start_addr = (unsigned long)mapping->paddr; + end_addr = (unsigned long)mapping->paddr + mapping->len; + + if (start_addr <= current_addr && current_addr < end_addr) { + pr_err("Error: va %p is valid: range:%p-%p, fd = %d cb: %s\n", + vaddr, (void *)start_addr, (void *)end_addr, + mapping->ion_fd, + iommu_cb_set.cb_info[idx].name); + return; + } else { + CDBG("va %p is not in this range: %p-%p, fd = %d\n", + vaddr, (void *)start_addr, (void *)end_addr, + mapping->ion_fd); + } + } + pr_err("Cannot find vaddr:%p in SMMU. 
%s uses invalid virtual address\n", + vaddr, iommu_cb_set.cb_info[idx].name); + return; +} + +void cam_smmu_reg_client_page_fault_handler(int handle, + void (*client_page_fault_handler)(struct iommu_domain *, + struct device *, unsigned long, + int, void*), void *token) +{ + int idx, i = 0; + + if (!token) { + pr_err("Error: token is NULL\n"); + return; + } + + idx = GET_SMMU_TABLE_IDX(handle); + if (handle == HANDLE_INIT || idx < 0 || idx >= iommu_cb_set.cb_num) { + pr_err("Error: handle or index invalid. idx = %d hdl = %x\n", + idx, handle); + return; + } + + mutex_lock(&iommu_cb_set.cb_info[idx].lock); + if (iommu_cb_set.cb_info[idx].handle != handle) { + pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n", + iommu_cb_set.cb_info[idx].handle, handle); + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return; + } + + if (client_page_fault_handler) { + if (iommu_cb_set.cb_info[idx].cb_count == CAM_SMMU_CB_MAX) { + pr_err("%s Should not regiester more handlers\n", + iommu_cb_set.cb_info[idx].name); + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return; + } + iommu_cb_set.cb_info[idx].cb_count++; + for (i = 0; i < iommu_cb_set.cb_info[idx].cb_count; i++) { + if (iommu_cb_set.cb_info[idx].token[i] == NULL) { + iommu_cb_set.cb_info[idx].token[i] = token; + iommu_cb_set.cb_info[idx].handler[i] = + client_page_fault_handler; + break; + } + } + } else { + for (i = 0; i < CAM_SMMU_CB_MAX; i++) { + if (iommu_cb_set.cb_info[idx].token[i] == token) { + iommu_cb_set.cb_info[idx].token[i] = NULL; + iommu_cb_set.cb_info[idx].handler[i] = + NULL; + iommu_cb_set.cb_info[idx].cb_count--; + break; + } + } + if (i == CAM_SMMU_CB_MAX) + pr_err("Error: hdl %x no matching tokens: %s\n", + handle, iommu_cb_set.cb_info[idx].name); + } + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return; +} + +static int cam_smmu_iommu_fault_handler(struct iommu_domain *domain, + struct device *dev, unsigned long iova, + int flags, void *token) +{ + char *cb_name; + int idx; + struct 
cam_smmu_work_payload *payload; + + if (!token) { + pr_err("Error: token is NULL\n"); + pr_err("Error: domain = %p, device = %p\n", domain, dev); + pr_err("iova = %lX, flags = %d\n", iova, flags); + return 0; + } + + cb_name = (char *)token; + /* check wether it is in the table */ + for (idx = 0; idx < iommu_cb_set.cb_num; idx++) { + if (!strcmp(iommu_cb_set.cb_info[idx].name, cb_name)) + break; + } + + if (idx < 0 || idx >= iommu_cb_set.cb_num) { + pr_err("Error: index is not valid, index = %d, token = %s\n", + idx, cb_name); + return 0; + } + + payload = kzalloc(sizeof(struct cam_smmu_work_payload), GFP_ATOMIC); + if (!payload) + return 0; + + payload->domain = domain; + payload->dev = dev; + payload->iova = iova; + payload->flags = flags; + payload->token = token; + payload->idx = idx; + + mutex_lock(&iommu_cb_set.payload_list_lock); + list_add_tail(&payload->list, &iommu_cb_set.payload_list); + mutex_unlock(&iommu_cb_set.payload_list_lock); + + schedule_work(&iommu_cb_set.smmu_work); + + return 0; +} + +static int cam_smmu_translate_dir_to_iommu_dir( + enum cam_smmu_map_dir dir) +{ + switch (dir) { + case CAM_SMMU_MAP_READ: + return IOMMU_READ; + case CAM_SMMU_MAP_WRITE: + return IOMMU_WRITE; + case CAM_SMMU_MAP_RW: + return IOMMU_READ|IOMMU_WRITE; + case CAM_SMMU_MAP_INVALID: + default: + pr_err("Error: Direction is invalid. dir = %d\n", dir); + break; + }; + return IOMMU_INVALID_DIR; +} + +static enum dma_data_direction cam_smmu_translate_dir( + enum cam_smmu_map_dir dir) +{ + switch (dir) { + case CAM_SMMU_MAP_READ: + return DMA_FROM_DEVICE; + case CAM_SMMU_MAP_WRITE: + return DMA_TO_DEVICE; + case CAM_SMMU_MAP_RW: + return DMA_BIDIRECTIONAL; + case CAM_SMMU_MAP_INVALID: + default: + pr_err("Error: Direction is invalid. 
dir = %d\n", (int)dir); + break; + } + return DMA_NONE; +} + +void cam_smmu_reset_iommu_table(enum cam_smmu_init_dir ops) +{ + unsigned int i; + int j = 0; + for (i = 0; i < iommu_cb_set.cb_num; i++) { + iommu_cb_set.cb_info[i].handle = HANDLE_INIT; + INIT_LIST_HEAD(&iommu_cb_set.cb_info[i].smmu_buf_list); + iommu_cb_set.cb_info[i].state = CAM_SMMU_DETACH; + iommu_cb_set.cb_info[i].dev = NULL; + iommu_cb_set.cb_info[i].cb_count = 0; + for (j = 0; j < CAM_SMMU_CB_MAX; j++) { + iommu_cb_set.cb_info[i].token[j] = NULL; + iommu_cb_set.cb_info[i].handler[j] = NULL; + } + if (ops == CAM_SMMU_TABLE_INIT) + mutex_init(&iommu_cb_set.cb_info[i].lock); + else + mutex_destroy(&iommu_cb_set.cb_info[i].lock); + } +} + +static int cam_smmu_check_handle_unique(int hdl) +{ + int i; + + if (hdl == HANDLE_INIT) { + CDBG("iommu handle is init number. Need to try again\n"); + return 1; + } + + for (i = 0; i < iommu_cb_set.cb_num; i++) { + if (iommu_cb_set.cb_info[i].handle == HANDLE_INIT) + continue; + + if (iommu_cb_set.cb_info[i].handle == hdl) { + CDBG("iommu handle %d conflicts\n", (int)hdl); + return 1; + } + } + return 0; +} + +/** + * use low 2 bytes for handle cookie + */ +static int cam_smmu_create_iommu_handle(int idx) +{ + int rand, hdl = 0; + get_random_bytes(&rand, COOKIE_NUM_BYTE); + hdl = GET_SMMU_HDL(idx, rand); + CDBG("create handle value = %x\n", (int)hdl); + return hdl; +} + +static int cam_smmu_attach_device(int idx) +{ + int rc; + struct cam_context_bank_info *cb = &iommu_cb_set.cb_info[idx]; + + /* attach the mapping to device */ + rc = arm_iommu_attach_device(cb->dev, cb->mapping); + if (rc < 0) { + pr_err("Error: ARM IOMMU attach failed. 
ret = %d\n", rc); + return -ENODEV; + } + return rc; +} + +static int cam_smmu_create_add_handle_in_table(char *name, + int *hdl) +{ + int i; + int handle; + + /* create handle and add in the iommu hardware table */ + for (i = 0; i < iommu_cb_set.cb_num; i++) { + if (!strcmp(iommu_cb_set.cb_info[i].name, name)) { + mutex_lock(&iommu_cb_set.cb_info[i].lock); + if (iommu_cb_set.cb_info[i].handle != HANDLE_INIT) { + pr_err("Error: %s already got handle 0x%x\n", + name, + iommu_cb_set.cb_info[i].handle); + mutex_unlock(&iommu_cb_set.cb_info[i].lock); + return -EINVAL; + } + + /* make sure handle is unique */ + do { + handle = cam_smmu_create_iommu_handle(i); + } while (cam_smmu_check_handle_unique(handle)); + + /* put handle in the table */ + iommu_cb_set.cb_info[i].handle = handle; + iommu_cb_set.cb_info[i].cb_count = 0; + *hdl = handle; + CDBG("%s creates handle 0x%x\n", name, handle); + mutex_unlock(&iommu_cb_set.cb_info[i].lock); + return 0; + } + } + + /* if i == iommu_cb_set.cb_num */ + pr_err("Error: Cannot find name %s or all handle exist!\n", + name); + cam_smmu_print_table(); + return -EINVAL; +} + +static int cam_smmu_init_scratch_map(struct scratch_mapping *scratch_map, + dma_addr_t base, size_t size, + int order) +{ + unsigned int count = size >> (PAGE_SHIFT + order); + unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long); + int err = 0; + + if (!count) { + err = -EINVAL; + pr_err("Error: wrong size passed, page count can't be zero"); + goto bail; + } + + scratch_map->bitmap = kzalloc(bitmap_size, GFP_KERNEL); + if (!scratch_map->bitmap) { + err = -ENOMEM; + goto bail; + } + + scratch_map->base = base; + scratch_map->bits = BITS_PER_BYTE * bitmap_size; + scratch_map->order = order; + +bail: + return err; +} + +static int cam_smmu_alloc_scratch_va(struct scratch_mapping *mapping, + size_t size, + dma_addr_t *iova) +{ + int rc = 0; + unsigned int order = get_order(size); + unsigned int align = 0; + unsigned int count, start; + + count = 
((PAGE_ALIGN(size) >> PAGE_SHIFT) + + (1 << mapping->order) - 1) >> mapping->order; + + /* Transparently, add a guard page to the total count of pages + * to be allocated */ + count++; + + if (order > mapping->order) + align = (1 << (order - mapping->order)) - 1; + + start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0, + count, align); + + if (start > mapping->bits) + rc = -ENOMEM; + + bitmap_set(mapping->bitmap, start, count); + + *iova = mapping->base + (start << (mapping->order + PAGE_SHIFT)); + return rc; +} + +static int cam_smmu_free_scratch_va(struct scratch_mapping *mapping, + dma_addr_t addr, size_t size) +{ + unsigned int start = (addr - mapping->base) >> + (mapping->order + PAGE_SHIFT); + unsigned int count = ((size >> PAGE_SHIFT) + + (1 << mapping->order) - 1) >> mapping->order; + + if (!addr) { + pr_err("Error: Invalid address\n"); + return -EINVAL; + } + + if (start + count > mapping->bits) { + pr_err("Error: Invalid page bits in scratch map\n"); + return -EINVAL; + } + + /* Transparently, add a guard page to the total count of pages + * to be freed */ + count++; + + bitmap_clear(mapping->bitmap, start, count); + + return 0; +} + +static struct cam_dma_buff_info *cam_smmu_find_mapping_by_virt_address(int idx, + dma_addr_t virt_addr) +{ + struct cam_dma_buff_info *mapping; + + list_for_each_entry(mapping, &iommu_cb_set.cb_info[idx].smmu_buf_list, + list) { + if (mapping->paddr == virt_addr) { + CDBG("Found virtual address %lx\n", + (unsigned long)virt_addr); + return mapping; + } + } + + pr_err("Error: Cannot find virtual address %lx by index %d\n", + (unsigned long)virt_addr, idx); + return NULL; +} + +static struct cam_dma_buff_info *cam_smmu_find_mapping_by_ion_index(int idx, + int ion_fd) +{ + struct cam_dma_buff_info *mapping; + + list_for_each_entry(mapping, &iommu_cb_set.cb_info[idx].smmu_buf_list, + list) { + if (mapping->ion_fd == ion_fd) { + CDBG(" find ion_fd %d\n", ion_fd); + return mapping; + } + } + + pr_err("Error: 
Cannot find fd %d by index %d\n", + ion_fd, idx); + return NULL; +} + +static void cam_smmu_clean_buffer_list(int idx) +{ + int ret; + struct cam_dma_buff_info *mapping_info, *temp; + + list_for_each_entry_safe(mapping_info, temp, + &iommu_cb_set.cb_info[idx].smmu_buf_list, list) { + CDBG("Free mapping address %p, i = %d, fd = %d\n", + (void *)mapping_info->paddr, idx, + mapping_info->ion_fd); + + if (mapping_info->ion_fd == 0xDEADBEEF) + /* Clean up scratch buffers */ + ret = cam_smmu_free_scratch_buffer_remove_from_list( + mapping_info, idx); + else + /* Clean up regular mapped buffers */ + ret = cam_smmu_unmap_buf_and_remove_from_list( + mapping_info, + idx); + + if (ret < 0) { + pr_err("Buffer delete failed: idx = %d\n", idx); + pr_err("Buffer delete failed: addr = %lx, fd = %d\n", + (unsigned long)mapping_info->paddr, + mapping_info->ion_fd); + /* + * Ignore this error and continue to delete other + * buffers in the list + */ + continue; + } + } +} + +static int cam_smmu_attach(int idx) +{ + int ret; + + if (iommu_cb_set.cb_info[idx].state == CAM_SMMU_ATTACH) { + ret = 0; + } else if (iommu_cb_set.cb_info[idx].state == CAM_SMMU_DETACH) { + ret = cam_smmu_attach_device(idx); + if (ret < 0) { + pr_err("Error: ATTACH fail\n"); + return -ENODEV; + } + iommu_cb_set.cb_info[idx].state = CAM_SMMU_ATTACH; + ret = 0; + } else { + pr_err("Error: Not detach/attach\n"); + ret = -EINVAL; + } + return ret; +} + +static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd, + enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr, + size_t *len_ptr) +{ + int rc = -1; + struct cam_dma_buff_info *mapping_info; + struct dma_buf *buf = NULL; + struct dma_buf_attachment *attach = NULL; + struct sg_table *table = NULL; + + if (!paddr_ptr) { + pr_err("Error: Input pointer invalid\n"); + rc = -EINVAL; + goto err_out; + } + + /* allocate memory for each buffer information */ + buf = dma_buf_get(ion_fd); + if (IS_ERR_OR_NULL(buf)) { + rc = PTR_ERR(buf); + pr_err("Error: dma get 
buf failed. fd = %d\n", ion_fd); + goto err_out; + } + + attach = dma_buf_attach(buf, iommu_cb_set.cb_info[idx].dev); + if (IS_ERR_OR_NULL(attach)) { + rc = PTR_ERR(attach); + pr_err("Error: dma buf attach failed\n"); + goto err_put; + } + + table = dma_buf_map_attachment(attach, dma_dir); + if (IS_ERR_OR_NULL(table)) { + rc = PTR_ERR(table); + pr_err("Error: dma buf map attachment failed\n"); + goto err_detach; + } + + rc = msm_dma_map_sg_lazy(iommu_cb_set.cb_info[idx].dev, table->sgl, + table->nents, dma_dir, buf); + if (!rc) { + pr_err("Error: msm_dma_map_sg_lazy failed\n"); + goto err_unmap_sg; + } + + if (table->sgl) { + CDBG("DMA buf: %p, device: %p, attach: %p, table: %p\n", + (void *)buf, + (void *)iommu_cb_set.cb_info[idx].dev, + (void *)attach, (void *)table); + CDBG("table sgl: %p, rc: %d, dma_address: 0x%x\n", + (void *)table->sgl, rc, + (unsigned int)table->sgl->dma_address); + } else { + rc = -EINVAL; + pr_err("Error: table sgl is null\n"); + goto err_unmap_sg; + } + + /* fill up mapping_info */ + mapping_info = kzalloc(sizeof(struct cam_dma_buff_info), GFP_KERNEL); + if (!mapping_info) { + pr_err("Error: No enough space!\n"); + rc = -ENOSPC; + goto err_unmap_sg; + } + mapping_info->ion_fd = ion_fd; + mapping_info->buf = buf; + mapping_info->attach = attach; + mapping_info->table = table; + mapping_info->paddr = sg_dma_address(table->sgl); + mapping_info->len = (size_t)sg_dma_len(table->sgl); + mapping_info->dir = dma_dir; + mapping_info->ref_count = 1; + + /* return paddr and len to client */ + *paddr_ptr = sg_dma_address(table->sgl); + *len_ptr = (size_t)sg_dma_len(table->sgl); + + if (!*paddr_ptr || !*len_ptr) { + pr_err("Error: Space Allocation failed!\n"); + rc = -ENOSPC; + goto err_unmap_sg; + } + CDBG("ion_fd = %d, dev = %p, paddr= %p, len = %u\n", ion_fd, + (void *)iommu_cb_set.cb_info[idx].dev, + (void *)*paddr_ptr, (unsigned int)*len_ptr); + + /* add to the list */ + list_add(&mapping_info->list, &iommu_cb_set.cb_info[idx].smmu_buf_list); + 
return 0; + +err_unmap_sg: + dma_buf_unmap_attachment(attach, table, dma_dir); +err_detach: + dma_buf_detach(buf, attach); +err_put: + dma_buf_put(buf); +err_out: + return rc; +} + +static int cam_smmu_unmap_buf_and_remove_from_list( + struct cam_dma_buff_info *mapping_info, + int idx) +{ + if ((!mapping_info->buf) || (!mapping_info->table) || + (!mapping_info->attach)) { + pr_err("Error: Invalid params dev = %p, table = %p", + (void *)iommu_cb_set.cb_info[idx].dev, + (void *)mapping_info->table); + pr_err("Error:dma_buf = %p, attach = %p\n", + (void *)mapping_info->buf, + (void *)mapping_info->attach); + return -EINVAL; + } + + /* iommu buffer clean up */ + msm_dma_unmap_sg(iommu_cb_set.cb_info[idx].dev, + mapping_info->table->sgl, mapping_info->table->nents, + mapping_info->dir, mapping_info->buf); + dma_buf_unmap_attachment(mapping_info->attach, + mapping_info->table, mapping_info->dir); + dma_buf_detach(mapping_info->buf, mapping_info->attach); + dma_buf_put(mapping_info->buf); + mapping_info->buf = NULL; + + list_del_init(&mapping_info->list); + + /* free one buffer */ + kfree(mapping_info); + return 0; +} + +static enum cam_smmu_buf_state cam_smmu_check_fd_in_list(int idx, + int ion_fd, dma_addr_t *paddr_ptr, + size_t *len_ptr) +{ + struct cam_dma_buff_info *mapping; + list_for_each_entry(mapping, + &iommu_cb_set.cb_info[idx].smmu_buf_list, + list) { + if (mapping->ion_fd == ion_fd) { + mapping->ref_count++; + *paddr_ptr = mapping->paddr; + *len_ptr = mapping->len; + return CAM_SMMU_BUFF_EXIST; + } + } + return CAM_SMMU_BUFF_NOT_EXIST; +} + +int cam_smmu_get_handle(char *identifier, int *handle_ptr) +{ + int ret = 0; + + if (!identifier) { + pr_err("Error: iommu harware name is NULL\n"); + return -EFAULT; + } + + if (!handle_ptr) { + pr_err("Error: handle pointer is NULL\n"); + return -EFAULT; + } + + /* create and put handle in the table */ + ret = cam_smmu_create_add_handle_in_table(identifier, handle_ptr); + if (ret < 0) { + pr_err("Error: %s get handle 
fail\n", identifier); + return ret; + } + return ret; +} +EXPORT_SYMBOL(cam_smmu_get_handle); + +int cam_smmu_ops(int handle, enum cam_smmu_ops_param ops) +{ + int ret = 0, idx; + + CDBG("E: ops = %d\n", ops); + idx = GET_SMMU_TABLE_IDX(handle); + if (handle == HANDLE_INIT || idx < 0 || idx >= iommu_cb_set.cb_num) { + pr_err("Error: handle or index invalid. idx = %d hdl = %x\n", + idx, handle); + return -EINVAL; + } + + mutex_lock(&iommu_cb_set.cb_info[idx].lock); + if (iommu_cb_set.cb_info[idx].handle != handle) { + pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n", + iommu_cb_set.cb_info[idx].handle, handle); + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return -EINVAL; + } + + switch (ops) { + case CAM_SMMU_ATTACH: { + ret = cam_smmu_attach(idx); + break; + } + case CAM_SMMU_DETACH: { + ret = 0; + break; + } + case CAM_SMMU_VOTE: + case CAM_SMMU_DEVOTE: + default: + pr_err("Error: idx = %d, ops = %d\n", idx, ops); + ret = -EINVAL; + } + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return ret; +} +EXPORT_SYMBOL(cam_smmu_ops); + +static int cam_smmu_alloc_scratch_buffer_add_to_list(int idx, + size_t virt_len, + size_t phys_len, + unsigned int iommu_dir, + dma_addr_t *virt_addr) +{ + unsigned long nents = virt_len / phys_len; + struct cam_dma_buff_info *mapping_info = NULL; + size_t unmapped; + dma_addr_t iova = 0; + struct scatterlist *sg; + int i = 0; + int rc; + struct iommu_domain *domain = NULL; + struct page *page; + struct sg_table *table = NULL; + + CDBG("%s: nents = %lu, idx = %d, virt_len = %zx\n", + __func__, nents, idx, virt_len); + CDBG("%s: phys_len = %zx, iommu_dir = %d, virt_addr = %p\n", + __func__, phys_len, iommu_dir, virt_addr); + + /* This table will go inside the 'mapping' structure + * where it will be held until put_scratch_buffer is called + */ + table = kzalloc(sizeof(struct sg_table), GFP_KERNEL); + if (!table) { + rc = -ENOMEM; + goto err_table_alloc; + } + + rc = sg_alloc_table(table, nents, GFP_KERNEL); + if (rc < 
0) { + rc = -EINVAL; + goto err_sg_alloc; + } + + page = alloc_pages(GFP_KERNEL, get_order(phys_len)); + if (!page) { + rc = -ENOMEM; + goto err_page_alloc; + } + + /* Now we create the sg list */ + for_each_sg(table->sgl, sg, table->nents, i) + sg_set_page(sg, page, phys_len, 0); + + + /* Get the domain from within our cb_set struct and map it*/ + domain = iommu_cb_set.cb_info[idx].mapping->domain; + + rc = cam_smmu_alloc_scratch_va(&iommu_cb_set.cb_info[idx].scratch_map, + virt_len, &iova); + + if (rc < 0) { + pr_err("Could not find valid iova for scratch buffer"); + goto err_iommu_map; + } + + if (iommu_map_sg(domain, + iova, + table->sgl, + table->nents, + iommu_dir) != virt_len) { + pr_err("iommu_map_sg() failed"); + goto err_iommu_map; + } + + /* Now update our mapping information within the cb_set struct */ + mapping_info = kzalloc(sizeof(struct cam_dma_buff_info), GFP_KERNEL); + if (!mapping_info) { + rc = -ENOMEM; + goto err_mapping_info; + } + + mapping_info->ion_fd = 0xDEADBEEF; + mapping_info->buf = NULL; + mapping_info->attach = NULL; + mapping_info->table = table; + mapping_info->paddr = iova; + mapping_info->len = virt_len; + mapping_info->iommu_dir = iommu_dir; + mapping_info->ref_count = 1; + mapping_info->phys_len = phys_len; + + CDBG("%s: paddr = %p, len = %zx, phys_len = %zx", + __func__, (void *)mapping_info->paddr, + mapping_info->len, mapping_info->phys_len); + + list_add(&mapping_info->list, &iommu_cb_set.cb_info[idx].smmu_buf_list); + + *virt_addr = (dma_addr_t)iova; + + CDBG("%s: mapped virtual address = %lx\n", __func__, + (unsigned long)*virt_addr); + return 0; + +err_mapping_info: + unmapped = iommu_unmap(domain, iova, virt_len); + if (unmapped != virt_len) + pr_err("Unmapped only %zx instead of %zx", unmapped, virt_len); +err_iommu_map: + __free_pages(sg_page(table->sgl), get_order(phys_len)); +err_page_alloc: + sg_free_table(table); +err_sg_alloc: + kfree(table); +err_table_alloc: + return rc; +} + +static int 
cam_smmu_free_scratch_buffer_remove_from_list( + struct cam_dma_buff_info *mapping_info, + int idx) +{ + int rc = 0; + size_t unmapped; + struct iommu_domain *domain = + iommu_cb_set.cb_info[idx].mapping->domain; + struct scratch_mapping *scratch_map = + &iommu_cb_set.cb_info[idx].scratch_map; + + if (!mapping_info->table) { + pr_err("Error: Invalid params: dev = %p, table = %p, ", + (void *)iommu_cb_set.cb_info[idx].dev, + (void *)mapping_info->table); + return -EINVAL; + } + + /* Clean up the mapping_info struct from the list */ + unmapped = iommu_unmap(domain, mapping_info->paddr, mapping_info->len); + if (unmapped != mapping_info->len) + pr_err("Unmapped only %zx instead of %zx", + unmapped, mapping_info->len); + + rc = cam_smmu_free_scratch_va(scratch_map, + mapping_info->paddr, + mapping_info->len); + if (rc < 0) { + pr_err("Error: Invalid iova while freeing scratch buffer\n"); + rc = -EINVAL; + } + + __free_pages(sg_page(mapping_info->table->sgl), + get_order(mapping_info->phys_len)); + sg_free_table(mapping_info->table); + kfree(mapping_info->table); + list_del_init(&mapping_info->list); + + kfree(mapping_info); + mapping_info = NULL; + + return rc; +} + +int cam_smmu_get_phy_addr_scratch(int handle, + enum cam_smmu_map_dir dir, + dma_addr_t *paddr_ptr, + size_t virt_len, + size_t phys_len) +{ + int idx, rc; + unsigned int iommu_dir; + + if (!paddr_ptr || !virt_len || !phys_len) { + pr_err("Error: Input pointer or lengths invalid\n"); + return -EINVAL; + } + + if (virt_len < phys_len) { + pr_err("Error: virt_len > phys_len"); + return -EINVAL; + } + + iommu_dir = cam_smmu_translate_dir_to_iommu_dir(dir); + if (iommu_dir == IOMMU_INVALID_DIR) { + pr_err("Error: translate direction failed. dir = %d\n", dir); + return -EINVAL; + } + + idx = GET_SMMU_TABLE_IDX(handle); + if (handle == HANDLE_INIT || idx < 0 || idx >= iommu_cb_set.cb_num) { + pr_err("Error: handle or index invalid. 
idx = %d hdl = %x\n", + idx, handle); + return -EINVAL; + } + + mutex_lock(&iommu_cb_set.cb_info[idx].lock); + if (iommu_cb_set.cb_info[idx].handle != handle) { + pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n", + iommu_cb_set.cb_info[idx].handle, handle); + rc = -EINVAL; + goto error; + } + + if (!iommu_cb_set.cb_info[idx].scratch_buf_support) { + pr_err("Error: Context bank does not support scratch bufs\n"); + rc = -EINVAL; + goto error; + } + + CDBG("%s: smmu handle = %x, idx = %d, dir = %d\n", + __func__, handle, idx, dir); + CDBG("%s: virt_len = %zx, phys_len = %zx\n", + __func__, phys_len, virt_len); + + if (iommu_cb_set.cb_info[idx].state != CAM_SMMU_ATTACH) { + pr_err("Error: Device %s should call SMMU attach before map buffer\n", + iommu_cb_set.cb_info[idx].name); + rc = -EINVAL; + goto error; + } + + if (!IS_ALIGNED(virt_len, PAGE_SIZE)) { + pr_err("Requested scratch buffer length not page aligned"); + rc = -EINVAL; + goto error; + } + + if (!IS_ALIGNED(virt_len, phys_len)) { + pr_err("Requested virtual length not aligned with physical length"); + rc = -EINVAL; + goto error; + } + + rc = cam_smmu_alloc_scratch_buffer_add_to_list(idx, + virt_len, + phys_len, + iommu_dir, + paddr_ptr); + if (rc < 0) { + pr_err("Error: mapping or add list fail\n"); + goto error; + } + +error: + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return rc; +} + +int cam_smmu_put_phy_addr_scratch(int handle, + dma_addr_t paddr) +{ + int idx; + int rc = -1; + struct cam_dma_buff_info *mapping_info; + + /* find index in the iommu_cb_set.cb_info */ + idx = GET_SMMU_TABLE_IDX(handle); + if (handle == HANDLE_INIT || idx < 0 || idx >= iommu_cb_set.cb_num) { + pr_err("Error: handle or index invalid. 
idx = %d hdl = %x\n", + idx, handle); + return -EINVAL; + } + + mutex_lock(&iommu_cb_set.cb_info[idx].lock); + if (iommu_cb_set.cb_info[idx].handle != handle) { + pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n", + iommu_cb_set.cb_info[idx].handle, handle); + rc = -EINVAL; + goto handle_err; + } + + if (!iommu_cb_set.cb_info[idx].scratch_buf_support) { + pr_err("Error: Context bank does not support scratch buffers"); + rc = -EINVAL; + goto handle_err; + } + + /* Based on virtual address and index, we can find mapping info + * of the scratch buffer + */ + mapping_info = cam_smmu_find_mapping_by_virt_address(idx, paddr); + if (!mapping_info) { + pr_err("Error: Invalid params\n"); + rc = -EINVAL; + goto handle_err; + } + + /* unmapping one buffer from device */ + rc = cam_smmu_free_scratch_buffer_remove_from_list(mapping_info, idx); + if (rc < 0) { + pr_err("Error: unmap or remove list fail\n"); + goto handle_err; + } + +handle_err: + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return rc; +} + +int cam_smmu_get_phy_addr(int handle, int ion_fd, + enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr, + size_t *len_ptr) +{ + int idx, rc; + enum dma_data_direction dma_dir; + enum cam_smmu_buf_state buf_state; + + if (!paddr_ptr || !len_ptr) { + pr_err("Error: Input pointers are invalid\n"); + return -EINVAL; + } + /* clean the content from clients */ + *paddr_ptr = (dma_addr_t)NULL; + *len_ptr = (size_t)0; + + dma_dir = cam_smmu_translate_dir(dir); + if (dma_dir == DMA_NONE) { + pr_err("Error: translate direction failed. dir = %d\n", dir); + return -EINVAL; + } + + idx = GET_SMMU_TABLE_IDX(handle); + if (handle == HANDLE_INIT || idx < 0 || idx >= iommu_cb_set.cb_num) { + pr_err("Error: handle or index invalid. 
idx = %d hdl = %x\n", + idx, handle); + return -EINVAL; + } + + mutex_lock(&iommu_cb_set.cb_info[idx].lock); + if (iommu_cb_set.cb_info[idx].handle != handle) { + pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n", + iommu_cb_set.cb_info[idx].handle, handle); + rc = -EINVAL; + goto get_addr_end; + } + + if (iommu_cb_set.cb_info[idx].state != CAM_SMMU_ATTACH) { + pr_err("Error: Device %s should call SMMU attach before map buffer\n", + iommu_cb_set.cb_info[idx].name); + rc = -EINVAL; + goto get_addr_end; + } + + buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, paddr_ptr, len_ptr); + if (buf_state == CAM_SMMU_BUFF_EXIST) { + CDBG("ion_fd:%d already in the list, give same addr back", + ion_fd); + rc = 0; + goto get_addr_end; + } + rc = cam_smmu_map_buffer_and_add_to_list(idx, ion_fd, dma_dir, + paddr_ptr, len_ptr); + if (rc < 0) { + pr_err("Error: mapping or add list fail\n"); + goto get_addr_end; + } + +get_addr_end: + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return rc; +} +EXPORT_SYMBOL(cam_smmu_get_phy_addr); + +int cam_smmu_put_phy_addr(int handle, int ion_fd) +{ + int idx, rc; + struct cam_dma_buff_info *mapping_info; + + /* find index in the iommu_cb_set.cb_info */ + idx = GET_SMMU_TABLE_IDX(handle); + if (handle == HANDLE_INIT || idx < 0 || idx >= iommu_cb_set.cb_num) { + pr_err("Error: handle or index invalid. idx = %d hdl = %x\n", + idx, handle); + return -EINVAL; + } + + mutex_lock(&iommu_cb_set.cb_info[idx].lock); + if (iommu_cb_set.cb_info[idx].handle != handle) { + pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n", + iommu_cb_set.cb_info[idx].handle, handle); + rc = -EINVAL; + goto put_addr_end; + } + + /* based on ion fd and index, we can find mapping info of buffer */ + mapping_info = cam_smmu_find_mapping_by_ion_index(idx, ion_fd); + if (!mapping_info) { + pr_err("Error: Invalid params! 
idx = %d, fd = %d\n", + idx, ion_fd); + rc = -EINVAL; + goto put_addr_end; + } + + mapping_info->ref_count--; + if (mapping_info->ref_count > 0) { + CDBG("There are still %u buffer(s) with same fd %d", + mapping_info->ref_count, mapping_info->ion_fd); + rc = 0; + goto put_addr_end; + } + + /* unmapping one buffer from device */ + rc = cam_smmu_unmap_buf_and_remove_from_list(mapping_info, idx); + if (rc < 0) { + pr_err("Error: unmap or remove list fail\n"); + goto put_addr_end; + } + +put_addr_end: + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return rc; +} +EXPORT_SYMBOL(cam_smmu_put_phy_addr); + +int cam_smmu_destroy_handle(int handle) +{ + int idx; + + idx = GET_SMMU_TABLE_IDX(handle); + if (handle == HANDLE_INIT || idx < 0 || idx >= iommu_cb_set.cb_num) { + pr_err("Error: handle or index invalid. idx = %d hdl = %x\n", + idx, handle); + return -EINVAL; + } + + mutex_lock(&iommu_cb_set.cb_info[idx].lock); + if (iommu_cb_set.cb_info[idx].handle != handle) { + pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n", + iommu_cb_set.cb_info[idx].handle, handle); + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return -EINVAL; + } + + if (!list_empty_careful(&iommu_cb_set.cb_info[idx].smmu_buf_list)) { + pr_err("Client %s buffer list is not clean!\n", + iommu_cb_set.cb_info[idx].name); + cam_smmu_print_list(idx); + cam_smmu_clean_buffer_list(idx); + } + + iommu_cb_set.cb_info[idx].cb_count = 0; + iommu_cb_set.cb_info[idx].handle = HANDLE_INIT; + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return 0; +} +EXPORT_SYMBOL(cam_smmu_destroy_handle); + +/*This function can only be called after smmu driver probe*/ +int cam_smmu_get_num_of_clients(void) +{ + return iommu_cb_set.cb_num; +} + +static void cam_smmu_release_cb(struct platform_device *pdev) +{ + int i = 0; + + for (i = 0; i < iommu_cb_set.cb_num; i++) { + arm_iommu_detach_device(iommu_cb_set.cb_info[i].dev); + arm_iommu_release_mapping(iommu_cb_set.cb_info[i].mapping); + } + + devm_kfree(&pdev->dev, 
iommu_cb_set.cb_info); + iommu_cb_set.cb_num = 0; +} + +static int cam_smmu_setup_cb(struct cam_context_bank_info *cb, + struct device *dev) +{ + int rc = 0; + int disable_htw = 1; + + if (!cb || !dev) { + pr_err("Error: invalid input params\n"); + return -EINVAL; + } + + cb->dev = dev; + /* Reserve 256M if scratch buffer support is desired + * and initialize the scratch mapping structure + */ + if (cb->scratch_buf_support) { + cb->va_start = SCRATCH_ALLOC_END; + cb->va_len = VA_SPACE_END - SCRATCH_ALLOC_END; + + rc = cam_smmu_init_scratch_map(&cb->scratch_map, + SCRATCH_ALLOC_START, + SCRATCH_ALLOC_END - SCRATCH_ALLOC_START, + 0); + if (rc < 0) { + pr_err("Error: failed to create scratch map\n"); + rc = -ENODEV; + goto end; + } + } else { + cb->va_start = SZ_128K; + cb->va_len = VA_SPACE_END - SZ_128K; + } + + /* create a virtual mapping */ + cb->mapping = arm_iommu_create_mapping(msm_iommu_get_bus(dev), + cb->va_start, cb->va_len); + if (IS_ERR(cb->mapping)) { + pr_err("Error: create mapping Failed\n"); + rc = -ENODEV; + goto end; + } + + /* + * Set the domain attributes + * disable L2 redirect since it decreases + * performance + */ + if (iommu_domain_set_attr(cb->mapping->domain, + DOMAIN_ATTR_COHERENT_HTW_DISABLE, + &disable_htw)) { + pr_err("Error: couldn't disable coherent HTW\n"); + rc = -ENODEV; + goto err_set_attr; + } + return 0; +err_set_attr: + arm_iommu_release_mapping(cb->mapping); +end: + return rc; +} + +static int cam_alloc_smmu_context_banks(struct device *dev) +{ + struct device_node *domains_child_node = NULL; + if (!dev) { + pr_err("Error: Invalid device\n"); + return -ENODEV; + } + + iommu_cb_set.cb_num = 0; + + /* traverse thru all the child nodes and increment the cb count */ + for_each_child_of_node(dev->of_node, domains_child_node) { + if (of_device_is_compatible(domains_child_node, + "qcom,msm-cam-smmu-cb")) + iommu_cb_set.cb_num++; + + if (of_device_is_compatible(domains_child_node, + "qcom,qsmmu-cam-cb")) + iommu_cb_set.cb_num++; + } + 
+ if (iommu_cb_set.cb_num == 0) { + pr_err("Error: no context banks present\n"); + return -ENOENT; + } + + /* allocate memory for the context banks */ + iommu_cb_set.cb_info = devm_kzalloc(dev, + iommu_cb_set.cb_num * sizeof(struct cam_context_bank_info), + GFP_KERNEL); + + if (!iommu_cb_set.cb_info) { + pr_err("Error: cannot allocate context banks\n"); + return -ENOMEM; + } + + cam_smmu_reset_iommu_table(CAM_SMMU_TABLE_INIT); + iommu_cb_set.cb_init_count = 0; + + CDBG("no of context banks :%d\n", iommu_cb_set.cb_num); + return 0; +} + +static int cam_populate_smmu_context_banks(struct device *dev, + enum cam_iommu_type type) +{ + int rc = 0; + struct cam_context_bank_info *cb; + struct device *ctx; + + if (!dev) { + pr_err("Error: Invalid device\n"); + return -ENODEV; + } + + /* check the bounds */ + if (iommu_cb_set.cb_init_count >= iommu_cb_set.cb_num) { + pr_err("Error: populate more than allocated cb\n"); + rc = -EBADHANDLE; + goto cb_init_fail; + } + + /* read the context bank from cb set */ + cb = &iommu_cb_set.cb_info[iommu_cb_set.cb_init_count]; + + /* set the name of the context bank */ + rc = of_property_read_string(dev->of_node, "label", &cb->name); + if (rc) { + pr_err("Error: failed to read label from sub device\n"); + goto cb_init_fail; + } + + /* Check if context bank supports scratch buffers */ + if (of_property_read_bool(dev->of_node, "qcom,scratch-buf-support")) + cb->scratch_buf_support = 1; + else + cb->scratch_buf_support = 0; + + /* set the secure/non secure domain type */ + if (of_property_read_bool(dev->of_node, "qcom,secure-context")) + cb->is_secure = CAM_SECURE; + else + cb->is_secure = CAM_NON_SECURE; + + CDBG("cb->name :%s, cb->is_secure :%d, cb->scratch_support :%d\n", + cb->name, cb->is_secure, cb->scratch_buf_support); + + /* set up the iommu mapping for the context bank */ + if (type == CAM_QSMMU) { + ctx = msm_iommu_get_ctx(cb->name); + if (IS_ERR_OR_NULL(ctx)) { + rc = PTR_ERR(ctx); + pr_err("Invalid pointer of ctx : %s rc = 
%d\n", + cb->name, rc); + return -EINVAL; + } + CDBG("getting QSMMU ctx : %s\n", cb->name); + } else { + ctx = dev; + CDBG("getting Arm SMMU ctx : %s\n", cb->name); + } + rc = cam_smmu_setup_cb(cb, ctx); + if (rc < 0) + pr_err("Error: failed to setup cb : %s\n", cb->name); + + iommu_set_fault_handler(cb->mapping->domain, + cam_smmu_iommu_fault_handler, + (void *)cb->name); + + /* increment count to next bank */ + iommu_cb_set.cb_init_count++; + + CDBG("X: cb init count :%d\n", iommu_cb_set.cb_init_count); + return rc; + +cb_init_fail: + iommu_cb_set.cb_info = NULL; + return rc; +} + +static int cam_smmu_probe(struct platform_device *pdev) +{ + int rc = 0; + struct device *dev = &pdev->dev; + + if (of_device_is_compatible(dev->of_node, "qcom,msm-cam-smmu")) { + rc = cam_alloc_smmu_context_banks(dev); + if (rc < 0) { + pr_err("Error: allocating context banks\n"); + return -ENOMEM; + } + } + if (of_device_is_compatible(dev->of_node, "qcom,msm-cam-smmu-cb")) { + rc = cam_populate_smmu_context_banks(dev, CAM_ARM_SMMU); + if (rc < 0) { + pr_err("Error: populating context banks\n"); + return -ENOMEM; + } + return rc; + } + if (of_device_is_compatible(dev->of_node, "qcom,qsmmu-cam-cb")) { + rc = cam_populate_smmu_context_banks(dev, CAM_QSMMU); + if (rc < 0) { + pr_err("Error: populating context banks\n"); + return -ENOMEM; + } + return rc; + } + + /* probe thru all the subdevices */ + rc = of_platform_populate(pdev->dev.of_node, msm_cam_smmu_dt_match, + NULL, &pdev->dev); + if (rc < 0) + pr_err("Error: populating devices\n"); + + INIT_WORK(&iommu_cb_set.smmu_work, cam_smmu_page_fault_work); + mutex_init(&iommu_cb_set.payload_list_lock); + INIT_LIST_HEAD(&iommu_cb_set.payload_list); + + return rc; +} + +static int cam_smmu_remove(struct platform_device *pdev) +{ + /* release all the context banks and memory allocated */ + cam_smmu_reset_iommu_table(CAM_SMMU_TABLE_DEINIT); + if (of_device_is_compatible(pdev->dev.of_node, "qcom,msm-cam-smmu")) + cam_smmu_release_cb(pdev); + 
return 0; +} + +static struct platform_driver cam_smmu_driver = { + .probe = cam_smmu_probe, + .remove = cam_smmu_remove, + .driver = { + .name = "msm_cam_smmu", + .owner = THIS_MODULE, + .of_match_table = msm_cam_smmu_dt_match, + }, +}; + +static int __init cam_smmu_init_module(void) +{ + return platform_driver_register(&cam_smmu_driver); +} + +static void __exit cam_smmu_exit_module(void) +{ + platform_driver_unregister(&cam_smmu_driver); +} + +module_init(cam_smmu_init_module); +module_exit(cam_smmu_exit_module); +MODULE_DESCRIPTION("MSM Camera SMMU driver"); +MODULE_LICENSE("GPL v2"); + diff --git a/camera/common/cam_smmu_api.h b/camera/common/cam_smmu_api.h new file mode 100644 index 00000000..59d08598 --- /dev/null +++ b/camera/common/cam_smmu_api.h @@ -0,0 +1,166 @@ +/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */
+#ifndef _CAM_SMMU_API_H_
+#define _CAM_SMMU_API_H_
+
+#include <linux/dma-direction.h>
+#include <linux/module.h>
+#include <linux/dma-buf.h>
+#include <asm/dma-iommu.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-attrs.h>
+#include <linux/of_platform.h>
+#include <linux/iommu.h>
+#include <linux/random.h>
+#include <linux/spinlock_types.h>
+#include <linux/mutex.h>
+
+/*
+ * Enum for possible CAM SMMU operations
+ */
+
+enum cam_smmu_ops_param {
+ CAM_SMMU_ATTACH,
+ CAM_SMMU_DETACH,
+ CAM_SMMU_VOTE,
+ CAM_SMMU_DEVOTE,
+ CAM_SMMU_OPS_INVALID
+};
+
+enum cam_smmu_map_dir {
+ CAM_SMMU_MAP_READ,
+ CAM_SMMU_MAP_WRITE,
+ CAM_SMMU_MAP_RW,
+ CAM_SMMU_MAP_INVALID
+};
+
+/**
+ * @param identifier: Unique identifier to be used by clients which they
+ * should get from device tree. CAM SMMU driver will
+ * not enforce how this string is obtained and will
+ * only validate this against the list of permitted
+ * identifiers
+ * @param handle_ptr: Based on the identifier, CAM SMMU driver will
+ * fill the handle pointed by handle_ptr
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_get_handle(char *identifier, int *handle_ptr);
+
+/**
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param op : Operation to be performed. Can be either CAM_SMMU_ATTACH
+ * or CAM_SMMU_DETACH
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_ops(int handle, enum cam_smmu_ops_param op);
+
+/**
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param ion_fd: ION handle identifying the memory buffer.
+ * @param phys_addr : Pointer to physical address where mapped address will be
+ * returned.
+ * @param dir : Mapping direction: which will translate to DMA_BIDIRECTIONAL,
+ * DMA_TO_DEVICE or DMA_FROM_DEVICE
+ * @param len : Length of buffer mapped returned by CAM SMMU driver.
+ * @return Status of operation. Negative in case of error. 
Zero otherwise.
+ */
+int cam_smmu_get_phy_addr(int handle,
+ int ion_fd, enum cam_smmu_map_dir dir,
+ dma_addr_t *dma_addr, size_t *len_ptr);
+
+/**
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param ion_fd: ION handle identifying the memory buffer.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_put_phy_addr(int handle, int ion_fd);
+
+/**
+ * @brief : Allocates a scratch buffer
+ *
+ * This function allocates a scratch virtual buffer of length virt_len in the
+ * device virtual address space mapped to phys_len physically contiguous bytes
+ * in that device's SMMU.
+ *
+ * virt_len and phys_len are expected to be aligned to PAGE_SIZE and with each
+ * other, otherwise -EINVAL is returned.
+ *
+ * -EINVAL will be returned if virt_len is less than phys_len.
+ *
+ * Passing a too large phys_len might also cause failure if that much size is
+ * not available for allocation in a physically contiguous way.
+ *
+ * @param handle : Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param dir : Direction of mapping which will translate to IOMMU_READ,
+ * IOMMU_WRITE or a bit mask of both.
+ * @param paddr_ptr: Device virtual address that the client device will be
+ * able to read from/write to
+ * @param virt_len : Virtual length of the scratch buffer
+ * @param phys_len : Physical length of the scratch buffer
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+int cam_smmu_get_phy_addr_scratch(int handle,
+ enum cam_smmu_map_dir dir,
+ dma_addr_t *paddr_ptr,
+ size_t virt_len,
+ size_t phys_len);
+
+/**
+ * @brief : Frees a scratch buffer
+ *
+ * This function frees a scratch buffer and releases the corresponding SMMU
+ * mappings.
+ *
+ * @param handle : Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param paddr_ptr: Device virtual address of client's scratch buffer that
+ * will be freed.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+int cam_smmu_put_phy_addr_scratch(int handle,
+ dma_addr_t paddr);
+
+/**
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_destroy_handle(int handle);
+
+/**
+ * @return number of clients. Zero in case of error.
+ */
+int cam_smmu_get_num_of_clients(void);
+
+/**
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @return Index of SMMU client. Negative in case of error.
+ */
+int cam_smmu_find_index_by_handle(int hdl);
+
+/**
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param client_page_fault_handler: Callback invoked on an IOMMU page fault
+ * @param token: Opaque pointer passed back to the handler on a page fault
+ */
+void cam_smmu_reg_client_page_fault_handler(int handle,
+ void (*client_page_fault_handler)(struct iommu_domain *,
+ struct device *, unsigned long,
+ int, void*), void *token);
+
+#endif /* _CAM_SMMU_API_H_ */
diff --git a/camera/common/cam_soc_api.c b/camera/common/cam_soc_api.c
new file mode 100644
index 00000000..d699d091
--- /dev/null
+++ b/camera/common/cam_soc_api.c
@@ -0,0 +1,944 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details. 
+ */ + +#define pr_fmt(fmt) "CAM-SOC %s:%d " fmt, __func__, __LINE__ +#define NO_SET_RATE -1 +#define INIT_RATE -2 + +#ifdef CONFIG_CAM_SOC_API_DBG +#define CDBG(fmt, args...) pr_err(fmt, ##args) +#else +#define CDBG(fmt, args...) pr_debug(fmt, ##args) +#endif + +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/of_platform.h> +#include <linux/msm-bus.h> +#include "cam_soc_api.h" + +struct msm_cam_bus_pscale_data { + struct msm_bus_scale_pdata *pdata; + uint32_t bus_client; + uint32_t num_usecases; + uint32_t num_paths; + unsigned int vector_index; + bool dyn_vote; + struct mutex lock; +}; + +struct msm_cam_bus_pscale_data g_cv[CAM_BUS_CLIENT_MAX]; + + +/* Get all clocks from DT */ +int msm_camera_get_clk_info(struct platform_device *pdev, + struct msm_cam_clk_info **clk_info, + struct clk ***clk_ptr, + size_t *num_clk) +{ + int rc = 0; + size_t cnt, tmp; + uint32_t *rates, i = 0; + const char *clk_ctl = NULL; + bool clock_cntl_support = false; + struct device_node *of_node; + + if (!pdev || !clk_info || !num_clk) + return -EINVAL; + + of_node = pdev->dev.of_node; + + cnt = of_property_count_strings(of_node, "clock-names"); + if (cnt <= 0) { + pr_err("err: No clocks found in DT=%zu\n", cnt); + return -EINVAL; + } + + tmp = of_property_count_u32_elems(of_node, "qcom,clock-rates"); + if (tmp <= 0) { + pr_err("err: No clk rates device tree, count=%zu", tmp); + return -EINVAL; + } + + if (cnt != tmp) { + pr_err("err: clk name/rates mismatch, strings=%zu, rates=%zu\n", + cnt, tmp); + return -EINVAL; + } + + if (of_property_read_bool(of_node, "qcom,clock-cntl-support")) { + tmp = of_property_count_strings(of_node, + "qcom,clock-control"); + if (tmp <= 0) { + pr_err("err: control strings not found in DT count=%zu", + tmp); + return -EINVAL; + } + if (cnt != tmp) { + pr_err("err: controls mismatch, strings=%zu, ctl=%zu\n", + cnt, tmp); + return -EINVAL; + } + clock_cntl_support = true; + } + + *num_clk = cnt; + + *clk_info = devm_kcalloc(&pdev->dev, cnt, 
+ sizeof(struct msm_cam_clk_info), GFP_KERNEL); + if (!*clk_info) + return -ENOMEM; + + *clk_ptr = devm_kcalloc(&pdev->dev, cnt, sizeof(struct clk *), + GFP_KERNEL); + if (!*clk_ptr) { + rc = -ENOMEM; + goto err1; + } + + rates = devm_kcalloc(&pdev->dev, cnt, sizeof(long), GFP_KERNEL); + if (!rates) { + rc = -ENOMEM; + goto err2; + } + + rc = of_property_read_u32_array(of_node, "qcom,clock-rates", + rates, cnt); + if (rc < 0) { + pr_err("err: failed reading clock rates\n"); + rc = -EINVAL; + goto err3; + } + + for (i = 0; i < cnt; i++) { + rc = of_property_read_string_index(of_node, "clock-names", + i, &((*clk_info)[i].clk_name)); + if (rc < 0) { + pr_err("%s reading clock-name failed index %d\n", + __func__, i); + rc = -EINVAL; + goto err3; + } + + CDBG("dbg: clk-name[%d] = %s\n", i, (*clk_info)[i].clk_name); + if (clock_cntl_support) { + rc = of_property_read_string_index(of_node, + "qcom,clock-control", i, &clk_ctl); + if (rc < 0) { + pr_err("%s reading clock-control failed index %d\n", + __func__, i); + rc = -EINVAL; + goto err3; + } + + if (!strcmp(clk_ctl, "NO_SET_RATE")) + (*clk_info)[i].clk_rate = NO_SET_RATE; + else if (!strcmp(clk_ctl, "INIT_RATE")) + (*clk_info)[i].clk_rate = INIT_RATE; + else if (!strcmp(clk_ctl, "SET_RATE")) + (*clk_info)[i].clk_rate = rates[i]; + else { + pr_err("%s: error: clock control has invalid value\n", + __func__); + rc = -EBUSY; + goto err3; + } + } else + (*clk_info)[i].clk_rate = + (rates[i] == 0) ? 
(long)-1 : rates[i]; + + CDBG("dbg: clk-rate[%d] = rate: %ld\n", + i, (*clk_info)[i].clk_rate); + + (*clk_ptr)[i] = + devm_clk_get(&pdev->dev, (*clk_info)[i].clk_name); + if (IS_ERR((*clk_ptr)[i])) { + rc = PTR_ERR((*clk_ptr)[i]); + goto err4; + } + CDBG("clk ptr[%d] :%p\n", i, (*clk_ptr)[i]); + } + + devm_kfree(&pdev->dev, rates); + return rc; + +err4: + for (--i; i >= 0; i--) + devm_clk_put(&pdev->dev, (*clk_ptr)[i]); +err3: + devm_kfree(&pdev->dev, rates); +err2: + devm_kfree(&pdev->dev, *clk_ptr); +err1: + devm_kfree(&pdev->dev, *clk_info); + return rc; +} +EXPORT_SYMBOL(msm_camera_get_clk_info); + +/* Get all clocks and multiple rates from DT */ +int msm_camera_get_clk_info_and_rates( + struct platform_device *pdev, + struct msm_cam_clk_info **pclk_info, + struct clk ***pclks, + uint32_t ***pclk_rates, + size_t *num_set, + size_t *num_clk) +{ + int rc = 0, tmp_var, cnt, tmp; + uint32_t i = 0, j = 0; + struct device_node *of_node; + uint32_t **rates; + struct clk **clks; + struct msm_cam_clk_info *clk_info; + + if (!pdev || !pclk_info || !num_clk + || !pclk_rates || !pclks || !num_set) + return -EINVAL; + + of_node = pdev->dev.of_node; + + cnt = of_property_count_strings(of_node, "clock-names"); + if (cnt <= 0) { + pr_err("err: No clocks found in DT=%d\n", cnt); + return -EINVAL; + } + + tmp = of_property_count_u32_elems(of_node, "qcom,clock-rates"); + if (tmp <= 0) { + pr_err("err: No clk rates device tree, count=%d\n", tmp); + return -EINVAL; + } + + if ((tmp % cnt) != 0) { + pr_err("err: clk name/rates mismatch, strings=%d, rates=%d\n", + cnt, tmp); + return -EINVAL; + } + + *num_clk = cnt; + *num_set = (tmp / cnt); + + clk_info = devm_kcalloc(&pdev->dev, cnt, + sizeof(struct msm_cam_clk_info), GFP_KERNEL); + if (!clk_info) + return -ENOMEM; + + clks = devm_kcalloc(&pdev->dev, cnt, sizeof(struct clk *), + GFP_KERNEL); + if (!clks) { + rc = -ENOMEM; + goto err1; + } + + rates = devm_kcalloc(&pdev->dev, *num_set, + sizeof(uint32_t *), GFP_KERNEL); + if 
(!rates) { + rc = -ENOMEM; + goto err2; + } + + for (i = 0; i < *num_set; i++) { + rates[i] = devm_kcalloc(&pdev->dev, *num_clk, + sizeof(uint32_t), GFP_KERNEL); + if (!rates[i]) { + rc = -ENOMEM; + for (--i; i >= 0; i--) + devm_kfree(&pdev->dev, rates[i]); + goto err3; + } + } + + tmp_var = 0; + for (i = 0; i < *num_set; i++) { + for (j = 0; j < *num_clk; j++) { + rc = of_property_read_u32_index(of_node, + "qcom,clock-rates", tmp_var++, &rates[i][j]); + if (rc < 0) { + pr_err("err: failed reading clock rates\n"); + rc = -EINVAL; + goto err4; + } + CDBG("Clock rate idx %d idx %d value %d\n", + i, j, rates[i][j]); + } + } + for (i = 0; i < *num_clk; i++) { + rc = of_property_read_string_index(of_node, "clock-names", + i, &clk_info[i].clk_name); + if (rc < 0) { + pr_err("%s reading clock-name failed index %d\n", + __func__, i); + rc = -EINVAL; + goto err4; + } + + CDBG("dbg: clk-name[%d] = %s\n", i, clk_info[i].clk_name); + + clks[i] = + devm_clk_get(&pdev->dev, clk_info[i].clk_name); + if (IS_ERR(clks[i])) { + rc = PTR_ERR(clks[i]); + goto err5; + } + CDBG("clk ptr[%d] :%p\n", i, clks[i]); + } + *pclk_info = clk_info; + *pclks = clks; + *pclk_rates = rates; + + return rc; + +err5: + for (--i; i >= 0; i--) + devm_clk_put(&pdev->dev, clks[i]); +err4: + for (i = 0; i < *num_set; i++) + devm_kfree(&pdev->dev, rates[i]); +err3: + devm_kfree(&pdev->dev, rates); +err2: + devm_kfree(&pdev->dev, clks); +err1: + devm_kfree(&pdev->dev, clk_info); + return rc; +} +EXPORT_SYMBOL(msm_camera_get_clk_info_and_rates); + +/* Enable/Disable all clocks */ +int msm_camera_clk_enable(struct device *dev, + struct msm_cam_clk_info *clk_info, + struct clk **clk_ptr, int num_clk, int enable) +{ + int i; + int rc = 0; + long clk_rate; + + if (enable) { + for (i = 0; i < num_clk; i++) { + CDBG("enable %s\n", clk_info[i].clk_name); + if (clk_info[i].clk_rate > 0) { + clk_rate = clk_round_rate(clk_ptr[i], + clk_info[i].clk_rate); + if (clk_rate < 0) { + pr_err("%s round failed\n", + 
clk_info[i].clk_name); + goto cam_clk_set_err; + } + rc = clk_set_rate(clk_ptr[i], + clk_rate); + if (rc < 0) { + pr_err("%s set failed\n", + clk_info[i].clk_name); + goto cam_clk_set_err; + } + + } else if (clk_info[i].clk_rate == INIT_RATE) { + clk_rate = clk_get_rate(clk_ptr[i]); + if (clk_rate == 0) { + clk_rate = + clk_round_rate(clk_ptr[i], 0); + if (clk_rate < 0) { + pr_err("%s round rate failed\n", + clk_info[i].clk_name); + goto cam_clk_set_err; + } + rc = clk_set_rate(clk_ptr[i], + clk_rate); + if (rc < 0) { + pr_err("%s set rate failed\n", + clk_info[i].clk_name); + goto cam_clk_set_err; + } + } + } + rc = clk_prepare_enable(clk_ptr[i]); + if (rc < 0) { + pr_err("%s enable failed\n", + clk_info[i].clk_name); + goto cam_clk_enable_err; + } + if (clk_info[i].delay > 20) { + msleep(clk_info[i].delay); + } else if (clk_info[i].delay) { + usleep_range(clk_info[i].delay * 1000, + (clk_info[i].delay * 1000) + 1000); + } + } + } else { + for (i = num_clk - 1; i >= 0; i--) { + if (clk_ptr[i] != NULL) { + CDBG("%s disable %s\n", __func__, + clk_info[i].clk_name); + clk_disable_unprepare(clk_ptr[i]); + } + } + } + return rc; + +cam_clk_enable_err: +cam_clk_set_err: + for (i--; i >= 0; i--) { + if (clk_ptr[i] != NULL) + clk_disable_unprepare(clk_ptr[i]); + } + return rc; +} +EXPORT_SYMBOL(msm_camera_clk_enable); + +/* Set rate on a specific clock */ +long msm_camera_clk_set_rate(struct device *dev, + struct clk *clk, + long clk_rate) +{ + int rc = 0; + long rate = 0; + + if (!dev || !clk || (clk_rate < 0)) + return -EINVAL; + + CDBG("clk : %p, enable : %ld\n", clk, clk_rate); + + if (clk_rate > 0) { + rate = clk_round_rate(clk, clk_rate); + if (rate < 0) { + pr_err("round rate failed\n"); + return -EINVAL; + } + + rc = clk_set_rate(clk, rate); + if (rc < 0) { + pr_err("set rate failed\n"); + return -EINVAL; + } + } + + return rate; +} +EXPORT_SYMBOL(msm_camera_clk_set_rate); + +/* release memory allocated for clocks */ +int msm_camera_put_clk_info(struct 
platform_device *pdev, + struct msm_cam_clk_info **clk_info, + struct clk ***clk_ptr, int cnt) +{ + int i; + + for (i = cnt - 1; i >= 0; i--) { + if (clk_ptr[i] != NULL) + devm_clk_put(&pdev->dev, (*clk_ptr)[i]); + + CDBG("clk ptr[%d] :%p\n", i, (*clk_ptr)[i]); + } + devm_kfree(&pdev->dev, *clk_info); + devm_kfree(&pdev->dev, *clk_ptr); + *clk_info = NULL; + *clk_ptr = NULL; + return 0; +} +EXPORT_SYMBOL(msm_camera_put_clk_info); + +int msm_camera_put_clk_info_and_rates(struct platform_device *pdev, + struct msm_cam_clk_info **clk_info, + struct clk ***clk_ptr, uint32_t ***clk_rates, + size_t set, size_t cnt) +{ + int i; + + for (i = set - 1; i >= 0; i--) + devm_kfree(&pdev->dev, (*clk_rates)[i]); + + devm_kfree(&pdev->dev, *clk_rates); + for (i = cnt - 1; i >= 0; i--) { + if (clk_ptr[i] != NULL) + devm_clk_put(&pdev->dev, (*clk_ptr)[i]); + CDBG("clk ptr[%d] :%p\n", i, (*clk_ptr)[i]); + } + devm_kfree(&pdev->dev, *clk_info); + devm_kfree(&pdev->dev, *clk_ptr); + *clk_info = NULL; + *clk_ptr = NULL; + *clk_rates = NULL; + return 0; +} +EXPORT_SYMBOL(msm_camera_put_clk_info_and_rates); + +/* Get regulators from DT */ +int msm_camera_get_regulator_info(struct platform_device *pdev, + struct regulator ***vdd, + int *num_reg) +{ + uint32_t cnt; + int i, rc; + struct device_node *of_node; + const char *name; + char prop_name[32]; + + if (!pdev || !vdd || !num_reg) + return -EINVAL; + + of_node = pdev->dev.of_node; + + if (!of_get_property(of_node, "qcom,vdd-names", NULL)) { + pr_err("err: Regulators property not found\n"); + return -EINVAL; + } + + cnt = of_property_count_strings(of_node, "qcom,vdd-names"); + if (cnt <= 0) { + pr_err("err: no regulators found in device tree, count=%d", + cnt); + return -EINVAL; + } + + *num_reg = cnt; + (*vdd) = devm_kcalloc(&pdev->dev, cnt, sizeof(struct regulator *), + GFP_KERNEL); + if (!*vdd) + return -ENOMEM; + + for (i = 0; i < cnt; i++) { + rc = of_property_read_string_index(of_node, + "qcom,vdd-names", i, &name); + if (rc < 0) { 
+ pr_err("Fail to fetch regulators: %d\n", i); + rc = -EINVAL; + goto err1; + } + + CDBG("regulator-names[%d] = %s\n", i, name); + + snprintf(prop_name, 32, "%s-supply", name); + + if (of_get_property(of_node, prop_name, NULL)) { + (*vdd)[i] = devm_regulator_get(&pdev->dev, name); + if (IS_ERR((*vdd)[i])) { + rc = -EINVAL; + pr_err("Fail to get regulator :%d\n", i); + goto err1; + } + } else { + pr_err("Regulator phandle not found :%s\n", name); + goto err1; + } + CDBG("vdd ptr[%d] :%p\n", i, (*vdd)[i]); + } + + return 0; + +err1: + for (--i; i >= 0; i--) + devm_regulator_put((*vdd)[i]); + devm_kfree(&pdev->dev, *vdd); + return rc; +} +EXPORT_SYMBOL(msm_camera_get_regulator_info); + + +/* Enable/Disable regulators */ +int msm_camera_regulator_enable(struct regulator **vdd, + int cnt, int enable) +{ + int i; + int rc; + + CDBG("cnt : %d, enable : %d\n", cnt, enable); + if (!vdd) { + pr_err("Invalid params"); + return -EINVAL; + } + + for (i = 0; i < cnt; i++) { + if (enable) { + rc = regulator_enable(vdd[i]); + if (rc < 0) { + pr_err("regulator enable failed %d\n", i); + goto error; + } + } else { + rc = regulator_disable(vdd[i]); + if (rc < 0) + pr_err("regulator disable failed %d\n", i); + } + } + + return 0; +error: + for (--i; i > 0; i--) { + if (!IS_ERR_OR_NULL(vdd[i])) + regulator_disable(vdd[i]); + } + return rc; +} +EXPORT_SYMBOL(msm_camera_regulator_enable); + +/* Put regulators regulators */ +void msm_camera_put_regulators(struct platform_device *pdev, + struct regulator ***vdd, + int cnt) +{ + int i; + + if (!*vdd) { + pr_err("Invalid params\n"); + return; + } + + for (i = cnt - 1; i >= 0; i--) { + if (!IS_ERR_OR_NULL((*vdd)[i])) + devm_regulator_put((*vdd)[i]); + CDBG("vdd ptr[%d] :%p\n", i, (*vdd)[i]); + } + + devm_kfree(&pdev->dev, *vdd); + *vdd = NULL; +} +EXPORT_SYMBOL(msm_camera_put_regulators); + +struct resource *msm_camera_get_irq(struct platform_device *pdev, + char *irq_name) +{ + if (!pdev || !irq_name) { + pr_err("Invalid params\n"); + return 
NULL; + } + + CDBG("Get irq for %s\n", irq_name); + return platform_get_resource_byname(pdev, IORESOURCE_IRQ, irq_name); +} +EXPORT_SYMBOL(msm_camera_get_irq); + +int msm_camera_register_irq(struct platform_device *pdev, + struct resource *irq, irq_handler_t handler, + unsigned long irqflags, char *irq_name, void *dev_id) +{ + int rc = 0; + + if (!pdev || !irq || !handler || !irq_name || !dev_id) { + pr_err("Invalid params\n"); + return -EINVAL; + } + + rc = devm_request_irq(&pdev->dev, irq->start, handler, + irqflags, irq_name, dev_id); + if (rc < 0) { + pr_err("irq request fail\n"); + rc = -EINVAL; + } + + CDBG("Registered irq for %s[resource - %p]\n", irq_name, irq); + + return rc; +} +EXPORT_SYMBOL(msm_camera_register_irq); + +int msm_camera_register_threaded_irq(struct platform_device *pdev, + struct resource *irq, irq_handler_t handler_fn, + irq_handler_t thread_fn, unsigned long irqflags, + char *irq_name, void *dev_id) +{ + int rc = 0; + + if (!pdev || !irq || !handler_fn || !thread_fn || + !irq_name || !dev_id) { + pr_err("Invalid params\n"); + return -EINVAL; + } + + rc = devm_request_threaded_irq(&pdev->dev, irq->start, handler_fn, + thread_fn, irqflags, irq_name, dev_id); + if (rc < 0) { + pr_err("irq request fail\n"); + rc = -EINVAL; + } + + CDBG("Registered irq for %s[resource - %p]\n", irq_name, irq); + + return rc; +} +EXPORT_SYMBOL(msm_camera_register_threaded_irq); + +int msm_camera_enable_irq(struct resource *irq, int enable) +{ + if (!irq) { + pr_err("Invalid params\n"); + return -EINVAL; + } + + CDBG("irq Enable %d\n", enable); + if (enable) + enable_irq(irq->start); + else + disable_irq(irq->start); + + return 0; +} +EXPORT_SYMBOL(msm_camera_enable_irq); + +int msm_camera_unregister_irq(struct platform_device *pdev, + struct resource *irq, void *dev_id) +{ + + if (!pdev || !irq || !dev_id) { + pr_err("Invalid params\n"); + return -EINVAL; + } + + CDBG("Un Registering irq for [resource - %p]\n", irq); + devm_free_irq(&pdev->dev, irq->start, 
dev_id); + + return 0; +} +EXPORT_SYMBOL(msm_camera_unregister_irq); + +void __iomem *msm_camera_get_reg_base(struct platform_device *pdev, + char *device_name, int reserve_mem) +{ + struct resource *mem; + void *base; + + if (!pdev || !device_name) { + pr_err("Invalid params\n"); + return NULL; + } + + CDBG("device name :%s\n", device_name); + mem = platform_get_resource_byname(pdev, + IORESOURCE_MEM, device_name); + if (!mem) { + pr_err("err: mem resource %s not found\n", device_name); + return NULL; + } + + if (reserve_mem) { + CDBG("device:%p, mem : %p, size : %d\n", + &pdev->dev, mem, (int)resource_size(mem)); + if (!devm_request_mem_region(&pdev->dev, mem->start, + resource_size(mem), + device_name)) { + pr_err("err: no valid mem region for device:%s\n", + device_name); + return NULL; + } + } + + base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); + if (!base) { + devm_release_mem_region(&pdev->dev, mem->start, + resource_size(mem)); + pr_err("err: ioremap failed: %s\n", device_name); + return NULL; + } + + CDBG("base : %p\n", base); + return base; +} +EXPORT_SYMBOL(msm_camera_get_reg_base); + +uint32_t msm_camera_get_res_size(struct platform_device *pdev, + char *device_name) +{ + struct resource *mem; + + if (!pdev || !device_name) { + pr_err("Invalid params\n"); + return 0; + } + + CDBG("device name :%s\n", device_name); + mem = platform_get_resource_byname(pdev, + IORESOURCE_MEM, device_name); + if (!mem) { + pr_err("err: mem resource %s not found\n", device_name); + return 0; + } + return resource_size(mem); +} +EXPORT_SYMBOL(msm_camera_get_res_size); + + +int msm_camera_put_reg_base(struct platform_device *pdev, + void __iomem *base, char *device_name, int reserve_mem) +{ + struct resource *mem; + + if (!pdev || !base || !device_name) { + pr_err("Invalid params\n"); + return -EINVAL; + } + + CDBG("device name :%s\n", device_name); + mem = platform_get_resource_byname(pdev, + IORESOURCE_MEM, device_name); + if (!mem) { + pr_err("err: mem 
resource %s not found\n", device_name); + return -EINVAL; + } + CDBG("mem : %p, size : %d\n", mem, (int)resource_size(mem)); + + devm_iounmap(&pdev->dev, base); + if (reserve_mem) + devm_release_mem_region(&pdev->dev, + mem->start, resource_size(mem)); + + return 0; +} +EXPORT_SYMBOL(msm_camera_put_reg_base); + +/* Register the bus client */ +uint32_t msm_camera_register_bus_client(struct platform_device *pdev, + enum cam_bus_client id) +{ + int rc = 0; + uint32_t bus_client, num_usecases, num_paths; + struct msm_bus_scale_pdata *pdata; + struct device_node *of_node; + + CDBG("Register client ID: %d\n", id); + + if (id >= CAM_BUS_CLIENT_MAX || !pdev) { + pr_err("Invalid params"); + return -EINVAL; + } + + of_node = pdev->dev.of_node; + + if (!g_cv[id].pdata) { + rc = of_property_read_u32(of_node, "qcom,msm-bus,num-cases", + &num_usecases); + if (rc) { + pr_err("num-usecases not found\n"); + return -EINVAL; + } + rc = of_property_read_u32(of_node, "qcom,msm-bus,num-paths", + &num_paths); + if (rc) { + pr_err("num-usecases not found\n"); + return -EINVAL; + } + + if (num_paths != 1) { + pr_err("Exceeds number of paths\n"); + return -EINVAL; + } + + if (of_property_read_bool(of_node, + "qcom,msm-bus-vector-dyn-vote")) { + if (num_usecases != 2) { + pr_err("Excess or less vectors\n"); + return -EINVAL; + } + g_cv[id].dyn_vote = true; + } + + pdata = msm_bus_cl_get_pdata(pdev); + if (!pdata) { + pr_err("failed get_pdata client_id :%d\n", id); + return -EINVAL; + } + bus_client = msm_bus_scale_register_client(pdata); + if (!bus_client) { + pr_err("Unable to register bus client :%d\n", id); + return -EINVAL; + } + } else { + pr_err("vector already setup client_id : %d\n", id); + return -EINVAL; + } + + g_cv[id].pdata = pdata; + g_cv[id].bus_client = bus_client; + g_cv[id].vector_index = 0; + g_cv[id].num_usecases = num_usecases; + g_cv[id].num_paths = num_paths; + mutex_init(&g_cv[id].lock); + CDBG("Exit Client ID: %d\n", id); + return 0; +} 
+EXPORT_SYMBOL(msm_camera_register_bus_client); + +/* Update the bus bandwidth */ +uint32_t msm_camera_update_bus_bw(int id, uint64_t ab, uint64_t ib) +{ + struct msm_bus_paths *path; + struct msm_bus_scale_pdata *pdata; + int idx = 0; + + if (id >= CAM_BUS_CLIENT_MAX) { + pr_err("Invalid params"); + return -EINVAL; + } + if (g_cv[id].num_usecases != 2 || + g_cv[id].num_paths != 1 || + g_cv[id].dyn_vote != true) { + pr_err("dynamic update not allowed\n"); + return -EINVAL; + } + + mutex_lock(&g_cv[id].lock); + idx = g_cv[id].vector_index; + idx = 1 - idx; + g_cv[id].vector_index = idx; + mutex_unlock(&g_cv[id].lock); + + pdata = g_cv[id].pdata; + path = &(pdata->usecase[idx]); + path->vectors[0].ab = ab; + path->vectors[0].ib = ib; + + CDBG("Register client ID : %d [ab : %llx, ib : %llx], update :%d\n", + id, ab, ib, idx); + msm_bus_scale_client_update_request(g_cv[id].bus_client, idx); + + return 0; +} +EXPORT_SYMBOL(msm_camera_update_bus_bw); + +/* Update the bus vector */ +uint32_t msm_camera_update_bus_vector(enum cam_bus_client id, + int vector_index) +{ + if (id >= CAM_BUS_CLIENT_MAX || g_cv[id].dyn_vote == true) { + pr_err("Invalid params"); + return -EINVAL; + } + + if (vector_index < 0 || vector_index > g_cv[id].num_usecases) { + pr_err("Invalid params"); + return -EINVAL; + } + + CDBG("Register client ID : %d vector idx: %d,\n", id, vector_index); + msm_bus_scale_client_update_request(g_cv[id].bus_client, + vector_index); + + return 0; +} +EXPORT_SYMBOL(msm_camera_update_bus_vector); + +/* Unregister the bus client */ +uint32_t msm_camera_unregister_bus_client(enum cam_bus_client id) +{ + if (id >= CAM_BUS_CLIENT_MAX) { + pr_err("Invalid params"); + return -EINVAL; + } + + CDBG("UnRegister client ID: %d\n", id); + + mutex_destroy(&g_cv[id].lock); + msm_bus_scale_unregister_client(g_cv[id].bus_client); + msm_bus_cl_clear_pdata(g_cv[id].pdata); + memset(&g_cv[id], 0, sizeof(struct msm_cam_bus_pscale_data)); + + return 0; +} 
+EXPORT_SYMBOL(msm_camera_unregister_bus_client); diff --git a/camera/common/cam_soc_api.h b/camera/common/cam_soc_api.h new file mode 100644 index 00000000..80d9346e --- /dev/null +++ b/camera/common/cam_soc_api.h @@ -0,0 +1,386 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef _CAM_SOC_API_H_ +#define _CAM_SOC_API_H_ + +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of_platform.h> +#include <linux/spinlock_types.h> +#include <linux/mutex.h> +#include <linux/clk.h> +#include <linux/regulator/consumer.h> +#include <linux/interrupt.h> +#include <linux/slab.h> +#include <lenovo_soc/qcom/camera2.h> + +enum cam_bus_client { + CAM_BUS_CLIENT_VFE, + CAM_BUS_CLIENT_CPP, + CAM_BUS_CLIENT_FD, + CAM_BUS_CLIENT_JPEG_ENC0, + CAM_BUS_CLIENT_JPEG_ENC1, + CAM_BUS_CLIENT_JPEG_DEC, + CAM_BUS_CLIENT_JPEG_DMA, + CAM_BUS_CLIENT_MAX +}; + +/** + * @brief : Gets clock information from dtsi + * + * This function extracts the clocks information for a specific + * platform device + * + * @param pdev : Platform device to get clocks information + * @param clk_info : Pointer to populate clock information array + * @param clk_ptr : Pointer to populate clock resource pointers + * @param num_clk: Pointer to populate the number of clocks + * extracted from dtsi + * + * @return Status of operation. Negative in case of error. Zero otherwise. 
+ */ + +int msm_camera_get_clk_info(struct platform_device *pdev, + struct msm_cam_clk_info **clk_info, + struct clk ***clk_ptr, + size_t *num_clk); +/** + * @brief : Gets clock information and rates from dtsi + * + * This function extracts the clocks information for a specific + * platform device + * + * @param pdev : Platform device to get clocks information + * @param clk_info : Pointer to populate clock information array + * @param clk_ptr : Pointer to populate clock resource pointers + * @param clk_rates : Pointer to populate clock rates + * @param num_set: Pointer to populate the number of sets of rates + * @param num_clk: Pointer to populate the number of clocks + * extracted from dtsi + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int msm_camera_get_clk_info_and_rates( + struct platform_device *pdev, + struct msm_cam_clk_info **clk_info, + struct clk ***clk_ptr, + uint32_t ***clk_rates, + size_t *num_set, + size_t *num_clk); + +/** + * @brief : Puts clock information + * + * This function releases the memory allocated for the clocks + * + * @param pdev : Pointer to platform device + * @param clk_info : Pointer to release the allocated memory + * @param clk_ptr : Pointer to release the clock resources + * @param cnt : Number of clk resources + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ +int msm_camera_put_clk_info(struct platform_device *pdev, + struct msm_cam_clk_info **clk_info, + struct clk ***clk_ptr, int cnt); +/** + * @brief : Puts clock information + * + * This function releases the memory allocated for the clocks + * + * @param pdev : Pointer to platform device + * @param clk_info : Pointer to release the allocated memory + * @param clk_ptr : Pointer to release the clock resources + * @param clk_ptr : Pointer to release the clock rates + * @param set : Number of sets of clock rates + * @param cnt : Number of clk resources + * + * @return Status of operation. 
Negative in case of error. Zero otherwise. + */ + +int msm_camera_put_clk_info_and_rates(struct platform_device *pdev, + struct msm_cam_clk_info **clk_info, + struct clk ***clk_ptr, uint32_t ***clk_rates, + size_t set, size_t cnt); +/** + * @brief : Enable clocks + * + * This function enables the clocks for a specified device + * + * @param dev : Device to get clocks information + * @param clk_info : Pointer to populate clock information + * @param clk_ptr : Pointer to populate clock information + * @param num_clk: Pointer to populate the number of clocks + * extracted from dtsi + * @param enable : Flag to specify enable/disable + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ + +int msm_camera_clk_enable(struct device *dev, + struct msm_cam_clk_info *clk_info, + struct clk **clk_ptr, + int num_clk, + int enable); +/** + * @brief : Set clock rate + * + * This function sets the rate for a specified clock and + * returns the rounded value + * + * @param dev : Device to get clocks information + * @param clk : Pointer to clock to set rate + * @param clk_rate : Rate to be set + * + * @return Status of operation. Negative in case of error. clk rate otherwise. + */ + +long msm_camera_clk_set_rate(struct device *dev, + struct clk *clk, + long clk_rate); +/** + * @brief : Gets regulator info + * + * This function extracts the regulator information for a specific + * platform device + * + * @param pdev : platform device to get regulator information + * @param vdd: Pointer to populate the regulator names + * @param num_reg: Pointer to populate the number of regulators + * extracted from dtsi + * + * @return Status of operation. Negative in case of error. Zero otherwise. 
+ */ +int msm_camera_get_regulator_info(struct platform_device *pdev, + struct regulator ***vddd, int *num_reg); +/** + * @brief : Enable/Disable the regultors + * + * This function enables/disables the regulators for a specific + * platform device + * + * @param vdd: Pointer to list of regulators + * @param cnt: Number of regulators to enable/disable + * @param enable: Flags specifies either enable/disable + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ + +int msm_camera_regulator_enable(struct regulator **vdd, + int cnt, int enable); + +/** + * @brief : Release the regulators + * + * This function releases the regulator resources. + * + * @param pdev: Pointer to platform device + * @param vdd: Pointer to list of regulators + * @param cnt: Number of regulators to release + */ + +void msm_camera_put_regulators(struct platform_device *pdev, + struct regulator ***vdd, + int cnt); +/** + * @brief : Get the IRQ resource + * + * This function gets the irq resource from dtsi for a specific + * platform device + * + * @param pdev : Platform device to get IRQ + * @param irq_name: Name of the IRQ resource to get from DTSI + * + * @return Pointer to resource if success else null + */ + +struct resource *msm_camera_get_irq(struct platform_device *pdev, + char *irq_name); +/** + * @brief : Register the IRQ + * + * This function registers the irq resource for specified hardware + * + * @param pdev : Platform device to register IRQ resource + * @param irq : IRQ resource + * @param handler : IRQ handler + * @param irqflags : IRQ flags + * @param irq_name: Name of the IRQ + * @param dev : Token of the device + * + * @return Status of operation. Negative in case of error. Zero otherwise. 
+ */ + +int msm_camera_register_irq(struct platform_device *pdev, + struct resource *irq, + irq_handler_t handler, + unsigned long irqflags, + char *irq_name, + void *dev); + +/** + * @brief : Register the threaded IRQ + * + * This function registers the irq resource for specified hardware + * + * @param pdev : Platform device to register IRQ resource + * @param irq : IRQ resource + * @param handler_fn : IRQ handler function + * @param thread_fn : thread handler function + * @param irqflags : IRQ flags + * @param irq_name: Name of the IRQ + * @param dev : Token of the device + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ + +int msm_camera_register_threaded_irq(struct platform_device *pdev, + struct resource *irq, + irq_handler_t handler_fn, + irq_handler_t thread_fn, + unsigned long irqflags, + char *irq_name, + void *dev); + +/** + * @brief : Enable/Disable the IRQ + * + * This function enables or disables a specific IRQ + * + * @param irq : IRQ resource + * @param flag : flag to enable/disable + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ + +int msm_camera_enable_irq(struct resource *irq, int flag); + +/** + * @brief : UnRegister the IRQ + * + * This function Unregisters/Frees the irq resource + * + * @param pdev : Pointer to platform device + * @param irq : IRQ resource + * @param dev : Token of the device + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ + +int msm_camera_unregister_irq(struct platform_device *pdev, + struct resource *irq, void *dev_id); + +/** + * @brief : Gets device register base + * + * This function extracts the device's register base from the dtsi + * for the specified platform device + * + * @param pdev : Platform device to get regulator infor + * @param device_name : Name of the device to fetch the register base + * @param reserve_mem : Flag to decide whether to reserve memory + * region or not. 
+ * + * @return Pointer to resource if success else null + */ + +void __iomem *msm_camera_get_reg_base(struct platform_device *pdev, + char *device_name, int reserve_mem); + +/** + * @brief : Puts device register base + * + * This function releases the memory region for the specified + * resource + * + * @param pdev : Pointer to platform device + * @param base : Pointer to base to unmap + * @param device_name : Device name + * @param reserve_mem : Flag to decide whether to release memory + * region or not. + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ + +int msm_camera_put_reg_base(struct platform_device *pdev, void __iomem *base, + char *device_name, int reserve_mem); + +/** + * @brief : Register the bus client + * + * This function registers the bus client + * + * @param pdev : Pointer to platform device + * @param id : client identifier + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ + +uint32_t msm_camera_register_bus_client(struct platform_device *pdev, + enum cam_bus_client id); + +/** + * @brief : Update bus vector + * + * This function votes for the specified vector to the bus + * + * @param id : client identifier + * @param vector_index : vector index to register + * + * @return Status of operation. Negative in case of error. Zero otherwise. + */ + +uint32_t msm_camera_update_bus_vector(enum cam_bus_client id, + int vector_index); + +/** + * @brief : Update the bus bandwidth + * + * This function updates the bandwidth for the specific client + * + * @param client_id : client identifier + * @param ab : Asolute bandwidth + * @param ib : Instantaneous bandwidth + * + * @return non-zero as client id if success else fail + */ + +uint32_t msm_camera_update_bus_bw(int id, uint64_t ab, uint64_t ib); + +/** + * @brief : UnRegister the bus client + * + * This function unregisters the bus client + * + * @param id : client identifier + * + * @return Status of operation. 
Negative in case of error. Zero otherwise. + */ + +uint32_t msm_camera_unregister_bus_client(enum cam_bus_client id); + +/** + * @brief : Gets resource size + * + * This function returns the size of the resource for the + * specified platform device + * + * @param pdev : Platform device to get regulator infor + * @param device_name : Name of the device to fetch the register base + * + * @return size of the resource + */ + +uint32_t msm_camera_get_res_size(struct platform_device *pdev, + char *device_name); + +#endif diff --git a/camera/common/msm_camera_io_util.c b/camera/common/msm_camera_io_util.c new file mode 100644 index 00000000..f978f97d --- /dev/null +++ b/camera/common/msm_camera_io_util.c @@ -0,0 +1,843 @@ +/* Copyright (c) 2011-2014, The Linux Foundataion. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/delay.h> +#include <linux/clk.h> +#include <linux/gpio.h> +#include <linux/regulator/consumer.h> +#include <linux/io.h> +#include <linux/err.h> +#include <soc/qcom/camera2.h> +#include <linux/msm-bus.h> +#include "msm_camera_io_util.h" + +#define BUFF_SIZE_128 128 + +#undef CDBG +#define CDBG(fmt, args...) 
pr_debug(fmt, ##args) + +void msm_camera_io_w(u32 data, void __iomem *addr) +{ + CDBG("%s: 0x%p %08x\n", __func__, (addr), (data)); + writel_relaxed((data), (addr)); +} + +/* This API is to write a block of data +* to same address +*/ +int32_t msm_camera_io_w_block(const u32 *addr, void __iomem *base, + u32 len) +{ + int i; + + if (!addr || !len || !base) + return -EINVAL; + + for (i = 0; i < len; i++) { + CDBG("%s: len =%d val=%x base =%p\n", __func__, + len, addr[i], base); + writel_relaxed(addr[i], base); + } + return 0; +} + +/* This API is to write a block of registers +* which is like a 2 dimensional array table with +* register offset and data */ +int32_t msm_camera_io_w_reg_block(const u32 *addr, void __iomem *base, + u32 len) +{ + int i; + + if (!addr || !len || !base) + return -EINVAL; + + for (i = 0; i < len; i = i + 2) { + CDBG("%s: len =%d val=%x base =%p reg=%x\n", __func__, + len, addr[i + 1], base, addr[i]); + writel_relaxed(addr[i + 1], base + addr[i]); + } + return 0; +} + +void msm_camera_io_w_mb(u32 data, void __iomem *addr) +{ + CDBG("%s: 0x%p %08x\n", __func__, (addr), (data)); + /* ensure write is done */ + wmb(); + writel_relaxed((data), (addr)); + /* ensure write is done */ + wmb(); +} + +int32_t msm_camera_io_w_mb_block(const u32 *addr, void __iomem *base, u32 len) +{ + int i; + + if (!addr || !len || !base) + return -EINVAL; + + for (i = 0; i < len; i++) { + /* ensure write is done */ + wmb(); + CDBG("%s: len =%d val=%x base =%p\n", __func__, + len, addr[i], base); + writel_relaxed(addr[i], base); + } + /* ensure last write is done */ + wmb(); + return 0; +} + +u32 msm_camera_io_r(void __iomem *addr) +{ + uint32_t data = readl_relaxed(addr); + + CDBG("%s: 0x%p %08x\n", __func__, (addr), (data)); + return data; +} + +u32 msm_camera_io_r_mb(void __iomem *addr) +{ + uint32_t data; + /* ensure read is done */ + rmb(); + data = readl_relaxed(addr); + /* ensure read is done */ + rmb(); + CDBG("%s: 0x%p %08x\n", __func__, (addr), (data)); + 
return data; +} + +void msm_camera_io_memcpy_toio(void __iomem *dest_addr, + void __iomem *src_addr, u32 len) +{ + int i; + u32 *d = (u32 *) dest_addr; + u32 *s = (u32 *) src_addr; + + for (i = 0; i < len; i++) + writel_relaxed(*s++, d++); +} + +int32_t msm_camera_io_poll_value(void __iomem *addr, u32 wait_data, u32 retry, + unsigned long min_usecs, unsigned long max_usecs) +{ + uint32_t tmp, cnt = 0; + int32_t rc = 0; + + if (!addr) + return -EINVAL; + + tmp = msm_camera_io_r(addr); + while ((tmp != wait_data) && (cnt++ < retry)) { + if (min_usecs > 0 && max_usecs > 0) + usleep_range(min_usecs, max_usecs); + tmp = msm_camera_io_r(addr); + } + if (cnt > retry) { + pr_debug("Poll failed by value\n"); + rc = -EINVAL; + } + return rc; +} + +int32_t msm_camera_io_poll_value_wmask(void __iomem *addr, u32 wait_data, + u32 bmask, u32 retry, unsigned long min_usecs, unsigned long max_usecs) +{ + uint32_t tmp, cnt = 0; + int32_t rc = 0; + + if (!addr) + return -EINVAL; + + tmp = msm_camera_io_r(addr); + while (((tmp & bmask) != wait_data) && (cnt++ < retry)) { + if (min_usecs > 0 && max_usecs > 0) + usleep_range(min_usecs, max_usecs); + tmp = msm_camera_io_r(addr); + } + if (cnt > retry) { + pr_debug("Poll failed with mask\n"); + rc = -EINVAL; + } + return rc; +} + +void msm_camera_io_dump(void __iomem *addr, int size, int enable) +{ + char line_str[128], *p_str; + int i; + u32 *p = (u32 *) addr; + u32 data; + + CDBG("%s: addr=%p size=%d\n", __func__, addr, size); + + if (!p || (size <= 0) || !enable) + return; + + line_str[0] = '\0'; + p_str = line_str; + for (i = 0; i < size/4; i++) { + if (i % 4 == 0) { +#ifdef CONFIG_COMPAT + snprintf(p_str, 20, "%016lx: ", (unsigned long) p); + p_str += 18; +#else + snprintf(p_str, 12, "%08lx: ", (unsigned long) p); + p_str += 10; +#endif + } + data = readl_relaxed(p++); + snprintf(p_str, 12, "%08x ", data); + p_str += 9; + if ((i + 1) % 4 == 0) { + pr_err("%s\n", line_str); + line_str[0] = '\0'; + p_str = line_str; + } + } + if 
(line_str[0] != '\0') + pr_err("%s\n", line_str); +} + +void msm_camera_io_dump_wstring_base(void __iomem *addr, + struct msm_cam_dump_string_info *dump_data, + int size) +{ + int i, u = sizeof(struct msm_cam_dump_string_info); + + pr_debug("%s: addr=%p data=%p size=%d u=%d, cnt=%d\n", __func__, + addr, dump_data, size, u, + (size/u)); + + if (!addr || (size <= 0) || !dump_data) { + pr_err("%s: addr=%p data=%p size=%d\n", __func__, + addr, dump_data, size); + return; + } + for (i = 0; i < (size / u); i++) + pr_debug("%s 0x%x\n", (dump_data + i)->print, + readl_relaxed((dump_data + i)->offset + addr)); +} + +void msm_camera_io_memcpy(void __iomem *dest_addr, + void __iomem *src_addr, u32 len) +{ + CDBG("%s: %p %p %d\n", __func__, dest_addr, src_addr, len); + msm_camera_io_memcpy_toio(dest_addr, src_addr, len / 4); +} + +void msm_camera_io_memcpy_mb(void __iomem *dest_addr, + void __iomem *src_addr, u32 len) +{ + int i; + u32 *d = (u32 *) dest_addr; + u32 *s = (u32 *) src_addr; + /* This is generic function called who needs to register + writes with memory barrier */ + wmb(); + for (i = 0; i < (len / 4); i++) { + msm_camera_io_w(*s++, d++); + /* ensure write is done after every iteration */ + wmb(); + } +} + +int msm_cam_clk_sel_src(struct device *dev, struct msm_cam_clk_info *clk_info, + struct msm_cam_clk_info *clk_src_info, int num_clk) +{ + int i; + int rc = 0; + struct clk *mux_clk = NULL; + struct clk *src_clk = NULL; + + for (i = 0; i < num_clk; i++) { + if (clk_src_info[i].clk_name) { + mux_clk = clk_get(dev, clk_info[i].clk_name); + if (IS_ERR(mux_clk)) { + pr_err("%s get failed\n", + clk_info[i].clk_name); + continue; + } + src_clk = clk_get(dev, clk_src_info[i].clk_name); + if (IS_ERR(src_clk)) { + pr_err("%s get failed\n", + clk_src_info[i].clk_name); + continue; + } + clk_set_parent(mux_clk, src_clk); + } + } + return rc; +} + +int msm_cam_clk_enable(struct device *dev, struct msm_cam_clk_info *clk_info, + struct clk **clk_ptr, int num_clk, int enable) 
+{ + int i; + int rc = 0; + long clk_rate; + + if (enable) { + for (i = 0; i < num_clk; i++) { + CDBG("%s enable %s\n", __func__, clk_info[i].clk_name); + clk_ptr[i] = clk_get(dev, clk_info[i].clk_name); + if (IS_ERR(clk_ptr[i])) { + pr_err("%s get failed\n", clk_info[i].clk_name); + rc = PTR_ERR(clk_ptr[i]); + goto cam_clk_get_err; + } + if (clk_info[i].clk_rate > 0) { + clk_rate = clk_round_rate(clk_ptr[i], + clk_info[i].clk_rate); + if (clk_rate < 0) { + pr_err("%s round failed\n", + clk_info[i].clk_name); + goto cam_clk_set_err; + } + rc = clk_set_rate(clk_ptr[i], + clk_rate); + if (rc < 0) { + pr_err("%s set failed\n", + clk_info[i].clk_name); + goto cam_clk_set_err; + } + + } else if (clk_info[i].clk_rate == INIT_RATE) { + clk_rate = clk_get_rate(clk_ptr[i]); + if (clk_rate == 0) { + clk_rate = + clk_round_rate(clk_ptr[i], 0); + if (clk_rate < 0) { + pr_err("%s round rate failed\n", + clk_info[i].clk_name); + goto cam_clk_set_err; + } + rc = clk_set_rate(clk_ptr[i], + clk_rate); + if (rc < 0) { + pr_err("%s set rate failed\n", + clk_info[i].clk_name); + goto cam_clk_set_err; + } + } + } + rc = clk_prepare(clk_ptr[i]); + if (rc < 0) { + pr_err("%s prepare failed\n", + clk_info[i].clk_name); + goto cam_clk_prepare_err; + } + + rc = clk_enable(clk_ptr[i]); + if (rc < 0) { + pr_err("%s enable failed\n", + clk_info[i].clk_name); + goto cam_clk_enable_err; + } + if (clk_info[i].delay > 20) { + msleep(clk_info[i].delay); + } else if (clk_info[i].delay) { + usleep_range(clk_info[i].delay * 1000, + (clk_info[i].delay * 1000) + 1000); + } + } + } else { + for (i = num_clk - 1; i >= 0; i--) { + if (clk_ptr[i] != NULL) { + CDBG("%s disable %s\n", __func__, + clk_info[i].clk_name); + clk_disable(clk_ptr[i]); + clk_unprepare(clk_ptr[i]); + clk_put(clk_ptr[i]); + } + } + } + return rc; + + +cam_clk_enable_err: + clk_unprepare(clk_ptr[i]); +cam_clk_prepare_err: +cam_clk_set_err: + clk_put(clk_ptr[i]); +cam_clk_get_err: + for (i--; i >= 0; i--) { + if (clk_ptr[i] != NULL) { + 
clk_disable(clk_ptr[i]); + clk_unprepare(clk_ptr[i]); + clk_put(clk_ptr[i]); + } + } + return rc; +} + +int msm_camera_config_vreg(struct device *dev, struct camera_vreg_t *cam_vreg, + int num_vreg, enum msm_camera_vreg_name_t *vreg_seq, + int num_vreg_seq, struct regulator **reg_ptr, int config) +{ + int i = 0, j = 0; + int rc = 0; + struct camera_vreg_t *curr_vreg; + + if (num_vreg_seq > num_vreg) { + pr_err("%s:%d vreg sequence invalid\n", __func__, __LINE__); + return -EINVAL; + } + if (!num_vreg_seq) + num_vreg_seq = num_vreg; + + if (config) { + for (i = 0; i < num_vreg_seq; i++) { + if (vreg_seq) { + j = vreg_seq[i]; + if (j >= num_vreg) + continue; + } else + j = i; + curr_vreg = &cam_vreg[j]; + reg_ptr[j] = regulator_get(dev, + curr_vreg->reg_name); + if (IS_ERR(reg_ptr[j])) { + pr_err("%s: %s get failed\n", + __func__, + curr_vreg->reg_name); + reg_ptr[j] = NULL; + goto vreg_get_fail; + } + if (regulator_count_voltages(reg_ptr[j]) > 0) { + rc = regulator_set_voltage( + reg_ptr[j], + curr_vreg->min_voltage, + curr_vreg->max_voltage); + if (rc < 0) { + pr_err("%s: %s set voltage failed\n", + __func__, + curr_vreg->reg_name); + goto vreg_set_voltage_fail; + } + if (curr_vreg->op_mode >= 0) { + rc = regulator_set_optimum_mode( + reg_ptr[j], + curr_vreg->op_mode); + if (rc < 0) { + pr_err( + "%s:%s set optimum mode fail\n", + __func__, + curr_vreg->reg_name); + goto vreg_set_opt_mode_fail; + } + } + } + } + } else { + for (i = num_vreg_seq-1; i >= 0; i--) { + if (vreg_seq) { + j = vreg_seq[i]; + if (j >= num_vreg) + continue; + } else + j = i; + curr_vreg = &cam_vreg[j]; + if (reg_ptr[j]) { + if (regulator_count_voltages(reg_ptr[j]) > 0) { + if (curr_vreg->op_mode >= 0) { + regulator_set_optimum_mode( + reg_ptr[j], 0); + } + regulator_set_voltage( + reg_ptr[j], 0, curr_vreg-> + max_voltage); + } + regulator_put(reg_ptr[j]); + reg_ptr[j] = NULL; + } + } + } + return 0; + +vreg_unconfig: +if (regulator_count_voltages(reg_ptr[j]) > 0) + 
regulator_set_optimum_mode(reg_ptr[j], 0); + +vreg_set_opt_mode_fail: +if (regulator_count_voltages(reg_ptr[j]) > 0) + regulator_set_voltage(reg_ptr[j], 0, + curr_vreg->max_voltage); + +vreg_set_voltage_fail: + regulator_put(reg_ptr[j]); + reg_ptr[j] = NULL; + +vreg_get_fail: + for (i--; i >= 0; i--) { + if (vreg_seq) { + j = vreg_seq[i]; + if (j >= num_vreg) + continue; + } else + j = i; + curr_vreg = &cam_vreg[j]; + goto vreg_unconfig; + } + return -ENODEV; +} + +int msm_camera_enable_vreg(struct device *dev, struct camera_vreg_t *cam_vreg, + int num_vreg, enum msm_camera_vreg_name_t *vreg_seq, + int num_vreg_seq, struct regulator **reg_ptr, int enable) +{ + int i = 0, j = 0, rc = 0; + + if (num_vreg_seq > num_vreg) { + pr_err("%s:%d vreg sequence invalid\n", __func__, __LINE__); + return -EINVAL; + } + if (!num_vreg_seq) + num_vreg_seq = num_vreg; + + if (enable) { + for (i = 0; i < num_vreg_seq; i++) { + if (vreg_seq) { + j = vreg_seq[i]; + if (j >= num_vreg) + continue; + } else + j = i; + if (IS_ERR(reg_ptr[j])) { + pr_err("%s: %s null regulator\n", + __func__, cam_vreg[j].reg_name); + goto disable_vreg; + } + rc = regulator_enable(reg_ptr[j]); + if (rc < 0) { + pr_err("%s: %s enable failed\n", + __func__, cam_vreg[j].reg_name); + goto disable_vreg; + } + if (cam_vreg[j].delay > 20) + msleep(cam_vreg[j].delay); + else if (cam_vreg[j].delay) + usleep_range(cam_vreg[j].delay * 1000, + (cam_vreg[j].delay * 1000) + 1000); + } + } else { + for (i = num_vreg_seq-1; i >= 0; i--) { + if (vreg_seq) { + j = vreg_seq[i]; + if (j >= num_vreg) + continue; + } else + j = i; + regulator_disable(reg_ptr[j]); + if (cam_vreg[j].delay > 20) + msleep(cam_vreg[j].delay); + else if (cam_vreg[j].delay) + usleep_range(cam_vreg[j].delay * 1000, + (cam_vreg[j].delay * 1000) + 1000); + } + } + return rc; +disable_vreg: + for (i--; i >= 0; i--) { + if (vreg_seq) { + j = vreg_seq[i]; + if (j >= num_vreg) + continue; + } else + j = i; + regulator_disable(reg_ptr[j]); + if 
(cam_vreg[j].delay > 20) + msleep(cam_vreg[j].delay); + else if (cam_vreg[j].delay) + usleep_range(cam_vreg[j].delay * 1000, + (cam_vreg[j].delay * 1000) + 1000); + } + return rc; +} + +void msm_camera_bus_scale_cfg(uint32_t bus_perf_client, + enum msm_bus_perf_setting perf_setting) +{ + int rc = 0; + + if (!bus_perf_client) { + pr_err("%s: Bus Client NOT Registered!!!\n", __func__); + return; + } + + switch (perf_setting) { + case S_EXIT: + rc = msm_bus_scale_client_update_request(bus_perf_client, 1); + msm_bus_scale_unregister_client(bus_perf_client); + break; + case S_PREVIEW: + rc = msm_bus_scale_client_update_request(bus_perf_client, 1); + break; + case S_VIDEO: + rc = msm_bus_scale_client_update_request(bus_perf_client, 2); + break; + case S_CAPTURE: + rc = msm_bus_scale_client_update_request(bus_perf_client, 3); + break; + case S_ZSL: + rc = msm_bus_scale_client_update_request(bus_perf_client, 4); + break; + case S_LIVESHOT: + rc = msm_bus_scale_client_update_request(bus_perf_client, 5); + break; + case S_DEFAULT: + break; + default: + pr_err("%s: INVALID CASE\n", __func__); + } +} + +int msm_camera_set_gpio_table(struct msm_gpio_set_tbl *gpio_tbl, + uint8_t gpio_tbl_size, int gpio_en) +{ + int rc = 0, i; + + if (gpio_en) { + for (i = 0; i < gpio_tbl_size; i++) { + gpio_set_value_cansleep(gpio_tbl[i].gpio, + gpio_tbl[i].flags); + usleep_range(gpio_tbl[i].delay, + gpio_tbl[i].delay + 1000); + } + } else { + for (i = gpio_tbl_size - 1; i >= 0; i--) { + if (gpio_tbl[i].flags) + gpio_set_value_cansleep(gpio_tbl[i].gpio, + GPIOF_OUT_INIT_LOW); + } + } + return rc; +} + +int msm_camera_config_single_vreg(struct device *dev, + struct camera_vreg_t *cam_vreg, struct regulator **reg_ptr, int config) +{ + int rc = 0; + const char *vreg_name = NULL; + + if (!dev || !cam_vreg || !reg_ptr) { + pr_err("%s: get failed NULL parameter\n", __func__); + goto vreg_get_fail; + } + if (cam_vreg->type == VREG_TYPE_CUSTOM) { + if (cam_vreg->custom_vreg_name == NULL) { + pr_err("%s 
: can't find sub reg name", + __func__); + goto vreg_get_fail; + } + vreg_name = cam_vreg->custom_vreg_name; + } else { + if (cam_vreg->reg_name == NULL) { + pr_err("%s : can't find reg name", __func__); + goto vreg_get_fail; + } + vreg_name = cam_vreg->reg_name; + } + + if (config) { + CDBG("%s enable %s\n", __func__, vreg_name); + *reg_ptr = regulator_get(dev, vreg_name); + if (IS_ERR(*reg_ptr)) { + pr_err("%s: %s get failed\n", __func__, vreg_name); + *reg_ptr = NULL; + goto vreg_get_fail; + } + if (regulator_count_voltages(*reg_ptr) > 0) { + CDBG("%s: voltage min=%d, max=%d\n", + __func__, cam_vreg->min_voltage, + cam_vreg->max_voltage); + rc = regulator_set_voltage( + *reg_ptr, cam_vreg->min_voltage, + cam_vreg->max_voltage); + if (rc < 0) { + pr_err("%s: %s set voltage failed\n", + __func__, vreg_name); + goto vreg_set_voltage_fail; + } + if (cam_vreg->op_mode >= 0) { + rc = regulator_set_optimum_mode(*reg_ptr, + cam_vreg->op_mode); + if (rc < 0) { + pr_err( + "%s: %s set optimum mode failed\n", + __func__, vreg_name); + goto vreg_set_opt_mode_fail; + } + } + } + rc = regulator_enable(*reg_ptr); + if (rc < 0) { + pr_err("%s: %s regulator_enable failed\n", __func__, + vreg_name); + goto vreg_unconfig; + } + } else { + CDBG("%s disable %s\n", __func__, vreg_name); + if (*reg_ptr) { + CDBG("%s disable %s\n", __func__, vreg_name); + regulator_disable(*reg_ptr); + if (regulator_count_voltages(*reg_ptr) > 0) { + if (cam_vreg->op_mode >= 0) + regulator_set_optimum_mode(*reg_ptr, 0); + regulator_set_voltage( + *reg_ptr, 0, cam_vreg->max_voltage); + } + regulator_put(*reg_ptr); + *reg_ptr = NULL; + } else { + pr_err("%s can't disable %s\n", __func__, vreg_name); + } + } + return 0; + +vreg_unconfig: +if (regulator_count_voltages(*reg_ptr) > 0) + regulator_set_optimum_mode(*reg_ptr, 0); + +vreg_set_opt_mode_fail: +if (regulator_count_voltages(*reg_ptr) > 0) + regulator_set_voltage(*reg_ptr, 0, cam_vreg->max_voltage); + +vreg_set_voltage_fail: + regulator_put(*reg_ptr); 
+ *reg_ptr = NULL; + +vreg_get_fail: + return -ENODEV; +} + +int msm_camera_request_gpio_table(struct gpio *gpio_tbl, uint8_t size, + int gpio_en) +{ + int rc = 0, i = 0, err = 0; + + if (!gpio_tbl || !size) { + pr_err("%s:%d invalid gpio_tbl %p / size %d\n", __func__, + __LINE__, gpio_tbl, size); + return -EINVAL; + } + for (i = 0; i < size; i++) { + CDBG("%s:%d i %d, gpio %d dir %ld\n", __func__, __LINE__, i, + gpio_tbl[i].gpio, gpio_tbl[i].flags); + } + if (gpio_en) { + for (i = 0; i < size; i++) { + err = gpio_request_one(gpio_tbl[i].gpio, + gpio_tbl[i].flags, gpio_tbl[i].label); + if (err) { + /* + * After GPIO request fails, contine to + * apply new gpios, outout a error message + * for driver bringup debug + */ + pr_err("%s:%d gpio %d:%s request fails\n", + __func__, __LINE__, + gpio_tbl[i].gpio, gpio_tbl[i].label); + } + } + } else { + gpio_free_array(gpio_tbl, size); + } + return rc; +} + +/* + * msm_camera_get_dt_reg_settings - Get dt reg settings from device-tree. + * @of_node: Pointer to device of_node from dev. + * @dt_prop_name: String of the property to search in of_node from dev. + * @reg_s: Double pointer will be allocated by this function and filled. + * @size: Pointer to fill the length of the available entries. 
+ */ +int msm_camera_get_dt_reg_settings(struct device_node *of_node, + const char *dt_prop_name, uint32_t **reg_s, + unsigned int *size) +{ + int ret; + unsigned int cnt; + + if (!of_node || !dt_prop_name || !size || !reg_s) { + pr_err("%s: Error invalid args %p:%p:%p:%p\n", + __func__, size, reg_s, of_node, dt_prop_name); + return -EINVAL; + } + if (!of_get_property(of_node, dt_prop_name, &cnt)) { + pr_debug("Missing dt reg settings for %s\n", dt_prop_name); + return -ENOENT; + } + + if (!cnt || (cnt % 8)) { + pr_err("%s: Error invalid number of entries cnt=%d\n", + __func__, cnt); + return -EINVAL; + } + cnt /= 4; + if (cnt != 0) { + *reg_s = kcalloc(cnt, sizeof(uint32_t), + GFP_KERNEL); + if (!*reg_s) + return -ENOMEM; + ret = of_property_read_u32_array(of_node, + dt_prop_name, + *reg_s, + cnt); + if (ret < 0) { + pr_err("%s: No dt reg info read for %s ret=%d\n", + __func__, dt_prop_name, ret); + kfree(*reg_s); + return -ENOENT; + } + *size = cnt; + } else { + pr_err("%s: Error invalid entries\n", __func__); + return -EINVAL; + } + + return ret; +} + +/* + * msm_camera_get_dt_reg_settings - Free dt reg settings memory. + * @reg_s: Double pointer will be allocated by this function and filled. + * @size: Pointer to set the length as invalid. 
+ */ +void msm_camera_put_dt_reg_settings(uint32_t **reg_s, + unsigned int *size) +{ + kfree(*reg_s); + *reg_s = NULL; + *size = 0; +} + +int msm_camera_hw_write_dt_reg_settings(void __iomem *base, + uint32_t *reg_s, + unsigned int size) +{ + int32_t rc = 0; + + if (!reg_s || !base || !size) { + pr_err("%s: Error invalid args\n", __func__); + return -EINVAL; + } + rc = msm_camera_io_w_reg_block((const u32 *) reg_s, + base, size); + if (rc < 0) + pr_err("%s: Failed dt reg setting write\n", __func__); + return rc; +} + diff --git a/camera/common/msm_camera_io_util.h b/camera/common/msm_camera_io_util.h new file mode 100644 index 00000000..359e3996 --- /dev/null +++ b/camera/common/msm_camera_io_util.h @@ -0,0 +1,93 @@ +/* Copyright (c) 2011-2014, The Linux Foundataion. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __MSM_CAMERA_IO_UTIL_H +#define __MSM_CAMERA_IO_UTIL_H + +#include <linux/regulator/consumer.h> +#include <linux/gpio.h> +#include <linux/clk.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <lenovo_soc/qcom/camera2.h> +#include <lenovo_media/msm_cam_sensor.h> +#include <media/v4l2-ioctl.h> + +#define NO_SET_RATE -1 +#define INIT_RATE -2 + +struct msm_gpio_set_tbl { + unsigned gpio; + unsigned long flags; + uint32_t delay; +}; + +struct msm_cam_dump_string_info { + const char *print; + uint32_t offset; +}; + +void msm_camera_io_w(u32 data, void __iomem *addr); +void msm_camera_io_w_mb(u32 data, void __iomem *addr); +u32 msm_camera_io_r(void __iomem *addr); +u32 msm_camera_io_r_mb(void __iomem *addr); +void msm_camera_io_dump(void __iomem *addr, int size, int enable); +void msm_camera_io_memcpy(void __iomem *dest_addr, + void __iomem *src_addr, u32 len); +void msm_camera_io_memcpy_mb(void __iomem *dest_addr, + void __iomem *src_addr, u32 len); +int msm_cam_clk_sel_src(struct device *dev, struct msm_cam_clk_info *clk_info, + struct msm_cam_clk_info *clk_src_info, int num_clk); +int msm_cam_clk_enable(struct device *dev, struct msm_cam_clk_info *clk_info, + struct clk **clk_ptr, int num_clk, int enable); + +int msm_camera_config_vreg(struct device *dev, struct camera_vreg_t *cam_vreg, + int num_vreg, enum msm_camera_vreg_name_t *vreg_seq, + int num_vreg_seq, struct regulator **reg_ptr, int config); +int msm_camera_enable_vreg(struct device *dev, struct camera_vreg_t *cam_vreg, + int num_vreg, enum msm_camera_vreg_name_t *vreg_seq, + int num_vreg_seq, struct regulator **reg_ptr, int enable); + +void msm_camera_bus_scale_cfg(uint32_t bus_perf_client, + enum msm_bus_perf_setting perf_setting); + +int msm_camera_set_gpio_table(struct msm_gpio_set_tbl *gpio_tbl, + uint8_t gpio_tbl_size, int gpio_en); + +void msm_camera_config_single_gpio(uint16_t gpio, unsigned long flags, + int gpio_en); + +int msm_camera_config_single_vreg(struct device 
*dev, + struct camera_vreg_t *cam_vreg, struct regulator **reg_ptr, int config); + +int msm_camera_request_gpio_table(struct gpio *gpio_tbl, uint8_t size, + int gpio_en); +void msm_camera_io_dump_wstring_base(void __iomem *addr, + struct msm_cam_dump_string_info *dump_data, + int size); +int32_t msm_camera_io_poll_value_wmask(void __iomem *addr, u32 wait_data, + u32 bmask, u32 retry, unsigned long min_usecs, + unsigned long max_usecs); +int32_t msm_camera_io_poll_value(void __iomem *addr, u32 wait_data, u32 retry, + unsigned long min_usecs, unsigned long max_usecs); +int32_t msm_camera_io_w_block(const u32 *addr, void __iomem *base, u32 len); +int32_t msm_camera_io_w_reg_block(const u32 *addr, void __iomem *base, u32 len); +int32_t msm_camera_io_w_mb_block(const u32 *addr, void __iomem *base, u32 len); +int msm_camera_get_dt_reg_settings(struct device_node *of_node, + const char *dt_prop_name, uint32_t **reg_s, + unsigned int *size); +void msm_camera_put_dt_reg_settings(uint32_t **reg_s, + unsigned int *size); +int msm_camera_hw_write_dt_reg_settings(void __iomem *base, + uint32_t *reg_s, + unsigned int size); +#endif |
