Diffstat (limited to 'drivers/clk')
-rw-r--r-- drivers/clk/qcom/Makefile          |    7
-rw-r--r-- drivers/clk/qcom/clock-alpha-pll.c |  315
-rw-r--r-- drivers/clk/qcom/clock-local2.c    | 1353
-rw-r--r-- drivers/clk/qcom/clock-pll.c       |  624
-rw-r--r-- drivers/clk/qcom/clock-rpm.c       |  308
-rw-r--r-- drivers/clk/qcom/clock-voter.c     |  175
-rw-r--r-- drivers/clk/qcom/clock.c           |   10
-rw-r--r-- drivers/clk/qcom/gdsc.c            |   18
8 files changed, 2797 insertions(+), 13 deletions(-)
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 474720bf434..0456415a72e 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -1,7 +1,14 @@
obj-y += clock.o
obj-y += clock-dummy.o
obj-y += clock-generic.o
+obj-y += clock-local2.o
+obj-y += clock-pll.o
+obj-y += clock-rpm.o
+obj-y += clock-voter.o
obj-$(CONFIG_DEBUG_FS) += clock-debug.o
+# MDM9630
+obj-$(CONFIG_ARCH_MDM9630) += clock-alpha-pll.o
+
obj-y += gdsc.o
diff --git a/drivers/clk/qcom/clock-alpha-pll.c b/drivers/clk/qcom/clock-alpha-pll.c
new file mode 100644
index 00000000000..1b52b446967
--- /dev/null
+++ b/drivers/clk/qcom/clock-alpha-pll.c
@@ -0,0 +1,315 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <soc/qcom/clock-alpha-pll.h>
+
+#include "clock.h"
+
+#define WAIT_MAX_LOOPS 100
+
+#define MODE_REG(pll) (*pll->base + pll->offset + 0x0)
+#define LOCK_REG(pll) (*pll->base + pll->offset + 0x0)
+#define UPDATE_REG(pll) (*pll->base + pll->offset + 0x0)
+#define L_REG(pll) (*pll->base + pll->offset + 0x4)
+#define A_REG(pll) (*pll->base + pll->offset + 0x8)
+#define VCO_REG(pll) (*pll->base + pll->offset + 0x10)
+#define ALPHA_EN_REG(pll) (*pll->base + pll->offset + 0x10)
+
+#define PLL_BYPASSNL 0x2
+#define PLL_RESET_N 0x4
+#define PLL_OUTCTRL 0x1
+
+/*
+ * Even though 40 bits are present, only the upper 16 bits are
+ * significant due to the natural randomness in the XO clock
+ */
+#define ALPHA_REG_BITWIDTH 40
+#define ALPHA_BITWIDTH 16
+
+static unsigned long compute_rate(u64 parent_rate,
+ u32 l_val, u64 a_val)
+{
+ unsigned long rate;
+
+	/*
+	 * Assuming parent_rate < 2^25, we need a_val < 2^39 to avoid
+	 * overflow when multiplying below.
+	 */
+ a_val = a_val >> 1;
+ rate = parent_rate * l_val;
+ rate += (unsigned long)((parent_rate * a_val) >>
+ (ALPHA_REG_BITWIDTH - 1));
+ return rate;
+}
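+
+/*
+ * Worked example of the computation above (illustrative values only):
+ * with a 19.2 MHz parent, l_val = 52 and an alpha of 5461 programmed
+ * in the upper 16 bits (a_val = 5461 << 24):
+ *
+ *	rate = 19200000 * 52 + ((19200000 * (5461 << 23)) >> 39)
+ *	     = 998400000 + 1599902 ~= 1 GHz
+ *
+ * i.e. rate = parent * (L + alpha / 2^40), evaluated without overflow.
+ */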
+
+static bool is_locked(struct alpha_pll_clk *pll)
+{
+ u32 reg = readl_relaxed(LOCK_REG(pll));
+ u32 mask = pll->masks->lock_mask;
+ return (reg & mask) == mask;
+}
+
+static int alpha_pll_enable(struct clk *c)
+{
+ struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+ int count;
+ u32 mode;
+
+ mode = readl_relaxed(MODE_REG(pll));
+ mode |= PLL_BYPASSNL;
+ writel_relaxed(mode, MODE_REG(pll));
+
+ /*
+ * H/W requires a 5us delay between disabling the bypass and
+ * de-asserting the reset.
+ */
+ mb();
+ udelay(5);
+
+ mode |= PLL_RESET_N;
+ writel_relaxed(mode, MODE_REG(pll));
+
+ /* Wait for pll to lock. */
+ for (count = WAIT_MAX_LOOPS; count > 0; count--) {
+ if (is_locked(pll))
+ break;
+ udelay(1);
+ }
+
+ if (!count) {
+ pr_err("%s didn't lock after enabling it!\n", c->dbg_name);
+ return -EINVAL;
+ }
+
+ /* Enable PLL output. */
+ mode |= PLL_OUTCTRL;
+ writel_relaxed(mode, MODE_REG(pll));
+
+ /* Ensure that the write above goes through before returning. */
+ mb();
+ return 0;
+}
+
+static void alpha_pll_disable(struct clk *c)
+{
+ struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+ u32 mode;
+
+ mode = readl_relaxed(MODE_REG(pll));
+ mode &= ~PLL_OUTCTRL;
+ writel_relaxed(mode, MODE_REG(pll));
+
+ /* Delay of 2 output clock ticks required until output is disabled */
+ mb();
+ udelay(1);
+
+ mode &= ~(PLL_BYPASSNL | PLL_RESET_N);
+ writel_relaxed(mode, MODE_REG(pll));
+}
+
+static u32 find_vco(struct alpha_pll_clk *pll, unsigned long rate)
+{
+ unsigned long i;
+ struct alpha_pll_vco_tbl *v = pll->vco_tbl;
+
+ for (i = 0; i < pll->num_vco; i++) {
+ if (rate >= v[i].min_freq && rate <= v[i].max_freq)
+ return v[i].vco_val;
+ }
+
+ return -EINVAL;
+}
+
+static unsigned long __calc_values(struct alpha_pll_clk *pll,
+ unsigned long rate, int *l_val, u64 *a_val, bool round_up)
+{
+ u64 parent_rate;
+ u64 remainder;
+ u64 quotient;
+ unsigned long freq_hz;
+
+ parent_rate = clk_get_rate(pll->c.parent);
+ quotient = rate;
+ remainder = do_div(quotient, parent_rate);
+ *l_val = quotient;
+
+ if (!remainder) {
+ *a_val = 0;
+ return rate;
+ }
+
+ /* Upper 16 bits of Alpha */
+ quotient = remainder << ALPHA_BITWIDTH;
+ remainder = do_div(quotient, parent_rate);
+
+ if (remainder && round_up)
+ quotient++;
+
+ /* Convert to 40 bit format */
+ *a_val = quotient << (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH);
+
+ freq_hz = compute_rate(parent_rate, *l_val, *a_val);
+ return freq_hz;
+}
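+
+/*
+ * Example of the decomposition above (hypothetical numbers): a request
+ * for 999999902 Hz from a 19.2 MHz parent gives quotient L = 52 with
+ * remainder 1599902 Hz; (1599902 << 16) / 19200000 = 5460 (5461 when
+ * rounding up), which is then shifted into the 40-bit alpha format.
+ */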
+
+static unsigned long round_rate_down(struct alpha_pll_clk *pll,
+ unsigned long rate, int *l_val, u64 *a_val)
+{
+ return __calc_values(pll, rate, l_val, a_val, false);
+}
+
+static unsigned long round_rate_up(struct alpha_pll_clk *pll,
+ unsigned long rate, int *l_val, u64 *a_val)
+{
+ return __calc_values(pll, rate, l_val, a_val, true);
+}
+
+static int alpha_pll_set_rate(struct clk *c, unsigned long rate)
+{
+ struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+ struct alpha_pll_masks *masks = pll->masks;
+ unsigned long flags, freq_hz;
+ u32 a_upper, a_lower, regval, l_val, vco_val;
+ u64 a_val;
+
+ freq_hz = round_rate_up(pll, rate, &l_val, &a_val);
+ if (freq_hz != rate) {
+ pr_err("alpha_pll: Call clk_set_rate with rounded rates!\n");
+ return -EINVAL;
+ }
+
+ vco_val = find_vco(pll, freq_hz);
+ if (IS_ERR_VALUE(vco_val)) {
+ pr_err("alpha pll: not in a valid vco range\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Ensure PLL is off before changing rate. For optimization reasons,
+ * assume no downstream clock is actively using it. No support
+ * for dynamic update at the moment.
+ */
+ spin_lock_irqsave(&c->lock, flags);
+ if (c->count)
+ alpha_pll_disable(c);
+
+ a_upper = (a_val >> 32) & 0xFF;
+ a_lower = (a_val & 0xFFFFFFFF);
+
+ writel_relaxed(l_val, L_REG(pll));
+ writel_relaxed(a_lower, A_REG(pll));
+ writel_relaxed(a_upper, A_REG(pll) + 0x4);
+
+ regval = readl_relaxed(VCO_REG(pll));
+ regval &= ~(masks->vco_mask << masks->vco_shift);
+ regval |= vco_val << masks->vco_shift;
+ writel_relaxed(regval, VCO_REG(pll));
+
+ regval = readl_relaxed(ALPHA_EN_REG(pll));
+ regval |= masks->alpha_en_mask;
+ writel_relaxed(regval, ALPHA_EN_REG(pll));
+
+ if (c->count)
+ alpha_pll_enable(c);
+
+ spin_unlock_irqrestore(&c->lock, flags);
+ return 0;
+}
+
+static long alpha_pll_round_rate(struct clk *c, unsigned long rate)
+{
+ struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+ struct alpha_pll_vco_tbl *v = pll->vco_tbl;
+ u32 ret, l_val;
+ unsigned long freq_hz;
+ u64 a_val;
+ int i;
+
+ freq_hz = round_rate_up(pll, rate, &l_val, &a_val);
+ ret = find_vco(pll, freq_hz);
+ if (!IS_ERR_VALUE(ret))
+ return freq_hz;
+
+ freq_hz = 0;
+ for (i = 0; i < pll->num_vco; i++) {
+ if (is_better_rate(rate, freq_hz, v[i].min_freq))
+ freq_hz = v[i].min_freq;
+ if (is_better_rate(rate, freq_hz, v[i].max_freq))
+ freq_hz = v[i].max_freq;
+ }
+ if (!freq_hz)
+ return -EINVAL;
+ return freq_hz;
+}
+
+static void update_vco_tbl(struct alpha_pll_clk *pll)
+{
+ int i, l_val;
+ u64 a_val;
+ unsigned long hz;
+
+ /* Round vco limits to valid rates */
+ for (i = 0; i < pll->num_vco; i++) {
+ hz = round_rate_up(pll, pll->vco_tbl[i].min_freq, &l_val,
+ &a_val);
+ pll->vco_tbl[i].min_freq = hz;
+
+ hz = round_rate_down(pll, pll->vco_tbl[i].max_freq, &l_val,
+ &a_val);
+ pll->vco_tbl[i].max_freq = hz;
+ }
+}
+
+static enum handoff alpha_pll_handoff(struct clk *c)
+{
+ struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+ struct alpha_pll_masks *masks = pll->masks;
+ u64 parent_rate, a_val;
+ u32 alpha_en, l_val;
+
+ update_vco_tbl(pll);
+
+ if (!is_locked(pll))
+ return HANDOFF_DISABLED_CLK;
+
+ alpha_en = readl_relaxed(ALPHA_EN_REG(pll));
+ alpha_en &= masks->alpha_en_mask;
+
+ l_val = readl_relaxed(L_REG(pll));
+ a_val = readl_relaxed(A_REG(pll));
+ a_val |= ((u64)readl_relaxed(A_REG(pll) + 0x4)) << 32;
+
+ if (!alpha_en)
+ a_val = 0;
+
+ parent_rate = clk_get_rate(c->parent);
+ c->rate = compute_rate(parent_rate, l_val, a_val);
+
+ return HANDOFF_ENABLED_CLK;
+}
+
+struct clk_ops clk_ops_alpha_pll = {
+ .enable = alpha_pll_enable,
+ .disable = alpha_pll_disable,
+ .round_rate = alpha_pll_round_rate,
+ .set_rate = alpha_pll_set_rate,
+ .handoff = alpha_pll_handoff,
+};
+
+
diff --git a/drivers/clk/qcom/clock-local2.c b/drivers/clk/qcom/clock-local2.c
new file mode 100644
index 00000000000..76bca85d7c2
--- /dev/null
+++ b/drivers/clk/qcom/clock-local2.c
@@ -0,0 +1,1353 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <soc/qcom/clock-local2.h>
+
+/*
+ * When enabling/disabling a clock, check the halt bit up to this number
+ * of times (with a 1 us delay in between) before continuing.
+ */
+#define HALT_CHECK_MAX_LOOPS 500
+/* For clock without halt checking, wait this long after enables/disables. */
+#define HALT_CHECK_DELAY_US 10
+
+/*
+ * When updating an RCG configuration, check the update bit up to this
+ * number of times (with a 1 us delay in between) before continuing.
+ */
+#define UPDATE_CHECK_MAX_LOOPS 500
+
+DEFINE_SPINLOCK(local_clock_reg_lock);
+struct clk_freq_tbl rcg_dummy_freq = F_END;
+
+#define CMD_RCGR_REG(x) (*(x)->base + (x)->cmd_rcgr_reg)
+#define CFG_RCGR_REG(x) (*(x)->base + (x)->cmd_rcgr_reg + 0x4)
+#define M_REG(x) (*(x)->base + (x)->cmd_rcgr_reg + 0x8)
+#define N_REG(x) (*(x)->base + (x)->cmd_rcgr_reg + 0xC)
+#define D_REG(x) (*(x)->base + (x)->cmd_rcgr_reg + 0x10)
+#define CBCR_REG(x) (*(x)->base + (x)->cbcr_reg)
+#define BCR_REG(x) (*(x)->base + (x)->bcr_reg)
+#define RST_REG(x) (*(x)->base + (x)->reset_reg)
+#define VOTE_REG(x) (*(x)->base + (x)->vote_reg)
+#define GATE_EN_REG(x) (*(x)->base + (x)->en_reg)
+
+/*
+ * Important clock bit positions and masks
+ */
+#define CMD_RCGR_ROOT_ENABLE_BIT BIT(1)
+#define CBCR_BRANCH_ENABLE_BIT BIT(0)
+#define CBCR_BRANCH_OFF_BIT BIT(31)
+#define CMD_RCGR_CONFIG_UPDATE_BIT BIT(0)
+#define CMD_RCGR_ROOT_STATUS_BIT BIT(31)
+#define BCR_BLK_ARES_BIT BIT(0)
+#define CBCR_HW_CTL_BIT BIT(1)
+#define CFG_RCGR_DIV_MASK BM(4, 0)
+#define CFG_RCGR_SRC_SEL_MASK BM(10, 8)
+#define MND_MODE_MASK BM(13, 12)
+#define MND_DUAL_EDGE_MODE_BVAL BVAL(13, 12, 0x2)
+#define CMD_RCGR_CONFIG_DIRTY_MASK BM(7, 4)
+#define CBCR_CDIV_LSB 16
+#define CBCR_CDIV_MSB 19
+
+enum branch_state {
+ BRANCH_ON,
+ BRANCH_OFF,
+};
+
+/*
+ * RCG functions
+ */
+
+/*
+ * Update an RCG with a new configuration. This may include a new M, N, or D
+ * value, source selection or pre-divider value.
+ *
+ */
+static void rcg_update_config(struct rcg_clk *rcg)
+{
+ u32 cmd_rcgr_regval, count;
+
+ cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+ cmd_rcgr_regval |= CMD_RCGR_CONFIG_UPDATE_BIT;
+ writel_relaxed(cmd_rcgr_regval, CMD_RCGR_REG(rcg));
+
+ /* Wait for update to take effect */
+ for (count = UPDATE_CHECK_MAX_LOOPS; count > 0; count--) {
+ if (!(readl_relaxed(CMD_RCGR_REG(rcg)) &
+ CMD_RCGR_CONFIG_UPDATE_BIT))
+ return;
+ udelay(1);
+ }
+
+ CLK_WARN(&rcg->c, count == 0, "rcg didn't update its configuration.");
+}
+
+/* RCG set rate function for clocks with Half Integer Dividers. */
+void set_rate_hid(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
+{
+ u32 cfg_regval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&local_clock_reg_lock, flags);
+ cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
+ cfg_regval &= ~(CFG_RCGR_DIV_MASK | CFG_RCGR_SRC_SEL_MASK);
+ cfg_regval |= nf->div_src_val;
+ writel_relaxed(cfg_regval, CFG_RCGR_REG(rcg));
+
+ rcg_update_config(rcg);
+ spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
+/* RCG set rate function for clocks with MND & Half Integer Dividers. */
+void set_rate_mnd(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
+{
+ u32 cfg_regval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&local_clock_reg_lock, flags);
+ writel_relaxed(nf->m_val, M_REG(rcg));
+ writel_relaxed(nf->n_val, N_REG(rcg));
+ writel_relaxed(nf->d_val, D_REG(rcg));
+
+ cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
+ cfg_regval &= ~(CFG_RCGR_DIV_MASK | CFG_RCGR_SRC_SEL_MASK);
+ cfg_regval |= nf->div_src_val;
+
+ /* Activate or disable the M/N:D divider as necessary */
+ cfg_regval &= ~MND_MODE_MASK;
+ if (nf->n_val != 0)
+ cfg_regval |= MND_DUAL_EDGE_MODE_BVAL;
+ writel_relaxed(cfg_regval, CFG_RCGR_REG(rcg));
+
+ rcg_update_config(rcg);
+ spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
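+
+/*
+ * Note: the n_val/d_val table entries written above are expected to be
+ * pre-encoded in the hardware's inverted format (n_val = ~(n - m),
+ * d_val = ~n), as the pixel/EDP set_rate helpers further below do
+ * explicitly; set_rate_mnd() writes them out verbatim.
+ */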
+
+static int rcg_clk_prepare(struct clk *c)
+{
+ struct rcg_clk *rcg = to_rcg_clk(c);
+
+ WARN(rcg->current_freq == &rcg_dummy_freq,
+ "Attempting to prepare %s before setting its rate. "
+ "Set the rate first!\n", rcg->c.dbg_name);
+
+ return 0;
+}
+
+static int rcg_clk_set_rate(struct clk *c, unsigned long rate)
+{
+ struct clk_freq_tbl *cf, *nf;
+ struct rcg_clk *rcg = to_rcg_clk(c);
+ int rc;
+ unsigned long flags;
+
+ for (nf = rcg->freq_tbl; nf->freq_hz != FREQ_END
+ && nf->freq_hz != rate; nf++)
+ ;
+
+ if (nf->freq_hz == FREQ_END)
+ return -EINVAL;
+
+ cf = rcg->current_freq;
+
+ rc = __clk_pre_reparent(c, nf->src_clk, &flags);
+ if (rc)
+ return rc;
+
+ BUG_ON(!rcg->set_rate);
+
+ /* Perform clock-specific frequency switch operations. */
+ rcg->set_rate(rcg, nf);
+ rcg->current_freq = nf;
+ c->parent = nf->src_clk;
+
+ __clk_post_reparent(c, cf->src_clk, &flags);
+
+ return 0;
+}
+
+/*
+ * Return a supported rate that's at least the specified rate or
+ * the max supported rate if the specified rate is larger than the
+ * max supported rate.
+ */
+static long rcg_clk_round_rate(struct clk *c, unsigned long rate)
+{
+ struct rcg_clk *rcg = to_rcg_clk(c);
+ struct clk_freq_tbl *f;
+
+ for (f = rcg->freq_tbl; f->freq_hz != FREQ_END; f++)
+ if (f->freq_hz >= rate)
+ return f->freq_hz;
+
+ f--;
+ return f->freq_hz;
+}
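+
+/*
+ * For example, with a (hypothetical) table of {100 MHz, 200 MHz,
+ * 300 MHz}, a request for 150 MHz rounds up to 200 MHz and a request
+ * for 400 MHz saturates to the 300 MHz maximum.
+ */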
+
+/* Return the nth supported frequency for a given clock. */
+static long rcg_clk_list_rate(struct clk *c, unsigned n)
+{
+ struct rcg_clk *rcg = to_rcg_clk(c);
+
+ if (!rcg->freq_tbl || rcg->freq_tbl->freq_hz == FREQ_END)
+ return -ENXIO;
+
+ return (rcg->freq_tbl + n)->freq_hz;
+}
+
+static struct clk *_rcg_clk_get_parent(struct rcg_clk *rcg, int has_mnd)
+{
+ u32 n_regval = 0, m_regval = 0, d_regval = 0;
+ u32 cfg_regval;
+ struct clk_freq_tbl *freq;
+ u32 cmd_rcgr_regval;
+
+ /* Is there a pending configuration? */
+ cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+ if (cmd_rcgr_regval & CMD_RCGR_CONFIG_DIRTY_MASK)
+ return NULL;
+
+ /* Get values of m, n, d, div and src_sel registers. */
+ if (has_mnd) {
+ m_regval = readl_relaxed(M_REG(rcg));
+ n_regval = readl_relaxed(N_REG(rcg));
+ d_regval = readl_relaxed(D_REG(rcg));
+
+ /*
+ * The n and d values stored in the frequency tables are sign
+ * extended to 32 bits. The n and d values in the registers are
+ * sign extended to 8 or 16 bits. Sign extend the values read
+ * from the registers so that they can be compared to the
+ * values in the frequency tables.
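+		 * For example, a raw 8-bit N register value of 0xF5 is
+		 * widened to 0xFFFFFFF5 before the comparison.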
+ */
+ n_regval |= (n_regval >> 8) ? BM(31, 16) : BM(31, 8);
+ d_regval |= (d_regval >> 8) ? BM(31, 16) : BM(31, 8);
+ }
+
+ cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
+ cfg_regval &= CFG_RCGR_SRC_SEL_MASK | CFG_RCGR_DIV_MASK
+ | MND_MODE_MASK;
+
+ /* If mnd counter is present, check if it's in use. */
+ has_mnd = (has_mnd) &&
+ ((cfg_regval & MND_MODE_MASK) == MND_DUAL_EDGE_MODE_BVAL);
+
+ /*
+ * Clear out the mn counter mode bits since we now want to compare only
+ * the source mux selection and pre-divider values in the registers.
+ */
+ cfg_regval &= ~MND_MODE_MASK;
+
+ /* Figure out what rate the rcg is running at */
+ for (freq = rcg->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
+ if (freq->div_src_val != cfg_regval)
+ continue;
+ if (has_mnd) {
+ if (freq->m_val != m_regval)
+ continue;
+ if (freq->n_val != n_regval)
+ continue;
+ if (freq->d_val != d_regval)
+ continue;
+ }
+ break;
+ }
+
+ /* No known frequency found */
+ if (freq->freq_hz == FREQ_END)
+ return NULL;
+
+ rcg->current_freq = freq;
+ return freq->src_clk;
+}
+
+static enum handoff _rcg_clk_handoff(struct rcg_clk *rcg)
+{
+ u32 cmd_rcgr_regval;
+
+ if (rcg->current_freq && rcg->current_freq->freq_hz != FREQ_END)
+ rcg->c.rate = rcg->current_freq->freq_hz;
+
+ /* Is the root enabled? */
+ cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+ if ((cmd_rcgr_regval & CMD_RCGR_ROOT_STATUS_BIT))
+ return HANDOFF_DISABLED_CLK;
+
+ return HANDOFF_ENABLED_CLK;
+}
+
+static struct clk *rcg_mnd_clk_get_parent(struct clk *c)
+{
+ return _rcg_clk_get_parent(to_rcg_clk(c), 1);
+}
+
+static struct clk *rcg_clk_get_parent(struct clk *c)
+{
+ return _rcg_clk_get_parent(to_rcg_clk(c), 0);
+}
+
+static enum handoff rcg_mnd_clk_handoff(struct clk *c)
+{
+ return _rcg_clk_handoff(to_rcg_clk(c));
+}
+
+static enum handoff rcg_clk_handoff(struct clk *c)
+{
+ return _rcg_clk_handoff(to_rcg_clk(c));
+}
+
+static void __iomem *rcg_hid_clk_list_registers(struct clk *c, int n,
+ struct clk_register_data **regs, u32 *size)
+{
+ struct rcg_clk *rcg = to_rcg_clk(c);
+ static struct clk_register_data data[] = {
+ {"CMD_RCGR", 0x0},
+ {"CFG_RCGR", 0x4},
+ };
+ if (n)
+ return ERR_PTR(-EINVAL);
+
+ *regs = data;
+ *size = ARRAY_SIZE(data);
+ return CMD_RCGR_REG(rcg);
+}
+
+static void __iomem *rcg_mnd_clk_list_registers(struct clk *c, int n,
+ struct clk_register_data **regs, u32 *size)
+{
+ struct rcg_clk *rcg = to_rcg_clk(c);
+ static struct clk_register_data data[] = {
+ {"CMD_RCGR", 0x0},
+ {"CFG_RCGR", 0x4},
+ {"M_VAL", 0x8},
+ {"N_VAL", 0xC},
+ {"D_VAL", 0x10},
+ };
+ if (n)
+ return ERR_PTR(-EINVAL);
+
+ *regs = data;
+ *size = ARRAY_SIZE(data);
+ return CMD_RCGR_REG(rcg);
+}
+
+#define BRANCH_CHECK_MASK BM(31, 28)
+#define BRANCH_ON_VAL BVAL(31, 28, 0x0)
+#define BRANCH_OFF_VAL BVAL(31, 28, 0x8)
+#define BRANCH_NOC_FSM_ON_VAL BVAL(31, 28, 0x2)
+
+/*
+ * Branch clock functions
+ */
+static void branch_clk_halt_check(struct clk *c, u32 halt_check,
+ void __iomem *cbcr_reg, enum branch_state br_status)
+{
+ char *status_str = (br_status == BRANCH_ON) ? "off" : "on";
+
+ /*
+ * Use a memory barrier since some halt status registers are
+ * not within the same 1K segment as the branch/root enable
+ * registers. It's also needed in the udelay() case to ensure
+ * the delay starts after the branch disable.
+ */
+ mb();
+
+ if (halt_check == DELAY || halt_check == HALT_VOTED) {
+ udelay(HALT_CHECK_DELAY_US);
+ } else if (halt_check == HALT) {
+ int count;
+ u32 val;
+ for (count = HALT_CHECK_MAX_LOOPS; count > 0; count--) {
+ val = readl_relaxed(cbcr_reg);
+ val &= BRANCH_CHECK_MASK;
+ switch (br_status) {
+ case BRANCH_ON:
+ if (val == BRANCH_ON_VAL
+ || val == BRANCH_NOC_FSM_ON_VAL)
+ return;
+ break;
+
+ case BRANCH_OFF:
+ if (val == BRANCH_OFF_VAL)
+ return;
+ break;
+			}
+ udelay(1);
+ }
+ CLK_WARN(c, count == 0, "status stuck %s", status_str);
+ }
+}
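+
+/*
+ * The polling above reads the top nibble of the CBCR: 0x0 means the
+ * branch is running, 0x8 that it has halted, and 0x2 that a NOC FSM is
+ * holding it on (see the BRANCH_*_VAL definitions above).
+ */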
+
+static int branch_clk_enable(struct clk *c)
+{
+ unsigned long flags;
+ u32 cbcr_val;
+ struct branch_clk *branch = to_branch_clk(c);
+
+ spin_lock_irqsave(&local_clock_reg_lock, flags);
+ cbcr_val = readl_relaxed(CBCR_REG(branch));
+ cbcr_val |= CBCR_BRANCH_ENABLE_BIT;
+ writel_relaxed(cbcr_val, CBCR_REG(branch));
+ spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+ /* Wait for clock to enable before continuing. */
+ branch_clk_halt_check(c, branch->halt_check, CBCR_REG(branch),
+ BRANCH_ON);
+
+ return 0;
+}
+
+static void branch_clk_disable(struct clk *c)
+{
+ unsigned long flags;
+ struct branch_clk *branch = to_branch_clk(c);
+ u32 reg_val;
+
+ spin_lock_irqsave(&local_clock_reg_lock, flags);
+ reg_val = readl_relaxed(CBCR_REG(branch));
+ reg_val &= ~CBCR_BRANCH_ENABLE_BIT;
+ writel_relaxed(reg_val, CBCR_REG(branch));
+ spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+ /* Wait for clock to disable before continuing. */
+ branch_clk_halt_check(c, branch->halt_check, CBCR_REG(branch),
+ BRANCH_OFF);
+}
+
+static int branch_cdiv_set_rate(struct branch_clk *branch, unsigned long rate)
+{
+ unsigned long flags;
+ u32 regval;
+
+ if (rate > branch->max_div)
+ return -EINVAL;
+
+ spin_lock_irqsave(&local_clock_reg_lock, flags);
+ regval = readl_relaxed(CBCR_REG(branch));
+ regval &= ~BM(CBCR_CDIV_MSB, CBCR_CDIV_LSB);
+ regval |= BVAL(CBCR_CDIV_MSB, CBCR_CDIV_LSB, rate);
+ writel_relaxed(regval, CBCR_REG(branch));
+ spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+ return 0;
+}
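+
+/*
+ * For these fixed-divider branch clocks the "rate" passed in is really
+ * the divider selection itself: it is range-checked against max_div
+ * and programmed directly into the CBCR CDIV field (bits 19:16).
+ */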
+
+static int branch_clk_set_rate(struct clk *c, unsigned long rate)
+{
+ struct branch_clk *branch = to_branch_clk(c);
+
+ if (branch->max_div)
+ return branch_cdiv_set_rate(branch, rate);
+
+ if (!branch->has_sibling)
+ return clk_set_rate(c->parent, rate);
+
+ return -EPERM;
+}
+
+static long branch_clk_round_rate(struct clk *c, unsigned long rate)
+{
+ struct branch_clk *branch = to_branch_clk(c);
+
+ if (branch->max_div)
+ return rate <= (branch->max_div) ? rate : -EPERM;
+
+ if (!branch->has_sibling)
+ return clk_round_rate(c->parent, rate);
+
+ return -EPERM;
+}
+
+static unsigned long branch_clk_get_rate(struct clk *c)
+{
+ struct branch_clk *branch = to_branch_clk(c);
+
+ if (branch->max_div)
+ return branch->c.rate;
+
+ return clk_get_rate(c->parent);
+}
+
+static long branch_clk_list_rate(struct clk *c, unsigned n)
+{
+ int level;
+ unsigned long fmax = 0, rate;
+ struct branch_clk *branch = to_branch_clk(c);
+ struct clk *parent = c->parent;
+
+ if (branch->has_sibling == 1)
+ return -ENXIO;
+
+ if (!parent || !parent->ops->list_rate)
+ return -ENXIO;
+
+ /* Find max frequency supported within voltage constraints. */
+ if (!parent->vdd_class) {
+ fmax = ULONG_MAX;
+ } else {
+ for (level = 0; level < parent->num_fmax; level++)
+ if (parent->fmax[level])
+ fmax = parent->fmax[level];
+ }
+
+ rate = parent->ops->list_rate(parent, n);
+ if (rate <= fmax)
+ return rate;
+ else
+ return -ENXIO;
+}
+
+static enum handoff branch_clk_handoff(struct clk *c)
+{
+ struct branch_clk *branch = to_branch_clk(c);
+ u32 cbcr_regval;
+
+ cbcr_regval = readl_relaxed(CBCR_REG(branch));
+
+ /* Set the cdiv to c->rate for fixed divider branch clock */
+ if (c->rate && (c->rate < branch->max_div)) {
+ cbcr_regval &= ~BM(CBCR_CDIV_MSB, CBCR_CDIV_LSB);
+ cbcr_regval |= BVAL(CBCR_CDIV_MSB, CBCR_CDIV_LSB, c->rate);
+ writel_relaxed(cbcr_regval, CBCR_REG(branch));
+ }
+
+ if ((cbcr_regval & CBCR_BRANCH_OFF_BIT))
+ return HANDOFF_DISABLED_CLK;
+
+ if (branch->max_div) {
+ cbcr_regval &= BM(CBCR_CDIV_MSB, CBCR_CDIV_LSB);
+ cbcr_regval >>= CBCR_CDIV_LSB;
+ c->rate = cbcr_regval;
+ } else if (!branch->has_sibling) {
+ c->rate = clk_get_rate(c->parent);
+ }
+
+ return HANDOFF_ENABLED_CLK;
+}
+
+static int __branch_clk_reset(void __iomem *bcr_reg,
+ enum clk_reset_action action)
+{
+ int ret = 0;
+ unsigned long flags;
+ u32 reg_val;
+
+ spin_lock_irqsave(&local_clock_reg_lock, flags);
+ reg_val = readl_relaxed(bcr_reg);
+ switch (action) {
+ case CLK_RESET_ASSERT:
+ reg_val |= BCR_BLK_ARES_BIT;
+ break;
+ case CLK_RESET_DEASSERT:
+ reg_val &= ~BCR_BLK_ARES_BIT;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ writel_relaxed(reg_val, bcr_reg);
+ spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+ /* Make sure write is issued before returning. */
+ mb();
+
+ return ret;
+}
+
+static int branch_clk_reset(struct clk *c, enum clk_reset_action action)
+{
+ struct branch_clk *branch = to_branch_clk(c);
+
+ if (!branch->bcr_reg)
+ return -EPERM;
+ return __branch_clk_reset(BCR_REG(branch), action);
+}
+
+static int branch_clk_set_flags(struct clk *c, unsigned flags)
+{
+ u32 cbcr_val;
+ unsigned long irq_flags;
+ struct branch_clk *branch = to_branch_clk(c);
+ int delay_us = 0, ret = 0;
+
+ spin_lock_irqsave(&local_clock_reg_lock, irq_flags);
+ cbcr_val = readl_relaxed(CBCR_REG(branch));
+ switch (flags) {
+ case CLKFLAG_RETAIN_PERIPH:
+ cbcr_val |= BIT(13);
+ delay_us = 1;
+ break;
+ case CLKFLAG_NORETAIN_PERIPH:
+ cbcr_val &= ~BIT(13);
+ break;
+ case CLKFLAG_RETAIN_MEM:
+ cbcr_val |= BIT(14);
+ delay_us = 1;
+ break;
+ case CLKFLAG_NORETAIN_MEM:
+ cbcr_val &= ~BIT(14);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ writel_relaxed(cbcr_val, CBCR_REG(branch));
+ /* Make sure power is enabled before returning. */
+ mb();
+ udelay(delay_us);
+
+ spin_unlock_irqrestore(&local_clock_reg_lock, irq_flags);
+
+ return ret;
+}
+
+static void __iomem *branch_clk_list_registers(struct clk *c, int n,
+ struct clk_register_data **regs, u32 *size)
+{
+ struct branch_clk *branch = to_branch_clk(c);
+ static struct clk_register_data data[] = {
+ {"CBCR", 0x0},
+ };
+ if (n)
+ return ERR_PTR(-EINVAL);
+
+ *regs = data;
+ *size = ARRAY_SIZE(data);
+ return CBCR_REG(branch);
+}
+
+/*
+ * Voteable clock functions
+ */
+static int local_vote_clk_reset(struct clk *c, enum clk_reset_action action)
+{
+ struct local_vote_clk *vclk = to_local_vote_clk(c);
+
+ if (!vclk->bcr_reg) {
+		WARN(1, "clk_reset called on an unsupported clock (%s)\n",
+			c->dbg_name);
+ return -EPERM;
+ }
+ return __branch_clk_reset(BCR_REG(vclk), action);
+}
+
+static int local_vote_clk_enable(struct clk *c)
+{
+ unsigned long flags;
+ u32 ena;
+ struct local_vote_clk *vclk = to_local_vote_clk(c);
+
+ spin_lock_irqsave(&local_clock_reg_lock, flags);
+ ena = readl_relaxed(VOTE_REG(vclk));
+ ena |= vclk->en_mask;
+ writel_relaxed(ena, VOTE_REG(vclk));
+ spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+ branch_clk_halt_check(c, vclk->halt_check, CBCR_REG(vclk), BRANCH_ON);
+
+ return 0;
+}
+
+static void local_vote_clk_disable(struct clk *c)
+{
+ unsigned long flags;
+ u32 ena;
+ struct local_vote_clk *vclk = to_local_vote_clk(c);
+
+ spin_lock_irqsave(&local_clock_reg_lock, flags);
+ ena = readl_relaxed(VOTE_REG(vclk));
+ ena &= ~vclk->en_mask;
+ writel_relaxed(ena, VOTE_REG(vclk));
+ spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
+static enum handoff local_vote_clk_handoff(struct clk *c)
+{
+ struct local_vote_clk *vclk = to_local_vote_clk(c);
+ u32 vote_regval;
+
+ /* Is the branch voted on by apps? */
+ vote_regval = readl_relaxed(VOTE_REG(vclk));
+ if (!(vote_regval & vclk->en_mask))
+ return HANDOFF_DISABLED_CLK;
+
+ return HANDOFF_ENABLED_CLK;
+}
+
+/* Sample clock for 'ticks' reference clock ticks. */
+static u32 run_measurement(unsigned ticks, void __iomem *ctl_reg,
+ void __iomem *status_reg)
+{
+ /* Stop counters and set the XO4 counter start value. */
+ writel_relaxed(ticks, ctl_reg);
+
+ /* Wait for timer to become ready. */
+ while ((readl_relaxed(status_reg) & BIT(25)) != 0)
+ cpu_relax();
+
+ /* Run measurement and wait for completion. */
+ writel_relaxed(BIT(20)|ticks, ctl_reg);
+ while ((readl_relaxed(status_reg) & BIT(25)) == 0)
+ cpu_relax();
+
+ /* Return measured ticks. */
+ return readl_relaxed(status_reg) & BM(24, 0);
+}
+
+/*
+ * Perform a hardware rate measurement for a given clock.
+ * FOR DEBUG USE ONLY: Measurements take ~15 ms!
+ */
+unsigned long measure_get_rate(struct clk *c)
+{
+ unsigned long flags;
+ u32 gcc_xo4_reg_backup;
+ u64 raw_count_short, raw_count_full;
+ unsigned ret;
+ u32 sample_ticks = 0x10000;
+ u32 multiplier = 0x1;
+ struct measure_clk_data *data = to_mux_clk(c)->priv;
+
+ ret = clk_prepare_enable(data->cxo);
+ if (ret) {
+ pr_warn("CXO clock failed to enable. Can't measure\n");
+ return 0;
+ }
+
+ spin_lock_irqsave(&local_clock_reg_lock, flags);
+
+ /* Enable CXO/4 and RINGOSC branch. */
+ gcc_xo4_reg_backup = readl_relaxed(*data->base + data->xo_div4_cbcr);
+ writel_relaxed(0x1, *data->base + data->xo_div4_cbcr);
+
+ /*
+ * The ring oscillator counter will not reset if the measured clock
+ * is not running. To detect this, run a short measurement before
+ * the full measurement. If the raw results of the two are the same
+ * then the clock must be off.
+ */
+
+ /* Run a short measurement. (~1 ms) */
+ raw_count_short = run_measurement(0x1000, *data->base + data->ctl_reg,
+ *data->base + data->status_reg);
+ /* Run a full measurement. (~14 ms) */
+ raw_count_full = run_measurement(sample_ticks,
+ *data->base + data->ctl_reg,
+ *data->base + data->status_reg);
+ writel_relaxed(gcc_xo4_reg_backup, *data->base + data->xo_div4_cbcr);
+
+ /* Return 0 if the clock is off. */
+ if (raw_count_full == raw_count_short) {
+ ret = 0;
+ } else {
+ /* Compute rate in Hz. */
+ raw_count_full = ((raw_count_full * 10) + 15) * 4800000;
+ do_div(raw_count_full, ((sample_ticks * 10) + 35));
+ ret = (raw_count_full * multiplier);
+ }
+ writel_relaxed(data->plltest_val, *data->base + data->plltest_reg);
+ spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+ clk_disable_unprepare(data->cxo);
+
+ return ret;
+}
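+
+/*
+ * The arithmetic above: the full measurement counts edges of the
+ * measured clock over sample_ticks periods of the XO/4 (4.8 MHz)
+ * reference, so rate ~= raw_count * 4800000 / sample_ticks. The +15
+ * and +35 terms appear to compensate for a small fixed measurement
+ * overhead, expressed in tenths of a tick.
+ */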
+
+struct frac_entry {
+ int num;
+ int den;
+};
+
+static void __iomem *local_vote_clk_list_registers(struct clk *c, int n,
+ struct clk_register_data **regs, u32 *size)
+{
+ struct local_vote_clk *vclk = to_local_vote_clk(c);
+ static struct clk_register_data data1[] = {
+ {"CBCR", 0x0},
+ };
+ static struct clk_register_data data2[] = {
+ {"APPS_VOTE", 0x0},
+ {"APPS_SLEEP_VOTE", 0x4},
+ };
+ switch (n) {
+ case 0:
+ *regs = data1;
+ *size = ARRAY_SIZE(data1);
+ return CBCR_REG(vclk);
+ case 1:
+ *regs = data2;
+ *size = ARRAY_SIZE(data2);
+ return VOTE_REG(vclk);
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+}
+
+static struct frac_entry frac_table_675m[] = { /* link rate of 270M */
+ {52, 295}, /* 119 M */
+ {11, 57}, /* 130.25 M */
+ {63, 307}, /* 138.50 M */
+ {11, 50}, /* 148.50 M */
+ {47, 206}, /* 154 M */
+ {31, 100}, /* 205.25 M */
+ {107, 269}, /* 268.50 M */
+ {0, 0},
+};
+
+static struct frac_entry frac_table_810m[] = { /* Link rate of 162M */
+ {31, 211}, /* 119 M */
+ {32, 199}, /* 130.25 M */
+ {63, 307}, /* 138.50 M */
+ {11, 60}, /* 148.50 M */
+ {50, 263}, /* 154 M */
+ {31, 120}, /* 205.25 M */
+ {119, 359}, /* 268.50 M */
+ {0, 0},
+};
+
+static int set_rate_edp_pixel(struct clk *clk, unsigned long rate)
+{
+ struct rcg_clk *rcg = to_rcg_clk(clk);
+ struct clk_freq_tbl *pixel_freq = rcg->current_freq;
+ struct frac_entry *frac;
+ int delta = 100000;
+ s64 request;
+ s64 src_rate;
+
+ src_rate = clk_get_rate(clk->parent);
+
+ if (src_rate == 810000000)
+ frac = frac_table_810m;
+ else
+ frac = frac_table_675m;
+
+ while (frac->num) {
+ request = rate;
+ request *= frac->den;
+ request = div_s64(request, frac->num);
+ if ((src_rate < (request - delta)) ||
+ (src_rate > (request + delta))) {
+ frac++;
+ continue;
+ }
+
+ pixel_freq->div_src_val &= ~BM(4, 0);
+ if (frac->den == frac->num) {
+ pixel_freq->m_val = 0;
+ pixel_freq->n_val = 0;
+ } else {
+ pixel_freq->m_val = frac->num;
+ pixel_freq->n_val = ~(frac->den - frac->num);
+ pixel_freq->d_val = ~frac->den;
+ }
+ set_rate_mnd(rcg, pixel_freq);
+ return 0;
+ }
+ return -EINVAL;
+}
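+
+/*
+ * The M/N:D values programmed above use the hardware's inverted
+ * encoding: for a fraction m/n the registers hold m, ~(n - m) and ~n.
+ * pixel_rcg_handoff() below applies the inverse transformation when
+ * reading the configuration back out of the registers.
+ */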
+
+enum handoff byte_rcg_handoff(struct clk *clk)
+{
+ struct rcg_clk *rcg = to_rcg_clk(clk);
+ u32 div_val;
+ unsigned long pre_div_rate, parent_rate = clk_get_rate(clk->parent);
+
+ /* If the pre-divider is used, find the rate after the division */
+ div_val = readl_relaxed(CFG_RCGR_REG(rcg)) & CFG_RCGR_DIV_MASK;
+ if (div_val > 1)
+ pre_div_rate = parent_rate / ((div_val + 1) >> 1);
+ else
+ pre_div_rate = parent_rate;
+
+ clk->rate = pre_div_rate;
+
+ if (readl_relaxed(CMD_RCGR_REG(rcg)) & CMD_RCGR_ROOT_STATUS_BIT)
+ return HANDOFF_DISABLED_CLK;
+
+ return HANDOFF_ENABLED_CLK;
+}
+
+static int set_rate_byte(struct clk *clk, unsigned long rate)
+{
+ struct rcg_clk *rcg = to_rcg_clk(clk);
+ struct clk *pll = clk->parent;
+ unsigned long source_rate, div;
+ struct clk_freq_tbl *byte_freq = rcg->current_freq;
+ int rc;
+
+ if (rate == 0)
+ return -EINVAL;
+
+ rc = clk_set_rate(pll, rate);
+ if (rc)
+ return rc;
+
+ source_rate = clk_round_rate(pll, rate);
+ if ((2 * source_rate) % rate)
+ return -EINVAL;
+
+ div = ((2 * source_rate)/rate) - 1;
+ if (div > CFG_RCGR_DIV_MASK)
+ return -EINVAL;
+
+ byte_freq->div_src_val &= ~CFG_RCGR_DIV_MASK;
+ byte_freq->div_src_val |= BVAL(4, 0, div);
+ set_rate_hid(rcg, byte_freq);
+
+ return 0;
+}
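+
+/*
+ * Byte clocks use a half-integer divider: the encoded value is
+ * div = 2 * src / rate - 1. For example (hypothetical rates), a
+ * 750 MHz source and a 100 MHz request give div = 14, i.e. a
+ * divide-by-7.5 since the hardware divides by (div + 1) / 2.
+ */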
+
+enum handoff pixel_rcg_handoff(struct clk *clk)
+{
+ struct rcg_clk *rcg = to_rcg_clk(clk);
+ u32 div_val = 0, mval = 0, nval = 0, cfg_regval;
+ unsigned long pre_div_rate, parent_rate = clk_get_rate(clk->parent);
+
+ cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
+
+ /* If the pre-divider is used, find the rate after the division */
+ div_val = cfg_regval & CFG_RCGR_DIV_MASK;
+ if (div_val > 1)
+ pre_div_rate = parent_rate / ((div_val + 1) >> 1);
+ else
+ pre_div_rate = parent_rate;
+
+ clk->rate = pre_div_rate;
+
+ /*
+ * Pixel clocks have one frequency entry in their frequency table.
+ * Update that entry.
+ */
+ if (rcg->current_freq) {
+ rcg->current_freq->div_src_val &= ~CFG_RCGR_DIV_MASK;
+ rcg->current_freq->div_src_val |= div_val;
+ }
+
+ /* If MND is used, find the rate after the MND division */
+ if ((cfg_regval & MND_MODE_MASK) == MND_DUAL_EDGE_MODE_BVAL) {
+ mval = readl_relaxed(M_REG(rcg));
+ nval = readl_relaxed(N_REG(rcg));
+ if (!nval)
+ return HANDOFF_DISABLED_CLK;
+ nval = (~nval) + mval;
+ if (rcg->current_freq) {
+ rcg->current_freq->n_val = ~(nval - mval);
+ rcg->current_freq->m_val = mval;
+ rcg->current_freq->d_val = ~nval;
+ }
+ clk->rate = (pre_div_rate * mval) / nval;
+ }
+
+ if (readl_relaxed(CMD_RCGR_REG(rcg)) & CMD_RCGR_ROOT_STATUS_BIT)
+ return HANDOFF_DISABLED_CLK;
+
+ return HANDOFF_ENABLED_CLK;
+}
+
+static long round_rate_pixel(struct clk *clk, unsigned long rate)
+{
+ int frac_num[] = {3, 2, 4, 1};
+ int frac_den[] = {8, 9, 9, 1};
+ int delta = 100000;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(frac_num); i++) {
+ unsigned long request = (rate * frac_den[i]) / frac_num[i];
+ unsigned long src_rate;
+
+ src_rate = clk_round_rate(clk->parent, request);
+ if ((src_rate < (request - delta)) ||
+ (src_rate > (request + delta)))
+ continue;
+
+ return (src_rate * frac_num[i]) / frac_den[i];
+ }
+
+ return -EINVAL;
+}
+
+
+static int set_rate_pixel(struct clk *clk, unsigned long rate)
+{
+ struct rcg_clk *rcg = to_rcg_clk(clk);
+ struct clk_freq_tbl *pixel_freq = rcg->current_freq;
+ int frac_num[] = {3, 2, 4, 1};
+ int frac_den[] = {8, 9, 9, 1};
+ int delta = 100000;
+ int i, rc;
+
+ for (i = 0; i < ARRAY_SIZE(frac_num); i++) {
+ unsigned long request = (rate * frac_den[i]) / frac_num[i];
+ unsigned long src_rate;
+
+ src_rate = clk_round_rate(clk->parent, request);
+ if ((src_rate < (request - delta)) ||
+ (src_rate > (request + delta)))
+ continue;
+
+ rc = clk_set_rate(clk->parent, src_rate);
+ if (rc)
+ return rc;
+
+ pixel_freq->div_src_val &= ~BM(4, 0);
+ if (frac_den[i] == frac_num[i]) {
+ pixel_freq->m_val = 0;
+ pixel_freq->n_val = 0;
+ } else {
+ pixel_freq->m_val = frac_num[i];
+ pixel_freq->n_val = ~(frac_den[i] - frac_num[i]);
+ pixel_freq->d_val = ~frac_den[i];
+ }
+ set_rate_mnd(rcg, pixel_freq);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/*
+ * Unlike other clocks, the HDMI rate is adjusted through PLL
+ * re-programming. It is also routed through an HID divider.
+ */
+static int rcg_clk_set_rate_hdmi(struct clk *c, unsigned long rate)
+{
+ struct rcg_clk *rcg = to_rcg_clk(c);
+ struct clk_freq_tbl *nf = rcg->freq_tbl;
+ int rc;
+
+ rc = clk_set_rate(nf->src_clk, rate);
+ if (rc < 0)
+ goto out;
+ set_rate_hid(rcg, nf);
+
+ rcg->current_freq = nf;
+out:
+ return rc;
+}
+
+static struct clk *rcg_hdmi_clk_get_parent(struct clk *c)
+{
+ struct rcg_clk *rcg = to_rcg_clk(c);
+ struct clk_freq_tbl *freq = rcg->freq_tbl;
+ u32 cmd_rcgr_regval;
+
+ /* Is there a pending configuration? */
+ cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+ if (cmd_rcgr_regval & CMD_RCGR_CONFIG_DIRTY_MASK)
+ return NULL;
+
+ rcg->current_freq->freq_hz = clk_get_rate(c->parent);
+
+ return freq->src_clk;
+}
+
+static int rcg_clk_set_rate_edp(struct clk *c, unsigned long rate)
+{
+ struct clk_freq_tbl *nf;
+ struct rcg_clk *rcg = to_rcg_clk(c);
+ int rc;
+
+ for (nf = rcg->freq_tbl; nf->freq_hz != rate; nf++)
+ if (nf->freq_hz == FREQ_END) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = clk_set_rate(nf->src_clk, rate);
+ if (rc < 0)
+ goto out;
+ set_rate_hid(rcg, nf);
+
+ rcg->current_freq = nf;
+ c->parent = nf->src_clk;
+out:
+ return rc;
+}
+
+static struct clk *edp_clk_get_parent(struct clk *c)
+{
+ struct rcg_clk *rcg = to_rcg_clk(c);
+ struct clk *clk;
+ struct clk_freq_tbl *freq;
+ unsigned long rate;
+ u32 cmd_rcgr_regval;
+
+ /* Is there a pending configuration? */
+ cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+ if (cmd_rcgr_regval & CMD_RCGR_CONFIG_DIRTY_MASK)
+ return NULL;
+
+ /* Figure out what rate the rcg is running at */
+ for (freq = rcg->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
+ clk = freq->src_clk;
+ if (clk && clk->ops->get_rate) {
+ rate = clk->ops->get_rate(clk);
+ if (rate == freq->freq_hz)
+ break;
+ }
+ }
+
+ /* No known frequency found */
+ if (freq->freq_hz == FREQ_END)
+ return NULL;
+
+ rcg->current_freq = freq;
+ return freq->src_clk;
+}
+
+static int gate_clk_enable(struct clk *c)
+{
+ unsigned long flags;
+ u32 regval;
+ struct gate_clk *g = to_gate_clk(c);
+
+ spin_lock_irqsave(&local_clock_reg_lock, flags);
+ regval = readl_relaxed(GATE_EN_REG(g));
+ regval |= g->en_mask;
+ writel_relaxed(regval, GATE_EN_REG(g));
+ spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+ return 0;
+}
+
+static void gate_clk_disable(struct clk *c)
+{
+ unsigned long flags;
+ u32 regval;
+ struct gate_clk *g = to_gate_clk(c);
+
+ spin_lock_irqsave(&local_clock_reg_lock, flags);
+ regval = readl_relaxed(GATE_EN_REG(g));
+ regval &= ~(g->en_mask);
+ writel_relaxed(regval, GATE_EN_REG(g));
+ spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
+static void __iomem *gate_clk_list_registers(struct clk *c, int n,
+ struct clk_register_data **regs, u32 *size)
+{
+ struct gate_clk *g = to_gate_clk(c);
+ static struct clk_register_data data[] = {
+ {"EN_REG", 0x0},
+ };
+ if (n)
+ return ERR_PTR(-EINVAL);
+
+ *regs = data;
+ *size = ARRAY_SIZE(data);
+ return GATE_EN_REG(g);
+}
+
+static enum handoff gate_clk_handoff(struct clk *c)
+{
+ struct gate_clk *g = to_gate_clk(c);
+ u32 regval;
+
+ regval = readl_relaxed(GATE_EN_REG(g));
+ if (regval & g->en_mask)
+ return HANDOFF_ENABLED_CLK;
+
+ return HANDOFF_DISABLED_CLK;
+}
+
+static int reset_clk_rst(struct clk *c, enum clk_reset_action action)
+{
+ struct reset_clk *rst = to_reset_clk(c);
+
+ if (!rst->reset_reg)
+ return -EPERM;
+
+ return __branch_clk_reset(RST_REG(rst), action);
+}
+
+static DEFINE_SPINLOCK(mux_reg_lock);
+
+static int mux_reg_enable(struct mux_clk *clk)
+{
+ u32 regval;
+ unsigned long flags;
+ u32 offset = clk->en_reg ? clk->en_offset : clk->offset;
+
+ spin_lock_irqsave(&mux_reg_lock, flags);
+ regval = readl_relaxed(*clk->base + offset);
+ regval |= clk->en_mask;
+ writel_relaxed(regval, *clk->base + offset);
+ /* Ensure enable request goes through before returning */
+ mb();
+ spin_unlock_irqrestore(&mux_reg_lock, flags);
+
+ return 0;
+}
+
+static void mux_reg_disable(struct mux_clk *clk)
+{
+ u32 regval;
+ unsigned long flags;
+ u32 offset = clk->en_reg ? clk->en_offset : clk->offset;
+
+ spin_lock_irqsave(&mux_reg_lock, flags);
+ regval = readl_relaxed(*clk->base + offset);
+ regval &= ~clk->en_mask;
+ writel_relaxed(regval, *clk->base + offset);
+ spin_unlock_irqrestore(&mux_reg_lock, flags);
+}
+
+static int mux_reg_set_mux_sel(struct mux_clk *clk, int sel)
+{
+ u32 regval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mux_reg_lock, flags);
+ regval = readl_relaxed(*clk->base + clk->offset);
+ regval &= ~(clk->mask << clk->shift);
+ regval |= (sel & clk->mask) << clk->shift;
+ writel_relaxed(regval, *clk->base + clk->offset);
+ /* Ensure switch request goes through before returning */
+ mb();
+ spin_unlock_irqrestore(&mux_reg_lock, flags);
+
+ return 0;
+}
+
+static int mux_reg_get_mux_sel(struct mux_clk *clk)
+{
+	u32 regval = readl_relaxed(*clk->base + clk->offset);
+
+	/* Return the full selector field, not just its truth value. */
+	return (regval >> clk->shift) & clk->mask;
+}
+
+static bool mux_reg_is_enabled(struct mux_clk *clk)
+{
+ u32 regval = readl_relaxed(*clk->base + clk->offset);
+ return !!(regval & clk->en_mask);
+}
+
+struct clk_ops clk_ops_empty;
+
+struct clk_ops clk_ops_rst = {
+ .reset = reset_clk_rst,
+};
+
+struct clk_ops clk_ops_rcg = {
+ .enable = rcg_clk_prepare,
+ .set_rate = rcg_clk_set_rate,
+ .list_rate = rcg_clk_list_rate,
+ .round_rate = rcg_clk_round_rate,
+ .handoff = rcg_clk_handoff,
+ .get_parent = rcg_clk_get_parent,
+ .list_registers = rcg_hid_clk_list_registers,
+};
+
+struct clk_ops clk_ops_rcg_mnd = {
+ .enable = rcg_clk_prepare,
+ .set_rate = rcg_clk_set_rate,
+ .list_rate = rcg_clk_list_rate,
+ .round_rate = rcg_clk_round_rate,
+ .handoff = rcg_mnd_clk_handoff,
+ .get_parent = rcg_mnd_clk_get_parent,
+ .list_registers = rcg_mnd_clk_list_registers,
+};
+
+struct clk_ops clk_ops_pixel = {
+ .enable = rcg_clk_prepare,
+ .set_rate = set_rate_pixel,
+ .list_rate = rcg_clk_list_rate,
+ .round_rate = round_rate_pixel,
+ .handoff = pixel_rcg_handoff,
+ .list_registers = rcg_mnd_clk_list_registers,
+};
+
+struct clk_ops clk_ops_edppixel = {
+ .enable = rcg_clk_prepare,
+ .set_rate = set_rate_edp_pixel,
+ .list_rate = rcg_clk_list_rate,
+ .round_rate = rcg_clk_round_rate,
+ .handoff = pixel_rcg_handoff,
+ .list_registers = rcg_mnd_clk_list_registers,
+};
+
+struct clk_ops clk_ops_byte = {
+ .enable = rcg_clk_prepare,
+ .set_rate = set_rate_byte,
+ .list_rate = rcg_clk_list_rate,
+ .round_rate = rcg_clk_round_rate,
+ .handoff = byte_rcg_handoff,
+ .list_registers = rcg_hid_clk_list_registers,
+};
+
+struct clk_ops clk_ops_rcg_hdmi = {
+ .enable = rcg_clk_prepare,
+ .set_rate = rcg_clk_set_rate_hdmi,
+ .list_rate = rcg_clk_list_rate,
+ .round_rate = rcg_clk_round_rate,
+ .handoff = rcg_clk_handoff,
+ .get_parent = rcg_hdmi_clk_get_parent,
+ .list_registers = rcg_hid_clk_list_registers,
+};
+
+struct clk_ops clk_ops_rcg_edp = {
+ .enable = rcg_clk_prepare,
+ .set_rate = rcg_clk_set_rate_edp,
+ .list_rate = rcg_clk_list_rate,
+ .round_rate = rcg_clk_round_rate,
+ .handoff = rcg_clk_handoff,
+ .get_parent = edp_clk_get_parent,
+ .list_registers = rcg_hid_clk_list_registers,
+};
+
+struct clk_ops clk_ops_branch = {
+ .enable = branch_clk_enable,
+ .disable = branch_clk_disable,
+ .set_rate = branch_clk_set_rate,
+ .get_rate = branch_clk_get_rate,
+ .list_rate = branch_clk_list_rate,
+ .round_rate = branch_clk_round_rate,
+ .reset = branch_clk_reset,
+ .set_flags = branch_clk_set_flags,
+ .handoff = branch_clk_handoff,
+ .list_registers = branch_clk_list_registers,
+};
+
+struct clk_ops clk_ops_vote = {
+ .enable = local_vote_clk_enable,
+ .disable = local_vote_clk_disable,
+ .reset = local_vote_clk_reset,
+ .handoff = local_vote_clk_handoff,
+ .list_registers = local_vote_clk_list_registers,
+};
+
+struct clk_ops clk_ops_gate = {
+ .enable = gate_clk_enable,
+ .disable = gate_clk_disable,
+ .handoff = gate_clk_handoff,
+ .list_registers = gate_clk_list_registers,
+};
+
+struct clk_mux_ops mux_reg_ops = {
+ .enable = mux_reg_enable,
+ .disable = mux_reg_disable,
+ .set_mux_sel = mux_reg_set_mux_sel,
+ .get_mux_sel = mux_reg_get_mux_sel,
+ .is_enabled = mux_reg_is_enabled,
+};
diff --git a/drivers/clk/qcom/clock-pll.c b/drivers/clk/qcom/clock-pll.c
new file mode 100644
index 00000000000..6270a6a3975
--- /dev/null
+++ b/drivers/clk/qcom/clock-pll.c
@@ -0,0 +1,624 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <soc/qcom/clock-pll.h>
+
+#include "clock.h"
+
+#define PLL_OUTCTRL BIT(0)
+#define PLL_BYPASSNL BIT(1)
+#define PLL_RESET_N BIT(2)
+#define PLL_MODE_MASK BM(3, 0)
+
+#define PLL_EN_REG(x) (*(x)->base + (unsigned long) (x)->en_reg)
+#define PLL_STATUS_REG(x) (*(x)->base + (unsigned long) (x)->status_reg)
+#define PLL_MODE_REG(x) (*(x)->base + (unsigned long) (x)->mode_reg)
+#define PLL_L_REG(x) (*(x)->base + (unsigned long) (x)->l_reg)
+#define PLL_M_REG(x) (*(x)->base + (unsigned long) (x)->m_reg)
+#define PLL_N_REG(x) (*(x)->base + (unsigned long) (x)->n_reg)
+#define PLL_CONFIG_REG(x) (*(x)->base + (unsigned long) (x)->config_reg)
+#define PLL_CFG_CTL_REG(x) (*(x)->base + (unsigned long) \
+ (x)->config_alt_reg)
+
+static DEFINE_SPINLOCK(pll_reg_lock);
+
+#define ENABLE_WAIT_MAX_LOOPS 200
+#define PLL_LOCKED_BIT BIT(16)
+
+static long fixed_pll_clk_round_rate(struct clk *c, unsigned long rate)
+{
+ return c->rate;
+}
+
+static int pll_vote_clk_enable(struct clk *c)
+{
+ u32 ena, count;
+ unsigned long flags;
+ struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+ spin_lock_irqsave(&pll_reg_lock, flags);
+ ena = readl_relaxed(PLL_EN_REG(pllv));
+ ena |= pllv->en_mask;
+ writel_relaxed(ena, PLL_EN_REG(pllv));
+ spin_unlock_irqrestore(&pll_reg_lock, flags);
+
+ /*
+ * Use a memory barrier since some PLL status registers are
+ * not within the same 1K segment as the voting registers.
+ */
+ mb();
+
+ /* Wait for pll to enable. */
+ for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
+ if (readl_relaxed(PLL_STATUS_REG(pllv)) & pllv->status_mask)
+ return 0;
+ udelay(1);
+ }
+
+ WARN("PLL %s didn't enable after voting for it!\n", c->dbg_name);
+
+ return -ETIMEDOUT;
+}
+
+static void pll_vote_clk_disable(struct clk *c)
+{
+ u32 ena;
+ unsigned long flags;
+ struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+ spin_lock_irqsave(&pll_reg_lock, flags);
+ ena = readl_relaxed(PLL_EN_REG(pllv));
+ ena &= ~(pllv->en_mask);
+ writel_relaxed(ena, PLL_EN_REG(pllv));
+ spin_unlock_irqrestore(&pll_reg_lock, flags);
+}
+
+static int pll_vote_clk_is_enabled(struct clk *c)
+{
+ struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+ return !!(readl_relaxed(PLL_STATUS_REG(pllv)) & pllv->status_mask);
+}
+
+static enum handoff pll_vote_clk_handoff(struct clk *c)
+{
+ struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+ if (readl_relaxed(PLL_EN_REG(pllv)) & pllv->en_mask)
+ return HANDOFF_ENABLED_CLK;
+
+ return HANDOFF_DISABLED_CLK;
+}
+
+static void __iomem *pll_vote_clk_list_registers(struct clk *c, int n,
+ struct clk_register_data **regs, u32 *size)
+{
+ struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+ static struct clk_register_data data1[] = {
+ {"APPS_VOTE", 0x0},
+ };
+
+ if (n)
+ return ERR_PTR(-EINVAL);
+
+ *regs = data1;
+ *size = ARRAY_SIZE(data1);
+ return PLL_EN_REG(pllv);
+}
+
+struct clk_ops clk_ops_pll_vote = {
+ .enable = pll_vote_clk_enable,
+ .disable = pll_vote_clk_disable,
+ .is_enabled = pll_vote_clk_is_enabled,
+ .round_rate = fixed_pll_clk_round_rate,
+ .handoff = pll_vote_clk_handoff,
+ .list_registers = pll_vote_clk_list_registers,
+};
+
+static void __pll_config_reg(void __iomem *pll_config, struct pll_freq_tbl *f,
+ struct pll_config_masks *masks)
+{
+ u32 regval;
+
+ regval = readl_relaxed(pll_config);
+
+ /* Enable the MN counter if used */
+ if (f->m_val)
+ regval |= masks->mn_en_mask;
+
+ /* Set pre-divider and post-divider values */
+ regval &= ~masks->pre_div_mask;
+ regval |= f->pre_div_val;
+ regval &= ~masks->post_div_mask;
+ regval |= f->post_div_val;
+
+ /* Select VCO setting */
+ regval &= ~masks->vco_mask;
+ regval |= f->vco_val;
+
+ /* Enable main output if it has not been enabled */
+ if (masks->main_output_mask && !(regval & masks->main_output_mask))
+ regval |= masks->main_output_mask;
+
+ writel_relaxed(regval, pll_config);
+}
+
+static int sr2_pll_clk_enable(struct clk *c)
+{
+ unsigned long flags;
+ struct pll_clk *pll = to_pll_clk(c);
+ int ret = 0, count;
+ u32 mode = readl_relaxed(PLL_MODE_REG(pll));
+
+ spin_lock_irqsave(&pll_reg_lock, flags);
+
+ /* Disable PLL bypass mode. */
+ mode |= PLL_BYPASSNL;
+ writel_relaxed(mode, PLL_MODE_REG(pll));
+
+ /*
+ * H/W requires a 5us delay between disabling the bypass and
+ * de-asserting the reset. Delay 10us just to be safe.
+ */
+ mb();
+ udelay(10);
+
+ /* De-assert active-low PLL reset. */
+ mode |= PLL_RESET_N;
+ writel_relaxed(mode, PLL_MODE_REG(pll));
+
+ /* Wait for pll to lock. */
+ for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
+ if (readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)
+ break;
+ udelay(1);
+ }
+
+ if (!(readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT))
+ pr_err("PLL %s didn't lock after enabling it!\n", c->dbg_name);
+
+ /* Enable PLL output. */
+ mode |= PLL_OUTCTRL;
+ writel_relaxed(mode, PLL_MODE_REG(pll));
+
+ /* Ensure that the write above goes through before returning. */
+ mb();
+
+ spin_unlock_irqrestore(&pll_reg_lock, flags);
+ return ret;
+}
+
+static void __pll_clk_enable_reg(void __iomem *mode_reg)
+{
+ u32 mode = readl_relaxed(mode_reg);
+ /* Disable PLL bypass mode. */
+ mode |= PLL_BYPASSNL;
+ writel_relaxed(mode, mode_reg);
+
+ /*
+ * H/W requires a 5us delay between disabling the bypass and
+ * de-asserting the reset. Delay 10us just to be safe.
+ */
+ mb();
+ udelay(10);
+
+ /* De-assert active-low PLL reset. */
+ mode |= PLL_RESET_N;
+ writel_relaxed(mode, mode_reg);
+
+ /* Wait until PLL is locked. */
+ mb();
+ udelay(50);
+
+ /* Enable PLL output. */
+ mode |= PLL_OUTCTRL;
+ writel_relaxed(mode, mode_reg);
+
+ /* Ensure that the write above goes through before returning. */
+ mb();
+}
+
+static int local_pll_clk_enable(struct clk *c)
+{
+ unsigned long flags;
+ struct pll_clk *pll = to_pll_clk(c);
+
+ spin_lock_irqsave(&pll_reg_lock, flags);
+ __pll_clk_enable_reg(PLL_MODE_REG(pll));
+ spin_unlock_irqrestore(&pll_reg_lock, flags);
+
+ return 0;
+}
+
+static void __pll_clk_disable_reg(void __iomem *mode_reg)
+{
+ u32 mode = readl_relaxed(mode_reg);
+ mode &= ~PLL_MODE_MASK;
+ writel_relaxed(mode, mode_reg);
+}
+
+static void local_pll_clk_disable(struct clk *c)
+{
+ unsigned long flags;
+ struct pll_clk *pll = to_pll_clk(c);
+
+ /*
+ * Disable the PLL output, disable test mode, enable
+ * the bypass mode, and assert the reset.
+ */
+ spin_lock_irqsave(&pll_reg_lock, flags);
+ __pll_clk_disable_reg(PLL_MODE_REG(pll));
+ spin_unlock_irqrestore(&pll_reg_lock, flags);
+}
+
+static enum handoff local_pll_clk_handoff(struct clk *c)
+{
+ struct pll_clk *pll = to_pll_clk(c);
+ u32 mode = readl_relaxed(PLL_MODE_REG(pll));
+ u32 mask = PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL;
+ unsigned long parent_rate;
+ u32 lval, mval, nval, userval;
+
+ if ((mode & mask) != mask)
+ return HANDOFF_DISABLED_CLK;
+
+ /* Assume bootloaders configure PLL to c->rate */
+ if (c->rate)
+ return HANDOFF_ENABLED_CLK;
+
+ parent_rate = clk_get_rate(c->parent);
+ lval = readl_relaxed(PLL_L_REG(pll));
+ mval = readl_relaxed(PLL_M_REG(pll));
+ nval = readl_relaxed(PLL_N_REG(pll));
+ userval = readl_relaxed(PLL_CONFIG_REG(pll));
+
+ c->rate = parent_rate * lval;
+
+ if (pll->masks.mn_en_mask && userval) {
+ if (!nval)
+ nval = 1;
+ c->rate += (parent_rate * mval) / nval;
+ }
+
+ return HANDOFF_ENABLED_CLK;
+}
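+
+/*
+ * Example of the rate reconstruction above (illustrative register
+ * values): a 19.2 MHz parent with L = 41, M = 2, N = 3 yields
+ * 19.2 MHz * 41 + 19.2 MHz * 2 / 3 = 787.2 MHz + 12.8 MHz = 800 MHz.
+ */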
+
+static long local_pll_clk_round_rate(struct clk *c, unsigned long rate)
+{
+ struct pll_freq_tbl *nf;
+ struct pll_clk *pll = to_pll_clk(c);
+
+ if (!pll->freq_tbl)
+ return -EINVAL;
+
+ for (nf = pll->freq_tbl; nf->freq_hz != PLL_FREQ_END; nf++)
+ if (nf->freq_hz >= rate)
+ return nf->freq_hz;
+
+ nf--;
+ return nf->freq_hz;
+}
+
+static int local_pll_clk_set_rate(struct clk *c, unsigned long rate)
+{
+ struct pll_freq_tbl *nf;
+ struct pll_clk *pll = to_pll_clk(c);
+ unsigned long flags;
+
+ for (nf = pll->freq_tbl; nf->freq_hz != PLL_FREQ_END
+ && nf->freq_hz != rate; nf++)
+ ;
+
+ if (nf->freq_hz == PLL_FREQ_END)
+ return -EINVAL;
+
+	/*
+	 * Ensure PLL is off before changing rate. For optimization reasons,
+	 * assume no downstream clock is actively using it.
+	 */
+ spin_lock_irqsave(&c->lock, flags);
+ if (c->count)
+ c->ops->disable(c);
+
+ writel_relaxed(nf->l_val, PLL_L_REG(pll));
+ writel_relaxed(nf->m_val, PLL_M_REG(pll));
+ writel_relaxed(nf->n_val, PLL_N_REG(pll));
+
+ __pll_config_reg(PLL_CONFIG_REG(pll), nf, &pll->masks);
+
+ if (c->count)
+ c->ops->enable(c);
+
+ spin_unlock_irqrestore(&c->lock, flags);
+ return 0;
+}
+
+int sr_pll_clk_enable(struct clk *c)
+{
+ u32 mode;
+ unsigned long flags;
+ struct pll_clk *pll = to_pll_clk(c);
+
+ spin_lock_irqsave(&pll_reg_lock, flags);
+ mode = readl_relaxed(PLL_MODE_REG(pll));
+ /* De-assert active-low PLL reset. */
+ mode |= PLL_RESET_N;
+ writel_relaxed(mode, PLL_MODE_REG(pll));
+
+ /*
+ * H/W requires a 5us delay between disabling the bypass and
+ * de-asserting the reset. Delay 10us just to be safe.
+ */
+ mb();
+ udelay(10);
+
+ /* Disable PLL bypass mode. */
+ mode |= PLL_BYPASSNL;
+ writel_relaxed(mode, PLL_MODE_REG(pll));
+
+ /* Wait until PLL is locked. */
+ mb();
+ udelay(60);
+
+ /* Enable PLL output. */
+ mode |= PLL_OUTCTRL;
+ writel_relaxed(mode, PLL_MODE_REG(pll));
+
+ /* Ensure that the write above goes through before returning. */
+ mb();
+
+ spin_unlock_irqrestore(&pll_reg_lock, flags);
+
+ return 0;
+}
+
+int sr_hpm_lp_pll_clk_enable(struct clk *c)
+{
+ unsigned long flags;
+ struct pll_clk *pll = to_pll_clk(c);
+ u32 count, mode;
+ int ret = 0;
+
+ spin_lock_irqsave(&pll_reg_lock, flags);
+
+ /* Disable PLL bypass mode and de-assert reset. */
+ mode = PLL_BYPASSNL | PLL_RESET_N;
+ writel_relaxed(mode, PLL_MODE_REG(pll));
+
+ /* Wait for pll to lock. */
+ for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
+ if (readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)
+ break;
+ udelay(1);
+ }
+
+ if (!(readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)) {
+ WARN("PLL %s didn't lock after enabling it!\n", c->dbg_name);
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ /* Enable PLL output. */
+ mode |= PLL_OUTCTRL;
+ writel_relaxed(mode, PLL_MODE_REG(pll));
+
+ /* Ensure the write above goes through before returning. */
+ mb();
+
+out:
+ spin_unlock_irqrestore(&pll_reg_lock, flags);
+ return ret;
+}
+
+static void __iomem *local_pll_clk_list_registers(struct clk *c, int n,
+ struct clk_register_data **regs, u32 *size)
+{
+ /* Not compatible with 8960 & friends */
+ struct pll_clk *pll = to_pll_clk(c);
+ static struct clk_register_data data[] = {
+ {"MODE", 0x0},
+ {"L", 0x4},
+ {"M", 0x8},
+ {"N", 0xC},
+ {"USER", 0x10},
+ {"CONFIG", 0x14},
+ {"STATUS", 0x1C},
+ };
+ if (n)
+ return ERR_PTR(-EINVAL);
+
+ *regs = data;
+ *size = ARRAY_SIZE(data);
+ return PLL_MODE_REG(pll);
+}
+
+
+struct clk_ops clk_ops_local_pll = {
+ .enable = local_pll_clk_enable,
+ .disable = local_pll_clk_disable,
+ .set_rate = local_pll_clk_set_rate,
+ .handoff = local_pll_clk_handoff,
+ .list_registers = local_pll_clk_list_registers,
+};
+
+struct clk_ops clk_ops_sr2_pll = {
+ .enable = sr2_pll_clk_enable,
+ .disable = local_pll_clk_disable,
+ .set_rate = local_pll_clk_set_rate,
+ .round_rate = local_pll_clk_round_rate,
+ .handoff = local_pll_clk_handoff,
+ .list_registers = local_pll_clk_list_registers,
+};
+
+static DEFINE_SPINLOCK(soft_vote_lock);
+
+static int pll_acpu_vote_clk_enable(struct clk *c)
+{
+ int ret = 0;
+ unsigned long flags;
+ struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+ spin_lock_irqsave(&soft_vote_lock, flags);
+
+ if (!*pllv->soft_vote)
+ ret = pll_vote_clk_enable(c);
+ if (ret == 0)
+ *pllv->soft_vote |= (pllv->soft_vote_mask);
+
+ spin_unlock_irqrestore(&soft_vote_lock, flags);
+ return ret;
+}
+
+static void pll_acpu_vote_clk_disable(struct clk *c)
+{
+ unsigned long flags;
+ struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+ spin_lock_irqsave(&soft_vote_lock, flags);
+
+ *pllv->soft_vote &= ~(pllv->soft_vote_mask);
+ if (!*pllv->soft_vote)
+ pll_vote_clk_disable(c);
+
+ spin_unlock_irqrestore(&soft_vote_lock, flags);
+}
+
+static enum handoff pll_acpu_vote_clk_handoff(struct clk *c)
+{
+ if (pll_vote_clk_handoff(c) == HANDOFF_DISABLED_CLK)
+ return HANDOFF_DISABLED_CLK;
+
+ if (pll_acpu_vote_clk_enable(c))
+ return HANDOFF_DISABLED_CLK;
+
+ return HANDOFF_ENABLED_CLK;
+}
+
+struct clk_ops clk_ops_pll_acpu_vote = {
+ .enable = pll_acpu_vote_clk_enable,
+ .disable = pll_acpu_vote_clk_disable,
+ .round_rate = fixed_pll_clk_round_rate,
+ .is_enabled = pll_vote_clk_is_enabled,
+ .handoff = pll_acpu_vote_clk_handoff,
+ .list_registers = pll_vote_clk_list_registers,
+};
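+
+/*
+ * Soft-vote illustration (hypothetical, not from this driver): two CPU
+ * clusters can share one vote word so that the underlying vote is only
+ * dropped once both have disabled the PLL:
+ *
+ *	static u32 example_soft_votes;
+ *
+ * Both pll_vote_clk instances would point .soft_vote at
+ * &example_soft_votes, with .soft_vote_mask set to BIT(0) and BIT(1)
+ * respectively.
+ */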
+
+static void __set_fsm_mode(void __iomem *mode_reg,
+ u32 bias_count, u32 lock_count)
+{
+ u32 regval = readl_relaxed(mode_reg);
+
+ /* De-assert reset to FSM */
+ regval &= ~BIT(21);
+ writel_relaxed(regval, mode_reg);
+
+ /* Program bias count */
+ regval &= ~BM(19, 14);
+ regval |= BVAL(19, 14, bias_count);
+ writel_relaxed(regval, mode_reg);
+
+ /* Program lock count */
+ regval &= ~BM(13, 8);
+ regval |= BVAL(13, 8, lock_count);
+ writel_relaxed(regval, mode_reg);
+
+ /* Enable PLL FSM voting */
+ regval |= BIT(20);
+ writel_relaxed(regval, mode_reg);
+}
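+
+/*
+ * Summary of the MODE register fields programmed above: bit 21 holds the
+ * FSM in reset (cleared first), bits 19:14 carry the bias count, bits
+ * 13:8 the lock count, and bit 20 enables FSM voting once the counts are
+ * in place.
+ */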
+
+static void __configure_alt_config(struct pll_alt_config config,
+ struct pll_config_regs *regs)
+{
+ u32 regval;
+
+ regval = readl_relaxed(PLL_CFG_CTL_REG(regs));
+
+ if (config.mask) {
+ regval &= ~config.mask;
+ regval |= config.val;
+ }
+
+ writel_relaxed(regval, PLL_CFG_CTL_REG(regs));
+}
+
+void __configure_pll(struct pll_config *config,
+ struct pll_config_regs *regs, u32 ena_fsm_mode)
+{
+ u32 regval;
+
+ writel_relaxed(config->l, PLL_L_REG(regs));
+ writel_relaxed(config->m, PLL_M_REG(regs));
+ writel_relaxed(config->n, PLL_N_REG(regs));
+
+ regval = readl_relaxed(PLL_CONFIG_REG(regs));
+
+ /* Enable the MN accumulator */
+ if (config->mn_ena_mask) {
+ regval &= ~config->mn_ena_mask;
+ regval |= config->mn_ena_val;
+ }
+
+ /* Enable the main output */
+ if (config->main_output_mask) {
+ regval &= ~config->main_output_mask;
+ regval |= config->main_output_val;
+ }
+
+ /* Enable the aux output */
+ if (config->aux_output_mask) {
+ regval &= ~config->aux_output_mask;
+ regval |= config->aux_output_val;
+ }
+
+ /* Set pre-divider and post-divider values */
+ regval &= ~config->pre_div_mask;
+ regval |= config->pre_div_val;
+ regval &= ~config->post_div_mask;
+ regval |= config->post_div_val;
+
+ /* Select VCO setting */
+ regval &= ~config->vco_mask;
+ regval |= config->vco_val;
+
+ if (config->add_factor_mask) {
+ regval &= ~config->add_factor_mask;
+ regval |= config->add_factor_val;
+ }
+
+ writel_relaxed(regval, PLL_CONFIG_REG(regs));
+
+ if (regs->config_alt_reg)
+ __configure_alt_config(config->alt_cfg, regs);
+}
+
+void configure_sr_pll(struct pll_config *config,
+ struct pll_config_regs *regs, u32 ena_fsm_mode)
+{
+ __configure_pll(config, regs, ena_fsm_mode);
+ if (ena_fsm_mode)
+ __set_fsm_mode(PLL_MODE_REG(regs), 0x1, 0x8);
+}
+
+void configure_sr_hpm_lp_pll(struct pll_config *config,
+ struct pll_config_regs *regs, u32 ena_fsm_mode)
+{
+ __configure_pll(config, regs, ena_fsm_mode);
+ if (ena_fsm_mode)
+ __set_fsm_mode(PLL_MODE_REG(regs), 0x1, 0x0);
+}
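+
+/*
+ * The two wrappers above differ only in the FSM lock count they program
+ * (0x8 for SR PLLs, 0x0 for SR HPM/LP PLLs). A minimal caller sketch with
+ * purely illustrative values follows; real L/M/N values and masks are
+ * SoC-specific and come from the platform's clock tables:
+ *
+ *	static struct pll_config example_pll_config = {
+ *		.l = 52,
+ *		.m = 1,
+ *		.n = 2,
+ *		.vco_val = 0x0,
+ *		.vco_mask = BM(17, 16),
+ *		.main_output_val = BIT(23),
+ *		.main_output_mask = BIT(23),
+ *	};
+ *
+ *	configure_sr_pll(&example_pll_config, &example_pll_regs, 1);
+ */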
+
diff --git a/drivers/clk/qcom/clock-rpm.c b/drivers/clk/qcom/clock-rpm.c
new file mode 100644
index 00000000000..5f830d6c7e9
--- /dev/null
+++ b/drivers/clk/qcom/clock-rpm.c
@@ -0,0 +1,308 @@
+/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <soc/qcom/clock-rpm.h>
+
+#define __clk_rpmrs_set_rate(r, value, ctx) \
+ ((r)->rpmrs_data->set_rate_fn((r), (value), (ctx)))
+
+#define clk_rpmrs_set_rate_sleep(r, value) \
+ __clk_rpmrs_set_rate((r), (value), (r)->rpmrs_data->ctx_sleep_id)
+
+#define clk_rpmrs_set_rate_active(r, value) \
+ __clk_rpmrs_set_rate((r), (value), (r)->rpmrs_data->ctx_active_id)
+
+static int clk_rpmrs_set_rate_smd(struct rpm_clk *r, uint32_t value,
+ uint32_t context)
+{
+ struct msm_rpm_kvp kvp = {
+ .key = r->rpm_key,
+ .data = (void *)&value,
+ .length = sizeof(value),
+ };
+
+ return msm_rpm_send_message(context, r->rpm_res_type, r->rpm_clk_id,
+ &kvp, 1);
+}
+
+static int clk_rpmrs_handoff_smd(struct rpm_clk *r)
+{
+ if (!r->branch)
+ r->c.rate = INT_MAX;
+
+ return 0;
+}
+
+static int clk_rpmrs_is_enabled_smd(struct rpm_clk *r)
+{
+ return !!r->c.prepare_count;
+}
+
+struct clk_rpmrs_data {
+ int (*set_rate_fn)(struct rpm_clk *r, uint32_t value, uint32_t context);
+ int (*get_rate_fn)(struct rpm_clk *r);
+ int (*handoff_fn)(struct rpm_clk *r);
+ int (*is_enabled)(struct rpm_clk *r);
+ int ctx_active_id;
+ int ctx_sleep_id;
+};
+
+struct clk_rpmrs_data clk_rpmrs_data_smd = {
+ .set_rate_fn = clk_rpmrs_set_rate_smd,
+ .handoff_fn = clk_rpmrs_handoff_smd,
+ .is_enabled = clk_rpmrs_is_enabled_smd,
+ .ctx_active_id = MSM_RPM_CTX_ACTIVE_SET,
+ .ctx_sleep_id = MSM_RPM_CTX_SLEEP_SET,
+};
+
+static DEFINE_MUTEX(rpm_clock_lock);
+
+static void to_active_sleep_khz(struct rpm_clk *r, unsigned long rate,
+ unsigned long *active_khz, unsigned long *sleep_khz)
+{
+	/* Convert the rate (Hz) to kHz. */
+ *active_khz = DIV_ROUND_UP(rate, 1000);
+
+ /*
+ * Active-only clocks don't care what the rate is during sleep. So,
+ * they vote for zero.
+ */
+ if (r->active_only)
+ *sleep_khz = 0;
+ else
+ *sleep_khz = *active_khz;
+}
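+
+/*
+ * Example: a 19,200,000 Hz rate becomes a 19200 kHz active-set vote. An
+ * active-only clock pairs that with a 0 kHz sleep-set vote; any other
+ * clock votes 19200 kHz in both sets.
+ */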
+
+static int rpm_clk_prepare(struct clk *clk)
+{
+ struct rpm_clk *r = to_rpm_clk(clk);
+ uint32_t value;
+ int rc = 0;
+ unsigned long this_khz, this_sleep_khz;
+ unsigned long peer_khz = 0, peer_sleep_khz = 0;
+ struct rpm_clk *peer = r->peer;
+
+ mutex_lock(&rpm_clock_lock);
+
+ to_active_sleep_khz(r, r->c.rate, &this_khz, &this_sleep_khz);
+
+ /* Don't send requests to the RPM if the rate has not been set. */
+ if (this_khz == 0)
+ goto out;
+
+ /* Take peer clock's rate into account only if it's enabled. */
+ if (peer->enabled)
+ to_active_sleep_khz(peer, peer->c.rate,
+ &peer_khz, &peer_sleep_khz);
+
+ value = max(this_khz, peer_khz);
+ if (r->branch)
+ value = !!value;
+
+ rc = clk_rpmrs_set_rate_active(r, value);
+ if (rc)
+ goto out;
+
+ value = max(this_sleep_khz, peer_sleep_khz);
+ if (r->branch)
+ value = !!value;
+
+ rc = clk_rpmrs_set_rate_sleep(r, value);
+ if (rc) {
+ /* Undo the active set vote and restore it to peer_khz */
+ value = peer_khz;
+ rc = clk_rpmrs_set_rate_active(r, value);
+ }
+
+out:
+ if (!rc)
+ r->enabled = true;
+
+ mutex_unlock(&rpm_clock_lock);
+
+ return rc;
+}
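+
+/*
+ * Aggregation example for the prepare path above: if this clock votes
+ * 100000 kHz and its enabled peer votes 200000 kHz, the active-set
+ * request sent to the RPM is max(100000, 200000) = 200000 kHz. Branch
+ * clocks collapse the aggregate to a 0/1 enable value instead.
+ */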
+
+static void rpm_clk_unprepare(struct clk *clk)
+{
+ struct rpm_clk *r = to_rpm_clk(clk);
+
+ mutex_lock(&rpm_clock_lock);
+
+ if (r->c.rate) {
+ uint32_t value;
+ struct rpm_clk *peer = r->peer;
+ unsigned long peer_khz = 0, peer_sleep_khz = 0;
+ int rc;
+
+ /* Take peer clock's rate into account only if it's enabled. */
+ if (peer->enabled)
+ to_active_sleep_khz(peer, peer->c.rate,
+ &peer_khz, &peer_sleep_khz);
+
+ value = r->branch ? !!peer_khz : peer_khz;
+ rc = clk_rpmrs_set_rate_active(r, value);
+ if (rc)
+ goto out;
+
+ value = r->branch ? !!peer_sleep_khz : peer_sleep_khz;
+ rc = clk_rpmrs_set_rate_sleep(r, value);
+ }
+ r->enabled = false;
+out:
+ mutex_unlock(&rpm_clock_lock);
+}
+
+static int rpm_clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ struct rpm_clk *r = to_rpm_clk(clk);
+ unsigned long this_khz, this_sleep_khz;
+ int rc = 0;
+
+ mutex_lock(&rpm_clock_lock);
+
+ if (r->enabled) {
+ uint32_t value;
+ struct rpm_clk *peer = r->peer;
+ unsigned long peer_khz = 0, peer_sleep_khz = 0;
+
+ to_active_sleep_khz(r, rate, &this_khz, &this_sleep_khz);
+
+ /* Take peer clock's rate into account only if it's enabled. */
+ if (peer->enabled)
+ to_active_sleep_khz(peer, peer->c.rate,
+ &peer_khz, &peer_sleep_khz);
+
+ value = max(this_khz, peer_khz);
+ rc = clk_rpmrs_set_rate_active(r, value);
+ if (rc)
+ goto out;
+
+ value = max(this_sleep_khz, peer_sleep_khz);
+ rc = clk_rpmrs_set_rate_sleep(r, value);
+ }
+
+out:
+ mutex_unlock(&rpm_clock_lock);
+
+ return rc;
+}
+
+static int rpm_branch_clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ if (rate == clk->rate)
+ return 0;
+
+ return -EPERM;
+}
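+
+/*
+ * Branch clocks are on/off handles with no rate of their own, so the
+ * only rate accepted here is the one already cached on the clock.
+ */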
+
+static unsigned long rpm_clk_get_rate(struct clk *clk)
+{
+ struct rpm_clk *r = to_rpm_clk(clk);
+
+	if (r->rpmrs_data->get_rate_fn)
+ return r->rpmrs_data->get_rate_fn(r);
+ else
+ return clk->rate;
+}
+
+static int rpm_clk_is_enabled(struct clk *clk)
+{
+ struct rpm_clk *r = to_rpm_clk(clk);
+
+	return r->rpmrs_data->is_enabled(r);
+}
+
+static long rpm_clk_round_rate(struct clk *clk, unsigned long rate)
+{
+	/* Rate rounding is not supported; return the requested rate. */
+ return rate;
+}
+
+static bool rpm_clk_is_local(struct clk *clk)
+{
+ return false;
+}
+
+static enum handoff rpm_clk_handoff(struct clk *clk)
+{
+ struct rpm_clk *r = to_rpm_clk(clk);
+ int rc;
+
+ /*
+ * Querying an RPM clock's status will return 0 unless the clock's
+ * rate has previously been set through the RPM. When handing off,
+ * assume these clocks are enabled (unless the RPM call fails) so
+ * child clocks of these RPM clocks can still be handed off.
+ */
+ rc = r->rpmrs_data->handoff_fn(r);
+ if (rc < 0)
+ return HANDOFF_DISABLED_CLK;
+
+ /*
+ * Since RPM handoff code may update the software rate of the clock by
+ * querying the RPM, we need to make sure our request to RPM now
+ * matches the software rate of the clock. When we send the request
+ * to RPM, we also need to update any other state info we would
+ * normally update. So, call the appropriate clock function instead
+ * of directly using the RPM driver APIs.
+ */
+ rc = rpm_clk_prepare(clk);
+ if (rc < 0)
+ return HANDOFF_DISABLED_CLK;
+
+ return HANDOFF_ENABLED_CLK;
+}
+
+#define RPM_MISC_CLK_TYPE 0x306b6c63
+#define RPM_SCALING_ENABLE_ID 0x2
+
+void enable_rpm_scaling(void)
+{
+ int rc, value = 0x1;
+ struct msm_rpm_kvp kvp = {
+ .key = RPM_SMD_KEY_ENABLE,
+ .data = (void *)&value,
+ .length = sizeof(value),
+ };
+
+ rc = msm_rpm_send_message_noirq(MSM_RPM_CTX_SLEEP_SET,
+ RPM_MISC_CLK_TYPE, RPM_SCALING_ENABLE_ID, &kvp, 1);
+ WARN(rc < 0, "RPM clock scaling (sleep set) did not enable!\n");
+
+ rc = msm_rpm_send_message_noirq(MSM_RPM_CTX_ACTIVE_SET,
+ RPM_MISC_CLK_TYPE, RPM_SCALING_ENABLE_ID, &kvp, 1);
+ WARN(rc < 0, "RPM clock scaling (active set) did not enable!\n");
+}
+
+struct clk_ops clk_ops_rpm = {
+ .prepare = rpm_clk_prepare,
+ .unprepare = rpm_clk_unprepare,
+ .set_rate = rpm_clk_set_rate,
+ .get_rate = rpm_clk_get_rate,
+ .is_enabled = rpm_clk_is_enabled,
+ .round_rate = rpm_clk_round_rate,
+ .is_local = rpm_clk_is_local,
+ .handoff = rpm_clk_handoff,
+};
+
+struct clk_ops clk_ops_rpm_branch = {
+ .prepare = rpm_clk_prepare,
+ .unprepare = rpm_clk_unprepare,
+ .set_rate = rpm_branch_clk_set_rate,
+ .is_local = rpm_clk_is_local,
+ .handoff = rpm_clk_handoff,
+};
diff --git a/drivers/clk/qcom/clock-voter.c b/drivers/clk/qcom/clock-voter.c
new file mode 100644
index 00000000000..c6066091292
--- /dev/null
+++ b/drivers/clk/qcom/clock-voter.c
@@ -0,0 +1,175 @@
+/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <soc/qcom/clock-voter.h>
+
+static DEFINE_MUTEX(voter_clk_lock);
+
+/* Aggregate the rate of clocks that are currently on. */
+static unsigned long voter_clk_aggregate_rate(const struct clk *parent)
+{
+ struct clk *clk;
+ unsigned long rate = 0;
+
+ list_for_each_entry(clk, &parent->children, siblings) {
+ struct clk_voter *v = to_clk_voter(clk);
+ if (v->enabled)
+ rate = max(clk->rate, rate);
+ }
+ return rate;
+}
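+
+/*
+ * Example: with voters on one parent at 50 MHz (disabled), 100 MHz
+ * (enabled) and 200 MHz (enabled), the aggregate is 200 MHz; disabled
+ * voters contribute nothing.
+ */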
+
+static int voter_clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ int ret = 0;
+ struct clk *clkp;
+ struct clk_voter *clkh, *v = to_clk_voter(clk);
+ unsigned long cur_rate, new_rate, other_rate = 0;
+
+ if (v->is_branch)
+ return 0;
+
+ mutex_lock(&voter_clk_lock);
+
+ if (v->enabled) {
+ struct clk *parent = clk->parent;
+
+ /*
+		 * Get the aggregate rate without this clock's vote and update
+		 * the parent if the new rate differs from the current rate.
+ */
+ list_for_each_entry(clkp, &parent->children, siblings) {
+ clkh = to_clk_voter(clkp);
+ if (clkh->enabled && clkh != v)
+ other_rate = max(clkp->rate, other_rate);
+ }
+
+ cur_rate = max(other_rate, clk->rate);
+ new_rate = max(other_rate, rate);
+
+ if (new_rate != cur_rate) {
+ ret = clk_set_rate(parent, new_rate);
+ if (ret)
+ goto unlock;
+ }
+ }
+ clk->rate = rate;
+unlock:
+ mutex_unlock(&voter_clk_lock);
+
+ return ret;
+}
+
+static int voter_clk_prepare(struct clk *clk)
+{
+ int ret = 0;
+ unsigned long cur_rate;
+ struct clk *parent;
+ struct clk_voter *v = to_clk_voter(clk);
+
+ mutex_lock(&voter_clk_lock);
+ parent = clk->parent;
+
+ if (v->is_branch) {
+ v->enabled = true;
+ goto out;
+ }
+
+ /*
+ * Increase the rate if this clock is voting for a higher rate
+ * than the current rate.
+ */
+ cur_rate = voter_clk_aggregate_rate(parent);
+ if (clk->rate > cur_rate) {
+ ret = clk_set_rate(parent, clk->rate);
+ if (ret)
+ goto out;
+ }
+ v->enabled = true;
+out:
+ mutex_unlock(&voter_clk_lock);
+
+ return ret;
+}
+
+static void voter_clk_unprepare(struct clk *clk)
+{
+ unsigned long cur_rate, new_rate;
+ struct clk *parent;
+ struct clk_voter *v = to_clk_voter(clk);
+
+ mutex_lock(&voter_clk_lock);
+ parent = clk->parent;
+
+ /*
+ * Decrease the rate if this clock was the only one voting for
+ * the highest rate.
+ */
+ v->enabled = false;
+ if (v->is_branch)
+ goto out;
+
+ new_rate = voter_clk_aggregate_rate(parent);
+ cur_rate = max(new_rate, clk->rate);
+
+ if (new_rate < cur_rate)
+ clk_set_rate(parent, new_rate);
+
+out:
+ mutex_unlock(&voter_clk_lock);
+}
+
+static int voter_clk_is_enabled(struct clk *clk)
+{
+ struct clk_voter *v = to_clk_voter(clk);
+
+	return v->enabled;
+}
+
+static long voter_clk_round_rate(struct clk *clk, unsigned long rate)
+{
+ return clk_round_rate(clk->parent, rate);
+}
+
+static bool voter_clk_is_local(struct clk *clk)
+{
+ return true;
+}
+
+static enum handoff voter_clk_handoff(struct clk *clk)
+{
+ if (!clk->rate)
+ return HANDOFF_DISABLED_CLK;
+
+ /*
+ * Send the default rate to the parent if necessary and update the
+ * software state of the voter clock.
+ */
+ if (voter_clk_prepare(clk) < 0)
+ return HANDOFF_DISABLED_CLK;
+
+ return HANDOFF_ENABLED_CLK;
+}
+
+struct clk_ops clk_ops_voter = {
+ .prepare = voter_clk_prepare,
+ .unprepare = voter_clk_unprepare,
+ .set_rate = voter_clk_set_rate,
+ .is_enabled = voter_clk_is_enabled,
+ .round_rate = voter_clk_round_rate,
+ .is_local = voter_clk_is_local,
+ .handoff = voter_clk_handoff,
+};
diff --git a/drivers/clk/qcom/clock.c b/drivers/clk/qcom/clock.c
index e69f0cbdd65..1f5092e1ffe 100644
--- a/drivers/clk/qcom/clock.c
+++ b/drivers/clk/qcom/clock.c
@@ -1,7 +1,7 @@
/* arch/arm/mach-msm/clock.c
*
* Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2007-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2007-2014, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -488,9 +488,6 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
if (IS_ERR_OR_NULL(clk))
return -EINVAL;
- if (!clk->ops->set_rate)
- return -ENOSYS;
-
if (!is_rate_valid(clk, rate))
return -EINVAL;
@@ -500,6 +497,11 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
if (clk->rate == rate && !(clk->flags & CLKFLAG_NO_RATE_CACHE))
goto out;
+ if (!clk->ops->set_rate) {
+ rc = -ENOSYS;
+ goto out;
+ }
+
trace_clock_set_rate(name, rate, raw_smp_processor_id());
start_rate = clk->rate;
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
index 99aeab501ae..b1e03134273 100644
--- a/drivers/clk/qcom/gdsc.c
+++ b/drivers/clk/qcom/gdsc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -83,8 +83,8 @@ static int gdsc_enable(struct regulator_dev *rdev)
ret = readl_tight_poll_timeout(sc->gdscr, regval,
regval & PWR_ON_MASK, TIMEOUT_US);
if (ret) {
- dev_err(&rdev->dev, "%s enable timed out\n",
- sc->rdesc.name);
+ dev_err(&rdev->dev, "%s enable timed out: 0x%x\n",
+ sc->rdesc.name, regval);
return ret;
}
} else {
@@ -139,8 +139,8 @@ static int gdsc_disable(struct regulator_dev *rdev)
!(regval & PWR_ON_MASK),
TIMEOUT_US);
if (ret)
- dev_err(&rdev->dev, "%s disable timed out\n",
- sc->rdesc.name);
+ dev_err(&rdev->dev, "%s disable timed out: 0x%x\n",
+ sc->rdesc.name, regval);
} else {
for (i = sc->clock_count-1; i >= 0; i--)
clk_reset(sc->clocks[i], CLK_RESET_ASSERT);
@@ -212,8 +212,8 @@ static int gdsc_set_mode(struct regulator_dev *rdev, unsigned int mode)
ret = readl_tight_poll_timeout(sc->gdscr, regval,
regval & PWR_ON_MASK, TIMEOUT_US);
if (ret) {
- dev_err(&rdev->dev, "%s set_mode timed out\n",
- sc->rdesc.name);
+ dev_err(&rdev->dev, "%s set_mode timed out: 0x%x\n",
+ sc->rdesc.name, regval);
return ret;
}
break;
@@ -334,8 +334,8 @@ static int gdsc_probe(struct platform_device *pdev)
ret = readl_tight_poll_timeout(sc->gdscr, regval,
regval & PWR_ON_MASK, TIMEOUT_US);
if (ret) {
- dev_err(&pdev->dev, "%s enable timed out\n",
- sc->rdesc.name);
+ dev_err(&pdev->dev, "%s enable timed out: 0x%x\n",
+ sc->rdesc.name, regval);
return ret;
}
}