/**
* omap2_init_clk_hw_omap_clocks - initialize an OMAP clock
- * @clk: struct clk * to initialize
+ * @hw: struct clk_hw * to initialize
*
- * Add an OMAP clock @clk to the internal list of OMAP clocks. Used
+ * Add an OMAP clock @hw to the internal list of OMAP clocks. Used
* temporarily for autoidle handling, until this support can be
* integrated into the common clock framework code in some way. No
* return value.
*/
-void omap2_init_clk_hw_omap_clocks(struct clk *clk)
+void omap2_init_clk_hw_omap_clocks(struct clk_hw *hw)
{
struct clk_hw_omap *c;
- if (__clk_get_flags(clk) & CLK_IS_BASIC)
+ if (clk_hw_get_flags(hw) & CLK_IS_BASIC)
return;
- c = to_clk_hw_omap(__clk_get_hw(clk));
+ c = to_clk_hw_omap(hw);
list_add(&c->node, &clk_hw_omap_clocks);
}
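
The same conversion pattern repeats throughout this patch: callers that previously handed over a consumer-side struct clk * now pass the provider-side struct clk_hw * directly. A minimal sketch of an updated caller, assuming a hypothetical registration path that already owns the clk_hw_omap:

/* Hypothetical caller: the provider already owns the clk_hw. */
static void example_register_dpll(struct clk_hw_omap *clk_hw)
{
	/* before: omap2_init_clk_hw_omap_clocks(clk_hw->hw.clk); */
	omap2_init_clk_hw_omap_clocks(&clk_hw->hw);
}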
#include <linux/kernel.h>
#include <linux/errno.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/clk/ti.h>
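
The new <linux/clk.h> include is needed because fields that remain struct clk * (dd->clk_ref, dd->clk_bypass) are now read through the public consumer API, while <linux/clk-provider.h> supplies the clk_hw_* helpers used where a struct clk_hw * is already at hand. A short sketch of the split, assuming a dpll_data pointer dd and a clk_hw pointer hw as in the hunks below:

unsigned long bypass = clk_get_rate(dd->clk_bypass);		/* linux/clk.h */
unsigned long parent = clk_hw_get_rate(clk_hw_get_parent(hw));	/* linux/clk-provider.h */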
dd = clk->dpll_data;
/* DPLL divider must result in a valid jitter correction val */
- fint = __clk_get_rate(__clk_get_parent(clk->hw.clk)) / n;
+ fint = clk_hw_get_rate(clk_hw_get_parent(&clk->hw)) / n;
if (dd->flags & DPLL_J_TYPE) {
fint_min = OMAP3PLUS_DPLL_FINT_JTYPE_MIN;
v >>= __ffs(dd->enable_mask);
if (_omap2_dpll_is_in_bypass(v))
- return __clk_get_rate(dd->clk_bypass);
+ return clk_get_rate(dd->clk_bypass);
v = ti_clk_ll_ops->clk_readl(dd->mult_div1_reg);
dpll_mult = v & dd->mult_mask;
dpll_div = v & dd->div1_mask;
dpll_div >>= __ffs(dd->div1_mask);
- dpll_clk = (long long)__clk_get_rate(dd->clk_ref) * dpll_mult;
+ dpll_clk = (long long)clk_get_rate(dd->clk_ref) * dpll_mult;
do_div(dpll_clk, dpll_div + 1);
return dpll_clk;
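
The recalc above implements the standard DPLL relation rate = ref * M / (N + 1). A worked example with hypothetical register values:

/*
 * Worked example (hypothetical values): with a 19.2 MHz reference,
 * dpll_mult = 625 and dpll_div = 11, the locked rate is
 * 19200000 * 625 / (11 + 1) = 1000000000 Hz (1 GHz).
 */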
dd = clk->dpll_data;
- ref_rate = __clk_get_rate(dd->clk_ref);
- clk_name = __clk_get_name(hw->clk);
+ ref_rate = clk_get_rate(dd->clk_ref);
+ clk_name = clk_hw_get_name(hw);
pr_debug("clock: %s: starting DPLL round_rate, target rate %lu\n",
clk_name, target_rate);
ti_of_clk_init_cb_t func);
int ti_clk_add_component(struct device_node *node, struct clk_hw *hw, int type);
-void omap2_init_clk_hw_omap_clocks(struct clk *clk);
+void omap2_init_clk_hw_omap_clocks(struct clk_hw *hw);
int of_ti_clk_autoidle_setup(struct device_node *node);
void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks);
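
For context, omap2_clk_enable_init_clocks() takes an array of clock names to hold enabled from boot. A minimal usage sketch, with hypothetical clock names:

/* Hypothetical SoC init: keep essential clocks enabled from boot. */
static const char *enable_init_clks[] = {
	"hypo_emif_fck",	/* hypothetical names */
	"hypo_gpmc_ick",
};

omap2_clk_enable_init_clocks(enable_init_clks, ARRAY_SIZE(enable_init_clks));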
if (!div) {
WARN(!(divider->flags & CLK_DIVIDER_ALLOW_ZERO),
"%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
- __clk_get_name(hw->clk));
+ clk_hw_get_name(hw));
return parent_rate;
}
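
CLK_DIVIDER_ALLOW_ZERO exists for dividers whose bitfield legitimately reads zero (treated as divide-by-one), so the warning only fires when a zero divisor is genuinely invalid. A hedged registration sketch for such a divider, with hypothetical names and register layout:

#include <linux/clk-provider.h>

static DEFINE_SPINLOCK(hypo_lock);

/* Hypothetical 4-bit divider at offset 0x40 where raw 0 means divide-by-1. */
static struct clk *hypo_register_div(void __iomem *base)
{
	return clk_register_divider(NULL, "hypo_div_ck", "hypo_parent_ck", 0,
				    base + 0x40, 0, 4,
				    CLK_DIVIDER_ALLOW_ZERO, &hypo_lock);
}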
*best_parent_rate = parent_rate_saved;
return i;
}
- parent_rate = __clk_round_rate(__clk_get_parent(hw->clk),
+ parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
MULT_ROUND_UP(rate, i));
now = DIV_ROUND_UP(parent_rate, i);
if (now <= rate && now > best) {
if (!bestdiv) {
bestdiv = _get_maxdiv(divider);
*best_parent_rate =
- __clk_round_rate(__clk_get_parent(hw->clk), 1);
+ clk_hw_round_rate(clk_hw_get_parent(hw), 1);
}
return bestdiv;
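
The search logic rewards a close reading: MULT_ROUND_UP(rate, i) expands to rate * i + (i - 1), so the parent is asked for the highest rate that can still map back to the target. A worked example with hypothetical numbers:

/*
 * Example (hypothetical numbers): target rate 48 MHz, candidate
 * divisor i = 4. The parent is asked to round MULT_ROUND_UP(48000000, 4)
 * = 192000003 Hz; if it can deliver 192 MHz, DIV_ROUND_UP(192000000, 4)
 * yields exactly 48 MHz. When no divisor fits, clk_hw_round_rate(parent, 1)
 * probes the parent's minimum rate to pair with the maximum divider.
 */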
const char *clk_name;
dd = clk->dpll_data;
- clk_name = __clk_get_name(clk->hw.clk);
+ clk_name = clk_hw_get_name(&clk->hw);
state <<= __ffs(dd->idlest_mask);
unsigned long fint;
u16 f = 0;
- fint = __clk_get_rate(clk->dpll_data->clk_ref) / n;
+ fint = clk_get_rate(clk->dpll_data->clk_ref) / n;
pr_debug("clock: fint is %lu\n", fint);
u8 state = 1;
int r = 0;
- pr_debug("clock: locking DPLL %s\n", __clk_get_name(clk->hw.clk));
+ pr_debug("clock: locking DPLL %s\n", clk_hw_get_name(&clk->hw));
dd = clk->dpll_data;
state <<= __ffs(dd->idlest_mask);
return -EINVAL;
pr_debug("clock: configuring DPLL %s for low-power bypass\n",
- __clk_get_name(clk->hw.clk));
+ clk_hw_get_name(&clk->hw));
ai = omap3_dpll_autoidle_read(clk);
if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_STOP)))
return -EINVAL;
- pr_debug("clock: stopping DPLL %s\n", __clk_get_name(clk->hw.clk));
+ pr_debug("clock: stopping DPLL %s\n", clk_hw_get_name(&clk->hw));
ai = omap3_dpll_autoidle_read(clk);
{
unsigned long fint, clkinp; /* watch out for overflow */
- clkinp = __clk_get_rate(__clk_get_parent(clk->hw.clk));
+ clkinp = clk_hw_get_rate(clk_hw_get_parent(&clk->hw));
fint = (clkinp / n) * m;
if (fint < 1000000000)
unsigned long clkinp, sd; /* watch out for overflow */
int mod1, mod2;
- clkinp = __clk_get_rate(__clk_get_parent(clk->hw.clk));
+ clkinp = clk_hw_get_rate(clk_hw_get_parent(&clk->hw));
/*
* target sigma-delta to near 250MHz
if (r) {
WARN(1,
"%s: could not enable %s's clockdomain %s: %d\n",
- __func__, __clk_get_name(hw->clk),
+ __func__, clk_hw_get_name(hw),
clk->clkdm_name, r);
return r;
}
}
- parent = __clk_get_hw(__clk_get_parent(hw->clk));
+ parent = clk_hw_get_parent(hw);
- if (__clk_get_rate(hw->clk) == __clk_get_rate(dd->clk_bypass)) {
+ if (clk_hw_get_rate(hw) == clk_get_rate(dd->clk_bypass)) {
WARN_ON(parent != __clk_get_hw(dd->clk_bypass));
r = _omap3_noncore_dpll_bypass(clk);
} else {
if (!dd)
return -EINVAL;
- if (__clk_get_rate(dd->clk_bypass) == req->rate &&
+ if (clk_get_rate(dd->clk_bypass) == req->rate &&
(dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
req->best_parent_hw = __clk_get_hw(dd->clk_bypass);
} else {
if (!dd)
return -EINVAL;
- if (__clk_get_hw(__clk_get_parent(hw->clk)) !=
- __clk_get_hw(dd->clk_ref))
+ if (clk_hw_get_parent(hw) != __clk_get_hw(dd->clk_ref))
return -EINVAL;
if (dd->last_rounded_rate == 0)
}
pr_debug("%s: %s: set rate: locking rate to %lu.\n", __func__,
- __clk_get_name(hw->clk), rate);
+ clk_hw_get_name(hw), rate);
ret = omap3_noncore_dpll_program(clk, freqsel);
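
set_rate only programs values that an earlier round_rate/determine_rate call cached in dpll_data (last_rounded_m/n); from the consumer side both steps are driven by a single clk_set_rate() call. A simplified sketch, with a hypothetical clock name:

/* Hypothetical consumer: the core rounds first, then sets. */
struct clk *dpll = clk_get(NULL, "hypo_dpll_ck");
if (!IS_ERR(dpll))
	clk_set_rate(dpll, 600000000);	/* round_rate caches M/N; set_rate locks */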
static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw)
{
struct clk_hw_omap *pclk = NULL;
- struct clk *parent;
/* Walk up the parents of clk, looking for a DPLL */
do {
do {
- parent = __clk_get_parent(hw->clk);
- hw = __clk_get_hw(parent);
+ hw = clk_hw_get_parent(hw);
} while (hw && (clk_hw_get_flags(hw) & CLK_IS_BASIC));
if (!hw)
break;
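
For orientation, the nested loops climb the parent chain, skipping basic-clock wrappers until an OMAP hw clock (the DPLL) or NULL is reached. A hedged illustration of the kind of chain being walked, with hypothetical clock names:

/*
 * Hypothetical chain: clkoutx2 -> x2 gate (CLK_IS_BASIC, skipped)
 * -> dpll_ck (clk_hw_omap, loop stops here). If the walk falls off
 * the top of the tree, hw is NULL and the search gives up.
 */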
{
long fint, fout;
- fint = __clk_get_rate(dd->clk_ref) / (dd->last_rounded_n + 1);
+ fint = clk_get_rate(dd->clk_ref) / (dd->last_rounded_n + 1);
fout = fint * dd->last_rounded_m;
if ((fint < OMAP4_DPLL_LP_FINT_MAX) && (fout < OMAP4_DPLL_LP_FOUT_MAX))
if (!dd)
return -EINVAL;
- if (__clk_get_rate(dd->clk_bypass) == req->rate &&
+ if (clk_get_rate(dd->clk_bypass) == req->rate &&
(dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
req->best_parent_hw = __clk_get_hw(dd->clk_bypass);
} else {
* (Any other value different from the Read value) to the
* corresponding CM_CLKSEL register will refresh the dividers.
*/
-static int omap36xx_gate_clk_enable_with_hsdiv_restore(struct clk_hw *clk)
+static int omap36xx_gate_clk_enable_with_hsdiv_restore(struct clk_hw *hw)
{
struct clk_divider *parent;
struct clk_hw *parent_hw;
int ret;
/* Clear PWRDN bit of HSDIVIDER */
- ret = omap2_dflt_clk_enable(clk);
+ ret = omap2_dflt_clk_enable(hw);
/* Parent is the x2 node, get parent of parent for the m2 div */
- parent_hw = __clk_get_hw(__clk_get_parent(__clk_get_parent(clk->clk)));
+ parent_hw = clk_hw_get_parent(clk_hw_get_parent(hw));
parent = to_clk_divider(parent_hw);
/* Restore the dividers */
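
The restore leg that follows (truncated here) conventionally applies the refresh described in the comment above: read the live divider, write any different value, then write the original back so the HSDIVIDER reloads it. A hedged sketch of that step, reusing the ti_clk_ll_ops accessors seen earlier in this patch:

	/* Sketch (assumed continuation): force the HSDIVIDER to reload. */
	if (!ret) {
		u32 orig_v, dummy_v;

		orig_v = ti_clk_ll_ops->clk_readl(parent->reg);
		dummy_v = orig_v ^ (1 << parent->shift);
		ti_clk_ll_ops->clk_writel(dummy_v, parent->reg);	/* any other value */
		ti_clk_ll_ops->clk_writel(orig_v, parent->reg);		/* restore original */
	}
	return ret;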