CLOCK_PROTOCOL_ATTRIBUTES provides attributes to indicate the maximum
number of pending asynchronous clock rate changes supported by the
platform. If it is non-zero, asynchronous clock rate set requests can
be issued for any clock until that limit is reached.

In order to add that support, let's drop the config flag passed to
clk_ops->rate_set and handle the asynchronous requests dynamically.
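As a rough sketch of the dynamic handling this enables (illustration only,
not part of this patch), the protocol layer could decide per request whether
the asynchronous form of CLOCK_RATE_SET may be used, based on the limit read
from CLOCK_PROTOCOL_ATTRIBUTES. The bookkeeping fields (max_async_req,
cur_async_req), the CLOCK_SET_ASYNC flag and the send helper named below are
assumptions for the sketch:

/*
 * Sketch only: use the asynchronous form of CLOCK_RATE_SET while the
 * platform's pending-request limit is not exhausted. The counter would
 * have to be dropped again once the delayed response arrives.
 */
static int scmi_clock_rate_set(const struct scmi_handle *handle,
			       u32 clk_id, u64 rate)
{
	u32 flags = 0;
	struct clock_info *ci = handle->clk_priv;	/* assumed private data */

	if (ci->max_async_req) {
		if (atomic_inc_return(&ci->cur_async_req) <= ci->max_async_req)
			flags |= CLOCK_SET_ASYNC;
		else
			atomic_dec(&ci->cur_async_req);
	}

	/* hypothetical helper: fills cfg->flags/id/value_* and sends the command */
	return scmi_clock_rate_set_msg(handle, clk_id, flags, rate);
}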
Cc: Stephen Boyd <sboyd@kernel.org>
Cc: linux-clk@vger.kernel.org
Acked-by: Stephen Boyd <sboyd@kernel.org>
Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
{
struct scmi_clk *clk = to_scmi_clk(hw);
- return clk->handle->clk_ops->rate_set(clk->handle, clk->id, 0, rate);
+ return clk->handle->clk_ops->rate_set(clk->handle, clk->id, rate);
}
static int scmi_clk_enable(struct clk_hw *hw)
}
static int scmi_clock_rate_set(const struct scmi_handle *handle, u32 clk_id,
- u32 config, u64 rate)
+ u64 rate)
{
int ret;
struct scmi_xfer *t;
return ret;
cfg = t->tx.buf;
- cfg->flags = cpu_to_le32(config);
+ cfg->flags = cpu_to_le32(0);
cfg->id = cpu_to_le32(clk_id);
cfg->value_low = cpu_to_le32(rate & 0xffffffff);
cfg->value_high = cpu_to_le32(rate >> 32);
int (*rate_get)(const struct scmi_handle *handle, u32 clk_id,
u64 *rate);
int (*rate_set)(const struct scmi_handle *handle, u32 clk_id,
- u32 config, u64 rate);
+ u64 rate);
int (*enable)(const struct scmi_handle *handle, u32 clk_id);
int (*disable)(const struct scmi_handle *handle, u32 clk_id);
};