struct mtk_smi_common_plat {
enum mtk_smi_gen gen;
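+ /* GALS (Global Async Local Sync): if present, its clock must also be enabled */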
+ bool has_gals;
};
struct mtk_smi_larb_gen {
int port_in_larb[MTK_LARB_NR_MAX + 1];
void (*config_port)(struct device *);
unsigned int larb_direct_to_common_mask;
+ bool has_gals;
};
struct mtk_smi {
struct device *dev;
struct clk *clk_apb, *clk_smi;
+ struct clk *clk_gals0, *clk_gals1;
struct clk *clk_async; /* only needed by mt2701 */
void __iomem *smi_ao_base;
if (ret)
goto err_disable_apb;
+ ret = clk_prepare_enable(smi->clk_gals0);
+ if (ret)
+ goto err_disable_smi;
+
+ ret = clk_prepare_enable(smi->clk_gals1);
+ if (ret)
+ goto err_disable_gals0;
+
return 0;
+err_disable_gals0:
+ clk_disable_unprepare(smi->clk_gals0);
+err_disable_smi:
+ clk_disable_unprepare(smi->clk_smi);
err_disable_apb:
clk_disable_unprepare(smi->clk_apb);
err_put_pm:
static void mtk_smi_disable(const struct mtk_smi *smi)
{
+ clk_disable_unprepare(smi->clk_gals1);
+ clk_disable_unprepare(smi->clk_gals0);
clk_disable_unprepare(smi->clk_smi);
clk_disable_unprepare(smi->clk_apb);
pm_runtime_put_sync(smi->dev);
larb->smi.clk_smi = devm_clk_get(dev, "smi");
if (IS_ERR(larb->smi.clk_smi))
return PTR_ERR(larb->smi.clk_smi);
+
+ if (larb->larb_gen->has_gals) {
+ /* A larb may not have a gals clock even if the SoC supports GALS. */
+ larb->smi.clk_gals0 = devm_clk_get(dev, "gals");
+ if (PTR_ERR(larb->smi.clk_gals0) == -ENOENT)
+ larb->smi.clk_gals0 = NULL;
+ else if (IS_ERR(larb->smi.clk_gals0))
+ return PTR_ERR(larb->smi.clk_gals0);
+ }
larb->smi.dev = dev;
if (larb->larb_gen->need_larbid) {
if (IS_ERR(common->clk_smi))
return PTR_ERR(common->clk_smi);
+ if (common->plat->has_gals) {
+ common->clk_gals0 = devm_clk_get(dev, "gals0");
+ if (IS_ERR(common->clk_gals0))
+ return PTR_ERR(common->clk_gals0);
+
+ common->clk_gals1 = devm_clk_get(dev, "gals1");
+ if (IS_ERR(common->clk_gals1))
+ return PTR_ERR(common->clk_gals1);
+ }
+
/*
* for mtk smi gen 1, we need to get the ao(always on) base to config
* m4u port, and we need to enable the async clock to transform the smi