
[V2,3/8] clk: imx: clk-composite-93: check slice busy

Message ID 20220815013039.474970-4-peng.fan@oss.nxp.com
State New
Series [V2,1/8] dt-bindings: clock: imx93-clock: add more MU/SAI clocks

Commit Message

Peng Fan (OSS) Aug. 15, 2022, 1:30 a.m. UTC
From: Peng Fan <peng.fan@nxp.com>

The i.MX93 CCM ROOT STAT register has a SLICE_BUSY bit, which indicates
whether the clock generation logic is applying a new setting:
0b - Clock generation logic is not busy.
1b - Clock generation logic is applying a new setting.

So when setting the parent, rate or gate, this bit needs to be checked.

Introduce specific ops to do the work.

Signed-off-by: Peng Fan <peng.fan@nxp.com>
Reviewed-by: Ye Li <ye.li@nxp.com>
Reviewed-by: Jacky Bai <ping.bai@nxp.com>
---
 drivers/clk/imx/clk-composite-93.c | 163 ++++++++++++++++++++++++++++-
 1 file changed, 160 insertions(+), 3 deletions(-)
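
Before the full diff, a minimal sketch of the pattern all of the new ops follow, assuming only the register layout described in the patch (STAT register at offset 0x4, SLICE_BUSY at bit 28, a 500 us timeout): write the ROOT register, then poll SLICE_BUSY until the clock generation logic has applied the new setting. The helper name below is hypothetical and only illustrates the write-then-wait sequence used by the gate, divider and mux ops.

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define STAT_OFFSET	0x4
#define CCM_BUSY_SHIFT	28
#define TIMEOUT_US	500U

/* Hypothetical helper: write a CCM ROOT register, then wait for the
 * SLICE_BUSY bit in the matching STAT register to clear. */
static int ccm_root_write_and_wait(void __iomem *root, u32 val)
{
	u32 stat;

	writel(val, root);
	return readl_poll_timeout_atomic(root + STAT_OFFSET, stat,
					 !(stat & BIT(CCM_BUSY_SHIFT)),
					 0, TIMEOUT_US);
}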

Patch

diff --git a/drivers/clk/imx/clk-composite-93.c b/drivers/clk/imx/clk-composite-93.c
index b44619aa5ca5..19f4037e6cca 100644
--- a/drivers/clk/imx/clk-composite-93.c
+++ b/drivers/clk/imx/clk-composite-93.c
@@ -9,20 +9,176 @@ 
 #include <linux/errno.h>
 #include <linux/export.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/slab.h>
 
 #include "clk.h"
 
+#define TIMEOUT_US	500U
+
 #define CCM_DIV_SHIFT	0
 #define CCM_DIV_WIDTH	8
 #define CCM_MUX_SHIFT	8
 #define CCM_MUX_MASK	3
 #define CCM_OFF_SHIFT	24
+#define CCM_BUSY_SHIFT	28
 
+#define STAT_OFFSET	0x4
 #define AUTHEN_OFFSET	0x30
 #define TZ_NS_SHIFT	9
 #define TZ_NS_MASK	BIT(9)
 
+static int imx93_clk_composite_wait_ready(struct clk_hw *hw, void __iomem *reg)
+{
+	int ret;
+	u32 val;
+
+	ret = readl_poll_timeout_atomic(reg + STAT_OFFSET, val, !(val & BIT(CCM_BUSY_SHIFT)),
+					0, TIMEOUT_US);
+	if (ret)
+		pr_err("Slice[%s] busy timeout\n", clk_hw_get_name(hw));
+
+	return ret;
+}
+
+static void imx93_clk_composite_gate_endisable(struct clk_hw *hw, int enable)
+{
+	struct clk_gate *gate = to_clk_gate(hw);
+	unsigned long flags;
+	u32 reg;
+
+	if (gate->lock)
+		spin_lock_irqsave(gate->lock, flags);
+
+	reg = readl(gate->reg);
+
+	if (enable)
+		reg &= ~BIT(gate->bit_idx);
+	else
+		reg |= BIT(gate->bit_idx);
+
+	writel(reg, gate->reg);
+
+	imx93_clk_composite_wait_ready(hw, gate->reg);
+
+	if (gate->lock)
+		spin_unlock_irqrestore(gate->lock, flags);
+}
+
+static int imx93_clk_composite_gate_enable(struct clk_hw *hw)
+{
+	imx93_clk_composite_gate_endisable(hw, 1);
+
+	return 0;
+}
+
+static void imx93_clk_composite_gate_disable(struct clk_hw *hw)
+{
+	imx93_clk_composite_gate_endisable(hw, 0);
+}
+
+static const struct clk_ops imx93_clk_composite_gate_ops = {
+	.enable = imx93_clk_composite_gate_enable,
+	.disable = imx93_clk_composite_gate_disable,
+	.is_enabled = clk_gate_is_enabled,
+};
+
+static unsigned long
+imx93_clk_composite_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+	return clk_divider_ops.recalc_rate(hw, parent_rate);
+}
+
+static long
+imx93_clk_composite_divider_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate)
+{
+	return clk_divider_ops.round_rate(hw, rate, prate);
+}
+
+static int
+imx93_clk_composite_divider_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
+{
+	return clk_divider_ops.determine_rate(hw, req);
+}
+
+static int imx93_clk_composite_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+						unsigned long parent_rate)
+{
+	struct clk_divider *divider = to_clk_divider(hw);
+	int value;
+	unsigned long flags = 0;
+	u32 val;
+	int ret;
+
+	value = divider_get_val(rate, parent_rate, divider->table, divider->width, divider->flags);
+	if (value < 0)
+		return value;
+
+	if (divider->lock)
+		spin_lock_irqsave(divider->lock, flags);
+
+	val = readl(divider->reg);
+	val &= ~(clk_div_mask(divider->width) << divider->shift);
+	val |= (u32)value << divider->shift;
+	writel(val, divider->reg);
+
+	ret = imx93_clk_composite_wait_ready(hw, divider->reg);
+
+	if (divider->lock)
+		spin_unlock_irqrestore(divider->lock, flags);
+
+	return ret;
+}
+
+static const struct clk_ops imx93_clk_composite_divider_ops = {
+	.recalc_rate = imx93_clk_composite_divider_recalc_rate,
+	.round_rate = imx93_clk_composite_divider_round_rate,
+	.determine_rate = imx93_clk_composite_divider_determine_rate,
+	.set_rate = imx93_clk_composite_divider_set_rate,
+};
+
+static u8 imx93_clk_composite_mux_get_parent(struct clk_hw *hw)
+{
+	return clk_mux_ops.get_parent(hw);
+}
+
+static int imx93_clk_composite_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+	struct clk_mux *mux = to_clk_mux(hw);
+	u32 val = clk_mux_index_to_val(mux->table, mux->flags, index);
+	unsigned long flags = 0;
+	u32 reg;
+	int ret;
+
+	if (mux->lock)
+		spin_lock_irqsave(mux->lock, flags);
+
+	reg = readl(mux->reg);
+	reg &= ~(mux->mask << mux->shift);
+	val = val << mux->shift;
+	reg |= val;
+	writel(reg, mux->reg);
+
+	ret = imx93_clk_composite_wait_ready(hw, mux->reg);
+
+	if (mux->lock)
+		spin_unlock_irqrestore(mux->lock, flags);
+
+	return ret;
+}
+
+static int
+imx93_clk_composite_mux_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
+{
+	return clk_mux_ops.determine_rate(hw, req);
+}
+
+static const struct clk_ops imx93_clk_composite_mux_ops = {
+	.get_parent = imx93_clk_composite_mux_get_parent,
+	.set_parent = imx93_clk_composite_mux_set_parent,
+	.determine_rate = imx93_clk_composite_mux_determine_rate,
+};
+
 struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *parent_names,
 					 int num_parents, void __iomem *reg,
 					 unsigned long flags)
@@ -74,9 +230,10 @@  struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *p
 		gate->flags = CLK_GATE_SET_TO_DISABLE;
 
 		hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
-					       mux_hw, &clk_mux_ops, div_hw,
-					       &clk_divider_ops, gate_hw,
-					       &clk_gate_ops, flags | CLK_SET_RATE_NO_REPARENT);
+					       mux_hw, &imx93_clk_composite_mux_ops, div_hw,
+					       &imx93_clk_composite_divider_ops, gate_hw,
+					       &imx93_clk_composite_gate_ops,
+					       flags | CLK_SET_RATE_NO_REPARENT);
 	}
 
 	if (IS_ERR(hw))
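
For context, a hedged usage sketch of the helper registered above, assuming a provider driver along the lines of clk-imx93.c: the clock name, parent list, index macro and register offset are illustrative placeholders, not values taken from the real driver.

static const char * const example_root_sels[] = { "osc_24m", "sys_pll_pfd0_div2" };

/* Illustrative only: the index, clock name and 0x0a80 offset are placeholders. */
clk_hws[IMX93_CLK_EXAMPLE_ROOT] =
	imx93_clk_composite_flags("example_root", example_root_sels,
				  ARRAY_SIZE(example_root_sels),
				  ccm_base + 0x0a80,
				  CLK_SET_RATE_NO_REPARENT);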