diff mbox series

[v2,05/28] drm/msm/dsi: fuse dsi_pll_* code into dsi_phy_* code

Message ID 20210324151846.2774204-6-dmitry.baryshkov@linaro.org
State Superseded
Headers show
Series drm/msm/dsi: refactor MSM DSI PHY/PLL drivers | expand

Commit Message

Dmitry Baryshkov March 24, 2021, 3:18 p.m. UTC
Each phy version is tightly coupled with the corresponding PLL code;
there is no need to keep them separate. Fuse source files together in
order to simplify DSI code.

Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>

---
 drivers/gpu/drm/msm/Makefile                  |    9 +-
 drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c    |  873 +++++++++++++
 drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c    | 1089 ++++++++++++++++
 drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c    |  637 ++++++++++
 .../gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c   |  519 ++++++++
 drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c     |  905 ++++++++++++++
 .../gpu/drm/msm/dsi/{pll => phy}/dsi_pll.c    |    0
 .../gpu/drm/msm/dsi/{pll => phy}/dsi_pll.h    |    0
 drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c    |  881 -------------
 drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c    | 1096 -----------------
 drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c    |  643 ----------
 .../gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c   |  526 --------
 drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c     |  913 --------------
 13 files changed, 4024 insertions(+), 4067 deletions(-)
 rename drivers/gpu/drm/msm/dsi/{pll => phy}/dsi_pll.c (100%)
 rename drivers/gpu/drm/msm/dsi/{pll => phy}/dsi_pll.h (100%)
 delete mode 100644 drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
 delete mode 100644 drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
 delete mode 100644 drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
 delete mode 100644 drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
 delete mode 100644 drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c

-- 
2.30.2

Comments

Abhinav Kumar March 26, 2021, 5:48 p.m. UTC | #1
On 2021-03-24 08:18, Dmitry Baryshkov wrote:
> Each phy version is tightly coupled with the corresponding PLL code,
> there is no need to keep them separate. Fuse source files together in
> order to simplify DSI code.
> 
> Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>

Reviewed-by: Abhinav Kumar <abhinavk@codeaurora.org>

> ---
>  drivers/gpu/drm/msm/Makefile                  |    9 +-
>  drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c    |  873 +++++++++++++
>  drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c    | 1089 ++++++++++++++++
>  drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c    |  637 ++++++++++
>  .../gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c   |  519 ++++++++
>  drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c     |  905 ++++++++++++++
>  .../gpu/drm/msm/dsi/{pll => phy}/dsi_pll.c    |    0
>  .../gpu/drm/msm/dsi/{pll => phy}/dsi_pll.h    |    0
>  drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c    |  881 -------------
>  drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c    | 1096 -----------------
>  drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c    |  643 ----------
>  .../gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c   |  526 --------
>  drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c     |  913 --------------
>  13 files changed, 4024 insertions(+), 4067 deletions(-)
>  rename drivers/gpu/drm/msm/dsi/{pll => phy}/dsi_pll.c (100%)
>  rename drivers/gpu/drm/msm/dsi/{pll => phy}/dsi_pll.h (100%)
>  delete mode 100644 drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
>  delete mode 100644 drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
>  delete mode 100644 drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
>  delete mode 100644 drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
>  delete mode 100644 drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c
> 

> diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
> index 3cc906121fb3..1be6996b80b7 100644
> --- a/drivers/gpu/drm/msm/Makefile
> +++ b/drivers/gpu/drm/msm/Makefile
> @@ -136,13 +136,6 @@ msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o
>  msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/phy/dsi_phy_10nm.o
>  msm-$(CONFIG_DRM_MSM_DSI_7NM_PHY) += dsi/phy/dsi_phy_7nm.o
> 
> -ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y)
> -msm-y += dsi/pll/dsi_pll.o
> -msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o
> -msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o
> -msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/pll/dsi_pll_14nm.o
> -msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/pll/dsi_pll_10nm.o
> -msm-$(CONFIG_DRM_MSM_DSI_7NM_PHY) += dsi/pll/dsi_pll_7nm.o
> -endif
> +msm-$(CONFIG_DRM_MSM_DSI_PLL) += dsi/phy/dsi_pll.o
> 
>  obj-$(CONFIG_DRM_MSM)	+= msm.o

> diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
> index 655fa17a0452..5da369b5c475 100644
> --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
> +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
> @@ -3,11 +3,884 @@
>   * Copyright (c) 2018, The Linux Foundation
>   */
> 
> +#include <linux/clk.h>
> +#include <linux/clk-provider.h>
>  #include <linux/iopoll.h>
> 
> +#include "dsi_pll.h"
>  #include "dsi_phy.h"
>  #include "dsi.xml.h"
> 

> +/*
> + * DSI PLL 10nm - clock diagram (eg: DSI0):
> + *
> + *           dsi0_pll_out_div_clk  dsi0_pll_bit_clk
> + *                              |                |
> + *                              |                |
> + *                 +---------+  |  +----------+  |  +----+
> + *  dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
> + *                 +---------+  |  +----------+  |  +----+
> + *                              |                |
> + *                              |                |         dsi0_pll_by_2_bit_clk
> + *                              |                |          |
> + *                              |                |  +----+  |  |\  dsi0_pclk_mux
> + *                              |                |--| /2 |--o--| \   |
> + *                              |                |  +----+     |  \  |  +---------+
> + *                              |                --------------|  |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
> + *                              |------------------------------|  /     +---------+
> + *                              |          +-----+             | /
> + *                              -----------| /4? |--o----------|/
> + *                                         +-----+  |           |
> + *                                                  |           |dsiclk_sel
> + *                                                  |
> + *                                                  dsi0_pll_post_out_div_clk
> + */

> +

> +#define DSI_BYTE_PLL_CLK		0

> +#define DSI_PIXEL_PLL_CLK		1

> +#define NUM_PROVIDED_CLKS		2

> +

> +#define VCO_REF_CLK_RATE		19200000

> +

> +struct dsi_pll_regs {

> +	u32 pll_prop_gain_rate;

> +	u32 pll_lockdet_rate;

> +	u32 decimal_div_start;

> +	u32 frac_div_start_low;

> +	u32 frac_div_start_mid;

> +	u32 frac_div_start_high;

> +	u32 pll_clock_inverters;

> +	u32 ssc_stepsize_low;

> +	u32 ssc_stepsize_high;

> +	u32 ssc_div_per_low;

> +	u32 ssc_div_per_high;

> +	u32 ssc_adjper_low;

> +	u32 ssc_adjper_high;

> +	u32 ssc_control;

> +};

> +

> +struct dsi_pll_config {

> +	u32 ref_freq;

> +	bool div_override;

> +	u32 output_div;

> +	bool ignore_frac;

> +	bool disable_prescaler;

> +	bool enable_ssc;

> +	bool ssc_center;

> +	u32 dec_bits;

> +	u32 frac_bits;

> +	u32 lock_timer;

> +	u32 ssc_freq;

> +	u32 ssc_offset;

> +	u32 ssc_adj_per;

> +	u32 thresh_cycles;

> +	u32 refclk_cycles;

> +};

> +

> +struct pll_10nm_cached_state {

> +	unsigned long vco_rate;

> +	u8 bit_clk_div;

> +	u8 pix_clk_div;

> +	u8 pll_out_div;

> +	u8 pll_mux;

> +};

> +

> +struct dsi_pll_10nm {

> +	struct msm_dsi_pll base;

> +

> +	int id;

> +	struct platform_device *pdev;

> +

> +	void __iomem *phy_cmn_mmio;

> +	void __iomem *mmio;

> +

> +	u64 vco_ref_clk_rate;

> +	u64 vco_current_rate;

> +

> +	/* protects REG_DSI_10nm_PHY_CMN_CLK_CFG0 register */

> +	spinlock_t postdiv_lock;

> +

> +	int vco_delay;

> +	struct dsi_pll_config pll_configuration;

> +	struct dsi_pll_regs reg_setup;

> +

> +	/* private clocks: */

> +	struct clk_hw *out_div_clk_hw;

> +	struct clk_hw *bit_clk_hw;

> +	struct clk_hw *byte_clk_hw;

> +	struct clk_hw *by_2_bit_clk_hw;

> +	struct clk_hw *post_out_div_clk_hw;

> +	struct clk_hw *pclk_mux_hw;

> +	struct clk_hw *out_dsiclk_hw;

> +

> +	/* clock-provider: */

> +	struct clk_hw_onecell_data *hw_data;

> +

> +	struct pll_10nm_cached_state cached_state;

> +

> +	enum msm_dsi_phy_usecase uc;

> +	struct dsi_pll_10nm *slave;

> +};

> +

> +#define to_pll_10nm(x)	container_of(x, struct dsi_pll_10nm, base)

> +

> +/*

> + * Global list of private DSI PLL struct pointers. We need this for Dual DSI
> + * mode, where the master PLL's clk_ops needs access the slave's private data

> + */

> +static struct dsi_pll_10nm *pll_10nm_list[DSI_MAX];

> +

> +static void dsi_pll_setup_config(struct dsi_pll_10nm *pll)

> +{

> +	struct dsi_pll_config *config = &pll->pll_configuration;

> +

> +	config->ref_freq = pll->vco_ref_clk_rate;

> +	config->output_div = 1;

> +	config->dec_bits = 8;

> +	config->frac_bits = 18;

> +	config->lock_timer = 64;

> +	config->ssc_freq = 31500;

> +	config->ssc_offset = 5000;

> +	config->ssc_adj_per = 2;

> +	config->thresh_cycles = 32;

> +	config->refclk_cycles = 256;

> +

> +	config->div_override = false;

> +	config->ignore_frac = false;

> +	config->disable_prescaler = false;

> +

> +	config->enable_ssc = false;

> +	config->ssc_center = 0;

> +}

> +

> +static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll)

> +{

> +	struct dsi_pll_config *config = &pll->pll_configuration;

> +	struct dsi_pll_regs *regs = &pll->reg_setup;

> +	u64 fref = pll->vco_ref_clk_rate;

> +	u64 pll_freq;

> +	u64 divider;

> +	u64 dec, dec_multiple;

> +	u32 frac;

> +	u64 multiplier;

> +

> +	pll_freq = pll->vco_current_rate;

> +

> +	if (config->disable_prescaler)

> +		divider = fref;

> +	else

> +		divider = fref * 2;

> +

> +	multiplier = 1 << config->frac_bits;

> +	dec_multiple = div_u64(pll_freq * multiplier, divider);

> +	dec = div_u64_rem(dec_multiple, multiplier, &frac);

> +

> +	if (pll_freq <= 1900000000UL)

> +		regs->pll_prop_gain_rate = 8;

> +	else if (pll_freq <= 3000000000UL)

> +		regs->pll_prop_gain_rate = 10;

> +	else

> +		regs->pll_prop_gain_rate = 12;

> +	if (pll_freq < 1100000000UL)

> +		regs->pll_clock_inverters = 8;

> +	else

> +		regs->pll_clock_inverters = 0;

> +

> +	regs->pll_lockdet_rate = config->lock_timer;

> +	regs->decimal_div_start = dec;

> +	regs->frac_div_start_low = (frac & 0xff);

> +	regs->frac_div_start_mid = (frac & 0xff00) >> 8;

> +	regs->frac_div_start_high = (frac & 0x30000) >> 16;

> +}

> +

> +#define SSC_CENTER		BIT(0)

> +#define SSC_EN			BIT(1)

> +

> +static void dsi_pll_calc_ssc(struct dsi_pll_10nm *pll)

> +{

> +	struct dsi_pll_config *config = &pll->pll_configuration;

> +	struct dsi_pll_regs *regs = &pll->reg_setup;

> +	u32 ssc_per;

> +	u32 ssc_mod;

> +	u64 ssc_step_size;

> +	u64 frac;

> +

> +	if (!config->enable_ssc) {

> +		DBG("SSC not enabled\n");

> +		return;

> +	}

> +

> +	ssc_per = DIV_ROUND_CLOSEST(config->ref_freq, config->ssc_freq) / 2 - 1;

> +	ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);

> +	ssc_per -= ssc_mod;

> +

> +	frac = regs->frac_div_start_low |

> +			(regs->frac_div_start_mid << 8) |

> +			(regs->frac_div_start_high << 16);

> +	ssc_step_size = regs->decimal_div_start;

> +	ssc_step_size *= (1 << config->frac_bits);

> +	ssc_step_size += frac;

> +	ssc_step_size *= config->ssc_offset;

> +	ssc_step_size *= (config->ssc_adj_per + 1);

> +	ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));

> +	ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);

> +

> +	regs->ssc_div_per_low = ssc_per & 0xFF;

> +	regs->ssc_div_per_high = (ssc_per & 0xFF00) >> 8;

> +	regs->ssc_stepsize_low = (u32)(ssc_step_size & 0xFF);

> +	regs->ssc_stepsize_high = (u32)((ssc_step_size & 0xFF00) >> 8);

> +	regs->ssc_adjper_low = config->ssc_adj_per & 0xFF;

> +	regs->ssc_adjper_high = (config->ssc_adj_per & 0xFF00) >> 8;

> +

> +	regs->ssc_control = config->ssc_center ? SSC_CENTER : 0;

> +

> +	pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",

> +		 regs->decimal_div_start, frac, config->frac_bits);

> +	pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",

> +		 ssc_per, (u32)ssc_step_size, config->ssc_adj_per);

> +}

> +

> +static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll)

> +{

> +	void __iomem *base = pll->mmio;

> +	struct dsi_pll_regs *regs = &pll->reg_setup;

> +

> +	if (pll->pll_configuration.enable_ssc) {

> +		pr_debug("SSC is enabled\n");

> +

> +		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1,

> +			  regs->ssc_stepsize_low);

> +		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,

> +			  regs->ssc_stepsize_high);

> +		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1,

> +			  regs->ssc_div_per_low);

> +		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1,

> +			  regs->ssc_div_per_high);

> +		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1,

> +			  regs->ssc_adjper_low);

> +		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1,

> +			  regs->ssc_adjper_high);

> +		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_CONTROL,

> +			  SSC_EN | regs->ssc_control);

> +	}

> +}

> +

> +static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll)

> +{

> +	void __iomem *base = pll->mmio;

> +

> +	pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE, 0x80);

> +	pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);

> +	pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);

> +	pll_write(base + REG_DSI_10nm_PHY_PLL_DSM_DIVIDER, 0x00);

> +	pll_write(base + REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);

> +	pll_write(base + REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);

> +	pll_write(base + REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE,

> +		  0xba);

> +	pll_write(base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);

> +	pll_write(base + REG_DSI_10nm_PHY_PLL_OUTDIV, 0x00);

> +	pll_write(base + REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE, 0x00);

> +	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);

> +	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x08);

> +	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1, 0xc0);

> +	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0xfa);

> +	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1,

> +		  0x4c);

> +	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);

> +	pll_write(base + REG_DSI_10nm_PHY_PLL_PFILT, 0x29);

> +	pll_write(base + REG_DSI_10nm_PHY_PLL_IFILT, 0x3f);

> +}

> +

> +static void dsi_pll_commit(struct dsi_pll_10nm *pll)

> +{

> +	void __iomem *base = pll->mmio;

> +	struct dsi_pll_regs *reg = &pll->reg_setup;

> +

> +	pll_write(base + REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);

> +	pll_write(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1,

> +		  reg->decimal_div_start);

> +	pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1,

> +		  reg->frac_div_start_low);

> +	pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1,

> +		  reg->frac_div_start_mid);

> +	pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1,

> +		  reg->frac_div_start_high);

> +	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1,

> +		  reg->pll_lockdet_rate);

> +	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);

> +	pll_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10);

> +	pll_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS,

> +		  reg->pll_clock_inverters);

> +}

> +

> +static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,

> +				     unsigned long parent_rate)

> +{

> +	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);

> +	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);

> +

> +	DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_10nm->id, rate,

> +	    parent_rate);

> +

> +	pll_10nm->vco_current_rate = rate;

> +	pll_10nm->vco_ref_clk_rate = VCO_REF_CLK_RATE;

> +

> +	dsi_pll_setup_config(pll_10nm);

> +

> +	dsi_pll_calc_dec_frac(pll_10nm);

> +

> +	dsi_pll_calc_ssc(pll_10nm);

> +

> +	dsi_pll_commit(pll_10nm);

> +

> +	dsi_pll_config_hzindep_reg(pll_10nm);

> +

> +	dsi_pll_ssc_commit(pll_10nm);

> +

> +	/* flush, ensure all register writes are done*/

> +	wmb();

> +

> +	return 0;

> +}

> +

> +static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll)

> +{

> +	struct device *dev = &pll->pdev->dev;

> +	int rc;

> +	u32 status = 0;

> +	u32 const delay_us = 100;

> +	u32 const timeout_us = 5000;

> +

> +	rc = readl_poll_timeout_atomic(pll->mmio +

> +				       REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE,

> +				       status,

> +				       ((status & BIT(0)) > 0),

> +				       delay_us,

> +				       timeout_us);

> +	if (rc)

> +		DRM_DEV_ERROR(dev, "DSI PLL(%d) lock failed, status=0x%08x\n",

> +			      pll->id, status);

> +

> +	return rc;

> +}

> +

> +static void dsi_pll_disable_pll_bias(struct dsi_pll_10nm *pll)

> +{

> +	u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0);

> +

> +	pll_write(pll->mmio + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0);

> +	pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0,

> +		  data & ~BIT(5));

> +	ndelay(250);

> +}

> +

> +static void dsi_pll_enable_pll_bias(struct dsi_pll_10nm *pll)

> +{

> +	u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0);

> +

> +	pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0,

> +		  data | BIT(5));

> +	pll_write(pll->mmio + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0xc0);

> +	ndelay(250);

> +}

> +

> +static void dsi_pll_disable_global_clk(struct dsi_pll_10nm *pll)

> +{

> +	u32 data;

> +

> +	data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1);

> +	pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1,

> +		  data & ~BIT(5));

> +}

> +

> +static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll)

> +{

> +	u32 data;

> +

> +	data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1);

> +	pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1,

> +		  data | BIT(5));

> +}

> +

> +static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)

> +{

> +	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);

> +	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);

> +	struct device *dev = &pll_10nm->pdev->dev;

> +	int rc;

> +

> +	dsi_pll_enable_pll_bias(pll_10nm);

> +	if (pll_10nm->slave)

> +		dsi_pll_enable_pll_bias(pll_10nm->slave);

> +

> +	rc = dsi_pll_10nm_vco_set_rate(hw,pll_10nm->vco_current_rate, 0);

> +	if (rc) {

> +		DRM_DEV_ERROR(dev, "vco_set_rate failed, rc=%d\n", rc);

> +		return rc;

> +	}

> +

> +	/* Start PLL */

> +	pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,

> +		  0x01);

> +

> +	/*

> +	 * ensure all PLL configurations are written prior to checking

> +	 * for PLL lock.

> +	 */

> +	wmb();

> +

> +	/* Check for PLL lock */

> +	rc = dsi_pll_10nm_lock_status(pll_10nm);

> +	if (rc) {

> +		DRM_DEV_ERROR(dev, "PLL(%d) lock failed\n", pll_10nm->id);

> +		goto error;

> +	}

> +

> +	pll->pll_on = true;

> +

> +	dsi_pll_enable_global_clk(pll_10nm);

> +	if (pll_10nm->slave)

> +		dsi_pll_enable_global_clk(pll_10nm->slave);

> +

> +	pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_RBUF_CTRL,

> +		  0x01);

> +	if (pll_10nm->slave)

> +		pll_write(pll_10nm->slave->phy_cmn_mmio +

> +			  REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x01);

> +

> +error:

> +	return rc;

> +}

> +

> +static void dsi_pll_disable_sub(struct dsi_pll_10nm *pll)

> +{

> +	pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0);

> +	dsi_pll_disable_pll_bias(pll);

> +}

> +

> +static void dsi_pll_10nm_vco_unprepare(struct clk_hw *hw)

> +{

> +	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);

> +	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);

> +

> +	/*

> +	 * To avoid any stray glitches while abruptly powering down the PLL

> +	 * make sure to gate the clock using the clock enable bit before

> +	 * powering down the PLL

> +	 */

> +	dsi_pll_disable_global_clk(pll_10nm);

> +	pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0);

> +	dsi_pll_disable_sub(pll_10nm);

> +	if (pll_10nm->slave) {

> +		dsi_pll_disable_global_clk(pll_10nm->slave);

> +		dsi_pll_disable_sub(pll_10nm->slave);

> +	}

> +	/* flush, ensure all register writes are done */

> +	wmb();

> +	pll->pll_on = false;

> +}

> +

> +static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,

> +						  unsigned long parent_rate)

> +{

> +	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);

> +	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);

> +	struct dsi_pll_config *config = &pll_10nm->pll_configuration;

> +	void __iomem *base = pll_10nm->mmio;

> +	u64 ref_clk = pll_10nm->vco_ref_clk_rate;

> +	u64 vco_rate = 0x0;

> +	u64 multiplier;

> +	u32 frac;

> +	u32 dec;

> +	u64 pll_freq, tmp64;

> +

> +	dec = pll_read(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1);

> +	dec &= 0xff;

> +

> +	frac = pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1);

> +	frac |= ((pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1) &
> +		  0xff) << 8);
> +	frac |= ((pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
> +		  0x3) << 16);

> +

> +	/*

> +	 * TODO:

> +	 *	1. Assumes prescaler is disabled

> +	 */

> +	multiplier = 1 << config->frac_bits;

> +	pll_freq = dec * (ref_clk * 2);

> +	tmp64 = (ref_clk * 2 * frac);

> +	pll_freq += div_u64(tmp64, multiplier);

> +

> +	vco_rate = pll_freq;

> +

> +	DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",

> +	    pll_10nm->id, (unsigned long)vco_rate, dec, frac);

> +

> +	return (unsigned long)vco_rate;

> +}

> +

> +static const struct clk_ops clk_ops_dsi_pll_10nm_vco = {

> +	.round_rate = msm_dsi_pll_helper_clk_round_rate,

> +	.set_rate = dsi_pll_10nm_vco_set_rate,

> +	.recalc_rate = dsi_pll_10nm_vco_recalc_rate,

> +	.prepare = dsi_pll_10nm_vco_prepare,

> +	.unprepare = dsi_pll_10nm_vco_unprepare,

> +};

> +

> +/*

> + * PLL Callbacks

> + */

> +

> +static void dsi_pll_10nm_save_state(struct msm_dsi_pll *pll)

> +{

> +	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);

> +	struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;

> +	void __iomem *phy_base = pll_10nm->phy_cmn_mmio;

> +	u32 cmn_clk_cfg0, cmn_clk_cfg1;

> +

> +	cached->pll_out_div = pll_read(pll_10nm->mmio +

> +				       REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);

> +	cached->pll_out_div &= 0x3;

> +

> +	cmn_clk_cfg0 = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0);

> +	cached->bit_clk_div = cmn_clk_cfg0 & 0xf;

> +	cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;

> +

> +	cmn_clk_cfg1 = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);

> +	cached->pll_mux = cmn_clk_cfg1 & 0x3;

> +

> +	DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",

> +	    pll_10nm->id, cached->pll_out_div, cached->bit_clk_div,

> +	    cached->pix_clk_div, cached->pll_mux);

> +}

> +

> +static int dsi_pll_10nm_restore_state(struct msm_dsi_pll *pll)

> +{

> +	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);

> +	struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;

> +	void __iomem *phy_base = pll_10nm->phy_cmn_mmio;

> +	u32 val;

> +	int ret;

> +

> +	val = pll_read(pll_10nm->mmio + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);

> +	val &= ~0x3;

> +	val |= cached->pll_out_div;

> +	pll_write(pll_10nm->mmio + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE, val);

> +

> +	pll_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,

> +		  cached->bit_clk_div | (cached->pix_clk_div << 4));

> +

> +	val = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);

> +	val &= ~0x3;

> +	val |= cached->pll_mux;

> +	pll_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, val);

> +

> +	ret = dsi_pll_10nm_vco_set_rate(&pll->clk_hw, pll_10nm->vco_current_rate, pll_10nm->vco_ref_clk_rate);

> +	if (ret) {

> +		DRM_DEV_ERROR(&pll_10nm->pdev->dev,

> +			"restore vco rate failed. ret=%d\n", ret);

> +		return ret;

> +	}

> +

> +	DBG("DSI PLL%d", pll_10nm->id);

> +

> +	return 0;

> +}

> +

> +static int dsi_pll_10nm_set_usecase(struct msm_dsi_pll *pll,

> +				    enum msm_dsi_phy_usecase uc)

> +{

> +	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);

> +	void __iomem *base = pll_10nm->phy_cmn_mmio;

> +	u32 data = 0x0;	/* internal PLL */

> +

> +	DBG("DSI PLL%d", pll_10nm->id);

> +

> +	switch (uc) {

> +	case MSM_DSI_PHY_STANDALONE:

> +		break;

> +	case MSM_DSI_PHY_MASTER:

> +		pll_10nm->slave = pll_10nm_list[(pll_10nm->id + 1) % DSI_MAX];

> +		break;

> +	case MSM_DSI_PHY_SLAVE:

> +		data = 0x1; /* external PLL */

> +		break;

> +	default:

> +		return -EINVAL;

> +	}

> +

> +	/* set PLL src */

> +	pll_write(base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, (data << 2));

> +

> +	pll_10nm->uc = uc;

> +

> +	return 0;

> +}

> +

> +static int dsi_pll_10nm_get_provider(struct msm_dsi_pll *pll,

> +				     struct clk **byte_clk_provider,

> +				     struct clk **pixel_clk_provider)

> +{

> +	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);

> +	struct clk_hw_onecell_data *hw_data = pll_10nm->hw_data;

> +

> +	DBG("DSI PLL%d", pll_10nm->id);

> +

> +	if (byte_clk_provider)

> +		*byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk;

> +	if (pixel_clk_provider)

> +		*pixel_clk_provider = hw_data->hws[DSI_PIXEL_PLL_CLK]->clk;

> +

> +	return 0;

> +}

> +

> +static void dsi_pll_10nm_destroy(struct msm_dsi_pll *pll)

> +{

> +	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);

> +	struct device *dev = &pll_10nm->pdev->dev;

> +

> +	DBG("DSI PLL%d", pll_10nm->id);

> +	of_clk_del_provider(dev->of_node);

> +

> +	clk_hw_unregister_divider(pll_10nm->out_dsiclk_hw);

> +	clk_hw_unregister_mux(pll_10nm->pclk_mux_hw);

> +	clk_hw_unregister_fixed_factor(pll_10nm->post_out_div_clk_hw);

> +	clk_hw_unregister_fixed_factor(pll_10nm->by_2_bit_clk_hw);

> +	clk_hw_unregister_fixed_factor(pll_10nm->byte_clk_hw);

> +	clk_hw_unregister_divider(pll_10nm->bit_clk_hw);

> +	clk_hw_unregister_divider(pll_10nm->out_div_clk_hw);

> +	clk_hw_unregister(&pll_10nm->base.clk_hw);

> +}

> +

> +/*

> + * The post dividers and mux clocks are created using the standard divider and
> + * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux
> + * state to follow the master PLL's divider/mux state. Therefore, we don't
> + * require special clock ops that also configure the slave PLL registers

> + */

> +static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)

> +{

> +	char clk_name[32], parent[32], vco_name[32];

> +	char parent2[32], parent3[32], parent4[32];

> +	struct clk_init_data vco_init = {

> +		.parent_names = (const char *[]){ "xo" },

> +		.num_parents = 1,

> +		.name = vco_name,

> +		.flags = CLK_IGNORE_UNUSED,

> +		.ops = &clk_ops_dsi_pll_10nm_vco,

> +	};

> +	struct device *dev = &pll_10nm->pdev->dev;

> +	struct clk_hw_onecell_data *hw_data;

> +	struct clk_hw *hw;

> +	int ret;

> +

> +	DBG("DSI%d", pll_10nm->id);

> +

> +	hw_data = devm_kzalloc(dev, sizeof(*hw_data) +

> +			       NUM_PROVIDED_CLKS * sizeof(struct clk_hw *),

> +			       GFP_KERNEL);

> +	if (!hw_data)

> +		return -ENOMEM;

> +

> +	snprintf(vco_name, 32, "dsi%dvco_clk", pll_10nm->id);

> +	pll_10nm->base.clk_hw.init = &vco_init;

> +

> +	ret = clk_hw_register(dev, &pll_10nm->base.clk_hw);

> +	if (ret)

> +		return ret;

> +

> +	snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);

> +	snprintf(parent, 32, "dsi%dvco_clk", pll_10nm->id);

> +

> +	hw = clk_hw_register_divider(dev, clk_name,

> +				     parent, CLK_SET_RATE_PARENT,

> +				     pll_10nm->mmio +

> +				     REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE,

> +				     0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);

> +	if (IS_ERR(hw)) {

> +		ret = PTR_ERR(hw);

> +		goto err_base_clk_hw;

> +	}

> +

> +	pll_10nm->out_div_clk_hw = hw;

> +

> +	snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_10nm->id);

> +	snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);

> +

> +	/* BIT CLK: DIV_CTRL_3_0 */

> +	hw = clk_hw_register_divider(dev, clk_name, parent,

> +				     CLK_SET_RATE_PARENT,

> +				     pll_10nm->phy_cmn_mmio +

> +				     REG_DSI_10nm_PHY_CMN_CLK_CFG0,

> +				     0, 4, CLK_DIVIDER_ONE_BASED,

> +				     &pll_10nm->postdiv_lock);

> +	if (IS_ERR(hw)) {

> +		ret = PTR_ERR(hw);

> +		goto err_out_div_clk_hw;

> +	}

> +

> +	pll_10nm->bit_clk_hw = hw;

> +

> +	snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_10nm->id);

> +	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);

> +

> +	/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */

> +	hw = clk_hw_register_fixed_factor(dev, clk_name, parent,

> +					  CLK_SET_RATE_PARENT, 1, 8);

> +	if (IS_ERR(hw)) {

> +		ret = PTR_ERR(hw);

> +		goto err_bit_clk_hw;

> +	}

> +

> +	pll_10nm->byte_clk_hw = hw;

> +	hw_data->hws[DSI_BYTE_PLL_CLK] = hw;

> +

> +	snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->id);

> +	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);

> +

> +	hw = clk_hw_register_fixed_factor(dev, clk_name, parent,

> +					  0, 1, 2);

> +	if (IS_ERR(hw)) {

> +		ret = PTR_ERR(hw);

> +		goto err_byte_clk_hw;

> +	}

> +

> +	pll_10nm->by_2_bit_clk_hw = hw;

> +

> +	snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->id);

> +	snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);

> +

> +	hw = clk_hw_register_fixed_factor(dev, clk_name, parent,

> +					  0, 1, 4);

> +	if (IS_ERR(hw)) {

> +		ret = PTR_ERR(hw);

> +		goto err_by_2_bit_clk_hw;

> +	}

> +

> +	pll_10nm->post_out_div_clk_hw = hw;

> +

> +	snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_10nm->id);

> +	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);

> +	snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->id);

> +	snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);

> +	snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->id);

> +

> +	hw = clk_hw_register_mux(dev, clk_name,

> +				 ((const char *[]){

> +				 parent, parent2, parent3, parent4

> +				 }), 4, 0, pll_10nm->phy_cmn_mmio +

> +				 REG_DSI_10nm_PHY_CMN_CLK_CFG1,

> +				 0, 2, 0, NULL);

> +	if (IS_ERR(hw)) {

> +		ret = PTR_ERR(hw);

> +		goto err_post_out_div_clk_hw;

> +	}

> +

> +	pll_10nm->pclk_mux_hw = hw;

> +

> +	snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_10nm->id);

> +	snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->id);

> +

> +	/* PIX CLK DIV : DIV_CTRL_7_4*/

> +	hw = clk_hw_register_divider(dev, clk_name, parent,

> +				     0, pll_10nm->phy_cmn_mmio +

> +					REG_DSI_10nm_PHY_CMN_CLK_CFG0,

> +				     4, 4, CLK_DIVIDER_ONE_BASED,

> +				     &pll_10nm->postdiv_lock);

> +	if (IS_ERR(hw)) {

> +		ret = PTR_ERR(hw);

> +		goto err_pclk_mux_hw;

> +	}

> +

> +	pll_10nm->out_dsiclk_hw = hw;

> +	hw_data->hws[DSI_PIXEL_PLL_CLK] = hw;

> +

> +	hw_data->num = NUM_PROVIDED_CLKS;

> +	pll_10nm->hw_data = hw_data;

> +

> +	ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,

> +				     pll_10nm->hw_data);

> +	if (ret) {

> +		DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);

> +		goto err_dsiclk_hw;

> +	}

> +

> +	return 0;

> +

> +err_dsiclk_hw:

> +	clk_hw_unregister_divider(pll_10nm->out_dsiclk_hw);

> +err_pclk_mux_hw:

> +	clk_hw_unregister_mux(pll_10nm->pclk_mux_hw);

> +err_post_out_div_clk_hw:

> +	clk_hw_unregister_fixed_factor(pll_10nm->post_out_div_clk_hw);

> +err_by_2_bit_clk_hw:

> +	clk_hw_unregister_fixed_factor(pll_10nm->by_2_bit_clk_hw);

> +err_byte_clk_hw:

> +	clk_hw_unregister_fixed_factor(pll_10nm->byte_clk_hw);

> +err_bit_clk_hw:

> +	clk_hw_unregister_divider(pll_10nm->bit_clk_hw);

> +err_out_div_clk_hw:

> +	clk_hw_unregister_divider(pll_10nm->out_div_clk_hw);

> +err_base_clk_hw:

> +	clk_hw_unregister(&pll_10nm->base.clk_hw);

> +

> +	return ret;

> +}

> +

> +struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)

> +{

> +	struct dsi_pll_10nm *pll_10nm;

> +	struct msm_dsi_pll *pll;

> +	int ret;

> +

> +	pll_10nm = devm_kzalloc(&pdev->dev, sizeof(*pll_10nm), GFP_KERNEL);

> +	if (!pll_10nm)

> +		return ERR_PTR(-ENOMEM);

> +

> +	DBG("DSI PLL%d", id);

> +

> +	pll_10nm->pdev = pdev;

> +	pll_10nm->id = id;

> +	pll_10nm_list[id] = pll_10nm;

> +

> +	pll_10nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");

> +	if (IS_ERR_OR_NULL(pll_10nm->phy_cmn_mmio)) {

> +		DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");

> +		return ERR_PTR(-ENOMEM);

> +	}

> +

> +	pll_10nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");

> +	if (IS_ERR_OR_NULL(pll_10nm->mmio)) {

> +		DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");

> +		return ERR_PTR(-ENOMEM);

> +	}

> +

> +	spin_lock_init(&pll_10nm->postdiv_lock);

> +

> +	pll = &pll_10nm->base;

> +	pll->min_rate = 1000000000UL;

> +	pll->max_rate = 3500000000UL;

> +	pll->get_provider = dsi_pll_10nm_get_provider;

> +	pll->destroy = dsi_pll_10nm_destroy;

> +	pll->save_state = dsi_pll_10nm_save_state;

> +	pll->restore_state = dsi_pll_10nm_restore_state;

> +	pll->set_usecase = dsi_pll_10nm_set_usecase;

> +

> +	pll_10nm->vco_delay = 1;

> +

> +	ret = pll_10nm_register(pll_10nm);

> +	if (ret) {

> +		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);

> +		return ERR_PTR(ret);

> +	}

> +

> +	/* TODO: Remove this when we have proper display handover support */

> +	msm_dsi_pll_save_state(pll);

> +

> +	return pll;

> +}

> +

>  static int dsi_phy_hw_v3_0_is_pll_on(struct msm_dsi_phy *phy)

>  {

>  	void __iomem *base = phy->base;

> diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c

> index 6989730b5fbd..6a63901da7a4 100644

> --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c

> +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c

> @@ -3,13 +3,1102 @@

>   * Copyright (c) 2016, The Linux Foundation. All rights reserved.

>   */

> 

> +#include <linux/clk.h>

> +#include <linux/clk-provider.h>

>  #include <linux/delay.h>

> 

>  #include "dsi_phy.h"

> +#include "dsi_pll.h"

>  #include "dsi.xml.h"

> 

>  #define PHY_14NM_CKLN_IDX	4

> 

> +/*

> + * DSI PLL 14nm - clock diagram (eg: DSI0):

> + *

> + *         dsi0n1_postdiv_clk

> + *                         |

> + *                         |

> + *                 +----+  |  +----+

> + *  dsi0vco_clk ---| n1 |--o--| /8 |-- dsi0pllbyte

> + *                 +----+  |  +----+

> + *                         |           dsi0n1_postdivby2_clk

> + *                         |   +----+  |

> + *                         o---| /2 |--o--|\

> + *                         |   +----+     | \   +----+

> + *                         |              |  |--| n2 |-- dsi0pll

> + *                         o--------------| /   +----+

> + *                                        |/

> + */

> +

> +#define POLL_MAX_READS			15

> +#define POLL_TIMEOUT_US			1000

> +

> +#define NUM_PROVIDED_CLKS		2

> +

> +#define VCO_REF_CLK_RATE		19200000

> +#define VCO_MIN_RATE			1300000000UL

> +#define VCO_MAX_RATE			2600000000UL

> +

> +#define DSI_BYTE_PLL_CLK		0

> +#define DSI_PIXEL_PLL_CLK		1

> +

> +#define DSI_PLL_DEFAULT_VCO_POSTDIV	1

> +

> +struct dsi_pll_input {

> +	u32 fref;	/* reference clk */

> +	u32 fdata;	/* bit clock rate */

> +	u32 dsiclk_sel; /* Mux configuration (see diagram) */

> +	u32 ssc_en;	/* SSC enable/disable */

> +	u32 ldo_en;

> +

> +	/* fixed params */

> +	u32 refclk_dbler_en;

> +	u32 vco_measure_time;

> +	u32 kvco_measure_time;

> +	u32 bandgap_timer;

> +	u32 pll_wakeup_timer;

> +	u32 plllock_cnt;

> +	u32 plllock_rng;

> +	u32 ssc_center;

> +	u32 ssc_adj_period;

> +	u32 ssc_spread;

> +	u32 ssc_freq;

> +	u32 pll_ie_trim;

> +	u32 pll_ip_trim;

> +	u32 pll_iptat_trim;

> +	u32 pll_cpcset_cur;

> +	u32 pll_cpmset_cur;

> +

> +	u32 pll_icpmset;

> +	u32 pll_icpcset;

> +

> +	u32 pll_icpmset_p;

> +	u32 pll_icpmset_m;

> +

> +	u32 pll_icpcset_p;

> +	u32 pll_icpcset_m;

> +

> +	u32 pll_lpf_res1;

> +	u32 pll_lpf_cap1;

> +	u32 pll_lpf_cap2;

> +	u32 pll_c3ctrl;

> +	u32 pll_r3ctrl;

> +};

> +

> +struct dsi_pll_output {

> +	u32 pll_txclk_en;

> +	u32 dec_start;

> +	u32 div_frac_start;

> +	u32 ssc_period;

> +	u32 ssc_step_size;

> +	u32 plllock_cmp;

> +	u32 pll_vco_div_ref;

> +	u32 pll_vco_count;

> +	u32 pll_kvco_div_ref;

> +	u32 pll_kvco_count;

> +	u32 pll_misc1;

> +	u32 pll_lpf2_postdiv;

> +	u32 pll_resetsm_cntrl;

> +	u32 pll_resetsm_cntrl2;

> +	u32 pll_resetsm_cntrl5;

> +	u32 pll_kvco_code;

> +

> +	u32 cmn_clk_cfg0;

> +	u32 cmn_clk_cfg1;

> +	u32 cmn_ldo_cntrl;

> +

> +	u32 pll_postdiv;

> +	u32 fcvo;

> +};

> +

> +struct pll_14nm_cached_state {

> +	unsigned long vco_rate;

> +	u8 n2postdiv;

> +	u8 n1postdiv;

> +};

> +

> +struct dsi_pll_14nm {

> +	struct msm_dsi_pll base;

> +

> +	int id;

> +	struct platform_device *pdev;

> +

> +	void __iomem *phy_cmn_mmio;

> +	void __iomem *mmio;

> +

> +	int vco_delay;

> +

> +	struct dsi_pll_input in;

> +	struct dsi_pll_output out;

> +

> +	/* protects REG_DSI_14nm_PHY_CMN_CLK_CFG0 register */

> +	spinlock_t postdiv_lock;

> +

> +	u64 vco_current_rate;

> +	u64 vco_ref_clk_rate;

> +

> +	/* private clocks: */

> +	struct clk_hw *hws[NUM_DSI_CLOCKS_MAX];

> +	u32 num_hws;

> +

> +	/* clock-provider: */

> +	struct clk_hw_onecell_data *hw_data;

> +

> +	struct pll_14nm_cached_state cached_state;

> +

> +	enum msm_dsi_phy_usecase uc;

> +	struct dsi_pll_14nm *slave;

> +};

> +

> +#define to_pll_14nm(x)	container_of(x, struct dsi_pll_14nm, base)

> +

> +/*

> + * Private struct for N1/N2 post-divider clocks. These clocks are similar to
> + * the generic clk_divider class of clocks. The only difference is that it

> + * also sets the slave DSI PLL's post-dividers if in Dual DSI mode

> + */

> +struct dsi_pll_14nm_postdiv {

> +	struct clk_hw hw;

> +

> +	/* divider params */

> +	u8 shift;

> +	u8 width;

> +	u8 flags; /* same flags as used by clk_divider struct */

> +

> +	struct dsi_pll_14nm *pll;

> +};

> +

> +#define to_pll_14nm_postdiv(_hw) container_of(_hw, struct dsi_pll_14nm_postdiv, hw)

> +

> +/*

> + * Global list of private DSI PLL struct pointers. We need this for Dual DSI
> + * mode, where the master PLL's clk_ops needs access the slave's private data

> + */

> +static struct dsi_pll_14nm *pll_14nm_list[DSI_MAX];

> +

> +static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm,

> +				    u32 nb_tries, u32 timeout_us)

> +{

> +	bool pll_locked = false;

> +	void __iomem *base = pll_14nm->mmio;

> +	u32 tries, val;

> +

> +	tries = nb_tries;

> +	while (tries--) {

> +		val = pll_read(base +

> +			       REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);

> +		pll_locked = !!(val & BIT(5));

> +

> +		if (pll_locked)

> +			break;

> +

> +		udelay(timeout_us);

> +	}

> +

> +	if (!pll_locked) {

> +		tries = nb_tries;

> +		while (tries--) {

> +			val = pll_read(base +

> +				REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);

> +			pll_locked = !!(val & BIT(0));

> +

> +			if (pll_locked)

> +				break;

> +

> +			udelay(timeout_us);

> +		}

> +	}

> +

> +	DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");

> +

> +	return pll_locked;

> +}

> +

> +static void dsi_pll_14nm_input_init(struct dsi_pll_14nm *pll)

> +{

> +	pll->in.fref = pll->vco_ref_clk_rate;

> +	pll->in.fdata = 0;

> +	pll->in.dsiclk_sel = 1;	/* Use the /2 path in Mux */

> +	pll->in.ldo_en = 0;	/* disabled for now */

> +

> +	/* fixed input */

> +	pll->in.refclk_dbler_en = 0;

> +	pll->in.vco_measure_time = 5;

> +	pll->in.kvco_measure_time = 5;

> +	pll->in.bandgap_timer = 4;

> +	pll->in.pll_wakeup_timer = 5;

> +	pll->in.plllock_cnt = 1;

> +	pll->in.plllock_rng = 0;

> +

> +	/*

> +	 * SSC is enabled by default. We might need DT props for configuring

> +	 * some SSC params like PPM and center/down spread etc.

> +	 */

> +	pll->in.ssc_en = 1;

> +	pll->in.ssc_center = 0;		/* down spread by default */

> +	pll->in.ssc_spread = 5;		/* PPM / 1000 */

> +	pll->in.ssc_freq = 31500;	/* default recommended */

> +	pll->in.ssc_adj_period = 37;

> +

> +	pll->in.pll_ie_trim = 4;

> +	pll->in.pll_ip_trim = 4;

> +	pll->in.pll_cpcset_cur = 1;

> +	pll->in.pll_cpmset_cur = 1;

> +	pll->in.pll_icpmset = 4;

> +	pll->in.pll_icpcset = 4;

> +	pll->in.pll_icpmset_p = 0;

> +	pll->in.pll_icpmset_m = 0;

> +	pll->in.pll_icpcset_p = 0;

> +	pll->in.pll_icpcset_m = 0;

> +	pll->in.pll_lpf_res1 = 3;

> +	pll->in.pll_lpf_cap1 = 11;

> +	pll->in.pll_lpf_cap2 = 1;

> +	pll->in.pll_iptat_trim = 7;

> +	pll->in.pll_c3ctrl = 2;

> +	pll->in.pll_r3ctrl = 1;

> +}

> +

> +#define CEIL(x, y)		(((x) + ((y) - 1)) / (y))

> +

> +static void pll_14nm_ssc_calc(struct dsi_pll_14nm *pll)

> +{

> +	u32 period, ssc_period;

> +	u32 ref, rem;

> +	u64 step_size;

> +

> +	DBG("vco=%lld ref=%lld", pll->vco_current_rate, pll->vco_ref_clk_rate);

> +

> +	ssc_period = pll->in.ssc_freq / 500;

> +	period = (u32)pll->vco_ref_clk_rate / 1000;

> +	ssc_period  = CEIL(period, ssc_period);

> +	ssc_period -= 1;

> +	pll->out.ssc_period = ssc_period;

> +

> +	DBG("ssc freq=%d spread=%d period=%d", pll->in.ssc_freq,

> +	    pll->in.ssc_spread, pll->out.ssc_period);

> +

> +	step_size = (u32)pll->vco_current_rate;

> +	ref = pll->vco_ref_clk_rate;

> +	ref /= 1000;

> +	step_size = div_u64(step_size, ref);

> +	step_size <<= 20;

> +	step_size = div_u64(step_size, 1000);

> +	step_size *= pll->in.ssc_spread;

> +	step_size = div_u64(step_size, 1000);

> +	step_size *= (pll->in.ssc_adj_period + 1);

> +

> +	rem = 0;

> +	step_size = div_u64_rem(step_size, ssc_period + 1, &rem);

> +	if (rem)

> +		step_size++;

> +

> +	DBG("step_size=%lld", step_size);

> +

> +	step_size &= 0x0ffff;	/* take lower 16 bits */

> +

> +	pll->out.ssc_step_size = step_size;

> +}

> +

> +static void pll_14nm_dec_frac_calc(struct dsi_pll_14nm *pll)

> +{

> +	struct dsi_pll_input *pin = &pll->in;

> +	struct dsi_pll_output *pout = &pll->out;

> +	u64 multiplier = BIT(20);

> +	u64 dec_start_multiple, dec_start, pll_comp_val;

> +	u32 duration, div_frac_start;

> +	u64 vco_clk_rate = pll->vco_current_rate;

> +	u64 fref = pll->vco_ref_clk_rate;

> +

> +	DBG("vco_clk_rate=%lld ref_clk_rate=%lld", vco_clk_rate, fref);

> +

> +	dec_start_multiple = div_u64(vco_clk_rate * multiplier, fref);

> +	div_u64_rem(dec_start_multiple, multiplier, &div_frac_start);

> +

> +	dec_start = div_u64(dec_start_multiple, multiplier);

> +

> +	pout->dec_start = (u32)dec_start;

> +	pout->div_frac_start = div_frac_start;

> +

> +	if (pin->plllock_cnt == 0)

> +		duration = 1024;

> +	else if (pin->plllock_cnt == 1)

> +		duration = 256;

> +	else if (pin->plllock_cnt == 2)

> +		duration = 128;

> +	else

> +		duration = 32;

> +

> +	pll_comp_val = duration * dec_start_multiple;

> +	pll_comp_val = div_u64(pll_comp_val, multiplier);

> +	do_div(pll_comp_val, 10);

> +

> +	pout->plllock_cmp = (u32)pll_comp_val;

> +

> +	pout->pll_txclk_en = 1;

> +	pout->cmn_ldo_cntrl = 0x3c;

> +}

> +

> +static u32 pll_14nm_kvco_slop(u32 vrate)

> +{

> +	u32 slop = 0;

> +

> +	if (vrate > VCO_MIN_RATE && vrate <= 1800000000UL)

> +		slop =  600;

> +	else if (vrate > 1800000000UL && vrate < 2300000000UL)

> +		slop = 400;

> +	else if (vrate > 2300000000UL && vrate < VCO_MAX_RATE)

> +		slop = 280;

> +

> +	return slop;

> +}

> +

> +static void pll_14nm_calc_vco_count(struct dsi_pll_14nm *pll)

> +{

> +	struct dsi_pll_input *pin = &pll->in;

> +	struct dsi_pll_output *pout = &pll->out;

> +	u64 vco_clk_rate = pll->vco_current_rate;

> +	u64 fref = pll->vco_ref_clk_rate;

> +	u64 data;

> +	u32 cnt;

> +

> +	data = fref * pin->vco_measure_time;

> +	do_div(data, 1000000);

> +	data &= 0x03ff;	/* 10 bits */

> +	data -= 2;

> +	pout->pll_vco_div_ref = data;

> +

> +	data = div_u64(vco_clk_rate, 1000000);	/* unit is Mhz */

> +	data *= pin->vco_measure_time;

> +	do_div(data, 10);

> +	pout->pll_vco_count = data;

> +

> +	data = fref * pin->kvco_measure_time;

> +	do_div(data, 1000000);

> +	data &= 0x03ff;	/* 10 bits */

> +	data -= 1;

> +	pout->pll_kvco_div_ref = data;

> +

> +	cnt = pll_14nm_kvco_slop(vco_clk_rate);

> +	cnt *= 2;

> +	cnt /= 100;

> +	cnt *= pin->kvco_measure_time;

> +	pout->pll_kvco_count = cnt;

> +

> +	pout->pll_misc1 = 16;

> +	pout->pll_resetsm_cntrl = 48;

> +	pout->pll_resetsm_cntrl2 = pin->bandgap_timer << 3;

> +	pout->pll_resetsm_cntrl5 = pin->pll_wakeup_timer;

> +	pout->pll_kvco_code = 0;

> +}

> +

> +static void pll_db_commit_ssc(struct dsi_pll_14nm *pll)

> +{

> +	void __iomem *base = pll->mmio;

> +	struct dsi_pll_input *pin = &pll->in;

> +	struct dsi_pll_output *pout = &pll->out;

> +	u8 data;

> +

> +	data = pin->ssc_adj_period;

> +	data &= 0x0ff;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER1, data);

> +	data = (pin->ssc_adj_period >> 8);

> +	data &= 0x03;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER2, data);

> +

> +	data = pout->ssc_period;

> +	data &= 0x0ff;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER1, data);

> +	data = (pout->ssc_period >> 8);

> +	data &= 0x0ff;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER2, data);

> +

> +	data = pout->ssc_step_size;

> +	data &= 0x0ff;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE1, data);

> +	data = (pout->ssc_step_size >> 8);

> +	data &= 0x0ff;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE2, data);

> +

> +	data = (pin->ssc_center & 0x01);

> +	data <<= 1;

> +	data |= 0x01; /* enable */

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_EN_CENTER, data);

> +

> +	wmb();	/* make sure register committed */

> +}

> +

> +static void pll_db_commit_common(struct dsi_pll_14nm *pll,

> +				 struct dsi_pll_input *pin,

> +				 struct dsi_pll_output *pout)

> +{

> +	void __iomem *base = pll->mmio;

> +	u8 data;

> +

> +	/* confgiure the non frequency dependent pll registers */

> +	data = 0;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_SYSCLK_EN_RESET, data);

> +

> +	data = pout->pll_txclk_en;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_TXCLK_EN, data);

> +

> +	data = pout->pll_resetsm_cntrl;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL, data);

> +	data = pout->pll_resetsm_cntrl2;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL2, data);

> +	data = pout->pll_resetsm_cntrl5;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL5, data);

> +

> +	data = pout->pll_vco_div_ref & 0xff;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF1, data);

> +	data = (pout->pll_vco_div_ref >> 8) & 0x3;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF2, data);

> +

> +	data = pout->pll_kvco_div_ref & 0xff;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF1, data);

> +	data = (pout->pll_kvco_div_ref >> 8) & 0x3;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF2, data);

> +

> +	data = pout->pll_misc1;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_MISC1, data);

> +

> +	data = pin->pll_ie_trim;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_IE_TRIM, data);

> +

> +	data = pin->pll_ip_trim;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_IP_TRIM, data);

> +

> +	data = pin->pll_cpmset_cur << 3 | pin->pll_cpcset_cur;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_CP_SET_CUR, data);

> +

> +	data = pin->pll_icpcset_p << 3 | pin->pll_icpcset_m;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPCSET, data);

> +

> +	data = pin->pll_icpmset_p << 3 | pin->pll_icpcset_m;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPMSET, data);

> +

> +	data = pin->pll_icpmset << 3 | pin->pll_icpcset;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICP_SET, data);

> +

> +	data = pin->pll_lpf_cap2 << 4 | pin->pll_lpf_cap1;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_LPF1, data);

> +

> +	data = pin->pll_iptat_trim;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_IPTAT_TRIM, data);

> +

> +	data = pin->pll_c3ctrl | pin->pll_r3ctrl << 4;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_CRCTRL, data);

> +}

> +

> +static void pll_14nm_software_reset(struct dsi_pll_14nm *pll_14nm)

> +{

> +	void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;

> +

> +	/* de assert pll start and apply pll sw reset */

> +

> +	/* stop pll */

> +	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0);

> +

> +	/* pll sw reset */

> +	pll_write_udelay(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x20, 10);

> +	wmb();	/* make sure register committed */

> +

> +	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0);

> +	wmb();	/* make sure register committed */

> +}

> +

> +static void pll_db_commit_14nm(struct dsi_pll_14nm *pll,

> +			       struct dsi_pll_input *pin,

> +			       struct dsi_pll_output *pout)

> +{

> +	void __iomem *base = pll->mmio;

> +	void __iomem *cmn_base = pll->phy_cmn_mmio;

> +	u8 data;

> +

> +	DBG("DSI%d PLL", pll->id);

> +

> +	data = pout->cmn_ldo_cntrl;

> +	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL, data);

> +

> +	pll_db_commit_common(pll, pin, pout);

> +

> +	pll_14nm_software_reset(pll);

> +

> +	data = pin->dsiclk_sel; /* set dsiclk_sel = 1  */

> +	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG1, data);

> +

> +	data = 0xff; /* data, clk, pll normal operation */

> +	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_0, data);

> +

> +	/* configure the frequency dependent pll registers */

> +	data = pout->dec_start;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_DEC_START, data);

> +

> +	data = pout->div_frac_start & 0xff;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1, data);

> +	data = (pout->div_frac_start >> 8) & 0xff;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2, data);

> +	data = (pout->div_frac_start >> 16) & 0xf;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3, data);

> +

> +	data = pout->plllock_cmp & 0xff;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP1, data);

> +

> +	data = (pout->plllock_cmp >> 8) & 0xff;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP2, data);

> +

> +	data = (pout->plllock_cmp >> 16) & 0x3;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP3, data);

> +

> +	data = pin->plllock_cnt << 1 | pin->plllock_rng << 3;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP_EN, data);

> +

> +	data = pout->pll_vco_count & 0xff;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT1, data);

> +	data = (pout->pll_vco_count >> 8) & 0xff;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT2, data);

> +

> +	data = pout->pll_kvco_count & 0xff;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT1, data);

> +	data = (pout->pll_kvco_count >> 8) & 0x3;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT2, data);

> +

> +	data = (pout->pll_postdiv - 1) << 4 | pin->pll_lpf_res1;

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_LPF2_POSTDIV, data);

> +

> +	if (pin->ssc_en)

> +		pll_db_commit_ssc(pll);

> +

> +	wmb();	/* make sure register committed */

> +}

> +

> +/*

> + * VCO clock Callbacks

> + */

> +static int dsi_pll_14nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,

> +				     unsigned long parent_rate)

> +{

> +	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);

> +	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);

> +	struct dsi_pll_input *pin = &pll_14nm->in;

> +	struct dsi_pll_output *pout = &pll_14nm->out;

> +

> +	DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_14nm->id, rate,

> +	    parent_rate);

> +

> +	pll_14nm->vco_current_rate = rate;

> +	pll_14nm->vco_ref_clk_rate = VCO_REF_CLK_RATE;

> +

> +	dsi_pll_14nm_input_init(pll_14nm);

> +

> +	/*

> +	 * This configures the post divider internal to the VCO. It's

> +	 * fixed to divide by 1 for now.

> +	 *

> +	 * tx_band = pll_postdiv.

> +	 * 0: divided by 1

> +	 * 1: divided by 2

> +	 * 2: divided by 4

> +	 * 3: divided by 8

> +	 */

> +	pout->pll_postdiv = DSI_PLL_DEFAULT_VCO_POSTDIV;

> +

> +	pll_14nm_dec_frac_calc(pll_14nm);

> +

> +	if (pin->ssc_en)

> +		pll_14nm_ssc_calc(pll_14nm);

> +

> +	pll_14nm_calc_vco_count(pll_14nm);

> +

> +	/* commit the slave DSI PLL registers if we're master. Note that we

> +	 * don't lock the slave PLL. We just ensure that the PLL/PHY registers

> +	 * of the master and slave are identical

> +	 */

> +	if (pll_14nm->uc == MSM_DSI_PHY_MASTER) {

> +		struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;

> +

> +		pll_db_commit_14nm(pll_14nm_slave, pin, pout);

> +	}

> +

> +	pll_db_commit_14nm(pll_14nm, pin, pout);

> +

> +	return 0;

> +}

> +

> +static unsigned long dsi_pll_14nm_vco_recalc_rate(struct clk_hw *hw,

> +						  unsigned long parent_rate)

> +{

> +	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);

> +	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);

> +	void __iomem *base = pll_14nm->mmio;

> +	u64 vco_rate, multiplier = BIT(20);

> +	u32 div_frac_start;

> +	u32 dec_start;

> +	u64 ref_clk = parent_rate;

> +

> +	dec_start = pll_read(base + REG_DSI_14nm_PHY_PLL_DEC_START);

> +	dec_start &= 0x0ff;

> +

> +	DBG("dec_start = %x", dec_start);

> +

> +	div_frac_start = (pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3)
> +				& 0xf) << 16;
> +	div_frac_start |= (pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2)
> +				& 0xff) << 8;
> +	div_frac_start |= pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1)
> +				& 0xff;

> +

> +	DBG("div_frac_start = %x", div_frac_start);

> +

> +	vco_rate = ref_clk * dec_start;

> +

> +	vco_rate += ((ref_clk * div_frac_start) / multiplier);

> +

> +	/*

> +	 * Recalculating the rate from dec_start and frac_start doesn't end up
> +	 * the rate we originally set. Convert the freq to KHz, round it up and

> +	 * convert it back to MHz.

> +	 */

> +	vco_rate = DIV_ROUND_UP_ULL(vco_rate, 1000) * 1000;

> +

> +	DBG("returning vco rate = %lu", (unsigned long)vco_rate);

> +

> +	return (unsigned long)vco_rate;

> +}

> +

> +static const struct clk_ops clk_ops_dsi_pll_14nm_vco = {

> +	.round_rate = msm_dsi_pll_helper_clk_round_rate,

> +	.set_rate = dsi_pll_14nm_vco_set_rate,

> +	.recalc_rate = dsi_pll_14nm_vco_recalc_rate,

> +	.prepare = msm_dsi_pll_helper_clk_prepare,

> +	.unprepare = msm_dsi_pll_helper_clk_unprepare,

> +};

> +

> +/*

> + * N1 and N2 post-divider clock callbacks

> + */

> +#define div_mask(width)	((1 << (width)) - 1)

> +static unsigned long dsi_pll_14nm_postdiv_recalc_rate(struct clk_hw *hw,

> +						      unsigned long parent_rate)

> +{

> +	struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);

> +	struct dsi_pll_14nm *pll_14nm = postdiv->pll;

> +	void __iomem *base = pll_14nm->phy_cmn_mmio;

> +	u8 shift = postdiv->shift;

> +	u8 width = postdiv->width;

> +	u32 val;

> +

> +	DBG("DSI%d PLL parent rate=%lu", pll_14nm->id, parent_rate);

> +

> +	val = pll_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0) >> shift;

> +	val &= div_mask(width);

> +

> +	return divider_recalc_rate(hw, parent_rate, val, NULL,

> +				   postdiv->flags, width);

> +}

> +

> +static long dsi_pll_14nm_postdiv_round_rate(struct clk_hw *hw,

> +					    unsigned long rate,

> +					    unsigned long *prate)

> +{

> +	struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);

> +	struct dsi_pll_14nm *pll_14nm = postdiv->pll;

> +

> +	DBG("DSI%d PLL parent rate=%lu", pll_14nm->id, rate);

> +

> +	return divider_round_rate(hw, rate, prate, NULL,

> +				  postdiv->width,

> +				  postdiv->flags);

> +}

> +

> +static int dsi_pll_14nm_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,

> +					 unsigned long parent_rate)

> +{

> +	struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);

> +	struct dsi_pll_14nm *pll_14nm = postdiv->pll;

> +	void __iomem *base = pll_14nm->phy_cmn_mmio;

> +	spinlock_t *lock = &pll_14nm->postdiv_lock;

> +	u8 shift = postdiv->shift;

> +	u8 width = postdiv->width;

> +	unsigned int value;

> +	unsigned long flags = 0;

> +	u32 val;

> +

> +	DBG("DSI%d PLL parent rate=%lu parent rate %lu", pll_14nm->id, rate,

> +	    parent_rate);

> +

> +	value = divider_get_val(rate, parent_rate, NULL, postdiv->width,

> +				postdiv->flags);

> +

> +	spin_lock_irqsave(lock, flags);

> +

> +	val = pll_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);

> +	val &= ~(div_mask(width) << shift);

> +

> +	val |= value << shift;

> +	pll_write(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val);

> +

> +	/* If we're master in dual DSI mode, then the slave PLL's post-dividers

> +	 * follow the master's post dividers

> +	 */

> +	if (pll_14nm->uc == MSM_DSI_PHY_MASTER) {

> +		struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;

> +		void __iomem *slave_base = pll_14nm_slave->phy_cmn_mmio;

> +

> +		pll_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val);

> +	}

> +

> +	spin_unlock_irqrestore(lock, flags);

> +

> +	return 0;

> +}

> +

> +static const struct clk_ops clk_ops_dsi_pll_14nm_postdiv = {

> +	.recalc_rate = dsi_pll_14nm_postdiv_recalc_rate,

> +	.round_rate = dsi_pll_14nm_postdiv_round_rate,

> +	.set_rate = dsi_pll_14nm_postdiv_set_rate,

> +};

> +

> +/*

> + * PLL Callbacks

> + */

> +

> +static int dsi_pll_14nm_enable_seq(struct msm_dsi_pll *pll)

> +{

> +	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);

> +	void __iomem *base = pll_14nm->mmio;

> +	void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;

> +	bool locked;

> +

> +	DBG("");

> +

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_VREF_CFG1, 0x10);

> +	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 1);

> +

> +	locked = pll_14nm_poll_for_ready(pll_14nm, POLL_MAX_READS,

> +					 POLL_TIMEOUT_US);

> +

> +	if (unlikely(!locked))

> +		DRM_DEV_ERROR(&pll_14nm->pdev->dev, "DSI PLL lock failed\n");

> +	else

> +		DBG("DSI PLL lock success");

> +

> +	return locked ? 0 : -EINVAL;

> +}

> +

> +static void dsi_pll_14nm_disable_seq(struct msm_dsi_pll *pll)

> +{

> +	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);

> +	void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;

> +

> +	DBG("");

> +

> +	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0);

> +}

> +

> +static void dsi_pll_14nm_save_state(struct msm_dsi_pll *pll)

> +{

> +	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);

> +	struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state;

> +	void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;

> +	u32 data;

> +

> +	data = pll_read(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);

> +

> +	cached_state->n1postdiv = data & 0xf;

> +	cached_state->n2postdiv = (data >> 4) & 0xf;

> +

> +	DBG("DSI%d PLL save state %x %x", pll_14nm->id,

> +	    cached_state->n1postdiv, cached_state->n2postdiv);

> +

> +	cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw);

> +}

> +

> +static int dsi_pll_14nm_restore_state(struct msm_dsi_pll *pll)

> +{

> +	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);

> +	struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state;

> +	void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;

> +	u32 data;

> +	int ret;

> +

> +	ret = dsi_pll_14nm_vco_set_rate(&pll->clk_hw,

> +					cached_state->vco_rate, 0);

> +	if (ret) {

> +		DRM_DEV_ERROR(&pll_14nm->pdev->dev,

> +			"restore vco rate failed. ret=%d\n", ret);

> +		return ret;

> +	}

> +

> +	data = cached_state->n1postdiv | (cached_state->n2postdiv << 4);

> +

> +	DBG("DSI%d PLL restore state %x %x", pll_14nm->id,

> +	    cached_state->n1postdiv, cached_state->n2postdiv);

> +

> +	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data);

> +

> +	/* also restore post-dividers for slave DSI PLL */

> +	if (pll_14nm->uc == MSM_DSI_PHY_MASTER) {

> +		struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;

> +		void __iomem *slave_base = pll_14nm_slave->phy_cmn_mmio;

> +

> +		pll_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data);

> +	}

> +

> +	return 0;

> +}

> +

> +static int dsi_pll_14nm_set_usecase(struct msm_dsi_pll *pll,

> +				    enum msm_dsi_phy_usecase uc)

> +{

> +	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);

> +	void __iomem *base = pll_14nm->mmio;

> +	u32 clkbuflr_en, bandgap = 0;

> +

> +	switch (uc) {

> +	case MSM_DSI_PHY_STANDALONE:

> +		clkbuflr_en = 0x1;

> +		break;

> +	case MSM_DSI_PHY_MASTER:

> +		clkbuflr_en = 0x3;

> +		pll_14nm->slave = pll_14nm_list[(pll_14nm->id + 1) % DSI_MAX];

> +		break;

> +	case MSM_DSI_PHY_SLAVE:

> +		clkbuflr_en = 0x0;

> +		bandgap = 0x3;

> +		break;

> +	default:

> +		return -EINVAL;

> +	}

> +

> +	pll_write(base + REG_DSI_14nm_PHY_PLL_CLKBUFLR_EN, clkbuflr_en);

> +	if (bandgap)

> +		pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_BANDGAP, bandgap);

> +

> +	pll_14nm->uc = uc;

> +

> +	return 0;

> +}

> +

> +static int dsi_pll_14nm_get_provider(struct msm_dsi_pll *pll,

> +				     struct clk **byte_clk_provider,

> +				     struct clk **pixel_clk_provider)

> +{

> +	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);

> +	struct clk_hw_onecell_data *hw_data = pll_14nm->hw_data;

> +

> +	if (byte_clk_provider)

> +		*byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk;

> +	if (pixel_clk_provider)

> +		*pixel_clk_provider = hw_data->hws[DSI_PIXEL_PLL_CLK]->clk;

> +

> +	return 0;

> +}

> +

> +static void dsi_pll_14nm_destroy(struct msm_dsi_pll *pll)

> +{

> +	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);

> +	struct platform_device *pdev = pll_14nm->pdev;

> +	int num_hws = pll_14nm->num_hws;

> +

> +	of_clk_del_provider(pdev->dev.of_node);

> +

> +	while (num_hws--)

> +		clk_hw_unregister(pll_14nm->hws[num_hws]);

> +}

> +

> +static struct clk_hw *pll_14nm_postdiv_register(struct dsi_pll_14nm *pll_14nm,

> +						const char *name,

> +						const char *parent_name,

> +						unsigned long flags,

> +						u8 shift)

> +{

> +	struct dsi_pll_14nm_postdiv *pll_postdiv;

> +	struct device *dev = &pll_14nm->pdev->dev;

> +	struct clk_init_data postdiv_init = {

> +		.parent_names = (const char *[]) { parent_name },

> +		.num_parents = 1,

> +		.name = name,

> +		.flags = flags,

> +		.ops = &clk_ops_dsi_pll_14nm_postdiv,

> +	};

> +	int ret;

> +

> +	pll_postdiv = devm_kzalloc(dev, sizeof(*pll_postdiv), GFP_KERNEL);

> +	if (!pll_postdiv)

> +		return ERR_PTR(-ENOMEM);

> +

> +	pll_postdiv->pll = pll_14nm;

> +	pll_postdiv->shift = shift;

> +	/* both N1 and N2 postdividers are 4 bits wide */

> +	pll_postdiv->width = 4;

> +	/* range of each divider is from 1 to 15 */

> +	pll_postdiv->flags = CLK_DIVIDER_ONE_BASED;

> +	pll_postdiv->hw.init = &postdiv_init;

> +

> +	ret = clk_hw_register(dev, &pll_postdiv->hw);

> +	if (ret)

> +		return ERR_PTR(ret);

> +

> +	return &pll_postdiv->hw;

> +}

> +

> +static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm)

> +{

> +	char clk_name[32], parent[32], vco_name[32];

> +	struct clk_init_data vco_init = {

> +		.parent_names = (const char *[]){ "xo" },

> +		.num_parents = 1,

> +		.name = vco_name,

> +		.flags = CLK_IGNORE_UNUSED,

> +		.ops = &clk_ops_dsi_pll_14nm_vco,

> +	};

> +	struct device *dev = &pll_14nm->pdev->dev;

> +	struct clk_hw **hws = pll_14nm->hws;

> +	struct clk_hw_onecell_data *hw_data;

> +	struct clk_hw *hw;

> +	int num = 0;

> +	int ret;

> +

> +	DBG("DSI%d", pll_14nm->id);

> +

> +	hw_data = devm_kzalloc(dev, sizeof(*hw_data) +

> +			       NUM_PROVIDED_CLKS * sizeof(struct clk_hw *),

> +			       GFP_KERNEL);

> +	if (!hw_data)

> +		return -ENOMEM;

> +

> +	snprintf(vco_name, 32, "dsi%dvco_clk", pll_14nm->id);

> +	pll_14nm->base.clk_hw.init = &vco_init;

> +

> +	ret = clk_hw_register(dev, &pll_14nm->base.clk_hw);

> +	if (ret)

> +		return ret;

> +

> +	hws[num++] = &pll_14nm->base.clk_hw;

> +

> +	snprintf(clk_name, 32, "dsi%dn1_postdiv_clk", pll_14nm->id);

> +	snprintf(parent, 32, "dsi%dvco_clk", pll_14nm->id);

> +

> +	/* N1 postdiv, bits 0-3 in REG_DSI_14nm_PHY_CMN_CLK_CFG0 */

> +	hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent,

> +				       CLK_SET_RATE_PARENT, 0);

> +	if (IS_ERR(hw))

> +		return PTR_ERR(hw);

> +

> +	hws[num++] = hw;

> +

> +	snprintf(clk_name, 32, "dsi%dpllbyte", pll_14nm->id);

> +	snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->id);

> +

> +	/* DSI Byte clock = VCO_CLK / N1 / 8 */

> +	hw = clk_hw_register_fixed_factor(dev, clk_name, parent,

> +					  CLK_SET_RATE_PARENT, 1, 8);

> +	if (IS_ERR(hw))

> +		return PTR_ERR(hw);

> +

> +	hws[num++] = hw;

> +	hw_data->hws[DSI_BYTE_PLL_CLK] = hw;

> +

> +	snprintf(clk_name, 32, "dsi%dn1_postdivby2_clk", pll_14nm->id);

> +	snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->id);

> +

> +	/*

> +	 * Skip the mux for now, force DSICLK_SEL to 1, Add a /2 divider

> +	 * on the way. Don't let it set parent.

> +	 */

> +	hw = clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 1, 2);

> +	if (IS_ERR(hw))

> +		return PTR_ERR(hw);

> +

> +	hws[num++] = hw;

> +

> +	snprintf(clk_name, 32, "dsi%dpll", pll_14nm->id);

> +	snprintf(parent, 32, "dsi%dn1_postdivby2_clk", pll_14nm->id);

> +

> +	/* DSI pixel clock = VCO_CLK / N1 / 2 / N2

> +	 * This is the output of N2 post-divider, bits 4-7 in

> +	 * REG_DSI_14nm_PHY_CMN_CLK_CFG0. Don't let it set parent.

> +	 */

> +	hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent, 0, 4);

> +	if (IS_ERR(hw))

> +		return PTR_ERR(hw);

> +

> +	hws[num++] = hw;

> +	hw_data->hws[DSI_PIXEL_PLL_CLK]	= hw;

> +

> +	pll_14nm->num_hws = num;

> +

> +	hw_data->num = NUM_PROVIDED_CLKS;

> +	pll_14nm->hw_data = hw_data;

> +

> +	ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,

> +				     pll_14nm->hw_data);

> +	if (ret) {

> +		DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);

> +		return ret;

> +	}

> +

> +	return 0;

> +}

> +

> +struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id)

> +{

> +	struct dsi_pll_14nm *pll_14nm;

> +	struct msm_dsi_pll *pll;

> +	int ret;

> +

> +	if (!pdev)

> +		return ERR_PTR(-ENODEV);

> +

> +	pll_14nm = devm_kzalloc(&pdev->dev, sizeof(*pll_14nm), GFP_KERNEL);

> +	if (!pll_14nm)

> +		return ERR_PTR(-ENOMEM);

> +

> +	DBG("PLL%d", id);

> +

> +	pll_14nm->pdev = pdev;

> +	pll_14nm->id = id;

> +	pll_14nm_list[id] = pll_14nm;

> +

> +	pll_14nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");

> +	if (IS_ERR_OR_NULL(pll_14nm->phy_cmn_mmio)) {

> +		DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");

> +		return ERR_PTR(-ENOMEM);

> +	}

> +

> +	pll_14nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");

> +	if (IS_ERR_OR_NULL(pll_14nm->mmio)) {

> +		DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");

> +		return ERR_PTR(-ENOMEM);

> +	}

> +

> +	spin_lock_init(&pll_14nm->postdiv_lock);

> +

> +	pll = &pll_14nm->base;

> +	pll->min_rate = VCO_MIN_RATE;

> +	pll->max_rate = VCO_MAX_RATE;

> +	pll->get_provider = dsi_pll_14nm_get_provider;

> +	pll->destroy = dsi_pll_14nm_destroy;

> +	pll->disable_seq = dsi_pll_14nm_disable_seq;

> +	pll->save_state = dsi_pll_14nm_save_state;

> +	pll->restore_state = dsi_pll_14nm_restore_state;

> +	pll->set_usecase = dsi_pll_14nm_set_usecase;

> +

> +	pll_14nm->vco_delay = 1;

> +

> +	pll->en_seq_cnt = 1;

> +	pll->enable_seqs[0] = dsi_pll_14nm_enable_seq;

> +

> +	ret = pll_14nm_register(pll_14nm);

> +	if (ret) {

> +		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);

> +		return ERR_PTR(ret);

> +	}

> +

> +	return pll;

> +}

> +

>  static void dsi_14nm_dphy_set_timing(struct msm_dsi_phy *phy,

>  				     struct msm_dsi_dphy_timing *timing,

>  				     int lane_idx)

> diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c

> index 5bf79de0da67..2f502efa4dd5 100644

> --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c

> +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c

> @@ -3,9 +3,646 @@

>   * Copyright (c) 2015, The Linux Foundation. All rights reserved.

>   */

> 

> +#include <linux/clk.h>

> +#include <linux/clk-provider.h>

> +

>  #include "dsi_phy.h"

> +#include "dsi_pll.h"

>  #include "dsi.xml.h"

> 

> +/*

> + * DSI PLL 28nm - clock diagram (eg: DSI0):

> + *

> + *         dsi0analog_postdiv_clk

> + *                             |         dsi0indirect_path_div2_clk

> + *                             |          |

> + *                   +------+  |  +----+  |  |\   dsi0byte_mux

> + *  dsi0vco_clk --o--| DIV1 |--o--| /2 |--o--| \   |

> + *                |  +------+     +----+     | m|  |  +----+

> + *                |                          | u|--o--| /4 |-- dsi0pllbyte

> + *                |                          | x|     +----+

> + *                o--------------------------| /

> + *                |                          |/

> + *                |          +------+

> + *                o----------| DIV3 |------------------------- dsi0pll

> + *                           +------+

> + */

> +

> +#define POLL_MAX_READS			10

> +#define POLL_TIMEOUT_US		50

> +

> +#define NUM_PROVIDED_CLKS		2

> +

> +#define VCO_REF_CLK_RATE		19200000

> +#define VCO_MIN_RATE			350000000

> +#define VCO_MAX_RATE			750000000

> +

> +#define DSI_BYTE_PLL_CLK		0

> +#define DSI_PIXEL_PLL_CLK		1

> +

> +#define LPFR_LUT_SIZE			10

> +struct lpfr_cfg {

> +	unsigned long vco_rate;

> +	u32 resistance;

> +};

> +

> +/* Loop filter resistance: */

> +static const struct lpfr_cfg lpfr_lut[LPFR_LUT_SIZE] = {

> +	{ 479500000,  8 },

> +	{ 480000000, 11 },

> +	{ 575500000,  8 },

> +	{ 576000000, 12 },

> +	{ 610500000,  8 },

> +	{ 659500000,  9 },

> +	{ 671500000, 10 },

> +	{ 672000000, 14 },

> +	{ 708500000, 10 },

> +	{ 750000000, 11 },

> +};

> +

> +struct pll_28nm_cached_state {

> +	unsigned long vco_rate;

> +	u8 postdiv3;

> +	u8 postdiv1;

> +	u8 byte_mux;

> +};

> +

> +struct dsi_pll_28nm {

> +	struct msm_dsi_pll base;

> +

> +	int id;

> +	struct platform_device *pdev;

> +	void __iomem *mmio;

> +

> +	int vco_delay;

> +

> +	/* private clocks: */

> +	struct clk *clks[NUM_DSI_CLOCKS_MAX];

> +	u32 num_clks;

> +

> +	/* clock-provider: */

> +	struct clk *provided_clks[NUM_PROVIDED_CLKS];

> +	struct clk_onecell_data clk_data;

> +

> +	struct pll_28nm_cached_state cached_state;

> +};

> +

> +#define to_pll_28nm(x)	container_of(x, struct dsi_pll_28nm, base)

> +

> +static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,

> +				u32 nb_tries, u32 timeout_us)

> +{

> +	bool pll_locked = false;

> +	u32 val;

> +

> +	while (nb_tries--) {

> +		val = pll_read(pll_28nm->mmio + REG_DSI_28nm_PHY_PLL_STATUS);

> +		pll_locked = !!(val & DSI_28nm_PHY_PLL_STATUS_PLL_RDY);

> +

> +		if (pll_locked)

> +			break;

> +

> +		udelay(timeout_us);

> +	}

> +	DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");

> +

> +	return pll_locked;

> +}

> +

> +static void pll_28nm_software_reset(struct dsi_pll_28nm *pll_28nm)

> +{

> +	void __iomem *base = pll_28nm->mmio;

> +

> +	/*

> +	 * Add HW recommended delays after toggling the software

> +	 * reset bit off and back on.

> +	 */

> +	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG,

> +			DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, 1);

> +	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG, 0x00, 1);

> +}

> +

> +/*

> + * Clock Callbacks

> + */

> +static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,

> +		unsigned long parent_rate)

> +{

> +	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);

> +	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> +	struct device *dev = &pll_28nm->pdev->dev;

> +	void __iomem *base = pll_28nm->mmio;

> +	unsigned long div_fbx1000, gen_vco_clk;

> +	u32 refclk_cfg, frac_n_mode, frac_n_value;

> +	u32 sdm_cfg0, sdm_cfg1, sdm_cfg2, sdm_cfg3;

> +	u32 cal_cfg10, cal_cfg11;

> +	u32 rem;

> +	int i;

> +

> +	VERB("rate=%lu, parent's=%lu", rate, parent_rate);

> +

> +	/* Force postdiv2 to be div-4 */

> +	pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV2_CFG, 3);

> +

> +	/* Configure the Loop filter resistance */

> +	for (i = 0; i < LPFR_LUT_SIZE; i++)

> +		if (rate <= lpfr_lut[i].vco_rate)

> +			break;

> +	if (i == LPFR_LUT_SIZE) {

> +		DRM_DEV_ERROR(dev, "unable to get loop filter resistance. vco=%lu\n",

> +				rate);

> +		return -EINVAL;

> +	}

> +	pll_write(base + REG_DSI_28nm_PHY_PLL_LPFR_CFG, lpfr_lut[i].resistance);

> +

> +	/* Loop filter capacitance values : c1 and c2 */

> +	pll_write(base + REG_DSI_28nm_PHY_PLL_LPFC1_CFG, 0x70);

> +	pll_write(base + REG_DSI_28nm_PHY_PLL_LPFC2_CFG, 0x15);

> +

> +	rem = rate % VCO_REF_CLK_RATE;

> +	if (rem) {

> +		refclk_cfg = DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;

> +		frac_n_mode = 1;

> +		div_fbx1000 = rate / (VCO_REF_CLK_RATE / 500);

> +		gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 500);

> +	} else {

> +		refclk_cfg = 0x0;

> +		frac_n_mode = 0;

> +		div_fbx1000 = rate / (VCO_REF_CLK_RATE / 1000);

> +		gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 1000);

> +	}

> +

> +	DBG("refclk_cfg = %d", refclk_cfg);

> +

> +	rem = div_fbx1000 % 1000;

> +	frac_n_value = (rem << 16) / 1000;

> +

> +	DBG("div_fb = %lu", div_fbx1000);

> +	DBG("frac_n_value = %d", frac_n_value);

> +

> +	DBG("Generated VCO Clock: %lu", gen_vco_clk);

> +	rem = 0;

> +	sdm_cfg1 = pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1);

> +	sdm_cfg1 &= ~DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK;

> +	if (frac_n_mode) {

> +		sdm_cfg0 = 0x0;

> +		sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(0);

> +		sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(

> +				(u32)(((div_fbx1000 / 1000) & 0x3f) - 1));

> +		sdm_cfg3 = frac_n_value >> 8;

> +		sdm_cfg2 = frac_n_value & 0xff;

> +	} else {

> +		sdm_cfg0 = DSI_28nm_PHY_PLL_SDM_CFG0_BYP;

> +		sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(

> +				(u32)(((div_fbx1000 / 1000) & 0x3f) - 1));

> +		sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(0);

> +		sdm_cfg2 = 0;

> +		sdm_cfg3 = 0;

> +	}

> +

> +	DBG("sdm_cfg0=%d", sdm_cfg0);

> +	DBG("sdm_cfg1=%d", sdm_cfg1);

> +	DBG("sdm_cfg2=%d", sdm_cfg2);

> +	DBG("sdm_cfg3=%d", sdm_cfg3);

> +

> +	cal_cfg11 = (u32)(gen_vco_clk / (256 * 1000000));

> +	cal_cfg10 = (u32)((gen_vco_clk % (256 * 1000000)) / 1000000);

> +	DBG("cal_cfg10=%d, cal_cfg11=%d", cal_cfg10, cal_cfg11);

> +

> +	pll_write(base + REG_DSI_28nm_PHY_PLL_CHGPUMP_CFG, 0x02);

> +	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG3,    0x2b);

> +	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG4,    0x06);

> +	pll_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,  0x0d);

> +

> +	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1, sdm_cfg1);

> +	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2,

> +		DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2));

> +	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3,

> +		DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3));

> +	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG4, 0x00);

> +

> +	/* Add hardware recommended delay for correct PLL configuration */

> +	if (pll_28nm->vco_delay)

> +		udelay(pll_28nm->vco_delay);

> +

> +	pll_write(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG, refclk_cfg);

> +	pll_write(base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG, 0x00);

> +	pll_write(base + REG_DSI_28nm_PHY_PLL_VCOLPF_CFG, 0x31);

> +	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0,   sdm_cfg0);

> +	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG0,   0x12);

> +	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG6,   0x30);

> +	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG7,   0x00);

> +	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG8,   0x60);

> +	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG9,   0x00);

> +	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG10,  cal_cfg10 & 0xff);

> +	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG11,  cal_cfg11 & 0xff);

> +	pll_write(base + REG_DSI_28nm_PHY_PLL_EFUSE_CFG,  0x20);

> +

> +	return 0;

> +}

> +

> +static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)

> +{

> +	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);

> +	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> +

> +	return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,

> +					POLL_TIMEOUT_US);

> +}

> +

> +static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,

> +		unsigned long parent_rate)

> +{

> +	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);

> +	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> +	void __iomem *base = pll_28nm->mmio;

> +	u32 sdm0, doubler, sdm_byp_div;

> +	u32 sdm_dc_off, sdm_freq_seed, sdm2, sdm3;

> +	u32 ref_clk = VCO_REF_CLK_RATE;

> +	unsigned long vco_rate;

> +

> +	VERB("parent_rate=%lu", parent_rate);

> +

> +	/* Check to see if the ref clk doubler is enabled */

> +	doubler = pll_read(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG) &

> +			DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;

> +	ref_clk += (doubler * VCO_REF_CLK_RATE);

> +

> +	/* see if it is integer mode or sdm mode */

> +	sdm0 = pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0);

> +	if (sdm0 & DSI_28nm_PHY_PLL_SDM_CFG0_BYP) {

> +		/* integer mode */

> +		sdm_byp_div = FIELD(

> +				pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0),

> +				DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV) + 1;

> +		vco_rate = ref_clk * sdm_byp_div;

> +	} else {

> +		/* sdm mode */

> +		sdm_dc_off = FIELD(

> +				pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1),

> +				DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET);

> +		DBG("sdm_dc_off = %d", sdm_dc_off);

> +		sdm2 = FIELD(pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2),

> +				DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0);

> +		sdm3 = FIELD(pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3),

> +				DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8);

> +		sdm_freq_seed = (sdm3 << 8) | sdm2;

> +		DBG("sdm_freq_seed = %d", sdm_freq_seed);

> +

> +		vco_rate = (ref_clk * (sdm_dc_off + 1)) +

> +			mult_frac(ref_clk, sdm_freq_seed, BIT(16));

> +		DBG("vco rate = %lu", vco_rate);

> +	}

> +

> +	DBG("returning vco rate = %lu", vco_rate);

> +

> +	return vco_rate;

> +}

> +

> +static const struct clk_ops clk_ops_dsi_pll_28nm_vco = {

> +	.round_rate = msm_dsi_pll_helper_clk_round_rate,

> +	.set_rate = dsi_pll_28nm_clk_set_rate,

> +	.recalc_rate = dsi_pll_28nm_clk_recalc_rate,

> +	.prepare = msm_dsi_pll_helper_clk_prepare,

> +	.unprepare = msm_dsi_pll_helper_clk_unprepare,

> +	.is_enabled = dsi_pll_28nm_clk_is_enabled,

> +};

> +

> +/*

> + * PLL Callbacks

> + */

> +static int dsi_pll_28nm_enable_seq_hpm(struct msm_dsi_pll *pll)

> +{

> +	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> +	struct device *dev = &pll_28nm->pdev->dev;

> +	void __iomem *base = pll_28nm->mmio;

> +	u32 max_reads = 5, timeout_us = 100;

> +	bool locked;

> +	u32 val;

> +	int i;

> +

> +	DBG("id=%d", pll_28nm->id);

> +

> +	pll_28nm_software_reset(pll_28nm);

> +

> +	/*

> +	 * PLL power up sequence.

> +	 * Add necessary delays recommended by hardware.

> +	 */

> +	val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;

> +	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);

> +

> +	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;

> +	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);

> +

> +	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;

> +	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

> +

> +	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;

> +	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);

> +

> +	for (i = 0; i < 2; i++) {

> +		/* DSI Uniphy lock detect setting */

> +		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,

> +				0x0c, 100);

> +		pll_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);

> +

> +		/* poll for PLL ready status */

> +		locked = pll_28nm_poll_for_ready(pll_28nm,

> +						max_reads, timeout_us);

> +		if (locked)

> +			break;

> +

> +		pll_28nm_software_reset(pll_28nm);

> +

> +		/*

> +		 * PLL power up sequence.

> +		 * Add necessary delays recommended by hardware.

> +		 */

> +		val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;

> +		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);

> +

> +		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;

> +		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);

> +

> +		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;

> +		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 250);

> +

> +		val &= ~DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;

> +		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);

> +

> +		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;

> +		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

> +

> +		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;

> +		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);

> +	}

> +

> +	if (unlikely(!locked))

> +		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");

> +	else

> +		DBG("DSI PLL Lock success");

> +

> +	return locked ? 0 : -EINVAL;

> +}

> +

> +static int dsi_pll_28nm_enable_seq_lp(struct msm_dsi_pll *pll)

> +{

> +	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> +	struct device *dev = &pll_28nm->pdev->dev;

> +	void __iomem *base = pll_28nm->mmio;

> +	bool locked;

> +	u32 max_reads = 10, timeout_us = 50;

> +	u32 val;

> +

> +	DBG("id=%d", pll_28nm->id);

> +

> +	pll_28nm_software_reset(pll_28nm);

> +

> +	/*

> +	 * PLL power up sequence.

> +	 * Add necessary delays recommended by hardware.

> +	 */

> +	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_CAL_CFG1, 0x34, 500);

> +

> +	val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;

> +	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

> +

> +	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;

> +	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

> +

> +	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B |

> +		DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;

> +	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

> +

> +	/* DSI PLL toggle lock detect setting */

> +	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x04, 500);

> +	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x05, 512);

> +

> +	locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);

> +

> +	if (unlikely(!locked))

> +		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");

> +	else

> +		DBG("DSI PLL lock success");

> +

> +	return locked ? 0 : -EINVAL;

> +}

> +

> +static void dsi_pll_28nm_disable_seq(struct msm_dsi_pll *pll)

> +{

> +	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> +

> +	DBG("id=%d", pll_28nm->id);

> +	pll_write(pll_28nm->mmio + REG_DSI_28nm_PHY_PLL_GLB_CFG, 0x00);

> +}

> +

> +static void dsi_pll_28nm_save_state(struct msm_dsi_pll *pll)

> +{

> +	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> +	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;

> +	void __iomem *base = pll_28nm->mmio;

> +

> +	cached_state->postdiv3 =

> +			pll_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG);

> +	cached_state->postdiv1 =

> +			pll_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG);

> +	cached_state->byte_mux = pll_read(base + REG_DSI_28nm_PHY_PLL_VREG_CFG);

> +	if (dsi_pll_28nm_clk_is_enabled(&pll->clk_hw))

> +		cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw);

> +	else

> +		cached_state->vco_rate = 0;

> +}

> +

> +static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)

> +{

> +	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> +	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;

> +	void __iomem *base = pll_28nm->mmio;

> +	int ret;

> +

> +	ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,

> +					cached_state->vco_rate, 0);

> +	if (ret) {

> +		DRM_DEV_ERROR(&pll_28nm->pdev->dev,

> +			"restore vco rate failed. ret=%d\n", ret);

> +		return ret;

> +	}

> +

> +	pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,

> +			cached_state->postdiv3);

> +	pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,

> +			cached_state->postdiv1);

> +	pll_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,

> +			cached_state->byte_mux);

> +

> +	return 0;

> +}

> +

> +static int dsi_pll_28nm_get_provider(struct msm_dsi_pll *pll,

> +				struct clk **byte_clk_provider,

> +				struct clk **pixel_clk_provider)

> +{

> +	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> +

> +	if (byte_clk_provider)

> +		*byte_clk_provider = pll_28nm->provided_clks[DSI_BYTE_PLL_CLK];

> +	if (pixel_clk_provider)

> +		*pixel_clk_provider =

> +				pll_28nm->provided_clks[DSI_PIXEL_PLL_CLK];

> +

> +	return 0;

> +}

> +

> +static void dsi_pll_28nm_destroy(struct msm_dsi_pll *pll)

> +{

> +	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> +	int i;

> +

> +	msm_dsi_pll_helper_unregister_clks(pll_28nm->pdev,

> +					pll_28nm->clks, pll_28nm->num_clks);

> +

> +	for (i = 0; i < NUM_PROVIDED_CLKS; i++)

> +		pll_28nm->provided_clks[i] = NULL;

> +

> +	pll_28nm->num_clks = 0;

> +	pll_28nm->clk_data.clks = NULL;

> +	pll_28nm->clk_data.clk_num = 0;

> +}

> +

> +static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)

> +{

> +	char clk_name[32], parent1[32], parent2[32], vco_name[32];

> +	struct clk_init_data vco_init = {

> +		.parent_names = (const char *[]){ "xo" },

> +		.num_parents = 1,

> +		.name = vco_name,

> +		.flags = CLK_IGNORE_UNUSED,

> +		.ops = &clk_ops_dsi_pll_28nm_vco,

> +	};

> +	struct device *dev = &pll_28nm->pdev->dev;

> +	struct clk **clks = pll_28nm->clks;

> +	struct clk **provided_clks = pll_28nm->provided_clks;

> +	int num = 0;

> +	int ret;

> +

> +	DBG("%d", pll_28nm->id);

> +

> +	snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->id);

> +	pll_28nm->base.clk_hw.init = &vco_init;

> +	clks[num++] = clk_register(dev, &pll_28nm->base.clk_hw);

> +

> +	snprintf(clk_name, 32, "dsi%danalog_postdiv_clk", pll_28nm->id);

> +	snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);

> +	clks[num++] = clk_register_divider(dev, clk_name,

> +			parent1, CLK_SET_RATE_PARENT,

> +			pll_28nm->mmio +

> +			REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,

> +			0, 4, 0, NULL);

> +

> +	snprintf(clk_name, 32, "dsi%dindirect_path_div2_clk", pll_28nm->id);

> +	snprintf(parent1, 32, "dsi%danalog_postdiv_clk", pll_28nm->id);

> +	clks[num++] = clk_register_fixed_factor(dev, clk_name,

> +			parent1, CLK_SET_RATE_PARENT,

> +			1, 2);

> +

> +	snprintf(clk_name, 32, "dsi%dpll", pll_28nm->id);

> +	snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);

> +	clks[num++] = provided_clks[DSI_PIXEL_PLL_CLK] =

> +			clk_register_divider(dev, clk_name,

> +				parent1, 0, pll_28nm->mmio +

> +				REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,

> +				0, 8, 0, NULL);

> +

> +	snprintf(clk_name, 32, "dsi%dbyte_mux", pll_28nm->id);

> +	snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);

> +	snprintf(parent2, 32, "dsi%dindirect_path_div2_clk", pll_28nm->id);

> +	clks[num++] = clk_register_mux(dev, clk_name,

> +			((const char *[]){

> +				parent1, parent2

> +			}), 2, CLK_SET_RATE_PARENT, pll_28nm->mmio +

> +			REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);

> +

> +	snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->id);

> +	snprintf(parent1, 32, "dsi%dbyte_mux", pll_28nm->id);

> +	clks[num++] = provided_clks[DSI_BYTE_PLL_CLK] =

> +			clk_register_fixed_factor(dev, clk_name,

> +				parent1, CLK_SET_RATE_PARENT, 1, 4);

> +

> +	pll_28nm->num_clks = num;

> +

> +	pll_28nm->clk_data.clk_num = NUM_PROVIDED_CLKS;

> +	pll_28nm->clk_data.clks = provided_clks;

> +

> +	ret = of_clk_add_provider(dev->of_node,

> +			of_clk_src_onecell_get, &pll_28nm->clk_data);

> +	if (ret) {

> +		DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);

> +		return ret;

> +	}

> +

> +	return 0;

> +}

> +

> +struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,

> +					enum msm_dsi_phy_type type, int id)

> +{

> +	struct dsi_pll_28nm *pll_28nm;

> +	struct msm_dsi_pll *pll;

> +	int ret;

> +

> +	if (!pdev)

> +		return ERR_PTR(-ENODEV);

> +

> +	pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);

> +	if (!pll_28nm)

> +		return ERR_PTR(-ENOMEM);

> +

> +	pll_28nm->pdev = pdev;

> +	pll_28nm->id = id;

> +

> +	pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");

> +	if (IS_ERR_OR_NULL(pll_28nm->mmio)) {

> +		DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);

> +		return ERR_PTR(-ENOMEM);

> +	}

> +

> +	pll = &pll_28nm->base;

> +	pll->min_rate = VCO_MIN_RATE;

> +	pll->max_rate = VCO_MAX_RATE;

> +	pll->get_provider = dsi_pll_28nm_get_provider;

> +	pll->destroy = dsi_pll_28nm_destroy;

> +	pll->disable_seq = dsi_pll_28nm_disable_seq;

> +	pll->save_state = dsi_pll_28nm_save_state;

> +	pll->restore_state = dsi_pll_28nm_restore_state;

> +

> +	if (type == MSM_DSI_PHY_28NM_HPM) {

> +		pll_28nm->vco_delay = 1;

> +

> +		pll->en_seq_cnt = 3;

> +		pll->enable_seqs[0] = dsi_pll_28nm_enable_seq_hpm;

> +		pll->enable_seqs[1] = dsi_pll_28nm_enable_seq_hpm;

> +		pll->enable_seqs[2] = dsi_pll_28nm_enable_seq_hpm;

> +	} else if (type == MSM_DSI_PHY_28NM_LP) {

> +		pll_28nm->vco_delay = 1000;

> +

> +		pll->en_seq_cnt = 1;

> +		pll->enable_seqs[0] = dsi_pll_28nm_enable_seq_lp;

> +	} else {

> +		DRM_DEV_ERROR(&pdev->dev, "phy type (%d) is not 28nm\n", type);

> +		return ERR_PTR(-EINVAL);

> +	}

> +

> +	ret = pll_28nm_register(pll_28nm);

> +	if (ret) {

> +		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);

> +		return ERR_PTR(ret);

> +	}

> +

> +	return pll;

> +}

> +

> +

>  static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,

>  		struct msm_dsi_dphy_timing *timing)

>  {

> diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c

> index 5d33de27a0f4..4a40513057e8 100644

> --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c

> +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c

> @@ -3,11 +3,530 @@

>   * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.

>   */

> 

> +#include <linux/clk-provider.h>

>  #include <linux/delay.h>

> 

>  #include "dsi_phy.h"

> +#include "dsi_pll.h"

>  #include "dsi.xml.h"

> 

> +/*

> + * DSI PLL 28nm (8960/A family) - clock diagram (eg: DSI1):

> + *

> + *

> + *                        +------+

> + *  dsi1vco_clk ----o-----| DIV1 |---dsi1pllbit (not exposed as clock)

> + *  F * byte_clk    |     +------+

> + *                  | bit clock divider (F / 8)

> + *                  |

> + *                  |     +------+

> + *                  o-----| DIV2 |---dsi0pllbyte---o---> To byte RCG

> + *                  |     +------+                 | (sets parent rate)

> + *                  | byte clock divider (F)       |

> + *                  |                              |

> + *                  |                              o---> To esc RCG

> + *                  |                                (doesn't set parent rate)

> + *                  |

> + *                  |     +------+

> + *                  o-----| DIV3 |----dsi0pll------o---> To dsi RCG

> + *                        +------+                 | (sets parent rate)

> + *                  dsi clock divider (F * magic)  |

> + *                                                 |

> + *                                                 o---> To pixel rcg

> + *                                                  (doesn't set parent rate)

> + */

> +

> +#define POLL_MAX_READS		8000

> +#define POLL_TIMEOUT_US		1

> +

> +#define NUM_PROVIDED_CLKS	2

> +

> +#define VCO_REF_CLK_RATE	27000000

> +#define VCO_MIN_RATE		600000000

> +#define VCO_MAX_RATE		1200000000

> +

> +#define DSI_BYTE_PLL_CLK	0

> +#define DSI_PIXEL_PLL_CLK	1

> +

> +#define VCO_PREF_DIV_RATIO	27

> +

> +struct pll_28nm_cached_state {

> +	unsigned long vco_rate;

> +	u8 postdiv3;

> +	u8 postdiv2;

> +	u8 postdiv1;

> +};

> +

> +struct clk_bytediv {

> +	struct clk_hw hw;

> +	void __iomem *reg;

> +};

> +

> +struct dsi_pll_28nm {

> +	struct msm_dsi_pll base;

> +

> +	int id;

> +	struct platform_device *pdev;

> +	void __iomem *mmio;

> +

> +	/* custom byte clock divider */

> +	struct clk_bytediv *bytediv;

> +

> +	/* private clocks: */

> +	struct clk *clks[NUM_DSI_CLOCKS_MAX];

> +	u32 num_clks;

> +

> +	/* clock-provider: */

> +	struct clk *provided_clks[NUM_PROVIDED_CLKS];

> +	struct clk_onecell_data clk_data;

> +

> +	struct pll_28nm_cached_state cached_state;

> +};

> +

> +#define to_pll_28nm(x)	container_of(x, struct dsi_pll_28nm, base)

> +

> +static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,

> +				    int nb_tries, int timeout_us)

> +{

> +	bool pll_locked = false;

> +	u32 val;

> +

> +	while (nb_tries--) {

> +		val = pll_read(pll_28nm->mmio + REG_DSI_28nm_8960_PHY_PLL_RDY);

> +		pll_locked = !!(val & DSI_28nm_8960_PHY_PLL_RDY_PLL_RDY);

> +

> +		if (pll_locked)

> +			break;

> +

> +		udelay(timeout_us);

> +	}

> +	DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");

> +

> +	return pll_locked;

> +}

> +

> +/*

> + * Clock Callbacks

> + */

> +static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,

> +				     unsigned long parent_rate)

> +{

> +	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);

> +	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> +	void __iomem *base = pll_28nm->mmio;

> +	u32 val, temp, fb_divider;

> +

> +	DBG("rate=%lu, parent's=%lu", rate, parent_rate);

> +

> +	temp = rate / 10;

> +	val = VCO_REF_CLK_RATE / 10;

> +	fb_divider = (temp * VCO_PREF_DIV_RATIO) / val;

> +	fb_divider = fb_divider / 2 - 1;

> +	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1,

> +			fb_divider & 0xff);

> +

> +	val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2);

> +

> +	val |= (fb_divider >> 8) & 0x07;

> +

> +	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2,

> +			val);

> +

> +	val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);

> +

> +	val |= (VCO_PREF_DIV_RATIO - 1) & 0x3f;

> +

> +	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3,

> +			val);

> +

> +	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_6,

> +			0xf);

> +

> +	val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);

> +	val |= 0x7 << 4;

> +	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,

> +			val);

> +

> +	return 0;

> +}

> +

> +static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)

> +{

> +	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);

> +	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> +

> +	return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,

> +					POLL_TIMEOUT_US);

> +}

> +

> +static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,

> +						  unsigned long parent_rate)

> +{

> +	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);

> +	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> +	void __iomem *base = pll_28nm->mmio;

> +	unsigned long vco_rate;

> +	u32 status, fb_divider, temp, ref_divider;

> +

> +	VERB("parent_rate=%lu", parent_rate);

> +

> +	status = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0);

> +

> +	if (status & DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE) {

> +		fb_divider = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1);

> +		fb_divider &= 0xff;

> +		temp = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2) & 0x07;

> +		fb_divider = (temp << 8) | fb_divider;

> +		fb_divider += 1;

> +

> +		ref_divider = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);

> +		ref_divider &= 0x3f;

> +		ref_divider += 1;

> +

> +		/* multiply by 2 */

> +		vco_rate = (parent_rate / ref_divider) * fb_divider * 2;

> +	} else {

> +		vco_rate = 0;

> +	}

> +

> +	DBG("returning vco rate = %lu", vco_rate);

> +

> +	return vco_rate;

> +}

> +

> +static const struct clk_ops clk_ops_dsi_pll_28nm_vco = {

> +	.round_rate = msm_dsi_pll_helper_clk_round_rate,

> +	.set_rate = dsi_pll_28nm_clk_set_rate,

> +	.recalc_rate = dsi_pll_28nm_clk_recalc_rate,

> +	.prepare = msm_dsi_pll_helper_clk_prepare,

> +	.unprepare = msm_dsi_pll_helper_clk_unprepare,

> +	.is_enabled = dsi_pll_28nm_clk_is_enabled,

> +};

> +

> +/*

> + * Custom byte clock divider clk_ops

> + *

> + * This clock is the entry point to configuring the PLL. The user (dsi host)

> + * will set this clock's rate to the desired byte clock rate. The VCO lock

> + * frequency is a multiple of the byte clock rate. The multiplication factor

> + * (shown as F in the diagram above) is a function of the byte clock rate.

> + *

> + * This custom divider clock ensures that its parent (VCO) is set to the

> + * desired rate, and that the byte clock postdivider (POSTDIV2) is configured

> + * accordingly

> + */

> +#define to_clk_bytediv(_hw) container_of(_hw, struct clk_bytediv, hw)

> +

> +static unsigned long clk_bytediv_recalc_rate(struct clk_hw *hw,

> +		unsigned long parent_rate)

> +{

> +	struct clk_bytediv *bytediv = to_clk_bytediv(hw);

> +	unsigned int div;

> +

> +	div = pll_read(bytediv->reg) & 0xff;

> +

> +	return parent_rate / (div + 1);

> +}

> +

> +/* find multiplication factor(wrt byte clock) at which the VCO should be set */

> +static unsigned int get_vco_mul_factor(unsigned long byte_clk_rate)

> +{

> +	unsigned long bit_mhz;

> +

> +	/* convert to bit clock in Mhz */

> +	bit_mhz = (byte_clk_rate * 8) / 1000000;

> +

> +	if (bit_mhz < 125)

> +		return 64;

> +	else if (bit_mhz < 250)

> +		return 32;

> +	else if (bit_mhz < 600)

> +		return 16;

> +	else

> +		return 8;

> +}

> +

> +static long clk_bytediv_round_rate(struct clk_hw *hw, unsigned long rate,

> +				   unsigned long *prate)

> +{

> +	unsigned long best_parent;

> +	unsigned int factor;

> +

> +	factor = get_vco_mul_factor(rate);

> +

> +	best_parent = rate * factor;

> +	*prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);

> +

> +	return *prate / factor;

> +}

> +

> +static int clk_bytediv_set_rate(struct clk_hw *hw, unsigned long rate,

> +				unsigned long parent_rate)

> +{

> +	struct clk_bytediv *bytediv = to_clk_bytediv(hw);

> +	u32 val;

> +	unsigned int factor;

> +

> +	factor = get_vco_mul_factor(rate);

> +

> +	val = pll_read(bytediv->reg);

> +	val |= (factor - 1) & 0xff;

> +	pll_write(bytediv->reg, val);

> +

> +	return 0;

> +}

> +

> +/* Our special byte clock divider ops */

> +static const struct clk_ops clk_bytediv_ops = {

> +	.round_rate = clk_bytediv_round_rate,

> +	.set_rate = clk_bytediv_set_rate,

> +	.recalc_rate = clk_bytediv_recalc_rate,

> +};

> +

> +/*

> + * PLL Callbacks

> + */

> +static int dsi_pll_28nm_enable_seq(struct msm_dsi_pll *pll)

> +{

> +	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> +	struct device *dev = &pll_28nm->pdev->dev;

> +	void __iomem *base = pll_28nm->mmio;

> +	bool locked;

> +	unsigned int bit_div, byte_div;

> +	int max_reads = 1000, timeout_us = 100;

> +	u32 val;

> +

> +	DBG("id=%d", pll_28nm->id);

> +

> +	/*

> +	 * before enabling the PLL, configure the bit clock divider since we

> +	 * don't expose it as a clock to the outside world

> +	 * 1: read back the byte clock divider that should already be set

> +	 * 2: divide by 8 to get bit clock divider

> +	 * 3: write it to POSTDIV1

> +	 */

> +	val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);

> +	byte_div = val + 1;

> +	bit_div = byte_div / 8;

> +

> +	val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);

> +	val &= ~0xf;

> +	val |= (bit_div - 1);

> +	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8, val);

> +

> +	/* enable the PLL */

> +	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0,

> +			DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE);

> +

> +	locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);

> +

> +	if (unlikely(!locked))

> +		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");

> +	else

> +		DBG("DSI PLL lock success");

> +

> +	return locked ? 0 : -EINVAL;

> +}

> +

> +static void dsi_pll_28nm_disable_seq(struct msm_dsi_pll *pll)

> +{

> +	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> +

> +	DBG("id=%d", pll_28nm->id);

> +	pll_write(pll_28nm->mmio + REG_DSI_28nm_8960_PHY_PLL_CTRL_0, 0x00);

> +}

> +

> +static void dsi_pll_28nm_save_state(struct msm_dsi_pll *pll)

> +{

> +	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> +	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;

> +	void __iomem *base = pll_28nm->mmio;

> +

> +	cached_state->postdiv3 =

> +			pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10);

> +	cached_state->postdiv2 =

> +			pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);

> +	cached_state->postdiv1 =

> +			pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);

> +

> +	cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw);

> +}

> +

> +static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)

> +{

> +	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> +	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;

> +	void __iomem *base = pll_28nm->mmio;

> +	int ret;

> +

> +	ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,

> +					cached_state->vco_rate, 0);

> +	if (ret) {

> +		DRM_DEV_ERROR(&pll_28nm->pdev->dev,

> +			"restore vco rate failed. ret=%d\n", ret);

> +		return ret;

> +	}

> +

> +	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10,

> +			cached_state->postdiv3);

> +	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9,

> +			cached_state->postdiv2);

> +	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,

> +			cached_state->postdiv1);

> +

> +	return 0;

> +}

> +

> +static int dsi_pll_28nm_get_provider(struct msm_dsi_pll *pll,

> +				struct clk **byte_clk_provider,

> +				struct clk **pixel_clk_provider)

> +{

> +	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> +

> +	if (byte_clk_provider)

> +		*byte_clk_provider = pll_28nm->provided_clks[DSI_BYTE_PLL_CLK];

> +	if (pixel_clk_provider)

> +		*pixel_clk_provider =

> +				pll_28nm->provided_clks[DSI_PIXEL_PLL_CLK];

> +

> +	return 0;

> +}

> +

> +static void dsi_pll_28nm_destroy(struct msm_dsi_pll *pll)

> +{

> +	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> +

> +	msm_dsi_pll_helper_unregister_clks(pll_28nm->pdev,

> +					pll_28nm->clks, pll_28nm->num_clks);

> +}

> +

> +static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)

> +{

> +	char *clk_name, *parent_name, *vco_name;

> +	struct clk_init_data vco_init = {

> +		.parent_names = (const char *[]){ "pxo" },

> +		.num_parents = 1,

> +		.flags = CLK_IGNORE_UNUSED,

> +		.ops = &clk_ops_dsi_pll_28nm_vco,

> +	};

> +	struct device *dev = &pll_28nm->pdev->dev;

> +	struct clk **clks = pll_28nm->clks;

> +	struct clk **provided_clks = pll_28nm->provided_clks;

> +	struct clk_bytediv *bytediv;

> +	struct clk_init_data bytediv_init = { };

> +	int ret, num = 0;

> +

> +	DBG("%d", pll_28nm->id);

> +

> +	bytediv = devm_kzalloc(dev, sizeof(*bytediv), GFP_KERNEL);

> +	if (!bytediv)

> +		return -ENOMEM;

> +

> +	vco_name = devm_kzalloc(dev, 32, GFP_KERNEL);

> +	if (!vco_name)

> +		return -ENOMEM;

> +

> +	parent_name = devm_kzalloc(dev, 32, GFP_KERNEL);

> +	if (!parent_name)

> +		return -ENOMEM;

> +

> +	clk_name = devm_kzalloc(dev, 32, GFP_KERNEL);

> +	if (!clk_name)

> +		return -ENOMEM;

> +

> +	pll_28nm->bytediv = bytediv;

> +

> +	snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->id);

> +	vco_init.name = vco_name;

> +

> +	pll_28nm->base.clk_hw.init = &vco_init;

> +

> +	clks[num++] = clk_register(dev, &pll_28nm->base.clk_hw);

> +

> +	/* prepare and register bytediv */

> +	bytediv->hw.init = &bytediv_init;

> +	bytediv->reg = pll_28nm->mmio + REG_DSI_28nm_8960_PHY_PLL_CTRL_9;

> +

> +	snprintf(parent_name, 32, "dsi%dvco_clk", pll_28nm->id);

> +	snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->id);

> +

> +	bytediv_init.name = clk_name;

> +	bytediv_init.ops = &clk_bytediv_ops;

> +	bytediv_init.flags = CLK_SET_RATE_PARENT;

> +	bytediv_init.parent_names = (const char * const *) &parent_name;

> +	bytediv_init.num_parents = 1;

> +

> +	/* DIV2 */

> +	clks[num++] = provided_clks[DSI_BYTE_PLL_CLK] =

> +			clk_register(dev, &bytediv->hw);

> +

> +	snprintf(clk_name, 32, "dsi%dpll", pll_28nm->id);

> +	/* DIV3 */

> +	clks[num++] = provided_clks[DSI_PIXEL_PLL_CLK] =

> +			clk_register_divider(dev, clk_name,

> +				parent_name, 0, pll_28nm->mmio +

> +				REG_DSI_28nm_8960_PHY_PLL_CTRL_10,

> +				0, 8, 0, NULL);

> +

> +	pll_28nm->num_clks = num;

> +

> +	pll_28nm->clk_data.clk_num = NUM_PROVIDED_CLKS;

> +	pll_28nm->clk_data.clks = provided_clks;

> +

> +	ret = of_clk_add_provider(dev->of_node,

> +			of_clk_src_onecell_get, &pll_28nm->clk_data);

> +	if (ret) {

> +		DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);

> +		return ret;

> +	}

> +

> +	return 0;

> +}

> +

> +struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,

> +					       int id)

> +{

> +	struct dsi_pll_28nm *pll_28nm;

> +	struct msm_dsi_pll *pll;

> +	int ret;

> +

> +	if (!pdev)

> +		return ERR_PTR(-ENODEV);

> +

> +	pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);

> +	if (!pll_28nm)

> +		return ERR_PTR(-ENOMEM);

> +

> +	pll_28nm->pdev = pdev;

> +	pll_28nm->id = id + 1;

> +

> +	pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");

> +	if (IS_ERR_OR_NULL(pll_28nm->mmio)) {

> +		DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);

> +		return ERR_PTR(-ENOMEM);

> +	}

> +

> +	pll = &pll_28nm->base;

> +	pll->min_rate = VCO_MIN_RATE;

> +	pll->max_rate = VCO_MAX_RATE;

> +	pll->get_provider = dsi_pll_28nm_get_provider;

> +	pll->destroy = dsi_pll_28nm_destroy;

> +	pll->disable_seq = dsi_pll_28nm_disable_seq;

> +	pll->save_state = dsi_pll_28nm_save_state;

> +	pll->restore_state = dsi_pll_28nm_restore_state;

> +

> +	pll->en_seq_cnt = 1;

> +	pll->enable_seqs[0] = dsi_pll_28nm_enable_seq;

> +

> +	ret = pll_28nm_register(pll_28nm);

> +	if (ret) {

> +		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);

> +		return ERR_PTR(ret);

> +	}

> +

> +	return pll;

> +}

> +

>  static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,

>  		struct msm_dsi_dphy_timing *timing)

>  {

> diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c

> index cbfeec860e69..f9af9d70b56a 100644

> --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c

> +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c

> @@ -3,11 +3,916 @@

>   * Copyright (c) 2018, The Linux Foundation

>   */

> 

> +#include <linux/clk.h>

> +#include <linux/clk-provider.h>

>  #include <linux/iopoll.h>

> 

> +#include "dsi_pll.h"

>  #include "dsi_phy.h"

>  #include "dsi.xml.h"

> 

> +/*

> + * DSI PLL 7nm - clock diagram (eg: DSI0): TODO: updated CPHY diagram

> + *

> + *           dsi0_pll_out_div_clk  dsi0_pll_bit_clk

> + *                              |                |

> + *                              |                |

> + *                 +---------+  |  +----------+  |  +----+

> + *  dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk

> + *                 +---------+  |  +----------+  |  +----+

> + *                              |                |

> + *                              |                |dsi0_pll_by_2_bit_clk

> + *                              |                |          |

> + *                              |                |  +----+  |  |\  dsi0_pclk_mux

> + *                              |                |--| /2 |--o--| \   |

> + *                              |                |  +----+     |  \  |  +---------+

> + *                              |                --------------|    |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk

> + *                              |------------------------------|  /     +---------+

> + *                              |          +-----+             | /

> + *                              -----------| /4? |--o----------|/

> + *                                         +-----+  |           |

> + *                                                  |           |dsiclk_sel

> + *                                                  |

> + *                                                  dsi0_pll_post_out_div_clk

> + */

> +

> +#define DSI_BYTE_PLL_CLK		0

> +#define DSI_PIXEL_PLL_CLK		1

> +#define NUM_PROVIDED_CLKS		2

> +

> +#define VCO_REF_CLK_RATE		19200000

> +

> +struct dsi_pll_regs {

> +	u32 pll_prop_gain_rate;

> +	u32 pll_lockdet_rate;

> +	u32 decimal_div_start;

> +	u32 frac_div_start_low;

> +	u32 frac_div_start_mid;

> +	u32 frac_div_start_high;

> +	u32 pll_clock_inverters;

> +	u32 ssc_stepsize_low;

> +	u32 ssc_stepsize_high;

> +	u32 ssc_div_per_low;

> +	u32 ssc_div_per_high;

> +	u32 ssc_adjper_low;

> +	u32 ssc_adjper_high;

> +	u32 ssc_control;

> +};

> +

> +struct dsi_pll_config {

> +	u32 ref_freq;

> +	bool div_override;

> +	u32 output_div;

> +	bool ignore_frac;

> +	bool disable_prescaler;

> +	bool enable_ssc;

> +	bool ssc_center;

> +	u32 dec_bits;

> +	u32 frac_bits;

> +	u32 lock_timer;

> +	u32 ssc_freq;

> +	u32 ssc_offset;

> +	u32 ssc_adj_per;

> +	u32 thresh_cycles;

> +	u32 refclk_cycles;

> +};

> +

> +struct pll_7nm_cached_state {

> +	unsigned long vco_rate;

> +	u8 bit_clk_div;

> +	u8 pix_clk_div;

> +	u8 pll_out_div;

> +	u8 pll_mux;

> +};

> +

> +struct dsi_pll_7nm {

> +	struct msm_dsi_pll base;

> +

> +	int id;

> +	struct platform_device *pdev;

> +

> +	void __iomem *phy_cmn_mmio;

> +	void __iomem *mmio;

> +

> +	u64 vco_ref_clk_rate;

> +	u64 vco_current_rate;

> +

> +	/* protects REG_DSI_7nm_PHY_CMN_CLK_CFG0 register */

> +	spinlock_t postdiv_lock;

> +

> +	int vco_delay;

> +	struct dsi_pll_config pll_configuration;

> +	struct dsi_pll_regs reg_setup;

> +

> +	/* private clocks: */

> +	struct clk_hw *out_div_clk_hw;

> +	struct clk_hw *bit_clk_hw;

> +	struct clk_hw *byte_clk_hw;

> +	struct clk_hw *by_2_bit_clk_hw;

> +	struct clk_hw *post_out_div_clk_hw;

> +	struct clk_hw *pclk_mux_hw;

> +	struct clk_hw *out_dsiclk_hw;

> +

> +	/* clock-provider: */

> +	struct clk_hw_onecell_data *hw_data;

> +

> +	struct pll_7nm_cached_state cached_state;

> +

> +	enum msm_dsi_phy_usecase uc;

> +	struct dsi_pll_7nm *slave;

> +};

> +

> +#define to_pll_7nm(x)	container_of(x, struct dsi_pll_7nm, base)

> +

> +/*

> + * Global list of private DSI PLL struct pointers. We need this for Dual DSI

> + * mode, where the master PLL's clk_ops needs access the slave's private data

> + */

> +static struct dsi_pll_7nm *pll_7nm_list[DSI_MAX];

> +

> +static void dsi_pll_setup_config(struct dsi_pll_7nm *pll)

> +{

> +	struct dsi_pll_config *config = &pll->pll_configuration;

> +

> +	config->ref_freq = pll->vco_ref_clk_rate;

> +	config->output_div = 1;

> +	config->dec_bits = 8;

> +	config->frac_bits = 18;

> +	config->lock_timer = 64;

> +	config->ssc_freq = 31500;

> +	config->ssc_offset = 4800;

> +	config->ssc_adj_per = 2;

> +	config->thresh_cycles = 32;

> +	config->refclk_cycles = 256;

> +

> +	config->div_override = false;

> +	config->ignore_frac = false;

> +	config->disable_prescaler = false;

> +

> +	/* TODO: ssc enable */

> +	config->enable_ssc = false;

> +	config->ssc_center = 0;

> +}

> +

> +static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll)

> +{

> +	struct dsi_pll_config *config = &pll->pll_configuration;

> +	struct dsi_pll_regs *regs = &pll->reg_setup;

> +	u64 fref = pll->vco_ref_clk_rate;

> +	u64 pll_freq;

> +	u64 divider;

> +	u64 dec, dec_multiple;

> +	u32 frac;

> +	u64 multiplier;

> +

> +	pll_freq = pll->vco_current_rate;

> +

> +	if (config->disable_prescaler)

> +		divider = fref;

> +	else

> +		divider = fref * 2;

> +

> +	multiplier = 1 << config->frac_bits;

> +	dec_multiple = div_u64(pll_freq * multiplier, divider);

> +	div_u64_rem(dec_multiple, multiplier, &frac);

> +

> +	dec = div_u64(dec_multiple, multiplier);

> +

> +	if (pll->base.type != MSM_DSI_PHY_7NM_V4_1)

> +		regs->pll_clock_inverters = 0x28;

> +	else if (pll_freq <= 1000000000ULL)

> +		regs->pll_clock_inverters = 0xa0;

> +	else if (pll_freq <= 2500000000ULL)

> +		regs->pll_clock_inverters = 0x20;

> +	else if (pll_freq <= 3020000000ULL)

> +		regs->pll_clock_inverters = 0x00;

> +	else

> +		regs->pll_clock_inverters = 0x40;

> +

> +	regs->pll_lockdet_rate = config->lock_timer;

> +	regs->decimal_div_start = dec;

> +	regs->frac_div_start_low = (frac & 0xff);

> +	regs->frac_div_start_mid = (frac & 0xff00) >> 8;

> +	regs->frac_div_start_high = (frac & 0x30000) >> 16;

> +}

> +

> +#define SSC_CENTER		BIT(0)

> +#define SSC_EN			BIT(1)

> +

> +static void dsi_pll_calc_ssc(struct dsi_pll_7nm *pll)

> +{

> +	struct dsi_pll_config *config = &pll->pll_configuration;

> +	struct dsi_pll_regs *regs = &pll->reg_setup;

> +	u32 ssc_per;

> +	u32 ssc_mod;

> +	u64 ssc_step_size;

> +	u64 frac;

> +

> +	if (!config->enable_ssc) {

> +		DBG("SSC not enabled\n");

> +		return;

> +	}

> +

> +	ssc_per = DIV_ROUND_CLOSEST(config->ref_freq, config->ssc_freq) / 2 - 1;

> +	ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);

> +	ssc_per -= ssc_mod;

> +

> +	frac = regs->frac_div_start_low |

> +			(regs->frac_div_start_mid << 8) |

> +			(regs->frac_div_start_high << 16);

> +	ssc_step_size = regs->decimal_div_start;

> +	ssc_step_size *= (1 << config->frac_bits);

> +	ssc_step_size += frac;

> +	ssc_step_size *= config->ssc_offset;

> +	ssc_step_size *= (config->ssc_adj_per + 1);

> +	ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));

> +	ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);

> +

> +	regs->ssc_div_per_low = ssc_per & 0xFF;

> +	regs->ssc_div_per_high = (ssc_per & 0xFF00) >> 8;

> +	regs->ssc_stepsize_low = (u32)(ssc_step_size & 0xFF);

> +	regs->ssc_stepsize_high = (u32)((ssc_step_size & 0xFF00) >> 8);

> +	regs->ssc_adjper_low = config->ssc_adj_per & 0xFF;

> +	regs->ssc_adjper_high = (config->ssc_adj_per & 0xFF00) >> 8;

> +

> +	regs->ssc_control = config->ssc_center ? SSC_CENTER : 0;

> +

> +	pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",

> +		 regs->decimal_div_start, frac, config->frac_bits);

> +	pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",

> +		 ssc_per, (u32)ssc_step_size, config->ssc_adj_per);

> +}

> +

> +static void dsi_pll_ssc_commit(struct dsi_pll_7nm *pll)

> +{

> +	void __iomem *base = pll->mmio;

> +	struct dsi_pll_regs *regs = &pll->reg_setup;

> +

> +	if (pll->pll_configuration.enable_ssc) {

> +		pr_debug("SSC is enabled\n");

> +

> +		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_1,

> +			  regs->ssc_stepsize_low);

> +		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,

> +			  regs->ssc_stepsize_high);

> +		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_1,

> +			  regs->ssc_div_per_low);

> +		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_1,

> +			  regs->ssc_div_per_high);

> +		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_1,

> +			  regs->ssc_adjper_low);

> +		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_1,

> +			  regs->ssc_adjper_high);

> +		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_CONTROL,

> +			  SSC_EN | regs->ssc_control);

> +	}

> +}

> +

> +static void dsi_pll_config_hzindep_reg(struct dsi_pll_7nm *pll)

> +{

> +	void __iomem *base = pll->mmio;

> +	u8 analog_controls_five_1 = 0x01, vco_config_1 = 0x00;

> +

> +	if (pll->base.type == MSM_DSI_PHY_7NM_V4_1) {

> +		if (pll->vco_current_rate >= 3100000000ULL)

> +			analog_controls_five_1 = 0x03;

> +

> +		if (pll->vco_current_rate < 1520000000ULL)

> +			vco_config_1 = 0x08;

> +		else if (pll->vco_current_rate < 2990000000ULL)

> +			vco_config_1 = 0x01;

> +	}

> +

> +	pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1,

> +		  analog_controls_five_1);

> +	pll_write(base + REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1, vco_config_1);

> +	pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE, 0x01);

> +	pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);

> +	pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);

> +	pll_write(base + REG_DSI_7nm_PHY_PLL_DSM_DIVIDER, 0x00);

> +	pll_write(base + REG_DSI_7nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);

> +	pll_write(base + REG_DSI_7nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);

> +	pll_write(base + REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE, 0xba);

> +	pll_write(base + REG_DSI_7nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);

> +	pll_write(base + REG_DSI_7nm_PHY_PLL_OUTDIV, 0x00);

> +	pll_write(base + REG_DSI_7nm_PHY_PLL_CORE_OVERRIDE, 0x00);

> +	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);

> +	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x0a);

> +	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_1, 0xc0);

> +	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x84);

> +	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x82);

> +	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1, 0x4c);

> +	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);

> +	pll_write(base + REG_DSI_7nm_PHY_PLL_PFILT, 0x29);

> +	pll_write(base + REG_DSI_7nm_PHY_PLL_PFILT, 0x2f);

> +	pll_write(base + REG_DSI_7nm_PHY_PLL_IFILT, 0x2a);

> +	pll_write(base + REG_DSI_7nm_PHY_PLL_IFILT,

> +		  pll->base.type == MSM_DSI_PHY_7NM_V4_1 ? 0x3f : 0x22);

> +

> +	if (pll->base.type == MSM_DSI_PHY_7NM_V4_1) {

> +		pll_write(base + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE, 0x22);

> +		if (pll->slave)

> +			pll_write(pll->slave->mmio + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE, 0x22);

> +	}

> +}

> +

> +static void dsi_pll_commit(struct dsi_pll_7nm *pll)

> +{

> +	void __iomem *base = pll->mmio;

> +	struct dsi_pll_regs *reg = &pll->reg_setup;

> +

> +	pll_write(base + REG_DSI_7nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);

> +	pll_write(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1, reg->decimal_div_start);

> +	pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1, reg->frac_div_start_low);

> +	pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1, reg->frac_div_start_mid);

> +	pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1, reg->frac_div_start_high);

> +	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, reg->pll_lockdet_rate);

> +	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);

> +	pll_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, 0x10); /* TODO: 0x00 for CPHY */

> +	pll_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS, reg->pll_clock_inverters);

> +}

> +

> +static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,

> +				     unsigned long parent_rate)

> +{

> +	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);

> +	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);

> +

> +	DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_7nm->id, rate,

> +	    parent_rate);

> +

> +	pll_7nm->vco_current_rate = rate;

> +	pll_7nm->vco_ref_clk_rate = VCO_REF_CLK_RATE;

> +

> +	dsi_pll_setup_config(pll_7nm);

> +

> +	dsi_pll_calc_dec_frac(pll_7nm);

> +

> +	dsi_pll_calc_ssc(pll_7nm);

> +

> +	dsi_pll_commit(pll_7nm);

> +

> +	dsi_pll_config_hzindep_reg(pll_7nm);

> +

> +	dsi_pll_ssc_commit(pll_7nm);

> +

> +	/* flush, ensure all register writes are done*/

> +	wmb();

> +

> +	return 0;

> +}

> +

> +static int dsi_pll_7nm_lock_status(struct dsi_pll_7nm *pll)

> +{

> +	int rc;

> +	u32 status = 0;

> +	u32 const delay_us = 100;

> +	u32 const timeout_us = 5000;

> +

> +	rc = readl_poll_timeout_atomic(pll->mmio +

> +				       REG_DSI_7nm_PHY_PLL_COMMON_STATUS_ONE,

> +				       status,

> +				       ((status & BIT(0)) > 0),

> +				       delay_us,

> +				       timeout_us);

> +	if (rc)

> +		pr_err("DSI PLL(%d) lock failed, status=0x%08x\n",

> +		       pll->id, status);

> +

> +	return rc;

> +}

> +

> +static void dsi_pll_disable_pll_bias(struct dsi_pll_7nm *pll)

> +{

> +	u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0);

> +

> +	pll_write(pll->mmio + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES, 0);

> +	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0, data & ~BIT(5));

> +	ndelay(250);

> +}

> +

> +static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll)

> +{

> +	u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0);

> +

> +	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0, data | BIT(5));

> +	pll_write(pll->mmio + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES, 0xc0);

> +	ndelay(250);

> +}

> +

> +static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll)

> +{

> +	u32 data;

> +

> +	data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1);

> +	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1, data & ~BIT(5));

> +}

> +

> +static void dsi_pll_enable_global_clk(struct dsi_pll_7nm *pll)

> +{

> +	u32 data;

> +

> +	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_3, 0x04);

> +

> +	data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1);

> +	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1,

> +		  data | BIT(5) | BIT(4));

> +}

> +

> +static void dsi_pll_phy_dig_reset(struct dsi_pll_7nm *pll)

> +{

> +	/*

> +	 * Reset the PHY digital domain. This would be needed when

> +	 * coming out of a CX or analog rail power collapse while

> +	 * ensuring that the pads maintain LP00 or LP11 state

> +	 */

> +	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4, BIT(0));

> +	wmb(); /* Ensure that the reset is deasserted */

> +	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4, 0x0);

> +	wmb(); /* Ensure that the reset is deasserted */

> +}

> +

> +static int dsi_pll_7nm_vco_prepare(struct clk_hw *hw)

> +{

> +	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);

> +	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);

> +	int rc;

> +

> +	dsi_pll_enable_pll_bias(pll_7nm);

> +	if (pll_7nm->slave)

> +		dsi_pll_enable_pll_bias(pll_7nm->slave);

> +

> +	/* Start PLL */

> +	pll_write(pll_7nm->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0x01);

> +

> +	/*

> +	 * ensure all PLL configurations are written prior to checking

> +	 * for PLL lock.

> +	 */

> +	wmb();

> +

> +	/* Check for PLL lock */

> +	rc = dsi_pll_7nm_lock_status(pll_7nm);

> +	if (rc) {

> +		pr_err("PLL(%d) lock failed\n", pll_7nm->id);

> +		goto error;

> +	}

> +

> +	pll->pll_on = true;

> +

> +	/*

> +	 * assert power on reset for PHY digital in case the PLL is

> +	 * enabled after CX of analog domain power collapse. This needs

> +	 * to be done before enabling the global clk.

> +	 */

> +	dsi_pll_phy_dig_reset(pll_7nm);

> +	if (pll_7nm->slave)

> +		dsi_pll_phy_dig_reset(pll_7nm->slave);

> +

> +	dsi_pll_enable_global_clk(pll_7nm);

> +	if (pll_7nm->slave)

> +		dsi_pll_enable_global_clk(pll_7nm->slave);

> +

> +error:

> +	return rc;

> +}

> +

> +static void dsi_pll_disable_sub(struct dsi_pll_7nm *pll)

> +{

> +	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_RBUF_CTRL, 0);

> +	dsi_pll_disable_pll_bias(pll);

> +}

> +

> +static void dsi_pll_7nm_vco_unprepare(struct clk_hw *hw)

> +{

> +	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);

> +	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);

> +

> +	/*

> +	 * To avoid any stray glitches while abruptly powering down the PLL

> +	 * make sure to gate the clock using the clock enable bit before

> +	 * powering down the PLL

> +	 */

> +	dsi_pll_disable_global_clk(pll_7nm);

> +	pll_write(pll_7nm->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0);

> +	dsi_pll_disable_sub(pll_7nm);

> +	if (pll_7nm->slave) {

> +		dsi_pll_disable_global_clk(pll_7nm->slave);

> +		dsi_pll_disable_sub(pll_7nm->slave);

> +	}

> +	/* flush, ensure all register writes are done */

> +	wmb();

> +	pll->pll_on = false;

> +}

> +

> +static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,

> +						  unsigned long parent_rate)

> +{

> +	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);

> +	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);

> +	struct dsi_pll_config *config = &pll_7nm->pll_configuration;

> +	void __iomem *base = pll_7nm->mmio;

> +	u64 ref_clk = pll_7nm->vco_ref_clk_rate;

> +	u64 vco_rate = 0x0;

> +	u64 multiplier;

> +	u32 frac;

> +	u32 dec;

> +	u64 pll_freq, tmp64;

> +

> +	dec = pll_read(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1);

> +	dec &= 0xff;

> +

> +	frac = pll_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1);

> +	frac |= ((pll_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1) &

> +		  0xff) << 8);

> +	frac |= ((pll_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &

> +		  0x3) << 16);

> +

> +	/*

> +	 * TODO:

> +	 *	1. Assumes prescaler is disabled

> +	 */

> +	multiplier = 1 << config->frac_bits;

> +	pll_freq = dec * (ref_clk * 2);

> +	tmp64 = (ref_clk * 2 * frac);

> +	pll_freq += div_u64(tmp64, multiplier);

> +

> +	vco_rate = pll_freq;

> +

> +	DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",

> +	    pll_7nm->id, (unsigned long)vco_rate, dec, frac);

> +

> +	return (unsigned long)vco_rate;

> +}

> +
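As a cross-check, the read-back in dsi_pll_7nm_vco_recalc_rate() above inverts that split with the same 2 * ref_clk factor and frac_bits = 18, so the illustrative dec/frac pair from the earlier sketch rounds back to the programmed rate:

	vco = dec * (2 * fref) + (2 * fref * frac) / 2^18
	    = 39 * 38400000 + (38400000 * 16384) / 262144
	    = 1497600000 + 2400000
	    = 1500000000 Hz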

> +static const struct clk_ops clk_ops_dsi_pll_7nm_vco = {

> +	.round_rate = msm_dsi_pll_helper_clk_round_rate,

> +	.set_rate = dsi_pll_7nm_vco_set_rate,

> +	.recalc_rate = dsi_pll_7nm_vco_recalc_rate,

> +	.prepare = dsi_pll_7nm_vco_prepare,

> +	.unprepare = dsi_pll_7nm_vco_unprepare,

> +};

> +

> +/*

> + * PLL Callbacks

> + */

> +

> +static void dsi_pll_7nm_save_state(struct msm_dsi_pll *pll)

> +{

> +	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);

> +	struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;

> +	void __iomem *phy_base = pll_7nm->phy_cmn_mmio;

> +	u32 cmn_clk_cfg0, cmn_clk_cfg1;

> +

> +	cached->pll_out_div = pll_read(pll_7nm->mmio +

> +				       REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);

> +	cached->pll_out_div &= 0x3;

> +

> +	cmn_clk_cfg0 = pll_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);

> +	cached->bit_clk_div = cmn_clk_cfg0 & 0xf;

> +	cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;

> +

> +	cmn_clk_cfg1 = pll_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);

> +	cached->pll_mux = cmn_clk_cfg1 & 0x3;

> +

> +	DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",

> +	    pll_7nm->id, cached->pll_out_div, cached->bit_clk_div,

> +	    cached->pix_clk_div, cached->pll_mux);

> +}

> +

> +static int dsi_pll_7nm_restore_state(struct msm_dsi_pll *pll)

> +{

> +	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);

> +	struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;

> +	void __iomem *phy_base = pll_7nm->phy_cmn_mmio;

> +	u32 val;

> +	int ret;

> +

> +	val = pll_read(pll_7nm->mmio + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);

> +	val &= ~0x3;

> +	val |= cached->pll_out_div;

> +	pll_write(pll_7nm->mmio + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE, val);

> +

> +	pll_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0,

> +		  cached->bit_clk_div | (cached->pix_clk_div << 4));

> +

> +	val = pll_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);

> +	val &= ~0x3;

> +	val |= cached->pll_mux;

> +	pll_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, val);

> +

> +	ret = dsi_pll_7nm_vco_set_rate(&pll->clk_hw, pll_7nm->vco_current_rate, pll_7nm->vco_ref_clk_rate);

> +	if (ret) {

> +		DRM_DEV_ERROR(&pll_7nm->pdev->dev,

> +			"restore vco rate failed. ret=%d\n", ret);

> +		return ret;

> +	}

> +

> +	DBG("DSI PLL%d", pll_7nm->id);

> +

> +	return 0;

> +}

> +

> +static int dsi_pll_7nm_set_usecase(struct msm_dsi_pll *pll,

> +				    enum msm_dsi_phy_usecase uc)

> +{

> +	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);

> +	void __iomem *base = pll_7nm->phy_cmn_mmio;

> +	u32 data = 0x0;	/* internal PLL */

> +

> +	DBG("DSI PLL%d", pll_7nm->id);

> +

> +	switch (uc) {

> +	case MSM_DSI_PHY_STANDALONE:

> +		break;

> +	case MSM_DSI_PHY_MASTER:

> +		pll_7nm->slave = pll_7nm_list[(pll_7nm->id + 1) % DSI_MAX];

> +		break;

> +	case MSM_DSI_PHY_SLAVE:

> +		data = 0x1; /* external PLL */

> +		break;

> +	default:

> +		return -EINVAL;

> +	}

> +

> +	/* set PLL src */

> +	pll_write(base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, (data << 2));

> +

> +	pll_7nm->uc = uc;

> +

> +	return 0;

> +}

> +

> +static int dsi_pll_7nm_get_provider(struct msm_dsi_pll *pll,

> +				     struct clk **byte_clk_provider,

> +				     struct clk **pixel_clk_provider)

> +{

> +	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);

> +	struct clk_hw_onecell_data *hw_data = pll_7nm->hw_data;

> +

> +	DBG("DSI PLL%d", pll_7nm->id);

> +

> +	if (byte_clk_provider)

> +		*byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk;

> +	if (pixel_clk_provider)

> +		*pixel_clk_provider = hw_data->hws[DSI_PIXEL_PLL_CLK]->clk;

> +

> +	return 0;

> +}

> +

> +static void dsi_pll_7nm_destroy(struct msm_dsi_pll *pll)

> +{

> +	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);

> +	struct device *dev = &pll_7nm->pdev->dev;

> +

> +	DBG("DSI PLL%d", pll_7nm->id);

> +	of_clk_del_provider(dev->of_node);

> +

> +	clk_hw_unregister_divider(pll_7nm->out_dsiclk_hw);

> +	clk_hw_unregister_mux(pll_7nm->pclk_mux_hw);

> +	clk_hw_unregister_fixed_factor(pll_7nm->post_out_div_clk_hw);

> +	clk_hw_unregister_fixed_factor(pll_7nm->by_2_bit_clk_hw);

> +	clk_hw_unregister_fixed_factor(pll_7nm->byte_clk_hw);

> +	clk_hw_unregister_divider(pll_7nm->bit_clk_hw);

> +	clk_hw_unregister_divider(pll_7nm->out_div_clk_hw);

> +	clk_hw_unregister(&pll_7nm->base.clk_hw);

> +}

> +

> +/*

> + * The post dividers and mux clocks are created using the standard divider and

> + * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux

> + * state to follow the master PLL's divider/mux state. Therefore, we don't

> + * require special clock ops that also configure the slave PLL registers

> + */

> +static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm)

> +{

> +	char clk_name[32], parent[32], vco_name[32];

> +	char parent2[32], parent3[32], parent4[32];

> +	struct clk_init_data vco_init = {

> +		.parent_names = (const char *[]){ "bi_tcxo" },

> +		.num_parents = 1,

> +		.name = vco_name,

> +		.flags = CLK_IGNORE_UNUSED,

> +		.ops = &clk_ops_dsi_pll_7nm_vco,

> +	};

> +	struct device *dev = &pll_7nm->pdev->dev;

> +	struct clk_hw_onecell_data *hw_data;

> +	struct clk_hw *hw;

> +	int ret;

> +

> +	DBG("DSI%d", pll_7nm->id);

> +

> +	hw_data = devm_kzalloc(dev, sizeof(*hw_data) +

> +			       NUM_PROVIDED_CLKS * sizeof(struct clk_hw *),

> +			       GFP_KERNEL);

> +	if (!hw_data)

> +		return -ENOMEM;

> +

> +	snprintf(vco_name, 32, "dsi%dvco_clk", pll_7nm->id);

> +	pll_7nm->base.clk_hw.init = &vco_init;

> +

> +	ret = clk_hw_register(dev, &pll_7nm->base.clk_hw);

> +	if (ret)

> +		return ret;

> +

> +	snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);

> +	snprintf(parent, 32, "dsi%dvco_clk", pll_7nm->id);

> +

> +	hw = clk_hw_register_divider(dev, clk_name,

> +				     parent, CLK_SET_RATE_PARENT,

> +				     pll_7nm->mmio +

> +				     REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE,

> +				     0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);

> +	if (IS_ERR(hw)) {

> +		ret = PTR_ERR(hw);

> +		goto err_base_clk_hw;

> +	}

> +

> +	pll_7nm->out_div_clk_hw = hw;

> +

> +	snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_7nm->id);

> +	snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);

> +

> +	/* BIT CLK: DIV_CTRL_3_0 */

> +	hw = clk_hw_register_divider(dev, clk_name, parent,

> +				     CLK_SET_RATE_PARENT,

> +				     pll_7nm->phy_cmn_mmio +

> +				     REG_DSI_7nm_PHY_CMN_CLK_CFG0,

> +				     0, 4, CLK_DIVIDER_ONE_BASED,

> +				     &pll_7nm->postdiv_lock);

> +	if (IS_ERR(hw)) {

> +		ret = PTR_ERR(hw);

> +		goto err_out_div_clk_hw;

> +	}

> +

> +	pll_7nm->bit_clk_hw = hw;

> +

> +	snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_7nm->id);

> +	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->id);

> +

> +	/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */

> +	hw = clk_hw_register_fixed_factor(dev, clk_name, parent,

> +					  CLK_SET_RATE_PARENT, 1, 8);

> +	if (IS_ERR(hw)) {

> +		ret = PTR_ERR(hw);

> +		goto err_bit_clk_hw;

> +	}

> +

> +	pll_7nm->byte_clk_hw = hw;

> +	hw_data->hws[DSI_BYTE_PLL_CLK] = hw;

> +

> +	snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->id);

> +	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->id);

> +

> +	hw = clk_hw_register_fixed_factor(dev, clk_name, parent,

> +					  0, 1, 2);

> +	if (IS_ERR(hw)) {

> +		ret = PTR_ERR(hw);

> +		goto err_byte_clk_hw;

> +	}

> +

> +	pll_7nm->by_2_bit_clk_hw = hw;

> +

> +	snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->id);

> +	snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);

> +

> +	hw = clk_hw_register_fixed_factor(dev, clk_name, parent,

> +					  0, 1, 4);

> +	if (IS_ERR(hw)) {

> +		ret = PTR_ERR(hw);

> +		goto err_by_2_bit_clk_hw;

> +	}

> +

> +	pll_7nm->post_out_div_clk_hw = hw;

> +

> +	snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_7nm->id);

> +	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->id);

> +	snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->id);

> +	snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);

> +	snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->id);

> +

> +	hw = clk_hw_register_mux(dev, clk_name,

> +				 ((const char *[]){

> +				 parent, parent2, parent3, parent4

> +				 }), 4, 0, pll_7nm->phy_cmn_mmio +

> +				 REG_DSI_7nm_PHY_CMN_CLK_CFG1,

> +				 0, 2, 0, NULL);

> +	if (IS_ERR(hw)) {

> +		ret = PTR_ERR(hw);

> +		goto err_post_out_div_clk_hw;

> +	}

> +

> +	pll_7nm->pclk_mux_hw = hw;

> +

> +	snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_7nm->id);

> +	snprintf(parent, 32, "dsi%d_pclk_mux", pll_7nm->id);

> +

> +	/* PIX CLK DIV : DIV_CTRL_7_4*/

> +	hw = clk_hw_register_divider(dev, clk_name, parent,

> +				     0, pll_7nm->phy_cmn_mmio +

> +					REG_DSI_7nm_PHY_CMN_CLK_CFG0,

> +				     4, 4, CLK_DIVIDER_ONE_BASED,

> +				     &pll_7nm->postdiv_lock);

> +	if (IS_ERR(hw)) {

> +		ret = PTR_ERR(hw);

> +		goto err_pclk_mux_hw;

> +	}

> +

> +	pll_7nm->out_dsiclk_hw = hw;

> +	hw_data->hws[DSI_PIXEL_PLL_CLK] = hw;

> +

> +	hw_data->num = NUM_PROVIDED_CLKS;

> +	pll_7nm->hw_data = hw_data;

> +

> +	ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,

> +				     pll_7nm->hw_data);

> +	if (ret) {

> +		DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);

> +		goto err_dsiclk_hw;

> +	}

> +

> +	return 0;

> +

> +err_dsiclk_hw:

> +	clk_hw_unregister_divider(pll_7nm->out_dsiclk_hw);

> +err_pclk_mux_hw:

> +	clk_hw_unregister_mux(pll_7nm->pclk_mux_hw);

> +err_post_out_div_clk_hw:

> +	clk_hw_unregister_fixed_factor(pll_7nm->post_out_div_clk_hw);

> +err_by_2_bit_clk_hw:

> +	clk_hw_unregister_fixed_factor(pll_7nm->by_2_bit_clk_hw);

> +err_byte_clk_hw:

> +	clk_hw_unregister_fixed_factor(pll_7nm->byte_clk_hw);

> +err_bit_clk_hw:

> +	clk_hw_unregister_divider(pll_7nm->bit_clk_hw);

> +err_out_div_clk_hw:

> +	clk_hw_unregister_divider(pll_7nm->out_div_clk_hw);

> +err_base_clk_hw:

> +	clk_hw_unregister(&pll_7nm->base.clk_hw);

> +

> +	return ret;

> +}

> +
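To make the divider chain registered above concrete, here is the arithmetic for one purely illustrative configuration (1.5 GHz VCO, out_div = 1, bit_clk_div = 2, pix_clk_div = 10, pclk_mux selecting the bit clock); none of these divider values come from the patch:

	dsi0_pll_bit_clk         = 1500000000 / 1 / 2 = 750000000 Hz
	dsi0_phy_pll_out_byteclk = 750000000 / 8      = 93750000 Hz
	dsi0_phy_pll_out_dsiclk  = 750000000 / 10     = 75000000 Hz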

> +struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev,

> +					enum msm_dsi_phy_type type, int id)

> +{

> +	struct dsi_pll_7nm *pll_7nm;

> +	struct msm_dsi_pll *pll;

> +	int ret;

> +

> +	pll_7nm = devm_kzalloc(&pdev->dev, sizeof(*pll_7nm), GFP_KERNEL);

> +	if (!pll_7nm)

> +		return ERR_PTR(-ENOMEM);

> +

> +	DBG("DSI PLL%d", id);

> +

> +	pll_7nm->pdev = pdev;

> +	pll_7nm->id = id;

> +	pll_7nm_list[id] = pll_7nm;

> +

> +	pll_7nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");

> +	if (IS_ERR_OR_NULL(pll_7nm->phy_cmn_mmio)) {

> +		DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");

> +		return ERR_PTR(-ENOMEM);

> +	}

> +

> +	pll_7nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");

> +	if (IS_ERR_OR_NULL(pll_7nm->mmio)) {

> +		DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");

> +		return ERR_PTR(-ENOMEM);

> +	}

> +

> +	spin_lock_init(&pll_7nm->postdiv_lock);

> +

> +	pll = &pll_7nm->base;

> +	pll->min_rate = 1000000000UL;

> +	pll->max_rate = 3500000000UL;

> +	if (type == MSM_DSI_PHY_7NM_V4_1) {

> +		pll->min_rate = 600000000UL;

> +		pll->max_rate = (unsigned long)5000000000ULL;

> +		/* workaround for max rate overflowing on 32-bit builds: */

> +		pll->max_rate = max(pll->max_rate, 0xffffffffUL);

> +	}

> +	pll->get_provider = dsi_pll_7nm_get_provider;

> +	pll->destroy = dsi_pll_7nm_destroy;

> +	pll->save_state = dsi_pll_7nm_save_state;

> +	pll->restore_state = dsi_pll_7nm_restore_state;

> +	pll->set_usecase = dsi_pll_7nm_set_usecase;

> +

> +	pll_7nm->vco_delay = 1;

> +

> +	ret = pll_7nm_register(pll_7nm);

> +	if (ret) {

> +		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);

> +		return ERR_PTR(ret);

> +	}

> +

> +	/* TODO: Remove this when we have proper display handover support */

> +	msm_dsi_pll_save_state(pll);

> +

> +	return pll;

> +}

> +

>  static int dsi_phy_hw_v4_0_is_pll_on(struct msm_dsi_phy *phy)

>  {

>  	void __iomem *base = phy->base;

> diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/phy/dsi_pll.c

> similarity index 100%

> rename from drivers/gpu/drm/msm/dsi/pll/dsi_pll.c

> rename to drivers/gpu/drm/msm/dsi/phy/dsi_pll.c

> diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/phy/dsi_pll.h

> similarity index 100%

> rename from drivers/gpu/drm/msm/dsi/pll/dsi_pll.h

> rename to drivers/gpu/drm/msm/dsi/phy/dsi_pll.h

> diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c

> deleted file mode 100644

> index de3b802ccd3d..000000000000

> --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c

> +++ /dev/null

> @@ -1,881 +0,0 @@

> -/*

> - * SPDX-License-Identifier: GPL-2.0

> - * Copyright (c) 2018, The Linux Foundation

> - */

> -

> -#include <linux/clk.h>

> -#include <linux/clk-provider.h>

> -#include <linux/iopoll.h>

> -

> -#include "dsi_pll.h"

> -#include "dsi.xml.h"

> -

> -/*

> - * DSI PLL 10nm - clock diagram (eg: DSI0):

> - *

> - *           dsi0_pll_out_div_clk  dsi0_pll_bit_clk

> - *                              |                |

> - *                              |                |

> - *                 +---------+  |  +----------+  |  +----+

> - *  dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk

> - *                 +---------+  |  +----------+  |  +----+

> - *                              |                |

> - *                              |                |          dsi0_pll_by_2_bit_clk

> - *                              |                |          |

> - *                              |                |  +----+  |  |\  dsi0_pclk_mux

> - *                              |                |--| /2 |--o--| \   |

> - *                              |                |  +----+     |  \  |  +---------+

> - *                              |                --------------|  |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk

> - *                              |------------------------------| /     +---------+

> - *                              |          +-----+             | /

> - *                              -----------| /4? |--o----------|/

> - *                                         +-----+  |           |

> - *                                                  |           |dsiclk_sel

> - *                                                  |

> - *                                                  dsi0_pll_post_out_div_clk

> - */

> -

> -#define DSI_BYTE_PLL_CLK		0

> -#define DSI_PIXEL_PLL_CLK		1

> -#define NUM_PROVIDED_CLKS		2

> -

> -#define VCO_REF_CLK_RATE		19200000

> -

> -struct dsi_pll_regs {

> -	u32 pll_prop_gain_rate;

> -	u32 pll_lockdet_rate;

> -	u32 decimal_div_start;

> -	u32 frac_div_start_low;

> -	u32 frac_div_start_mid;

> -	u32 frac_div_start_high;

> -	u32 pll_clock_inverters;

> -	u32 ssc_stepsize_low;

> -	u32 ssc_stepsize_high;

> -	u32 ssc_div_per_low;

> -	u32 ssc_div_per_high;

> -	u32 ssc_adjper_low;

> -	u32 ssc_adjper_high;

> -	u32 ssc_control;

> -};

> -

> -struct dsi_pll_config {

> -	u32 ref_freq;

> -	bool div_override;

> -	u32 output_div;

> -	bool ignore_frac;

> -	bool disable_prescaler;

> -	bool enable_ssc;

> -	bool ssc_center;

> -	u32 dec_bits;

> -	u32 frac_bits;

> -	u32 lock_timer;

> -	u32 ssc_freq;

> -	u32 ssc_offset;

> -	u32 ssc_adj_per;

> -	u32 thresh_cycles;

> -	u32 refclk_cycles;

> -};

> -

> -struct pll_10nm_cached_state {

> -	unsigned long vco_rate;

> -	u8 bit_clk_div;

> -	u8 pix_clk_div;

> -	u8 pll_out_div;

> -	u8 pll_mux;

> -};

> -

> -struct dsi_pll_10nm {

> -	struct msm_dsi_pll base;

> -

> -	int id;

> -	struct platform_device *pdev;

> -

> -	void __iomem *phy_cmn_mmio;

> -	void __iomem *mmio;

> -

> -	u64 vco_ref_clk_rate;

> -	u64 vco_current_rate;

> -

> -	/* protects REG_DSI_10nm_PHY_CMN_CLK_CFG0 register */

> -	spinlock_t postdiv_lock;

> -

> -	int vco_delay;

> -	struct dsi_pll_config pll_configuration;

> -	struct dsi_pll_regs reg_setup;

> -

> -	/* private clocks: */

> -	struct clk_hw *out_div_clk_hw;

> -	struct clk_hw *bit_clk_hw;

> -	struct clk_hw *byte_clk_hw;

> -	struct clk_hw *by_2_bit_clk_hw;

> -	struct clk_hw *post_out_div_clk_hw;

> -	struct clk_hw *pclk_mux_hw;

> -	struct clk_hw *out_dsiclk_hw;

> -

> -	/* clock-provider: */

> -	struct clk_hw_onecell_data *hw_data;

> -

> -	struct pll_10nm_cached_state cached_state;

> -

> -	enum msm_dsi_phy_usecase uc;

> -	struct dsi_pll_10nm *slave;

> -};

> -

> -#define to_pll_10nm(x)	container_of(x, struct dsi_pll_10nm, base)

> -

> -/*

> - * Global list of private DSI PLL struct pointers. We need this for Dual DSI

> - * mode, where the master PLL's clk_ops needs access the slave's private data

> - */

> -static struct dsi_pll_10nm *pll_10nm_list[DSI_MAX];

> -

> -static void dsi_pll_setup_config(struct dsi_pll_10nm *pll)

> -{

> -	struct dsi_pll_config *config = &pll->pll_configuration;

> -

> -	config->ref_freq = pll->vco_ref_clk_rate;

> -	config->output_div = 1;

> -	config->dec_bits = 8;

> -	config->frac_bits = 18;

> -	config->lock_timer = 64;

> -	config->ssc_freq = 31500;

> -	config->ssc_offset = 5000;

> -	config->ssc_adj_per = 2;

> -	config->thresh_cycles = 32;

> -	config->refclk_cycles = 256;

> -

> -	config->div_override = false;

> -	config->ignore_frac = false;

> -	config->disable_prescaler = false;

> -

> -	config->enable_ssc = false;

> -	config->ssc_center = 0;

> -}

> -

> -static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll)

> -{

> -	struct dsi_pll_config *config = &pll->pll_configuration;

> -	struct dsi_pll_regs *regs = &pll->reg_setup;

> -	u64 fref = pll->vco_ref_clk_rate;

> -	u64 pll_freq;

> -	u64 divider;

> -	u64 dec, dec_multiple;

> -	u32 frac;

> -	u64 multiplier;

> -

> -	pll_freq = pll->vco_current_rate;

> -

> -	if (config->disable_prescaler)

> -		divider = fref;

> -	else

> -		divider = fref * 2;

> -

> -	multiplier = 1 << config->frac_bits;

> -	dec_multiple = div_u64(pll_freq * multiplier, divider);

> -	dec = div_u64_rem(dec_multiple, multiplier, &frac);

> -

> -	if (pll_freq <= 1900000000UL)

> -		regs->pll_prop_gain_rate = 8;

> -	else if (pll_freq <= 3000000000UL)

> -		regs->pll_prop_gain_rate = 10;

> -	else

> -		regs->pll_prop_gain_rate = 12;

> -	if (pll_freq < 1100000000UL)

> -		regs->pll_clock_inverters = 8;

> -	else

> -		regs->pll_clock_inverters = 0;

> -

> -	regs->pll_lockdet_rate = config->lock_timer;

> -	regs->decimal_div_start = dec;

> -	regs->frac_div_start_low = (frac & 0xff);

> -	regs->frac_div_start_mid = (frac & 0xff00) >> 8;

> -	regs->frac_div_start_high = (frac & 0x30000) >> 16;

> -}

> -

> -#define SSC_CENTER		BIT(0)

> -#define SSC_EN			BIT(1)

> -

> -static void dsi_pll_calc_ssc(struct dsi_pll_10nm *pll)

> -{

> -	struct dsi_pll_config *config = &pll->pll_configuration;

> -	struct dsi_pll_regs *regs = &pll->reg_setup;

> -	u32 ssc_per;

> -	u32 ssc_mod;

> -	u64 ssc_step_size;

> -	u64 frac;

> -

> -	if (!config->enable_ssc) {

> -		DBG("SSC not enabled\n");

> -		return;

> -	}

> -

> -	ssc_per = DIV_ROUND_CLOSEST(config->ref_freq, config->ssc_freq) / 2 - 1;

> -	ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);

> -	ssc_per -= ssc_mod;

> -

> -	frac = regs->frac_div_start_low |

> -			(regs->frac_div_start_mid << 8) |

> -			(regs->frac_div_start_high << 16);

> -	ssc_step_size = regs->decimal_div_start;

> -	ssc_step_size *= (1 << config->frac_bits);

> -	ssc_step_size += frac;

> -	ssc_step_size *= config->ssc_offset;

> -	ssc_step_size *= (config->ssc_adj_per + 1);

> -	ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));

> -	ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);

> -

> -	regs->ssc_div_per_low = ssc_per & 0xFF;

> -	regs->ssc_div_per_high = (ssc_per & 0xFF00) >> 8;

> -	regs->ssc_stepsize_low = (u32)(ssc_step_size & 0xFF);

> -	regs->ssc_stepsize_high = (u32)((ssc_step_size & 0xFF00) >> 8);

> -	regs->ssc_adjper_low = config->ssc_adj_per & 0xFF;

> -	regs->ssc_adjper_high = (config->ssc_adj_per & 0xFF00) >> 8;

> -

> -	regs->ssc_control = config->ssc_center ? SSC_CENTER : 0;

> -

> -	pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",

> -		 regs->decimal_div_start, frac, config->frac_bits);

> -	pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",

> -		 ssc_per, (u32)ssc_step_size, config->ssc_adj_per);

> -}

> -

> -static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll)

> -{

> -	void __iomem *base = pll->mmio;

> -	struct dsi_pll_regs *regs = &pll->reg_setup;

> -

> -	if (pll->pll_configuration.enable_ssc) {

> -		pr_debug("SSC is enabled\n");

> -

> -		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1,

> -			  regs->ssc_stepsize_low);

> -		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,

> -			  regs->ssc_stepsize_high);

> -		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1,

> -			  regs->ssc_div_per_low);

> -		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1,

> -			  regs->ssc_div_per_high);

> -		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1,

> -			  regs->ssc_adjper_low);

> -		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1,

> -			  regs->ssc_adjper_high);

> -		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_CONTROL,

> -			  SSC_EN | regs->ssc_control);

> -	}

> -}

> -

> -static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll)

> -{

> -	void __iomem *base = pll->mmio;

> -

> -	pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE, 0x80);

> -	pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);

> -	pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);

> -	pll_write(base + REG_DSI_10nm_PHY_PLL_DSM_DIVIDER, 0x00);

> -	pll_write(base + REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);

> -	pll_write(base + REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);

> -	pll_write(base + REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE,

> -		  0xba);

> -	pll_write(base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);

> -	pll_write(base + REG_DSI_10nm_PHY_PLL_OUTDIV, 0x00);

> -	pll_write(base + REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE, 0x00);

> -	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);

> -	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x08);

> -	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1, 0xc0);

> -	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0xfa);

> -	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1,

> -		  0x4c);

> -	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);

> -	pll_write(base + REG_DSI_10nm_PHY_PLL_PFILT, 0x29);

> -	pll_write(base + REG_DSI_10nm_PHY_PLL_IFILT, 0x3f);

> -}

> -

> -static void dsi_pll_commit(struct dsi_pll_10nm *pll)

> -{

> -	void __iomem *base = pll->mmio;

> -	struct dsi_pll_regs *reg = &pll->reg_setup;

> -

> -	pll_write(base + REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);

> -	pll_write(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1,

> -		  reg->decimal_div_start);

> -	pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1,

> -		  reg->frac_div_start_low);

> -	pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1,

> -		  reg->frac_div_start_mid);

> -	pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1,

> -		  reg->frac_div_start_high);

> -	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1,

> -		  reg->pll_lockdet_rate);

> -	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);

> -	pll_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10);

> -	pll_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS,

> -		  reg->pll_clock_inverters);

> -}

> -

> -static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,

> -				     unsigned long parent_rate)

> -{

> -	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);

> -	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);

> -

> -	DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_10nm->id, rate,

> -	    parent_rate);

> -

> -	pll_10nm->vco_current_rate = rate;

> -	pll_10nm->vco_ref_clk_rate = VCO_REF_CLK_RATE;

> -

> -	dsi_pll_setup_config(pll_10nm);

> -

> -	dsi_pll_calc_dec_frac(pll_10nm);

> -

> -	dsi_pll_calc_ssc(pll_10nm);

> -

> -	dsi_pll_commit(pll_10nm);

> -

> -	dsi_pll_config_hzindep_reg(pll_10nm);

> -

> -	dsi_pll_ssc_commit(pll_10nm);

> -

> -	/* flush, ensure all register writes are done*/

> -	wmb();

> -

> -	return 0;

> -}

> -

> -static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll)

> -{

> -	struct device *dev = &pll->pdev->dev;

> -	int rc;

> -	u32 status = 0;

> -	u32 const delay_us = 100;

> -	u32 const timeout_us = 5000;

> -

> -	rc = readl_poll_timeout_atomic(pll->mmio +

> -				       REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE,

> -				       status,

> -				       ((status & BIT(0)) > 0),

> -				       delay_us,

> -				       timeout_us);

> -	if (rc)

> -		DRM_DEV_ERROR(dev, "DSI PLL(%d) lock failed, status=0x%08x\n",

> -			      pll->id, status);

> -

> -	return rc;

> -}

> -

> -static void dsi_pll_disable_pll_bias(struct dsi_pll_10nm *pll)

> -{

> -	u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0);

> -

> -	pll_write(pll->mmio + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0);

> -	pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0,

> -		  data & ~BIT(5));

> -	ndelay(250);

> -}

> -

> -static void dsi_pll_enable_pll_bias(struct dsi_pll_10nm *pll)

> -{

> -	u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0);

> -

> -	pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0,

> -		  data | BIT(5));

> -	pll_write(pll->mmio + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0xc0);

> -	ndelay(250);

> -}

> -

> -static void dsi_pll_disable_global_clk(struct dsi_pll_10nm *pll)

> -{

> -	u32 data;

> -

> -	data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1);

> -	pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1,

> -		  data & ~BIT(5));

> -}

> -

> -static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll)

> -{

> -	u32 data;

> -

> -	data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1);

> -	pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1,

> -		  data | BIT(5));

> -}

> -

> -static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)

> -{

> -	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);

> -	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);

> -	struct device *dev = &pll_10nm->pdev->dev;

> -	int rc;

> -

> -	dsi_pll_enable_pll_bias(pll_10nm);

> -	if (pll_10nm->slave)

> -		dsi_pll_enable_pll_bias(pll_10nm->slave);

> -

> -	rc = dsi_pll_10nm_vco_set_rate(hw,pll_10nm->vco_current_rate, 0);

> -	if (rc) {

> -		DRM_DEV_ERROR(dev, "vco_set_rate failed, rc=%d\n", rc);

> -		return rc;

> -	}

> -

> -	/* Start PLL */

> -	pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,

> -		  0x01);

> -

> -	/*

> -	 * ensure all PLL configurations are written prior to checking

> -	 * for PLL lock.

> -	 */

> -	wmb();

> -

> -	/* Check for PLL lock */

> -	rc = dsi_pll_10nm_lock_status(pll_10nm);

> -	if (rc) {

> -		DRM_DEV_ERROR(dev, "PLL(%d) lock failed\n", pll_10nm->id);

> -		goto error;

> -	}

> -

> -	pll->pll_on = true;

> -

> -	dsi_pll_enable_global_clk(pll_10nm);

> -	if (pll_10nm->slave)

> -		dsi_pll_enable_global_clk(pll_10nm->slave);

> -

> -	pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_RBUF_CTRL,

> -		  0x01);

> -	if (pll_10nm->slave)

> -		pll_write(pll_10nm->slave->phy_cmn_mmio +

> -			  REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x01);

> -

> -error:

> -	return rc;

> -}

> -

> -static void dsi_pll_disable_sub(struct dsi_pll_10nm *pll)

> -{

> -	pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0);

> -	dsi_pll_disable_pll_bias(pll);

> -}

> -

> -static void dsi_pll_10nm_vco_unprepare(struct clk_hw *hw)

> -{

> -	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);

> -	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);

> -

> -	/*

> -	 * To avoid any stray glitches while abruptly powering down the PLL

> -	 * make sure to gate the clock using the clock enable bit before

> -	 * powering down the PLL

> -	 */

> -	dsi_pll_disable_global_clk(pll_10nm);

> -	pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0);

> -	dsi_pll_disable_sub(pll_10nm);

> -	if (pll_10nm->slave) {

> -		dsi_pll_disable_global_clk(pll_10nm->slave);

> -		dsi_pll_disable_sub(pll_10nm->slave);

> -	}

> -	/* flush, ensure all register writes are done */

> -	wmb();

> -	pll->pll_on = false;

> -}

> -

> -static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,

> -						  unsigned long parent_rate)

> -{

> -	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);

> -	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);

> -	struct dsi_pll_config *config = &pll_10nm->pll_configuration;

> -	void __iomem *base = pll_10nm->mmio;

> -	u64 ref_clk = pll_10nm->vco_ref_clk_rate;

> -	u64 vco_rate = 0x0;

> -	u64 multiplier;

> -	u32 frac;

> -	u32 dec;

> -	u64 pll_freq, tmp64;

> -

> -	dec = pll_read(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1);

> -	dec &= 0xff;

> -

> -	frac = pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1);

> -	frac |= ((pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1) &

> -		  0xff) << 8);

> -	frac |= ((pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &

> -		  0x3) << 16);

> -

> -	/*

> -	 * TODO:

> -	 *	1. Assumes prescaler is disabled

> -	 */

> -	multiplier = 1 << config->frac_bits;

> -	pll_freq = dec * (ref_clk * 2);

> -	tmp64 = (ref_clk * 2 * frac);

> -	pll_freq += div_u64(tmp64, multiplier);

> -

> -	vco_rate = pll_freq;

> -

> -	DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",

> -	    pll_10nm->id, (unsigned long)vco_rate, dec, frac);

> -

> -	return (unsigned long)vco_rate;

> -}

> -

> -static const struct clk_ops clk_ops_dsi_pll_10nm_vco = {

> -	.round_rate = msm_dsi_pll_helper_clk_round_rate,

> -	.set_rate = dsi_pll_10nm_vco_set_rate,

> -	.recalc_rate = dsi_pll_10nm_vco_recalc_rate,

> -	.prepare = dsi_pll_10nm_vco_prepare,

> -	.unprepare = dsi_pll_10nm_vco_unprepare,

> -};

> -

> -/*

> - * PLL Callbacks

> - */

> -

> -static void dsi_pll_10nm_save_state(struct msm_dsi_pll *pll)

> -{

> -	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);

> -	struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;

> -	void __iomem *phy_base = pll_10nm->phy_cmn_mmio;

> -	u32 cmn_clk_cfg0, cmn_clk_cfg1;

> -

> -	cached->pll_out_div = pll_read(pll_10nm->mmio +

> -				       REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);

> -	cached->pll_out_div &= 0x3;

> -

> -	cmn_clk_cfg0 = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0);

> -	cached->bit_clk_div = cmn_clk_cfg0 & 0xf;

> -	cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;

> -

> -	cmn_clk_cfg1 = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);

> -	cached->pll_mux = cmn_clk_cfg1 & 0x3;

> -

> -	DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",

> -	    pll_10nm->id, cached->pll_out_div, cached->bit_clk_div,

> -	    cached->pix_clk_div, cached->pll_mux);

> -}

> -

> -static int dsi_pll_10nm_restore_state(struct msm_dsi_pll *pll)

> -{

> -	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);

> -	struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;

> -	void __iomem *phy_base = pll_10nm->phy_cmn_mmio;

> -	u32 val;

> -	int ret;

> -

> -	val = pll_read(pll_10nm->mmio + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);

> -	val &= ~0x3;

> -	val |= cached->pll_out_div;

> -	pll_write(pll_10nm->mmio + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE, val);

> -

> -	pll_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,

> -		  cached->bit_clk_div | (cached->pix_clk_div << 4));

> -

> -	val = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);

> -	val &= ~0x3;

> -	val |= cached->pll_mux;

> -	pll_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, val);

> -

> -	ret = dsi_pll_10nm_vco_set_rate(&pll->clk_hw, pll_10nm->vco_current_rate, pll_10nm->vco_ref_clk_rate);

> -	if (ret) {

> -		DRM_DEV_ERROR(&pll_10nm->pdev->dev,

> -			"restore vco rate failed. ret=%d\n", ret);

> -		return ret;

> -	}

> -

> -	DBG("DSI PLL%d", pll_10nm->id);

> -

> -	return 0;

> -}

> -

> -static int dsi_pll_10nm_set_usecase(struct msm_dsi_pll *pll,

> -				    enum msm_dsi_phy_usecase uc)

> -{

> -	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);

> -	void __iomem *base = pll_10nm->phy_cmn_mmio;

> -	u32 data = 0x0;	/* internal PLL */

> -

> -	DBG("DSI PLL%d", pll_10nm->id);

> -

> -	switch (uc) {

> -	case MSM_DSI_PHY_STANDALONE:

> -		break;

> -	case MSM_DSI_PHY_MASTER:

> -		pll_10nm->slave = pll_10nm_list[(pll_10nm->id + 1) % DSI_MAX];

> -		break;

> -	case MSM_DSI_PHY_SLAVE:

> -		data = 0x1; /* external PLL */

> -		break;

> -	default:

> -		return -EINVAL;

> -	}

> -

> -	/* set PLL src */

> -	pll_write(base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, (data << 2));

> -

> -	pll_10nm->uc = uc;

> -

> -	return 0;

> -}

> -

> -static int dsi_pll_10nm_get_provider(struct msm_dsi_pll *pll,

> -				     struct clk **byte_clk_provider,

> -				     struct clk **pixel_clk_provider)

> -{

> -	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);

> -	struct clk_hw_onecell_data *hw_data = pll_10nm->hw_data;

> -

> -	DBG("DSI PLL%d", pll_10nm->id);

> -

> -	if (byte_clk_provider)

> -		*byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk;

> -	if (pixel_clk_provider)

> -		*pixel_clk_provider = hw_data->hws[DSI_PIXEL_PLL_CLK]->clk;

> -

> -	return 0;

> -}

> -

> -static void dsi_pll_10nm_destroy(struct msm_dsi_pll *pll)

> -{

> -	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);

> -	struct device *dev = &pll_10nm->pdev->dev;

> -

> -	DBG("DSI PLL%d", pll_10nm->id);

> -	of_clk_del_provider(dev->of_node);

> -

> -	clk_hw_unregister_divider(pll_10nm->out_dsiclk_hw);

> -	clk_hw_unregister_mux(pll_10nm->pclk_mux_hw);

> -	clk_hw_unregister_fixed_factor(pll_10nm->post_out_div_clk_hw);

> -	clk_hw_unregister_fixed_factor(pll_10nm->by_2_bit_clk_hw);

> -	clk_hw_unregister_fixed_factor(pll_10nm->byte_clk_hw);

> -	clk_hw_unregister_divider(pll_10nm->bit_clk_hw);

> -	clk_hw_unregister_divider(pll_10nm->out_div_clk_hw);

> -	clk_hw_unregister(&pll_10nm->base.clk_hw);

> -}

> -

> -/*

> - * The post dividers and mux clocks are created using the standard 

> divider and

> - * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its 

> dividers/mux

> - * state to follow the master PLL's divider/mux state. Therefore, we 

> don't

> - * require special clock ops that also configure the slave PLL 

> registers

> - */

> -static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)

> -{

> -	char clk_name[32], parent[32], vco_name[32];

> -	char parent2[32], parent3[32], parent4[32];

> -	struct clk_init_data vco_init = {

> -		.parent_names = (const char *[]){ "xo" },

> -		.num_parents = 1,

> -		.name = vco_name,

> -		.flags = CLK_IGNORE_UNUSED,

> -		.ops = &clk_ops_dsi_pll_10nm_vco,

> -	};

> -	struct device *dev = &pll_10nm->pdev->dev;

> -	struct clk_hw_onecell_data *hw_data;

> -	struct clk_hw *hw;

> -	int ret;

> -

> -	DBG("DSI%d", pll_10nm->id);

> -

> -	hw_data = devm_kzalloc(dev, sizeof(*hw_data) +

> -			       NUM_PROVIDED_CLKS * sizeof(struct clk_hw *),

> -			       GFP_KERNEL);

> -	if (!hw_data)

> -		return -ENOMEM;

> -

> -	snprintf(vco_name, 32, "dsi%dvco_clk", pll_10nm->id);

> -	pll_10nm->base.clk_hw.init = &vco_init;

> -

> -	ret = clk_hw_register(dev, &pll_10nm->base.clk_hw);

> -	if (ret)

> -		return ret;

> -

> -	snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);

> -	snprintf(parent, 32, "dsi%dvco_clk", pll_10nm->id);

> -

> -	hw = clk_hw_register_divider(dev, clk_name,

> -				     parent, CLK_SET_RATE_PARENT,

> -				     pll_10nm->mmio +

> -				     REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE,

> -				     0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);

> -	if (IS_ERR(hw)) {

> -		ret = PTR_ERR(hw);

> -		goto err_base_clk_hw;

> -	}

> -

> -	pll_10nm->out_div_clk_hw = hw;

> -

> -	snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_10nm->id);

> -	snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);

> -

> -	/* BIT CLK: DIV_CTRL_3_0 */

> -	hw = clk_hw_register_divider(dev, clk_name, parent,

> -				     CLK_SET_RATE_PARENT,

> -				     pll_10nm->phy_cmn_mmio +

> -				     REG_DSI_10nm_PHY_CMN_CLK_CFG0,

> -				     0, 4, CLK_DIVIDER_ONE_BASED,

> -				     &pll_10nm->postdiv_lock);

> -	if (IS_ERR(hw)) {

> -		ret = PTR_ERR(hw);

> -		goto err_out_div_clk_hw;

> -	}

> -

> -	pll_10nm->bit_clk_hw = hw;

> -

> -	snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_10nm->id);

> -	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);

> -

> -	/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */

> -	hw = clk_hw_register_fixed_factor(dev, clk_name, parent,

> -					  CLK_SET_RATE_PARENT, 1, 8);

> -	if (IS_ERR(hw)) {

> -		ret = PTR_ERR(hw);

> -		goto err_bit_clk_hw;

> -	}

> -

> -	pll_10nm->byte_clk_hw = hw;

> -	hw_data->hws[DSI_BYTE_PLL_CLK] = hw;

> -

> -	snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->id);

> -	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);

> -

> -	hw = clk_hw_register_fixed_factor(dev, clk_name, parent,

> -					  0, 1, 2);

> -	if (IS_ERR(hw)) {

> -		ret = PTR_ERR(hw);

> -		goto err_byte_clk_hw;

> -	}

> -

> -	pll_10nm->by_2_bit_clk_hw = hw;

> -

> -	snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->id);

> -	snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);

> -

> -	hw = clk_hw_register_fixed_factor(dev, clk_name, parent,

> -					  0, 1, 4);

> -	if (IS_ERR(hw)) {

> -		ret = PTR_ERR(hw);

> -		goto err_by_2_bit_clk_hw;

> -	}

> -

> -	pll_10nm->post_out_div_clk_hw = hw;

> -

> -	snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_10nm->id);

> -	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);

> -	snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->id);

> -	snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);

> -	snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->id);

> -

> -	hw = clk_hw_register_mux(dev, clk_name,

> -				 ((const char *[]){

> -				 parent, parent2, parent3, parent4

> -				 }), 4, 0, pll_10nm->phy_cmn_mmio +

> -				 REG_DSI_10nm_PHY_CMN_CLK_CFG1,

> -				 0, 2, 0, NULL);

> -	if (IS_ERR(hw)) {

> -		ret = PTR_ERR(hw);

> -		goto err_post_out_div_clk_hw;

> -	}

> -

> -	pll_10nm->pclk_mux_hw = hw;

> -

> -	snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_10nm->id);

> -	snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->id);

> -

> -	/* PIX CLK DIV : DIV_CTRL_7_4*/

> -	hw = clk_hw_register_divider(dev, clk_name, parent,

> -				     0, pll_10nm->phy_cmn_mmio +

> -					REG_DSI_10nm_PHY_CMN_CLK_CFG0,

> -				     4, 4, CLK_DIVIDER_ONE_BASED,

> -				     &pll_10nm->postdiv_lock);

> -	if (IS_ERR(hw)) {

> -		ret = PTR_ERR(hw);

> -		goto err_pclk_mux_hw;

> -	}

> -

> -	pll_10nm->out_dsiclk_hw = hw;

> -	hw_data->hws[DSI_PIXEL_PLL_CLK] = hw;

> -

> -	hw_data->num = NUM_PROVIDED_CLKS;

> -	pll_10nm->hw_data = hw_data;

> -

> -	ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,

> -				     pll_10nm->hw_data);

> -	if (ret) {

> -		DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);

> -		goto err_dsiclk_hw;

> -	}

> -

> -	return 0;

> -

> -err_dsiclk_hw:

> -	clk_hw_unregister_divider(pll_10nm->out_dsiclk_hw);

> -err_pclk_mux_hw:

> -	clk_hw_unregister_mux(pll_10nm->pclk_mux_hw);

> -err_post_out_div_clk_hw:

> -	clk_hw_unregister_fixed_factor(pll_10nm->post_out_div_clk_hw);

> -err_by_2_bit_clk_hw:

> -	clk_hw_unregister_fixed_factor(pll_10nm->by_2_bit_clk_hw);

> -err_byte_clk_hw:

> -	clk_hw_unregister_fixed_factor(pll_10nm->byte_clk_hw);

> -err_bit_clk_hw:

> -	clk_hw_unregister_divider(pll_10nm->bit_clk_hw);

> -err_out_div_clk_hw:

> -	clk_hw_unregister_divider(pll_10nm->out_div_clk_hw);

> -err_base_clk_hw:

> -	clk_hw_unregister(&pll_10nm->base.clk_hw);

> -

> -	return ret;

> -}

> -

> -struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)

> -{

> -	struct dsi_pll_10nm *pll_10nm;

> -	struct msm_dsi_pll *pll;

> -	int ret;

> -

> -	pll_10nm = devm_kzalloc(&pdev->dev, sizeof(*pll_10nm), GFP_KERNEL);

> -	if (!pll_10nm)

> -		return ERR_PTR(-ENOMEM);

> -

> -	DBG("DSI PLL%d", id);

> -

> -	pll_10nm->pdev = pdev;

> -	pll_10nm->id = id;

> -	pll_10nm_list[id] = pll_10nm;

> -

> -	pll_10nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");

> -	if (IS_ERR_OR_NULL(pll_10nm->phy_cmn_mmio)) {

> -		DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");

> -		return ERR_PTR(-ENOMEM);

> -	}

> -

> -	pll_10nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");

> -	if (IS_ERR_OR_NULL(pll_10nm->mmio)) {

> -		DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");

> -		return ERR_PTR(-ENOMEM);

> -	}

> -

> -	spin_lock_init(&pll_10nm->postdiv_lock);

> -

> -	pll = &pll_10nm->base;

> -	pll->min_rate = 1000000000UL;

> -	pll->max_rate = 3500000000UL;

> -	pll->get_provider = dsi_pll_10nm_get_provider;

> -	pll->destroy = dsi_pll_10nm_destroy;

> -	pll->save_state = dsi_pll_10nm_save_state;

> -	pll->restore_state = dsi_pll_10nm_restore_state;

> -	pll->set_usecase = dsi_pll_10nm_set_usecase;

> -

> -	pll_10nm->vco_delay = 1;

> -

> -	ret = pll_10nm_register(pll_10nm);

> -	if (ret) {

> -		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);

> -		return ERR_PTR(ret);

> -	}

> -

> -	/* TODO: Remove this when we have proper display handover support */

> -	msm_dsi_pll_save_state(pll);

> -

> -	return pll;

> -}

> diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c

> deleted file mode 100644

> index f847376d501e..000000000000

> --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c

> +++ /dev/null

> @@ -1,1096 +0,0 @@

> -// SPDX-License-Identifier: GPL-2.0-only

> -/*

> - * Copyright (c) 2016, The Linux Foundation. All rights reserved.

> - */

> -

> -#include <linux/clk.h>

> -#include <linux/clk-provider.h>

> -

> -#include "dsi_pll.h"

> -#include "dsi.xml.h"

> -

> -/*

> - * DSI PLL 14nm - clock diagram (eg: DSI0):

> - *

> - *         dsi0n1_postdiv_clk

> - *                         |

> - *                         |

> - *                 +----+  |  +----+

> - *  dsi0vco_clk ---| n1 |--o--| /8 |-- dsi0pllbyte

> - *                 +----+  |  +----+

> - *                         |           dsi0n1_postdivby2_clk

> - *                         |   +----+  |

> - *                         o---| /2 |--o--|\

> - *                         |   +----+     | \   +----+

> - *                         |              |  |--| n2 |-- dsi0pll

> - *                         o--------------| /   +----+

> - *                                        |/

> - */

> -

> -#define POLL_MAX_READS			15

> -#define POLL_TIMEOUT_US			1000

> -

> -#define NUM_PROVIDED_CLKS		2

> -

> -#define VCO_REF_CLK_RATE		19200000

> -#define VCO_MIN_RATE			1300000000UL

> -#define VCO_MAX_RATE			2600000000UL

> -

> -#define DSI_BYTE_PLL_CLK		0

> -#define DSI_PIXEL_PLL_CLK		1

> -

> -#define DSI_PLL_DEFAULT_VCO_POSTDIV	1

> -

> -struct dsi_pll_input {

> -	u32 fref;	/* reference clk */

> -	u32 fdata;	/* bit clock rate */

> -	u32 dsiclk_sel; /* Mux configuration (see diagram) */

> -	u32 ssc_en;	/* SSC enable/disable */

> -	u32 ldo_en;

> -

> -	/* fixed params */

> -	u32 refclk_dbler_en;

> -	u32 vco_measure_time;

> -	u32 kvco_measure_time;

> -	u32 bandgap_timer;

> -	u32 pll_wakeup_timer;

> -	u32 plllock_cnt;

> -	u32 plllock_rng;

> -	u32 ssc_center;

> -	u32 ssc_adj_period;

> -	u32 ssc_spread;

> -	u32 ssc_freq;

> -	u32 pll_ie_trim;

> -	u32 pll_ip_trim;

> -	u32 pll_iptat_trim;

> -	u32 pll_cpcset_cur;

> -	u32 pll_cpmset_cur;

> -

> -	u32 pll_icpmset;

> -	u32 pll_icpcset;

> -

> -	u32 pll_icpmset_p;

> -	u32 pll_icpmset_m;

> -

> -	u32 pll_icpcset_p;

> -	u32 pll_icpcset_m;

> -

> -	u32 pll_lpf_res1;

> -	u32 pll_lpf_cap1;

> -	u32 pll_lpf_cap2;

> -	u32 pll_c3ctrl;

> -	u32 pll_r3ctrl;

> -};

> -

> -struct dsi_pll_output {

> -	u32 pll_txclk_en;

> -	u32 dec_start;

> -	u32 div_frac_start;

> -	u32 ssc_period;

> -	u32 ssc_step_size;

> -	u32 plllock_cmp;

> -	u32 pll_vco_div_ref;

> -	u32 pll_vco_count;

> -	u32 pll_kvco_div_ref;

> -	u32 pll_kvco_count;

> -	u32 pll_misc1;

> -	u32 pll_lpf2_postdiv;

> -	u32 pll_resetsm_cntrl;

> -	u32 pll_resetsm_cntrl2;

> -	u32 pll_resetsm_cntrl5;

> -	u32 pll_kvco_code;

> -

> -	u32 cmn_clk_cfg0;

> -	u32 cmn_clk_cfg1;

> -	u32 cmn_ldo_cntrl;

> -

> -	u32 pll_postdiv;

> -	u32 fcvo;

> -};

> -

> -struct pll_14nm_cached_state {

> -	unsigned long vco_rate;

> -	u8 n2postdiv;

> -	u8 n1postdiv;

> -};

> -

> -struct dsi_pll_14nm {

> -	struct msm_dsi_pll base;

> -

> -	int id;

> -	struct platform_device *pdev;

> -

> -	void __iomem *phy_cmn_mmio;

> -	void __iomem *mmio;

> -

> -	int vco_delay;

> -

> -	struct dsi_pll_input in;

> -	struct dsi_pll_output out;

> -

> -	/* protects REG_DSI_14nm_PHY_CMN_CLK_CFG0 register */

> -	spinlock_t postdiv_lock;

> -

> -	u64 vco_current_rate;

> -	u64 vco_ref_clk_rate;

> -

> -	/* private clocks: */

> -	struct clk_hw *hws[NUM_DSI_CLOCKS_MAX];

> -	u32 num_hws;

> -

> -	/* clock-provider: */

> -	struct clk_hw_onecell_data *hw_data;

> -

> -	struct pll_14nm_cached_state cached_state;

> -

> -	enum msm_dsi_phy_usecase uc;

> -	struct dsi_pll_14nm *slave;

> -};

> -

> -#define to_pll_14nm(x)	container_of(x, struct dsi_pll_14nm, base)

> -

> -/*

> - * Private struct for N1/N2 post-divider clocks. These clocks are similar to

> - * the generic clk_divider class of clocks. The only difference is that it

> - * also sets the slave DSI PLL's post-dividers if in Dual DSI mode

> - */

> -struct dsi_pll_14nm_postdiv {

> -	struct clk_hw hw;

> -

> -	/* divider params */

> -	u8 shift;

> -	u8 width;

> -	u8 flags; /* same flags as used by clk_divider struct */

> -

> -	struct dsi_pll_14nm *pll;

> -};

> -

> -#define to_pll_14nm_postdiv(_hw) container_of(_hw, struct dsi_pll_14nm_postdiv, hw)

> -

> -/*

> - * Global list of private DSI PLL struct pointers. We need this for Dual DSI

> - * mode, where the master PLL's clk_ops needs access the slave's private data

> - */

> -static struct dsi_pll_14nm *pll_14nm_list[DSI_MAX];

> -

> -static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm,

> -				    u32 nb_tries, u32 timeout_us)

> -{

> -	bool pll_locked = false;

> -	void __iomem *base = pll_14nm->mmio;

> -	u32 tries, val;

> -

> -	tries = nb_tries;

> -	while (tries--) {

> -		val = pll_read(base +

> -			       REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);

> -		pll_locked = !!(val & BIT(5));

> -

> -		if (pll_locked)

> -			break;

> -

> -		udelay(timeout_us);

> -	}

> -

> -	if (!pll_locked) {

> -		tries = nb_tries;

> -		while (tries--) {

> -			val = pll_read(base +

> -				REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);

> -			pll_locked = !!(val & BIT(0));

> -

> -			if (pll_locked)

> -				break;

> -

> -			udelay(timeout_us);

> -		}

> -	}

> -

> -	DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");

> -

> -	return pll_locked;

> -}

> -

> -static void dsi_pll_14nm_input_init(struct dsi_pll_14nm *pll)

> -{

> -	pll->in.fref = pll->vco_ref_clk_rate;

> -	pll->in.fdata = 0;

> -	pll->in.dsiclk_sel = 1;	/* Use the /2 path in Mux */

> -	pll->in.ldo_en = 0;	/* disabled for now */

> -

> -	/* fixed input */

> -	pll->in.refclk_dbler_en = 0;

> -	pll->in.vco_measure_time = 5;

> -	pll->in.kvco_measure_time = 5;

> -	pll->in.bandgap_timer = 4;

> -	pll->in.pll_wakeup_timer = 5;

> -	pll->in.plllock_cnt = 1;

> -	pll->in.plllock_rng = 0;

> -

> -	/*

> -	 * SSC is enabled by default. We might need DT props for configuring

> -	 * some SSC params like PPM and center/down spread etc.

> -	 */

> -	pll->in.ssc_en = 1;

> -	pll->in.ssc_center = 0;		/* down spread by default */

> -	pll->in.ssc_spread = 5;		/* PPM / 1000 */

> -	pll->in.ssc_freq = 31500;	/* default recommended */

> -	pll->in.ssc_adj_period = 37;

> -

> -	pll->in.pll_ie_trim = 4;

> -	pll->in.pll_ip_trim = 4;

> -	pll->in.pll_cpcset_cur = 1;

> -	pll->in.pll_cpmset_cur = 1;

> -	pll->in.pll_icpmset = 4;

> -	pll->in.pll_icpcset = 4;

> -	pll->in.pll_icpmset_p = 0;

> -	pll->in.pll_icpmset_m = 0;

> -	pll->in.pll_icpcset_p = 0;

> -	pll->in.pll_icpcset_m = 0;

> -	pll->in.pll_lpf_res1 = 3;

> -	pll->in.pll_lpf_cap1 = 11;

> -	pll->in.pll_lpf_cap2 = 1;

> -	pll->in.pll_iptat_trim = 7;

> -	pll->in.pll_c3ctrl = 2;

> -	pll->in.pll_r3ctrl = 1;

> -}

> -

> -#define CEIL(x, y)		(((x) + ((y) - 1)) / (y))

> -

> -static void pll_14nm_ssc_calc(struct dsi_pll_14nm *pll)

> -{

> -	u32 period, ssc_period;

> -	u32 ref, rem;

> -	u64 step_size;

> -

> -	DBG("vco=%lld ref=%lld", pll->vco_current_rate, pll->vco_ref_clk_rate);

> -

> -	ssc_period = pll->in.ssc_freq / 500;

> -	period = (u32)pll->vco_ref_clk_rate / 1000;

> -	ssc_period  = CEIL(period, ssc_period);

> -	ssc_period -= 1;

> -	pll->out.ssc_period = ssc_period;

> -

> -	DBG("ssc freq=%d spread=%d period=%d", pll->in.ssc_freq,

> -	    pll->in.ssc_spread, pll->out.ssc_period);

> -

> -	step_size = (u32)pll->vco_current_rate;

> -	ref = pll->vco_ref_clk_rate;

> -	ref /= 1000;

> -	step_size = div_u64(step_size, ref);

> -	step_size <<= 20;

> -	step_size = div_u64(step_size, 1000);

> -	step_size *= pll->in.ssc_spread;

> -	step_size = div_u64(step_size, 1000);

> -	step_size *= (pll->in.ssc_adj_period + 1);

> -

> -	rem = 0;

> -	step_size = div_u64_rem(step_size, ssc_period + 1, &rem);

> -	if (rem)

> -		step_size++;

> -

> -	DBG("step_size=%lld", step_size);

> -

> -	step_size &= 0x0ffff;	/* take lower 16 bits */

> -

> -	pll->out.ssc_step_size = step_size;

> -}
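
To make the fixed-point chain above concrete, here is a standalone userspace
sketch of the same arithmetic with illustrative inputs (1.5 GHz VCO, 19.2 MHz
reference, the default ssc_freq/ssc_spread/ssc_adj_period values); it is not
part of the patch:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t vco = 1500000000ULL, ref = 19200000ULL;
            uint32_t ssc_freq = 31500, ssc_spread = 5, ssc_adj_period = 37;

            /* ssc_period = CEIL(ref_khz, ssc_freq / 500) - 1 */
            uint32_t per = ssc_freq / 500;
            uint32_t ssc_period = (uint32_t)((ref / 1000 + per - 1) / per) - 1;

            /* step_size follows the same shift/divide sequence as the code above */
            uint64_t step = vco / (ref / 1000);
            step = ((step << 20) / 1000 * ssc_spread) / 1000;
            step *= ssc_adj_period + 1;
            step = (step + ssc_period) / (ssc_period + 1);   /* divide, rounding up */

            printf("ssc_period=%u step_size=%llu\n",
                   ssc_period, (unsigned long long)(step & 0xffff));
            return 0;
    }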

> -

> -static void pll_14nm_dec_frac_calc(struct dsi_pll_14nm *pll)

> -{

> -	struct dsi_pll_input *pin = &pll->in;

> -	struct dsi_pll_output *pout = &pll->out;

> -	u64 multiplier = BIT(20);

> -	u64 dec_start_multiple, dec_start, pll_comp_val;

> -	u32 duration, div_frac_start;

> -	u64 vco_clk_rate = pll->vco_current_rate;

> -	u64 fref = pll->vco_ref_clk_rate;

> -

> -	DBG("vco_clk_rate=%lld ref_clk_rate=%lld", vco_clk_rate, fref);

> -

> -	dec_start_multiple = div_u64(vco_clk_rate * multiplier, fref);

> -	div_u64_rem(dec_start_multiple, multiplier, &div_frac_start);

> -

> -	dec_start = div_u64(dec_start_multiple, multiplier);

> -

> -	pout->dec_start = (u32)dec_start;

> -	pout->div_frac_start = div_frac_start;

> -

> -	if (pin->plllock_cnt == 0)

> -		duration = 1024;

> -	else if (pin->plllock_cnt == 1)

> -		duration = 256;

> -	else if (pin->plllock_cnt == 2)

> -		duration = 128;

> -	else

> -		duration = 32;

> -

> -	pll_comp_val = duration * dec_start_multiple;

> -	pll_comp_val = div_u64(pll_comp_val, multiplier);

> -	do_div(pll_comp_val, 10);

> -

> -	pout->plllock_cmp = (u32)pll_comp_val;

> -

> -	pout->pll_txclk_en = 1;

> -	pout->cmn_ldo_cntrl = 0x3c;

> -}
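
The decimal/fractional split computed above is simply the VCO-to-reference
ratio expressed in 20-bit fixed point. A minimal sketch with illustrative
numbers (1.5 GHz VCO, 19.2 MHz reference), not part of the patch:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t vco = 1500000000ULL, fref = 19200000ULL;
            uint64_t mult = 1ULL << 20;

            uint64_t dec_start_multiple = vco * mult / fref;      /* 78.125 in Q20 */
            uint32_t dec_start = dec_start_multiple / mult;       /* integer part: 78 */
            uint32_t div_frac_start = dec_start_multiple % mult;  /* 0.125 * 2^20 = 131072 */

            printf("dec_start=%u div_frac_start=%u\n", dec_start, div_frac_start);
            return 0;
    }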

> -

> -static u32 pll_14nm_kvco_slop(u32 vrate)

> -{

> -	u32 slop = 0;

> -

> -	if (vrate > VCO_MIN_RATE && vrate <= 1800000000UL)

> -		slop =  600;

> -	else if (vrate > 1800000000UL && vrate < 2300000000UL)

> -		slop = 400;

> -	else if (vrate > 2300000000UL && vrate < VCO_MAX_RATE)

> -		slop = 280;

> -

> -	return slop;

> -}

> -

> -static void pll_14nm_calc_vco_count(struct dsi_pll_14nm *pll)

> -{

> -	struct dsi_pll_input *pin = &pll->in;

> -	struct dsi_pll_output *pout = &pll->out;

> -	u64 vco_clk_rate = pll->vco_current_rate;

> -	u64 fref = pll->vco_ref_clk_rate;

> -	u64 data;

> -	u32 cnt;

> -

> -	data = fref * pin->vco_measure_time;

> -	do_div(data, 1000000);

> -	data &= 0x03ff;	/* 10 bits */

> -	data -= 2;

> -	pout->pll_vco_div_ref = data;

> -

> -	data = div_u64(vco_clk_rate, 1000000);	/* unit is Mhz */

> -	data *= pin->vco_measure_time;

> -	do_div(data, 10);

> -	pout->pll_vco_count = data;

> -

> -	data = fref * pin->kvco_measure_time;

> -	do_div(data, 1000000);

> -	data &= 0x03ff;	/* 10 bits */

> -	data -= 1;

> -	pout->pll_kvco_div_ref = data;

> -

> -	cnt = pll_14nm_kvco_slop(vco_clk_rate);

> -	cnt *= 2;

> -	cnt /= 100;

> -	cnt *= pin->kvco_measure_time;

> -	pout->pll_kvco_count = cnt;

> -

> -	pout->pll_misc1 = 16;

> -	pout->pll_resetsm_cntrl = 48;

> -	pout->pll_resetsm_cntrl2 = pin->bandgap_timer << 3;

> -	pout->pll_resetsm_cntrl5 = pin->pll_wakeup_timer;

> -	pout->pll_kvco_code = 0;

> -}

> -

> -static void pll_db_commit_ssc(struct dsi_pll_14nm *pll)

> -{

> -	void __iomem *base = pll->mmio;

> -	struct dsi_pll_input *pin = &pll->in;

> -	struct dsi_pll_output *pout = &pll->out;

> -	u8 data;

> -

> -	data = pin->ssc_adj_period;

> -	data &= 0x0ff;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER1, data);

> -	data = (pin->ssc_adj_period >> 8);

> -	data &= 0x03;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER2, data);

> -

> -	data = pout->ssc_period;

> -	data &= 0x0ff;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER1, data);

> -	data = (pout->ssc_period >> 8);

> -	data &= 0x0ff;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER2, data);

> -

> -	data = pout->ssc_step_size;

> -	data &= 0x0ff;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE1, data);

> -	data = (pout->ssc_step_size >> 8);

> -	data &= 0x0ff;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE2, data);

> -

> -	data = (pin->ssc_center & 0x01);

> -	data <<= 1;

> -	data |= 0x01; /* enable */

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_EN_CENTER, data);

> -

> -	wmb();	/* make sure register committed */

> -}

> -

> -static void pll_db_commit_common(struct dsi_pll_14nm *pll,

> -				 struct dsi_pll_input *pin,

> -				 struct dsi_pll_output *pout)

> -{

> -	void __iomem *base = pll->mmio;

> -	u8 data;

> -

> -	/* confgiure the non frequency dependent pll registers */

> -	data = 0;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_SYSCLK_EN_RESET, data);

> -

> -	data = pout->pll_txclk_en;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_TXCLK_EN, data);

> -

> -	data = pout->pll_resetsm_cntrl;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL, data);

> -	data = pout->pll_resetsm_cntrl2;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL2, data);

> -	data = pout->pll_resetsm_cntrl5;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL5, data);

> -

> -	data = pout->pll_vco_div_ref & 0xff;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF1, data);

> -	data = (pout->pll_vco_div_ref >> 8) & 0x3;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF2, data);

> -

> -	data = pout->pll_kvco_div_ref & 0xff;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF1, data);

> -	data = (pout->pll_kvco_div_ref >> 8) & 0x3;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF2, data);

> -

> -	data = pout->pll_misc1;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_MISC1, data);

> -

> -	data = pin->pll_ie_trim;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_IE_TRIM, data);

> -

> -	data = pin->pll_ip_trim;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_IP_TRIM, data);

> -

> -	data = pin->pll_cpmset_cur << 3 | pin->pll_cpcset_cur;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_CP_SET_CUR, data);

> -

> -	data = pin->pll_icpcset_p << 3 | pin->pll_icpcset_m;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPCSET, data);

> -

> -	data = pin->pll_icpmset_p << 3 | pin->pll_icpcset_m;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPMSET, data);

> -

> -	data = pin->pll_icpmset << 3 | pin->pll_icpcset;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICP_SET, data);

> -

> -	data = pin->pll_lpf_cap2 << 4 | pin->pll_lpf_cap1;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_LPF1, data);

> -

> -	data = pin->pll_iptat_trim;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_IPTAT_TRIM, data);

> -

> -	data = pin->pll_c3ctrl | pin->pll_r3ctrl << 4;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_CRCTRL, data);

> -}

> -

> -static void pll_14nm_software_reset(struct dsi_pll_14nm *pll_14nm)

> -{

> -	void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;

> -

> -	/* de assert pll start and apply pll sw reset */

> -

> -	/* stop pll */

> -	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0);

> -

> -	/* pll sw reset */

> -	pll_write_udelay(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x20, 10);

> -	wmb();	/* make sure register committed */

> -

> -	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0);

> -	wmb();	/* make sure register committed */

> -}

> -

> -static void pll_db_commit_14nm(struct dsi_pll_14nm *pll,

> -			       struct dsi_pll_input *pin,

> -			       struct dsi_pll_output *pout)

> -{

> -	void __iomem *base = pll->mmio;

> -	void __iomem *cmn_base = pll->phy_cmn_mmio;

> -	u8 data;

> -

> -	DBG("DSI%d PLL", pll->id);

> -

> -	data = pout->cmn_ldo_cntrl;

> -	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL, data);

> -

> -	pll_db_commit_common(pll, pin, pout);

> -

> -	pll_14nm_software_reset(pll);

> -

> -	data = pin->dsiclk_sel; /* set dsiclk_sel = 1  */

> -	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG1, data);

> -

> -	data = 0xff; /* data, clk, pll normal operation */

> -	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_0, data);

> -

> -	/* configure the frequency dependent pll registers */

> -	data = pout->dec_start;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_DEC_START, data);

> -

> -	data = pout->div_frac_start & 0xff;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1, data);

> -	data = (pout->div_frac_start >> 8) & 0xff;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2, data);

> -	data = (pout->div_frac_start >> 16) & 0xf;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3, data);

> -

> -	data = pout->plllock_cmp & 0xff;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP1, data);

> -

> -	data = (pout->plllock_cmp >> 8) & 0xff;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP2, data);

> -

> -	data = (pout->plllock_cmp >> 16) & 0x3;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP3, data);

> -

> -	data = pin->plllock_cnt << 1 | pin->plllock_rng << 3;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP_EN, data);

> -

> -	data = pout->pll_vco_count & 0xff;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT1, data);

> -	data = (pout->pll_vco_count >> 8) & 0xff;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT2, data);

> -

> -	data = pout->pll_kvco_count & 0xff;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT1, data);

> -	data = (pout->pll_kvco_count >> 8) & 0x3;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT2, data);

> -

> -	data = (pout->pll_postdiv - 1) << 4 | pin->pll_lpf_res1;

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_LPF2_POSTDIV, data);

> -

> -	if (pin->ssc_en)

> -		pll_db_commit_ssc(pll);

> -

> -	wmb();	/* make sure register committed */

> -}

> -

> -/*

> - * VCO clock Callbacks

> - */

> -static int dsi_pll_14nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,

> -				     unsigned long parent_rate)

> -{

> -	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);

> -	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);

> -	struct dsi_pll_input *pin = &pll_14nm->in;

> -	struct dsi_pll_output *pout = &pll_14nm->out;

> -

> -	DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_14nm->id, rate,

> -	    parent_rate);

> -

> -	pll_14nm->vco_current_rate = rate;

> -	pll_14nm->vco_ref_clk_rate = VCO_REF_CLK_RATE;

> -

> -	dsi_pll_14nm_input_init(pll_14nm);

> -

> -	/*

> -	 * This configures the post divider internal to the VCO. It's

> -	 * fixed to divide by 1 for now.

> -	 *

> -	 * tx_band = pll_postdiv.

> -	 * 0: divided by 1

> -	 * 1: divided by 2

> -	 * 2: divided by 4

> -	 * 3: divided by 8

> -	 */

> -	pout->pll_postdiv = DSI_PLL_DEFAULT_VCO_POSTDIV;

> -

> -	pll_14nm_dec_frac_calc(pll_14nm);

> -

> -	if (pin->ssc_en)

> -		pll_14nm_ssc_calc(pll_14nm);

> -

> -	pll_14nm_calc_vco_count(pll_14nm);

> -

> -	/* commit the slave DSI PLL registers if we're master. Note that we

> -	 * don't lock the slave PLL. We just ensure that the PLL/PHY registers

> -	 * of the master and slave are identical

> -	 */

> -	if (pll_14nm->uc == MSM_DSI_PHY_MASTER) {

> -		struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;

> -

> -		pll_db_commit_14nm(pll_14nm_slave, pin, pout);

> -	}

> -

> -	pll_db_commit_14nm(pll_14nm, pin, pout);

> -

> -	return 0;

> -}

> -

> -static unsigned long dsi_pll_14nm_vco_recalc_rate(struct clk_hw *hw,

> -						  unsigned long parent_rate)

> -{

> -	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);

> -	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);

> -	void __iomem *base = pll_14nm->mmio;

> -	u64 vco_rate, multiplier = BIT(20);

> -	u32 div_frac_start;

> -	u32 dec_start;

> -	u64 ref_clk = parent_rate;

> -

> -	dec_start = pll_read(base + REG_DSI_14nm_PHY_PLL_DEC_START);

> -	dec_start &= 0x0ff;

> -

> -	DBG("dec_start = %x", dec_start);

> -

> -	div_frac_start = (pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3)

> -				& 0xf) << 16;

> -	div_frac_start |= (pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2)

> -				& 0xff) << 8;

> -	div_frac_start |= pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1)

> -				& 0xff;

> -

> -	DBG("div_frac_start = %x", div_frac_start);

> -

> -	vco_rate = ref_clk * dec_start;

> -

> -	vco_rate += ((ref_clk * div_frac_start) / multiplier);

> -

> -	/*

> -	 * Recalculating the rate from dec_start and frac_start doesn't end up

> -	 * the rate we originally set. Convert the freq to KHz, round it up and

> -	 * convert it back to MHz.

> -	 */

> -	vco_rate = DIV_ROUND_UP_ULL(vco_rate, 1000) * 1000;

> -

> -	DBG("returning vco rate = %lu", (unsigned long)vco_rate);

> -

> -	return (unsigned long)vco_rate;

> -}

> -

> -static const struct clk_ops clk_ops_dsi_pll_14nm_vco = {

> -	.round_rate = msm_dsi_pll_helper_clk_round_rate,

> -	.set_rate = dsi_pll_14nm_vco_set_rate,

> -	.recalc_rate = dsi_pll_14nm_vco_recalc_rate,

> -	.prepare = msm_dsi_pll_helper_clk_prepare,

> -	.unprepare = msm_dsi_pll_helper_clk_unprepare,

> -};

> -

> -/*

> - * N1 and N2 post-divider clock callbacks

> - */

> -#define div_mask(width)	((1 << (width)) - 1)

> -static unsigned long dsi_pll_14nm_postdiv_recalc_rate(struct clk_hw *hw,

> -						      unsigned long parent_rate)

> -{

> -	struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);

> -	struct dsi_pll_14nm *pll_14nm = postdiv->pll;

> -	void __iomem *base = pll_14nm->phy_cmn_mmio;

> -	u8 shift = postdiv->shift;

> -	u8 width = postdiv->width;

> -	u32 val;

> -

> -	DBG("DSI%d PLL parent rate=%lu", pll_14nm->id, parent_rate);

> -

> -	val = pll_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0) >> shift;

> -	val &= div_mask(width);

> -

> -	return divider_recalc_rate(hw, parent_rate, val, NULL,

> -				   postdiv->flags, width);

> -}

> -

> -static long dsi_pll_14nm_postdiv_round_rate(struct clk_hw *hw,

> -					    unsigned long rate,

> -					    unsigned long *prate)

> -{

> -	struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);

> -	struct dsi_pll_14nm *pll_14nm = postdiv->pll;

> -

> -	DBG("DSI%d PLL parent rate=%lu", pll_14nm->id, rate);

> -

> -	return divider_round_rate(hw, rate, prate, NULL,

> -				  postdiv->width,

> -				  postdiv->flags);

> -}

> -

> -static int dsi_pll_14nm_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,

> -					 unsigned long parent_rate)

> -{

> -	struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);

> -	struct dsi_pll_14nm *pll_14nm = postdiv->pll;

> -	void __iomem *base = pll_14nm->phy_cmn_mmio;

> -	spinlock_t *lock = &pll_14nm->postdiv_lock;

> -	u8 shift = postdiv->shift;

> -	u8 width = postdiv->width;

> -	unsigned int value;

> -	unsigned long flags = 0;

> -	u32 val;

> -

> -	DBG("DSI%d PLL parent rate=%lu parent rate %lu", pll_14nm->id, rate,

> -	    parent_rate);

> -

> -	value = divider_get_val(rate, parent_rate, NULL, postdiv->width,

> -				postdiv->flags);

> -

> -	spin_lock_irqsave(lock, flags);

> -

> -	val = pll_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);

> -	val &= ~(div_mask(width) << shift);

> -

> -	val |= value << shift;

> -	pll_write(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val);

> -

> -	/* If we're master in dual DSI mode, then the slave PLL's post-dividers

> -	 * follow the master's post dividers

> -	 */

> -	if (pll_14nm->uc == MSM_DSI_PHY_MASTER) {

> -		struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;

> -		void __iomem *slave_base = pll_14nm_slave->phy_cmn_mmio;

> -

> -		pll_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val);

> -	}

> -

> -	spin_unlock_irqrestore(lock, flags);

> -

> -	return 0;

> -}

> -

> -static const struct clk_ops clk_ops_dsi_pll_14nm_postdiv = {

> -	.recalc_rate = dsi_pll_14nm_postdiv_recalc_rate,

> -	.round_rate = dsi_pll_14nm_postdiv_round_rate,

> -	.set_rate = dsi_pll_14nm_postdiv_set_rate,

> -};

> -

> -/*

> - * PLL Callbacks

> - */

> -

> -static int dsi_pll_14nm_enable_seq(struct msm_dsi_pll *pll)

> -{

> -	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);

> -	void __iomem *base = pll_14nm->mmio;

> -	void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;

> -	bool locked;

> -

> -	DBG("");

> -

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_VREF_CFG1, 0x10);

> -	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 1);

> -

> -	locked = pll_14nm_poll_for_ready(pll_14nm, POLL_MAX_READS,

> -					 POLL_TIMEOUT_US);

> -

> -	if (unlikely(!locked))

> -		DRM_DEV_ERROR(&pll_14nm->pdev->dev, "DSI PLL lock failed\n");

> -	else

> -		DBG("DSI PLL lock success");

> -

> -	return locked ? 0 : -EINVAL;

> -}

> -

> -static void dsi_pll_14nm_disable_seq(struct msm_dsi_pll *pll)

> -{

> -	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);

> -	void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;

> -

> -	DBG("");

> -

> -	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0);

> -}

> -

> -static void dsi_pll_14nm_save_state(struct msm_dsi_pll *pll)

> -{

> -	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);

> -	struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state;

> -	void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;

> -	u32 data;

> -

> -	data = pll_read(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);

> -

> -	cached_state->n1postdiv = data & 0xf;

> -	cached_state->n2postdiv = (data >> 4) & 0xf;

> -

> -	DBG("DSI%d PLL save state %x %x", pll_14nm->id,

> -	    cached_state->n1postdiv, cached_state->n2postdiv);

> -

> -	cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw);

> -}

> -

> -static int dsi_pll_14nm_restore_state(struct msm_dsi_pll *pll)

> -{

> -	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);

> -	struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state;

> -	void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;

> -	u32 data;

> -	int ret;

> -

> -	ret = dsi_pll_14nm_vco_set_rate(&pll->clk_hw,

> -					cached_state->vco_rate, 0);

> -	if (ret) {

> -		DRM_DEV_ERROR(&pll_14nm->pdev->dev,

> -			"restore vco rate failed. ret=%d\n", ret);

> -		return ret;

> -	}

> -

> -	data = cached_state->n1postdiv | (cached_state->n2postdiv << 4);

> -

> -	DBG("DSI%d PLL restore state %x %x", pll_14nm->id,

> -	    cached_state->n1postdiv, cached_state->n2postdiv);

> -

> -	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data);

> -

> -	/* also restore post-dividers for slave DSI PLL */

> -	if (pll_14nm->uc == MSM_DSI_PHY_MASTER) {

> -		struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;

> -		void __iomem *slave_base = pll_14nm_slave->phy_cmn_mmio;

> -

> -		pll_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data);

> -	}

> -

> -	return 0;

> -}

> -

> -static int dsi_pll_14nm_set_usecase(struct msm_dsi_pll *pll,

> -				    enum msm_dsi_phy_usecase uc)

> -{

> -	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);

> -	void __iomem *base = pll_14nm->mmio;

> -	u32 clkbuflr_en, bandgap = 0;

> -

> -	switch (uc) {

> -	case MSM_DSI_PHY_STANDALONE:

> -		clkbuflr_en = 0x1;

> -		break;

> -	case MSM_DSI_PHY_MASTER:

> -		clkbuflr_en = 0x3;

> -		pll_14nm->slave = pll_14nm_list[(pll_14nm->id + 1) % DSI_MAX];

> -		break;

> -	case MSM_DSI_PHY_SLAVE:

> -		clkbuflr_en = 0x0;

> -		bandgap = 0x3;

> -		break;

> -	default:

> -		return -EINVAL;

> -	}

> -

> -	pll_write(base + REG_DSI_14nm_PHY_PLL_CLKBUFLR_EN, clkbuflr_en);

> -	if (bandgap)

> -		pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_BANDGAP, bandgap);

> -

> -	pll_14nm->uc = uc;

> -

> -	return 0;

> -}

> -

> -static int dsi_pll_14nm_get_provider(struct msm_dsi_pll *pll,

> -				     struct clk **byte_clk_provider,

> -				     struct clk **pixel_clk_provider)

> -{

> -	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);

> -	struct clk_hw_onecell_data *hw_data = pll_14nm->hw_data;

> -

> -	if (byte_clk_provider)

> -		*byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk;

> -	if (pixel_clk_provider)

> -		*pixel_clk_provider = hw_data->hws[DSI_PIXEL_PLL_CLK]->clk;

> -

> -	return 0;

> -}

> -

> -static void dsi_pll_14nm_destroy(struct msm_dsi_pll *pll)

> -{

> -	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);

> -	struct platform_device *pdev = pll_14nm->pdev;

> -	int num_hws = pll_14nm->num_hws;

> -

> -	of_clk_del_provider(pdev->dev.of_node);

> -

> -	while (num_hws--)

> -		clk_hw_unregister(pll_14nm->hws[num_hws]);

> -}

> -

> -static struct clk_hw *pll_14nm_postdiv_register(struct dsi_pll_14nm *pll_14nm,

> -						const char *name,

> -						const char *parent_name,

> -						unsigned long flags,

> -						u8 shift)

> -{

> -	struct dsi_pll_14nm_postdiv *pll_postdiv;

> -	struct device *dev = &pll_14nm->pdev->dev;

> -	struct clk_init_data postdiv_init = {

> -		.parent_names = (const char *[]) { parent_name },

> -		.num_parents = 1,

> -		.name = name,

> -		.flags = flags,

> -		.ops = &clk_ops_dsi_pll_14nm_postdiv,

> -	};

> -	int ret;

> -

> -	pll_postdiv = devm_kzalloc(dev, sizeof(*pll_postdiv), GFP_KERNEL);

> -	if (!pll_postdiv)

> -		return ERR_PTR(-ENOMEM);

> -

> -	pll_postdiv->pll = pll_14nm;

> -	pll_postdiv->shift = shift;

> -	/* both N1 and N2 postdividers are 4 bits wide */

> -	pll_postdiv->width = 4;

> -	/* range of each divider is from 1 to 15 */

> -	pll_postdiv->flags = CLK_DIVIDER_ONE_BASED;

> -	pll_postdiv->hw.init = &postdiv_init;

> -

> -	ret = clk_hw_register(dev, &pll_postdiv->hw);

> -	if (ret)

> -		return ERR_PTR(ret);

> -

> -	return &pll_postdiv->hw;

> -}

> -

> -static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm)

> -{

> -	char clk_name[32], parent[32], vco_name[32];

> -	struct clk_init_data vco_init = {

> -		.parent_names = (const char *[]){ "xo" },

> -		.num_parents = 1,

> -		.name = vco_name,

> -		.flags = CLK_IGNORE_UNUSED,

> -		.ops = &clk_ops_dsi_pll_14nm_vco,

> -	};

> -	struct device *dev = &pll_14nm->pdev->dev;

> -	struct clk_hw **hws = pll_14nm->hws;

> -	struct clk_hw_onecell_data *hw_data;

> -	struct clk_hw *hw;

> -	int num = 0;

> -	int ret;

> -

> -	DBG("DSI%d", pll_14nm->id);

> -

> -	hw_data = devm_kzalloc(dev, sizeof(*hw_data) +

> -			       NUM_PROVIDED_CLKS * sizeof(struct clk_hw *),

> -			       GFP_KERNEL);

> -	if (!hw_data)

> -		return -ENOMEM;

> -

> -	snprintf(vco_name, 32, "dsi%dvco_clk", pll_14nm->id);

> -	pll_14nm->base.clk_hw.init = &vco_init;

> -

> -	ret = clk_hw_register(dev, &pll_14nm->base.clk_hw);

> -	if (ret)

> -		return ret;

> -

> -	hws[num++] = &pll_14nm->base.clk_hw;

> -

> -	snprintf(clk_name, 32, "dsi%dn1_postdiv_clk", pll_14nm->id);

> -	snprintf(parent, 32, "dsi%dvco_clk", pll_14nm->id);

> -

> -	/* N1 postdiv, bits 0-3 in REG_DSI_14nm_PHY_CMN_CLK_CFG0 */

> -	hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent,

> -				       CLK_SET_RATE_PARENT, 0);

> -	if (IS_ERR(hw))

> -		return PTR_ERR(hw);

> -

> -	hws[num++] = hw;

> -

> -	snprintf(clk_name, 32, "dsi%dpllbyte", pll_14nm->id);

> -	snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->id);

> -

> -	/* DSI Byte clock = VCO_CLK / N1 / 8 */

> -	hw = clk_hw_register_fixed_factor(dev, clk_name, parent,

> -					  CLK_SET_RATE_PARENT, 1, 8);

> -	if (IS_ERR(hw))

> -		return PTR_ERR(hw);

> -

> -	hws[num++] = hw;

> -	hw_data->hws[DSI_BYTE_PLL_CLK] = hw;

> -

> -	snprintf(clk_name, 32, "dsi%dn1_postdivby2_clk", pll_14nm->id);

> -	snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->id);

> -

> -	/*

> -	 * Skip the mux for now, force DSICLK_SEL to 1, Add a /2 divider

> -	 * on the way. Don't let it set parent.

> -	 */

> -	hw = clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 1, 2);

> -	if (IS_ERR(hw))

> -		return PTR_ERR(hw);

> -

> -	hws[num++] = hw;

> -

> -	snprintf(clk_name, 32, "dsi%dpll", pll_14nm->id);

> -	snprintf(parent, 32, "dsi%dn1_postdivby2_clk", pll_14nm->id);

> -

> -	/* DSI pixel clock = VCO_CLK / N1 / 2 / N2

> -	 * This is the output of N2 post-divider, bits 4-7 in

> -	 * REG_DSI_14nm_PHY_CMN_CLK_CFG0. Don't let it set parent.

> -	 */

> -	hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent, 0, 4);

> -	if (IS_ERR(hw))

> -		return PTR_ERR(hw);

> -

> -	hws[num++] = hw;

> -	hw_data->hws[DSI_PIXEL_PLL_CLK]	= hw;

> -

> -	pll_14nm->num_hws = num;

> -

> -	hw_data->num = NUM_PROVIDED_CLKS;

> -	pll_14nm->hw_data = hw_data;

> -

> -	ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,

> -				     pll_14nm->hw_data);

> -	if (ret) {

> -		DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);

> -		return ret;

> -	}

> -

> -	return 0;

> -}

> -

> -struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id)

> -{

> -	struct dsi_pll_14nm *pll_14nm;

> -	struct msm_dsi_pll *pll;

> -	int ret;

> -

> -	if (!pdev)

> -		return ERR_PTR(-ENODEV);

> -

> -	pll_14nm = devm_kzalloc(&pdev->dev, sizeof(*pll_14nm), GFP_KERNEL);

> -	if (!pll_14nm)

> -		return ERR_PTR(-ENOMEM);

> -

> -	DBG("PLL%d", id);

> -

> -	pll_14nm->pdev = pdev;

> -	pll_14nm->id = id;

> -	pll_14nm_list[id] = pll_14nm;

> -

> -	pll_14nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");

> -	if (IS_ERR_OR_NULL(pll_14nm->phy_cmn_mmio)) {

> -		DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");

> -		return ERR_PTR(-ENOMEM);

> -	}

> -

> -	pll_14nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");

> -	if (IS_ERR_OR_NULL(pll_14nm->mmio)) {

> -		DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");

> -		return ERR_PTR(-ENOMEM);

> -	}

> -

> -	spin_lock_init(&pll_14nm->postdiv_lock);

> -

> -	pll = &pll_14nm->base;

> -	pll->min_rate = VCO_MIN_RATE;

> -	pll->max_rate = VCO_MAX_RATE;

> -	pll->get_provider = dsi_pll_14nm_get_provider;

> -	pll->destroy = dsi_pll_14nm_destroy;

> -	pll->disable_seq = dsi_pll_14nm_disable_seq;

> -	pll->save_state = dsi_pll_14nm_save_state;

> -	pll->restore_state = dsi_pll_14nm_restore_state;

> -	pll->set_usecase = dsi_pll_14nm_set_usecase;

> -

> -	pll_14nm->vco_delay = 1;

> -

> -	pll->en_seq_cnt = 1;

> -	pll->enable_seqs[0] = dsi_pll_14nm_enable_seq;

> -

> -	ret = pll_14nm_register(pll_14nm);

> -	if (ret) {

> -		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);

> -		return ERR_PTR(ret);

> -	}

> -

> -	return pll;

> -}

> diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c

> deleted file mode 100644

> index 37a1f996a588..000000000000

> --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c

> +++ /dev/null

> @@ -1,643 +0,0 @@

> -// SPDX-License-Identifier: GPL-2.0-only

> -/*

> - * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.

> - */

> -

> -#include <linux/clk.h>

> -#include <linux/clk-provider.h>

> -

> -#include "dsi_pll.h"

> -#include "dsi.xml.h"

> -

> -/*

> - * DSI PLL 28nm - clock diagram (eg: DSI0):

> - *

> - *         dsi0analog_postdiv_clk

> - *                             |         dsi0indirect_path_div2_clk

> - *                             |          |

> - *                   +------+  |  +----+  |  |\   dsi0byte_mux

> - *  dsi0vco_clk --o--| DIV1 |--o--| /2 |--o--| \   |

> - *                |  +------+     +----+     | m|  |  +----+

> - *                |                          | u|--o--| /4 |-- dsi0pllbyte

> - *                |                          | x|     +----+

> - *                o--------------------------| /

> - *                |                          |/

> - *                |          +------+

> - *                o----------| DIV3 |------------------------- dsi0pll

> - *                           +------+

> - */
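
For orientation, the two provided clocks in this diagram also reduce to short
divider chains; a small standalone sketch with hypothetical divider values
(illustrative only, not part of the driver):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t vco = 600000000ULL;     /* example rate inside the 350-750 MHz VCO range */
            unsigned int div1 = 1, div3 = 8; /* hypothetical POSTDIV1/POSTDIV3 settings */

            /* byte clock via the indirect path: VCO / DIV1 / 2 / 4 */
            uint64_t byte_clk = vco / div1 / 2 / 4;
            /* pixel clock: VCO / DIV3 */
            uint64_t pixel_clk = vco / div3;

            printf("byte=%llu Hz pixel=%llu Hz\n",
                   (unsigned long long)byte_clk, (unsigned long long)pixel_clk);
            return 0;
    }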

> -

> -#define POLL_MAX_READS			10

> -#define POLL_TIMEOUT_US		50

> -

> -#define NUM_PROVIDED_CLKS		2

> -

> -#define VCO_REF_CLK_RATE		19200000

> -#define VCO_MIN_RATE			350000000

> -#define VCO_MAX_RATE			750000000

> -

> -#define DSI_BYTE_PLL_CLK		0

> -#define DSI_PIXEL_PLL_CLK		1

> -

> -#define LPFR_LUT_SIZE			10

> -struct lpfr_cfg {

> -	unsigned long vco_rate;

> -	u32 resistance;

> -};

> -

> -/* Loop filter resistance: */

> -static const struct lpfr_cfg lpfr_lut[LPFR_LUT_SIZE] = {

> -	{ 479500000,  8 },

> -	{ 480000000, 11 },

> -	{ 575500000,  8 },

> -	{ 576000000, 12 },

> -	{ 610500000,  8 },

> -	{ 659500000,  9 },

> -	{ 671500000, 10 },

> -	{ 672000000, 14 },

> -	{ 708500000, 10 },

> -	{ 750000000, 11 },

> -};

> -

> -struct pll_28nm_cached_state {

> -	unsigned long vco_rate;

> -	u8 postdiv3;

> -	u8 postdiv1;

> -	u8 byte_mux;

> -};

> -

> -struct dsi_pll_28nm {

> -	struct msm_dsi_pll base;

> -

> -	int id;

> -	struct platform_device *pdev;

> -	void __iomem *mmio;

> -

> -	int vco_delay;

> -

> -	/* private clocks: */

> -	struct clk *clks[NUM_DSI_CLOCKS_MAX];

> -	u32 num_clks;

> -

> -	/* clock-provider: */

> -	struct clk *provided_clks[NUM_PROVIDED_CLKS];

> -	struct clk_onecell_data clk_data;

> -

> -	struct pll_28nm_cached_state cached_state;

> -};

> -

> -#define to_pll_28nm(x)	container_of(x, struct dsi_pll_28nm, base)

> -

> -static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,

> -				u32 nb_tries, u32 timeout_us)

> -{

> -	bool pll_locked = false;

> -	u32 val;

> -

> -	while (nb_tries--) {

> -		val = pll_read(pll_28nm->mmio + REG_DSI_28nm_PHY_PLL_STATUS);

> -		pll_locked = !!(val & DSI_28nm_PHY_PLL_STATUS_PLL_RDY);

> -

> -		if (pll_locked)

> -			break;

> -

> -		udelay(timeout_us);

> -	}

> -	DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");

> -

> -	return pll_locked;

> -}

> -

> -static void pll_28nm_software_reset(struct dsi_pll_28nm *pll_28nm)

> -{

> -	void __iomem *base = pll_28nm->mmio;

> -

> -	/*

> -	 * Add HW recommended delays after toggling the software

> -	 * reset bit off and back on.

> -	 */

> -	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG,

> -			DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, 1);

> -	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG, 0x00, 1);

> -}

> -

> -/*

> - * Clock Callbacks

> - */

> -static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,

> -		unsigned long parent_rate)

> -{

> -	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);

> -	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> -	struct device *dev = &pll_28nm->pdev->dev;

> -	void __iomem *base = pll_28nm->mmio;

> -	unsigned long div_fbx1000, gen_vco_clk;

> -	u32 refclk_cfg, frac_n_mode, frac_n_value;

> -	u32 sdm_cfg0, sdm_cfg1, sdm_cfg2, sdm_cfg3;

> -	u32 cal_cfg10, cal_cfg11;

> -	u32 rem;

> -	int i;

> -

> -	VERB("rate=%lu, parent's=%lu", rate, parent_rate);

> -

> -	/* Force postdiv2 to be div-4 */

> -	pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV2_CFG, 3);

> -

> -	/* Configure the Loop filter resistance */

> -	for (i = 0; i < LPFR_LUT_SIZE; i++)

> -		if (rate <= lpfr_lut[i].vco_rate)

> -			break;

> -	if (i == LPFR_LUT_SIZE) {

> -		DRM_DEV_ERROR(dev, "unable to get loop filter resistance. vco=%lu\n",

> -				rate);

> -		return -EINVAL;

> -	}

> -	pll_write(base + REG_DSI_28nm_PHY_PLL_LPFR_CFG, lpfr_lut[i].resistance);

> -

> -	/* Loop filter capacitance values : c1 and c2 */

> -	pll_write(base + REG_DSI_28nm_PHY_PLL_LPFC1_CFG, 0x70);

> -	pll_write(base + REG_DSI_28nm_PHY_PLL_LPFC2_CFG, 0x15);

> -

> -	rem = rate % VCO_REF_CLK_RATE;

> -	if (rem) {

> -		refclk_cfg = DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;

> -		frac_n_mode = 1;

> -		div_fbx1000 = rate / (VCO_REF_CLK_RATE / 500);

> -		gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 500);

> -	} else {

> -		refclk_cfg = 0x0;

> -		frac_n_mode = 0;

> -		div_fbx1000 = rate / (VCO_REF_CLK_RATE / 1000);

> -		gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 1000);

> -	}

> -

> -	DBG("refclk_cfg = %d", refclk_cfg);

> -

> -	rem = div_fbx1000 % 1000;

> -	frac_n_value = (rem << 16) / 1000;

> -

> -	DBG("div_fb = %lu", div_fbx1000);

> -	DBG("frac_n_value = %d", frac_n_value);

> -

> -	DBG("Generated VCO Clock: %lu", gen_vco_clk);

> -	rem = 0;

> -	sdm_cfg1 = pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1);

> -	sdm_cfg1 &= ~DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK;

> -	if (frac_n_mode) {

> -		sdm_cfg0 = 0x0;

> -		sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(0);

> -		sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(

> -				(u32)(((div_fbx1000 / 1000) & 0x3f) - 1));

> -		sdm_cfg3 = frac_n_value >> 8;

> -		sdm_cfg2 = frac_n_value & 0xff;

> -	} else {

> -		sdm_cfg0 = DSI_28nm_PHY_PLL_SDM_CFG0_BYP;

> -		sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(

> -				(u32)(((div_fbx1000 / 1000) & 0x3f) - 1));

> -		sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(0);

> -		sdm_cfg2 = 0;

> -		sdm_cfg3 = 0;

> -	}

> -

> -	DBG("sdm_cfg0=%d", sdm_cfg0);

> -	DBG("sdm_cfg1=%d", sdm_cfg1);

> -	DBG("sdm_cfg2=%d", sdm_cfg2);

> -	DBG("sdm_cfg3=%d", sdm_cfg3);

> -

> -	cal_cfg11 = (u32)(gen_vco_clk / (256 * 1000000));

> -	cal_cfg10 = (u32)((gen_vco_clk % (256 * 1000000)) / 1000000);

> -	DBG("cal_cfg10=%d, cal_cfg11=%d", cal_cfg10, cal_cfg11);

> -

> -	pll_write(base + REG_DSI_28nm_PHY_PLL_CHGPUMP_CFG, 0x02);

> -	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG3,    0x2b);

> -	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG4,    0x06);

> -	pll_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,  0x0d);

> -

> -	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1, sdm_cfg1);

> -	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2,

> -		DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2));

> -	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3,

> -		DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3));

> -	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG4, 0x00);

> -

> -	/* Add hardware recommended delay for correct PLL configuration */

> -	if (pll_28nm->vco_delay)

> -		udelay(pll_28nm->vco_delay);

> -

> -	pll_write(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG, refclk_cfg);

> -	pll_write(base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG, 0x00);

> -	pll_write(base + REG_DSI_28nm_PHY_PLL_VCOLPF_CFG, 0x31);

> -	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0,   sdm_cfg0);

> -	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG0,   0x12);

> -	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG6,   0x30);

> -	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG7,   0x00);

> -	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG8,   0x60);

> -	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG9,   0x00);

> -	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG10,  cal_cfg10 & 0xff);

> -	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG11,  cal_cfg11 & 0xff);

> -	pll_write(base + REG_DSI_28nm_PHY_PLL_EFUSE_CFG,  0x20);

> -

> -	return 0;

> -}

> -

> -static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)

> -{

> -	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);

> -	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> -

> -	return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,

> -					POLL_TIMEOUT_US);

> -}

> -

> -static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,

> -		unsigned long parent_rate)

> -{

> -	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);

> -	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> -	void __iomem *base = pll_28nm->mmio;

> -	u32 sdm0, doubler, sdm_byp_div;

> -	u32 sdm_dc_off, sdm_freq_seed, sdm2, sdm3;

> -	u32 ref_clk = VCO_REF_CLK_RATE;

> -	unsigned long vco_rate;

> -

> -	VERB("parent_rate=%lu", parent_rate);

> -

> -	/* Check to see if the ref clk doubler is enabled */

> -	doubler = pll_read(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG) &

> -			DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;

> -	ref_clk += (doubler * VCO_REF_CLK_RATE);

> -

> -	/* see if it is integer mode or sdm mode */

> -	sdm0 = pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0);

> -	if (sdm0 & DSI_28nm_PHY_PLL_SDM_CFG0_BYP) {

> -		/* integer mode */

> -		sdm_byp_div = FIELD(

> -				pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0),

> -				DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV) + 1;

> -		vco_rate = ref_clk * sdm_byp_div;

> -	} else {

> -		/* sdm mode */

> -		sdm_dc_off = FIELD(

> -				pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1),

> -				DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET);

> -		DBG("sdm_dc_off = %d", sdm_dc_off);

> -		sdm2 = FIELD(pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2),

> -				DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0);

> -		sdm3 = FIELD(pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3),

> -				DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8);

> -		sdm_freq_seed = (sdm3 << 8) | sdm2;

> -		DBG("sdm_freq_seed = %d", sdm_freq_seed);

> -

> -		vco_rate = (ref_clk * (sdm_dc_off + 1)) +

> -			mult_frac(ref_clk, sdm_freq_seed, BIT(16));

> -		DBG("vco rate = %lu", vco_rate);

> -	}

> -

> -	DBG("returning vco rate = %lu", vco_rate);

> -

> -	return vco_rate;

> -}

> -

> -static const struct clk_ops clk_ops_dsi_pll_28nm_vco = {

> -	.round_rate = msm_dsi_pll_helper_clk_round_rate,

> -	.set_rate = dsi_pll_28nm_clk_set_rate,

> -	.recalc_rate = dsi_pll_28nm_clk_recalc_rate,

> -	.prepare = msm_dsi_pll_helper_clk_prepare,

> -	.unprepare = msm_dsi_pll_helper_clk_unprepare,

> -	.is_enabled = dsi_pll_28nm_clk_is_enabled,

> -};

> -

> -/*

> - * PLL Callbacks

> - */

> -static int dsi_pll_28nm_enable_seq_hpm(struct msm_dsi_pll *pll)

> -{

> -	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> -	struct device *dev = &pll_28nm->pdev->dev;

> -	void __iomem *base = pll_28nm->mmio;

> -	u32 max_reads = 5, timeout_us = 100;

> -	bool locked;

> -	u32 val;

> -	int i;

> -

> -	DBG("id=%d", pll_28nm->id);

> -

> -	pll_28nm_software_reset(pll_28nm);

> -

> -	/*

> -	 * PLL power up sequence.

> -	 * Add necessary delays recommended by hardware.

> -	 */

> -	val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;

> -	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);

> -

> -	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;

> -	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);

> -

> -	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;

> -	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

> -

> -	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;

> -	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);

> -

> -	for (i = 0; i < 2; i++) {

> -		/* DSI Uniphy lock detect setting */

> -		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,

> -				0x0c, 100);

> -		pll_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);

> -

> -		/* poll for PLL ready status */

> -		locked = pll_28nm_poll_for_ready(pll_28nm,

> -						max_reads, timeout_us);

> -		if (locked)

> -			break;

> -

> -		pll_28nm_software_reset(pll_28nm);

> -

> -		/*

> -		 * PLL power up sequence.

> -		 * Add necessary delays recommended by hardware.

> -		 */

> -		val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;

> -		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);

> -

> -		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;

> -		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);

> -

> -		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;

> -		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 250);

> -

> -		val &= ~DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;

> -		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);

> -

> -		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;

> -		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

> -

> -		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;

> -		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);

> -	}

> -

> -	if (unlikely(!locked))

> -		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");

> -	else

> -		DBG("DSI PLL Lock success");

> -

> -	return locked ? 0 : -EINVAL;

> -}

> -

> -static int dsi_pll_28nm_enable_seq_lp(struct msm_dsi_pll *pll)

> -{

> -	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> -	struct device *dev = &pll_28nm->pdev->dev;

> -	void __iomem *base = pll_28nm->mmio;

> -	bool locked;

> -	u32 max_reads = 10, timeout_us = 50;

> -	u32 val;

> -

> -	DBG("id=%d", pll_28nm->id);

> -

> -	pll_28nm_software_reset(pll_28nm);

> -

> -	/*

> -	 * PLL power up sequence.

> -	 * Add necessary delays recommended by hardware.

> -	 */

> -	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_CAL_CFG1, 0x34, 500);

> -

> -	val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;

> -	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

> -

> -	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;

> -	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

> -

> -	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B |

> -		DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;

> -	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

> -

> -	/* DSI PLL toggle lock detect setting */

> -	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x04, 500);

> -	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x05, 512);

> -

> -	locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);

> -

> -	if (unlikely(!locked))

> -		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");

> -	else

> -		DBG("DSI PLL lock success");

> -

> -	return locked ? 0 : -EINVAL;

> -}

> -

> -static void dsi_pll_28nm_disable_seq(struct msm_dsi_pll *pll)

> -{

> -	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> -

> -	DBG("id=%d", pll_28nm->id);

> -	pll_write(pll_28nm->mmio + REG_DSI_28nm_PHY_PLL_GLB_CFG, 0x00);

> -}

> -

> -static void dsi_pll_28nm_save_state(struct msm_dsi_pll *pll)

> -{

> -	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> -	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;

> -	void __iomem *base = pll_28nm->mmio;

> -

> -	cached_state->postdiv3 =

> -			pll_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG);

> -	cached_state->postdiv1 =

> -			pll_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG);

> -	cached_state->byte_mux = pll_read(base + REG_DSI_28nm_PHY_PLL_VREG_CFG);

> -	if (dsi_pll_28nm_clk_is_enabled(&pll->clk_hw))

> -		cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw);

> -	else

> -		cached_state->vco_rate = 0;

> -}

> -

> -static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)

> -{

> -	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> -	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;

> -	void __iomem *base = pll_28nm->mmio;

> -	int ret;

> -

> -	ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,

> -					cached_state->vco_rate, 0);

> -	if (ret) {

> -		DRM_DEV_ERROR(&pll_28nm->pdev->dev,

> -			"restore vco rate failed. ret=%d\n", ret);

> -		return ret;

> -	}

> -

> -	pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,

> -			cached_state->postdiv3);

> -	pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,

> -			cached_state->postdiv1);

> -	pll_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,

> -			cached_state->byte_mux);

> -

> -	return 0;

> -}

> -

> -static int dsi_pll_28nm_get_provider(struct msm_dsi_pll *pll,

> -				struct clk **byte_clk_provider,

> -				struct clk **pixel_clk_provider)

> -{

> -	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> -

> -	if (byte_clk_provider)

> -		*byte_clk_provider = pll_28nm->provided_clks[DSI_BYTE_PLL_CLK];

> -	if (pixel_clk_provider)

> -		*pixel_clk_provider =

> -				pll_28nm->provided_clks[DSI_PIXEL_PLL_CLK];

> -

> -	return 0;

> -}

> -

> -static void dsi_pll_28nm_destroy(struct msm_dsi_pll *pll)

> -{

> -	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> -	int i;

> -

> -	msm_dsi_pll_helper_unregister_clks(pll_28nm->pdev,

> -					pll_28nm->clks, pll_28nm->num_clks);

> -

> -	for (i = 0; i < NUM_PROVIDED_CLKS; i++)

> -		pll_28nm->provided_clks[i] = NULL;

> -

> -	pll_28nm->num_clks = 0;

> -	pll_28nm->clk_data.clks = NULL;

> -	pll_28nm->clk_data.clk_num = 0;

> -}

> -

> -static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)

> -{

> -	char clk_name[32], parent1[32], parent2[32], vco_name[32];

> -	struct clk_init_data vco_init = {

> -		.parent_names = (const char *[]){ "xo" },

> -		.num_parents = 1,

> -		.name = vco_name,

> -		.flags = CLK_IGNORE_UNUSED,

> -		.ops = &clk_ops_dsi_pll_28nm_vco,

> -	};

> -	struct device *dev = &pll_28nm->pdev->dev;

> -	struct clk **clks = pll_28nm->clks;

> -	struct clk **provided_clks = pll_28nm->provided_clks;

> -	int num = 0;

> -	int ret;

> -

> -	DBG("%d", pll_28nm->id);

> -

> -	snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->id);

> -	pll_28nm->base.clk_hw.init = &vco_init;

> -	clks[num++] = clk_register(dev, &pll_28nm->base.clk_hw);

> -

> -	snprintf(clk_name, 32, "dsi%danalog_postdiv_clk", pll_28nm->id);

> -	snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);

> -	clks[num++] = clk_register_divider(dev, clk_name,

> -			parent1, CLK_SET_RATE_PARENT,

> -			pll_28nm->mmio +

> -			REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,

> -			0, 4, 0, NULL);

> -

> -	snprintf(clk_name, 32, "dsi%dindirect_path_div2_clk", pll_28nm->id);

> -	snprintf(parent1, 32, "dsi%danalog_postdiv_clk", pll_28nm->id);

> -	clks[num++] = clk_register_fixed_factor(dev, clk_name,

> -			parent1, CLK_SET_RATE_PARENT,

> -			1, 2);

> -

> -	snprintf(clk_name, 32, "dsi%dpll", pll_28nm->id);

> -	snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);

> -	clks[num++] = provided_clks[DSI_PIXEL_PLL_CLK] =

> -			clk_register_divider(dev, clk_name,

> -				parent1, 0, pll_28nm->mmio +

> -				REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,

> -				0, 8, 0, NULL);

> -

> -	snprintf(clk_name, 32, "dsi%dbyte_mux", pll_28nm->id);

> -	snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);

> -	snprintf(parent2, 32, "dsi%dindirect_path_div2_clk", pll_28nm->id);

> -	clks[num++] = clk_register_mux(dev, clk_name,

> -			((const char *[]){

> -				parent1, parent2

> -			}), 2, CLK_SET_RATE_PARENT, pll_28nm->mmio +

> -			REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);

> -

> -	snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->id);

> -	snprintf(parent1, 32, "dsi%dbyte_mux", pll_28nm->id);

> -	clks[num++] = provided_clks[DSI_BYTE_PLL_CLK] =

> -			clk_register_fixed_factor(dev, clk_name,

> -				parent1, CLK_SET_RATE_PARENT, 1, 4);

> -

> -	pll_28nm->num_clks = num;

> -

> -	pll_28nm->clk_data.clk_num = NUM_PROVIDED_CLKS;

> -	pll_28nm->clk_data.clks = provided_clks;

> -

> -	ret = of_clk_add_provider(dev->of_node,

> -			of_clk_src_onecell_get, &pll_28nm->clk_data);

> -	if (ret) {

> -		DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);

> -		return ret;

> -	}

> -

> -	return 0;

> -}

> -

> -struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,

> -					enum msm_dsi_phy_type type, int id)

> -{

> -	struct dsi_pll_28nm *pll_28nm;

> -	struct msm_dsi_pll *pll;

> -	int ret;

> -

> -	if (!pdev)

> -		return ERR_PTR(-ENODEV);

> -

> -	pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);

> -	if (!pll_28nm)

> -		return ERR_PTR(-ENOMEM);

> -

> -	pll_28nm->pdev = pdev;

> -	pll_28nm->id = id;

> -

> -	pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");

> -	if (IS_ERR_OR_NULL(pll_28nm->mmio)) {

> -		DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);

> -		return ERR_PTR(-ENOMEM);

> -	}

> -

> -	pll = &pll_28nm->base;

> -	pll->min_rate = VCO_MIN_RATE;

> -	pll->max_rate = VCO_MAX_RATE;

> -	pll->get_provider = dsi_pll_28nm_get_provider;

> -	pll->destroy = dsi_pll_28nm_destroy;

> -	pll->disable_seq = dsi_pll_28nm_disable_seq;

> -	pll->save_state = dsi_pll_28nm_save_state;

> -	pll->restore_state = dsi_pll_28nm_restore_state;

> -

> -	if (type == MSM_DSI_PHY_28NM_HPM) {

> -		pll_28nm->vco_delay = 1;

> -

> -		pll->en_seq_cnt = 3;

> -		pll->enable_seqs[0] = dsi_pll_28nm_enable_seq_hpm;

> -		pll->enable_seqs[1] = dsi_pll_28nm_enable_seq_hpm;

> -		pll->enable_seqs[2] = dsi_pll_28nm_enable_seq_hpm;

> -	} else if (type == MSM_DSI_PHY_28NM_LP) {

> -		pll_28nm->vco_delay = 1000;

> -

> -		pll->en_seq_cnt = 1;

> -		pll->enable_seqs[0] = dsi_pll_28nm_enable_seq_lp;

> -	} else {

> -		DRM_DEV_ERROR(&pdev->dev, "phy type (%d) is not 28nm\n", type);

> -		return ERR_PTR(-EINVAL);

> -	}

> -

> -	ret = pll_28nm_register(pll_28nm);

> -	if (ret) {

> -		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);

> -		return ERR_PTR(ret);

> -	}

> -

> -	return pll;

> -}

> -

> diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c

> deleted file mode 100644

> index a6e7a2525fe0..000000000000

> --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c

> +++ /dev/null

> @@ -1,526 +0,0 @@

> -// SPDX-License-Identifier: GPL-2.0-only

> -/*

> - * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.

> - */

> -

> -#include <linux/clk-provider.h>

> -

> -#include "dsi_pll.h"

> -#include "dsi.xml.h"

> -

> -/*

> - * DSI PLL 28nm (8960/A family) - clock diagram (eg: DSI1):

> - *

> - *

> - *                        +------+

> - *  dsi1vco_clk ----o-----| DIV1 |---dsi1pllbit (not exposed as clock)

> - *  F * byte_clk    |     +------+

> - *                  | bit clock divider (F / 8)

> - *                  |

> - *                  |     +------+

> - *                  o-----| DIV2 |---dsi0pllbyte---o---> To byte RCG

> - *                  |     +------+                 | (sets parent rate)

> - *                  | byte clock divider (F)       |

> - *                  |                              |

> - *                  |                              o---> To esc RCG

> - *                  |                                (doesn't set parent rate)

> - *                  |

> - *                  |     +------+

> - *                  o-----| DIV3 |----dsi0pll------o---> To dsi RCG

> - *                        +------+                 | (sets parent rate)

> - *                  dsi clock divider (F * magic)  |

> - *                                                 |

> - *                                                 o---> To pixel rcg

> - *                                                  (doesn't set parent rate)

> - */

> -

> -#define POLL_MAX_READS		8000

> -#define POLL_TIMEOUT_US		1

> -

> -#define NUM_PROVIDED_CLKS	2

> -

> -#define VCO_REF_CLK_RATE	27000000

> -#define VCO_MIN_RATE		600000000

> -#define VCO_MAX_RATE		1200000000

> -

> -#define DSI_BYTE_PLL_CLK	0

> -#define DSI_PIXEL_PLL_CLK	1

> -

> -#define VCO_PREF_DIV_RATIO	27

> -

> -struct pll_28nm_cached_state {

> -	unsigned long vco_rate;

> -	u8 postdiv3;

> -	u8 postdiv2;

> -	u8 postdiv1;

> -};

> -

> -struct clk_bytediv {

> -	struct clk_hw hw;

> -	void __iomem *reg;

> -};

> -

> -struct dsi_pll_28nm {

> -	struct msm_dsi_pll base;

> -

> -	int id;

> -	struct platform_device *pdev;

> -	void __iomem *mmio;

> -

> -	/* custom byte clock divider */

> -	struct clk_bytediv *bytediv;

> -

> -	/* private clocks: */

> -	struct clk *clks[NUM_DSI_CLOCKS_MAX];

> -	u32 num_clks;

> -

> -	/* clock-provider: */

> -	struct clk *provided_clks[NUM_PROVIDED_CLKS];

> -	struct clk_onecell_data clk_data;

> -

> -	struct pll_28nm_cached_state cached_state;

> -};

> -

> -#define to_pll_28nm(x)	container_of(x, struct dsi_pll_28nm, base)

> -

> -static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,

> -				    int nb_tries, int timeout_us)

> -{

> -	bool pll_locked = false;

> -	u32 val;

> -

> -	while (nb_tries--) {

> -		val = pll_read(pll_28nm->mmio + REG_DSI_28nm_8960_PHY_PLL_RDY);

> -		pll_locked = !!(val & DSI_28nm_8960_PHY_PLL_RDY_PLL_RDY);

> -

> -		if (pll_locked)

> -			break;

> -

> -		udelay(timeout_us);

> -	}

> -	DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");

> -

> -	return pll_locked;

> -}

> -

> -/*

> - * Clock Callbacks

> - */

> -static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,

> -				     unsigned long parent_rate)

> -{

> -	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);

> -	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> -	void __iomem *base = pll_28nm->mmio;

> -	u32 val, temp, fb_divider;

> -

> -	DBG("rate=%lu, parent's=%lu", rate, parent_rate);

> -

> -	temp = rate / 10;

> -	val = VCO_REF_CLK_RATE / 10;

> -	fb_divider = (temp * VCO_PREF_DIV_RATIO) / val;

> -	fb_divider = fb_divider / 2 - 1;

> -	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1,

> -			fb_divider & 0xff);

> -

> -	val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2);

> -

> -	val |= (fb_divider >> 8) & 0x07;

> -

> -	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2,

> -			val);

> -

> -	val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);

> -

> -	val |= (VCO_PREF_DIV_RATIO - 1) & 0x3f;

> -

> -	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3,

> -			val);

> -

> -	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_6,

> -			0xf);

> -

> -	val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);

> -	val |= 0x7 << 4;

> -	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,

> -			val);

> -

> -	return 0;

> -}

> -

> -static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)

> -{

> -	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);

> -	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> -

> -	return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,

> -					POLL_TIMEOUT_US);

> -}

> -

> -static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,

> -						  unsigned long parent_rate)

> -{

> -	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);

> -	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> -	void __iomem *base = pll_28nm->mmio;

> -	unsigned long vco_rate;

> -	u32 status, fb_divider, temp, ref_divider;

> -

> -	VERB("parent_rate=%lu", parent_rate);

> -

> -	status = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0);

> -

> -	if (status & DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE) {

> -		fb_divider = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1);

> -		fb_divider &= 0xff;

> -		temp = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2) & 0x07;

> -		fb_divider = (temp << 8) | fb_divider;

> -		fb_divider += 1;

> -

> -		ref_divider = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);

> -		ref_divider &= 0x3f;

> -		ref_divider += 1;

> -

> -		/* multiply by 2 */

> -		vco_rate = (parent_rate / ref_divider) * fb_divider * 2;

> -	} else {

> -		vco_rate = 0;

> -	}

> -

> -	DBG("returning vco rate = %lu", vco_rate);

> -

> -	return vco_rate;

> -}

> -

> -static const struct clk_ops clk_ops_dsi_pll_28nm_vco = {

> -	.round_rate = msm_dsi_pll_helper_clk_round_rate,

> -	.set_rate = dsi_pll_28nm_clk_set_rate,

> -	.recalc_rate = dsi_pll_28nm_clk_recalc_rate,

> -	.prepare = msm_dsi_pll_helper_clk_prepare,

> -	.unprepare = msm_dsi_pll_helper_clk_unprepare,

> -	.is_enabled = dsi_pll_28nm_clk_is_enabled,

> -};

> -

> -/*

> - * Custom byte clock divier clk_ops

> - *

> - * This clock is the entry point to configuring the PLL. The user (dsi 

> host)

> - * will set this clock's rate to the desired byte clock rate. The VCO 

> lock

> - * frequency is a multiple of the byte clock rate. The multiplication 

> factor

> - * (shown as F in the diagram above) is a function of the byte clock 

> rate.

> - *

> - * This custom divider clock ensures that its parent (VCO) is set to 

> the

> - * desired rate, and that the byte clock postdivider (POSTDIV2) is 

> configured

> - * accordingly

> - */

> -#define to_clk_bytediv(_hw) container_of(_hw, struct clk_bytediv, hw)

> -

> -static unsigned long clk_bytediv_recalc_rate(struct clk_hw *hw,

> -		unsigned long parent_rate)

> -{

> -	struct clk_bytediv *bytediv = to_clk_bytediv(hw);

> -	unsigned int div;

> -

> -	div = pll_read(bytediv->reg) & 0xff;

> -

> -	return parent_rate / (div + 1);

> -}

> -

> -/* find multiplication factor(wrt byte clock) at which the VCO should 

> be set */

> -static unsigned int get_vco_mul_factor(unsigned long byte_clk_rate)

> -{

> -	unsigned long bit_mhz;

> -

> -	/* convert to bit clock in Mhz */

> -	bit_mhz = (byte_clk_rate * 8) / 1000000;

> -

> -	if (bit_mhz < 125)

> -		return 64;

> -	else if (bit_mhz < 250)

> -		return 32;

> -	else if (bit_mhz < 600)

> -		return 16;

> -	else

> -		return 8;

> -}

> -

> -static long clk_bytediv_round_rate(struct clk_hw *hw, unsigned long 

> rate,

> -				   unsigned long *prate)

> -{

> -	unsigned long best_parent;

> -	unsigned int factor;

> -

> -	factor = get_vco_mul_factor(rate);

> -

> -	best_parent = rate * factor;

> -	*prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);

> -

> -	return *prate / factor;

> -}

> -

> -static int clk_bytediv_set_rate(struct clk_hw *hw, unsigned long rate,

> -				unsigned long parent_rate)

> -{

> -	struct clk_bytediv *bytediv = to_clk_bytediv(hw);

> -	u32 val;

> -	unsigned int factor;

> -

> -	factor = get_vco_mul_factor(rate);

> -

> -	val = pll_read(bytediv->reg);

> -	val |= (factor - 1) & 0xff;

> -	pll_write(bytediv->reg, val);

> -

> -	return 0;

> -}

> -

> -/* Our special byte clock divider ops */

> -static const struct clk_ops clk_bytediv_ops = {

> -	.round_rate = clk_bytediv_round_rate,

> -	.set_rate = clk_bytediv_set_rate,

> -	.recalc_rate = clk_bytediv_recalc_rate,

> -};

> -

> -/*

> - * PLL Callbacks

> - */

> -static int dsi_pll_28nm_enable_seq(struct msm_dsi_pll *pll)

> -{

> -	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> -	struct device *dev = &pll_28nm->pdev->dev;

> -	void __iomem *base = pll_28nm->mmio;

> -	bool locked;

> -	unsigned int bit_div, byte_div;

> -	int max_reads = 1000, timeout_us = 100;

> -	u32 val;

> -

> -	DBG("id=%d", pll_28nm->id);

> -

> -	/*

> -	 * before enabling the PLL, configure the bit clock divider since we

> -	 * don't expose it as a clock to the outside world

> -	 * 1: read back the byte clock divider that should already be set

> -	 * 2: divide by 8 to get bit clock divider

> -	 * 3: write it to POSTDIV1

> -	 */

> -	val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);

> -	byte_div = val + 1;

> -	bit_div = byte_div / 8;

> -

> -	val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);

> -	val &= ~0xf;

> -	val |= (bit_div - 1);

> -	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8, val);

> -

> -	/* enable the PLL */

> -	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0,

> -			DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE);

> -

> -	locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);

> -

> -	if (unlikely(!locked))

> -		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");

> -	else

> -		DBG("DSI PLL lock success");

> -

> -	return locked ? 0 : -EINVAL;

> -}

> -

> -static void dsi_pll_28nm_disable_seq(struct msm_dsi_pll *pll)

> -{

> -	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> -

> -	DBG("id=%d", pll_28nm->id);

> -	pll_write(pll_28nm->mmio + REG_DSI_28nm_8960_PHY_PLL_CTRL_0, 0x00);

> -}

> -

> -static void dsi_pll_28nm_save_state(struct msm_dsi_pll *pll)

> -{

> -	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> -	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;

> -	void __iomem *base = pll_28nm->mmio;

> -

> -	cached_state->postdiv3 =

> -			pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10);

> -	cached_state->postdiv2 =

> -			pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);

> -	cached_state->postdiv1 =

> -			pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);

> -

> -	cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw);

> -}

> -

> -static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)

> -{

> -	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> -	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;

> -	void __iomem *base = pll_28nm->mmio;

> -	int ret;

> -

> -	ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,

> -					cached_state->vco_rate, 0);

> -	if (ret) {

> -		DRM_DEV_ERROR(&pll_28nm->pdev->dev,

> -			"restore vco rate failed. ret=%d\n", ret);

> -		return ret;

> -	}

> -

> -	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10,

> -			cached_state->postdiv3);

> -	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9,

> -			cached_state->postdiv2);

> -	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,

> -			cached_state->postdiv1);

> -

> -	return 0;

> -}

> -

> -static int dsi_pll_28nm_get_provider(struct msm_dsi_pll *pll,

> -				struct clk **byte_clk_provider,

> -				struct clk **pixel_clk_provider)

> -{

> -	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> -

> -	if (byte_clk_provider)

> -		*byte_clk_provider = pll_28nm->provided_clks[DSI_BYTE_PLL_CLK];

> -	if (pixel_clk_provider)

> -		*pixel_clk_provider =

> -				pll_28nm->provided_clks[DSI_PIXEL_PLL_CLK];

> -

> -	return 0;

> -}

> -

> -static void dsi_pll_28nm_destroy(struct msm_dsi_pll *pll)

> -{

> -	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

> -

> -	msm_dsi_pll_helper_unregister_clks(pll_28nm->pdev,

> -					pll_28nm->clks, pll_28nm->num_clks);

> -}

> -

> -static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)

> -{

> -	char *clk_name, *parent_name, *vco_name;

> -	struct clk_init_data vco_init = {

> -		.parent_names = (const char *[]){ "pxo" },

> -		.num_parents = 1,

> -		.flags = CLK_IGNORE_UNUSED,

> -		.ops = &clk_ops_dsi_pll_28nm_vco,

> -	};

> -	struct device *dev = &pll_28nm->pdev->dev;

> -	struct clk **clks = pll_28nm->clks;

> -	struct clk **provided_clks = pll_28nm->provided_clks;

> -	struct clk_bytediv *bytediv;

> -	struct clk_init_data bytediv_init = { };

> -	int ret, num = 0;

> -

> -	DBG("%d", pll_28nm->id);

> -

> -	bytediv = devm_kzalloc(dev, sizeof(*bytediv), GFP_KERNEL);

> -	if (!bytediv)

> -		return -ENOMEM;

> -

> -	vco_name = devm_kzalloc(dev, 32, GFP_KERNEL);

> -	if (!vco_name)

> -		return -ENOMEM;

> -

> -	parent_name = devm_kzalloc(dev, 32, GFP_KERNEL);

> -	if (!parent_name)

> -		return -ENOMEM;

> -

> -	clk_name = devm_kzalloc(dev, 32, GFP_KERNEL);

> -	if (!clk_name)

> -		return -ENOMEM;

> -

> -	pll_28nm->bytediv = bytediv;

> -

> -	snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->id);

> -	vco_init.name = vco_name;

> -

> -	pll_28nm->base.clk_hw.init = &vco_init;

> -

> -	clks[num++] = clk_register(dev, &pll_28nm->base.clk_hw);

> -

> -	/* prepare and register bytediv */

> -	bytediv->hw.init = &bytediv_init;

> -	bytediv->reg = pll_28nm->mmio + REG_DSI_28nm_8960_PHY_PLL_CTRL_9;

> -

> -	snprintf(parent_name, 32, "dsi%dvco_clk", pll_28nm->id);

> -	snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->id);

> -

> -	bytediv_init.name = clk_name;

> -	bytediv_init.ops = &clk_bytediv_ops;

> -	bytediv_init.flags = CLK_SET_RATE_PARENT;

> -	bytediv_init.parent_names = (const char * const *) &parent_name;

> -	bytediv_init.num_parents = 1;

> -

> -	/* DIV2 */

> -	clks[num++] = provided_clks[DSI_BYTE_PLL_CLK] =

> -			clk_register(dev, &bytediv->hw);

> -

> -	snprintf(clk_name, 32, "dsi%dpll", pll_28nm->id);

> -	/* DIV3 */

> -	clks[num++] = provided_clks[DSI_PIXEL_PLL_CLK] =

> -			clk_register_divider(dev, clk_name,

> -				parent_name, 0, pll_28nm->mmio +

> -				REG_DSI_28nm_8960_PHY_PLL_CTRL_10,

> -				0, 8, 0, NULL);

> -

> -	pll_28nm->num_clks = num;

> -

> -	pll_28nm->clk_data.clk_num = NUM_PROVIDED_CLKS;

> -	pll_28nm->clk_data.clks = provided_clks;

> -

> -	ret = of_clk_add_provider(dev->of_node,

> -			of_clk_src_onecell_get, &pll_28nm->clk_data);

> -	if (ret) {

> -		DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);

> -		return ret;

> -	}

> -

> -	return 0;

> -}

> -

> -struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device 

> *pdev,

> -					       int id)

> -{

> -	struct dsi_pll_28nm *pll_28nm;

> -	struct msm_dsi_pll *pll;

> -	int ret;

> -

> -	if (!pdev)

> -		return ERR_PTR(-ENODEV);

> -

> -	pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);

> -	if (!pll_28nm)

> -		return ERR_PTR(-ENOMEM);

> -

> -	pll_28nm->pdev = pdev;

> -	pll_28nm->id = id + 1;

> -

> -	pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");

> -	if (IS_ERR_OR_NULL(pll_28nm->mmio)) {

> -		DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);

> -		return ERR_PTR(-ENOMEM);

> -	}

> -

> -	pll = &pll_28nm->base;

> -	pll->min_rate = VCO_MIN_RATE;

> -	pll->max_rate = VCO_MAX_RATE;

> -	pll->get_provider = dsi_pll_28nm_get_provider;

> -	pll->destroy = dsi_pll_28nm_destroy;

> -	pll->disable_seq = dsi_pll_28nm_disable_seq;

> -	pll->save_state = dsi_pll_28nm_save_state;

> -	pll->restore_state = dsi_pll_28nm_restore_state;

> -

> -	pll->en_seq_cnt = 1;

> -	pll->enable_seqs[0] = dsi_pll_28nm_enable_seq;

> -

> -	ret = pll_28nm_register(pll_28nm);

> -	if (ret) {

> -		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);

> -		return ERR_PTR(ret);

> -	}

> -

> -	return pll;

> -}

> diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c

> b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c

> deleted file mode 100644

> index e29b3bfd63d1..000000000000

> --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c

> +++ /dev/null

> @@ -1,913 +0,0 @@

> -/*

> - * SPDX-License-Identifier: GPL-2.0

> - * Copyright (c) 2018, The Linux Foundation

> - */

> -

> -#include <linux/clk.h>

> -#include <linux/clk-provider.h>

> -#include <linux/iopoll.h>

> -

> -#include "dsi_pll.h"

> -#include "dsi.xml.h"

> -

> -/*

> - * DSI PLL 7nm - clock diagram (eg: DSI0): TODO: updated CPHY diagram

> - *

> - *           dsi0_pll_out_div_clk  dsi0_pll_bit_clk

> - *                              |                |

> - *                              |                |

> - *                 +---------+  |  +----------+  |  +----+

> - *  dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |--

> dsi0_phy_pll_out_byteclk

> - *                 +---------+  |  +----------+  |  +----+

> - *                              |                |

> - *                              |                |

> dsi0_pll_by_2_bit_clk

> - *                              |                |          |

> - *                              |                |  +----+  |  |\

> dsi0_pclk_mux

> - *                              |                |--| /2 |--o--| \   |

> - *                              |                |  +----+     |  \

> |  +---------+

> - *                              |                --------------|

> |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk

> - *                              |------------------------------|  /

>   +---------+

> - *                              |          +-----+             | /

> - *                              -----------| /4? |--o----------|/

> - *                                         +-----+  |           |

> - *                                                  |           

> |dsiclk_sel

> - *                                                  |

> - *                                                  

> dsi0_pll_post_out_div_clk

> - */

> -

> -#define DSI_BYTE_PLL_CLK		0

> -#define DSI_PIXEL_PLL_CLK		1

> -#define NUM_PROVIDED_CLKS		2

> -

> -#define VCO_REF_CLK_RATE		19200000

> -

> -struct dsi_pll_regs {

> -	u32 pll_prop_gain_rate;

> -	u32 pll_lockdet_rate;

> -	u32 decimal_div_start;

> -	u32 frac_div_start_low;

> -	u32 frac_div_start_mid;

> -	u32 frac_div_start_high;

> -	u32 pll_clock_inverters;

> -	u32 ssc_stepsize_low;

> -	u32 ssc_stepsize_high;

> -	u32 ssc_div_per_low;

> -	u32 ssc_div_per_high;

> -	u32 ssc_adjper_low;

> -	u32 ssc_adjper_high;

> -	u32 ssc_control;

> -};

> -

> -struct dsi_pll_config {

> -	u32 ref_freq;

> -	bool div_override;

> -	u32 output_div;

> -	bool ignore_frac;

> -	bool disable_prescaler;

> -	bool enable_ssc;

> -	bool ssc_center;

> -	u32 dec_bits;

> -	u32 frac_bits;

> -	u32 lock_timer;

> -	u32 ssc_freq;

> -	u32 ssc_offset;

> -	u32 ssc_adj_per;

> -	u32 thresh_cycles;

> -	u32 refclk_cycles;

> -};

> -

> -struct pll_7nm_cached_state {

> -	unsigned long vco_rate;

> -	u8 bit_clk_div;

> -	u8 pix_clk_div;

> -	u8 pll_out_div;

> -	u8 pll_mux;

> -};

> -

> -struct dsi_pll_7nm {

> -	struct msm_dsi_pll base;

> -

> -	int id;

> -	struct platform_device *pdev;

> -

> -	void __iomem *phy_cmn_mmio;

> -	void __iomem *mmio;

> -

> -	u64 vco_ref_clk_rate;

> -	u64 vco_current_rate;

> -

> -	/* protects REG_DSI_7nm_PHY_CMN_CLK_CFG0 register */

> -	spinlock_t postdiv_lock;

> -

> -	int vco_delay;

> -	struct dsi_pll_config pll_configuration;

> -	struct dsi_pll_regs reg_setup;

> -

> -	/* private clocks: */

> -	struct clk_hw *out_div_clk_hw;

> -	struct clk_hw *bit_clk_hw;

> -	struct clk_hw *byte_clk_hw;

> -	struct clk_hw *by_2_bit_clk_hw;

> -	struct clk_hw *post_out_div_clk_hw;

> -	struct clk_hw *pclk_mux_hw;

> -	struct clk_hw *out_dsiclk_hw;

> -

> -	/* clock-provider: */

> -	struct clk_hw_onecell_data *hw_data;

> -

> -	struct pll_7nm_cached_state cached_state;

> -

> -	enum msm_dsi_phy_usecase uc;

> -	struct dsi_pll_7nm *slave;

> -};

> -

> -#define to_pll_7nm(x)	container_of(x, struct dsi_pll_7nm, base)

> -

> -/*

> - * Global list of private DSI PLL struct pointers. We need this for 

> Dual DSI

> - * mode, where the master PLL's clk_ops needs access the slave's 

> private data

> - */

> -static struct dsi_pll_7nm *pll_7nm_list[DSI_MAX];

> -

> -static void dsi_pll_setup_config(struct dsi_pll_7nm *pll)

> -{

> -	struct dsi_pll_config *config = &pll->pll_configuration;

> -

> -	config->ref_freq = pll->vco_ref_clk_rate;

> -	config->output_div = 1;

> -	config->dec_bits = 8;

> -	config->frac_bits = 18;

> -	config->lock_timer = 64;

> -	config->ssc_freq = 31500;

> -	config->ssc_offset = 4800;

> -	config->ssc_adj_per = 2;

> -	config->thresh_cycles = 32;

> -	config->refclk_cycles = 256;

> -

> -	config->div_override = false;

> -	config->ignore_frac = false;

> -	config->disable_prescaler = false;

> -

> -	/* TODO: ssc enable */

> -	config->enable_ssc = false;

> -	config->ssc_center = 0;

> -}

> -

> -static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll)

> -{

> -	struct dsi_pll_config *config = &pll->pll_configuration;

> -	struct dsi_pll_regs *regs = &pll->reg_setup;

> -	u64 fref = pll->vco_ref_clk_rate;

> -	u64 pll_freq;

> -	u64 divider;

> -	u64 dec, dec_multiple;

> -	u32 frac;

> -	u64 multiplier;

> -

> -	pll_freq = pll->vco_current_rate;

> -

> -	if (config->disable_prescaler)

> -		divider = fref;

> -	else

> -		divider = fref * 2;

> -

> -	multiplier = 1 << config->frac_bits;

> -	dec_multiple = div_u64(pll_freq * multiplier, divider);

> -	div_u64_rem(dec_multiple, multiplier, &frac);

> -

> -	dec = div_u64(dec_multiple, multiplier);

> -

> -	if (pll->base.type != MSM_DSI_PHY_7NM_V4_1)

> -		regs->pll_clock_inverters = 0x28;

> -	else if (pll_freq <= 1000000000ULL)

> -		regs->pll_clock_inverters = 0xa0;

> -	else if (pll_freq <= 2500000000ULL)

> -		regs->pll_clock_inverters = 0x20;

> -	else if (pll_freq <= 3020000000ULL)

> -		regs->pll_clock_inverters = 0x00;

> -	else

> -		regs->pll_clock_inverters = 0x40;

> -

> -	regs->pll_lockdet_rate = config->lock_timer;

> -	regs->decimal_div_start = dec;

> -	regs->frac_div_start_low = (frac & 0xff);

> -	regs->frac_div_start_mid = (frac & 0xff00) >> 8;

> -	regs->frac_div_start_high = (frac & 0x30000) >> 16;

> -}

> -

> -#define SSC_CENTER		BIT(0)

> -#define SSC_EN			BIT(1)

> -

> -static void dsi_pll_calc_ssc(struct dsi_pll_7nm *pll)

> -{

> -	struct dsi_pll_config *config = &pll->pll_configuration;

> -	struct dsi_pll_regs *regs = &pll->reg_setup;

> -	u32 ssc_per;

> -	u32 ssc_mod;

> -	u64 ssc_step_size;

> -	u64 frac;

> -

> -	if (!config->enable_ssc) {

> -		DBG("SSC not enabled\n");

> -		return;

> -	}

> -

> -	ssc_per = DIV_ROUND_CLOSEST(config->ref_freq, config->ssc_freq) / 2 - 

> 1;

> -	ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);

> -	ssc_per -= ssc_mod;

> -

> -	frac = regs->frac_div_start_low |

> -			(regs->frac_div_start_mid << 8) |

> -			(regs->frac_div_start_high << 16);

> -	ssc_step_size = regs->decimal_div_start;

> -	ssc_step_size *= (1 << config->frac_bits);

> -	ssc_step_size += frac;

> -	ssc_step_size *= config->ssc_offset;

> -	ssc_step_size *= (config->ssc_adj_per + 1);

> -	ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));

> -	ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);

> -

> -	regs->ssc_div_per_low = ssc_per & 0xFF;

> -	regs->ssc_div_per_high = (ssc_per & 0xFF00) >> 8;

> -	regs->ssc_stepsize_low = (u32)(ssc_step_size & 0xFF);

> -	regs->ssc_stepsize_high = (u32)((ssc_step_size & 0xFF00) >> 8);

> -	regs->ssc_adjper_low = config->ssc_adj_per & 0xFF;

> -	regs->ssc_adjper_high = (config->ssc_adj_per & 0xFF00) >> 8;

> -

> -	regs->ssc_control = config->ssc_center ? SSC_CENTER : 0;

> -

> -	pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",

> -		 regs->decimal_div_start, frac, config->frac_bits);

> -	pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",

> -		 ssc_per, (u32)ssc_step_size, config->ssc_adj_per);

> -}

> -

> -static void dsi_pll_ssc_commit(struct dsi_pll_7nm *pll)

> -{

> -	void __iomem *base = pll->mmio;

> -	struct dsi_pll_regs *regs = &pll->reg_setup;

> -

> -	if (pll->pll_configuration.enable_ssc) {

> -		pr_debug("SSC is enabled\n");

> -

> -		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_1,

> -			  regs->ssc_stepsize_low);

> -		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,

> -			  regs->ssc_stepsize_high);

> -		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_1,

> -			  regs->ssc_div_per_low);

> -		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_1,

> -			  regs->ssc_div_per_high);

> -		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_1,

> -			  regs->ssc_adjper_low);

> -		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_1,

> -			  regs->ssc_adjper_high);

> -		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_CONTROL,

> -			  SSC_EN | regs->ssc_control);

> -	}

> -}

> -

> -static void dsi_pll_config_hzindep_reg(struct dsi_pll_7nm *pll)

> -{

> -	void __iomem *base = pll->mmio;

> -	u8 analog_controls_five_1 = 0x01, vco_config_1 = 0x00;

> -

> -	if (pll->base.type == MSM_DSI_PHY_7NM_V4_1) {

> -		if (pll->vco_current_rate >= 3100000000ULL)

> -			analog_controls_five_1 = 0x03;

> -

> -		if (pll->vco_current_rate < 1520000000ULL)

> -			vco_config_1 = 0x08;

> -		else if (pll->vco_current_rate < 2990000000ULL)

> -			vco_config_1 = 0x01;

> -	}

> -

> -	pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1,

> -		  analog_controls_five_1);

> -	pll_write(base + REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1, vco_config_1);

> -	pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE, 0x01);

> -	pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);

> -	pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);

> -	pll_write(base + REG_DSI_7nm_PHY_PLL_DSM_DIVIDER, 0x00);

> -	pll_write(base + REG_DSI_7nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);

> -	pll_write(base + REG_DSI_7nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);

> -	pll_write(base + REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE, 

> 0xba);

> -	pll_write(base + REG_DSI_7nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);

> -	pll_write(base + REG_DSI_7nm_PHY_PLL_OUTDIV, 0x00);

> -	pll_write(base + REG_DSI_7nm_PHY_PLL_CORE_OVERRIDE, 0x00);

> -	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);

> -	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x0a);

> -	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_1, 0xc0);

> -	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 

> 0x84);

> -	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 

> 0x82);

> -	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1, 

> 0x4c);

> -	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);

> -	pll_write(base + REG_DSI_7nm_PHY_PLL_PFILT, 0x29);

> -	pll_write(base + REG_DSI_7nm_PHY_PLL_PFILT, 0x2f);

> -	pll_write(base + REG_DSI_7nm_PHY_PLL_IFILT, 0x2a);

> -	pll_write(base + REG_DSI_7nm_PHY_PLL_IFILT,

> -		  pll->base.type == MSM_DSI_PHY_7NM_V4_1 ? 0x3f : 0x22);

> -

> -	if (pll->base.type == MSM_DSI_PHY_7NM_V4_1) {

> -		pll_write(base + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE, 0x22);

> -		if (pll->slave)

> -			pll_write(pll->slave->mmio + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE, 

> 0x22);

> -	}

> -}

> -

> -static void dsi_pll_commit(struct dsi_pll_7nm *pll)

> -{

> -	void __iomem *base = pll->mmio;

> -	struct dsi_pll_regs *reg = &pll->reg_setup;

> -

> -	pll_write(base + REG_DSI_7nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);

> -	pll_write(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1,

> reg->decimal_div_start);

> -	pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1,

> reg->frac_div_start_low);

> -	pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1,

> reg->frac_div_start_mid);

> -	pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1,

> reg->frac_div_start_high);

> -	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1,

> reg->pll_lockdet_rate);

> -	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);

> -	pll_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, 0x10); /* TODO: 0x00 

> for CPHY */

> -	pll_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS,

> reg->pll_clock_inverters);

> -}

> -

> -static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long 

> rate,

> -				     unsigned long parent_rate)

> -{

> -	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);

> -	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);

> -

> -	DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_7nm->id, rate,

> -	    parent_rate);

> -

> -	pll_7nm->vco_current_rate = rate;

> -	pll_7nm->vco_ref_clk_rate = VCO_REF_CLK_RATE;

> -

> -	dsi_pll_setup_config(pll_7nm);

> -

> -	dsi_pll_calc_dec_frac(pll_7nm);

> -

> -	dsi_pll_calc_ssc(pll_7nm);

> -

> -	dsi_pll_commit(pll_7nm);

> -

> -	dsi_pll_config_hzindep_reg(pll_7nm);

> -

> -	dsi_pll_ssc_commit(pll_7nm);

> -

> -	/* flush, ensure all register writes are done*/

> -	wmb();

> -

> -	return 0;

> -}

> -

> -static int dsi_pll_7nm_lock_status(struct dsi_pll_7nm *pll)

> -{

> -	int rc;

> -	u32 status = 0;

> -	u32 const delay_us = 100;

> -	u32 const timeout_us = 5000;

> -

> -	rc = readl_poll_timeout_atomic(pll->mmio +

> -				       REG_DSI_7nm_PHY_PLL_COMMON_STATUS_ONE,

> -				       status,

> -				       ((status & BIT(0)) > 0),

> -				       delay_us,

> -				       timeout_us);

> -	if (rc)

> -		pr_err("DSI PLL(%d) lock failed, status=0x%08x\n",

> -		       pll->id, status);

> -

> -	return rc;

> -}

> -

> -static void dsi_pll_disable_pll_bias(struct dsi_pll_7nm *pll)

> -{

> -	u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0);

> -

> -	pll_write(pll->mmio + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES, 0);

> -	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0, data & 

> ~BIT(5));

> -	ndelay(250);

> -}

> -

> -static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll)

> -{

> -	u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0);

> -

> -	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0, data | 

> BIT(5));

> -	pll_write(pll->mmio + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES, 0xc0);

> -	ndelay(250);

> -}

> -

> -static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll)

> -{

> -	u32 data;

> -

> -	data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1);

> -	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1, data & 

> ~BIT(5));

> -}

> -

> -static void dsi_pll_enable_global_clk(struct dsi_pll_7nm *pll)

> -{

> -	u32 data;

> -

> -	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_3, 0x04);

> -

> -	data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1);

> -	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1,

> -		  data | BIT(5) | BIT(4));

> -}

> -

> -static void dsi_pll_phy_dig_reset(struct dsi_pll_7nm *pll)

> -{

> -	/*

> -	 * Reset the PHY digital domain. This would be needed when

> -	 * coming out of a CX or analog rail power collapse while

> -	 * ensuring that the pads maintain LP00 or LP11 state

> -	 */

> -	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4, 

> BIT(0));

> -	wmb(); /* Ensure that the reset is deasserted */

> -	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4, 

> 0x0);

> -	wmb(); /* Ensure that the reset is deasserted */

> -}

> -

> -static int dsi_pll_7nm_vco_prepare(struct clk_hw *hw)

> -{

> -	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);

> -	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);

> -	int rc;

> -

> -	dsi_pll_enable_pll_bias(pll_7nm);

> -	if (pll_7nm->slave)

> -		dsi_pll_enable_pll_bias(pll_7nm->slave);

> -

> -	/* Start PLL */

> -	pll_write(pll_7nm->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 

> 0x01);

> -

> -	/*

> -	 * ensure all PLL configurations are written prior to checking

> -	 * for PLL lock.

> -	 */

> -	wmb();

> -

> -	/* Check for PLL lock */

> -	rc = dsi_pll_7nm_lock_status(pll_7nm);

> -	if (rc) {

> -		pr_err("PLL(%d) lock failed\n", pll_7nm->id);

> -		goto error;

> -	}

> -

> -	pll->pll_on = true;

> -

> -	/*

> -	 * assert power on reset for PHY digital in case the PLL is

> -	 * enabled after CX of analog domain power collapse. This needs

> -	 * to be done before enabling the global clk.

> -	 */

> -	dsi_pll_phy_dig_reset(pll_7nm);

> -	if (pll_7nm->slave)

> -		dsi_pll_phy_dig_reset(pll_7nm->slave);

> -

> -	dsi_pll_enable_global_clk(pll_7nm);

> -	if (pll_7nm->slave)

> -		dsi_pll_enable_global_clk(pll_7nm->slave);

> -

> -error:

> -	return rc;

> -}

> -

> -static void dsi_pll_disable_sub(struct dsi_pll_7nm *pll)

> -{

> -	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_RBUF_CTRL, 0);

> -	dsi_pll_disable_pll_bias(pll);

> -}

> -

> -static void dsi_pll_7nm_vco_unprepare(struct clk_hw *hw)

> -{

> -	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);

> -	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);

> -

> -	/*

> -	 * To avoid any stray glitches while abruptly powering down the PLL

> -	 * make sure to gate the clock using the clock enable bit before

> -	 * powering down the PLL

> -	 */

> -	dsi_pll_disable_global_clk(pll_7nm);

> -	pll_write(pll_7nm->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0);

> -	dsi_pll_disable_sub(pll_7nm);

> -	if (pll_7nm->slave) {

> -		dsi_pll_disable_global_clk(pll_7nm->slave);

> -		dsi_pll_disable_sub(pll_7nm->slave);

> -	}

> -	/* flush, ensure all register writes are done */

> -	wmb();

> -	pll->pll_on = false;

> -}

> -

> -static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,

> -						  unsigned long parent_rate)

> -{

> -	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);

> -	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);

> -	struct dsi_pll_config *config = &pll_7nm->pll_configuration;

> -	void __iomem *base = pll_7nm->mmio;

> -	u64 ref_clk = pll_7nm->vco_ref_clk_rate;

> -	u64 vco_rate = 0x0;

> -	u64 multiplier;

> -	u32 frac;

> -	u32 dec;

> -	u64 pll_freq, tmp64;

> -

> -	dec = pll_read(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1);

> -	dec &= 0xff;

> -

> -	frac = pll_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1);

> -	frac |= ((pll_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1) &

> -		  0xff) << 8);

> -	frac |= ((pll_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1) 

> &

> -		  0x3) << 16);

> -

> -	/*

> -	 * TODO:

> -	 *	1. Assumes prescaler is disabled

> -	 */

> -	multiplier = 1 << config->frac_bits;

> -	pll_freq = dec * (ref_clk * 2);

> -	tmp64 = (ref_clk * 2 * frac);

> -	pll_freq += div_u64(tmp64, multiplier);

> -

> -	vco_rate = pll_freq;

> -

> -	DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",

> -	    pll_7nm->id, (unsigned long)vco_rate, dec, frac);

> -

> -	return (unsigned long)vco_rate;

> -}

> -

> -static const struct clk_ops clk_ops_dsi_pll_7nm_vco = {

> -	.round_rate = msm_dsi_pll_helper_clk_round_rate,

> -	.set_rate = dsi_pll_7nm_vco_set_rate,

> -	.recalc_rate = dsi_pll_7nm_vco_recalc_rate,

> -	.prepare = dsi_pll_7nm_vco_prepare,

> -	.unprepare = dsi_pll_7nm_vco_unprepare,

> -};

> -

> -/*

> - * PLL Callbacks

> - */

> -

> -static void dsi_pll_7nm_save_state(struct msm_dsi_pll *pll)

> -{

> -	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);

> -	struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;

> -	void __iomem *phy_base = pll_7nm->phy_cmn_mmio;

> -	u32 cmn_clk_cfg0, cmn_clk_cfg1;

> -

> -	cached->pll_out_div = pll_read(pll_7nm->mmio +

> -				       REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);

> -	cached->pll_out_div &= 0x3;

> -

> -	cmn_clk_cfg0 = pll_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);

> -	cached->bit_clk_div = cmn_clk_cfg0 & 0xf;

> -	cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;

> -

> -	cmn_clk_cfg1 = pll_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);

> -	cached->pll_mux = cmn_clk_cfg1 & 0x3;

> -

> -	DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",

> -	    pll_7nm->id, cached->pll_out_div, cached->bit_clk_div,

> -	    cached->pix_clk_div, cached->pll_mux);

> -}

> -

> -static int dsi_pll_7nm_restore_state(struct msm_dsi_pll *pll)

> -{

> -	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);

> -	struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;

> -	void __iomem *phy_base = pll_7nm->phy_cmn_mmio;

> -	u32 val;

> -	int ret;

> -

> -	val = pll_read(pll_7nm->mmio + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);

> -	val &= ~0x3;

> -	val |= cached->pll_out_div;

> -	pll_write(pll_7nm->mmio + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE, val);

> -

> -	pll_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0,

> -		  cached->bit_clk_div | (cached->pix_clk_div << 4));

> -

> -	val = pll_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);

> -	val &= ~0x3;

> -	val |= cached->pll_mux;

> -	pll_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, val);

> -

> -	ret = dsi_pll_7nm_vco_set_rate(&pll->clk_hw,

> pll_7nm->vco_current_rate, pll_7nm->vco_ref_clk_rate);

> -	if (ret) {

> -		DRM_DEV_ERROR(&pll_7nm->pdev->dev,

> -			"restore vco rate failed. ret=%d\n", ret);

> -		return ret;

> -	}

> -

> -	DBG("DSI PLL%d", pll_7nm->id);

> -

> -	return 0;

> -}

> -

> -static int dsi_pll_7nm_set_usecase(struct msm_dsi_pll *pll,

> -				    enum msm_dsi_phy_usecase uc)

> -{

> -	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);

> -	void __iomem *base = pll_7nm->phy_cmn_mmio;

> -	u32 data = 0x0;	/* internal PLL */

> -

> -	DBG("DSI PLL%d", pll_7nm->id);

> -

> -	switch (uc) {

> -	case MSM_DSI_PHY_STANDALONE:

> -		break;

> -	case MSM_DSI_PHY_MASTER:

> -		pll_7nm->slave = pll_7nm_list[(pll_7nm->id + 1) % DSI_MAX];

> -		break;

> -	case MSM_DSI_PHY_SLAVE:

> -		data = 0x1; /* external PLL */

> -		break;

> -	default:

> -		return -EINVAL;

> -	}

> -

> -	/* set PLL src */

> -	pll_write(base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, (data << 2));

> -

> -	pll_7nm->uc = uc;

> -

> -	return 0;

> -}

> -

> -static int dsi_pll_7nm_get_provider(struct msm_dsi_pll *pll,

> -				     struct clk **byte_clk_provider,

> -				     struct clk **pixel_clk_provider)

> -{

> -	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);

> -	struct clk_hw_onecell_data *hw_data = pll_7nm->hw_data;

> -

> -	DBG("DSI PLL%d", pll_7nm->id);

> -

> -	if (byte_clk_provider)

> -		*byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk;

> -	if (pixel_clk_provider)

> -		*pixel_clk_provider = hw_data->hws[DSI_PIXEL_PLL_CLK]->clk;

> -

> -	return 0;

> -}

> -

> -static void dsi_pll_7nm_destroy(struct msm_dsi_pll *pll)

> -{

> -	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);

> -	struct device *dev = &pll_7nm->pdev->dev;

> -

> -	DBG("DSI PLL%d", pll_7nm->id);

> -	of_clk_del_provider(dev->of_node);

> -

> -	clk_hw_unregister_divider(pll_7nm->out_dsiclk_hw);

> -	clk_hw_unregister_mux(pll_7nm->pclk_mux_hw);

> -	clk_hw_unregister_fixed_factor(pll_7nm->post_out_div_clk_hw);

> -	clk_hw_unregister_fixed_factor(pll_7nm->by_2_bit_clk_hw);

> -	clk_hw_unregister_fixed_factor(pll_7nm->byte_clk_hw);

> -	clk_hw_unregister_divider(pll_7nm->bit_clk_hw);

> -	clk_hw_unregister_divider(pll_7nm->out_div_clk_hw);

> -	clk_hw_unregister(&pll_7nm->base.clk_hw);

> -}

> -

> -/*

> - * The post dividers and mux clocks are created using the standard 

> divider and

> - * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its 

> dividers/mux

> - * state to follow the master PLL's divider/mux state. Therefore, we 

> don't

> - * require special clock ops that also configure the slave PLL 

> registers

> - */

> -static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm)

> -{

> -	char clk_name[32], parent[32], vco_name[32];

> -	char parent2[32], parent3[32], parent4[32];

> -	struct clk_init_data vco_init = {

> -		.parent_names = (const char *[]){ "bi_tcxo" },

> -		.num_parents = 1,

> -		.name = vco_name,

> -		.flags = CLK_IGNORE_UNUSED,

> -		.ops = &clk_ops_dsi_pll_7nm_vco,

> -	};

> -	struct device *dev = &pll_7nm->pdev->dev;

> -	struct clk_hw_onecell_data *hw_data;

> -	struct clk_hw *hw;

> -	int ret;

> -

> -	DBG("DSI%d", pll_7nm->id);

> -

> -	hw_data = devm_kzalloc(dev, sizeof(*hw_data) +

> -			       NUM_PROVIDED_CLKS * sizeof(struct clk_hw *),

> -			       GFP_KERNEL);

> -	if (!hw_data)

> -		return -ENOMEM;

> -

> -	snprintf(vco_name, 32, "dsi%dvco_clk", pll_7nm->id);

> -	pll_7nm->base.clk_hw.init = &vco_init;

> -

> -	ret = clk_hw_register(dev, &pll_7nm->base.clk_hw);

> -	if (ret)

> -		return ret;

> -

> -	snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);

> -	snprintf(parent, 32, "dsi%dvco_clk", pll_7nm->id);

> -

> -	hw = clk_hw_register_divider(dev, clk_name,

> -				     parent, CLK_SET_RATE_PARENT,

> -				     pll_7nm->mmio +

> -				     REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE,

> -				     0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);

> -	if (IS_ERR(hw)) {

> -		ret = PTR_ERR(hw);

> -		goto err_base_clk_hw;

> -	}

> -

> -	pll_7nm->out_div_clk_hw = hw;

> -

> -	snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_7nm->id);

> -	snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);

> -

> -	/* BIT CLK: DIV_CTRL_3_0 */

> -	hw = clk_hw_register_divider(dev, clk_name, parent,

> -				     CLK_SET_RATE_PARENT,

> -				     pll_7nm->phy_cmn_mmio +

> -				     REG_DSI_7nm_PHY_CMN_CLK_CFG0,

> -				     0, 4, CLK_DIVIDER_ONE_BASED,

> -				     &pll_7nm->postdiv_lock);

> -	if (IS_ERR(hw)) {

> -		ret = PTR_ERR(hw);

> -		goto err_out_div_clk_hw;

> -	}

> -

> -	pll_7nm->bit_clk_hw = hw;

> -

> -	snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_7nm->id);

> -	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->id);

> -

> -	/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */

> -	hw = clk_hw_register_fixed_factor(dev, clk_name, parent,

> -					  CLK_SET_RATE_PARENT, 1, 8);

> -	if (IS_ERR(hw)) {

> -		ret = PTR_ERR(hw);

> -		goto err_bit_clk_hw;

> -	}

> -

> -	pll_7nm->byte_clk_hw = hw;

> -	hw_data->hws[DSI_BYTE_PLL_CLK] = hw;

> -

> -	snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->id);

> -	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->id);

> -

> -	hw = clk_hw_register_fixed_factor(dev, clk_name, parent,

> -					  0, 1, 2);

> -	if (IS_ERR(hw)) {

> -		ret = PTR_ERR(hw);

> -		goto err_byte_clk_hw;

> -	}

> -

> -	pll_7nm->by_2_bit_clk_hw = hw;

> -

> -	snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->id);

> -	snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);

> -

> -	hw = clk_hw_register_fixed_factor(dev, clk_name, parent,

> -					  0, 1, 4);

> -	if (IS_ERR(hw)) {

> -		ret = PTR_ERR(hw);

> -		goto err_by_2_bit_clk_hw;

> -	}

> -

> -	pll_7nm->post_out_div_clk_hw = hw;

> -

> -	snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_7nm->id);

> -	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->id);

> -	snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->id);

> -	snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);

> -	snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->id);

> -

> -	hw = clk_hw_register_mux(dev, clk_name,

> -				 ((const char *[]){

> -				 parent, parent2, parent3, parent4

> -				 }), 4, 0, pll_7nm->phy_cmn_mmio +

> -				 REG_DSI_7nm_PHY_CMN_CLK_CFG1,

> -				 0, 2, 0, NULL);

> -	if (IS_ERR(hw)) {

> -		ret = PTR_ERR(hw);

> -		goto err_post_out_div_clk_hw;

> -	}

> -

> -	pll_7nm->pclk_mux_hw = hw;

> -

> -	snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_7nm->id);

> -	snprintf(parent, 32, "dsi%d_pclk_mux", pll_7nm->id);

> -

> -	/* PIX CLK DIV : DIV_CTRL_7_4*/

> -	hw = clk_hw_register_divider(dev, clk_name, parent,

> -				     0, pll_7nm->phy_cmn_mmio +

> -					REG_DSI_7nm_PHY_CMN_CLK_CFG0,

> -				     4, 4, CLK_DIVIDER_ONE_BASED,

> -				     &pll_7nm->postdiv_lock);

> -	if (IS_ERR(hw)) {

> -		ret = PTR_ERR(hw);

> -		goto err_pclk_mux_hw;

> -	}

> -

> -	pll_7nm->out_dsiclk_hw = hw;

> -	hw_data->hws[DSI_PIXEL_PLL_CLK] = hw;

> -

> -	hw_data->num = NUM_PROVIDED_CLKS;

> -	pll_7nm->hw_data = hw_data;

> -

> -	ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,

> -				     pll_7nm->hw_data);

> -	if (ret) {

> -		DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);

> -		goto err_dsiclk_hw;

> -	}

> -

> -	return 0;

> -

> -err_dsiclk_hw:

> -	clk_hw_unregister_divider(pll_7nm->out_dsiclk_hw);

> -err_pclk_mux_hw:

> -	clk_hw_unregister_mux(pll_7nm->pclk_mux_hw);

> -err_post_out_div_clk_hw:

> -	clk_hw_unregister_fixed_factor(pll_7nm->post_out_div_clk_hw);

> -err_by_2_bit_clk_hw:

> -	clk_hw_unregister_fixed_factor(pll_7nm->by_2_bit_clk_hw);

> -err_byte_clk_hw:

> -	clk_hw_unregister_fixed_factor(pll_7nm->byte_clk_hw);

> -err_bit_clk_hw:

> -	clk_hw_unregister_divider(pll_7nm->bit_clk_hw);

> -err_out_div_clk_hw:

> -	clk_hw_unregister_divider(pll_7nm->out_div_clk_hw);

> -err_base_clk_hw:

> -	clk_hw_unregister(&pll_7nm->base.clk_hw);

> -

> -	return ret;

> -}

> -

> -struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev,

> -					enum msm_dsi_phy_type type, int id)

> -{

> -	struct dsi_pll_7nm *pll_7nm;

> -	struct msm_dsi_pll *pll;

> -	int ret;

> -

> -	pll_7nm = devm_kzalloc(&pdev->dev, sizeof(*pll_7nm), GFP_KERNEL);

> -	if (!pll_7nm)

> -		return ERR_PTR(-ENOMEM);

> -

> -	DBG("DSI PLL%d", id);

> -

> -	pll_7nm->pdev = pdev;

> -	pll_7nm->id = id;

> -	pll_7nm_list[id] = pll_7nm;

> -

> -	pll_7nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");

> -	if (IS_ERR_OR_NULL(pll_7nm->phy_cmn_mmio)) {

> -		DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");

> -		return ERR_PTR(-ENOMEM);

> -	}

> -

> -	pll_7nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");

> -	if (IS_ERR_OR_NULL(pll_7nm->mmio)) {

> -		DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");

> -		return ERR_PTR(-ENOMEM);

> -	}

> -

> -	spin_lock_init(&pll_7nm->postdiv_lock);

> -

> -	pll = &pll_7nm->base;

> -	pll->min_rate = 1000000000UL;

> -	pll->max_rate = 3500000000UL;

> -	if (type == MSM_DSI_PHY_7NM_V4_1) {

> -		pll->min_rate = 600000000UL;

> -		pll->max_rate = (unsigned long)5000000000ULL;

> -		/* workaround for max rate overflowing on 32-bit builds: */

> -		pll->max_rate = max(pll->max_rate, 0xffffffffUL);

> -	}

> -	pll->get_provider = dsi_pll_7nm_get_provider;

> -	pll->destroy = dsi_pll_7nm_destroy;

> -	pll->save_state = dsi_pll_7nm_save_state;

> -	pll->restore_state = dsi_pll_7nm_restore_state;

> -	pll->set_usecase = dsi_pll_7nm_set_usecase;

> -

> -	pll_7nm->vco_delay = 1;

> -

> -	ret = pll_7nm_register(pll_7nm);

> -	if (ret) {

> -		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);

> -		return ERR_PTR(ret);

> -	}

> -

> -	/* TODO: Remove this when we have proper display handover support */

> -	msm_dsi_pll_save_state(pll);

> -

> -	return pll;

> -}


Patch

diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 3cc906121fb3..1be6996b80b7 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -136,13 +136,6 @@  msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o
 msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/phy/dsi_phy_10nm.o
 msm-$(CONFIG_DRM_MSM_DSI_7NM_PHY) += dsi/phy/dsi_phy_7nm.o
 
-ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y)
-msm-y += dsi/pll/dsi_pll.o
-msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o
-msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o
-msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/pll/dsi_pll_14nm.o
-msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/pll/dsi_pll_10nm.o
-msm-$(CONFIG_DRM_MSM_DSI_7NM_PHY) += dsi/pll/dsi_pll_7nm.o
-endif
+msm-$(CONFIG_DRM_MSM_DSI_PLL) += dsi/phy/dsi_pll.o
 
 obj-$(CONFIG_DRM_MSM)	+= msm.o
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
index 655fa17a0452..5da369b5c475 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -3,11 +3,884 @@ 
  * Copyright (c) 2018, The Linux Foundation
  */
 
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
 #include <linux/iopoll.h>
 
+#include "dsi_pll.h"
 #include "dsi_phy.h"
 #include "dsi.xml.h"
 
+/*
+ * DSI PLL 10nm - clock diagram (eg: DSI0):
+ *
+ *           dsi0_pll_out_div_clk  dsi0_pll_bit_clk
+ *                              |                |
+ *                              |                |
+ *                 +---------+  |  +----------+  |  +----+
+ *  dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
+ *                 +---------+  |  +----------+  |  +----+
+ *                              |                |
+ *                              |                |         dsi0_pll_by_2_bit_clk
+ *                              |                |          |
+ *                              |                |  +----+  |  |\  dsi0_pclk_mux
+ *                              |                |--| /2 |--o--| \   |
+ *                              |                |  +----+     |  \  |  +---------+
+ *                              |                --------------|  |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
+ *                              |------------------------------|  /     +---------+
+ *                              |          +-----+             | /
+ *                              -----------| /4? |--o----------|/
+ *                                         +-----+  |           |
+ *                                                  |           |dsiclk_sel
+ *                                                  |
+ *                                                  dsi0_pll_post_out_div_clk
+ */
+
+#define DSI_BYTE_PLL_CLK		0
+#define DSI_PIXEL_PLL_CLK		1
+#define NUM_PROVIDED_CLKS		2
+
+#define VCO_REF_CLK_RATE		19200000
+
+struct dsi_pll_regs {
+	u32 pll_prop_gain_rate;
+	u32 pll_lockdet_rate;
+	u32 decimal_div_start;
+	u32 frac_div_start_low;
+	u32 frac_div_start_mid;
+	u32 frac_div_start_high;
+	u32 pll_clock_inverters;
+	u32 ssc_stepsize_low;
+	u32 ssc_stepsize_high;
+	u32 ssc_div_per_low;
+	u32 ssc_div_per_high;
+	u32 ssc_adjper_low;
+	u32 ssc_adjper_high;
+	u32 ssc_control;
+};
+
+struct dsi_pll_config {
+	u32 ref_freq;
+	bool div_override;
+	u32 output_div;
+	bool ignore_frac;
+	bool disable_prescaler;
+	bool enable_ssc;
+	bool ssc_center;
+	u32 dec_bits;
+	u32 frac_bits;
+	u32 lock_timer;
+	u32 ssc_freq;
+	u32 ssc_offset;
+	u32 ssc_adj_per;
+	u32 thresh_cycles;
+	u32 refclk_cycles;
+};
+
+struct pll_10nm_cached_state {
+	unsigned long vco_rate;
+	u8 bit_clk_div;
+	u8 pix_clk_div;
+	u8 pll_out_div;
+	u8 pll_mux;
+};
+
+struct dsi_pll_10nm {
+	struct msm_dsi_pll base;
+
+	int id;
+	struct platform_device *pdev;
+
+	void __iomem *phy_cmn_mmio;
+	void __iomem *mmio;
+
+	u64 vco_ref_clk_rate;
+	u64 vco_current_rate;
+
+	/* protects REG_DSI_10nm_PHY_CMN_CLK_CFG0 register */
+	spinlock_t postdiv_lock;
+
+	int vco_delay;
+	struct dsi_pll_config pll_configuration;
+	struct dsi_pll_regs reg_setup;
+
+	/* private clocks: */
+	struct clk_hw *out_div_clk_hw;
+	struct clk_hw *bit_clk_hw;
+	struct clk_hw *byte_clk_hw;
+	struct clk_hw *by_2_bit_clk_hw;
+	struct clk_hw *post_out_div_clk_hw;
+	struct clk_hw *pclk_mux_hw;
+	struct clk_hw *out_dsiclk_hw;
+
+	/* clock-provider: */
+	struct clk_hw_onecell_data *hw_data;
+
+	struct pll_10nm_cached_state cached_state;
+
+	enum msm_dsi_phy_usecase uc;
+	struct dsi_pll_10nm *slave;
+};
+
+#define to_pll_10nm(x)	container_of(x, struct dsi_pll_10nm, base)
+
+/*
+ * Global list of private DSI PLL struct pointers. We need this for Dual DSI
+ * mode, where the master PLL's clk_ops needs access the slave's private data
+ */
+static struct dsi_pll_10nm *pll_10nm_list[DSI_MAX];
+
+static void dsi_pll_setup_config(struct dsi_pll_10nm *pll)
+{
+	struct dsi_pll_config *config = &pll->pll_configuration;
+
+	config->ref_freq = pll->vco_ref_clk_rate;
+	config->output_div = 1;
+	config->dec_bits = 8;
+	config->frac_bits = 18;
+	config->lock_timer = 64;
+	config->ssc_freq = 31500;
+	config->ssc_offset = 5000;
+	config->ssc_adj_per = 2;
+	config->thresh_cycles = 32;
+	config->refclk_cycles = 256;
+
+	config->div_override = false;
+	config->ignore_frac = false;
+	config->disable_prescaler = false;
+
+	config->enable_ssc = false;
+	config->ssc_center = 0;
+}
+
+static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll)
+{
+	struct dsi_pll_config *config = &pll->pll_configuration;
+	struct dsi_pll_regs *regs = &pll->reg_setup;
+	u64 fref = pll->vco_ref_clk_rate;
+	u64 pll_freq;
+	u64 divider;
+	u64 dec, dec_multiple;
+	u32 frac;
+	u64 multiplier;
+
+	pll_freq = pll->vco_current_rate;
+
+	if (config->disable_prescaler)
+		divider = fref;
+	else
+		divider = fref * 2;
+
+	multiplier = 1 << config->frac_bits;
+	dec_multiple = div_u64(pll_freq * multiplier, divider);
+	dec = div_u64_rem(dec_multiple, multiplier, &frac);
+
+	if (pll_freq <= 1900000000UL)
+		regs->pll_prop_gain_rate = 8;
+	else if (pll_freq <= 3000000000UL)
+		regs->pll_prop_gain_rate = 10;
+	else
+		regs->pll_prop_gain_rate = 12;
+	if (pll_freq < 1100000000UL)
+		regs->pll_clock_inverters = 8;
+	else
+		regs->pll_clock_inverters = 0;
+
+	regs->pll_lockdet_rate = config->lock_timer;
+	regs->decimal_div_start = dec;
+	regs->frac_div_start_low = (frac & 0xff);
+	regs->frac_div_start_mid = (frac & 0xff00) >> 8;
+	regs->frac_div_start_high = (frac & 0x30000) >> 16;
+}
+
+#define SSC_CENTER		BIT(0)
+#define SSC_EN			BIT(1)
+
+static void dsi_pll_calc_ssc(struct dsi_pll_10nm *pll)
+{
+	struct dsi_pll_config *config = &pll->pll_configuration;
+	struct dsi_pll_regs *regs = &pll->reg_setup;
+	u32 ssc_per;
+	u32 ssc_mod;
+	u64 ssc_step_size;
+	u64 frac;
+
+	if (!config->enable_ssc) {
+		DBG("SSC not enabled\n");
+		return;
+	}
+
+	ssc_per = DIV_ROUND_CLOSEST(config->ref_freq, config->ssc_freq) / 2 - 1;
+	ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
+	ssc_per -= ssc_mod;
+
+	frac = regs->frac_div_start_low |
+			(regs->frac_div_start_mid << 8) |
+			(regs->frac_div_start_high << 16);
+	ssc_step_size = regs->decimal_div_start;
+	ssc_step_size *= (1 << config->frac_bits);
+	ssc_step_size += frac;
+	ssc_step_size *= config->ssc_offset;
+	ssc_step_size *= (config->ssc_adj_per + 1);
+	ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
+	ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);
+
+	regs->ssc_div_per_low = ssc_per & 0xFF;
+	regs->ssc_div_per_high = (ssc_per & 0xFF00) >> 8;
+	regs->ssc_stepsize_low = (u32)(ssc_step_size & 0xFF);
+	regs->ssc_stepsize_high = (u32)((ssc_step_size & 0xFF00) >> 8);
+	regs->ssc_adjper_low = config->ssc_adj_per & 0xFF;
+	regs->ssc_adjper_high = (config->ssc_adj_per & 0xFF00) >> 8;
+
+	regs->ssc_control = config->ssc_center ? SSC_CENTER : 0;
+
+	pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",
+		 regs->decimal_div_start, frac, config->frac_bits);
+	pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
+		 ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
+}
+
+static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll)
+{
+	void __iomem *base = pll->mmio;
+	struct dsi_pll_regs *regs = &pll->reg_setup;
+
+	if (pll->pll_configuration.enable_ssc) {
+		pr_debug("SSC is enabled\n");
+
+		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
+			  regs->ssc_stepsize_low);
+		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
+			  regs->ssc_stepsize_high);
+		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1,
+			  regs->ssc_div_per_low);
+		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
+			  regs->ssc_div_per_high);
+		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1,
+			  regs->ssc_adjper_low);
+		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1,
+			  regs->ssc_adjper_high);
+		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_CONTROL,
+			  SSC_EN | regs->ssc_control);
+	}
+}
+
+static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll)
+{
+	void __iomem *base = pll->mmio;
+
+	pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE, 0x80);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_DSM_DIVIDER, 0x00);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE,
+		  0xba);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_OUTDIV, 0x00);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE, 0x00);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x08);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1, 0xc0);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0xfa);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1,
+		  0x4c);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_PFILT, 0x29);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_IFILT, 0x3f);
+}
+
+static void dsi_pll_commit(struct dsi_pll_10nm *pll)
+{
+	void __iomem *base = pll->mmio;
+	struct dsi_pll_regs *reg = &pll->reg_setup;
+
+	pll_write(base + REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1,
+		  reg->decimal_div_start);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1,
+		  reg->frac_div_start_low);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1,
+		  reg->frac_div_start_mid);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
+		  reg->frac_div_start_high);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1,
+		  reg->pll_lockdet_rate);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10);
+	pll_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS,
+		  reg->pll_clock_inverters);
+}
+
+static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
+				     unsigned long parent_rate)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+
+	DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_10nm->id, rate,
+	    parent_rate);
+
+	pll_10nm->vco_current_rate = rate;
+	pll_10nm->vco_ref_clk_rate = VCO_REF_CLK_RATE;
+
+	dsi_pll_setup_config(pll_10nm);
+
+	dsi_pll_calc_dec_frac(pll_10nm);
+
+	dsi_pll_calc_ssc(pll_10nm);
+
+	dsi_pll_commit(pll_10nm);
+
+	dsi_pll_config_hzindep_reg(pll_10nm);
+
+	dsi_pll_ssc_commit(pll_10nm);
+
+	/* flush, ensure all register writes are done*/
+	wmb();
+
+	return 0;
+}
+
+static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll)
+{
+	struct device *dev = &pll->pdev->dev;
+	int rc;
+	u32 status = 0;
+	u32 const delay_us = 100;
+	u32 const timeout_us = 5000;
+
+	rc = readl_poll_timeout_atomic(pll->mmio +
+				       REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE,
+				       status,
+				       ((status & BIT(0)) > 0),
+				       delay_us,
+				       timeout_us);
+	if (rc)
+		DRM_DEV_ERROR(dev, "DSI PLL(%d) lock failed, status=0x%08x\n",
+			      pll->id, status);
+
+	return rc;
+}
+
+static void dsi_pll_disable_pll_bias(struct dsi_pll_10nm *pll)
+{
+	u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0);
+
+	pll_write(pll->mmio + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0);
+	pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0,
+		  data & ~BIT(5));
+	ndelay(250);
+}
+
+static void dsi_pll_enable_pll_bias(struct dsi_pll_10nm *pll)
+{
+	u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0);
+
+	pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0,
+		  data | BIT(5));
+	pll_write(pll->mmio + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
+	ndelay(250);
+}
+
+static void dsi_pll_disable_global_clk(struct dsi_pll_10nm *pll)
+{
+	u32 data;
+
+	data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
+	pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
+		  data & ~BIT(5));
+}
+
+static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll)
+{
+	u32 data;
+
+	data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
+	pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
+		  data | BIT(5));
+}
+
+static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+	struct device *dev = &pll_10nm->pdev->dev;
+	int rc;
+
+	dsi_pll_enable_pll_bias(pll_10nm);
+	if (pll_10nm->slave)
+		dsi_pll_enable_pll_bias(pll_10nm->slave);
+
+	rc = dsi_pll_10nm_vco_set_rate(hw,pll_10nm->vco_current_rate, 0);
+	if (rc) {
+		DRM_DEV_ERROR(dev, "vco_set_rate failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Start PLL */
+	pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,
+		  0x01);
+
+	/*
+	 * ensure all PLL configurations are written prior to checking
+	 * for PLL lock.
+	 */
+	wmb();
+
+	/* Check for PLL lock */
+	rc = dsi_pll_10nm_lock_status(pll_10nm);
+	if (rc) {
+		DRM_DEV_ERROR(dev, "PLL(%d) lock failed\n", pll_10nm->id);
+		goto error;
+	}
+
+	pll->pll_on = true;
+
+	dsi_pll_enable_global_clk(pll_10nm);
+	if (pll_10nm->slave)
+		dsi_pll_enable_global_clk(pll_10nm->slave);
+
+	pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_RBUF_CTRL,
+		  0x01);
+	if (pll_10nm->slave)
+		pll_write(pll_10nm->slave->phy_cmn_mmio +
+			  REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x01);
+
+error:
+	return rc;
+}
+
+static void dsi_pll_disable_sub(struct dsi_pll_10nm *pll)
+{
+	pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0);
+	dsi_pll_disable_pll_bias(pll);
+}
+
+static void dsi_pll_10nm_vco_unprepare(struct clk_hw *hw)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+
+	/*
+	 * To avoid any stray glitches while abruptly powering down the PLL,
+	 * make sure to gate the clock using the clock enable bit before
+	 * powering down the PLL.
+	 */
+	dsi_pll_disable_global_clk(pll_10nm);
+	pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0);
+	dsi_pll_disable_sub(pll_10nm);
+	if (pll_10nm->slave) {
+		dsi_pll_disable_global_clk(pll_10nm->slave);
+		dsi_pll_disable_sub(pll_10nm->slave);
+	}
+	/* flush, ensure all register writes are done */
+	wmb();
+	pll->pll_on = false;
+}
+
+static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
+						  unsigned long parent_rate)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+	struct dsi_pll_config *config = &pll_10nm->pll_configuration;
+	void __iomem *base = pll_10nm->mmio;
+	u64 ref_clk = pll_10nm->vco_ref_clk_rate;
+	u64 vco_rate = 0x0;
+	u64 multiplier;
+	u32 frac;
+	u32 dec;
+	u64 pll_freq, tmp64;
+
+	dec = pll_read(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1);
+	dec &= 0xff;
+
+	frac = pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1);
+	frac |= ((pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1) &
+		  0xff) << 8);
+	frac |= ((pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
+		  0x3) << 16);
+
+	/*
+	 * TODO:
+	 *	1. Assumes prescaler is disabled
+	 */
+	multiplier = 1 << config->frac_bits;
+	pll_freq = dec * (ref_clk * 2);
+	tmp64 = (ref_clk * 2 * frac);
+	pll_freq += div_u64(tmp64, multiplier);
+
+	vco_rate = pll_freq;
+
+	DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
+	    pll_10nm->id, (unsigned long)vco_rate, dec, frac);
+
+	return (unsigned long)vco_rate;
+}
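+
+/*
+ * Illustrative example for the reconstruction above (values assumed for
+ * exposition, not read back from hardware): with a 19.2 MHz reference,
+ * dec = 78, frac = 0x20000 and frac_bits = 18,
+ *   pll_freq = 78 * 38.4 MHz + (38.4 MHz * 131072) / 262144
+ *            = 2995.2 MHz + 19.2 MHz = 3014.4 MHz.
+ */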
+
+static const struct clk_ops clk_ops_dsi_pll_10nm_vco = {
+	.round_rate = msm_dsi_pll_helper_clk_round_rate,
+	.set_rate = dsi_pll_10nm_vco_set_rate,
+	.recalc_rate = dsi_pll_10nm_vco_recalc_rate,
+	.prepare = dsi_pll_10nm_vco_prepare,
+	.unprepare = dsi_pll_10nm_vco_unprepare,
+};
+
+/*
+ * PLL Callbacks
+ */
+
+static void dsi_pll_10nm_save_state(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+	struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
+	void __iomem *phy_base = pll_10nm->phy_cmn_mmio;
+	u32 cmn_clk_cfg0, cmn_clk_cfg1;
+
+	cached->pll_out_div = pll_read(pll_10nm->mmio +
+				       REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
+	cached->pll_out_div &= 0x3;
+
+	cmn_clk_cfg0 = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0);
+	cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
+	cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;
+
+	cmn_clk_cfg1 = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
+	cached->pll_mux = cmn_clk_cfg1 & 0x3;
+
+	DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
+	    pll_10nm->id, cached->pll_out_div, cached->bit_clk_div,
+	    cached->pix_clk_div, cached->pll_mux);
+}
+
+static int dsi_pll_10nm_restore_state(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+	struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
+	void __iomem *phy_base = pll_10nm->phy_cmn_mmio;
+	u32 val;
+	int ret;
+
+	val = pll_read(pll_10nm->mmio + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
+	val &= ~0x3;
+	val |= cached->pll_out_div;
+	pll_write(pll_10nm->mmio + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE, val);
+
+	pll_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
+		  cached->bit_clk_div | (cached->pix_clk_div << 4));
+
+	val = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
+	val &= ~0x3;
+	val |= cached->pll_mux;
+	pll_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, val);
+
+	ret = dsi_pll_10nm_vco_set_rate(&pll->clk_hw, pll_10nm->vco_current_rate, pll_10nm->vco_ref_clk_rate);
+	if (ret) {
+		DRM_DEV_ERROR(&pll_10nm->pdev->dev,
+			"restore vco rate failed. ret=%d\n", ret);
+		return ret;
+	}
+
+	DBG("DSI PLL%d", pll_10nm->id);
+
+	return 0;
+}
+
+static int dsi_pll_10nm_set_usecase(struct msm_dsi_pll *pll,
+				    enum msm_dsi_phy_usecase uc)
+{
+	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+	void __iomem *base = pll_10nm->phy_cmn_mmio;
+	u32 data = 0x0;	/* internal PLL */
+
+	DBG("DSI PLL%d", pll_10nm->id);
+
+	switch (uc) {
+	case MSM_DSI_PHY_STANDALONE:
+		break;
+	case MSM_DSI_PHY_MASTER:
+		pll_10nm->slave = pll_10nm_list[(pll_10nm->id + 1) % DSI_MAX];
+		break;
+	case MSM_DSI_PHY_SLAVE:
+		data = 0x1; /* external PLL */
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* set PLL src */
+	pll_write(base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, (data << 2));
+
+	pll_10nm->uc = uc;
+
+	return 0;
+}
+
+static int dsi_pll_10nm_get_provider(struct msm_dsi_pll *pll,
+				     struct clk **byte_clk_provider,
+				     struct clk **pixel_clk_provider)
+{
+	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+	struct clk_hw_onecell_data *hw_data = pll_10nm->hw_data;
+
+	DBG("DSI PLL%d", pll_10nm->id);
+
+	if (byte_clk_provider)
+		*byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk;
+	if (pixel_clk_provider)
+		*pixel_clk_provider = hw_data->hws[DSI_PIXEL_PLL_CLK]->clk;
+
+	return 0;
+}
+
+static void dsi_pll_10nm_destroy(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+	struct device *dev = &pll_10nm->pdev->dev;
+
+	DBG("DSI PLL%d", pll_10nm->id);
+	of_clk_del_provider(dev->of_node);
+
+	clk_hw_unregister_divider(pll_10nm->out_dsiclk_hw);
+	clk_hw_unregister_mux(pll_10nm->pclk_mux_hw);
+	clk_hw_unregister_fixed_factor(pll_10nm->post_out_div_clk_hw);
+	clk_hw_unregister_fixed_factor(pll_10nm->by_2_bit_clk_hw);
+	clk_hw_unregister_fixed_factor(pll_10nm->byte_clk_hw);
+	clk_hw_unregister_divider(pll_10nm->bit_clk_hw);
+	clk_hw_unregister_divider(pll_10nm->out_div_clk_hw);
+	clk_hw_unregister(&pll_10nm->base.clk_hw);
+}
+
+/*
+ * The post dividers and mux clocks are created using the standard divider and
+ * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux
+ * state to follow the master PLL's divider/mux state. Therefore, we don't
+ * require special clock ops that also configure the slave PLL registers
+ */
+static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
+{
+	char clk_name[32], parent[32], vco_name[32];
+	char parent2[32], parent3[32], parent4[32];
+	struct clk_init_data vco_init = {
+		.parent_names = (const char *[]){ "xo" },
+		.num_parents = 1,
+		.name = vco_name,
+		.flags = CLK_IGNORE_UNUSED,
+		.ops = &clk_ops_dsi_pll_10nm_vco,
+	};
+	struct device *dev = &pll_10nm->pdev->dev;
+	struct clk_hw_onecell_data *hw_data;
+	struct clk_hw *hw;
+	int ret;
+
+	DBG("DSI%d", pll_10nm->id);
+
+	hw_data = devm_kzalloc(dev, sizeof(*hw_data) +
+			       NUM_PROVIDED_CLKS * sizeof(struct clk_hw *),
+			       GFP_KERNEL);
+	if (!hw_data)
+		return -ENOMEM;
+
+	snprintf(vco_name, 32, "dsi%dvco_clk", pll_10nm->id);
+	pll_10nm->base.clk_hw.init = &vco_init;
+
+	ret = clk_hw_register(dev, &pll_10nm->base.clk_hw);
+	if (ret)
+		return ret;
+
+	snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
+	snprintf(parent, 32, "dsi%dvco_clk", pll_10nm->id);
+
+	hw = clk_hw_register_divider(dev, clk_name,
+				     parent, CLK_SET_RATE_PARENT,
+				     pll_10nm->mmio +
+				     REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE,
+				     0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
+	if (IS_ERR(hw)) {
+		ret = PTR_ERR(hw);
+		goto err_base_clk_hw;
+	}
+
+	pll_10nm->out_div_clk_hw = hw;
+
+	snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
+	snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
+
+	/* BIT CLK: DIV_CTRL_3_0 */
+	hw = clk_hw_register_divider(dev, clk_name, parent,
+				     CLK_SET_RATE_PARENT,
+				     pll_10nm->phy_cmn_mmio +
+				     REG_DSI_10nm_PHY_CMN_CLK_CFG0,
+				     0, 4, CLK_DIVIDER_ONE_BASED,
+				     &pll_10nm->postdiv_lock);
+	if (IS_ERR(hw)) {
+		ret = PTR_ERR(hw);
+		goto err_out_div_clk_hw;
+	}
+
+	pll_10nm->bit_clk_hw = hw;
+
+	snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_10nm->id);
+	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
+
+	/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
+	hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
+					  CLK_SET_RATE_PARENT, 1, 8);
+	if (IS_ERR(hw)) {
+		ret = PTR_ERR(hw);
+		goto err_bit_clk_hw;
+	}
+
+	pll_10nm->byte_clk_hw = hw;
+	hw_data->hws[DSI_BYTE_PLL_CLK] = hw;
+
+	snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->id);
+	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
+
+	hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
+					  0, 1, 2);
+	if (IS_ERR(hw)) {
+		ret = PTR_ERR(hw);
+		goto err_byte_clk_hw;
+	}
+
+	pll_10nm->by_2_bit_clk_hw = hw;
+
+	snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->id);
+	snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
+
+	hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
+					  0, 1, 4);
+	if (IS_ERR(hw)) {
+		ret = PTR_ERR(hw);
+		goto err_by_2_bit_clk_hw;
+	}
+
+	pll_10nm->post_out_div_clk_hw = hw;
+
+	snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_10nm->id);
+	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
+	snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->id);
+	snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
+	snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->id);
+
+	hw = clk_hw_register_mux(dev, clk_name,
+				 ((const char *[]){
+				 parent, parent2, parent3, parent4
+				 }), 4, 0, pll_10nm->phy_cmn_mmio +
+				 REG_DSI_10nm_PHY_CMN_CLK_CFG1,
+				 0, 2, 0, NULL);
+	if (IS_ERR(hw)) {
+		ret = PTR_ERR(hw);
+		goto err_post_out_div_clk_hw;
+	}
+
+	pll_10nm->pclk_mux_hw = hw;
+
+	snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_10nm->id);
+	snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->id);
+
+	/* PIX CLK DIV : DIV_CTRL_7_4 */
+	hw = clk_hw_register_divider(dev, clk_name, parent,
+				     0, pll_10nm->phy_cmn_mmio +
+					REG_DSI_10nm_PHY_CMN_CLK_CFG0,
+				     4, 4, CLK_DIVIDER_ONE_BASED,
+				     &pll_10nm->postdiv_lock);
+	if (IS_ERR(hw)) {
+		ret = PTR_ERR(hw);
+		goto err_pclk_mux_hw;
+	}
+
+	pll_10nm->out_dsiclk_hw = hw;
+	hw_data->hws[DSI_PIXEL_PLL_CLK] = hw;
+
+	hw_data->num = NUM_PROVIDED_CLKS;
+	pll_10nm->hw_data = hw_data;
+
+	ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
+				     pll_10nm->hw_data);
+	if (ret) {
+		DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
+		goto err_dsiclk_hw;
+	}
+
+	return 0;
+
+err_dsiclk_hw:
+	clk_hw_unregister_divider(pll_10nm->out_dsiclk_hw);
+err_pclk_mux_hw:
+	clk_hw_unregister_mux(pll_10nm->pclk_mux_hw);
+err_post_out_div_clk_hw:
+	clk_hw_unregister_fixed_factor(pll_10nm->post_out_div_clk_hw);
+err_by_2_bit_clk_hw:
+	clk_hw_unregister_fixed_factor(pll_10nm->by_2_bit_clk_hw);
+err_byte_clk_hw:
+	clk_hw_unregister_fixed_factor(pll_10nm->byte_clk_hw);
+err_bit_clk_hw:
+	clk_hw_unregister_divider(pll_10nm->bit_clk_hw);
+err_out_div_clk_hw:
+	clk_hw_unregister_divider(pll_10nm->out_div_clk_hw);
+err_base_clk_hw:
+	clk_hw_unregister(&pll_10nm->base.clk_hw);
+
+	return ret;
+}
+
+struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
+{
+	struct dsi_pll_10nm *pll_10nm;
+	struct msm_dsi_pll *pll;
+	int ret;
+
+	pll_10nm = devm_kzalloc(&pdev->dev, sizeof(*pll_10nm), GFP_KERNEL);
+	if (!pll_10nm)
+		return ERR_PTR(-ENOMEM);
+
+	DBG("DSI PLL%d", id);
+
+	pll_10nm->pdev = pdev;
+	pll_10nm->id = id;
+	pll_10nm_list[id] = pll_10nm;
+
+	pll_10nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
+	if (IS_ERR_OR_NULL(pll_10nm->phy_cmn_mmio)) {
+		DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	pll_10nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
+	if (IS_ERR_OR_NULL(pll_10nm->mmio)) {
+		DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	spin_lock_init(&pll_10nm->postdiv_lock);
+
+	pll = &pll_10nm->base;
+	pll->min_rate = 1000000000UL;
+	pll->max_rate = 3500000000UL;
+	pll->get_provider = dsi_pll_10nm_get_provider;
+	pll->destroy = dsi_pll_10nm_destroy;
+	pll->save_state = dsi_pll_10nm_save_state;
+	pll->restore_state = dsi_pll_10nm_restore_state;
+	pll->set_usecase = dsi_pll_10nm_set_usecase;
+
+	pll_10nm->vco_delay = 1;
+
+	ret = pll_10nm_register(pll_10nm);
+	if (ret) {
+		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
+		return ERR_PTR(ret);
+	}
+
+	/* TODO: Remove this when we have proper display handover support */
+	msm_dsi_pll_save_state(pll);
+
+	return pll;
+}
+
 static int dsi_phy_hw_v3_0_is_pll_on(struct msm_dsi_phy *phy)
 {
 	void __iomem *base = phy->base;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
index 6989730b5fbd..6a63901da7a4 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -3,13 +3,1102 @@ 
  * Copyright (c) 2016, The Linux Foundation. All rights reserved.
  */
 
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
 #include <linux/delay.h>
 
 #include "dsi_phy.h"
+#include "dsi_pll.h"
 #include "dsi.xml.h"
 
 #define PHY_14NM_CKLN_IDX	4
 
+/*
+ * DSI PLL 14nm - clock diagram (eg: DSI0):
+ *
+ *         dsi0n1_postdiv_clk
+ *                         |
+ *                         |
+ *                 +----+  |  +----+
+ *  dsi0vco_clk ---| n1 |--o--| /8 |-- dsi0pllbyte
+ *                 +----+  |  +----+
+ *                         |           dsi0n1_postdivby2_clk
+ *                         |   +----+  |
+ *                         o---| /2 |--o--|\
+ *                         |   +----+     | \   +----+
+ *                         |              |  |--| n2 |-- dsi0pll
+ *                         o--------------| /   +----+
+ *                                        |/
+ */
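+
+/*
+ * For illustration only (example numbers, not dictated by the hardware):
+ * with dsi0vco_clk at 1.5 GHz and N1 = 2, dsi0pllbyte = 1.5 GHz / 2 / 8 =
+ * 93.75 MHz, and taking the /2 path with N2 = 5 gives dsi0pll =
+ * 1.5 GHz / 2 / 2 / 5 = 75 MHz.
+ */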
+
+#define POLL_MAX_READS			15
+#define POLL_TIMEOUT_US			1000
+
+#define NUM_PROVIDED_CLKS		2
+
+#define VCO_REF_CLK_RATE		19200000
+#define VCO_MIN_RATE			1300000000UL
+#define VCO_MAX_RATE			2600000000UL
+
+#define DSI_BYTE_PLL_CLK		0
+#define DSI_PIXEL_PLL_CLK		1
+
+#define DSI_PLL_DEFAULT_VCO_POSTDIV	1
+
+struct dsi_pll_input {
+	u32 fref;	/* reference clk */
+	u32 fdata;	/* bit clock rate */
+	u32 dsiclk_sel; /* Mux configuration (see diagram) */
+	u32 ssc_en;	/* SSC enable/disable */
+	u32 ldo_en;
+
+	/* fixed params */
+	u32 refclk_dbler_en;
+	u32 vco_measure_time;
+	u32 kvco_measure_time;
+	u32 bandgap_timer;
+	u32 pll_wakeup_timer;
+	u32 plllock_cnt;
+	u32 plllock_rng;
+	u32 ssc_center;
+	u32 ssc_adj_period;
+	u32 ssc_spread;
+	u32 ssc_freq;
+	u32 pll_ie_trim;
+	u32 pll_ip_trim;
+	u32 pll_iptat_trim;
+	u32 pll_cpcset_cur;
+	u32 pll_cpmset_cur;
+
+	u32 pll_icpmset;
+	u32 pll_icpcset;
+
+	u32 pll_icpmset_p;
+	u32 pll_icpmset_m;
+
+	u32 pll_icpcset_p;
+	u32 pll_icpcset_m;
+
+	u32 pll_lpf_res1;
+	u32 pll_lpf_cap1;
+	u32 pll_lpf_cap2;
+	u32 pll_c3ctrl;
+	u32 pll_r3ctrl;
+};
+
+struct dsi_pll_output {
+	u32 pll_txclk_en;
+	u32 dec_start;
+	u32 div_frac_start;
+	u32 ssc_period;
+	u32 ssc_step_size;
+	u32 plllock_cmp;
+	u32 pll_vco_div_ref;
+	u32 pll_vco_count;
+	u32 pll_kvco_div_ref;
+	u32 pll_kvco_count;
+	u32 pll_misc1;
+	u32 pll_lpf2_postdiv;
+	u32 pll_resetsm_cntrl;
+	u32 pll_resetsm_cntrl2;
+	u32 pll_resetsm_cntrl5;
+	u32 pll_kvco_code;
+
+	u32 cmn_clk_cfg0;
+	u32 cmn_clk_cfg1;
+	u32 cmn_ldo_cntrl;
+
+	u32 pll_postdiv;
+	u32 fcvo;
+};
+
+struct pll_14nm_cached_state {
+	unsigned long vco_rate;
+	u8 n2postdiv;
+	u8 n1postdiv;
+};
+
+struct dsi_pll_14nm {
+	struct msm_dsi_pll base;
+
+	int id;
+	struct platform_device *pdev;
+
+	void __iomem *phy_cmn_mmio;
+	void __iomem *mmio;
+
+	int vco_delay;
+
+	struct dsi_pll_input in;
+	struct dsi_pll_output out;
+
+	/* protects REG_DSI_14nm_PHY_CMN_CLK_CFG0 register */
+	spinlock_t postdiv_lock;
+
+	u64 vco_current_rate;
+	u64 vco_ref_clk_rate;
+
+	/* private clocks: */
+	struct clk_hw *hws[NUM_DSI_CLOCKS_MAX];
+	u32 num_hws;
+
+	/* clock-provider: */
+	struct clk_hw_onecell_data *hw_data;
+
+	struct pll_14nm_cached_state cached_state;
+
+	enum msm_dsi_phy_usecase uc;
+	struct dsi_pll_14nm *slave;
+};
+
+#define to_pll_14nm(x)	container_of(x, struct dsi_pll_14nm, base)
+
+/*
+ * Private struct for N1/N2 post-divider clocks. These clocks are similar to
+ * the generic clk_divider class of clocks. The only difference is that it
+ * also sets the slave DSI PLL's post-dividers if in Dual DSI mode
+ */
+struct dsi_pll_14nm_postdiv {
+	struct clk_hw hw;
+
+	/* divider params */
+	u8 shift;
+	u8 width;
+	u8 flags; /* same flags as used by clk_divider struct */
+
+	struct dsi_pll_14nm *pll;
+};
+
+#define to_pll_14nm_postdiv(_hw) container_of(_hw, struct dsi_pll_14nm_postdiv, hw)
+
+/*
+ * Global list of private DSI PLL struct pointers. We need this for Dual DSI
+ * mode, where the master PLL's clk_ops needs to access the slave's private
+ * data
+ */
+static struct dsi_pll_14nm *pll_14nm_list[DSI_MAX];
+
+static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm,
+				    u32 nb_tries, u32 timeout_us)
+{
+	bool pll_locked = false;
+	void __iomem *base = pll_14nm->mmio;
+	u32 tries, val;
+
+	tries = nb_tries;
+	while (tries--) {
+		val = pll_read(base +
+			       REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
+		pll_locked = !!(val & BIT(5));
+
+		if (pll_locked)
+			break;
+
+		udelay(timeout_us);
+	}
+
+	if (!pll_locked) {
+		tries = nb_tries;
+		while (tries--) {
+			val = pll_read(base +
+				REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
+			pll_locked = !!(val & BIT(0));
+
+			if (pll_locked)
+				break;
+
+			udelay(timeout_us);
+		}
+	}
+
+	DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
+
+	return pll_locked;
+}
+
+static void dsi_pll_14nm_input_init(struct dsi_pll_14nm *pll)
+{
+	pll->in.fref = pll->vco_ref_clk_rate;
+	pll->in.fdata = 0;
+	pll->in.dsiclk_sel = 1;	/* Use the /2 path in Mux */
+	pll->in.ldo_en = 0;	/* disabled for now */
+
+	/* fixed input */
+	pll->in.refclk_dbler_en = 0;
+	pll->in.vco_measure_time = 5;
+	pll->in.kvco_measure_time = 5;
+	pll->in.bandgap_timer = 4;
+	pll->in.pll_wakeup_timer = 5;
+	pll->in.plllock_cnt = 1;
+	pll->in.plllock_rng = 0;
+
+	/*
+	 * SSC is enabled by default. We might need DT props for configuring
+	 * some SSC params like PPM and center/down spread etc.
+	 */
+	pll->in.ssc_en = 1;
+	pll->in.ssc_center = 0;		/* down spread by default */
+	pll->in.ssc_spread = 5;		/* PPM / 1000 */
+	pll->in.ssc_freq = 31500;	/* default recommended */
+	pll->in.ssc_adj_period = 37;
+
+	pll->in.pll_ie_trim = 4;
+	pll->in.pll_ip_trim = 4;
+	pll->in.pll_cpcset_cur = 1;
+	pll->in.pll_cpmset_cur = 1;
+	pll->in.pll_icpmset = 4;
+	pll->in.pll_icpcset = 4;
+	pll->in.pll_icpmset_p = 0;
+	pll->in.pll_icpmset_m = 0;
+	pll->in.pll_icpcset_p = 0;
+	pll->in.pll_icpcset_m = 0;
+	pll->in.pll_lpf_res1 = 3;
+	pll->in.pll_lpf_cap1 = 11;
+	pll->in.pll_lpf_cap2 = 1;
+	pll->in.pll_iptat_trim = 7;
+	pll->in.pll_c3ctrl = 2;
+	pll->in.pll_r3ctrl = 1;
+}
+
+#define CEIL(x, y)		(((x) + ((y) - 1)) / (y))
+
+static void pll_14nm_ssc_calc(struct dsi_pll_14nm *pll)
+{
+	u32 period, ssc_period;
+	u32 ref, rem;
+	u64 step_size;
+
+	DBG("vco=%lld ref=%lld", pll->vco_current_rate, pll->vco_ref_clk_rate);
+
+	ssc_period = pll->in.ssc_freq / 500;
+	period = (u32)pll->vco_ref_clk_rate / 1000;
+	ssc_period  = CEIL(period, ssc_period);
+	ssc_period -= 1;
+	pll->out.ssc_period = ssc_period;
+
+	DBG("ssc freq=%d spread=%d period=%d", pll->in.ssc_freq,
+	    pll->in.ssc_spread, pll->out.ssc_period);
+
+	step_size = (u32)pll->vco_current_rate;
+	ref = pll->vco_ref_clk_rate;
+	ref /= 1000;
+	step_size = div_u64(step_size, ref);
+	step_size <<= 20;
+	step_size = div_u64(step_size, 1000);
+	step_size *= pll->in.ssc_spread;
+	step_size = div_u64(step_size, 1000);
+	step_size *= (pll->in.ssc_adj_period + 1);
+
+	rem = 0;
+	step_size = div_u64_rem(step_size, ssc_period + 1, &rem);
+	if (rem)
+		step_size++;
+
+	DBG("step_size=%lld", step_size);
+
+	step_size &= 0x0ffff;	/* take lower 16 bits */
+
+	pll->out.ssc_step_size = step_size;
+}
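+
+/*
+ * Worked example for pll_14nm_ssc_calc() (inputs assumed, matching the
+ * defaults set in dsi_pll_14nm_input_init()): with ssc_freq = 31500, a
+ * 19.2 MHz reference and a 1.5 GHz VCO, ssc_period = CEIL(19200, 63) - 1
+ * = 304 and the 16-bit step size comes out to about 51033 for a 0.5%
+ * down-spread over 38 adjustment periods.
+ */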
+
+static void pll_14nm_dec_frac_calc(struct dsi_pll_14nm *pll)
+{
+	struct dsi_pll_input *pin = &pll->in;
+	struct dsi_pll_output *pout = &pll->out;
+	u64 multiplier = BIT(20);
+	u64 dec_start_multiple, dec_start, pll_comp_val;
+	u32 duration, div_frac_start;
+	u64 vco_clk_rate = pll->vco_current_rate;
+	u64 fref = pll->vco_ref_clk_rate;
+
+	DBG("vco_clk_rate=%lld ref_clk_rate=%lld", vco_clk_rate, fref);
+
+	dec_start_multiple = div_u64(vco_clk_rate * multiplier, fref);
+	div_u64_rem(dec_start_multiple, multiplier, &div_frac_start);
+
+	dec_start = div_u64(dec_start_multiple, multiplier);
+
+	pout->dec_start = (u32)dec_start;
+	pout->div_frac_start = div_frac_start;
+
+	if (pin->plllock_cnt == 0)
+		duration = 1024;
+	else if (pin->plllock_cnt == 1)
+		duration = 256;
+	else if (pin->plllock_cnt == 2)
+		duration = 128;
+	else
+		duration = 32;
+
+	pll_comp_val = duration * dec_start_multiple;
+	pll_comp_val = div_u64(pll_comp_val, multiplier);
+	do_div(pll_comp_val, 10);
+
+	pout->plllock_cmp = (u32)pll_comp_val;
+
+	pout->pll_txclk_en = 1;
+	pout->cmn_ldo_cntrl = 0x3c;
+}
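+
+/*
+ * Worked example for pll_14nm_dec_frac_calc() (assumed target rate): with
+ * fref = 19.2 MHz and a 1.5 GHz VCO,
+ *   dec_start_multiple = 1500000000 * 2^20 / 19200000 = 81920000,
+ * so dec_start = 78 and div_frac_start = 81920000 - 78 * 2^20 = 131072.
+ * With plllock_cnt = 1 (duration 256), plllock_cmp = 256 * 78.125 / 10 =
+ * 2000.
+ */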
+
+static u32 pll_14nm_kvco_slop(u32 vrate)
+{
+	u32 slop = 0;
+
+	if (vrate > VCO_MIN_RATE && vrate <= 1800000000UL)
+		slop =  600;
+	else if (vrate > 1800000000UL && vrate < 2300000000UL)
+		slop = 400;
+	else if (vrate > 2300000000UL && vrate < VCO_MAX_RATE)
+		slop = 280;
+
+	return slop;
+}
+
+static void pll_14nm_calc_vco_count(struct dsi_pll_14nm *pll)
+{
+	struct dsi_pll_input *pin = &pll->in;
+	struct dsi_pll_output *pout = &pll->out;
+	u64 vco_clk_rate = pll->vco_current_rate;
+	u64 fref = pll->vco_ref_clk_rate;
+	u64 data;
+	u32 cnt;
+
+	data = fref * pin->vco_measure_time;
+	do_div(data, 1000000);
+	data &= 0x03ff;	/* 10 bits */
+	data -= 2;
+	pout->pll_vco_div_ref = data;
+
+	data = div_u64(vco_clk_rate, 1000000);	/* unit is MHz */
+	data *= pin->vco_measure_time;
+	do_div(data, 10);
+	pout->pll_vco_count = data;
+
+	data = fref * pin->kvco_measure_time;
+	do_div(data, 1000000);
+	data &= 0x03ff;	/* 10 bits */
+	data -= 1;
+	pout->pll_kvco_div_ref = data;
+
+	cnt = pll_14nm_kvco_slop(vco_clk_rate);
+	cnt *= 2;
+	cnt /= 100;
+	cnt *= pin->kvco_measure_time;
+	pout->pll_kvco_count = cnt;
+
+	pout->pll_misc1 = 16;
+	pout->pll_resetsm_cntrl = 48;
+	pout->pll_resetsm_cntrl2 = pin->bandgap_timer << 3;
+	pout->pll_resetsm_cntrl5 = pin->pll_wakeup_timer;
+	pout->pll_kvco_code = 0;
+}
+
+static void pll_db_commit_ssc(struct dsi_pll_14nm *pll)
+{
+	void __iomem *base = pll->mmio;
+	struct dsi_pll_input *pin = &pll->in;
+	struct dsi_pll_output *pout = &pll->out;
+	u8 data;
+
+	data = pin->ssc_adj_period;
+	data &= 0x0ff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER1, data);
+	data = (pin->ssc_adj_period >> 8);
+	data &= 0x03;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER2, data);
+
+	data = pout->ssc_period;
+	data &= 0x0ff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER1, data);
+	data = (pout->ssc_period >> 8);
+	data &= 0x0ff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER2, data);
+
+	data = pout->ssc_step_size;
+	data &= 0x0ff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE1, data);
+	data = (pout->ssc_step_size >> 8);
+	data &= 0x0ff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE2, data);
+
+	data = (pin->ssc_center & 0x01);
+	data <<= 1;
+	data |= 0x01; /* enable */
+	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_EN_CENTER, data);
+
+	wmb();	/* make sure register committed */
+}
+
+static void pll_db_commit_common(struct dsi_pll_14nm *pll,
+				 struct dsi_pll_input *pin,
+				 struct dsi_pll_output *pout)
+{
+	void __iomem *base = pll->mmio;
+	u8 data;
+
+	/* configure the non-frequency-dependent pll registers */
+	data = 0;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_SYSCLK_EN_RESET, data);
+
+	data = pout->pll_txclk_en;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_TXCLK_EN, data);
+
+	data = pout->pll_resetsm_cntrl;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL, data);
+	data = pout->pll_resetsm_cntrl2;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL2, data);
+	data = pout->pll_resetsm_cntrl5;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL5, data);
+
+	data = pout->pll_vco_div_ref & 0xff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF1, data);
+	data = (pout->pll_vco_div_ref >> 8) & 0x3;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF2, data);
+
+	data = pout->pll_kvco_div_ref & 0xff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF1, data);
+	data = (pout->pll_kvco_div_ref >> 8) & 0x3;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF2, data);
+
+	data = pout->pll_misc1;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_MISC1, data);
+
+	data = pin->pll_ie_trim;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_IE_TRIM, data);
+
+	data = pin->pll_ip_trim;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_IP_TRIM, data);
+
+	data = pin->pll_cpmset_cur << 3 | pin->pll_cpcset_cur;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_CP_SET_CUR, data);
+
+	data = pin->pll_icpcset_p << 3 | pin->pll_icpcset_m;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPCSET, data);
+
+	data = pin->pll_icpmset_p << 3 | pin->pll_icpcset_m;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPMSET, data);
+
+	data = pin->pll_icpmset << 3 | pin->pll_icpcset;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICP_SET, data);
+
+	data = pin->pll_lpf_cap2 << 4 | pin->pll_lpf_cap1;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_LPF1, data);
+
+	data = pin->pll_iptat_trim;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_IPTAT_TRIM, data);
+
+	data = pin->pll_c3ctrl | pin->pll_r3ctrl << 4;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_CRCTRL, data);
+}
+
+static void pll_14nm_software_reset(struct dsi_pll_14nm *pll_14nm)
+{
+	void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;
+
+	/* de-assert pll start and apply pll sw reset */
+
+	/* stop pll */
+	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0);
+
+	/* pll sw reset */
+	pll_write_udelay(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x20, 10);
+	wmb();	/* make sure register committed */
+
+	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0);
+	wmb();	/* make sure register committed */
+}
+
+static void pll_db_commit_14nm(struct dsi_pll_14nm *pll,
+			       struct dsi_pll_input *pin,
+			       struct dsi_pll_output *pout)
+{
+	void __iomem *base = pll->mmio;
+	void __iomem *cmn_base = pll->phy_cmn_mmio;
+	u8 data;
+
+	DBG("DSI%d PLL", pll->id);
+
+	data = pout->cmn_ldo_cntrl;
+	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL, data);
+
+	pll_db_commit_common(pll, pin, pout);
+
+	pll_14nm_software_reset(pll);
+
+	data = pin->dsiclk_sel; /* set dsiclk_sel = 1  */
+	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG1, data);
+
+	data = 0xff; /* data, clk, pll normal operation */
+	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_0, data);
+
+	/* configure the frequency dependent pll registers */
+	data = pout->dec_start;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_DEC_START, data);
+
+	data = pout->div_frac_start & 0xff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1, data);
+	data = (pout->div_frac_start >> 8) & 0xff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2, data);
+	data = (pout->div_frac_start >> 16) & 0xf;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3, data);
+
+	data = pout->plllock_cmp & 0xff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP1, data);
+
+	data = (pout->plllock_cmp >> 8) & 0xff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP2, data);
+
+	data = (pout->plllock_cmp >> 16) & 0x3;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP3, data);
+
+	data = pin->plllock_cnt << 1 | pin->plllock_rng << 3;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP_EN, data);
+
+	data = pout->pll_vco_count & 0xff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT1, data);
+	data = (pout->pll_vco_count >> 8) & 0xff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT2, data);
+
+	data = pout->pll_kvco_count & 0xff;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT1, data);
+	data = (pout->pll_kvco_count >> 8) & 0x3;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT2, data);
+
+	data = (pout->pll_postdiv - 1) << 4 | pin->pll_lpf_res1;
+	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_LPF2_POSTDIV, data);
+
+	if (pin->ssc_en)
+		pll_db_commit_ssc(pll);
+
+	wmb();	/* make sure register committed */
+}
+
+/*
+ * VCO clock Callbacks
+ */
+static int dsi_pll_14nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
+				     unsigned long parent_rate)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+	struct dsi_pll_input *pin = &pll_14nm->in;
+	struct dsi_pll_output *pout = &pll_14nm->out;
+
+	DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_14nm->id, rate,
+	    parent_rate);
+
+	pll_14nm->vco_current_rate = rate;
+	pll_14nm->vco_ref_clk_rate = VCO_REF_CLK_RATE;
+
+	dsi_pll_14nm_input_init(pll_14nm);
+
+	/*
+	 * This configures the post divider internal to the VCO. It's
+	 * fixed to divide by 1 for now.
+	 *
+	 * tx_band = pll_postdiv.
+	 * 0: divided by 1
+	 * 1: divided by 2
+	 * 2: divided by 4
+	 * 3: divided by 8
+	 */
+	pout->pll_postdiv = DSI_PLL_DEFAULT_VCO_POSTDIV;
+
+	pll_14nm_dec_frac_calc(pll_14nm);
+
+	if (pin->ssc_en)
+		pll_14nm_ssc_calc(pll_14nm);
+
+	pll_14nm_calc_vco_count(pll_14nm);
+
+	/* commit the slave DSI PLL registers if we're master. Note that we
+	 * don't lock the slave PLL. We just ensure that the PLL/PHY registers
+	 * of the master and slave are identical
+	 */
+	if (pll_14nm->uc == MSM_DSI_PHY_MASTER) {
+		struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;
+
+		pll_db_commit_14nm(pll_14nm_slave, pin, pout);
+	}
+
+	pll_db_commit_14nm(pll_14nm, pin, pout);
+
+	return 0;
+}
+
+static unsigned long dsi_pll_14nm_vco_recalc_rate(struct clk_hw *hw,
+						  unsigned long parent_rate)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+	void __iomem *base = pll_14nm->mmio;
+	u64 vco_rate, multiplier = BIT(20);
+	u32 div_frac_start;
+	u32 dec_start;
+	u64 ref_clk = parent_rate;
+
+	dec_start = pll_read(base + REG_DSI_14nm_PHY_PLL_DEC_START);
+	dec_start &= 0x0ff;
+
+	DBG("dec_start = %x", dec_start);
+
+	div_frac_start = (pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3)
+				& 0xf) << 16;
+	div_frac_start |= (pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2)
+				& 0xff) << 8;
+	div_frac_start |= pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1)
+				& 0xff;
+
+	DBG("div_frac_start = %x", div_frac_start);
+
+	vco_rate = ref_clk * dec_start;
+
+	vco_rate += ((ref_clk * div_frac_start) / multiplier);
+
+	/*
+	 * Recalculating the rate from dec_start and frac_start doesn't end up
+	 * being the rate we originally set. Convert the freq to kHz, round it
+	 * up and convert it back to Hz.
+	 */
+	vco_rate = DIV_ROUND_UP_ULL(vco_rate, 1000) * 1000;
+
+	DBG("returning vco rate = %lu", (unsigned long)vco_rate);
+
+	return (unsigned long)vco_rate;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_14nm_vco = {
+	.round_rate = msm_dsi_pll_helper_clk_round_rate,
+	.set_rate = dsi_pll_14nm_vco_set_rate,
+	.recalc_rate = dsi_pll_14nm_vco_recalc_rate,
+	.prepare = msm_dsi_pll_helper_clk_prepare,
+	.unprepare = msm_dsi_pll_helper_clk_unprepare,
+};
+
+/*
+ * N1 and N2 post-divider clock callbacks
+ */
+#define div_mask(width)	((1 << (width)) - 1)
+static unsigned long dsi_pll_14nm_postdiv_recalc_rate(struct clk_hw *hw,
+						      unsigned long parent_rate)
+{
+	struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
+	struct dsi_pll_14nm *pll_14nm = postdiv->pll;
+	void __iomem *base = pll_14nm->phy_cmn_mmio;
+	u8 shift = postdiv->shift;
+	u8 width = postdiv->width;
+	u32 val;
+
+	DBG("DSI%d PLL parent rate=%lu", pll_14nm->id, parent_rate);
+
+	val = pll_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0) >> shift;
+	val &= div_mask(width);
+
+	return divider_recalc_rate(hw, parent_rate, val, NULL,
+				   postdiv->flags, width);
+}
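+
+/*
+ * Example (assumed values): the post-dividers are CLK_DIVIDER_ONE_BASED,
+ * so a register field value of 2 divides by 2; a 1.5 GHz parent then
+ * recalculates to 750 MHz.
+ */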
+
+static long dsi_pll_14nm_postdiv_round_rate(struct clk_hw *hw,
+					    unsigned long rate,
+					    unsigned long *prate)
+{
+	struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
+	struct dsi_pll_14nm *pll_14nm = postdiv->pll;
+
+	DBG("DSI%d PLL parent rate=%lu", pll_14nm->id, rate);
+
+	return divider_round_rate(hw, rate, prate, NULL,
+				  postdiv->width,
+				  postdiv->flags);
+}
+
+static int dsi_pll_14nm_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
+					 unsigned long parent_rate)
+{
+	struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
+	struct dsi_pll_14nm *pll_14nm = postdiv->pll;
+	void __iomem *base = pll_14nm->phy_cmn_mmio;
+	spinlock_t *lock = &pll_14nm->postdiv_lock;
+	u8 shift = postdiv->shift;
+	u8 width = postdiv->width;
+	unsigned int value;
+	unsigned long flags = 0;
+	u32 val;
+
+	DBG("DSI%d PLL rate=%lu parent rate=%lu", pll_14nm->id, rate,
+	    parent_rate);
+
+	value = divider_get_val(rate, parent_rate, NULL, postdiv->width,
+				postdiv->flags);
+
+	spin_lock_irqsave(lock, flags);
+
+	val = pll_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);
+	val &= ~(div_mask(width) << shift);
+
+	val |= value << shift;
+	pll_write(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val);
+
+	/* If we're master in dual DSI mode, then the slave PLL's post-dividers
+	 * follow the master's post dividers
+	 */
+	if (pll_14nm->uc == MSM_DSI_PHY_MASTER) {
+		struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;
+		void __iomem *slave_base = pll_14nm_slave->phy_cmn_mmio;
+
+		pll_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val);
+	}
+
+	spin_unlock_irqrestore(lock, flags);
+
+	return 0;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_14nm_postdiv = {
+	.recalc_rate = dsi_pll_14nm_postdiv_recalc_rate,
+	.round_rate = dsi_pll_14nm_postdiv_round_rate,
+	.set_rate = dsi_pll_14nm_postdiv_set_rate,
+};
+
+/*
+ * PLL Callbacks
+ */
+
+static int dsi_pll_14nm_enable_seq(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+	void __iomem *base = pll_14nm->mmio;
+	void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;
+	bool locked;
+
+	DBG("");
+
+	pll_write(base + REG_DSI_14nm_PHY_PLL_VREF_CFG1, 0x10);
+	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 1);
+
+	locked = pll_14nm_poll_for_ready(pll_14nm, POLL_MAX_READS,
+					 POLL_TIMEOUT_US);
+
+	if (unlikely(!locked))
+		DRM_DEV_ERROR(&pll_14nm->pdev->dev, "DSI PLL lock failed\n");
+	else
+		DBG("DSI PLL lock success");
+
+	return locked ? 0 : -EINVAL;
+}
+
+static void dsi_pll_14nm_disable_seq(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+	void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;
+
+	DBG("");
+
+	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0);
+}
+
+static void dsi_pll_14nm_save_state(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+	struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state;
+	void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;
+	u32 data;
+
+	data = pll_read(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);
+
+	cached_state->n1postdiv = data & 0xf;
+	cached_state->n2postdiv = (data >> 4) & 0xf;
+
+	DBG("DSI%d PLL save state %x %x", pll_14nm->id,
+	    cached_state->n1postdiv, cached_state->n2postdiv);
+
+	cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw);
+}
+
+static int dsi_pll_14nm_restore_state(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+	struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state;
+	void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;
+	u32 data;
+	int ret;
+
+	ret = dsi_pll_14nm_vco_set_rate(&pll->clk_hw,
+					cached_state->vco_rate, 0);
+	if (ret) {
+		DRM_DEV_ERROR(&pll_14nm->pdev->dev,
+			"restore vco rate failed. ret=%d\n", ret);
+		return ret;
+	}
+
+	data = cached_state->n1postdiv | (cached_state->n2postdiv << 4);
+
+	DBG("DSI%d PLL restore state %x %x", pll_14nm->id,
+	    cached_state->n1postdiv, cached_state->n2postdiv);
+
+	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data);
+
+	/* also restore post-dividers for slave DSI PLL */
+	if (pll_14nm->uc == MSM_DSI_PHY_MASTER) {
+		struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;
+		void __iomem *slave_base = pll_14nm_slave->phy_cmn_mmio;
+
+		pll_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data);
+	}
+
+	return 0;
+}
+
+static int dsi_pll_14nm_set_usecase(struct msm_dsi_pll *pll,
+				    enum msm_dsi_phy_usecase uc)
+{
+	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+	void __iomem *base = pll_14nm->mmio;
+	u32 clkbuflr_en, bandgap = 0;
+
+	switch (uc) {
+	case MSM_DSI_PHY_STANDALONE:
+		clkbuflr_en = 0x1;
+		break;
+	case MSM_DSI_PHY_MASTER:
+		clkbuflr_en = 0x3;
+		pll_14nm->slave = pll_14nm_list[(pll_14nm->id + 1) % DSI_MAX];
+		break;
+	case MSM_DSI_PHY_SLAVE:
+		clkbuflr_en = 0x0;
+		bandgap = 0x3;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	pll_write(base + REG_DSI_14nm_PHY_PLL_CLKBUFLR_EN, clkbuflr_en);
+	if (bandgap)
+		pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_BANDGAP, bandgap);
+
+	pll_14nm->uc = uc;
+
+	return 0;
+}
+
+static int dsi_pll_14nm_get_provider(struct msm_dsi_pll *pll,
+				     struct clk **byte_clk_provider,
+				     struct clk **pixel_clk_provider)
+{
+	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+	struct clk_hw_onecell_data *hw_data = pll_14nm->hw_data;
+
+	if (byte_clk_provider)
+		*byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk;
+	if (pixel_clk_provider)
+		*pixel_clk_provider = hw_data->hws[DSI_PIXEL_PLL_CLK]->clk;
+
+	return 0;
+}
+
+static void dsi_pll_14nm_destroy(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+	struct platform_device *pdev = pll_14nm->pdev;
+	int num_hws = pll_14nm->num_hws;
+
+	of_clk_del_provider(pdev->dev.of_node);
+
+	while (num_hws--)
+		clk_hw_unregister(pll_14nm->hws[num_hws]);
+}
+
+static struct clk_hw *pll_14nm_postdiv_register(struct dsi_pll_14nm *pll_14nm,
+						const char *name,
+						const char *parent_name,
+						unsigned long flags,
+						u8 shift)
+{
+	struct dsi_pll_14nm_postdiv *pll_postdiv;
+	struct device *dev = &pll_14nm->pdev->dev;
+	struct clk_init_data postdiv_init = {
+		.parent_names = (const char *[]) { parent_name },
+		.num_parents = 1,
+		.name = name,
+		.flags = flags,
+		.ops = &clk_ops_dsi_pll_14nm_postdiv,
+	};
+	int ret;
+
+	pll_postdiv = devm_kzalloc(dev, sizeof(*pll_postdiv), GFP_KERNEL);
+	if (!pll_postdiv)
+		return ERR_PTR(-ENOMEM);
+
+	pll_postdiv->pll = pll_14nm;
+	pll_postdiv->shift = shift;
+	/* both N1 and N2 postdividers are 4 bits wide */
+	pll_postdiv->width = 4;
+	/* range of each divider is from 1 to 15 */
+	pll_postdiv->flags = CLK_DIVIDER_ONE_BASED;
+	pll_postdiv->hw.init = &postdiv_init;
+
+	ret = clk_hw_register(dev, &pll_postdiv->hw);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return &pll_postdiv->hw;
+}
+
+static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm)
+{
+	char clk_name[32], parent[32], vco_name[32];
+	struct clk_init_data vco_init = {
+		.parent_names = (const char *[]){ "xo" },
+		.num_parents = 1,
+		.name = vco_name,
+		.flags = CLK_IGNORE_UNUSED,
+		.ops = &clk_ops_dsi_pll_14nm_vco,
+	};
+	struct device *dev = &pll_14nm->pdev->dev;
+	struct clk_hw **hws = pll_14nm->hws;
+	struct clk_hw_onecell_data *hw_data;
+	struct clk_hw *hw;
+	int num = 0;
+	int ret;
+
+	DBG("DSI%d", pll_14nm->id);
+
+	hw_data = devm_kzalloc(dev, sizeof(*hw_data) +
+			       NUM_PROVIDED_CLKS * sizeof(struct clk_hw *),
+			       GFP_KERNEL);
+	if (!hw_data)
+		return -ENOMEM;
+
+	snprintf(vco_name, 32, "dsi%dvco_clk", pll_14nm->id);
+	pll_14nm->base.clk_hw.init = &vco_init;
+
+	ret = clk_hw_register(dev, &pll_14nm->base.clk_hw);
+	if (ret)
+		return ret;
+
+	hws[num++] = &pll_14nm->base.clk_hw;
+
+	snprintf(clk_name, 32, "dsi%dn1_postdiv_clk", pll_14nm->id);
+	snprintf(parent, 32, "dsi%dvco_clk", pll_14nm->id);
+
+	/* N1 postdiv, bits 0-3 in REG_DSI_14nm_PHY_CMN_CLK_CFG0 */
+	hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent,
+				       CLK_SET_RATE_PARENT, 0);
+	if (IS_ERR(hw))
+		return PTR_ERR(hw);
+
+	hws[num++] = hw;
+
+	snprintf(clk_name, 32, "dsi%dpllbyte", pll_14nm->id);
+	snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->id);
+
+	/* DSI Byte clock = VCO_CLK / N1 / 8 */
+	hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
+					  CLK_SET_RATE_PARENT, 1, 8);
+	if (IS_ERR(hw))
+		return PTR_ERR(hw);
+
+	hws[num++] = hw;
+	hw_data->hws[DSI_BYTE_PLL_CLK] = hw;
+
+	snprintf(clk_name, 32, "dsi%dn1_postdivby2_clk", pll_14nm->id);
+	snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->id);
+
+	/*
+	 * Skip the mux for now, force DSICLK_SEL to 1, add a /2 divider
+	 * on the way. Don't let it set parent.
+	 */
+	hw = clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 1, 2);
+	if (IS_ERR(hw))
+		return PTR_ERR(hw);
+
+	hws[num++] = hw;
+
+	snprintf(clk_name, 32, "dsi%dpll", pll_14nm->id);
+	snprintf(parent, 32, "dsi%dn1_postdivby2_clk", pll_14nm->id);
+
+	/* DSI pixel clock = VCO_CLK / N1 / 2 / N2
+	 * This is the output of N2 post-divider, bits 4-7 in
+	 * REG_DSI_14nm_PHY_CMN_CLK_CFG0. Don't let it set parent.
+	 */
+	hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent, 0, 4);
+	if (IS_ERR(hw))
+		return PTR_ERR(hw);
+
+	hws[num++] = hw;
+	hw_data->hws[DSI_PIXEL_PLL_CLK]	= hw;
+
+	pll_14nm->num_hws = num;
+
+	hw_data->num = NUM_PROVIDED_CLKS;
+	pll_14nm->hw_data = hw_data;
+
+	ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
+				     pll_14nm->hw_data);
+	if (ret) {
+		DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id)
+{
+	struct dsi_pll_14nm *pll_14nm;
+	struct msm_dsi_pll *pll;
+	int ret;
+
+	if (!pdev)
+		return ERR_PTR(-ENODEV);
+
+	pll_14nm = devm_kzalloc(&pdev->dev, sizeof(*pll_14nm), GFP_KERNEL);
+	if (!pll_14nm)
+		return ERR_PTR(-ENOMEM);
+
+	DBG("PLL%d", id);
+
+	pll_14nm->pdev = pdev;
+	pll_14nm->id = id;
+	pll_14nm_list[id] = pll_14nm;
+
+	pll_14nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
+	if (IS_ERR_OR_NULL(pll_14nm->phy_cmn_mmio)) {
+		DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	pll_14nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
+	if (IS_ERR_OR_NULL(pll_14nm->mmio)) {
+		DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	spin_lock_init(&pll_14nm->postdiv_lock);
+
+	pll = &pll_14nm->base;
+	pll->min_rate = VCO_MIN_RATE;
+	pll->max_rate = VCO_MAX_RATE;
+	pll->get_provider = dsi_pll_14nm_get_provider;
+	pll->destroy = dsi_pll_14nm_destroy;
+	pll->disable_seq = dsi_pll_14nm_disable_seq;
+	pll->save_state = dsi_pll_14nm_save_state;
+	pll->restore_state = dsi_pll_14nm_restore_state;
+	pll->set_usecase = dsi_pll_14nm_set_usecase;
+
+	pll_14nm->vco_delay = 1;
+
+	pll->en_seq_cnt = 1;
+	pll->enable_seqs[0] = dsi_pll_14nm_enable_seq;
+
+	ret = pll_14nm_register(pll_14nm);
+	if (ret) {
+		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
+		return ERR_PTR(ret);
+	}
+
+	return pll;
+}
+
 static void dsi_14nm_dphy_set_timing(struct msm_dsi_phy *phy,
 				     struct msm_dsi_dphy_timing *timing,
 				     int lane_idx)
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
index 5bf79de0da67..2f502efa4dd5 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -3,9 +3,646 @@ 
  * Copyright (c) 2015, The Linux Foundation. All rights reserved.
  */
 
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+
 #include "dsi_phy.h"
+#include "dsi_pll.h"
 #include "dsi.xml.h"
 
+/*
+ * DSI PLL 28nm - clock diagram (eg: DSI0):
+ *
+ *         dsi0analog_postdiv_clk
+ *                             |         dsi0indirect_path_div2_clk
+ *                             |          |
+ *                   +------+  |  +----+  |  |\   dsi0byte_mux
+ *  dsi0vco_clk --o--| DIV1 |--o--| /2 |--o--| \   |
+ *                |  +------+     +----+     | m|  |  +----+
+ *                |                          | u|--o--| /4 |-- dsi0pllbyte
+ *                |                          | x|     +----+
+ *                o--------------------------| /
+ *                |                          |/
+ *                |          +------+
+ *                o----------| DIV3 |------------------------- dsi0pll
+ *                           +------+
+ */
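+
+/*
+ * For illustration only (assumed rates): with dsi0vco_clk at 750 MHz and
+ * DIV1 = 1, the indirect path gives dsi0pllbyte = 750 MHz / 2 / 4 =
+ * 93.75 MHz, while dsi0pll is the VCO rate divided by DIV3.
+ */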
+
+#define POLL_MAX_READS			10
+#define POLL_TIMEOUT_US		50
+
+#define NUM_PROVIDED_CLKS		2
+
+#define VCO_REF_CLK_RATE		19200000
+#define VCO_MIN_RATE			350000000
+#define VCO_MAX_RATE			750000000
+
+#define DSI_BYTE_PLL_CLK		0
+#define DSI_PIXEL_PLL_CLK		1
+
+#define LPFR_LUT_SIZE			10
+struct lpfr_cfg {
+	unsigned long vco_rate;
+	u32 resistance;
+};
+
+/* Loop filter resistance: */
+static const struct lpfr_cfg lpfr_lut[LPFR_LUT_SIZE] = {
+	{ 479500000,  8 },
+	{ 480000000, 11 },
+	{ 575500000,  8 },
+	{ 576000000, 12 },
+	{ 610500000,  8 },
+	{ 659500000,  9 },
+	{ 671500000, 10 },
+	{ 672000000, 14 },
+	{ 708500000, 10 },
+	{ 750000000, 11 },
+};
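+
+/*
+ * Example lookup (assumed rate): a 600 MHz VCO matches the first entry
+ * with vco_rate >= 600 MHz, i.e. the 610.5 MHz row, selecting a loop
+ * filter resistance code of 8.
+ */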
+
+struct pll_28nm_cached_state {
+	unsigned long vco_rate;
+	u8 postdiv3;
+	u8 postdiv1;
+	u8 byte_mux;
+};
+
+struct dsi_pll_28nm {
+	struct msm_dsi_pll base;
+
+	int id;
+	struct platform_device *pdev;
+	void __iomem *mmio;
+
+	int vco_delay;
+
+	/* private clocks: */
+	struct clk *clks[NUM_DSI_CLOCKS_MAX];
+	u32 num_clks;
+
+	/* clock-provider: */
+	struct clk *provided_clks[NUM_PROVIDED_CLKS];
+	struct clk_onecell_data clk_data;
+
+	struct pll_28nm_cached_state cached_state;
+};
+
+#define to_pll_28nm(x)	container_of(x, struct dsi_pll_28nm, base)
+
+static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
+				u32 nb_tries, u32 timeout_us)
+{
+	bool pll_locked = false;
+	u32 val;
+
+	while (nb_tries--) {
+		val = pll_read(pll_28nm->mmio + REG_DSI_28nm_PHY_PLL_STATUS);
+		pll_locked = !!(val & DSI_28nm_PHY_PLL_STATUS_PLL_RDY);
+
+		if (pll_locked)
+			break;
+
+		udelay(timeout_us);
+	}
+	DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
+
+	return pll_locked;
+}
+
+static void pll_28nm_software_reset(struct dsi_pll_28nm *pll_28nm)
+{
+	void __iomem *base = pll_28nm->mmio;
+
+	/*
+	 * Add HW recommended delays after toggling the software
+	 * reset bit off and back on.
+	 */
+	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG,
+			DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, 1);
+	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG, 0x00, 1);
+}
+
+/*
+ * Clock Callbacks
+ */
+static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long parent_rate)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+	struct device *dev = &pll_28nm->pdev->dev;
+	void __iomem *base = pll_28nm->mmio;
+	unsigned long div_fbx1000, gen_vco_clk;
+	u32 refclk_cfg, frac_n_mode, frac_n_value;
+	u32 sdm_cfg0, sdm_cfg1, sdm_cfg2, sdm_cfg3;
+	u32 cal_cfg10, cal_cfg11;
+	u32 rem;
+	int i;
+
+	VERB("rate=%lu, parent's=%lu", rate, parent_rate);
+
+	/* Force postdiv2 to be div-4 */
+	pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV2_CFG, 3);
+
+	/* Configure the Loop filter resistance */
+	for (i = 0; i < LPFR_LUT_SIZE; i++)
+		if (rate <= lpfr_lut[i].vco_rate)
+			break;
+	if (i == LPFR_LUT_SIZE) {
+		DRM_DEV_ERROR(dev, "unable to get loop filter resistance. vco=%lu\n",
+				rate);
+		return -EINVAL;
+	}
+	pll_write(base + REG_DSI_28nm_PHY_PLL_LPFR_CFG, lpfr_lut[i].resistance);
+
+	/* Loop filter capacitance values : c1 and c2 */
+	pll_write(base + REG_DSI_28nm_PHY_PLL_LPFC1_CFG, 0x70);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_LPFC2_CFG, 0x15);
+
+	rem = rate % VCO_REF_CLK_RATE;
+	if (rem) {
+		refclk_cfg = DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
+		frac_n_mode = 1;
+		div_fbx1000 = rate / (VCO_REF_CLK_RATE / 500);
+		gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 500);
+	} else {
+		refclk_cfg = 0x0;
+		frac_n_mode = 0;
+		div_fbx1000 = rate / (VCO_REF_CLK_RATE / 1000);
+		gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 1000);
+	}
+
+	DBG("refclk_cfg = %d", refclk_cfg);
+
+	rem = div_fbx1000 % 1000;
+	frac_n_value = (rem << 16) / 1000;
+
+	DBG("div_fb = %lu", div_fbx1000);
+	DBG("frac_n_value = %d", frac_n_value);
+
+	DBG("Generated VCO Clock: %lu", gen_vco_clk);
+	rem = 0;
+	sdm_cfg1 = pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1);
+	sdm_cfg1 &= ~DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK;
+	if (frac_n_mode) {
+		sdm_cfg0 = 0x0;
+		sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(0);
+		sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(
+				(u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
+		sdm_cfg3 = frac_n_value >> 8;
+		sdm_cfg2 = frac_n_value & 0xff;
+	} else {
+		sdm_cfg0 = DSI_28nm_PHY_PLL_SDM_CFG0_BYP;
+		sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(
+				(u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
+		sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(0);
+		sdm_cfg2 = 0;
+		sdm_cfg3 = 0;
+	}
+
+	DBG("sdm_cfg0=%d", sdm_cfg0);
+	DBG("sdm_cfg1=%d", sdm_cfg1);
+	DBG("sdm_cfg2=%d", sdm_cfg2);
+	DBG("sdm_cfg3=%d", sdm_cfg3);
+
+	cal_cfg11 = (u32)(gen_vco_clk / (256 * 1000000));
+	cal_cfg10 = (u32)((gen_vco_clk % (256 * 1000000)) / 1000000);
+	DBG("cal_cfg10=%d, cal_cfg11=%d", cal_cfg10, cal_cfg11);
+
+	pll_write(base + REG_DSI_28nm_PHY_PLL_CHGPUMP_CFG, 0x02);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG3,    0x2b);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG4,    0x06);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,  0x0d);
+
+	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1, sdm_cfg1);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2,
+		DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2));
+	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3,
+		DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3));
+	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG4, 0x00);
+
+	/* Add hardware recommended delay for correct PLL configuration */
+	if (pll_28nm->vco_delay)
+		udelay(pll_28nm->vco_delay);
+
+	pll_write(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG, refclk_cfg);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG, 0x00);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_VCOLPF_CFG, 0x31);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0,   sdm_cfg0);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG0,   0x12);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG6,   0x30);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG7,   0x00);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG8,   0x60);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG9,   0x00);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG10,  cal_cfg10 & 0xff);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG11,  cal_cfg11 & 0xff);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_EFUSE_CFG,  0x20);
+
+	return 0;
+}
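+
+/*
+ * Worked example for the fractional-N setup above (assumed target rate):
+ * a 750 MHz VCO is not an integer multiple of the 19.2 MHz reference, so
+ * frac_n_mode = 1 with the doubler enabled, div_fbx1000 = 750000000 /
+ * 38400 = 19531 and frac_n_value = (531 << 16) / 1000 = 34799.
+ */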
+
+static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+
+	return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
+					POLL_TIMEOUT_US);
+}
+
+static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
+		unsigned long parent_rate)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+	void __iomem *base = pll_28nm->mmio;
+	u32 sdm0, doubler, sdm_byp_div;
+	u32 sdm_dc_off, sdm_freq_seed, sdm2, sdm3;
+	u32 ref_clk = VCO_REF_CLK_RATE;
+	unsigned long vco_rate;
+
+	VERB("parent_rate=%lu", parent_rate);
+
+	/* Check to see if the ref clk doubler is enabled */
+	doubler = pll_read(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG) &
+			DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
+	ref_clk += (doubler * VCO_REF_CLK_RATE);
+
+	/* see if it is integer mode or sdm mode */
+	sdm0 = pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0);
+	if (sdm0 & DSI_28nm_PHY_PLL_SDM_CFG0_BYP) {
+		/* integer mode */
+		sdm_byp_div = FIELD(
+				pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0),
+				DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV) + 1;
+		vco_rate = ref_clk * sdm_byp_div;
+	} else {
+		/* sdm mode */
+		sdm_dc_off = FIELD(
+				pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1),
+				DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET);
+		DBG("sdm_dc_off = %d", sdm_dc_off);
+		sdm2 = FIELD(pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2),
+				DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0);
+		sdm3 = FIELD(pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3),
+				DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8);
+		sdm_freq_seed = (sdm3 << 8) | sdm2;
+		DBG("sdm_freq_seed = %d", sdm_freq_seed);
+
+		vco_rate = (ref_clk * (sdm_dc_off + 1)) +
+			mult_frac(ref_clk, sdm_freq_seed, BIT(16));
+		DBG("vco rate = %lu", vco_rate);
+	}
+
+	DBG("returning vco rate = %lu", vco_rate);
+
+	return vco_rate;
+}
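+
+/*
+ * Continuing the example above (assumed register contents): with the
+ * doubler enabled (ref_clk = 38.4 MHz), sdm_dc_off = 18 and
+ * sdm_freq_seed = 34799 the recovered rate is
+ * 38.4 MHz * 19 + 38.4 MHz * 34799 / 65536, i.e. roughly 749.99 MHz.
+ */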
+
+static const struct clk_ops clk_ops_dsi_pll_28nm_vco = {
+	.round_rate = msm_dsi_pll_helper_clk_round_rate,
+	.set_rate = dsi_pll_28nm_clk_set_rate,
+	.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
+	.prepare = msm_dsi_pll_helper_clk_prepare,
+	.unprepare = msm_dsi_pll_helper_clk_unprepare,
+	.is_enabled = dsi_pll_28nm_clk_is_enabled,
+};
+
+/*
+ * PLL Callbacks
+ */
+static int dsi_pll_28nm_enable_seq_hpm(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+	struct device *dev = &pll_28nm->pdev->dev;
+	void __iomem *base = pll_28nm->mmio;
+	u32 max_reads = 5, timeout_us = 100;
+	bool locked;
+	u32 val;
+	int i;
+
+	DBG("id=%d", pll_28nm->id);
+
+	pll_28nm_software_reset(pll_28nm);
+
+	/*
+	 * PLL power up sequence.
+	 * Add necessary delays recommended by hardware.
+	 */
+	val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
+	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);
+
+	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
+	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
+
+	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
+	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
+	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
+
+	for (i = 0; i < 2; i++) {
+		/* DSI Uniphy lock detect setting */
+		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,
+				0x0c, 100);
+		pll_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);
+
+		/* poll for PLL ready status */
+		locked = pll_28nm_poll_for_ready(pll_28nm,
+						max_reads, timeout_us);
+		if (locked)
+			break;
+
+		pll_28nm_software_reset(pll_28nm);
+
+		/*
+		 * PLL power up sequence.
+		 * Add necessary delays recommended by hardware.
+		 */
+		val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
+		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);
+
+		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
+		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
+
+		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
+		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 250);
+
+		val &= ~DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
+		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
+
+		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
+		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
+		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
+	}
+
+	if (unlikely(!locked))
+		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
+	else
+		DBG("DSI PLL Lock success");
+
+	return locked ? 0 : -EINVAL;
+}
+
+static int dsi_pll_28nm_enable_seq_lp(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+	struct device *dev = &pll_28nm->pdev->dev;
+	void __iomem *base = pll_28nm->mmio;
+	bool locked;
+	u32 max_reads = 10, timeout_us = 50;
+	u32 val;
+
+	DBG("id=%d", pll_28nm->id);
+
+	pll_28nm_software_reset(pll_28nm);
+
+	/*
+	 * PLL power up sequence.
+	 * Add necessary delays recommended by hardware.
+	 */
+	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_CAL_CFG1, 0x34, 500);
+
+	val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
+	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
+	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B |
+		DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
+	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+	/* DSI PLL toggle lock detect setting */
+	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x04, 500);
+	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x05, 512);
+
+	locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
+
+	if (unlikely(!locked))
+		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
+	else
+		DBG("DSI PLL lock success");
+
+	return locked ? 0 : -EINVAL;
+}
+
+static void dsi_pll_28nm_disable_seq(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+
+	DBG("id=%d", pll_28nm->id);
+	pll_write(pll_28nm->mmio + REG_DSI_28nm_PHY_PLL_GLB_CFG, 0x00);
+}
+
+static void dsi_pll_28nm_save_state(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
+	void __iomem *base = pll_28nm->mmio;
+
+	cached_state->postdiv3 =
+			pll_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG);
+	cached_state->postdiv1 =
+			pll_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG);
+	cached_state->byte_mux = pll_read(base + REG_DSI_28nm_PHY_PLL_VREG_CFG);
+	if (dsi_pll_28nm_clk_is_enabled(&pll->clk_hw))
+		cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw);
+	else
+		cached_state->vco_rate = 0;
+}
+
+static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
+	void __iomem *base = pll_28nm->mmio;
+	int ret;
+
+	ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
+					cached_state->vco_rate, 0);
+	if (ret) {
+		DRM_DEV_ERROR(&pll_28nm->pdev->dev,
+			"restore vco rate failed. ret=%d\n", ret);
+		return ret;
+	}
+
+	pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
+			cached_state->postdiv3);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
+			cached_state->postdiv1);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,
+			cached_state->byte_mux);
+
+	return 0;
+}
+
+static int dsi_pll_28nm_get_provider(struct msm_dsi_pll *pll,
+				struct clk **byte_clk_provider,
+				struct clk **pixel_clk_provider)
+{
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+
+	if (byte_clk_provider)
+		*byte_clk_provider = pll_28nm->provided_clks[DSI_BYTE_PLL_CLK];
+	if (pixel_clk_provider)
+		*pixel_clk_provider =
+				pll_28nm->provided_clks[DSI_PIXEL_PLL_CLK];
+
+	return 0;
+}
+
+static void dsi_pll_28nm_destroy(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+	int i;
+
+	msm_dsi_pll_helper_unregister_clks(pll_28nm->pdev,
+					pll_28nm->clks, pll_28nm->num_clks);
+
+	for (i = 0; i < NUM_PROVIDED_CLKS; i++)
+		pll_28nm->provided_clks[i] = NULL;
+
+	pll_28nm->num_clks = 0;
+	pll_28nm->clk_data.clks = NULL;
+	pll_28nm->clk_data.clk_num = 0;
+}
+
+static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
+{
+	char clk_name[32], parent1[32], parent2[32], vco_name[32];
+	struct clk_init_data vco_init = {
+		.parent_names = (const char *[]){ "xo" },
+		.num_parents = 1,
+		.name = vco_name,
+		.flags = CLK_IGNORE_UNUSED,
+		.ops = &clk_ops_dsi_pll_28nm_vco,
+	};
+	struct device *dev = &pll_28nm->pdev->dev;
+	struct clk **clks = pll_28nm->clks;
+	struct clk **provided_clks = pll_28nm->provided_clks;
+	int num = 0;
+	int ret;
+
+	DBG("%d", pll_28nm->id);
+
+	snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->id);
+	pll_28nm->base.clk_hw.init = &vco_init;
+	clks[num++] = clk_register(dev, &pll_28nm->base.clk_hw);
+
+	snprintf(clk_name, 32, "dsi%danalog_postdiv_clk", pll_28nm->id);
+	snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);
+	clks[num++] = clk_register_divider(dev, clk_name,
+			parent1, CLK_SET_RATE_PARENT,
+			pll_28nm->mmio +
+			REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
+			0, 4, 0, NULL);
+
+	snprintf(clk_name, 32, "dsi%dindirect_path_div2_clk", pll_28nm->id);
+	snprintf(parent1, 32, "dsi%danalog_postdiv_clk", pll_28nm->id);
+	clks[num++] = clk_register_fixed_factor(dev, clk_name,
+			parent1, CLK_SET_RATE_PARENT,
+			1, 2);
+
+	snprintf(clk_name, 32, "dsi%dpll", pll_28nm->id);
+	snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);
+	clks[num++] = provided_clks[DSI_PIXEL_PLL_CLK] =
+			clk_register_divider(dev, clk_name,
+				parent1, 0, pll_28nm->mmio +
+				REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
+				0, 8, 0, NULL);
+
+	snprintf(clk_name, 32, "dsi%dbyte_mux", pll_28nm->id);
+	snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);
+	snprintf(parent2, 32, "dsi%dindirect_path_div2_clk", pll_28nm->id);
+	clks[num++] = clk_register_mux(dev, clk_name,
+			((const char *[]){
+				parent1, parent2
+			}), 2, CLK_SET_RATE_PARENT, pll_28nm->mmio +
+			REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);
+
+	snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->id);
+	snprintf(parent1, 32, "dsi%dbyte_mux", pll_28nm->id);
+	clks[num++] = provided_clks[DSI_BYTE_PLL_CLK] =
+			clk_register_fixed_factor(dev, clk_name,
+				parent1, CLK_SET_RATE_PARENT, 1, 4);
+
+	pll_28nm->num_clks = num;
+
+	pll_28nm->clk_data.clk_num = NUM_PROVIDED_CLKS;
+	pll_28nm->clk_data.clks = provided_clks;
+
+	ret = of_clk_add_provider(dev->of_node,
+			of_clk_src_onecell_get, &pll_28nm->clk_data);
+	if (ret) {
+		DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
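As a quick cross-check of the clock tree registered above, here is a minimal userspace sketch (not part of this patch); the 750 MHz VCO rate and post-divider values are made up for illustration, and the byte mux is assumed to select the indirect /2 path:

#include <stdio.h>

int main(void)
{
	unsigned long vco = 750000000UL;	/* assumed VCO rate */
	unsigned int postdiv1 = 1;		/* analog_postdiv: register value + 1 */
	unsigned int postdiv3 = 6;		/* pixel divider:  register value + 1 */

	/* byte path: vco -> analog_postdiv -> fixed /2 -> byte_mux -> fixed /4 */
	unsigned long byte_clk = vco / postdiv1 / 2 / 4;
	/* pixel path: vco -> POSTDIV3 divider */
	unsigned long pixel_clk = vco / postdiv3;

	printf("byte=%lu pixel=%lu\n", byte_clk, pixel_clk);
	/* byte=93750000 pixel=125000000 */
	return 0;
}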
+struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,
+					enum msm_dsi_phy_type type, int id)
+{
+	struct dsi_pll_28nm *pll_28nm;
+	struct msm_dsi_pll *pll;
+	int ret;
+
+	if (!pdev)
+		return ERR_PTR(-ENODEV);
+
+	pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
+	if (!pll_28nm)
+		return ERR_PTR(-ENOMEM);
+
+	pll_28nm->pdev = pdev;
+	pll_28nm->id = id;
+
+	pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
+	if (IS_ERR_OR_NULL(pll_28nm->mmio)) {
+		DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	pll = &pll_28nm->base;
+	pll->min_rate = VCO_MIN_RATE;
+	pll->max_rate = VCO_MAX_RATE;
+	pll->get_provider = dsi_pll_28nm_get_provider;
+	pll->destroy = dsi_pll_28nm_destroy;
+	pll->disable_seq = dsi_pll_28nm_disable_seq;
+	pll->save_state = dsi_pll_28nm_save_state;
+	pll->restore_state = dsi_pll_28nm_restore_state;
+
+	if (type == MSM_DSI_PHY_28NM_HPM) {
+		pll_28nm->vco_delay = 1;
+
+		pll->en_seq_cnt = 3;
+		pll->enable_seqs[0] = dsi_pll_28nm_enable_seq_hpm;
+		pll->enable_seqs[1] = dsi_pll_28nm_enable_seq_hpm;
+		pll->enable_seqs[2] = dsi_pll_28nm_enable_seq_hpm;
+	} else if (type == MSM_DSI_PHY_28NM_LP) {
+		pll_28nm->vco_delay = 1000;
+
+		pll->en_seq_cnt = 1;
+		pll->enable_seqs[0] = dsi_pll_28nm_enable_seq_lp;
+	} else {
+		DRM_DEV_ERROR(&pdev->dev, "phy type (%d) is not 28nm\n", type);
+		return ERR_PTR(-EINVAL);
+	}
+
+	ret = pll_28nm_register(pll_28nm);
+	if (ret) {
+		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
+		return ERR_PTR(ret);
+	}
+
+	return pll;
+}
+
+
 static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
 		struct msm_dsi_dphy_timing *timing)
 {
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
index 5d33de27a0f4..4a40513057e8 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -3,11 +3,530 @@ 
  * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
  */
 
+#include <linux/clk-provider.h>
 #include <linux/delay.h>
 
 #include "dsi_phy.h"
+#include "dsi_pll.h"
 #include "dsi.xml.h"
 
+/*
+ * DSI PLL 28nm (8960/A family) - clock diagram (eg: DSI1):
+ *
+ *
+ *                        +------+
+ *  dsi1vco_clk ----o-----| DIV1 |---dsi1pllbit (not exposed as clock)
+ *  F * byte_clk    |     +------+
+ *                  | bit clock divider (F / 8)
+ *                  |
+ *                  |     +------+
+ *                  o-----| DIV2 |---dsi1pllbyte---o---> To byte RCG
+ *                  |     +------+                 | (sets parent rate)
+ *                  | byte clock divider (F)       |
+ *                  |                              |
+ *                  |                              o---> To esc RCG
+ *                  |                                (doesn't set parent rate)
+ *                  |
+ *                  |     +------+
+ *                  o-----| DIV3 |----dsi1pll------o---> To dsi RCG
+ *                        +------+                 | (sets parent rate)
+ *                  dsi clock divider (F * magic)  |
+ *                                                 |
+ *                                                 o---> To pixel rcg
+ *                                                  (doesn't set parent rate)
+ */
+
+#define POLL_MAX_READS		8000
+#define POLL_TIMEOUT_US		1
+
+#define NUM_PROVIDED_CLKS	2
+
+#define VCO_REF_CLK_RATE	27000000
+#define VCO_MIN_RATE		600000000
+#define VCO_MAX_RATE		1200000000
+
+#define DSI_BYTE_PLL_CLK	0
+#define DSI_PIXEL_PLL_CLK	1
+
+#define VCO_PREF_DIV_RATIO	27
+
+struct pll_28nm_cached_state {
+	unsigned long vco_rate;
+	u8 postdiv3;
+	u8 postdiv2;
+	u8 postdiv1;
+};
+
+struct clk_bytediv {
+	struct clk_hw hw;
+	void __iomem *reg;
+};
+
+struct dsi_pll_28nm {
+	struct msm_dsi_pll base;
+
+	int id;
+	struct platform_device *pdev;
+	void __iomem *mmio;
+
+	/* custom byte clock divider */
+	struct clk_bytediv *bytediv;
+
+	/* private clocks: */
+	struct clk *clks[NUM_DSI_CLOCKS_MAX];
+	u32 num_clks;
+
+	/* clock-provider: */
+	struct clk *provided_clks[NUM_PROVIDED_CLKS];
+	struct clk_onecell_data clk_data;
+
+	struct pll_28nm_cached_state cached_state;
+};
+
+#define to_pll_28nm(x)	container_of(x, struct dsi_pll_28nm, base)
+
+static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
+				    int nb_tries, int timeout_us)
+{
+	bool pll_locked = false;
+	u32 val;
+
+	while (nb_tries--) {
+		val = pll_read(pll_28nm->mmio + REG_DSI_28nm_8960_PHY_PLL_RDY);
+		pll_locked = !!(val & DSI_28nm_8960_PHY_PLL_RDY_PLL_RDY);
+
+		if (pll_locked)
+			break;
+
+		udelay(timeout_us);
+	}
+	DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
+
+	return pll_locked;
+}
+
+/*
+ * Clock Callbacks
+ */
+static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+				     unsigned long parent_rate)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+	void __iomem *base = pll_28nm->mmio;
+	u32 val, temp, fb_divider;
+
+	DBG("rate=%lu, parent's=%lu", rate, parent_rate);
+
+	temp = rate / 10;
+	val = VCO_REF_CLK_RATE / 10;
+	fb_divider = (temp * VCO_PREF_DIV_RATIO) / val;
+	fb_divider = fb_divider / 2 - 1;
+	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1,
+			fb_divider & 0xff);
+
+	val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2);
+
+	val |= (fb_divider >> 8) & 0x07;
+
+	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2,
+			val);
+
+	val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
+
+	val |= (VCO_PREF_DIV_RATIO - 1) & 0x3f;
+
+	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3,
+			val);
+
+	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_6,
+			0xf);
+
+	val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
+	val |= 0x7 << 4;
+	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
+			val);
+
+	return 0;
+}
+
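The feedback-divider arithmetic above is easier to see with concrete numbers. A standalone sketch (not part of this patch), assuming a 648 MHz VCO request against the 27 MHz PXO reference:

#include <stdio.h>

#define VCO_REF_CLK_RATE	27000000
#define VCO_PREF_DIV_RATIO	27

int main(void)
{
	unsigned int rate = 648000000;			/* assumed VCO request */
	unsigned int temp = rate / 10;
	unsigned int val = VCO_REF_CLK_RATE / 10;
	unsigned int fb_divider = (temp * VCO_PREF_DIV_RATIO) / val;
	unsigned long long vco;

	fb_divider = fb_divider / 2 - 1;		/* value programmed into CTRL_1/CTRL_2 */

	/* recalc_rate() inverts this: (ref / pref_div) * (fb + 1) * 2 */
	vco = (VCO_REF_CLK_RATE / VCO_PREF_DIV_RATIO) * (fb_divider + 1) * 2ULL;

	printf("fb_divider=%u vco=%llu\n", fb_divider, vco);
	/* fb_divider=323 vco=648000000 */
	return 0;
}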
+static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+
+	return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
+					POLL_TIMEOUT_US);
+}
+
+static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
+						  unsigned long parent_rate)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+	void __iomem *base = pll_28nm->mmio;
+	unsigned long vco_rate;
+	u32 status, fb_divider, temp, ref_divider;
+
+	VERB("parent_rate=%lu", parent_rate);
+
+	status = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0);
+
+	if (status & DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE) {
+		fb_divider = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1);
+		fb_divider &= 0xff;
+		temp = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2) & 0x07;
+		fb_divider = (temp << 8) | fb_divider;
+		fb_divider += 1;
+
+		ref_divider = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
+		ref_divider &= 0x3f;
+		ref_divider += 1;
+
+		/* multiply by 2 */
+		vco_rate = (parent_rate / ref_divider) * fb_divider * 2;
+	} else {
+		vco_rate = 0;
+	}
+
+	DBG("returning vco rate = %lu", vco_rate);
+
+	return vco_rate;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_28nm_vco = {
+	.round_rate = msm_dsi_pll_helper_clk_round_rate,
+	.set_rate = dsi_pll_28nm_clk_set_rate,
+	.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
+	.prepare = msm_dsi_pll_helper_clk_prepare,
+	.unprepare = msm_dsi_pll_helper_clk_unprepare,
+	.is_enabled = dsi_pll_28nm_clk_is_enabled,
+};
+
+/*
+ * Custom byte clock divider clk_ops
+ *
+ * This clock is the entry point to configuring the PLL. The user (dsi host)
+ * will set this clock's rate to the desired byte clock rate. The VCO lock
+ * frequency is a multiple of the byte clock rate. The multiplication factor
+ * (shown as F in the diagram above) is a function of the byte clock rate.
+ *
+ * This custom divider clock ensures that its parent (VCO) is set to the
+ * desired rate, and that the byte clock postdivider (POSTDIV2) is configured
+ * accordingly
+ */
+#define to_clk_bytediv(_hw) container_of(_hw, struct clk_bytediv, hw)
+
+static unsigned long clk_bytediv_recalc_rate(struct clk_hw *hw,
+		unsigned long parent_rate)
+{
+	struct clk_bytediv *bytediv = to_clk_bytediv(hw);
+	unsigned int div;
+
+	div = pll_read(bytediv->reg) & 0xff;
+
+	return parent_rate / (div + 1);
+}
+
+/* find multiplication factor(wrt byte clock) at which the VCO should be set */
+static unsigned int get_vco_mul_factor(unsigned long byte_clk_rate)
+{
+	unsigned long bit_mhz;
+
+	/* convert to bit clock in Mhz */
+	bit_mhz = (byte_clk_rate * 8) / 1000000;
+
+	if (bit_mhz < 125)
+		return 64;
+	else if (bit_mhz < 250)
+		return 32;
+	else if (bit_mhz < 600)
+		return 16;
+	else
+		return 8;
+}
+
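A standalone sketch (not part of this patch) of how this factor drives the VCO request in clk_bytediv_round_rate()/clk_bytediv_set_rate(), assuming a 100 MHz byte clock:

#include <stdio.h>

static unsigned int get_vco_mul_factor(unsigned long byte_clk_rate)
{
	unsigned long bit_mhz = (byte_clk_rate * 8) / 1000000;

	if (bit_mhz < 125)
		return 64;
	else if (bit_mhz < 250)
		return 32;
	else if (bit_mhz < 600)
		return 16;
	return 8;
}

int main(void)
{
	unsigned long byte_clk = 100000000;	/* assumed 100 MHz byte clock */
	unsigned int factor = get_vco_mul_factor(byte_clk);

	/* round_rate() asks the parent VCO for byte_clk * factor */
	printf("factor=%u vco_target=%lu div_reg=%u\n",
	       factor, byte_clk * factor, factor - 1);
	/* factor=8 vco_target=800000000 div_reg=7 */
	return 0;
}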
+static long clk_bytediv_round_rate(struct clk_hw *hw, unsigned long rate,
+				   unsigned long *prate)
+{
+	unsigned long best_parent;
+	unsigned int factor;
+
+	factor = get_vco_mul_factor(rate);
+
+	best_parent = rate * factor;
+	*prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
+
+	return *prate / factor;
+}
+
+static int clk_bytediv_set_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long parent_rate)
+{
+	struct clk_bytediv *bytediv = to_clk_bytediv(hw);
+	u32 val;
+	unsigned int factor;
+
+	factor = get_vco_mul_factor(rate);
+
+	val = pll_read(bytediv->reg);
+	val |= (factor - 1) & 0xff;
+	pll_write(bytediv->reg, val);
+
+	return 0;
+}
+
+/* Our special byte clock divider ops */
+static const struct clk_ops clk_bytediv_ops = {
+	.round_rate = clk_bytediv_round_rate,
+	.set_rate = clk_bytediv_set_rate,
+	.recalc_rate = clk_bytediv_recalc_rate,
+};
+
+/*
+ * PLL Callbacks
+ */
+static int dsi_pll_28nm_enable_seq(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+	struct device *dev = &pll_28nm->pdev->dev;
+	void __iomem *base = pll_28nm->mmio;
+	bool locked;
+	unsigned int bit_div, byte_div;
+	int max_reads = 1000, timeout_us = 100;
+	u32 val;
+
+	DBG("id=%d", pll_28nm->id);
+
+	/*
+	 * before enabling the PLL, configure the bit clock divider since we
+	 * don't expose it as a clock to the outside world
+	 * 1: read back the byte clock divider that should already be set
+	 * 2: divide by 8 to get bit clock divider
+	 * 3: write it to POSTDIV1
+	 */
+	val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
+	byte_div = val + 1;
+	bit_div = byte_div / 8;
+
+	val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
+	val &= ~0xf;
+	val |= (bit_div - 1);
+	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8, val);
+
+	/* enable the PLL */
+	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0,
+			DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE);
+
+	locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
+
+	if (unlikely(!locked))
+		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
+	else
+		DBG("DSI PLL lock success");
+
+	return locked ? 0 : -EINVAL;
+}
+
+static void dsi_pll_28nm_disable_seq(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+
+	DBG("id=%d", pll_28nm->id);
+	pll_write(pll_28nm->mmio + REG_DSI_28nm_8960_PHY_PLL_CTRL_0, 0x00);
+}
+
+static void dsi_pll_28nm_save_state(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
+	void __iomem *base = pll_28nm->mmio;
+
+	cached_state->postdiv3 =
+			pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10);
+	cached_state->postdiv2 =
+			pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
+	cached_state->postdiv1 =
+			pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
+
+	cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw);
+}
+
+static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
+	void __iomem *base = pll_28nm->mmio;
+	int ret;
+
+	ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
+					cached_state->vco_rate, 0);
+	if (ret) {
+		DRM_DEV_ERROR(&pll_28nm->pdev->dev,
+			"restore vco rate failed. ret=%d\n", ret);
+		return ret;
+	}
+
+	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
+			cached_state->postdiv3);
+	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9,
+			cached_state->postdiv2);
+	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
+			cached_state->postdiv1);
+
+	return 0;
+}
+
+static int dsi_pll_28nm_get_provider(struct msm_dsi_pll *pll,
+				struct clk **byte_clk_provider,
+				struct clk **pixel_clk_provider)
+{
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+
+	if (byte_clk_provider)
+		*byte_clk_provider = pll_28nm->provided_clks[DSI_BYTE_PLL_CLK];
+	if (pixel_clk_provider)
+		*pixel_clk_provider =
+				pll_28nm->provided_clks[DSI_PIXEL_PLL_CLK];
+
+	return 0;
+}
+
+static void dsi_pll_28nm_destroy(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+
+	msm_dsi_pll_helper_unregister_clks(pll_28nm->pdev,
+					pll_28nm->clks, pll_28nm->num_clks);
+}
+
+static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
+{
+	char *clk_name, *parent_name, *vco_name;
+	struct clk_init_data vco_init = {
+		.parent_names = (const char *[]){ "pxo" },
+		.num_parents = 1,
+		.flags = CLK_IGNORE_UNUSED,
+		.ops = &clk_ops_dsi_pll_28nm_vco,
+	};
+	struct device *dev = &pll_28nm->pdev->dev;
+	struct clk **clks = pll_28nm->clks;
+	struct clk **provided_clks = pll_28nm->provided_clks;
+	struct clk_bytediv *bytediv;
+	struct clk_init_data bytediv_init = { };
+	int ret, num = 0;
+
+	DBG("%d", pll_28nm->id);
+
+	bytediv = devm_kzalloc(dev, sizeof(*bytediv), GFP_KERNEL);
+	if (!bytediv)
+		return -ENOMEM;
+
+	vco_name = devm_kzalloc(dev, 32, GFP_KERNEL);
+	if (!vco_name)
+		return -ENOMEM;
+
+	parent_name = devm_kzalloc(dev, 32, GFP_KERNEL);
+	if (!parent_name)
+		return -ENOMEM;
+
+	clk_name = devm_kzalloc(dev, 32, GFP_KERNEL);
+	if (!clk_name)
+		return -ENOMEM;
+
+	pll_28nm->bytediv = bytediv;
+
+	snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->id);
+	vco_init.name = vco_name;
+
+	pll_28nm->base.clk_hw.init = &vco_init;
+
+	clks[num++] = clk_register(dev, &pll_28nm->base.clk_hw);
+
+	/* prepare and register bytediv */
+	bytediv->hw.init = &bytediv_init;
+	bytediv->reg = pll_28nm->mmio + REG_DSI_28nm_8960_PHY_PLL_CTRL_9;
+
+	snprintf(parent_name, 32, "dsi%dvco_clk", pll_28nm->id);
+	snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->id);
+
+	bytediv_init.name = clk_name;
+	bytediv_init.ops = &clk_bytediv_ops;
+	bytediv_init.flags = CLK_SET_RATE_PARENT;
+	bytediv_init.parent_names = (const char * const *) &parent_name;
+	bytediv_init.num_parents = 1;
+
+	/* DIV2 */
+	clks[num++] = provided_clks[DSI_BYTE_PLL_CLK] =
+			clk_register(dev, &bytediv->hw);
+
+	snprintf(clk_name, 32, "dsi%dpll", pll_28nm->id);
+	/* DIV3 */
+	clks[num++] = provided_clks[DSI_PIXEL_PLL_CLK] =
+			clk_register_divider(dev, clk_name,
+				parent_name, 0, pll_28nm->mmio +
+				REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
+				0, 8, 0, NULL);
+
+	pll_28nm->num_clks = num;
+
+	pll_28nm->clk_data.clk_num = NUM_PROVIDED_CLKS;
+	pll_28nm->clk_data.clks = provided_clks;
+
+	ret = of_clk_add_provider(dev->of_node,
+			of_clk_src_onecell_get, &pll_28nm->clk_data);
+	if (ret) {
+		DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
+					       int id)
+{
+	struct dsi_pll_28nm *pll_28nm;
+	struct msm_dsi_pll *pll;
+	int ret;
+
+	if (!pdev)
+		return ERR_PTR(-ENODEV);
+
+	pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
+	if (!pll_28nm)
+		return ERR_PTR(-ENOMEM);
+
+	pll_28nm->pdev = pdev;
+	pll_28nm->id = id + 1;
+
+	pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
+	if (IS_ERR_OR_NULL(pll_28nm->mmio)) {
+		DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	pll = &pll_28nm->base;
+	pll->min_rate = VCO_MIN_RATE;
+	pll->max_rate = VCO_MAX_RATE;
+	pll->get_provider = dsi_pll_28nm_get_provider;
+	pll->destroy = dsi_pll_28nm_destroy;
+	pll->disable_seq = dsi_pll_28nm_disable_seq;
+	pll->save_state = dsi_pll_28nm_save_state;
+	pll->restore_state = dsi_pll_28nm_restore_state;
+
+	pll->en_seq_cnt = 1;
+	pll->enable_seqs[0] = dsi_pll_28nm_enable_seq;
+
+	ret = pll_28nm_register(pll_28nm);
+	if (ret) {
+		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
+		return ERR_PTR(ret);
+	}
+
+	return pll;
+}
+
 static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
 		struct msm_dsi_dphy_timing *timing)
 {
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index cbfeec860e69..f9af9d70b56a 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -3,11 +3,916 @@ 
  * Copyright (c) 2018, The Linux Foundation
  */
 
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
 #include <linux/iopoll.h>
 
+#include "dsi_pll.h"
 #include "dsi_phy.h"
 #include "dsi.xml.h"
 
+/*
+ * DSI PLL 7nm - clock diagram (eg: DSI0): TODO: update CPHY diagram
+ *
+ *           dsi0_pll_out_div_clk  dsi0_pll_bit_clk
+ *                              |                |
+ *                              |                |
+ *                 +---------+  |  +----------+  |  +----+
+ *  dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
+ *                 +---------+  |  +----------+  |  +----+
+ *                              |                |
+ *                              |                |         dsi0_pll_by_2_bit_clk
+ *                              |                |          |
+ *                              |                |  +----+  |  |\  dsi0_pclk_mux
+ *                              |                |--| /2 |--o--| \   |
+ *                              |                |  +----+     |  \  |  +---------+
+ *                              |                --------------|  |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
+ *                              |------------------------------|  /     +---------+
+ *                              |          +-----+             | /
+ *                              -----------| /4? |--o----------|/
+ *                                         +-----+  |           |
+ *                                                  |           |dsiclk_sel
+ *                                                  |
+ *                                                  dsi0_pll_post_out_div_clk
+ */
+
+#define DSI_BYTE_PLL_CLK		0
+#define DSI_PIXEL_PLL_CLK		1
+#define NUM_PROVIDED_CLKS		2
+
+#define VCO_REF_CLK_RATE		19200000
+
+struct dsi_pll_regs {
+	u32 pll_prop_gain_rate;
+	u32 pll_lockdet_rate;
+	u32 decimal_div_start;
+	u32 frac_div_start_low;
+	u32 frac_div_start_mid;
+	u32 frac_div_start_high;
+	u32 pll_clock_inverters;
+	u32 ssc_stepsize_low;
+	u32 ssc_stepsize_high;
+	u32 ssc_div_per_low;
+	u32 ssc_div_per_high;
+	u32 ssc_adjper_low;
+	u32 ssc_adjper_high;
+	u32 ssc_control;
+};
+
+struct dsi_pll_config {
+	u32 ref_freq;
+	bool div_override;
+	u32 output_div;
+	bool ignore_frac;
+	bool disable_prescaler;
+	bool enable_ssc;
+	bool ssc_center;
+	u32 dec_bits;
+	u32 frac_bits;
+	u32 lock_timer;
+	u32 ssc_freq;
+	u32 ssc_offset;
+	u32 ssc_adj_per;
+	u32 thresh_cycles;
+	u32 refclk_cycles;
+};
+
+struct pll_7nm_cached_state {
+	unsigned long vco_rate;
+	u8 bit_clk_div;
+	u8 pix_clk_div;
+	u8 pll_out_div;
+	u8 pll_mux;
+};
+
+struct dsi_pll_7nm {
+	struct msm_dsi_pll base;
+
+	int id;
+	struct platform_device *pdev;
+
+	void __iomem *phy_cmn_mmio;
+	void __iomem *mmio;
+
+	u64 vco_ref_clk_rate;
+	u64 vco_current_rate;
+
+	/* protects REG_DSI_7nm_PHY_CMN_CLK_CFG0 register */
+	spinlock_t postdiv_lock;
+
+	int vco_delay;
+	struct dsi_pll_config pll_configuration;
+	struct dsi_pll_regs reg_setup;
+
+	/* private clocks: */
+	struct clk_hw *out_div_clk_hw;
+	struct clk_hw *bit_clk_hw;
+	struct clk_hw *byte_clk_hw;
+	struct clk_hw *by_2_bit_clk_hw;
+	struct clk_hw *post_out_div_clk_hw;
+	struct clk_hw *pclk_mux_hw;
+	struct clk_hw *out_dsiclk_hw;
+
+	/* clock-provider: */
+	struct clk_hw_onecell_data *hw_data;
+
+	struct pll_7nm_cached_state cached_state;
+
+	enum msm_dsi_phy_usecase uc;
+	struct dsi_pll_7nm *slave;
+};
+
+#define to_pll_7nm(x)	container_of(x, struct dsi_pll_7nm, base)
+
+/*
+ * Global list of private DSI PLL struct pointers. We need this for Dual DSI
+ * mode, where the master PLL's clk_ops needs to access the slave's private data
+ */
+static struct dsi_pll_7nm *pll_7nm_list[DSI_MAX];
+
+static void dsi_pll_setup_config(struct dsi_pll_7nm *pll)
+{
+	struct dsi_pll_config *config = &pll->pll_configuration;
+
+	config->ref_freq = pll->vco_ref_clk_rate;
+	config->output_div = 1;
+	config->dec_bits = 8;
+	config->frac_bits = 18;
+	config->lock_timer = 64;
+	config->ssc_freq = 31500;
+	config->ssc_offset = 4800;
+	config->ssc_adj_per = 2;
+	config->thresh_cycles = 32;
+	config->refclk_cycles = 256;
+
+	config->div_override = false;
+	config->ignore_frac = false;
+	config->disable_prescaler = false;
+
+	/* TODO: ssc enable */
+	config->enable_ssc = false;
+	config->ssc_center = 0;
+}
+
+static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll)
+{
+	struct dsi_pll_config *config = &pll->pll_configuration;
+	struct dsi_pll_regs *regs = &pll->reg_setup;
+	u64 fref = pll->vco_ref_clk_rate;
+	u64 pll_freq;
+	u64 divider;
+	u64 dec, dec_multiple;
+	u32 frac;
+	u64 multiplier;
+
+	pll_freq = pll->vco_current_rate;
+
+	if (config->disable_prescaler)
+		divider = fref;
+	else
+		divider = fref * 2;
+
+	multiplier = 1 << config->frac_bits;
+	dec_multiple = div_u64(pll_freq * multiplier, divider);
+	div_u64_rem(dec_multiple, multiplier, &frac);
+
+	dec = div_u64(dec_multiple, multiplier);
+
+	if (pll->base.type != MSM_DSI_PHY_7NM_V4_1)
+		regs->pll_clock_inverters = 0x28;
+	else if (pll_freq <= 1000000000ULL)
+		regs->pll_clock_inverters = 0xa0;
+	else if (pll_freq <= 2500000000ULL)
+		regs->pll_clock_inverters = 0x20;
+	else if (pll_freq <= 3020000000ULL)
+		regs->pll_clock_inverters = 0x00;
+	else
+		regs->pll_clock_inverters = 0x40;
+
+	regs->pll_lockdet_rate = config->lock_timer;
+	regs->decimal_div_start = dec;
+	regs->frac_div_start_low = (frac & 0xff);
+	regs->frac_div_start_mid = (frac & 0xff00) >> 8;
+	regs->frac_div_start_high = (frac & 0x30000) >> 16;
+}
+
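For reference, a minimal userspace sketch (not part of this patch) of the decimal/fractional split computed above, assuming a 1.5 GHz VCO, the 19.2 MHz reference and the default 18 fractional bits:

#include <stdio.h>

int main(void)
{
	unsigned long long pll_freq = 1500000000ULL;	/* assumed VCO rate */
	unsigned long long divider = 19200000ULL * 2;	/* prescaler enabled */
	unsigned long long multiplier = 1ULL << 18;	/* frac_bits = 18 */
	unsigned long long dec_multiple = pll_freq * multiplier / divider;
	unsigned long long dec = dec_multiple / multiplier;
	unsigned long frac = dec_multiple % multiplier;

	printf("dec=%llu frac=0x%lx\n", dec, frac);
	/* dec=39 frac=0x4000: DECIMAL_DIV_START_1 = 39,
	 * FRAC_DIV_START = 0x00 / 0x40 / 0x00 (low/mid/high) */
	return 0;
}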
+#define SSC_CENTER		BIT(0)
+#define SSC_EN			BIT(1)
+
+static void dsi_pll_calc_ssc(struct dsi_pll_7nm *pll)
+{
+	struct dsi_pll_config *config = &pll->pll_configuration;
+	struct dsi_pll_regs *regs = &pll->reg_setup;
+	u32 ssc_per;
+	u32 ssc_mod;
+	u64 ssc_step_size;
+	u64 frac;
+
+	if (!config->enable_ssc) {
+		DBG("SSC not enabled\n");
+		return;
+	}
+
+	ssc_per = DIV_ROUND_CLOSEST(config->ref_freq, config->ssc_freq) / 2 - 1;
+	ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
+	ssc_per -= ssc_mod;
+
+	frac = regs->frac_div_start_low |
+			(regs->frac_div_start_mid << 8) |
+			(regs->frac_div_start_high << 16);
+	ssc_step_size = regs->decimal_div_start;
+	ssc_step_size *= (1 << config->frac_bits);
+	ssc_step_size += frac;
+	ssc_step_size *= config->ssc_offset;
+	ssc_step_size *= (config->ssc_adj_per + 1);
+	ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
+	ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);
+
+	regs->ssc_div_per_low = ssc_per & 0xFF;
+	regs->ssc_div_per_high = (ssc_per & 0xFF00) >> 8;
+	regs->ssc_stepsize_low = (u32)(ssc_step_size & 0xFF);
+	regs->ssc_stepsize_high = (u32)((ssc_step_size & 0xFF00) >> 8);
+	regs->ssc_adjper_low = config->ssc_adj_per & 0xFF;
+	regs->ssc_adjper_high = (config->ssc_adj_per & 0xFF00) >> 8;
+
+	regs->ssc_control = config->ssc_center ? SSC_CENTER : 0;
+
+	pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",
+		 regs->decimal_div_start, frac, config->frac_bits);
+	pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
+		 ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
+}
+
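A standalone sketch (not part of this patch) of the SSC period/step-size math above, reusing the dec/frac values from the 1.5 GHz example and the driver defaults; note that enable_ssc is left false in dsi_pll_setup_config(), so this path is currently unused:

#include <stdio.h>

int main(void)
{
	unsigned int ref_freq = 19200000, ssc_freq = 31500;
	unsigned int ssc_offset = 4800, ssc_adj_per = 2;
	unsigned int ssc_per;
	unsigned long long step;

	/* DIV_ROUND_CLOSEST(ref_freq, ssc_freq) / 2 - 1, aligned to adj_per */
	ssc_per = (ref_freq + ssc_freq / 2) / ssc_freq / 2 - 1;
	ssc_per -= (ssc_per + 1) % (ssc_adj_per + 1);

	step = (39ULL << 18) + 0x4000;		/* dec/frac from the 1.5 GHz example */
	step = step * ssc_offset * (ssc_adj_per + 1) / (ssc_per + 1);
	step = (step + 500000) / 1000000;	/* DIV_ROUND_CLOSEST_ULL(step, 1000000) */

	printf("ssc_per=%u stepsize=%llu\n", ssc_per, step);
	/* ssc_per=302 stepsize=487 */
	return 0;
}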
+static void dsi_pll_ssc_commit(struct dsi_pll_7nm *pll)
+{
+	void __iomem *base = pll->mmio;
+	struct dsi_pll_regs *regs = &pll->reg_setup;
+
+	if (pll->pll_configuration.enable_ssc) {
+		pr_debug("SSC is enabled\n");
+
+		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
+			  regs->ssc_stepsize_low);
+		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
+			  regs->ssc_stepsize_high);
+		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_1,
+			  regs->ssc_div_per_low);
+		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
+			  regs->ssc_div_per_high);
+		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_1,
+			  regs->ssc_adjper_low);
+		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_1,
+			  regs->ssc_adjper_high);
+		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_CONTROL,
+			  SSC_EN | regs->ssc_control);
+	}
+}
+
+static void dsi_pll_config_hzindep_reg(struct dsi_pll_7nm *pll)
+{
+	void __iomem *base = pll->mmio;
+	u8 analog_controls_five_1 = 0x01, vco_config_1 = 0x00;
+
+	if (pll->base.type == MSM_DSI_PHY_7NM_V4_1) {
+		if (pll->vco_current_rate >= 3100000000ULL)
+			analog_controls_five_1 = 0x03;
+
+		if (pll->vco_current_rate < 1520000000ULL)
+			vco_config_1 = 0x08;
+		else if (pll->vco_current_rate < 2990000000ULL)
+			vco_config_1 = 0x01;
+	}
+
+	pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1,
+		  analog_controls_five_1);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1, vco_config_1);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE, 0x01);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_DSM_DIVIDER, 0x00);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE, 0xba);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_OUTDIV, 0x00);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_CORE_OVERRIDE, 0x00);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x0a);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_1, 0xc0);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x84);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x82);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1, 0x4c);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_PFILT, 0x29);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_PFILT, 0x2f);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_IFILT, 0x2a);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_IFILT,
+		  pll->base.type == MSM_DSI_PHY_7NM_V4_1 ? 0x3f : 0x22);
+
+	if (pll->base.type == MSM_DSI_PHY_7NM_V4_1) {
+		pll_write(base + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE, 0x22);
+		if (pll->slave)
+			pll_write(pll->slave->mmio + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE, 0x22);
+	}
+}
+
+static void dsi_pll_commit(struct dsi_pll_7nm *pll)
+{
+	void __iomem *base = pll->mmio;
+	struct dsi_pll_regs *reg = &pll->reg_setup;
+
+	pll_write(base + REG_DSI_7nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1, reg->decimal_div_start);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1, reg->frac_div_start_low);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1, reg->frac_div_start_mid);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1, reg->frac_div_start_high);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, reg->pll_lockdet_rate);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, 0x10); /* TODO: 0x00 for CPHY */
+	pll_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS, reg->pll_clock_inverters);
+}
+
+static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
+				     unsigned long parent_rate)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+
+	DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_7nm->id, rate,
+	    parent_rate);
+
+	pll_7nm->vco_current_rate = rate;
+	pll_7nm->vco_ref_clk_rate = VCO_REF_CLK_RATE;
+
+	dsi_pll_setup_config(pll_7nm);
+
+	dsi_pll_calc_dec_frac(pll_7nm);
+
+	dsi_pll_calc_ssc(pll_7nm);
+
+	dsi_pll_commit(pll_7nm);
+
+	dsi_pll_config_hzindep_reg(pll_7nm);
+
+	dsi_pll_ssc_commit(pll_7nm);
+
+	/* flush, ensure all register writes are done */
+	wmb();
+
+	return 0;
+}
+
+static int dsi_pll_7nm_lock_status(struct dsi_pll_7nm *pll)
+{
+	int rc;
+	u32 status = 0;
+	u32 const delay_us = 100;
+	u32 const timeout_us = 5000;
+
+	rc = readl_poll_timeout_atomic(pll->mmio +
+				       REG_DSI_7nm_PHY_PLL_COMMON_STATUS_ONE,
+				       status,
+				       ((status & BIT(0)) > 0),
+				       delay_us,
+				       timeout_us);
+	if (rc)
+		pr_err("DSI PLL(%d) lock failed, status=0x%08x\n",
+		       pll->id, status);
+
+	return rc;
+}
+
+static void dsi_pll_disable_pll_bias(struct dsi_pll_7nm *pll)
+{
+	u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0);
+
+	pll_write(pll->mmio + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES, 0);
+	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0, data & ~BIT(5));
+	ndelay(250);
+}
+
+static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll)
+{
+	u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0);
+
+	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0, data | BIT(5));
+	pll_write(pll->mmio + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
+	ndelay(250);
+}
+
+static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll)
+{
+	u32 data;
+
+	data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1, data & ~BIT(5));
+}
+
+static void dsi_pll_enable_global_clk(struct dsi_pll_7nm *pll)
+{
+	u32 data;
+
+	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_3, 0x04);
+
+	data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1,
+		  data | BIT(5) | BIT(4));
+}
+
+static void dsi_pll_phy_dig_reset(struct dsi_pll_7nm *pll)
+{
+	/*
+	 * Reset the PHY digital domain. This would be needed when
+	 * coming out of a CX or analog rail power collapse while
+	 * ensuring that the pads maintain LP00 or LP11 state
+	 */
+	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4, BIT(0));
+	wmb(); /* Ensure that the reset is deasserted */
+	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4, 0x0);
+	wmb(); /* Ensure that the reset is deasserted */
+}
+
+static int dsi_pll_7nm_vco_prepare(struct clk_hw *hw)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+	int rc;
+
+	dsi_pll_enable_pll_bias(pll_7nm);
+	if (pll_7nm->slave)
+		dsi_pll_enable_pll_bias(pll_7nm->slave);
+
+	/* Start PLL */
+	pll_write(pll_7nm->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0x01);
+
+	/*
+	 * ensure all PLL configurations are written prior to checking
+	 * for PLL lock.
+	 */
+	wmb();
+
+	/* Check for PLL lock */
+	rc = dsi_pll_7nm_lock_status(pll_7nm);
+	if (rc) {
+		pr_err("PLL(%d) lock failed\n", pll_7nm->id);
+		goto error;
+	}
+
+	pll->pll_on = true;
+
+	/*
+	 * assert power on reset for PHY digital in case the PLL is
+	 * enabled after CX or analog domain power collapse. This needs
+	 * to be done before enabling the global clk.
+	 */
+	dsi_pll_phy_dig_reset(pll_7nm);
+	if (pll_7nm->slave)
+		dsi_pll_phy_dig_reset(pll_7nm->slave);
+
+	dsi_pll_enable_global_clk(pll_7nm);
+	if (pll_7nm->slave)
+		dsi_pll_enable_global_clk(pll_7nm->slave);
+
+error:
+	return rc;
+}
+
+static void dsi_pll_disable_sub(struct dsi_pll_7nm *pll)
+{
+	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_RBUF_CTRL, 0);
+	dsi_pll_disable_pll_bias(pll);
+}
+
+static void dsi_pll_7nm_vco_unprepare(struct clk_hw *hw)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+
+	/*
+	 * To avoid any stray glitches while abruptly powering down the PLL
+	 * make sure to gate the clock using the clock enable bit before
+	 * powering down the PLL
+	 */
+	dsi_pll_disable_global_clk(pll_7nm);
+	pll_write(pll_7nm->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0);
+	dsi_pll_disable_sub(pll_7nm);
+	if (pll_7nm->slave) {
+		dsi_pll_disable_global_clk(pll_7nm->slave);
+		dsi_pll_disable_sub(pll_7nm->slave);
+	}
+	/* flush, ensure all register writes are done */
+	wmb();
+	pll->pll_on = false;
+}
+
+static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
+						  unsigned long parent_rate)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+	struct dsi_pll_config *config = &pll_7nm->pll_configuration;
+	void __iomem *base = pll_7nm->mmio;
+	u64 ref_clk = pll_7nm->vco_ref_clk_rate;
+	u64 vco_rate = 0x0;
+	u64 multiplier;
+	u32 frac;
+	u32 dec;
+	u64 pll_freq, tmp64;
+
+	dec = pll_read(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1);
+	dec &= 0xff;
+
+	frac = pll_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1);
+	frac |= ((pll_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1) &
+		  0xff) << 8);
+	frac |= ((pll_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
+		  0x3) << 16);
+
+	/*
+	 * TODO:
+	 *	1. Assumes prescaler is disabled
+	 */
+	multiplier = 1 << config->frac_bits;
+	pll_freq = dec * (ref_clk * 2);
+	tmp64 = (ref_clk * 2 * frac);
+	pll_freq += div_u64(tmp64, multiplier);
+
+	vco_rate = pll_freq;
+
+	DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
+	    pll_7nm->id, (unsigned long)vco_rate, dec, frac);
+
+	return (unsigned long)vco_rate;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_7nm_vco = {
+	.round_rate = msm_dsi_pll_helper_clk_round_rate,
+	.set_rate = dsi_pll_7nm_vco_set_rate,
+	.recalc_rate = dsi_pll_7nm_vco_recalc_rate,
+	.prepare = dsi_pll_7nm_vco_prepare,
+	.unprepare = dsi_pll_7nm_vco_unprepare,
+};
+
+/*
+ * PLL Callbacks
+ */
+
+static void dsi_pll_7nm_save_state(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+	struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;
+	void __iomem *phy_base = pll_7nm->phy_cmn_mmio;
+	u32 cmn_clk_cfg0, cmn_clk_cfg1;
+
+	cached->pll_out_div = pll_read(pll_7nm->mmio +
+				       REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
+	cached->pll_out_div &= 0x3;
+
+	cmn_clk_cfg0 = pll_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
+	cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
+	cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;
+
+	cmn_clk_cfg1 = pll_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+	cached->pll_mux = cmn_clk_cfg1 & 0x3;
+
+	DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
+	    pll_7nm->id, cached->pll_out_div, cached->bit_clk_div,
+	    cached->pix_clk_div, cached->pll_mux);
+}
+
+static int dsi_pll_7nm_restore_state(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+	struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;
+	void __iomem *phy_base = pll_7nm->phy_cmn_mmio;
+	u32 val;
+	int ret;
+
+	val = pll_read(pll_7nm->mmio + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
+	val &= ~0x3;
+	val |= cached->pll_out_div;
+	pll_write(pll_7nm->mmio + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE, val);
+
+	pll_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0,
+		  cached->bit_clk_div | (cached->pix_clk_div << 4));
+
+	val = pll_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+	val &= ~0x3;
+	val |= cached->pll_mux;
+	pll_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, val);
+
+	ret = dsi_pll_7nm_vco_set_rate(&pll->clk_hw, pll_7nm->vco_current_rate, pll_7nm->vco_ref_clk_rate);
+	if (ret) {
+		DRM_DEV_ERROR(&pll_7nm->pdev->dev,
+			"restore vco rate failed. ret=%d\n", ret);
+		return ret;
+	}
+
+	DBG("DSI PLL%d", pll_7nm->id);
+
+	return 0;
+}
+
+static int dsi_pll_7nm_set_usecase(struct msm_dsi_pll *pll,
+				    enum msm_dsi_phy_usecase uc)
+{
+	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+	void __iomem *base = pll_7nm->phy_cmn_mmio;
+	u32 data = 0x0;	/* internal PLL */
+
+	DBG("DSI PLL%d", pll_7nm->id);
+
+	switch (uc) {
+	case MSM_DSI_PHY_STANDALONE:
+		break;
+	case MSM_DSI_PHY_MASTER:
+		pll_7nm->slave = pll_7nm_list[(pll_7nm->id + 1) % DSI_MAX];
+		break;
+	case MSM_DSI_PHY_SLAVE:
+		data = 0x1; /* external PLL */
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* set PLL src */
+	pll_write(base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, (data << 2));
+
+	pll_7nm->uc = uc;
+
+	return 0;
+}
+
+static int dsi_pll_7nm_get_provider(struct msm_dsi_pll *pll,
+				     struct clk **byte_clk_provider,
+				     struct clk **pixel_clk_provider)
+{
+	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+	struct clk_hw_onecell_data *hw_data = pll_7nm->hw_data;
+
+	DBG("DSI PLL%d", pll_7nm->id);
+
+	if (byte_clk_provider)
+		*byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk;
+	if (pixel_clk_provider)
+		*pixel_clk_provider = hw_data->hws[DSI_PIXEL_PLL_CLK]->clk;
+
+	return 0;
+}
+
+static void dsi_pll_7nm_destroy(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+	struct device *dev = &pll_7nm->pdev->dev;
+
+	DBG("DSI PLL%d", pll_7nm->id);
+	of_clk_del_provider(dev->of_node);
+
+	clk_hw_unregister_divider(pll_7nm->out_dsiclk_hw);
+	clk_hw_unregister_mux(pll_7nm->pclk_mux_hw);
+	clk_hw_unregister_fixed_factor(pll_7nm->post_out_div_clk_hw);
+	clk_hw_unregister_fixed_factor(pll_7nm->by_2_bit_clk_hw);
+	clk_hw_unregister_fixed_factor(pll_7nm->byte_clk_hw);
+	clk_hw_unregister_divider(pll_7nm->bit_clk_hw);
+	clk_hw_unregister_divider(pll_7nm->out_div_clk_hw);
+	clk_hw_unregister(&pll_7nm->base.clk_hw);
+}
+
+/*
+ * The post dividers and mux clocks are created using the standard divider and
+ * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux
+ * state to follow the master PLL's divider/mux state. Therefore, we don't
+ * require special clock ops that also configure the slave PLL registers
+ */
+static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm)
+{
+	char clk_name[32], parent[32], vco_name[32];
+	char parent2[32], parent3[32], parent4[32];
+	struct clk_init_data vco_init = {
+		.parent_names = (const char *[]){ "bi_tcxo" },
+		.num_parents = 1,
+		.name = vco_name,
+		.flags = CLK_IGNORE_UNUSED,
+		.ops = &clk_ops_dsi_pll_7nm_vco,
+	};
+	struct device *dev = &pll_7nm->pdev->dev;
+	struct clk_hw_onecell_data *hw_data;
+	struct clk_hw *hw;
+	int ret;
+
+	DBG("DSI%d", pll_7nm->id);
+
+	hw_data = devm_kzalloc(dev, sizeof(*hw_data) +
+			       NUM_PROVIDED_CLKS * sizeof(struct clk_hw *),
+			       GFP_KERNEL);
+	if (!hw_data)
+		return -ENOMEM;
+
+	snprintf(vco_name, 32, "dsi%dvco_clk", pll_7nm->id);
+	pll_7nm->base.clk_hw.init = &vco_init;
+
+	ret = clk_hw_register(dev, &pll_7nm->base.clk_hw);
+	if (ret)
+		return ret;
+
+	snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);
+	snprintf(parent, 32, "dsi%dvco_clk", pll_7nm->id);
+
+	hw = clk_hw_register_divider(dev, clk_name,
+				     parent, CLK_SET_RATE_PARENT,
+				     pll_7nm->mmio +
+				     REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE,
+				     0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
+	if (IS_ERR(hw)) {
+		ret = PTR_ERR(hw);
+		goto err_base_clk_hw;
+	}
+
+	pll_7nm->out_div_clk_hw = hw;
+
+	snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_7nm->id);
+	snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);
+
+	/* BIT CLK: DIV_CTRL_3_0 */
+	hw = clk_hw_register_divider(dev, clk_name, parent,
+				     CLK_SET_RATE_PARENT,
+				     pll_7nm->phy_cmn_mmio +
+				     REG_DSI_7nm_PHY_CMN_CLK_CFG0,
+				     0, 4, CLK_DIVIDER_ONE_BASED,
+				     &pll_7nm->postdiv_lock);
+	if (IS_ERR(hw)) {
+		ret = PTR_ERR(hw);
+		goto err_out_div_clk_hw;
+	}
+
+	pll_7nm->bit_clk_hw = hw;
+
+	snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_7nm->id);
+	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->id);
+
+	/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
+	hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
+					  CLK_SET_RATE_PARENT, 1, 8);
+	if (IS_ERR(hw)) {
+		ret = PTR_ERR(hw);
+		goto err_bit_clk_hw;
+	}
+
+	pll_7nm->byte_clk_hw = hw;
+	hw_data->hws[DSI_BYTE_PLL_CLK] = hw;
+
+	snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->id);
+	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->id);
+
+	hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
+					  0, 1, 2);
+	if (IS_ERR(hw)) {
+		ret = PTR_ERR(hw);
+		goto err_byte_clk_hw;
+	}
+
+	pll_7nm->by_2_bit_clk_hw = hw;
+
+	snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->id);
+	snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);
+
+	hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
+					  0, 1, 4);
+	if (IS_ERR(hw)) {
+		ret = PTR_ERR(hw);
+		goto err_by_2_bit_clk_hw;
+	}
+
+	pll_7nm->post_out_div_clk_hw = hw;
+
+	snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_7nm->id);
+	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->id);
+	snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->id);
+	snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);
+	snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->id);
+
+	hw = clk_hw_register_mux(dev, clk_name,
+				 ((const char *[]){
+				 parent, parent2, parent3, parent4
+				 }), 4, 0, pll_7nm->phy_cmn_mmio +
+				 REG_DSI_7nm_PHY_CMN_CLK_CFG1,
+				 0, 2, 0, NULL);
+	if (IS_ERR(hw)) {
+		ret = PTR_ERR(hw);
+		goto err_post_out_div_clk_hw;
+	}
+
+	pll_7nm->pclk_mux_hw = hw;
+
+	snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_7nm->id);
+	snprintf(parent, 32, "dsi%d_pclk_mux", pll_7nm->id);
+
+	/* PIX CLK DIV : DIV_CTRL_7_4*/
+	hw = clk_hw_register_divider(dev, clk_name, parent,
+				     0, pll_7nm->phy_cmn_mmio +
+					REG_DSI_7nm_PHY_CMN_CLK_CFG0,
+				     4, 4, CLK_DIVIDER_ONE_BASED,
+				     &pll_7nm->postdiv_lock);
+	if (IS_ERR(hw)) {
+		ret = PTR_ERR(hw);
+		goto err_pclk_mux_hw;
+	}
+
+	pll_7nm->out_dsiclk_hw = hw;
+	hw_data->hws[DSI_PIXEL_PLL_CLK] = hw;
+
+	hw_data->num = NUM_PROVIDED_CLKS;
+	pll_7nm->hw_data = hw_data;
+
+	ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
+				     pll_7nm->hw_data);
+	if (ret) {
+		DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
+		goto err_dsiclk_hw;
+	}
+
+	return 0;
+
+err_dsiclk_hw:
+	clk_hw_unregister_divider(pll_7nm->out_dsiclk_hw);
+err_pclk_mux_hw:
+	clk_hw_unregister_mux(pll_7nm->pclk_mux_hw);
+err_post_out_div_clk_hw:
+	clk_hw_unregister_fixed_factor(pll_7nm->post_out_div_clk_hw);
+err_by_2_bit_clk_hw:
+	clk_hw_unregister_fixed_factor(pll_7nm->by_2_bit_clk_hw);
+err_byte_clk_hw:
+	clk_hw_unregister_fixed_factor(pll_7nm->byte_clk_hw);
+err_bit_clk_hw:
+	clk_hw_unregister_divider(pll_7nm->bit_clk_hw);
+err_out_div_clk_hw:
+	clk_hw_unregister_divider(pll_7nm->out_div_clk_hw);
+err_base_clk_hw:
+	clk_hw_unregister(&pll_7nm->base.clk_hw);
+
+	return ret;
+}
+
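To summarise the chain registered above, a minimal userspace sketch (not part of this patch), assuming a 1.5 GHz VCO with OUTDIV and DIV_CTRL_3_0 both set to divide by one:

#include <stdio.h>

int main(void)
{
	unsigned long long vco = 1500000000ULL;	/* assumed VCO rate */
	unsigned int out_div = 1;	/* PLL_OUTDIV_RATE, power-of-two divider */
	unsigned int bit_div = 1;	/* CLK_CFG0 DIV_CTRL_3_0, one-based divider */

	unsigned long long bit_clk  = vco / out_div / bit_div;
	unsigned long long byte_clk = bit_clk / 8;	/* fixed-factor /8 */

	printf("bit=%llu byte=%llu\n", bit_clk, byte_clk);
	/* bit=1500000000 byte=187500000 */
	return 0;
}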
+struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev,
+					enum msm_dsi_phy_type type, int id)
+{
+	struct dsi_pll_7nm *pll_7nm;
+	struct msm_dsi_pll *pll;
+	int ret;
+
+	pll_7nm = devm_kzalloc(&pdev->dev, sizeof(*pll_7nm), GFP_KERNEL);
+	if (!pll_7nm)
+		return ERR_PTR(-ENOMEM);
+
+	DBG("DSI PLL%d", id);
+
+	pll_7nm->pdev = pdev;
+	pll_7nm->id = id;
+	pll_7nm_list[id] = pll_7nm;
+
+	pll_7nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
+	if (IS_ERR_OR_NULL(pll_7nm->phy_cmn_mmio)) {
+		DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	pll_7nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
+	if (IS_ERR_OR_NULL(pll_7nm->mmio)) {
+		DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	spin_lock_init(&pll_7nm->postdiv_lock);
+
+	pll = &pll_7nm->base;
+	pll->min_rate = 1000000000UL;
+	pll->max_rate = 3500000000UL;
+	if (type == MSM_DSI_PHY_7NM_V4_1) {
+		pll->min_rate = 600000000UL;
+		pll->max_rate = (unsigned long)5000000000ULL;
+		/* workaround for max rate overflowing on 32-bit builds: */
+		pll->max_rate = max(pll->max_rate, 0xffffffffUL);
+	}
+	pll->get_provider = dsi_pll_7nm_get_provider;
+	pll->destroy = dsi_pll_7nm_destroy;
+	pll->save_state = dsi_pll_7nm_save_state;
+	pll->restore_state = dsi_pll_7nm_restore_state;
+	pll->set_usecase = dsi_pll_7nm_set_usecase;
+
+	pll_7nm->vco_delay = 1;
+
+	ret = pll_7nm_register(pll_7nm);
+	if (ret) {
+		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
+		return ERR_PTR(ret);
+	}
+
+	/* TODO: Remove this when we have proper display handover support */
+	msm_dsi_pll_save_state(pll);
+
+	return pll;
+}
+
 static int dsi_phy_hw_v4_0_is_pll_on(struct msm_dsi_phy *phy)
 {
 	void __iomem *base = phy->base;
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/phy/dsi_pll.c
similarity index 100%
rename from drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
rename to drivers/gpu/drm/msm/dsi/phy/dsi_pll.c
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/phy/dsi_pll.h
similarity index 100%
rename from drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
rename to drivers/gpu/drm/msm/dsi/phy/dsi_pll.h
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
deleted file mode 100644
index de3b802ccd3d..000000000000
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
+++ /dev/null
@@ -1,881 +0,0 @@ 
-/*
- * SPDX-License-Identifier: GPL-2.0
- * Copyright (c) 2018, The Linux Foundation
- */
-
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/iopoll.h>
-
-#include "dsi_pll.h"
-#include "dsi.xml.h"
-
-/*
- * DSI PLL 10nm - clock diagram (eg: DSI0):
- *
- *           dsi0_pll_out_div_clk  dsi0_pll_bit_clk
- *                              |                |
- *                              |                |
- *                 +---------+  |  +----------+  |  +----+
- *  dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
- *                 +---------+  |  +----------+  |  +----+
- *                              |                |
- *                              |                |         dsi0_pll_by_2_bit_clk
- *                              |                |          |
- *                              |                |  +----+  |  |\  dsi0_pclk_mux
- *                              |                |--| /2 |--o--| \   |
- *                              |                |  +----+     |  \  |  +---------+
- *                              |                --------------|  |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
- *                              |------------------------------|  /     +---------+
- *                              |          +-----+             | /
- *                              -----------| /4? |--o----------|/
- *                                         +-----+  |           |
- *                                                  |           |dsiclk_sel
- *                                                  |
- *                                                  dsi0_pll_post_out_div_clk
- */
-
-#define DSI_BYTE_PLL_CLK		0
-#define DSI_PIXEL_PLL_CLK		1
-#define NUM_PROVIDED_CLKS		2
-
-#define VCO_REF_CLK_RATE		19200000
-
-struct dsi_pll_regs {
-	u32 pll_prop_gain_rate;
-	u32 pll_lockdet_rate;
-	u32 decimal_div_start;
-	u32 frac_div_start_low;
-	u32 frac_div_start_mid;
-	u32 frac_div_start_high;
-	u32 pll_clock_inverters;
-	u32 ssc_stepsize_low;
-	u32 ssc_stepsize_high;
-	u32 ssc_div_per_low;
-	u32 ssc_div_per_high;
-	u32 ssc_adjper_low;
-	u32 ssc_adjper_high;
-	u32 ssc_control;
-};
-
-struct dsi_pll_config {
-	u32 ref_freq;
-	bool div_override;
-	u32 output_div;
-	bool ignore_frac;
-	bool disable_prescaler;
-	bool enable_ssc;
-	bool ssc_center;
-	u32 dec_bits;
-	u32 frac_bits;
-	u32 lock_timer;
-	u32 ssc_freq;
-	u32 ssc_offset;
-	u32 ssc_adj_per;
-	u32 thresh_cycles;
-	u32 refclk_cycles;
-};
-
-struct pll_10nm_cached_state {
-	unsigned long vco_rate;
-	u8 bit_clk_div;
-	u8 pix_clk_div;
-	u8 pll_out_div;
-	u8 pll_mux;
-};
-
-struct dsi_pll_10nm {
-	struct msm_dsi_pll base;
-
-	int id;
-	struct platform_device *pdev;
-
-	void __iomem *phy_cmn_mmio;
-	void __iomem *mmio;
-
-	u64 vco_ref_clk_rate;
-	u64 vco_current_rate;
-
-	/* protects REG_DSI_10nm_PHY_CMN_CLK_CFG0 register */
-	spinlock_t postdiv_lock;
-
-	int vco_delay;
-	struct dsi_pll_config pll_configuration;
-	struct dsi_pll_regs reg_setup;
-
-	/* private clocks: */
-	struct clk_hw *out_div_clk_hw;
-	struct clk_hw *bit_clk_hw;
-	struct clk_hw *byte_clk_hw;
-	struct clk_hw *by_2_bit_clk_hw;
-	struct clk_hw *post_out_div_clk_hw;
-	struct clk_hw *pclk_mux_hw;
-	struct clk_hw *out_dsiclk_hw;
-
-	/* clock-provider: */
-	struct clk_hw_onecell_data *hw_data;
-
-	struct pll_10nm_cached_state cached_state;
-
-	enum msm_dsi_phy_usecase uc;
-	struct dsi_pll_10nm *slave;
-};
-
-#define to_pll_10nm(x)	container_of(x, struct dsi_pll_10nm, base)
-
-/*
- * Global list of private DSI PLL struct pointers. We need this for Dual DSI
- * mode, where the master PLL's clk_ops needs access the slave's private data
- */
-static struct dsi_pll_10nm *pll_10nm_list[DSI_MAX];
-
-static void dsi_pll_setup_config(struct dsi_pll_10nm *pll)
-{
-	struct dsi_pll_config *config = &pll->pll_configuration;
-
-	config->ref_freq = pll->vco_ref_clk_rate;
-	config->output_div = 1;
-	config->dec_bits = 8;
-	config->frac_bits = 18;
-	config->lock_timer = 64;
-	config->ssc_freq = 31500;
-	config->ssc_offset = 5000;
-	config->ssc_adj_per = 2;
-	config->thresh_cycles = 32;
-	config->refclk_cycles = 256;
-
-	config->div_override = false;
-	config->ignore_frac = false;
-	config->disable_prescaler = false;
-
-	config->enable_ssc = false;
-	config->ssc_center = 0;
-}
-
-static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll)
-{
-	struct dsi_pll_config *config = &pll->pll_configuration;
-	struct dsi_pll_regs *regs = &pll->reg_setup;
-	u64 fref = pll->vco_ref_clk_rate;
-	u64 pll_freq;
-	u64 divider;
-	u64 dec, dec_multiple;
-	u32 frac;
-	u64 multiplier;
-
-	pll_freq = pll->vco_current_rate;
-
-	if (config->disable_prescaler)
-		divider = fref;
-	else
-		divider = fref * 2;
-
-	multiplier = 1 << config->frac_bits;
-	dec_multiple = div_u64(pll_freq * multiplier, divider);
-	dec = div_u64_rem(dec_multiple, multiplier, &frac);
-
-	if (pll_freq <= 1900000000UL)
-		regs->pll_prop_gain_rate = 8;
-	else if (pll_freq <= 3000000000UL)
-		regs->pll_prop_gain_rate = 10;
-	else
-		regs->pll_prop_gain_rate = 12;
-	if (pll_freq < 1100000000UL)
-		regs->pll_clock_inverters = 8;
-	else
-		regs->pll_clock_inverters = 0;
-
-	regs->pll_lockdet_rate = config->lock_timer;
-	regs->decimal_div_start = dec;
-	regs->frac_div_start_low = (frac & 0xff);
-	regs->frac_div_start_mid = (frac & 0xff00) >> 8;
-	regs->frac_div_start_high = (frac & 0x30000) >> 16;
-}
-
-#define SSC_CENTER		BIT(0)
-#define SSC_EN			BIT(1)
-
-static void dsi_pll_calc_ssc(struct dsi_pll_10nm *pll)
-{
-	struct dsi_pll_config *config = &pll->pll_configuration;
-	struct dsi_pll_regs *regs = &pll->reg_setup;
-	u32 ssc_per;
-	u32 ssc_mod;
-	u64 ssc_step_size;
-	u64 frac;
-
-	if (!config->enable_ssc) {
-		DBG("SSC not enabled\n");
-		return;
-	}
-
-	ssc_per = DIV_ROUND_CLOSEST(config->ref_freq, config->ssc_freq) / 2 - 1;
-	ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
-	ssc_per -= ssc_mod;
-
-	frac = regs->frac_div_start_low |
-			(regs->frac_div_start_mid << 8) |
-			(regs->frac_div_start_high << 16);
-	ssc_step_size = regs->decimal_div_start;
-	ssc_step_size *= (1 << config->frac_bits);
-	ssc_step_size += frac;
-	ssc_step_size *= config->ssc_offset;
-	ssc_step_size *= (config->ssc_adj_per + 1);
-	ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
-	ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);
-
-	regs->ssc_div_per_low = ssc_per & 0xFF;
-	regs->ssc_div_per_high = (ssc_per & 0xFF00) >> 8;
-	regs->ssc_stepsize_low = (u32)(ssc_step_size & 0xFF);
-	regs->ssc_stepsize_high = (u32)((ssc_step_size & 0xFF00) >> 8);
-	regs->ssc_adjper_low = config->ssc_adj_per & 0xFF;
-	regs->ssc_adjper_high = (config->ssc_adj_per & 0xFF00) >> 8;
-
-	regs->ssc_control = config->ssc_center ? SSC_CENTER : 0;
-
-	pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",
-		 regs->decimal_div_start, frac, config->frac_bits);
-	pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
-		 ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
-}
-
-static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll)
-{
-	void __iomem *base = pll->mmio;
-	struct dsi_pll_regs *regs = &pll->reg_setup;
-
-	if (pll->pll_configuration.enable_ssc) {
-		pr_debug("SSC is enabled\n");
-
-		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
-			  regs->ssc_stepsize_low);
-		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
-			  regs->ssc_stepsize_high);
-		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1,
-			  regs->ssc_div_per_low);
-		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
-			  regs->ssc_div_per_high);
-		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1,
-			  regs->ssc_adjper_low);
-		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1,
-			  regs->ssc_adjper_high);
-		pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_CONTROL,
-			  SSC_EN | regs->ssc_control);
-	}
-}
-
-static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll)
-{
-	void __iomem *base = pll->mmio;
-
-	pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE, 0x80);
-	pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
-	pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);
-	pll_write(base + REG_DSI_10nm_PHY_PLL_DSM_DIVIDER, 0x00);
-	pll_write(base + REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
-	pll_write(base + REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
-	pll_write(base + REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE,
-		  0xba);
-	pll_write(base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
-	pll_write(base + REG_DSI_10nm_PHY_PLL_OUTDIV, 0x00);
-	pll_write(base + REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE, 0x00);
-	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
-	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x08);
-	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1, 0xc0);
-	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0xfa);
-	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1,
-		  0x4c);
-	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
-	pll_write(base + REG_DSI_10nm_PHY_PLL_PFILT, 0x29);
-	pll_write(base + REG_DSI_10nm_PHY_PLL_IFILT, 0x3f);
-}
-
-static void dsi_pll_commit(struct dsi_pll_10nm *pll)
-{
-	void __iomem *base = pll->mmio;
-	struct dsi_pll_regs *reg = &pll->reg_setup;
-
-	pll_write(base + REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
-	pll_write(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1,
-		  reg->decimal_div_start);
-	pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1,
-		  reg->frac_div_start_low);
-	pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1,
-		  reg->frac_div_start_mid);
-	pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
-		  reg->frac_div_start_high);
-	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1,
-		  reg->pll_lockdet_rate);
-	pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
-	pll_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10);
-	pll_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS,
-		  reg->pll_clock_inverters);
-}
-
-static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
-				     unsigned long parent_rate)
-{
-	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
-
-	DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_10nm->id, rate,
-	    parent_rate);
-
-	pll_10nm->vco_current_rate = rate;
-	pll_10nm->vco_ref_clk_rate = VCO_REF_CLK_RATE;
-
-	dsi_pll_setup_config(pll_10nm);
-
-	dsi_pll_calc_dec_frac(pll_10nm);
-
-	dsi_pll_calc_ssc(pll_10nm);
-
-	dsi_pll_commit(pll_10nm);
-
-	dsi_pll_config_hzindep_reg(pll_10nm);
-
-	dsi_pll_ssc_commit(pll_10nm);
-
-	/* flush, ensure all register writes are done*/
-	wmb();
-
-	return 0;
-}
-
-static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll)
-{
-	struct device *dev = &pll->pdev->dev;
-	int rc;
-	u32 status = 0;
-	u32 const delay_us = 100;
-	u32 const timeout_us = 5000;
-
-	rc = readl_poll_timeout_atomic(pll->mmio +
-				       REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE,
-				       status,
-				       ((status & BIT(0)) > 0),
-				       delay_us,
-				       timeout_us);
-	if (rc)
-		DRM_DEV_ERROR(dev, "DSI PLL(%d) lock failed, status=0x%08x\n",
-			      pll->id, status);
-
-	return rc;
-}
-
-static void dsi_pll_disable_pll_bias(struct dsi_pll_10nm *pll)
-{
-	u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0);
-
-	pll_write(pll->mmio + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0);
-	pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0,
-		  data & ~BIT(5));
-	ndelay(250);
-}
-
-static void dsi_pll_enable_pll_bias(struct dsi_pll_10nm *pll)
-{
-	u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0);
-
-	pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0,
-		  data | BIT(5));
-	pll_write(pll->mmio + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
-	ndelay(250);
-}
-
-static void dsi_pll_disable_global_clk(struct dsi_pll_10nm *pll)
-{
-	u32 data;
-
-	data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
-	pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
-		  data & ~BIT(5));
-}
-
-static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll)
-{
-	u32 data;
-
-	data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
-	pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
-		  data | BIT(5));
-}
-
-static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
-{
-	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
-	struct device *dev = &pll_10nm->pdev->dev;
-	int rc;
-
-	dsi_pll_enable_pll_bias(pll_10nm);
-	if (pll_10nm->slave)
-		dsi_pll_enable_pll_bias(pll_10nm->slave);
-
-	rc = dsi_pll_10nm_vco_set_rate(hw,pll_10nm->vco_current_rate, 0);
-	if (rc) {
-		DRM_DEV_ERROR(dev, "vco_set_rate failed, rc=%d\n", rc);
-		return rc;
-	}
-
-	/* Start PLL */
-	pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,
-		  0x01);
-
-	/*
-	 * ensure all PLL configurations are written prior to checking
-	 * for PLL lock.
-	 */
-	wmb();
-
-	/* Check for PLL lock */
-	rc = dsi_pll_10nm_lock_status(pll_10nm);
-	if (rc) {
-		DRM_DEV_ERROR(dev, "PLL(%d) lock failed\n", pll_10nm->id);
-		goto error;
-	}
-
-	pll->pll_on = true;
-
-	dsi_pll_enable_global_clk(pll_10nm);
-	if (pll_10nm->slave)
-		dsi_pll_enable_global_clk(pll_10nm->slave);
-
-	pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_RBUF_CTRL,
-		  0x01);
-	if (pll_10nm->slave)
-		pll_write(pll_10nm->slave->phy_cmn_mmio +
-			  REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x01);
-
-error:
-	return rc;
-}
-
-static void dsi_pll_disable_sub(struct dsi_pll_10nm *pll)
-{
-	pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0);
-	dsi_pll_disable_pll_bias(pll);
-}
-
-static void dsi_pll_10nm_vco_unprepare(struct clk_hw *hw)
-{
-	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
-
-	/*
-	 * To avoid any stray glitches while abruptly powering down the PLL
-	 * make sure to gate the clock using the clock enable bit before
-	 * powering down the PLL
-	 */
-	dsi_pll_disable_global_clk(pll_10nm);
-	pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0);
-	dsi_pll_disable_sub(pll_10nm);
-	if (pll_10nm->slave) {
-		dsi_pll_disable_global_clk(pll_10nm->slave);
-		dsi_pll_disable_sub(pll_10nm->slave);
-	}
-	/* flush, ensure all register writes are done */
-	wmb();
-	pll->pll_on = false;
-}
-
-static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
-						  unsigned long parent_rate)
-{
-	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
-	struct dsi_pll_config *config = &pll_10nm->pll_configuration;
-	void __iomem *base = pll_10nm->mmio;
-	u64 ref_clk = pll_10nm->vco_ref_clk_rate;
-	u64 vco_rate = 0x0;
-	u64 multiplier;
-	u32 frac;
-	u32 dec;
-	u64 pll_freq, tmp64;
-
-	dec = pll_read(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1);
-	dec &= 0xff;
-
-	frac = pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1);
-	frac |= ((pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1) &
-		  0xff) << 8);
-	frac |= ((pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
-		  0x3) << 16);
-
-	/*
-	 * TODO:
-	 *	1. Assumes prescaler is disabled
-	 */
-	multiplier = 1 << config->frac_bits;
-	pll_freq = dec * (ref_clk * 2);
-	tmp64 = (ref_clk * 2 * frac);
-	pll_freq += div_u64(tmp64, multiplier);
-
-	vco_rate = pll_freq;
-
-	DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
-	    pll_10nm->id, (unsigned long)vco_rate, dec, frac);
-
-	return (unsigned long)vco_rate;
-}
-
-static const struct clk_ops clk_ops_dsi_pll_10nm_vco = {
-	.round_rate = msm_dsi_pll_helper_clk_round_rate,
-	.set_rate = dsi_pll_10nm_vco_set_rate,
-	.recalc_rate = dsi_pll_10nm_vco_recalc_rate,
-	.prepare = dsi_pll_10nm_vco_prepare,
-	.unprepare = dsi_pll_10nm_vco_unprepare,
-};
-
-/*
- * PLL Callbacks
- */
-
-static void dsi_pll_10nm_save_state(struct msm_dsi_pll *pll)
-{
-	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
-	struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
-	void __iomem *phy_base = pll_10nm->phy_cmn_mmio;
-	u32 cmn_clk_cfg0, cmn_clk_cfg1;
-
-	cached->pll_out_div = pll_read(pll_10nm->mmio +
-				       REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
-	cached->pll_out_div &= 0x3;
-
-	cmn_clk_cfg0 = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0);
-	cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
-	cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;
-
-	cmn_clk_cfg1 = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
-	cached->pll_mux = cmn_clk_cfg1 & 0x3;
-
-	DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
-	    pll_10nm->id, cached->pll_out_div, cached->bit_clk_div,
-	    cached->pix_clk_div, cached->pll_mux);
-}
-
-static int dsi_pll_10nm_restore_state(struct msm_dsi_pll *pll)
-{
-	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
-	struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
-	void __iomem *phy_base = pll_10nm->phy_cmn_mmio;
-	u32 val;
-	int ret;
-
-	val = pll_read(pll_10nm->mmio + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
-	val &= ~0x3;
-	val |= cached->pll_out_div;
-	pll_write(pll_10nm->mmio + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE, val);
-
-	pll_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
-		  cached->bit_clk_div | (cached->pix_clk_div << 4));
-
-	val = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
-	val &= ~0x3;
-	val |= cached->pll_mux;
-	pll_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, val);
-
-	ret = dsi_pll_10nm_vco_set_rate(&pll->clk_hw, pll_10nm->vco_current_rate, pll_10nm->vco_ref_clk_rate);
-	if (ret) {
-		DRM_DEV_ERROR(&pll_10nm->pdev->dev,
-			"restore vco rate failed. ret=%d\n", ret);
-		return ret;
-	}
-
-	DBG("DSI PLL%d", pll_10nm->id);
-
-	return 0;
-}
-
-static int dsi_pll_10nm_set_usecase(struct msm_dsi_pll *pll,
-				    enum msm_dsi_phy_usecase uc)
-{
-	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
-	void __iomem *base = pll_10nm->phy_cmn_mmio;
-	u32 data = 0x0;	/* internal PLL */
-
-	DBG("DSI PLL%d", pll_10nm->id);
-
-	switch (uc) {
-	case MSM_DSI_PHY_STANDALONE:
-		break;
-	case MSM_DSI_PHY_MASTER:
-		pll_10nm->slave = pll_10nm_list[(pll_10nm->id + 1) % DSI_MAX];
-		break;
-	case MSM_DSI_PHY_SLAVE:
-		data = 0x1; /* external PLL */
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	/* set PLL src */
-	pll_write(base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, (data << 2));
-
-	pll_10nm->uc = uc;
-
-	return 0;
-}
-
-static int dsi_pll_10nm_get_provider(struct msm_dsi_pll *pll,
-				     struct clk **byte_clk_provider,
-				     struct clk **pixel_clk_provider)
-{
-	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
-	struct clk_hw_onecell_data *hw_data = pll_10nm->hw_data;
-
-	DBG("DSI PLL%d", pll_10nm->id);
-
-	if (byte_clk_provider)
-		*byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk;
-	if (pixel_clk_provider)
-		*pixel_clk_provider = hw_data->hws[DSI_PIXEL_PLL_CLK]->clk;
-
-	return 0;
-}
-
-static void dsi_pll_10nm_destroy(struct msm_dsi_pll *pll)
-{
-	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
-	struct device *dev = &pll_10nm->pdev->dev;
-
-	DBG("DSI PLL%d", pll_10nm->id);
-	of_clk_del_provider(dev->of_node);
-
-	clk_hw_unregister_divider(pll_10nm->out_dsiclk_hw);
-	clk_hw_unregister_mux(pll_10nm->pclk_mux_hw);
-	clk_hw_unregister_fixed_factor(pll_10nm->post_out_div_clk_hw);
-	clk_hw_unregister_fixed_factor(pll_10nm->by_2_bit_clk_hw);
-	clk_hw_unregister_fixed_factor(pll_10nm->byte_clk_hw);
-	clk_hw_unregister_divider(pll_10nm->bit_clk_hw);
-	clk_hw_unregister_divider(pll_10nm->out_div_clk_hw);
-	clk_hw_unregister(&pll_10nm->base.clk_hw);
-}
-
-/*
- * The post dividers and mux clocks are created using the standard divider and
- * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux
- * state to follow the master PLL's divider/mux state. Therefore, we don't
- * require special clock ops that also configure the slave PLL registers
- */
-static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
-{
-	char clk_name[32], parent[32], vco_name[32];
-	char parent2[32], parent3[32], parent4[32];
-	struct clk_init_data vco_init = {
-		.parent_names = (const char *[]){ "xo" },
-		.num_parents = 1,
-		.name = vco_name,
-		.flags = CLK_IGNORE_UNUSED,
-		.ops = &clk_ops_dsi_pll_10nm_vco,
-	};
-	struct device *dev = &pll_10nm->pdev->dev;
-	struct clk_hw_onecell_data *hw_data;
-	struct clk_hw *hw;
-	int ret;
-
-	DBG("DSI%d", pll_10nm->id);
-
-	hw_data = devm_kzalloc(dev, sizeof(*hw_data) +
-			       NUM_PROVIDED_CLKS * sizeof(struct clk_hw *),
-			       GFP_KERNEL);
-	if (!hw_data)
-		return -ENOMEM;
-
-	snprintf(vco_name, 32, "dsi%dvco_clk", pll_10nm->id);
-	pll_10nm->base.clk_hw.init = &vco_init;
-
-	ret = clk_hw_register(dev, &pll_10nm->base.clk_hw);
-	if (ret)
-		return ret;
-
-	snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
-	snprintf(parent, 32, "dsi%dvco_clk", pll_10nm->id);
-
-	hw = clk_hw_register_divider(dev, clk_name,
-				     parent, CLK_SET_RATE_PARENT,
-				     pll_10nm->mmio +
-				     REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE,
-				     0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
-	if (IS_ERR(hw)) {
-		ret = PTR_ERR(hw);
-		goto err_base_clk_hw;
-	}
-
-	pll_10nm->out_div_clk_hw = hw;
-
-	snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
-	snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
-
-	/* BIT CLK: DIV_CTRL_3_0 */
-	hw = clk_hw_register_divider(dev, clk_name, parent,
-				     CLK_SET_RATE_PARENT,
-				     pll_10nm->phy_cmn_mmio +
-				     REG_DSI_10nm_PHY_CMN_CLK_CFG0,
-				     0, 4, CLK_DIVIDER_ONE_BASED,
-				     &pll_10nm->postdiv_lock);
-	if (IS_ERR(hw)) {
-		ret = PTR_ERR(hw);
-		goto err_out_div_clk_hw;
-	}
-
-	pll_10nm->bit_clk_hw = hw;
-
-	snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_10nm->id);
-	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
-
-	/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
-	hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
-					  CLK_SET_RATE_PARENT, 1, 8);
-	if (IS_ERR(hw)) {
-		ret = PTR_ERR(hw);
-		goto err_bit_clk_hw;
-	}
-
-	pll_10nm->byte_clk_hw = hw;
-	hw_data->hws[DSI_BYTE_PLL_CLK] = hw;
-
-	snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->id);
-	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
-
-	hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
-					  0, 1, 2);
-	if (IS_ERR(hw)) {
-		ret = PTR_ERR(hw);
-		goto err_byte_clk_hw;
-	}
-
-	pll_10nm->by_2_bit_clk_hw = hw;
-
-	snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->id);
-	snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
-
-	hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
-					  0, 1, 4);
-	if (IS_ERR(hw)) {
-		ret = PTR_ERR(hw);
-		goto err_by_2_bit_clk_hw;
-	}
-
-	pll_10nm->post_out_div_clk_hw = hw;
-
-	snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_10nm->id);
-	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
-	snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->id);
-	snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
-	snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->id);
-
-	hw = clk_hw_register_mux(dev, clk_name,
-				 ((const char *[]){
-				 parent, parent2, parent3, parent4
-				 }), 4, 0, pll_10nm->phy_cmn_mmio +
-				 REG_DSI_10nm_PHY_CMN_CLK_CFG1,
-				 0, 2, 0, NULL);
-	if (IS_ERR(hw)) {
-		ret = PTR_ERR(hw);
-		goto err_post_out_div_clk_hw;
-	}
-
-	pll_10nm->pclk_mux_hw = hw;
-
-	snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_10nm->id);
-	snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->id);
-
-	/* PIX CLK DIV : DIV_CTRL_7_4*/
-	hw = clk_hw_register_divider(dev, clk_name, parent,
-				     0, pll_10nm->phy_cmn_mmio +
-					REG_DSI_10nm_PHY_CMN_CLK_CFG0,
-				     4, 4, CLK_DIVIDER_ONE_BASED,
-				     &pll_10nm->postdiv_lock);
-	if (IS_ERR(hw)) {
-		ret = PTR_ERR(hw);
-		goto err_pclk_mux_hw;
-	}
-
-	pll_10nm->out_dsiclk_hw = hw;
-	hw_data->hws[DSI_PIXEL_PLL_CLK] = hw;
-
-	hw_data->num = NUM_PROVIDED_CLKS;
-	pll_10nm->hw_data = hw_data;
-
-	ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
-				     pll_10nm->hw_data);
-	if (ret) {
-		DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
-		goto err_dsiclk_hw;
-	}
-
-	return 0;
-
-err_dsiclk_hw:
-	clk_hw_unregister_divider(pll_10nm->out_dsiclk_hw);
-err_pclk_mux_hw:
-	clk_hw_unregister_mux(pll_10nm->pclk_mux_hw);
-err_post_out_div_clk_hw:
-	clk_hw_unregister_fixed_factor(pll_10nm->post_out_div_clk_hw);
-err_by_2_bit_clk_hw:
-	clk_hw_unregister_fixed_factor(pll_10nm->by_2_bit_clk_hw);
-err_byte_clk_hw:
-	clk_hw_unregister_fixed_factor(pll_10nm->byte_clk_hw);
-err_bit_clk_hw:
-	clk_hw_unregister_divider(pll_10nm->bit_clk_hw);
-err_out_div_clk_hw:
-	clk_hw_unregister_divider(pll_10nm->out_div_clk_hw);
-err_base_clk_hw:
-	clk_hw_unregister(&pll_10nm->base.clk_hw);
-
-	return ret;
-}
-
-struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
-{
-	struct dsi_pll_10nm *pll_10nm;
-	struct msm_dsi_pll *pll;
-	int ret;
-
-	pll_10nm = devm_kzalloc(&pdev->dev, sizeof(*pll_10nm), GFP_KERNEL);
-	if (!pll_10nm)
-		return ERR_PTR(-ENOMEM);
-
-	DBG("DSI PLL%d", id);
-
-	pll_10nm->pdev = pdev;
-	pll_10nm->id = id;
-	pll_10nm_list[id] = pll_10nm;
-
-	pll_10nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
-	if (IS_ERR_OR_NULL(pll_10nm->phy_cmn_mmio)) {
-		DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	pll_10nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
-	if (IS_ERR_OR_NULL(pll_10nm->mmio)) {
-		DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	spin_lock_init(&pll_10nm->postdiv_lock);
-
-	pll = &pll_10nm->base;
-	pll->min_rate = 1000000000UL;
-	pll->max_rate = 3500000000UL;
-	pll->get_provider = dsi_pll_10nm_get_provider;
-	pll->destroy = dsi_pll_10nm_destroy;
-	pll->save_state = dsi_pll_10nm_save_state;
-	pll->restore_state = dsi_pll_10nm_restore_state;
-	pll->set_usecase = dsi_pll_10nm_set_usecase;
-
-	pll_10nm->vco_delay = 1;
-
-	ret = pll_10nm_register(pll_10nm);
-	if (ret) {
-		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
-		return ERR_PTR(ret);
-	}
-
-	/* TODO: Remove this when we have proper display handover support */
-	msm_dsi_pll_save_state(pll);
-
-	return pll;
-}
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
deleted file mode 100644
index f847376d501e..000000000000
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
+++ /dev/null
@@ -1,1096 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-
-#include "dsi_pll.h"
-#include "dsi.xml.h"
-
-/*
- * DSI PLL 14nm - clock diagram (eg: DSI0):
- *
- *         dsi0n1_postdiv_clk
- *                         |
- *                         |
- *                 +----+  |  +----+
- *  dsi0vco_clk ---| n1 |--o--| /8 |-- dsi0pllbyte
- *                 +----+  |  +----+
- *                         |           dsi0n1_postdivby2_clk
- *                         |   +----+  |
- *                         o---| /2 |--o--|\
- *                         |   +----+     | \   +----+
- *                         |              |  |--| n2 |-- dsi0pll
- *                         o--------------| /   +----+
- *                                        |/
- */
-
-#define POLL_MAX_READS			15
-#define POLL_TIMEOUT_US			1000
-
-#define NUM_PROVIDED_CLKS		2
-
-#define VCO_REF_CLK_RATE		19200000
-#define VCO_MIN_RATE			1300000000UL
-#define VCO_MAX_RATE			2600000000UL
-
-#define DSI_BYTE_PLL_CLK		0
-#define DSI_PIXEL_PLL_CLK		1
-
-#define DSI_PLL_DEFAULT_VCO_POSTDIV	1
-
-struct dsi_pll_input {
-	u32 fref;	/* reference clk */
-	u32 fdata;	/* bit clock rate */
-	u32 dsiclk_sel; /* Mux configuration (see diagram) */
-	u32 ssc_en;	/* SSC enable/disable */
-	u32 ldo_en;
-
-	/* fixed params */
-	u32 refclk_dbler_en;
-	u32 vco_measure_time;
-	u32 kvco_measure_time;
-	u32 bandgap_timer;
-	u32 pll_wakeup_timer;
-	u32 plllock_cnt;
-	u32 plllock_rng;
-	u32 ssc_center;
-	u32 ssc_adj_period;
-	u32 ssc_spread;
-	u32 ssc_freq;
-	u32 pll_ie_trim;
-	u32 pll_ip_trim;
-	u32 pll_iptat_trim;
-	u32 pll_cpcset_cur;
-	u32 pll_cpmset_cur;
-
-	u32 pll_icpmset;
-	u32 pll_icpcset;
-
-	u32 pll_icpmset_p;
-	u32 pll_icpmset_m;
-
-	u32 pll_icpcset_p;
-	u32 pll_icpcset_m;
-
-	u32 pll_lpf_res1;
-	u32 pll_lpf_cap1;
-	u32 pll_lpf_cap2;
-	u32 pll_c3ctrl;
-	u32 pll_r3ctrl;
-};
-
-struct dsi_pll_output {
-	u32 pll_txclk_en;
-	u32 dec_start;
-	u32 div_frac_start;
-	u32 ssc_period;
-	u32 ssc_step_size;
-	u32 plllock_cmp;
-	u32 pll_vco_div_ref;
-	u32 pll_vco_count;
-	u32 pll_kvco_div_ref;
-	u32 pll_kvco_count;
-	u32 pll_misc1;
-	u32 pll_lpf2_postdiv;
-	u32 pll_resetsm_cntrl;
-	u32 pll_resetsm_cntrl2;
-	u32 pll_resetsm_cntrl5;
-	u32 pll_kvco_code;
-
-	u32 cmn_clk_cfg0;
-	u32 cmn_clk_cfg1;
-	u32 cmn_ldo_cntrl;
-
-	u32 pll_postdiv;
-	u32 fcvo;
-};
-
-struct pll_14nm_cached_state {
-	unsigned long vco_rate;
-	u8 n2postdiv;
-	u8 n1postdiv;
-};
-
-struct dsi_pll_14nm {
-	struct msm_dsi_pll base;
-
-	int id;
-	struct platform_device *pdev;
-
-	void __iomem *phy_cmn_mmio;
-	void __iomem *mmio;
-
-	int vco_delay;
-
-	struct dsi_pll_input in;
-	struct dsi_pll_output out;
-
-	/* protects REG_DSI_14nm_PHY_CMN_CLK_CFG0 register */
-	spinlock_t postdiv_lock;
-
-	u64 vco_current_rate;
-	u64 vco_ref_clk_rate;
-
-	/* private clocks: */
-	struct clk_hw *hws[NUM_DSI_CLOCKS_MAX];
-	u32 num_hws;
-
-	/* clock-provider: */
-	struct clk_hw_onecell_data *hw_data;
-
-	struct pll_14nm_cached_state cached_state;
-
-	enum msm_dsi_phy_usecase uc;
-	struct dsi_pll_14nm *slave;
-};
-
-#define to_pll_14nm(x)	container_of(x, struct dsi_pll_14nm, base)
-
-/*
- * Private struct for N1/N2 post-divider clocks. These clocks are similar to
- * the generic clk_divider class of clocks. The only difference is that it
- * also sets the slave DSI PLL's post-dividers if in Dual DSI mode
- */
-struct dsi_pll_14nm_postdiv {
-	struct clk_hw hw;
-
-	/* divider params */
-	u8 shift;
-	u8 width;
-	u8 flags; /* same flags as used by clk_divider struct */
-
-	struct dsi_pll_14nm *pll;
-};
-
-#define to_pll_14nm_postdiv(_hw) container_of(_hw, struct dsi_pll_14nm_postdiv, hw)
-
-/*
- * Global list of private DSI PLL struct pointers. We need this for Dual DSI
- * mode, where the master PLL's clk_ops needs access the slave's private data
- */
-static struct dsi_pll_14nm *pll_14nm_list[DSI_MAX];
-
-static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm,
-				    u32 nb_tries, u32 timeout_us)
-{
-	bool pll_locked = false;
-	void __iomem *base = pll_14nm->mmio;
-	u32 tries, val;
-
-	tries = nb_tries;
-	while (tries--) {
-		val = pll_read(base +
-			       REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
-		pll_locked = !!(val & BIT(5));
-
-		if (pll_locked)
-			break;
-
-		udelay(timeout_us);
-	}
-
-	if (!pll_locked) {
-		tries = nb_tries;
-		while (tries--) {
-			val = pll_read(base +
-				REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
-			pll_locked = !!(val & BIT(0));
-
-			if (pll_locked)
-				break;
-
-			udelay(timeout_us);
-		}
-	}
-
-	DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
-
-	return pll_locked;
-}
-
-static void dsi_pll_14nm_input_init(struct dsi_pll_14nm *pll)
-{
-	pll->in.fref = pll->vco_ref_clk_rate;
-	pll->in.fdata = 0;
-	pll->in.dsiclk_sel = 1;	/* Use the /2 path in Mux */
-	pll->in.ldo_en = 0;	/* disabled for now */
-
-	/* fixed input */
-	pll->in.refclk_dbler_en = 0;
-	pll->in.vco_measure_time = 5;
-	pll->in.kvco_measure_time = 5;
-	pll->in.bandgap_timer = 4;
-	pll->in.pll_wakeup_timer = 5;
-	pll->in.plllock_cnt = 1;
-	pll->in.plllock_rng = 0;
-
-	/*
-	 * SSC is enabled by default. We might need DT props for configuring
-	 * some SSC params like PPM and center/down spread etc.
-	 */
-	pll->in.ssc_en = 1;
-	pll->in.ssc_center = 0;		/* down spread by default */
-	pll->in.ssc_spread = 5;		/* PPM / 1000 */
-	pll->in.ssc_freq = 31500;	/* default recommended */
-	pll->in.ssc_adj_period = 37;
-
-	pll->in.pll_ie_trim = 4;
-	pll->in.pll_ip_trim = 4;
-	pll->in.pll_cpcset_cur = 1;
-	pll->in.pll_cpmset_cur = 1;
-	pll->in.pll_icpmset = 4;
-	pll->in.pll_icpcset = 4;
-	pll->in.pll_icpmset_p = 0;
-	pll->in.pll_icpmset_m = 0;
-	pll->in.pll_icpcset_p = 0;
-	pll->in.pll_icpcset_m = 0;
-	pll->in.pll_lpf_res1 = 3;
-	pll->in.pll_lpf_cap1 = 11;
-	pll->in.pll_lpf_cap2 = 1;
-	pll->in.pll_iptat_trim = 7;
-	pll->in.pll_c3ctrl = 2;
-	pll->in.pll_r3ctrl = 1;
-}
-
-#define CEIL(x, y)		(((x) + ((y) - 1)) / (y))
-
-static void pll_14nm_ssc_calc(struct dsi_pll_14nm *pll)
-{
-	u32 period, ssc_period;
-	u32 ref, rem;
-	u64 step_size;
-
-	DBG("vco=%lld ref=%lld", pll->vco_current_rate, pll->vco_ref_clk_rate);
-
-	ssc_period = pll->in.ssc_freq / 500;
-	period = (u32)pll->vco_ref_clk_rate / 1000;
-	ssc_period  = CEIL(period, ssc_period);
-	ssc_period -= 1;
-	pll->out.ssc_period = ssc_period;
-
-	DBG("ssc freq=%d spread=%d period=%d", pll->in.ssc_freq,
-	    pll->in.ssc_spread, pll->out.ssc_period);
-
-	step_size = (u32)pll->vco_current_rate;
-	ref = pll->vco_ref_clk_rate;
-	ref /= 1000;
-	step_size = div_u64(step_size, ref);
-	step_size <<= 20;
-	step_size = div_u64(step_size, 1000);
-	step_size *= pll->in.ssc_spread;
-	step_size = div_u64(step_size, 1000);
-	step_size *= (pll->in.ssc_adj_period + 1);
-
-	rem = 0;
-	step_size = div_u64_rem(step_size, ssc_period + 1, &rem);
-	if (rem)
-		step_size++;
-
-	DBG("step_size=%lld", step_size);
-
-	step_size &= 0x0ffff;	/* take lower 16 bits */
-
-	pll->out.ssc_step_size = step_size;
-}
-
-static void pll_14nm_dec_frac_calc(struct dsi_pll_14nm *pll)
-{
-	struct dsi_pll_input *pin = &pll->in;
-	struct dsi_pll_output *pout = &pll->out;
-	u64 multiplier = BIT(20);
-	u64 dec_start_multiple, dec_start, pll_comp_val;
-	u32 duration, div_frac_start;
-	u64 vco_clk_rate = pll->vco_current_rate;
-	u64 fref = pll->vco_ref_clk_rate;
-
-	DBG("vco_clk_rate=%lld ref_clk_rate=%lld", vco_clk_rate, fref);
-
-	dec_start_multiple = div_u64(vco_clk_rate * multiplier, fref);
-	div_u64_rem(dec_start_multiple, multiplier, &div_frac_start);
-
-	dec_start = div_u64(dec_start_multiple, multiplier);
-
-	pout->dec_start = (u32)dec_start;
-	pout->div_frac_start = div_frac_start;
-
-	if (pin->plllock_cnt == 0)
-		duration = 1024;
-	else if (pin->plllock_cnt == 1)
-		duration = 256;
-	else if (pin->plllock_cnt == 2)
-		duration = 128;
-	else
-		duration = 32;
-
-	pll_comp_val = duration * dec_start_multiple;
-	pll_comp_val = div_u64(pll_comp_val, multiplier);
-	do_div(pll_comp_val, 10);
-
-	pout->plllock_cmp = (u32)pll_comp_val;
-
-	pout->pll_txclk_en = 1;
-	pout->cmn_ldo_cntrl = 0x3c;
-}
-
-static u32 pll_14nm_kvco_slop(u32 vrate)
-{
-	u32 slop = 0;
-
-	if (vrate > VCO_MIN_RATE && vrate <= 1800000000UL)
-		slop =  600;
-	else if (vrate > 1800000000UL && vrate < 2300000000UL)
-		slop = 400;
-	else if (vrate > 2300000000UL && vrate < VCO_MAX_RATE)
-		slop = 280;
-
-	return slop;
-}
-
-static void pll_14nm_calc_vco_count(struct dsi_pll_14nm *pll)
-{
-	struct dsi_pll_input *pin = &pll->in;
-	struct dsi_pll_output *pout = &pll->out;
-	u64 vco_clk_rate = pll->vco_current_rate;
-	u64 fref = pll->vco_ref_clk_rate;
-	u64 data;
-	u32 cnt;
-
-	data = fref * pin->vco_measure_time;
-	do_div(data, 1000000);
-	data &= 0x03ff;	/* 10 bits */
-	data -= 2;
-	pout->pll_vco_div_ref = data;
-
-	data = div_u64(vco_clk_rate, 1000000);	/* unit is Mhz */
-	data *= pin->vco_measure_time;
-	do_div(data, 10);
-	pout->pll_vco_count = data;
-
-	data = fref * pin->kvco_measure_time;
-	do_div(data, 1000000);
-	data &= 0x03ff;	/* 10 bits */
-	data -= 1;
-	pout->pll_kvco_div_ref = data;
-
-	cnt = pll_14nm_kvco_slop(vco_clk_rate);
-	cnt *= 2;
-	cnt /= 100;
-	cnt *= pin->kvco_measure_time;
-	pout->pll_kvco_count = cnt;
-
-	pout->pll_misc1 = 16;
-	pout->pll_resetsm_cntrl = 48;
-	pout->pll_resetsm_cntrl2 = pin->bandgap_timer << 3;
-	pout->pll_resetsm_cntrl5 = pin->pll_wakeup_timer;
-	pout->pll_kvco_code = 0;
-}
-
-static void pll_db_commit_ssc(struct dsi_pll_14nm *pll)
-{
-	void __iomem *base = pll->mmio;
-	struct dsi_pll_input *pin = &pll->in;
-	struct dsi_pll_output *pout = &pll->out;
-	u8 data;
-
-	data = pin->ssc_adj_period;
-	data &= 0x0ff;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER1, data);
-	data = (pin->ssc_adj_period >> 8);
-	data &= 0x03;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER2, data);
-
-	data = pout->ssc_period;
-	data &= 0x0ff;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER1, data);
-	data = (pout->ssc_period >> 8);
-	data &= 0x0ff;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER2, data);
-
-	data = pout->ssc_step_size;
-	data &= 0x0ff;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE1, data);
-	data = (pout->ssc_step_size >> 8);
-	data &= 0x0ff;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE2, data);
-
-	data = (pin->ssc_center & 0x01);
-	data <<= 1;
-	data |= 0x01; /* enable */
-	pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_EN_CENTER, data);
-
-	wmb();	/* make sure register committed */
-}
-
-static void pll_db_commit_common(struct dsi_pll_14nm *pll,
-				 struct dsi_pll_input *pin,
-				 struct dsi_pll_output *pout)
-{
-	void __iomem *base = pll->mmio;
-	u8 data;
-
-	/* confgiure the non frequency dependent pll registers */
-	data = 0;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_SYSCLK_EN_RESET, data);
-
-	data = pout->pll_txclk_en;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_TXCLK_EN, data);
-
-	data = pout->pll_resetsm_cntrl;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL, data);
-	data = pout->pll_resetsm_cntrl2;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL2, data);
-	data = pout->pll_resetsm_cntrl5;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL5, data);
-
-	data = pout->pll_vco_div_ref & 0xff;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF1, data);
-	data = (pout->pll_vco_div_ref >> 8) & 0x3;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF2, data);
-
-	data = pout->pll_kvco_div_ref & 0xff;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF1, data);
-	data = (pout->pll_kvco_div_ref >> 8) & 0x3;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF2, data);
-
-	data = pout->pll_misc1;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_MISC1, data);
-
-	data = pin->pll_ie_trim;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_IE_TRIM, data);
-
-	data = pin->pll_ip_trim;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_IP_TRIM, data);
-
-	data = pin->pll_cpmset_cur << 3 | pin->pll_cpcset_cur;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_CP_SET_CUR, data);
-
-	data = pin->pll_icpcset_p << 3 | pin->pll_icpcset_m;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPCSET, data);
-
-	data = pin->pll_icpmset_p << 3 | pin->pll_icpcset_m;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPMSET, data);
-
-	data = pin->pll_icpmset << 3 | pin->pll_icpcset;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICP_SET, data);
-
-	data = pin->pll_lpf_cap2 << 4 | pin->pll_lpf_cap1;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_LPF1, data);
-
-	data = pin->pll_iptat_trim;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_IPTAT_TRIM, data);
-
-	data = pin->pll_c3ctrl | pin->pll_r3ctrl << 4;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_CRCTRL, data);
-}
-
-static void pll_14nm_software_reset(struct dsi_pll_14nm *pll_14nm)
-{
-	void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;
-
-	/* de assert pll start and apply pll sw reset */
-
-	/* stop pll */
-	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0);
-
-	/* pll sw reset */
-	pll_write_udelay(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x20, 10);
-	wmb();	/* make sure register committed */
-
-	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0);
-	wmb();	/* make sure register committed */
-}
-
-static void pll_db_commit_14nm(struct dsi_pll_14nm *pll,
-			       struct dsi_pll_input *pin,
-			       struct dsi_pll_output *pout)
-{
-	void __iomem *base = pll->mmio;
-	void __iomem *cmn_base = pll->phy_cmn_mmio;
-	u8 data;
-
-	DBG("DSI%d PLL", pll->id);
-
-	data = pout->cmn_ldo_cntrl;
-	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL, data);
-
-	pll_db_commit_common(pll, pin, pout);
-
-	pll_14nm_software_reset(pll);
-
-	data = pin->dsiclk_sel; /* set dsiclk_sel = 1  */
-	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG1, data);
-
-	data = 0xff; /* data, clk, pll normal operation */
-	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_0, data);
-
-	/* configure the frequency dependent pll registers */
-	data = pout->dec_start;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_DEC_START, data);
-
-	data = pout->div_frac_start & 0xff;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1, data);
-	data = (pout->div_frac_start >> 8) & 0xff;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2, data);
-	data = (pout->div_frac_start >> 16) & 0xf;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3, data);
-
-	data = pout->plllock_cmp & 0xff;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP1, data);
-
-	data = (pout->plllock_cmp >> 8) & 0xff;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP2, data);
-
-	data = (pout->plllock_cmp >> 16) & 0x3;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP3, data);
-
-	data = pin->plllock_cnt << 1 | pin->plllock_rng << 3;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP_EN, data);
-
-	data = pout->pll_vco_count & 0xff;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT1, data);
-	data = (pout->pll_vco_count >> 8) & 0xff;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT2, data);
-
-	data = pout->pll_kvco_count & 0xff;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT1, data);
-	data = (pout->pll_kvco_count >> 8) & 0x3;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT2, data);
-
-	data = (pout->pll_postdiv - 1) << 4 | pin->pll_lpf_res1;
-	pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_LPF2_POSTDIV, data);
-
-	if (pin->ssc_en)
-		pll_db_commit_ssc(pll);
-
-	wmb();	/* make sure register committed */
-}
-
-/*
- * VCO clock Callbacks
- */
-static int dsi_pll_14nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
-				     unsigned long parent_rate)
-{
-	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
-	struct dsi_pll_input *pin = &pll_14nm->in;
-	struct dsi_pll_output *pout = &pll_14nm->out;
-
-	DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_14nm->id, rate,
-	    parent_rate);
-
-	pll_14nm->vco_current_rate = rate;
-	pll_14nm->vco_ref_clk_rate = VCO_REF_CLK_RATE;
-
-	dsi_pll_14nm_input_init(pll_14nm);
-
-	/*
-	 * This configures the post divider internal to the VCO. It's
-	 * fixed to divide by 1 for now.
-	 *
-	 * tx_band = pll_postdiv.
-	 * 0: divided by 1
-	 * 1: divided by 2
-	 * 2: divided by 4
-	 * 3: divided by 8
-	 */
-	pout->pll_postdiv = DSI_PLL_DEFAULT_VCO_POSTDIV;
-
-	pll_14nm_dec_frac_calc(pll_14nm);
-
-	if (pin->ssc_en)
-		pll_14nm_ssc_calc(pll_14nm);
-
-	pll_14nm_calc_vco_count(pll_14nm);
-
-	/* commit the slave DSI PLL registers if we're master. Note that we
-	 * don't lock the slave PLL. We just ensure that the PLL/PHY registers
-	 * of the master and slave are identical
-	 */
-	if (pll_14nm->uc == MSM_DSI_PHY_MASTER) {
-		struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;
-
-		pll_db_commit_14nm(pll_14nm_slave, pin, pout);
-	}
-
-	pll_db_commit_14nm(pll_14nm, pin, pout);
-
-	return 0;
-}
-
-static unsigned long dsi_pll_14nm_vco_recalc_rate(struct clk_hw *hw,
-						  unsigned long parent_rate)
-{
-	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
-	void __iomem *base = pll_14nm->mmio;
-	u64 vco_rate, multiplier = BIT(20);
-	u32 div_frac_start;
-	u32 dec_start;
-	u64 ref_clk = parent_rate;
-
-	dec_start = pll_read(base + REG_DSI_14nm_PHY_PLL_DEC_START);
-	dec_start &= 0x0ff;
-
-	DBG("dec_start = %x", dec_start);
-
-	div_frac_start = (pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3)
-				& 0xf) << 16;
-	div_frac_start |= (pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2)
-				& 0xff) << 8;
-	div_frac_start |= pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1)
-				& 0xff;
-
-	DBG("div_frac_start = %x", div_frac_start);
-
-	vco_rate = ref_clk * dec_start;
-
-	vco_rate += ((ref_clk * div_frac_start) / multiplier);
-
-	/*
-	 * Recalculating the rate from dec_start and frac_start doesn't end up
-	 * the rate we originally set. Convert the freq to KHz, round it up and
-	 * convert it back to MHz.
-	 */
-	vco_rate = DIV_ROUND_UP_ULL(vco_rate, 1000) * 1000;
-
-	DBG("returning vco rate = %lu", (unsigned long)vco_rate);
-
-	return (unsigned long)vco_rate;
-}
-
-static const struct clk_ops clk_ops_dsi_pll_14nm_vco = {
-	.round_rate = msm_dsi_pll_helper_clk_round_rate,
-	.set_rate = dsi_pll_14nm_vco_set_rate,
-	.recalc_rate = dsi_pll_14nm_vco_recalc_rate,
-	.prepare = msm_dsi_pll_helper_clk_prepare,
-	.unprepare = msm_dsi_pll_helper_clk_unprepare,
-};
-
-/*
- * N1 and N2 post-divider clock callbacks
- */
-#define div_mask(width)	((1 << (width)) - 1)
-static unsigned long dsi_pll_14nm_postdiv_recalc_rate(struct clk_hw *hw,
-						      unsigned long parent_rate)
-{
-	struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
-	struct dsi_pll_14nm *pll_14nm = postdiv->pll;
-	void __iomem *base = pll_14nm->phy_cmn_mmio;
-	u8 shift = postdiv->shift;
-	u8 width = postdiv->width;
-	u32 val;
-
-	DBG("DSI%d PLL parent rate=%lu", pll_14nm->id, parent_rate);
-
-	val = pll_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0) >> shift;
-	val &= div_mask(width);
-
-	return divider_recalc_rate(hw, parent_rate, val, NULL,
-				   postdiv->flags, width);
-}
-
-static long dsi_pll_14nm_postdiv_round_rate(struct clk_hw *hw,
-					    unsigned long rate,
-					    unsigned long *prate)
-{
-	struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
-	struct dsi_pll_14nm *pll_14nm = postdiv->pll;
-
-	DBG("DSI%d PLL parent rate=%lu", pll_14nm->id, rate);
-
-	return divider_round_rate(hw, rate, prate, NULL,
-				  postdiv->width,
-				  postdiv->flags);
-}
-
-static int dsi_pll_14nm_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
-					 unsigned long parent_rate)
-{
-	struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
-	struct dsi_pll_14nm *pll_14nm = postdiv->pll;
-	void __iomem *base = pll_14nm->phy_cmn_mmio;
-	spinlock_t *lock = &pll_14nm->postdiv_lock;
-	u8 shift = postdiv->shift;
-	u8 width = postdiv->width;
-	unsigned int value;
-	unsigned long flags = 0;
-	u32 val;
-
-	DBG("DSI%d PLL parent rate=%lu parent rate %lu", pll_14nm->id, rate,
-	    parent_rate);
-
-	value = divider_get_val(rate, parent_rate, NULL, postdiv->width,
-				postdiv->flags);
-
-	spin_lock_irqsave(lock, flags);
-
-	val = pll_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);
-	val &= ~(div_mask(width) << shift);
-
-	val |= value << shift;
-	pll_write(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val);
-
-	/* If we're master in dual DSI mode, then the slave PLL's post-dividers
-	 * follow the master's post dividers
-	 */
-	if (pll_14nm->uc == MSM_DSI_PHY_MASTER) {
-		struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;
-		void __iomem *slave_base = pll_14nm_slave->phy_cmn_mmio;
-
-		pll_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val);
-	}
-
-	spin_unlock_irqrestore(lock, flags);
-
-	return 0;
-}
-
-static const struct clk_ops clk_ops_dsi_pll_14nm_postdiv = {
-	.recalc_rate = dsi_pll_14nm_postdiv_recalc_rate,
-	.round_rate = dsi_pll_14nm_postdiv_round_rate,
-	.set_rate = dsi_pll_14nm_postdiv_set_rate,
-};
-
-/*
- * PLL Callbacks
- */
-
-static int dsi_pll_14nm_enable_seq(struct msm_dsi_pll *pll)
-{
-	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
-	void __iomem *base = pll_14nm->mmio;
-	void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;
-	bool locked;
-
-	DBG("");
-
-	pll_write(base + REG_DSI_14nm_PHY_PLL_VREF_CFG1, 0x10);
-	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 1);
-
-	locked = pll_14nm_poll_for_ready(pll_14nm, POLL_MAX_READS,
-					 POLL_TIMEOUT_US);
-
-	if (unlikely(!locked))
-		DRM_DEV_ERROR(&pll_14nm->pdev->dev, "DSI PLL lock failed\n");
-	else
-		DBG("DSI PLL lock success");
-
-	return locked ? 0 : -EINVAL;
-}
-
-static void dsi_pll_14nm_disable_seq(struct msm_dsi_pll *pll)
-{
-	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
-	void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;
-
-	DBG("");
-
-	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0);
-}
-
-static void dsi_pll_14nm_save_state(struct msm_dsi_pll *pll)
-{
-	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
-	struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state;
-	void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;
-	u32 data;
-
-	data = pll_read(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);
-
-	cached_state->n1postdiv = data & 0xf;
-	cached_state->n2postdiv = (data >> 4) & 0xf;
-
-	DBG("DSI%d PLL save state %x %x", pll_14nm->id,
-	    cached_state->n1postdiv, cached_state->n2postdiv);
-
-	cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw);
-}
-
-static int dsi_pll_14nm_restore_state(struct msm_dsi_pll *pll)
-{
-	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
-	struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state;
-	void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;
-	u32 data;
-	int ret;
-
-	ret = dsi_pll_14nm_vco_set_rate(&pll->clk_hw,
-					cached_state->vco_rate, 0);
-	if (ret) {
-		DRM_DEV_ERROR(&pll_14nm->pdev->dev,
-			"restore vco rate failed. ret=%d\n", ret);
-		return ret;
-	}
-
-	data = cached_state->n1postdiv | (cached_state->n2postdiv << 4);
-
-	DBG("DSI%d PLL restore state %x %x", pll_14nm->id,
-	    cached_state->n1postdiv, cached_state->n2postdiv);
-
-	pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data);
-
-	/* also restore post-dividers for slave DSI PLL */
-	if (pll_14nm->uc == MSM_DSI_PHY_MASTER) {
-		struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;
-		void __iomem *slave_base = pll_14nm_slave->phy_cmn_mmio;
-
-		pll_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data);
-	}
-
-	return 0;
-}
-
-static int dsi_pll_14nm_set_usecase(struct msm_dsi_pll *pll,
-				    enum msm_dsi_phy_usecase uc)
-{
-	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
-	void __iomem *base = pll_14nm->mmio;
-	u32 clkbuflr_en, bandgap = 0;
-
-	switch (uc) {
-	case MSM_DSI_PHY_STANDALONE:
-		clkbuflr_en = 0x1;
-		break;
-	case MSM_DSI_PHY_MASTER:
-		clkbuflr_en = 0x3;
-		pll_14nm->slave = pll_14nm_list[(pll_14nm->id + 1) % DSI_MAX];
-		break;
-	case MSM_DSI_PHY_SLAVE:
-		clkbuflr_en = 0x0;
-		bandgap = 0x3;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	pll_write(base + REG_DSI_14nm_PHY_PLL_CLKBUFLR_EN, clkbuflr_en);
-	if (bandgap)
-		pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_BANDGAP, bandgap);
-
-	pll_14nm->uc = uc;
-
-	return 0;
-}
-
-static int dsi_pll_14nm_get_provider(struct msm_dsi_pll *pll,
-				     struct clk **byte_clk_provider,
-				     struct clk **pixel_clk_provider)
-{
-	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
-	struct clk_hw_onecell_data *hw_data = pll_14nm->hw_data;
-
-	if (byte_clk_provider)
-		*byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk;
-	if (pixel_clk_provider)
-		*pixel_clk_provider = hw_data->hws[DSI_PIXEL_PLL_CLK]->clk;
-
-	return 0;
-}
-
-static void dsi_pll_14nm_destroy(struct msm_dsi_pll *pll)
-{
-	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
-	struct platform_device *pdev = pll_14nm->pdev;
-	int num_hws = pll_14nm->num_hws;
-
-	of_clk_del_provider(pdev->dev.of_node);
-
-	while (num_hws--)
-		clk_hw_unregister(pll_14nm->hws[num_hws]);
-}
-
-static struct clk_hw *pll_14nm_postdiv_register(struct dsi_pll_14nm *pll_14nm,
-						const char *name,
-						const char *parent_name,
-						unsigned long flags,
-						u8 shift)
-{
-	struct dsi_pll_14nm_postdiv *pll_postdiv;
-	struct device *dev = &pll_14nm->pdev->dev;
-	struct clk_init_data postdiv_init = {
-		.parent_names = (const char *[]) { parent_name },
-		.num_parents = 1,
-		.name = name,
-		.flags = flags,
-		.ops = &clk_ops_dsi_pll_14nm_postdiv,
-	};
-	int ret;
-
-	pll_postdiv = devm_kzalloc(dev, sizeof(*pll_postdiv), GFP_KERNEL);
-	if (!pll_postdiv)
-		return ERR_PTR(-ENOMEM);
-
-	pll_postdiv->pll = pll_14nm;
-	pll_postdiv->shift = shift;
-	/* both N1 and N2 postdividers are 4 bits wide */
-	pll_postdiv->width = 4;
-	/* range of each divider is from 1 to 15 */
-	pll_postdiv->flags = CLK_DIVIDER_ONE_BASED;
-	pll_postdiv->hw.init = &postdiv_init;
-
-	ret = clk_hw_register(dev, &pll_postdiv->hw);
-	if (ret)
-		return ERR_PTR(ret);
-
-	return &pll_postdiv->hw;
-}
-
-static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm)
-{
-	char clk_name[32], parent[32], vco_name[32];
-	struct clk_init_data vco_init = {
-		.parent_names = (const char *[]){ "xo" },
-		.num_parents = 1,
-		.name = vco_name,
-		.flags = CLK_IGNORE_UNUSED,
-		.ops = &clk_ops_dsi_pll_14nm_vco,
-	};
-	struct device *dev = &pll_14nm->pdev->dev;
-	struct clk_hw **hws = pll_14nm->hws;
-	struct clk_hw_onecell_data *hw_data;
-	struct clk_hw *hw;
-	int num = 0;
-	int ret;
-
-	DBG("DSI%d", pll_14nm->id);
-
-	hw_data = devm_kzalloc(dev, sizeof(*hw_data) +
-			       NUM_PROVIDED_CLKS * sizeof(struct clk_hw *),
-			       GFP_KERNEL);
-	if (!hw_data)
-		return -ENOMEM;
-
-	snprintf(vco_name, 32, "dsi%dvco_clk", pll_14nm->id);
-	pll_14nm->base.clk_hw.init = &vco_init;
-
-	ret = clk_hw_register(dev, &pll_14nm->base.clk_hw);
-	if (ret)
-		return ret;
-
-	hws[num++] = &pll_14nm->base.clk_hw;
-
-	snprintf(clk_name, 32, "dsi%dn1_postdiv_clk", pll_14nm->id);
-	snprintf(parent, 32, "dsi%dvco_clk", pll_14nm->id);
-
-	/* N1 postdiv, bits 0-3 in REG_DSI_14nm_PHY_CMN_CLK_CFG0 */
-	hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent,
-				       CLK_SET_RATE_PARENT, 0);
-	if (IS_ERR(hw))
-		return PTR_ERR(hw);
-
-	hws[num++] = hw;
-
-	snprintf(clk_name, 32, "dsi%dpllbyte", pll_14nm->id);
-	snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->id);
-
-	/* DSI Byte clock = VCO_CLK / N1 / 8 */
-	hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
-					  CLK_SET_RATE_PARENT, 1, 8);
-	if (IS_ERR(hw))
-		return PTR_ERR(hw);
-
-	hws[num++] = hw;
-	hw_data->hws[DSI_BYTE_PLL_CLK] = hw;
-
-	snprintf(clk_name, 32, "dsi%dn1_postdivby2_clk", pll_14nm->id);
-	snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->id);
-
-	/*
-	 * Skip the mux for now, force DSICLK_SEL to 1, Add a /2 divider
-	 * on the way. Don't let it set parent.
-	 */
-	hw = clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 1, 2);
-	if (IS_ERR(hw))
-		return PTR_ERR(hw);
-
-	hws[num++] = hw;
-
-	snprintf(clk_name, 32, "dsi%dpll", pll_14nm->id);
-	snprintf(parent, 32, "dsi%dn1_postdivby2_clk", pll_14nm->id);
-
-	/* DSI pixel clock = VCO_CLK / N1 / 2 / N2
-	 * This is the output of N2 post-divider, bits 4-7 in
-	 * REG_DSI_14nm_PHY_CMN_CLK_CFG0. Don't let it set parent.
-	 */
-	hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent, 0, 4);
-	if (IS_ERR(hw))
-		return PTR_ERR(hw);
-
-	hws[num++] = hw;
-	hw_data->hws[DSI_PIXEL_PLL_CLK]	= hw;
-
-	pll_14nm->num_hws = num;
-
-	hw_data->num = NUM_PROVIDED_CLKS;
-	pll_14nm->hw_data = hw_data;
-
-	ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
-				     pll_14nm->hw_data);
-	if (ret) {
-		DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
-		return ret;
-	}
-
-	return 0;
-}
-
-struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id)
-{
-	struct dsi_pll_14nm *pll_14nm;
-	struct msm_dsi_pll *pll;
-	int ret;
-
-	if (!pdev)
-		return ERR_PTR(-ENODEV);
-
-	pll_14nm = devm_kzalloc(&pdev->dev, sizeof(*pll_14nm), GFP_KERNEL);
-	if (!pll_14nm)
-		return ERR_PTR(-ENOMEM);
-
-	DBG("PLL%d", id);
-
-	pll_14nm->pdev = pdev;
-	pll_14nm->id = id;
-	pll_14nm_list[id] = pll_14nm;
-
-	pll_14nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
-	if (IS_ERR_OR_NULL(pll_14nm->phy_cmn_mmio)) {
-		DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	pll_14nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
-	if (IS_ERR_OR_NULL(pll_14nm->mmio)) {
-		DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	spin_lock_init(&pll_14nm->postdiv_lock);
-
-	pll = &pll_14nm->base;
-	pll->min_rate = VCO_MIN_RATE;
-	pll->max_rate = VCO_MAX_RATE;
-	pll->get_provider = dsi_pll_14nm_get_provider;
-	pll->destroy = dsi_pll_14nm_destroy;
-	pll->disable_seq = dsi_pll_14nm_disable_seq;
-	pll->save_state = dsi_pll_14nm_save_state;
-	pll->restore_state = dsi_pll_14nm_restore_state;
-	pll->set_usecase = dsi_pll_14nm_set_usecase;
-
-	pll_14nm->vco_delay = 1;
-
-	pll->en_seq_cnt = 1;
-	pll->enable_seqs[0] = dsi_pll_14nm_enable_seq;
-
-	ret = pll_14nm_register(pll_14nm);
-	if (ret) {
-		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
-		return ERR_PTR(ret);
-	}
-
-	return pll;
-}
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
deleted file mode 100644
index 37a1f996a588..000000000000
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
+++ /dev/null
@@ -1,643 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-
-#include "dsi_pll.h"
-#include "dsi.xml.h"
-
-/*
- * DSI PLL 28nm - clock diagram (eg: DSI0):
- *
- *         dsi0analog_postdiv_clk
- *                             |         dsi0indirect_path_div2_clk
- *                             |          |
- *                   +------+  |  +----+  |  |\   dsi0byte_mux
- *  dsi0vco_clk --o--| DIV1 |--o--| /2 |--o--| \   |
- *                |  +------+     +----+     | m|  |  +----+
- *                |                          | u|--o--| /4 |-- dsi0pllbyte
- *                |                          | x|     +----+
- *                o--------------------------| /
- *                |                          |/
- *                |          +------+
- *                o----------| DIV3 |------------------------- dsi0pll
- *                           +------+
- */
-
-#define POLL_MAX_READS			10
-#define POLL_TIMEOUT_US		50
-
-#define NUM_PROVIDED_CLKS		2
-
-#define VCO_REF_CLK_RATE		19200000
-#define VCO_MIN_RATE			350000000
-#define VCO_MAX_RATE			750000000
-
-#define DSI_BYTE_PLL_CLK		0
-#define DSI_PIXEL_PLL_CLK		1
-
-#define LPFR_LUT_SIZE			10
-struct lpfr_cfg {
-	unsigned long vco_rate;
-	u32 resistance;
-};
-
-/* Loop filter resistance: */
-static const struct lpfr_cfg lpfr_lut[LPFR_LUT_SIZE] = {
-	{ 479500000,  8 },
-	{ 480000000, 11 },
-	{ 575500000,  8 },
-	{ 576000000, 12 },
-	{ 610500000,  8 },
-	{ 659500000,  9 },
-	{ 671500000, 10 },
-	{ 672000000, 14 },
-	{ 708500000, 10 },
-	{ 750000000, 11 },
-};
-
-struct pll_28nm_cached_state {
-	unsigned long vco_rate;
-	u8 postdiv3;
-	u8 postdiv1;
-	u8 byte_mux;
-};
-
-struct dsi_pll_28nm {
-	struct msm_dsi_pll base;
-
-	int id;
-	struct platform_device *pdev;
-	void __iomem *mmio;
-
-	int vco_delay;
-
-	/* private clocks: */
-	struct clk *clks[NUM_DSI_CLOCKS_MAX];
-	u32 num_clks;
-
-	/* clock-provider: */
-	struct clk *provided_clks[NUM_PROVIDED_CLKS];
-	struct clk_onecell_data clk_data;
-
-	struct pll_28nm_cached_state cached_state;
-};
-
-#define to_pll_28nm(x)	container_of(x, struct dsi_pll_28nm, base)
-
-static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
-				u32 nb_tries, u32 timeout_us)
-{
-	bool pll_locked = false;
-	u32 val;
-
-	while (nb_tries--) {
-		val = pll_read(pll_28nm->mmio + REG_DSI_28nm_PHY_PLL_STATUS);
-		pll_locked = !!(val & DSI_28nm_PHY_PLL_STATUS_PLL_RDY);
-
-		if (pll_locked)
-			break;
-
-		udelay(timeout_us);
-	}
-	DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
-
-	return pll_locked;
-}
-
-static void pll_28nm_software_reset(struct dsi_pll_28nm *pll_28nm)
-{
-	void __iomem *base = pll_28nm->mmio;
-
-	/*
-	 * Add HW recommended delays after toggling the software
-	 * reset bit off and back on.
-	 */
-	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG,
-			DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, 1);
-	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG, 0x00, 1);
-}
-
-/*
- * Clock Callbacks
- */
-static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
-		unsigned long parent_rate)
-{
-	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-	struct device *dev = &pll_28nm->pdev->dev;
-	void __iomem *base = pll_28nm->mmio;
-	unsigned long div_fbx1000, gen_vco_clk;
-	u32 refclk_cfg, frac_n_mode, frac_n_value;
-	u32 sdm_cfg0, sdm_cfg1, sdm_cfg2, sdm_cfg3;
-	u32 cal_cfg10, cal_cfg11;
-	u32 rem;
-	int i;
-
-	VERB("rate=%lu, parent's=%lu", rate, parent_rate);
-
-	/* Force postdiv2 to be div-4 */
-	pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV2_CFG, 3);
-
-	/* Configure the Loop filter resistance */
-	for (i = 0; i < LPFR_LUT_SIZE; i++)
-		if (rate <= lpfr_lut[i].vco_rate)
-			break;
-	if (i == LPFR_LUT_SIZE) {
-		DRM_DEV_ERROR(dev, "unable to get loop filter resistance. vco=%lu\n",
-				rate);
-		return -EINVAL;
-	}
-	pll_write(base + REG_DSI_28nm_PHY_PLL_LPFR_CFG, lpfr_lut[i].resistance);
-
-	/* Loop filter capacitance values : c1 and c2 */
-	pll_write(base + REG_DSI_28nm_PHY_PLL_LPFC1_CFG, 0x70);
-	pll_write(base + REG_DSI_28nm_PHY_PLL_LPFC2_CFG, 0x15);
-
-	rem = rate % VCO_REF_CLK_RATE;
-	if (rem) {
-		refclk_cfg = DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
-		frac_n_mode = 1;
-		div_fbx1000 = rate / (VCO_REF_CLK_RATE / 500);
-		gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 500);
-	} else {
-		refclk_cfg = 0x0;
-		frac_n_mode = 0;
-		div_fbx1000 = rate / (VCO_REF_CLK_RATE / 1000);
-		gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 1000);
-	}
-
-	DBG("refclk_cfg = %d", refclk_cfg);
-
-	rem = div_fbx1000 % 1000;
-	frac_n_value = (rem << 16) / 1000;
-
-	DBG("div_fb = %lu", div_fbx1000);
-	DBG("frac_n_value = %d", frac_n_value);
-
-	DBG("Generated VCO Clock: %lu", gen_vco_clk);
-	rem = 0;
-	sdm_cfg1 = pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1);
-	sdm_cfg1 &= ~DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK;
-	if (frac_n_mode) {
-		sdm_cfg0 = 0x0;
-		sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(0);
-		sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(
-				(u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
-		sdm_cfg3 = frac_n_value >> 8;
-		sdm_cfg2 = frac_n_value & 0xff;
-	} else {
-		sdm_cfg0 = DSI_28nm_PHY_PLL_SDM_CFG0_BYP;
-		sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(
-				(u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
-		sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(0);
-		sdm_cfg2 = 0;
-		sdm_cfg3 = 0;
-	}
-
-	DBG("sdm_cfg0=%d", sdm_cfg0);
-	DBG("sdm_cfg1=%d", sdm_cfg1);
-	DBG("sdm_cfg2=%d", sdm_cfg2);
-	DBG("sdm_cfg3=%d", sdm_cfg3);
-
-	cal_cfg11 = (u32)(gen_vco_clk / (256 * 1000000));
-	cal_cfg10 = (u32)((gen_vco_clk % (256 * 1000000)) / 1000000);
-	DBG("cal_cfg10=%d, cal_cfg11=%d", cal_cfg10, cal_cfg11);
-
-	pll_write(base + REG_DSI_28nm_PHY_PLL_CHGPUMP_CFG, 0x02);
-	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG3,    0x2b);
-	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG4,    0x06);
-	pll_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,  0x0d);
-
-	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1, sdm_cfg1);
-	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2,
-		DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2));
-	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3,
-		DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3));
-	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG4, 0x00);
-
-	/* Add hardware recommended delay for correct PLL configuration */
-	if (pll_28nm->vco_delay)
-		udelay(pll_28nm->vco_delay);
-
-	pll_write(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG, refclk_cfg);
-	pll_write(base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG, 0x00);
-	pll_write(base + REG_DSI_28nm_PHY_PLL_VCOLPF_CFG, 0x31);
-	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0,   sdm_cfg0);
-	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG0,   0x12);
-	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG6,   0x30);
-	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG7,   0x00);
-	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG8,   0x60);
-	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG9,   0x00);
-	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG10,  cal_cfg10 & 0xff);
-	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG11,  cal_cfg11 & 0xff);
-	pll_write(base + REG_DSI_28nm_PHY_PLL_EFUSE_CFG,  0x20);
-
-	return 0;
-}
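As a quick reference for the maths above: the callback picks fractional-N
mode whenever the requested VCO rate is not an exact multiple of the
reference, then derives the feedback divider (x1000), the 16-bit SDM
fraction and the CAL_CFG10/11 bytes from it. A minimal standalone sketch of
that arithmetic, assuming the 28nm PLL's 19.2 MHz VCO_REF_CLK_RATE; the
750 MHz target and the program itself are illustrative only:

#include <stdio.h>

int main(void)
{
	unsigned long ref = 19200000;          /* VCO_REF_CLK_RATE (assumed) */
	unsigned long rate = 750000000;        /* example VCO target */
	unsigned long div_fbx1000, gen_vco;
	unsigned int frac_n_mode = (rate % ref) != 0;

	if (frac_n_mode) {                     /* ref doubler on, x500 scale */
		div_fbx1000 = rate / (ref / 500);
		gen_vco = div_fbx1000 * (ref / 500);
	} else {                               /* integer mode, x1000 scale */
		div_fbx1000 = rate / (ref / 1000);
		gen_vco = div_fbx1000 * (ref / 1000);
	}

	unsigned int frac_n_value = ((div_fbx1000 % 1000) << 16) / 1000;
	unsigned int dc_offset = ((div_fbx1000 / 1000) & 0x3f) - 1; /* frac-N only */
	unsigned int cal_cfg11 = gen_vco / (256 * 1000000);
	unsigned int cal_cfg10 = (gen_vco % (256 * 1000000)) / 1000000;

	printf("frac_n_mode=%u div_fbx1000=%lu vco=%lu\n",
	       frac_n_mode, div_fbx1000, gen_vco);
	printf("frac_n=%u dc_offset=%u cal_cfg10=%u cal_cfg11=%u\n",
	       frac_n_value, dc_offset, cal_cfg10, cal_cfg11);
	return 0;
}

For 750 MHz this prints div_fbx1000=19531, frac_n=34799, dc_offset=18 and
CAL_CFG10/11 of 237/2, matching what the code above would program.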
-
-static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
-{
-	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-
-	return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
-					POLL_TIMEOUT_US);
-}
-
-static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
-		unsigned long parent_rate)
-{
-	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-	void __iomem *base = pll_28nm->mmio;
-	u32 sdm0, doubler, sdm_byp_div;
-	u32 sdm_dc_off, sdm_freq_seed, sdm2, sdm3;
-	u32 ref_clk = VCO_REF_CLK_RATE;
-	unsigned long vco_rate;
-
-	VERB("parent_rate=%lu", parent_rate);
-
-	/* Check to see if the ref clk doubler is enabled */
-	doubler = pll_read(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG) &
-			DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
-	ref_clk += (doubler * VCO_REF_CLK_RATE);
-
-	/* see if it is integer mode or sdm mode */
-	sdm0 = pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0);
-	if (sdm0 & DSI_28nm_PHY_PLL_SDM_CFG0_BYP) {
-		/* integer mode */
-		sdm_byp_div = FIELD(
-				pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0),
-				DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV) + 1;
-		vco_rate = ref_clk * sdm_byp_div;
-	} else {
-		/* sdm mode */
-		sdm_dc_off = FIELD(
-				pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1),
-				DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET);
-		DBG("sdm_dc_off = %d", sdm_dc_off);
-		sdm2 = FIELD(pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2),
-				DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0);
-		sdm3 = FIELD(pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3),
-				DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8);
-		sdm_freq_seed = (sdm3 << 8) | sdm2;
-		DBG("sdm_freq_seed = %d", sdm_freq_seed);
-
-		vco_rate = (ref_clk * (sdm_dc_off + 1)) +
-			mult_frac(ref_clk, sdm_freq_seed, BIT(16));
-		DBG("vco rate = %lu", vco_rate);
-	}
-
-	DBG("returning vco rate = %lu", vco_rate);
-
-	return vco_rate;
-}
-
-static const struct clk_ops clk_ops_dsi_pll_28nm_vco = {
-	.round_rate = msm_dsi_pll_helper_clk_round_rate,
-	.set_rate = dsi_pll_28nm_clk_set_rate,
-	.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
-	.prepare = msm_dsi_pll_helper_clk_prepare,
-	.unprepare = msm_dsi_pll_helper_clk_unprepare,
-	.is_enabled = dsi_pll_28nm_clk_is_enabled,
-};
-
-/*
- * PLL Callbacks
- */
-static int dsi_pll_28nm_enable_seq_hpm(struct msm_dsi_pll *pll)
-{
-	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-	struct device *dev = &pll_28nm->pdev->dev;
-	void __iomem *base = pll_28nm->mmio;
-	u32 max_reads = 5, timeout_us = 100;
-	bool locked;
-	u32 val;
-	int i;
-
-	DBG("id=%d", pll_28nm->id);
-
-	pll_28nm_software_reset(pll_28nm);
-
-	/*
-	 * PLL power up sequence.
-	 * Add necessary delays recommended by hardware.
-	 */
-	val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
-	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);
-
-	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
-	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
-
-	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
-	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
-
-	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
-	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
-
-	for (i = 0; i < 2; i++) {
-		/* DSI Uniphy lock detect setting */
-		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,
-				0x0c, 100);
-		pll_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);
-
-		/* poll for PLL ready status */
-		locked = pll_28nm_poll_for_ready(pll_28nm,
-						max_reads, timeout_us);
-		if (locked)
-			break;
-
-		pll_28nm_software_reset(pll_28nm);
-
-		/*
-		 * PLL power up sequence.
-		 * Add necessary delays recommended by hardware.
-		 */
-		val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
-		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);
-
-		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
-		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
-
-		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
-		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 250);
-
-		val &= ~DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
-		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
-
-		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
-		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
-
-		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
-		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
-	}
-
-	if (unlikely(!locked))
-		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
-	else
-		DBG("DSI PLL Lock success");
-
-	return locked ? 0 : -EINVAL;
-}
-
-static int dsi_pll_28nm_enable_seq_lp(struct msm_dsi_pll *pll)
-{
-	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-	struct device *dev = &pll_28nm->pdev->dev;
-	void __iomem *base = pll_28nm->mmio;
-	bool locked;
-	u32 max_reads = 10, timeout_us = 50;
-	u32 val;
-
-	DBG("id=%d", pll_28nm->id);
-
-	pll_28nm_software_reset(pll_28nm);
-
-	/*
-	 * PLL power up sequence.
-	 * Add necessary delays recommended by hardware.
-	 */
-	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_CAL_CFG1, 0x34, 500);
-
-	val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
-	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
-
-	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
-	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
-
-	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B |
-		DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
-	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
-
-	/* DSI PLL toggle lock detect setting */
-	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x04, 500);
-	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x05, 512);
-
-	locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
-
-	if (unlikely(!locked))
-		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
-	else
-		DBG("DSI PLL lock success");
-
-	return locked ? 0 : -EINVAL;
-}
-
-static void dsi_pll_28nm_disable_seq(struct msm_dsi_pll *pll)
-{
-	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-
-	DBG("id=%d", pll_28nm->id);
-	pll_write(pll_28nm->mmio + REG_DSI_28nm_PHY_PLL_GLB_CFG, 0x00);
-}
-
-static void dsi_pll_28nm_save_state(struct msm_dsi_pll *pll)
-{
-	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
-	void __iomem *base = pll_28nm->mmio;
-
-	cached_state->postdiv3 =
-			pll_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG);
-	cached_state->postdiv1 =
-			pll_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG);
-	cached_state->byte_mux = pll_read(base + REG_DSI_28nm_PHY_PLL_VREG_CFG);
-	if (dsi_pll_28nm_clk_is_enabled(&pll->clk_hw))
-		cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw);
-	else
-		cached_state->vco_rate = 0;
-}
-
-static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
-{
-	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
-	void __iomem *base = pll_28nm->mmio;
-	int ret;
-
-	ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
-					cached_state->vco_rate, 0);
-	if (ret) {
-		DRM_DEV_ERROR(&pll_28nm->pdev->dev,
-			"restore vco rate failed. ret=%d\n", ret);
-		return ret;
-	}
-
-	pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
-			cached_state->postdiv3);
-	pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
-			cached_state->postdiv1);
-	pll_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,
-			cached_state->byte_mux);
-
-	return 0;
-}
-
-static int dsi_pll_28nm_get_provider(struct msm_dsi_pll *pll,
-				struct clk **byte_clk_provider,
-				struct clk **pixel_clk_provider)
-{
-	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-
-	if (byte_clk_provider)
-		*byte_clk_provider = pll_28nm->provided_clks[DSI_BYTE_PLL_CLK];
-	if (pixel_clk_provider)
-		*pixel_clk_provider =
-				pll_28nm->provided_clks[DSI_PIXEL_PLL_CLK];
-
-	return 0;
-}
-
-static void dsi_pll_28nm_destroy(struct msm_dsi_pll *pll)
-{
-	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-	int i;
-
-	msm_dsi_pll_helper_unregister_clks(pll_28nm->pdev,
-					pll_28nm->clks, pll_28nm->num_clks);
-
-	for (i = 0; i < NUM_PROVIDED_CLKS; i++)
-		pll_28nm->provided_clks[i] = NULL;
-
-	pll_28nm->num_clks = 0;
-	pll_28nm->clk_data.clks = NULL;
-	pll_28nm->clk_data.clk_num = 0;
-}
-
-static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
-{
-	char clk_name[32], parent1[32], parent2[32], vco_name[32];
-	struct clk_init_data vco_init = {
-		.parent_names = (const char *[]){ "xo" },
-		.num_parents = 1,
-		.name = vco_name,
-		.flags = CLK_IGNORE_UNUSED,
-		.ops = &clk_ops_dsi_pll_28nm_vco,
-	};
-	struct device *dev = &pll_28nm->pdev->dev;
-	struct clk **clks = pll_28nm->clks;
-	struct clk **provided_clks = pll_28nm->provided_clks;
-	int num = 0;
-	int ret;
-
-	DBG("%d", pll_28nm->id);
-
-	snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->id);
-	pll_28nm->base.clk_hw.init = &vco_init;
-	clks[num++] = clk_register(dev, &pll_28nm->base.clk_hw);
-
-	snprintf(clk_name, 32, "dsi%danalog_postdiv_clk", pll_28nm->id);
-	snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);
-	clks[num++] = clk_register_divider(dev, clk_name,
-			parent1, CLK_SET_RATE_PARENT,
-			pll_28nm->mmio +
-			REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
-			0, 4, 0, NULL);
-
-	snprintf(clk_name, 32, "dsi%dindirect_path_div2_clk", pll_28nm->id);
-	snprintf(parent1, 32, "dsi%danalog_postdiv_clk", pll_28nm->id);
-	clks[num++] = clk_register_fixed_factor(dev, clk_name,
-			parent1, CLK_SET_RATE_PARENT,
-			1, 2);
-
-	snprintf(clk_name, 32, "dsi%dpll", pll_28nm->id);
-	snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);
-	clks[num++] = provided_clks[DSI_PIXEL_PLL_CLK] =
-			clk_register_divider(dev, clk_name,
-				parent1, 0, pll_28nm->mmio +
-				REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
-				0, 8, 0, NULL);
-
-	snprintf(clk_name, 32, "dsi%dbyte_mux", pll_28nm->id);
-	snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);
-	snprintf(parent2, 32, "dsi%dindirect_path_div2_clk", pll_28nm->id);
-	clks[num++] = clk_register_mux(dev, clk_name,
-			((const char *[]){
-				parent1, parent2
-			}), 2, CLK_SET_RATE_PARENT, pll_28nm->mmio +
-			REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);
-
-	snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->id);
-	snprintf(parent1, 32, "dsi%dbyte_mux", pll_28nm->id);
-	clks[num++] = provided_clks[DSI_BYTE_PLL_CLK] =
-			clk_register_fixed_factor(dev, clk_name,
-				parent1, CLK_SET_RATE_PARENT, 1, 4);
-
-	pll_28nm->num_clks = num;
-
-	pll_28nm->clk_data.clk_num = NUM_PROVIDED_CLKS;
-	pll_28nm->clk_data.clks = provided_clks;
-
-	ret = of_clk_add_provider(dev->of_node,
-			of_clk_src_onecell_get, &pll_28nm->clk_data);
-	if (ret) {
-		DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
-		return ret;
-	}
-
-	return 0;
-}
-
-struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,
-					enum msm_dsi_phy_type type, int id)
-{
-	struct dsi_pll_28nm *pll_28nm;
-	struct msm_dsi_pll *pll;
-	int ret;
-
-	if (!pdev)
-		return ERR_PTR(-ENODEV);
-
-	pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
-	if (!pll_28nm)
-		return ERR_PTR(-ENOMEM);
-
-	pll_28nm->pdev = pdev;
-	pll_28nm->id = id;
-
-	pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
-	if (IS_ERR_OR_NULL(pll_28nm->mmio)) {
-		DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	pll = &pll_28nm->base;
-	pll->min_rate = VCO_MIN_RATE;
-	pll->max_rate = VCO_MAX_RATE;
-	pll->get_provider = dsi_pll_28nm_get_provider;
-	pll->destroy = dsi_pll_28nm_destroy;
-	pll->disable_seq = dsi_pll_28nm_disable_seq;
-	pll->save_state = dsi_pll_28nm_save_state;
-	pll->restore_state = dsi_pll_28nm_restore_state;
-
-	if (type == MSM_DSI_PHY_28NM_HPM) {
-		pll_28nm->vco_delay = 1;
-
-		pll->en_seq_cnt = 3;
-		pll->enable_seqs[0] = dsi_pll_28nm_enable_seq_hpm;
-		pll->enable_seqs[1] = dsi_pll_28nm_enable_seq_hpm;
-		pll->enable_seqs[2] = dsi_pll_28nm_enable_seq_hpm;
-	} else if (type == MSM_DSI_PHY_28NM_LP) {
-		pll_28nm->vco_delay = 1000;
-
-		pll->en_seq_cnt = 1;
-		pll->enable_seqs[0] = dsi_pll_28nm_enable_seq_lp;
-	} else {
-		DRM_DEV_ERROR(&pdev->dev, "phy type (%d) is not 28nm\n", type);
-		return ERR_PTR(-EINVAL);
-	}
-
-	ret = pll_28nm_register(pll_28nm);
-	if (ret) {
-		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
-		return ERR_PTR(ret);
-	}
-
-	return pll;
-}
-
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
deleted file mode 100644
index a6e7a2525fe0..000000000000
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
+++ /dev/null
@@ -1,526 +0,0 @@ 
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/clk-provider.h>
-
-#include "dsi_pll.h"
-#include "dsi.xml.h"
-
-/*
- * DSI PLL 28nm (8960/A family) - clock diagram (eg: DSI1):
- *
- *
- *                        +------+
- *  dsi1vco_clk ----o-----| DIV1 |---dsi1pllbit (not exposed as clock)
- *  F * byte_clk    |     +------+
- *                  | bit clock divider (F / 8)
- *                  |
- *                  |     +------+
- *                  o-----| DIV2 |---dsi0pllbyte---o---> To byte RCG
- *                  |     +------+                 | (sets parent rate)
- *                  | byte clock divider (F)       |
- *                  |                              |
- *                  |                              o---> To esc RCG
- *                  |                                (doesn't set parent rate)
- *                  |
- *                  |     +------+
- *                  o-----| DIV3 |----dsi0pll------o---> To dsi RCG
- *                        +------+                 | (sets parent rate)
- *                  dsi clock divider (F * magic)  |
- *                                                 |
- *                                                 o---> To pixel rcg
- *                                                  (doesn't set parent rate)
- */
-
-#define POLL_MAX_READS		8000
-#define POLL_TIMEOUT_US		1
-
-#define NUM_PROVIDED_CLKS	2
-
-#define VCO_REF_CLK_RATE	27000000
-#define VCO_MIN_RATE		600000000
-#define VCO_MAX_RATE		1200000000
-
-#define DSI_BYTE_PLL_CLK	0
-#define DSI_PIXEL_PLL_CLK	1
-
-#define VCO_PREF_DIV_RATIO	27
-
-struct pll_28nm_cached_state {
-	unsigned long vco_rate;
-	u8 postdiv3;
-	u8 postdiv2;
-	u8 postdiv1;
-};
-
-struct clk_bytediv {
-	struct clk_hw hw;
-	void __iomem *reg;
-};
-
-struct dsi_pll_28nm {
-	struct msm_dsi_pll base;
-
-	int id;
-	struct platform_device *pdev;
-	void __iomem *mmio;
-
-	/* custom byte clock divider */
-	struct clk_bytediv *bytediv;
-
-	/* private clocks: */
-	struct clk *clks[NUM_DSI_CLOCKS_MAX];
-	u32 num_clks;
-
-	/* clock-provider: */
-	struct clk *provided_clks[NUM_PROVIDED_CLKS];
-	struct clk_onecell_data clk_data;
-
-	struct pll_28nm_cached_state cached_state;
-};
-
-#define to_pll_28nm(x)	container_of(x, struct dsi_pll_28nm, base)
-
-static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
-				    int nb_tries, int timeout_us)
-{
-	bool pll_locked = false;
-	u32 val;
-
-	while (nb_tries--) {
-		val = pll_read(pll_28nm->mmio + REG_DSI_28nm_8960_PHY_PLL_RDY);
-		pll_locked = !!(val & DSI_28nm_8960_PHY_PLL_RDY_PLL_RDY);
-
-		if (pll_locked)
-			break;
-
-		udelay(timeout_us);
-	}
-	DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
-
-	return pll_locked;
-}
-
-/*
- * Clock Callbacks
- */
-static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
-				     unsigned long parent_rate)
-{
-	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-	void __iomem *base = pll_28nm->mmio;
-	u32 val, temp, fb_divider;
-
-	DBG("rate=%lu, parent's=%lu", rate, parent_rate);
-
-	temp = rate / 10;
-	val = VCO_REF_CLK_RATE / 10;
-	fb_divider = (temp * VCO_PREF_DIV_RATIO) / val;
-	fb_divider = fb_divider / 2 - 1;
-	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1,
-			fb_divider & 0xff);
-
-	val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2);
-
-	val |= (fb_divider >> 8) & 0x07;
-
-	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2,
-			val);
-
-	val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
-
-	val |= (VCO_PREF_DIV_RATIO - 1) & 0x3f;
-
-	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3,
-			val);
-
-	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_6,
-			0xf);
-
-	val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
-	val |= 0x7 << 4;
-	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
-			val);
-
-	return 0;
-}
-
-static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
-{
-	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-
-	return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
-					POLL_TIMEOUT_US);
-}
-
-static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
-						  unsigned long parent_rate)
-{
-	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-	void __iomem *base = pll_28nm->mmio;
-	unsigned long vco_rate;
-	u32 status, fb_divider, temp, ref_divider;
-
-	VERB("parent_rate=%lu", parent_rate);
-
-	status = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0);
-
-	if (status & DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE) {
-		fb_divider = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1);
-		fb_divider &= 0xff;
-		temp = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2) & 0x07;
-		fb_divider = (temp << 8) | fb_divider;
-		fb_divider += 1;
-
-		ref_divider = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
-		ref_divider &= 0x3f;
-		ref_divider += 1;
-
-		/* multiply by 2 */
-		vco_rate = (parent_rate / ref_divider) * fb_divider * 2;
-	} else {
-		vco_rate = 0;
-	}
-
-	DBG("returning vco rate = %lu", vco_rate);
-
-	return vco_rate;
-}
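The two callbacks above are inverses of each other: set_rate turns the
requested VCO rate into the feedback divider programmed into CTRL_1/2
(against the 27 MHz reference and the fixed pre-divider of 27), and
recalc_rate rebuilds the VCO rate from those registers, including the
implicit x2. A standalone sketch of that round trip; the 900 MHz target is
illustrative:

#include <stdio.h>

int main(void)
{
	unsigned long ref = 27000000;        /* VCO_REF_CLK_RATE */
	unsigned int pref_div = 27;          /* VCO_PREF_DIV_RATIO */
	unsigned long rate = 900000000;      /* example VCO target */

	/* set_rate path: value that ends up in PLL_CTRL_1/2 */
	unsigned long fb_divider = ((rate / 10) * pref_div) / (ref / 10);
	fb_divider = fb_divider / 2 - 1;

	/* recalc_rate path: rebuild the VCO rate from the registers */
	unsigned long vco = (ref / pref_div) * (fb_divider + 1) * 2;

	printf("fb_divider=%lu vco=%lu Hz\n", fb_divider, vco);
	return 0;
}

For 900 MHz the programmed value is 449 and the recalculated rate comes
back as exactly 900 MHz.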
-
-static const struct clk_ops clk_ops_dsi_pll_28nm_vco = {
-	.round_rate = msm_dsi_pll_helper_clk_round_rate,
-	.set_rate = dsi_pll_28nm_clk_set_rate,
-	.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
-	.prepare = msm_dsi_pll_helper_clk_prepare,
-	.unprepare = msm_dsi_pll_helper_clk_unprepare,
-	.is_enabled = dsi_pll_28nm_clk_is_enabled,
-};
-
-/*
- * Custom byte clock divider clk_ops
- *
- * This clock is the entry point to configuring the PLL. The user (dsi host)
- * will set this clock's rate to the desired byte clock rate. The VCO lock
- * frequency is a multiple of the byte clock rate. The multiplication factor
- * (shown as F in the diagram above) is a function of the byte clock rate.
- *
- * This custom divider clock ensures that its parent (VCO) is set to the
- * desired rate, and that the byte clock postdivider (POSTDIV2) is configured
- * accordingly
- */
-#define to_clk_bytediv(_hw) container_of(_hw, struct clk_bytediv, hw)
-
-static unsigned long clk_bytediv_recalc_rate(struct clk_hw *hw,
-		unsigned long parent_rate)
-{
-	struct clk_bytediv *bytediv = to_clk_bytediv(hw);
-	unsigned int div;
-
-	div = pll_read(bytediv->reg) & 0xff;
-
-	return parent_rate / (div + 1);
-}
-
-/* find multiplication factor (wrt byte clock) at which the VCO should be set */
-static unsigned int get_vco_mul_factor(unsigned long byte_clk_rate)
-{
-	unsigned long bit_mhz;
-
-	/* convert to bit clock in MHz */
-	bit_mhz = (byte_clk_rate * 8) / 1000000;
-
-	if (bit_mhz < 125)
-		return 64;
-	else if (bit_mhz < 250)
-		return 32;
-	else if (bit_mhz < 600)
-		return 16;
-	else
-		return 8;
-}
-
-static long clk_bytediv_round_rate(struct clk_hw *hw, unsigned long rate,
-				   unsigned long *prate)
-{
-	unsigned long best_parent;
-	unsigned int factor;
-
-	factor = get_vco_mul_factor(rate);
-
-	best_parent = rate * factor;
-	*prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
-
-	return *prate / factor;
-}
-
-static int clk_bytediv_set_rate(struct clk_hw *hw, unsigned long rate,
-				unsigned long parent_rate)
-{
-	struct clk_bytediv *bytediv = to_clk_bytediv(hw);
-	u32 val;
-	unsigned int factor;
-
-	factor = get_vco_mul_factor(rate);
-
-	val = pll_read(bytediv->reg);
-	val |= (factor - 1) & 0xff;
-	pll_write(bytediv->reg, val);
-
-	return 0;
-}
-
-/* Our special byte clock divider ops */
-static const struct clk_ops clk_bytediv_ops = {
-	.round_rate = clk_bytediv_round_rate,
-	.set_rate = clk_bytediv_set_rate,
-	.recalc_rate = clk_bytediv_recalc_rate,
-};
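As the comment above describes, this divider is the configuration entry
point: the host asks for a byte clock rate, get_vco_mul_factor() maps the
implied bit clock to a multiplication factor F, clk_bytediv_round_rate()
pushes rate * F up to the VCO, and clk_bytediv_set_rate() writes F - 1 into
the byte divider register. A standalone sketch of the mapping; the helper
name and the 56.25 MHz byte clock are illustrative:

#include <stdio.h>

/* mirrors get_vco_mul_factor(): bit clock in MHz selects F */
static unsigned int vco_mul_factor(unsigned long byte_clk_rate)
{
	unsigned long bit_mhz = (byte_clk_rate * 8) / 1000000;

	if (bit_mhz < 125)
		return 64;
	else if (bit_mhz < 250)
		return 32;
	else if (bit_mhz < 600)
		return 16;
	return 8;
}

int main(void)
{
	unsigned long byte_clk = 56250000;   /* example: 450 Mbps per lane */
	unsigned int factor = vco_mul_factor(byte_clk);

	printf("byte=%lu Hz F=%u vco=%lu Hz bytediv=%u\n",
	       byte_clk, factor, byte_clk * factor, factor - 1);
	return 0;
}

A 56.25 MHz byte clock implies a 450 MHz bit clock, so F = 16 and the VCO
is asked for 900 MHz, inside the 600-1200 MHz range defined above.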
-
-/*
- * PLL Callbacks
- */
-static int dsi_pll_28nm_enable_seq(struct msm_dsi_pll *pll)
-{
-	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-	struct device *dev = &pll_28nm->pdev->dev;
-	void __iomem *base = pll_28nm->mmio;
-	bool locked;
-	unsigned int bit_div, byte_div;
-	int max_reads = 1000, timeout_us = 100;
-	u32 val;
-
-	DBG("id=%d", pll_28nm->id);
-
-	/*
-	 * before enabling the PLL, configure the bit clock divider since we
-	 * don't expose it as a clock to the outside world
-	 * 1: read back the byte clock divider that should already be set
-	 * 2: divide by 8 to get bit clock divider
-	 * 3: write it to POSTDIV1
-	 */
-	val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
-	byte_div = val + 1;
-	bit_div = byte_div / 8;
-
-	val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
-	val &= ~0xf;
-	val |= (bit_div - 1);
-	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8, val);
-
-	/* enable the PLL */
-	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0,
-			DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE);
-
-	locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
-
-	if (unlikely(!locked))
-		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
-	else
-		DBG("DSI PLL lock success");
-
-	return locked ? 0 : -EINVAL;
-}
-
-static void dsi_pll_28nm_disable_seq(struct msm_dsi_pll *pll)
-{
-	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-
-	DBG("id=%d", pll_28nm->id);
-	pll_write(pll_28nm->mmio + REG_DSI_28nm_8960_PHY_PLL_CTRL_0, 0x00);
-}
-
-static void dsi_pll_28nm_save_state(struct msm_dsi_pll *pll)
-{
-	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
-	void __iomem *base = pll_28nm->mmio;
-
-	cached_state->postdiv3 =
-			pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10);
-	cached_state->postdiv2 =
-			pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
-	cached_state->postdiv1 =
-			pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
-
-	cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw);
-}
-
-static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
-{
-	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
-	void __iomem *base = pll_28nm->mmio;
-	int ret;
-
-	ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
-					cached_state->vco_rate, 0);
-	if (ret) {
-		DRM_DEV_ERROR(&pll_28nm->pdev->dev,
-			"restore vco rate failed. ret=%d\n", ret);
-		return ret;
-	}
-
-	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
-			cached_state->postdiv3);
-	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9,
-			cached_state->postdiv2);
-	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
-			cached_state->postdiv1);
-
-	return 0;
-}
-
-static int dsi_pll_28nm_get_provider(struct msm_dsi_pll *pll,
-				struct clk **byte_clk_provider,
-				struct clk **pixel_clk_provider)
-{
-	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-
-	if (byte_clk_provider)
-		*byte_clk_provider = pll_28nm->provided_clks[DSI_BYTE_PLL_CLK];
-	if (pixel_clk_provider)
-		*pixel_clk_provider =
-				pll_28nm->provided_clks[DSI_PIXEL_PLL_CLK];
-
-	return 0;
-}
-
-static void dsi_pll_28nm_destroy(struct msm_dsi_pll *pll)
-{
-	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-
-	msm_dsi_pll_helper_unregister_clks(pll_28nm->pdev,
-					pll_28nm->clks, pll_28nm->num_clks);
-}
-
-static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
-{
-	char *clk_name, *parent_name, *vco_name;
-	struct clk_init_data vco_init = {
-		.parent_names = (const char *[]){ "pxo" },
-		.num_parents = 1,
-		.flags = CLK_IGNORE_UNUSED,
-		.ops = &clk_ops_dsi_pll_28nm_vco,
-	};
-	struct device *dev = &pll_28nm->pdev->dev;
-	struct clk **clks = pll_28nm->clks;
-	struct clk **provided_clks = pll_28nm->provided_clks;
-	struct clk_bytediv *bytediv;
-	struct clk_init_data bytediv_init = { };
-	int ret, num = 0;
-
-	DBG("%d", pll_28nm->id);
-
-	bytediv = devm_kzalloc(dev, sizeof(*bytediv), GFP_KERNEL);
-	if (!bytediv)
-		return -ENOMEM;
-
-	vco_name = devm_kzalloc(dev, 32, GFP_KERNEL);
-	if (!vco_name)
-		return -ENOMEM;
-
-	parent_name = devm_kzalloc(dev, 32, GFP_KERNEL);
-	if (!parent_name)
-		return -ENOMEM;
-
-	clk_name = devm_kzalloc(dev, 32, GFP_KERNEL);
-	if (!clk_name)
-		return -ENOMEM;
-
-	pll_28nm->bytediv = bytediv;
-
-	snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->id);
-	vco_init.name = vco_name;
-
-	pll_28nm->base.clk_hw.init = &vco_init;
-
-	clks[num++] = clk_register(dev, &pll_28nm->base.clk_hw);
-
-	/* prepare and register bytediv */
-	bytediv->hw.init = &bytediv_init;
-	bytediv->reg = pll_28nm->mmio + REG_DSI_28nm_8960_PHY_PLL_CTRL_9;
-
-	snprintf(parent_name, 32, "dsi%dvco_clk", pll_28nm->id);
-	snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->id);
-
-	bytediv_init.name = clk_name;
-	bytediv_init.ops = &clk_bytediv_ops;
-	bytediv_init.flags = CLK_SET_RATE_PARENT;
-	bytediv_init.parent_names = (const char * const *) &parent_name;
-	bytediv_init.num_parents = 1;
-
-	/* DIV2 */
-	clks[num++] = provided_clks[DSI_BYTE_PLL_CLK] =
-			clk_register(dev, &bytediv->hw);
-
-	snprintf(clk_name, 32, "dsi%dpll", pll_28nm->id);
-	/* DIV3 */
-	clks[num++] = provided_clks[DSI_PIXEL_PLL_CLK] =
-			clk_register_divider(dev, clk_name,
-				parent_name, 0, pll_28nm->mmio +
-				REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
-				0, 8, 0, NULL);
-
-	pll_28nm->num_clks = num;
-
-	pll_28nm->clk_data.clk_num = NUM_PROVIDED_CLKS;
-	pll_28nm->clk_data.clks = provided_clks;
-
-	ret = of_clk_add_provider(dev->of_node,
-			of_clk_src_onecell_get, &pll_28nm->clk_data);
-	if (ret) {
-		DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
-		return ret;
-	}
-
-	return 0;
-}
-
-struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
-					       int id)
-{
-	struct dsi_pll_28nm *pll_28nm;
-	struct msm_dsi_pll *pll;
-	int ret;
-
-	if (!pdev)
-		return ERR_PTR(-ENODEV);
-
-	pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
-	if (!pll_28nm)
-		return ERR_PTR(-ENOMEM);
-
-	pll_28nm->pdev = pdev;
-	pll_28nm->id = id + 1;
-
-	pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
-	if (IS_ERR_OR_NULL(pll_28nm->mmio)) {
-		DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	pll = &pll_28nm->base;
-	pll->min_rate = VCO_MIN_RATE;
-	pll->max_rate = VCO_MAX_RATE;
-	pll->get_provider = dsi_pll_28nm_get_provider;
-	pll->destroy = dsi_pll_28nm_destroy;
-	pll->disable_seq = dsi_pll_28nm_disable_seq;
-	pll->save_state = dsi_pll_28nm_save_state;
-	pll->restore_state = dsi_pll_28nm_restore_state;
-
-	pll->en_seq_cnt = 1;
-	pll->enable_seqs[0] = dsi_pll_28nm_enable_seq;
-
-	ret = pll_28nm_register(pll_28nm);
-	if (ret) {
-		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
-		return ERR_PTR(ret);
-	}
-
-	return pll;
-}
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c
deleted file mode 100644
index e29b3bfd63d1..000000000000
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c
+++ /dev/null
@@ -1,913 +0,0 @@ 
-/*
- * SPDX-License-Identifier: GPL-2.0
- * Copyright (c) 2018, The Linux Foundation
- */
-
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/iopoll.h>
-
-#include "dsi_pll.h"
-#include "dsi.xml.h"
-
-/*
- * DSI PLL 7nm - clock diagram (eg: DSI0): TODO: updated CPHY diagram
- *
- *           dsi0_pll_out_div_clk  dsi0_pll_bit_clk
- *                              |                |
- *                              |                |
- *                 +---------+  |  +----------+  |  +----+
- *  dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
- *                 +---------+  |  +----------+  |  +----+
- *                              |                |
- *                              |                |         dsi0_pll_by_2_bit_clk
- *                              |                |          |
- *                              |                |  +----+  |  |\  dsi0_pclk_mux
- *                              |                |--| /2 |--o--| \   |
- *                              |                |  +----+     |  \  |  +---------+
- *                              |                --------------|  |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
- *                              |------------------------------|  /     +---------+
- *                              |          +-----+             | /
- *                              -----------| /4? |--o----------|/
- *                                         +-----+  |           |
- *                                                  |           |dsiclk_sel
- *                                                  |
- *                                                  dsi0_pll_post_out_div_clk
- */
-
-#define DSI_BYTE_PLL_CLK		0
-#define DSI_PIXEL_PLL_CLK		1
-#define NUM_PROVIDED_CLKS		2
-
-#define VCO_REF_CLK_RATE		19200000
-
-struct dsi_pll_regs {
-	u32 pll_prop_gain_rate;
-	u32 pll_lockdet_rate;
-	u32 decimal_div_start;
-	u32 frac_div_start_low;
-	u32 frac_div_start_mid;
-	u32 frac_div_start_high;
-	u32 pll_clock_inverters;
-	u32 ssc_stepsize_low;
-	u32 ssc_stepsize_high;
-	u32 ssc_div_per_low;
-	u32 ssc_div_per_high;
-	u32 ssc_adjper_low;
-	u32 ssc_adjper_high;
-	u32 ssc_control;
-};
-
-struct dsi_pll_config {
-	u32 ref_freq;
-	bool div_override;
-	u32 output_div;
-	bool ignore_frac;
-	bool disable_prescaler;
-	bool enable_ssc;
-	bool ssc_center;
-	u32 dec_bits;
-	u32 frac_bits;
-	u32 lock_timer;
-	u32 ssc_freq;
-	u32 ssc_offset;
-	u32 ssc_adj_per;
-	u32 thresh_cycles;
-	u32 refclk_cycles;
-};
-
-struct pll_7nm_cached_state {
-	unsigned long vco_rate;
-	u8 bit_clk_div;
-	u8 pix_clk_div;
-	u8 pll_out_div;
-	u8 pll_mux;
-};
-
-struct dsi_pll_7nm {
-	struct msm_dsi_pll base;
-
-	int id;
-	struct platform_device *pdev;
-
-	void __iomem *phy_cmn_mmio;
-	void __iomem *mmio;
-
-	u64 vco_ref_clk_rate;
-	u64 vco_current_rate;
-
-	/* protects REG_DSI_7nm_PHY_CMN_CLK_CFG0 register */
-	spinlock_t postdiv_lock;
-
-	int vco_delay;
-	struct dsi_pll_config pll_configuration;
-	struct dsi_pll_regs reg_setup;
-
-	/* private clocks: */
-	struct clk_hw *out_div_clk_hw;
-	struct clk_hw *bit_clk_hw;
-	struct clk_hw *byte_clk_hw;
-	struct clk_hw *by_2_bit_clk_hw;
-	struct clk_hw *post_out_div_clk_hw;
-	struct clk_hw *pclk_mux_hw;
-	struct clk_hw *out_dsiclk_hw;
-
-	/* clock-provider: */
-	struct clk_hw_onecell_data *hw_data;
-
-	struct pll_7nm_cached_state cached_state;
-
-	enum msm_dsi_phy_usecase uc;
-	struct dsi_pll_7nm *slave;
-};
-
-#define to_pll_7nm(x)	container_of(x, struct dsi_pll_7nm, base)
-
-/*
- * Global list of private DSI PLL struct pointers. We need this for Dual DSI
- * mode, where the master PLL's clk_ops needs to access the slave's private data
- */
-static struct dsi_pll_7nm *pll_7nm_list[DSI_MAX];
-
-static void dsi_pll_setup_config(struct dsi_pll_7nm *pll)
-{
-	struct dsi_pll_config *config = &pll->pll_configuration;
-
-	config->ref_freq = pll->vco_ref_clk_rate;
-	config->output_div = 1;
-	config->dec_bits = 8;
-	config->frac_bits = 18;
-	config->lock_timer = 64;
-	config->ssc_freq = 31500;
-	config->ssc_offset = 4800;
-	config->ssc_adj_per = 2;
-	config->thresh_cycles = 32;
-	config->refclk_cycles = 256;
-
-	config->div_override = false;
-	config->ignore_frac = false;
-	config->disable_prescaler = false;
-
-	/* TODO: ssc enable */
-	config->enable_ssc = false;
-	config->ssc_center = 0;
-}
-
-static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll)
-{
-	struct dsi_pll_config *config = &pll->pll_configuration;
-	struct dsi_pll_regs *regs = &pll->reg_setup;
-	u64 fref = pll->vco_ref_clk_rate;
-	u64 pll_freq;
-	u64 divider;
-	u64 dec, dec_multiple;
-	u32 frac;
-	u64 multiplier;
-
-	pll_freq = pll->vco_current_rate;
-
-	if (config->disable_prescaler)
-		divider = fref;
-	else
-		divider = fref * 2;
-
-	multiplier = 1 << config->frac_bits;
-	dec_multiple = div_u64(pll_freq * multiplier, divider);
-	div_u64_rem(dec_multiple, multiplier, &frac);
-
-	dec = div_u64(dec_multiple, multiplier);
-
-	if (pll->base.type != MSM_DSI_PHY_7NM_V4_1)
-		regs->pll_clock_inverters = 0x28;
-	else if (pll_freq <= 1000000000ULL)
-		regs->pll_clock_inverters = 0xa0;
-	else if (pll_freq <= 2500000000ULL)
-		regs->pll_clock_inverters = 0x20;
-	else if (pll_freq <= 3020000000ULL)
-		regs->pll_clock_inverters = 0x00;
-	else
-		regs->pll_clock_inverters = 0x40;
-
-	regs->pll_lockdet_rate = config->lock_timer;
-	regs->decimal_div_start = dec;
-	regs->frac_div_start_low = (frac & 0xff);
-	regs->frac_div_start_mid = (frac & 0xff00) >> 8;
-	regs->frac_div_start_high = (frac & 0x30000) >> 16;
-}
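dsi_pll_calc_dec_frac() above splits the target VCO frequency into an
integer (decimal) divider and an 18-bit fraction of twice the reference.
The same arithmetic as a standalone sketch; the 19.2 MHz reference mirrors
VCO_REF_CLK_RATE and the 3.5 GHz target is illustrative:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t fref = 19200000;            /* VCO_REF_CLK_RATE */
	uint64_t pll_freq = 3500000000ULL;   /* example VCO target */
	uint64_t divider = fref * 2;         /* prescaler enabled */
	uint64_t multiplier = 1ULL << 18;    /* config->frac_bits */

	uint64_t dec_multiple = pll_freq * multiplier / divider;
	uint32_t frac = dec_multiple % multiplier;
	uint64_t dec = dec_multiple / multiplier;

	/* dec feeds DECIMAL_DIV_START_1, frac the FRAC_DIV_START_*_1 regs */
	printf("dec=%llu frac=0x%x low=0x%02x mid=0x%02x high=0x%x\n",
	       (unsigned long long)dec, frac,
	       frac & 0xff, (frac >> 8) & 0xff, (frac >> 16) & 0x3);
	return 0;
}

For 3.5 GHz this gives dec=91 and frac=0x9555, i.e. (91 + 0x9555 / 2^18) *
38.4 MHz, which reproduces the requested rate to within the fractional
resolution.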
-
-#define SSC_CENTER		BIT(0)
-#define SSC_EN			BIT(1)
-
-static void dsi_pll_calc_ssc(struct dsi_pll_7nm *pll)
-{
-	struct dsi_pll_config *config = &pll->pll_configuration;
-	struct dsi_pll_regs *regs = &pll->reg_setup;
-	u32 ssc_per;
-	u32 ssc_mod;
-	u64 ssc_step_size;
-	u64 frac;
-
-	if (!config->enable_ssc) {
-		DBG("SSC not enabled\n");
-		return;
-	}
-
-	ssc_per = DIV_ROUND_CLOSEST(config->ref_freq, config->ssc_freq) / 2 - 1;
-	ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
-	ssc_per -= ssc_mod;
-
-	frac = regs->frac_div_start_low |
-			(regs->frac_div_start_mid << 8) |
-			(regs->frac_div_start_high << 16);
-	ssc_step_size = regs->decimal_div_start;
-	ssc_step_size *= (1 << config->frac_bits);
-	ssc_step_size += frac;
-	ssc_step_size *= config->ssc_offset;
-	ssc_step_size *= (config->ssc_adj_per + 1);
-	ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
-	ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);
-
-	regs->ssc_div_per_low = ssc_per & 0xFF;
-	regs->ssc_div_per_high = (ssc_per & 0xFF00) >> 8;
-	regs->ssc_stepsize_low = (u32)(ssc_step_size & 0xFF);
-	regs->ssc_stepsize_high = (u32)((ssc_step_size & 0xFF00) >> 8);
-	regs->ssc_adjper_low = config->ssc_adj_per & 0xFF;
-	regs->ssc_adjper_high = (config->ssc_adj_per & 0xFF00) >> 8;
-
-	regs->ssc_control = config->ssc_center ? SSC_CENTER : 0;
-
-	pr_debug("SSC: Dec:%d, frac:%llu, frac_bits:%d\n",
-		 regs->decimal_div_start, frac, config->frac_bits);
-	pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
-		 ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
-}
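When SSC is enabled, dsi_pll_calc_ssc() above turns the defaults set in
dsi_pll_setup_config() (ssc_freq 31500, ssc_offset 4800, ssc_adj_per 2)
into the SSC period and step-size register values. A rough standalone
check of those numbers, reusing dec/frac from the previous sketch; all
values are illustrative:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ref_freq = 19200000, ssc_freq = 31500;
	uint32_t ssc_offset = 4800, ssc_adj_per = 2, frac_bits = 18;
	uint64_t dec = 91, frac = 0x9555;    /* from the 3.5 GHz example */

	uint32_t ssc_per = (ref_freq + ssc_freq / 2) / ssc_freq / 2 - 1;
	ssc_per -= (ssc_per + 1) % (ssc_adj_per + 1);

	uint64_t step = (dec << frac_bits) + frac;
	step = step * ssc_offset * (ssc_adj_per + 1);
	step /= ssc_per + 1;
	step = (step + 500000) / 1000000;    /* DIV_ROUND_CLOSEST_ULL */

	printf("ssc_per=%u step=%llu\n", ssc_per, (unsigned long long)step);
	return 0;
}

This works out to ssc_per=302 and a step size of 1136 before the low/high
byte split into the SSC_STEPSIZE, SSC_DIV_PER and SSC_ADJPER registers.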
-
-static void dsi_pll_ssc_commit(struct dsi_pll_7nm *pll)
-{
-	void __iomem *base = pll->mmio;
-	struct dsi_pll_regs *regs = &pll->reg_setup;
-
-	if (pll->pll_configuration.enable_ssc) {
-		pr_debug("SSC is enabled\n");
-
-		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
-			  regs->ssc_stepsize_low);
-		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
-			  regs->ssc_stepsize_high);
-		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_1,
-			  regs->ssc_div_per_low);
-		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
-			  regs->ssc_div_per_high);
-		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_1,
-			  regs->ssc_adjper_low);
-		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_1,
-			  regs->ssc_adjper_high);
-		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_CONTROL,
-			  SSC_EN | regs->ssc_control);
-	}
-}
-
-static void dsi_pll_config_hzindep_reg(struct dsi_pll_7nm *pll)
-{
-	void __iomem *base = pll->mmio;
-	u8 analog_controls_five_1 = 0x01, vco_config_1 = 0x00;
-
-	if (pll->base.type == MSM_DSI_PHY_7NM_V4_1) {
-		if (pll->vco_current_rate >= 3100000000ULL)
-			analog_controls_five_1 = 0x03;
-
-		if (pll->vco_current_rate < 1520000000ULL)
-			vco_config_1 = 0x08;
-		else if (pll->vco_current_rate < 2990000000ULL)
-			vco_config_1 = 0x01;
-	}
-
-	pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1,
-		  analog_controls_five_1);
-	pll_write(base + REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1, vco_config_1);
-	pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE, 0x01);
-	pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
-	pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);
-	pll_write(base + REG_DSI_7nm_PHY_PLL_DSM_DIVIDER, 0x00);
-	pll_write(base + REG_DSI_7nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
-	pll_write(base + REG_DSI_7nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
-	pll_write(base + REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE, 0xba);
-	pll_write(base + REG_DSI_7nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
-	pll_write(base + REG_DSI_7nm_PHY_PLL_OUTDIV, 0x00);
-	pll_write(base + REG_DSI_7nm_PHY_PLL_CORE_OVERRIDE, 0x00);
-	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
-	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x0a);
-	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_1, 0xc0);
-	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x84);
-	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x82);
-	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1, 0x4c);
-	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
-	pll_write(base + REG_DSI_7nm_PHY_PLL_PFILT, 0x29);
-	pll_write(base + REG_DSI_7nm_PHY_PLL_PFILT, 0x2f);
-	pll_write(base + REG_DSI_7nm_PHY_PLL_IFILT, 0x2a);
-	pll_write(base + REG_DSI_7nm_PHY_PLL_IFILT,
-		  pll->base.type == MSM_DSI_PHY_7NM_V4_1 ? 0x3f : 0x22);
-
-	if (pll->base.type == MSM_DSI_PHY_7NM_V4_1) {
-		pll_write(base + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE, 0x22);
-		if (pll->slave)
-			pll_write(pll->slave->mmio + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE, 0x22);
-	}
-}
-
-static void dsi_pll_commit(struct dsi_pll_7nm *pll)
-{
-	void __iomem *base = pll->mmio;
-	struct dsi_pll_regs *reg = &pll->reg_setup;
-
-	pll_write(base + REG_DSI_7nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
-	pll_write(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1, reg->decimal_div_start);
-	pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1, reg->frac_div_start_low);
-	pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1, reg->frac_div_start_mid);
-	pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1, reg->frac_div_start_high);
-	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, reg->pll_lockdet_rate);
-	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
-	pll_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, 0x10); /* TODO: 0x00 for CPHY */
-	pll_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS, reg->pll_clock_inverters);
-}
-
-static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
-				     unsigned long parent_rate)
-{
-	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
-
-	DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_7nm->id, rate,
-	    parent_rate);
-
-	pll_7nm->vco_current_rate = rate;
-	pll_7nm->vco_ref_clk_rate = VCO_REF_CLK_RATE;
-
-	dsi_pll_setup_config(pll_7nm);
-
-	dsi_pll_calc_dec_frac(pll_7nm);
-
-	dsi_pll_calc_ssc(pll_7nm);
-
-	dsi_pll_commit(pll_7nm);
-
-	dsi_pll_config_hzindep_reg(pll_7nm);
-
-	dsi_pll_ssc_commit(pll_7nm);
-
-	/* flush, ensure all register writes are done */
-	wmb();
-
-	return 0;
-}
-
-static int dsi_pll_7nm_lock_status(struct dsi_pll_7nm *pll)
-{
-	int rc;
-	u32 status = 0;
-	u32 const delay_us = 100;
-	u32 const timeout_us = 5000;
-
-	rc = readl_poll_timeout_atomic(pll->mmio +
-				       REG_DSI_7nm_PHY_PLL_COMMON_STATUS_ONE,
-				       status,
-				       ((status & BIT(0)) > 0),
-				       delay_us,
-				       timeout_us);
-	if (rc)
-		pr_err("DSI PLL(%d) lock failed, status=0x%08x\n",
-		       pll->id, status);
-
-	return rc;
-}
-
-static void dsi_pll_disable_pll_bias(struct dsi_pll_7nm *pll)
-{
-	u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0);
-
-	pll_write(pll->mmio + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES, 0);
-	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0, data & ~BIT(5));
-	ndelay(250);
-}
-
-static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll)
-{
-	u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0);
-
-	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0, data | BIT(5));
-	pll_write(pll->mmio + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
-	ndelay(250);
-}
-
-static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll)
-{
-	u32 data;
-
-	data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
-	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1, data & ~BIT(5));
-}
-
-static void dsi_pll_enable_global_clk(struct dsi_pll_7nm *pll)
-{
-	u32 data;
-
-	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_3, 0x04);
-
-	data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
-	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1,
-		  data | BIT(5) | BIT(4));
-}
-
-static void dsi_pll_phy_dig_reset(struct dsi_pll_7nm *pll)
-{
-	/*
-	 * Reset the PHY digital domain. This would be needed when
-	 * coming out of a CX or analog rail power collapse while
-	 * ensuring that the pads maintain LP00 or LP11 state
-	 */
-	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4, BIT(0));
-	wmb(); /* Ensure that the reset is deasserted */
-	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4, 0x0);
-	wmb(); /* Ensure that the reset is deasserted */
-}
-
-static int dsi_pll_7nm_vco_prepare(struct clk_hw *hw)
-{
-	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
-	int rc;
-
-	dsi_pll_enable_pll_bias(pll_7nm);
-	if (pll_7nm->slave)
-		dsi_pll_enable_pll_bias(pll_7nm->slave);
-
-	/* Start PLL */
-	pll_write(pll_7nm->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0x01);
-
-	/*
-	 * ensure all PLL configurations are written prior to checking
-	 * for PLL lock.
-	 */
-	wmb();
-
-	/* Check for PLL lock */
-	rc = dsi_pll_7nm_lock_status(pll_7nm);
-	if (rc) {
-		pr_err("PLL(%d) lock failed\n", pll_7nm->id);
-		goto error;
-	}
-
-	pll->pll_on = true;
-
-	/*
-	 * assert power on reset for PHY digital in case the PLL is
-	 * enabled after CX or analog domain power collapse. This needs
-	 * to be done before enabling the global clk.
-	 */
-	dsi_pll_phy_dig_reset(pll_7nm);
-	if (pll_7nm->slave)
-		dsi_pll_phy_dig_reset(pll_7nm->slave);
-
-	dsi_pll_enable_global_clk(pll_7nm);
-	if (pll_7nm->slave)
-		dsi_pll_enable_global_clk(pll_7nm->slave);
-
-error:
-	return rc;
-}
-
-static void dsi_pll_disable_sub(struct dsi_pll_7nm *pll)
-{
-	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_RBUF_CTRL, 0);
-	dsi_pll_disable_pll_bias(pll);
-}
-
-static void dsi_pll_7nm_vco_unprepare(struct clk_hw *hw)
-{
-	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
-
-	/*
-	 * To avoid any stray glitches while abruptly powering down the PLL
-	 * make sure to gate the clock using the clock enable bit before
-	 * powering down the PLL
-	 */
-	dsi_pll_disable_global_clk(pll_7nm);
-	pll_write(pll_7nm->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0);
-	dsi_pll_disable_sub(pll_7nm);
-	if (pll_7nm->slave) {
-		dsi_pll_disable_global_clk(pll_7nm->slave);
-		dsi_pll_disable_sub(pll_7nm->slave);
-	}
-	/* flush, ensure all register writes are done */
-	wmb();
-	pll->pll_on = false;
-}
-
-static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
-						  unsigned long parent_rate)
-{
-	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
-	struct dsi_pll_config *config = &pll_7nm->pll_configuration;
-	void __iomem *base = pll_7nm->mmio;
-	u64 ref_clk = pll_7nm->vco_ref_clk_rate;
-	u64 vco_rate = 0x0;
-	u64 multiplier;
-	u32 frac;
-	u32 dec;
-	u64 pll_freq, tmp64;
-
-	dec = pll_read(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1);
-	dec &= 0xff;
-
-	frac = pll_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1);
-	frac |= ((pll_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1) &
-		  0xff) << 8);
-	frac |= ((pll_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
-		  0x3) << 16);
-
-	/*
-	 * TODO:
-	 *	1. Assumes prescaler is disabled
-	 */
-	multiplier = 1 << config->frac_bits;
-	pll_freq = dec * (ref_clk * 2);
-	tmp64 = (ref_clk * 2 * frac);
-	pll_freq += div_u64(tmp64, multiplier);
-
-	vco_rate = pll_freq;
-
-	DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
-	    pll_7nm->id, (unsigned long)vco_rate, dec, frac);
-
-	return (unsigned long)vco_rate;
-}
-
-static const struct clk_ops clk_ops_dsi_pll_7nm_vco = {
-	.round_rate = msm_dsi_pll_helper_clk_round_rate,
-	.set_rate = dsi_pll_7nm_vco_set_rate,
-	.recalc_rate = dsi_pll_7nm_vco_recalc_rate,
-	.prepare = dsi_pll_7nm_vco_prepare,
-	.unprepare = dsi_pll_7nm_vco_unprepare,
-};
-
-/*
- * PLL Callbacks
- */
-
-static void dsi_pll_7nm_save_state(struct msm_dsi_pll *pll)
-{
-	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
-	struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;
-	void __iomem *phy_base = pll_7nm->phy_cmn_mmio;
-	u32 cmn_clk_cfg0, cmn_clk_cfg1;
-
-	cached->pll_out_div = pll_read(pll_7nm->mmio +
-				       REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
-	cached->pll_out_div &= 0x3;
-
-	cmn_clk_cfg0 = pll_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
-	cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
-	cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;
-
-	cmn_clk_cfg1 = pll_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
-	cached->pll_mux = cmn_clk_cfg1 & 0x3;
-
-	DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
-	    pll_7nm->id, cached->pll_out_div, cached->bit_clk_div,
-	    cached->pix_clk_div, cached->pll_mux);
-}
-
-static int dsi_pll_7nm_restore_state(struct msm_dsi_pll *pll)
-{
-	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
-	struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;
-	void __iomem *phy_base = pll_7nm->phy_cmn_mmio;
-	u32 val;
-	int ret;
-
-	val = pll_read(pll_7nm->mmio + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
-	val &= ~0x3;
-	val |= cached->pll_out_div;
-	pll_write(pll_7nm->mmio + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE, val);
-
-	pll_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0,
-		  cached->bit_clk_div | (cached->pix_clk_div << 4));
-
-	val = pll_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
-	val &= ~0x3;
-	val |= cached->pll_mux;
-	pll_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, val);
-
-	ret = dsi_pll_7nm_vco_set_rate(&pll->clk_hw, pll_7nm->vco_current_rate, pll_7nm->vco_ref_clk_rate);
-	if (ret) {
-		DRM_DEV_ERROR(&pll_7nm->pdev->dev,
-			"restore vco rate failed. ret=%d\n", ret);
-		return ret;
-	}
-
-	DBG("DSI PLL%d", pll_7nm->id);
-
-	return 0;
-}
-
-static int dsi_pll_7nm_set_usecase(struct msm_dsi_pll *pll,
-				    enum msm_dsi_phy_usecase uc)
-{
-	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
-	void __iomem *base = pll_7nm->phy_cmn_mmio;
-	u32 data = 0x0;	/* internal PLL */
-
-	DBG("DSI PLL%d", pll_7nm->id);
-
-	switch (uc) {
-	case MSM_DSI_PHY_STANDALONE:
-		break;
-	case MSM_DSI_PHY_MASTER:
-		pll_7nm->slave = pll_7nm_list[(pll_7nm->id + 1) % DSI_MAX];
-		break;
-	case MSM_DSI_PHY_SLAVE:
-		data = 0x1; /* external PLL */
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	/* set PLL src */
-	pll_write(base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, (data << 2));
-
-	pll_7nm->uc = uc;
-
-	return 0;
-}
-
-static int dsi_pll_7nm_get_provider(struct msm_dsi_pll *pll,
-				     struct clk **byte_clk_provider,
-				     struct clk **pixel_clk_provider)
-{
-	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
-	struct clk_hw_onecell_data *hw_data = pll_7nm->hw_data;
-
-	DBG("DSI PLL%d", pll_7nm->id);
-
-	if (byte_clk_provider)
-		*byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk;
-	if (pixel_clk_provider)
-		*pixel_clk_provider = hw_data->hws[DSI_PIXEL_PLL_CLK]->clk;
-
-	return 0;
-}
-
-static void dsi_pll_7nm_destroy(struct msm_dsi_pll *pll)
-{
-	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
-	struct device *dev = &pll_7nm->pdev->dev;
-
-	DBG("DSI PLL%d", pll_7nm->id);
-	of_clk_del_provider(dev->of_node);
-
-	clk_hw_unregister_divider(pll_7nm->out_dsiclk_hw);
-	clk_hw_unregister_mux(pll_7nm->pclk_mux_hw);
-	clk_hw_unregister_fixed_factor(pll_7nm->post_out_div_clk_hw);
-	clk_hw_unregister_fixed_factor(pll_7nm->by_2_bit_clk_hw);
-	clk_hw_unregister_fixed_factor(pll_7nm->byte_clk_hw);
-	clk_hw_unregister_divider(pll_7nm->bit_clk_hw);
-	clk_hw_unregister_divider(pll_7nm->out_div_clk_hw);
-	clk_hw_unregister(&pll_7nm->base.clk_hw);
-}
-
-/*
- * The post dividers and mux clocks are created using the standard divider and
- * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux
- * state to follow the master PLL's divider/mux state. Therefore, we don't
- * require special clock ops that also configure the slave PLL registers
- */
-static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm)
-{
-	char clk_name[32], parent[32], vco_name[32];
-	char parent2[32], parent3[32], parent4[32];
-	struct clk_init_data vco_init = {
-		.parent_names = (const char *[]){ "bi_tcxo" },
-		.num_parents = 1,
-		.name = vco_name,
-		.flags = CLK_IGNORE_UNUSED,
-		.ops = &clk_ops_dsi_pll_7nm_vco,
-	};
-	struct device *dev = &pll_7nm->pdev->dev;
-	struct clk_hw_onecell_data *hw_data;
-	struct clk_hw *hw;
-	int ret;
-
-	DBG("DSI%d", pll_7nm->id);
-
-	hw_data = devm_kzalloc(dev, sizeof(*hw_data) +
-			       NUM_PROVIDED_CLKS * sizeof(struct clk_hw *),
-			       GFP_KERNEL);
-	if (!hw_data)
-		return -ENOMEM;
-
-	snprintf(vco_name, 32, "dsi%dvco_clk", pll_7nm->id);
-	pll_7nm->base.clk_hw.init = &vco_init;
-
-	ret = clk_hw_register(dev, &pll_7nm->base.clk_hw);
-	if (ret)
-		return ret;
-
-	snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);
-	snprintf(parent, 32, "dsi%dvco_clk", pll_7nm->id);
-
-	hw = clk_hw_register_divider(dev, clk_name,
-				     parent, CLK_SET_RATE_PARENT,
-				     pll_7nm->mmio +
-				     REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE,
-				     0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
-	if (IS_ERR(hw)) {
-		ret = PTR_ERR(hw);
-		goto err_base_clk_hw;
-	}
-
-	pll_7nm->out_div_clk_hw = hw;
-
-	snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_7nm->id);
-	snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);
-
-	/* BIT CLK: DIV_CTRL_3_0 */
-	hw = clk_hw_register_divider(dev, clk_name, parent,
-				     CLK_SET_RATE_PARENT,
-				     pll_7nm->phy_cmn_mmio +
-				     REG_DSI_7nm_PHY_CMN_CLK_CFG0,
-				     0, 4, CLK_DIVIDER_ONE_BASED,
-				     &pll_7nm->postdiv_lock);
-	if (IS_ERR(hw)) {
-		ret = PTR_ERR(hw);
-		goto err_out_div_clk_hw;
-	}
-
-	pll_7nm->bit_clk_hw = hw;
-
-	snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_7nm->id);
-	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->id);
-
-	/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
-	hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
-					  CLK_SET_RATE_PARENT, 1, 8);
-	if (IS_ERR(hw)) {
-		ret = PTR_ERR(hw);
-		goto err_bit_clk_hw;
-	}
-
-	pll_7nm->byte_clk_hw = hw;
-	hw_data->hws[DSI_BYTE_PLL_CLK] = hw;
-
-	snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->id);
-	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->id);
-
-	hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
-					  0, 1, 2);
-	if (IS_ERR(hw)) {
-		ret = PTR_ERR(hw);
-		goto err_byte_clk_hw;
-	}
-
-	pll_7nm->by_2_bit_clk_hw = hw;
-
-	snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->id);
-	snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);
-
-	hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
-					  0, 1, 4);
-	if (IS_ERR(hw)) {
-		ret = PTR_ERR(hw);
-		goto err_by_2_bit_clk_hw;
-	}
-
-	pll_7nm->post_out_div_clk_hw = hw;
-
-	snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_7nm->id);
-	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->id);
-	snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->id);
-	snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);
-	snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->id);
-
-	hw = clk_hw_register_mux(dev, clk_name,
-				 ((const char *[]){
-				 parent, parent2, parent3, parent4
-				 }), 4, 0, pll_7nm->phy_cmn_mmio +
-				 REG_DSI_7nm_PHY_CMN_CLK_CFG1,
-				 0, 2, 0, NULL);
-	if (IS_ERR(hw)) {
-		ret = PTR_ERR(hw);
-		goto err_post_out_div_clk_hw;
-	}
-
-	pll_7nm->pclk_mux_hw = hw;
-
-	snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_7nm->id);
-	snprintf(parent, 32, "dsi%d_pclk_mux", pll_7nm->id);
-
-	/* PIX CLK DIV : DIV_CTRL_7_4*/
-	hw = clk_hw_register_divider(dev, clk_name, parent,
-				     0, pll_7nm->phy_cmn_mmio +
-					REG_DSI_7nm_PHY_CMN_CLK_CFG0,
-				     4, 4, CLK_DIVIDER_ONE_BASED,
-				     &pll_7nm->postdiv_lock);
-	if (IS_ERR(hw)) {
-		ret = PTR_ERR(hw);
-		goto err_pclk_mux_hw;
-	}
-
-	pll_7nm->out_dsiclk_hw = hw;
-	hw_data->hws[DSI_PIXEL_PLL_CLK] = hw;
-
-	hw_data->num = NUM_PROVIDED_CLKS;
-	pll_7nm->hw_data = hw_data;
-
-	ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
-				     pll_7nm->hw_data);
-	if (ret) {
-		DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
-		goto err_dsiclk_hw;
-	}
-
-	return 0;
-
-err_dsiclk_hw:
-	clk_hw_unregister_divider(pll_7nm->out_dsiclk_hw);
-err_pclk_mux_hw:
-	clk_hw_unregister_mux(pll_7nm->pclk_mux_hw);
-err_post_out_div_clk_hw:
-	clk_hw_unregister_fixed_factor(pll_7nm->post_out_div_clk_hw);
-err_by_2_bit_clk_hw:
-	clk_hw_unregister_fixed_factor(pll_7nm->by_2_bit_clk_hw);
-err_byte_clk_hw:
-	clk_hw_unregister_fixed_factor(pll_7nm->byte_clk_hw);
-err_bit_clk_hw:
-	clk_hw_unregister_divider(pll_7nm->bit_clk_hw);
-err_out_div_clk_hw:
-	clk_hw_unregister_divider(pll_7nm->out_div_clk_hw);
-err_base_clk_hw:
-	clk_hw_unregister(&pll_7nm->base.clk_hw);
-
-	return ret;
-}
-
-struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev,
-					enum msm_dsi_phy_type type, int id)
-{
-	struct dsi_pll_7nm *pll_7nm;
-	struct msm_dsi_pll *pll;
-	int ret;
-
-	pll_7nm = devm_kzalloc(&pdev->dev, sizeof(*pll_7nm), GFP_KERNEL);
-	if (!pll_7nm)
-		return ERR_PTR(-ENOMEM);
-
-	DBG("DSI PLL%d", id);
-
-	pll_7nm->pdev = pdev;
-	pll_7nm->id = id;
-	pll_7nm_list[id] = pll_7nm;
-
-	pll_7nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
-	if (IS_ERR_OR_NULL(pll_7nm->phy_cmn_mmio)) {
-		DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	pll_7nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
-	if (IS_ERR_OR_NULL(pll_7nm->mmio)) {
-		DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	spin_lock_init(&pll_7nm->postdiv_lock);
-
-	pll = &pll_7nm->base;
-	pll->min_rate = 1000000000UL;
-	pll->max_rate = 3500000000UL;
-	if (type == MSM_DSI_PHY_7NM_V4_1) {
-		pll->min_rate = 600000000UL;
-		pll->max_rate = (unsigned long)5000000000ULL;
-		/* workaround for max rate overflowing on 32-bit builds: */
-		pll->max_rate = max(pll->max_rate, 0xffffffffUL);
-	}
-	pll->get_provider = dsi_pll_7nm_get_provider;
-	pll->destroy = dsi_pll_7nm_destroy;
-	pll->save_state = dsi_pll_7nm_save_state;
-	pll->restore_state = dsi_pll_7nm_restore_state;
-	pll->set_usecase = dsi_pll_7nm_set_usecase;
-
-	pll_7nm->vco_delay = 1;
-
-	ret = pll_7nm_register(pll_7nm);
-	if (ret) {
-		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
-		return ERR_PTR(ret);
-	}
-
-	/* TODO: Remove this when we have proper display handover support */
-	msm_dsi_pll_save_state(pll);
-
-	return pll;
-}