diff mbox series

[v5,6/9] coresight: add support for CPU debug module

Message ID 1490466197-29163-7-git-send-email-leo.yan@linaro.org
State New
Headers show
Series None | expand

Commit Message

Leo Yan March 25, 2017, 6:23 p.m. UTC
Coresight includes debug module and usually the module connects with CPU
debug logic. ARMv8 architecture reference manual (ARM DDI 0487A.k) has
description for related info in "Part H: External Debug".

Chapter H7 "The Sample-based Profiling Extension" introduces several
sampling registers, e.g. we can check program counter value with
combined CPU exception level, secure state, etc. So this is helpful for
analyzing CPU lockup scenarios, e.g. if one CPU has run into an infinite
loop with IRQs disabled. In this case the CPU cannot switch context and
handle any interrupt (including IPIs); as a result it cannot handle the
SMP call for a stack dump.

This patch is to enable coresight debug module, so firstly this driver
is to bind apb clock for debug module and this is to ensure the debug
module can be accessed from program or external debugger. And the driver
uses sample-based registers for debug purpose, e.g. when system triggers
panic, the driver will dump program counter and combined context
registers (EDCIDSR, EDVIDSR); by parsing context registers so can
quickly get to know CPU secure state, exception level, etc.

Some of the debug module registers are located in the CPU power domain, so
this requires the CPU power domain to stay on when accessing related debug
registers; however, power management for the CPU power domain is quite
dependent on SoC integration. For platforms with sane power controller
implementations, this driver follows the method of setting EDPRCR to try
to pull the CPU out of low power state and then setting the 'no power
down request' bit so the CPU has no chance to lose power.

If the SoC has not followed this design well in its power management
controller, the driver provides the module parameter "idle_constraint".
By setting this parameter to a latency requirement in microseconds, we
can constrain all or a subset of idle states to ensure the CPU power
domain stays enabled; this is a backup method for accessing the
coresight CPU debug component safely.

Signed-off-by: Leo Yan <leo.yan@linaro.org>

---
 drivers/hwtracing/coresight/Kconfig               |  11 +
 drivers/hwtracing/coresight/Makefile              |   1 +
 drivers/hwtracing/coresight/coresight-cpu-debug.c | 704 ++++++++++++++++++++++
 3 files changed, 716 insertions(+)
 create mode 100644 drivers/hwtracing/coresight/coresight-cpu-debug.c

-- 
2.7.4

--
To unsubscribe from this list: send the line "unsubscribe linux-arm-msm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Comments

Suzuki K Poulose March 27, 2017, 4:34 p.m. UTC | #1
On 25/03/17 18:23, Leo Yan wrote:
> Coresight includes debug module and usually the module connects with CPU

> debug logic. ARMv8 architecture reference manual (ARM DDI 0487A.k) has

> description for related info in "Part H: External Debug".

>

> Chapter H7 "The Sample-based Profiling Extension" introduces several

> sampling registers, e.g. we can check program counter value with

> combined CPU exception level, secure state, etc. So this is helpful for

> analysis CPU lockup scenarios, e.g. if one CPU has run into infinite

> loop with IRQ disabled. In this case the CPU cannot switch context and

> handle any interrupt (including IPIs), as the result it cannot handle

> SMP call for stack dump.

>

> This patch is to enable coresight debug module, so firstly this driver

> is to bind apb clock for debug module and this is to ensure the debug

> module can be accessed from program or external debugger. And the driver

> uses sample-based registers for debug purpose, e.g. when system triggers

> panic, the driver will dump program counter and combined context

> registers (EDCIDSR, EDVIDSR); by parsing context registers so can

> quickly get to know CPU secure state, exception level, etc.

>

> Some of the debug module registers are located in CPU power domain, so

> this requires the CPU power domain stays on when access related debug

> registers, but the power management for CPU power domain is quite

> dependent on SoC integration for power management. For the platforms

> which with sane power controller implementations, this driver follows

> the method to set EDPRCR to try to pull the CPU out of low power state

> and then set 'no power down request' bit so the CPU has no chance to

> lose power.

>

> If the SoC has not followed up this design well for power management

> controller, the driver introduces module parameter "idle_constraint".

> Setting this parameter for latency requirement in microseconds, finally

> we can constrain all or partial idle states to ensure the CPU power

> domain is enabled, this is a backup method to access coresight CPU

> debug component safely.


Leo,

Thanks a lot for the quick rework. I don't fully understand (yet!) why we need the
idle_constraint. I will leave it for Sudeep to comment on it, as he is the expert
in that area. Some minor comments below.

>

> Signed-off-by: Leo Yan <leo.yan@linaro.org>

> ---

>  drivers/hwtracing/coresight/Kconfig               |  11 +

>  drivers/hwtracing/coresight/Makefile              |   1 +

>  drivers/hwtracing/coresight/coresight-cpu-debug.c | 704 ++++++++++++++++++++++

>  3 files changed, 716 insertions(+)

>  create mode 100644 drivers/hwtracing/coresight/coresight-cpu-debug.c

>

> diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig

> index 130cb21..18d7931 100644

> --- a/drivers/hwtracing/coresight/Kconfig

> +++ b/drivers/hwtracing/coresight/Kconfig

> @@ -89,4 +89,15 @@ config CORESIGHT_STM

>  	  logging useful software events or data coming from various entities

>  	  in the system, possibly running different OSs

>

> +config CORESIGHT_CPU_DEBUG

> +	tristate "CoreSight CPU Debug driver"

> +	depends on ARM || ARM64

> +	depends on DEBUG_FS

> +	help

> +	  This driver provides support for coresight debugging module. This

> +	  is primarily used to dump sample-based profiling registers when

> +	  system triggers panic, the driver will parse context registers so

> +	  can quickly get to know program counter (PC), secure state,

> +	  exception level, etc.


Maybe we should mention/warn the user about the possible caveats of using
this feature to help them make a better decision? And/or we should add
documentation for it. We have collected some really good information over the
discussions and it would be a good idea to capture it somewhere.

> +

>  endif

> diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile

> index af480d9..433d590 100644

> --- a/drivers/hwtracing/coresight/Makefile

> +++ b/drivers/hwtracing/coresight/Makefile

> @@ -16,3 +16,4 @@ obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o \

>  					coresight-etm4x-sysfs.o

>  obj-$(CONFIG_CORESIGHT_QCOM_REPLICATOR) += coresight-replicator-qcom.o

>  obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o

> +obj-$(CONFIG_CORESIGHT_CPU_DEBUG) += coresight-cpu-debug.o

> diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c

> new file mode 100644

> index 0000000..fbec1d1

> --- /dev/null

> +++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c


> +#define EDPCSR				0x0A0

> +#define EDCIDSR				0x0A4

> +#define EDVIDSR				0x0A8

> +#define EDPCSR_HI			0x0AC

> +#define EDOSLAR				0x300

> +#define EDPRCR				0x310

> +#define EDPRSR				0x314

> +#define EDDEVID1			0xFC4

> +#define EDDEVID				0xFC8

> +

> +#define EDPCSR_PROHIBITED		0xFFFFFFFF

> +

> +/* bits definition for EDPCSR */

> +#ifndef CONFIG_64BIT


We don't need this to protect the definitions, see comments around the adjust_pc method.

> +#define EDPCSR_THUMB			BIT(0)

> +#define EDPCSR_ARM_INST_MASK		GENMASK(31, 2)

> +#define EDPCSR_THUMB_INST_MASK		GENMASK(31, 1)

> +#endif

> +

> +/* bits definition for EDPRCR */

> +#define EDPRCR_COREPURQ			BIT(3)

> +#define EDPRCR_CORENPDRQ		BIT(0)

> +

> +/* bits definition for EDPRSR */

> +#define EDPRSR_DLK			BIT(6)

> +#define EDPRSR_PU			BIT(0)

> +

> +/* bits definition for EDVIDSR */

> +#define EDVIDSR_NS			BIT(31)

> +#define EDVIDSR_E2			BIT(30)

> +#define EDVIDSR_E3			BIT(29)

> +#define EDVIDSR_HV			BIT(28)

> +#define EDVIDSR_VMID			GENMASK(7, 0)

> +

> +/*

> + * bits definition for EDDEVID1:PSCROffset

> + *

> + * NOTE: armv8 and armv7 have different definition for the register,

> + * so consolidate the bits definition as below:

> + *

> + * 0b0000 - Sample offset applies based on the instruction state, we

> + *          rely on EDDEVID to check if EDPCSR is implemented or not

> + * 0b0001 - No offset applies.

> + * 0b0010 - No offset applies, but do not use in AArch32 mode

> + *

> + */

> +#define EDDEVID1_PCSR_OFFSET_MASK	GENMASK(3, 0)

> +#define EDDEVID1_PCSR_OFFSET_INS_SET	(0x0)

> +#define EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32	(0x2)

> +

> +/* bits definition for EDDEVID */

> +#define EDDEVID_PCSAMPLE_MODE		GENMASK(3, 0)

> +#define EDDEVID_IMPL_NONE		(0x0)

> +#define EDDEVID_IMPL_EDPCSR		(0x1)

> +#define EDDEVID_IMPL_EDPCSR_EDCIDSR	(0x2)

> +#define EDDEVID_IMPL_FULL		(0x3)

> +

> +#define DEBUG_WAIT_TIMEOUT		32

> +

> +struct debug_drvdata {

> +	void __iomem	*base;

> +	struct device	*dev;

> +	int		cpu;

> +

> +	bool		edpcsr_present;

> +	bool		edcidsr_present;

> +	bool		edvidsr_present;

> +	bool		pc_has_offset;

> +


> +	u32		eddevid;

> +	u32		eddevid1;


We don't need those two registers once we initialise the bool flags above.
So, we could as well drop them from here.

> +

> +	u32		edpcsr;

> +	u32		edpcsr_hi;


> +	u32		edprcr;


Unused member ?

> +	u32		edprsr;

> +	u32		edvidsr;

> +	u32		edcidsr;

> +};

> +

> +static DEFINE_MUTEX(debug_lock);

> +static DEFINE_PER_CPU(struct debug_drvdata *, debug_drvdata);

> +static int debug_count;

> +static struct dentry *debug_debugfs_dir;

> +

> +static struct pm_qos_request debug_qos_req;

> +static int idle_constraint = PM_QOS_DEFAULT_VALUE;

> +module_param(idle_constraint, int, 0600);

> +MODULE_PARM_DESC(idle_constraint, "Latency requirement in microseconds for CPU "

> +		 "idle states (default is -1, which means have no limiation "

> +		 "to CPU idle states; 0 means disabling all idle states; user "

> +		 "can choose other platform dependent values so can disable "

> +		 "specific idle states for the platform)");


Correct me if I am wrong,

All we want to do is disable the CPUIdle explicitly if the user knows that this
could be a problem to use CPU debug on his platform. So, in effect, we should
only be using idle_constraint = 0 or -1.

In which case, we could make it easier for the user to tell us, either

  0 - Don't do anything with CPUIdle (default)
  1 - Disable CPUIdle for me as I know the platform has issues with CPU debug and CPUidle.

than explaining the microsecond latency etc., and make the appropriate calls underneath.
something like (not necessarily the same name) :

module_param(broken_with_cpuidle, bool, 0600);
MODULE_PARAM_DESC(broken_with_cpuidle, "Specifies whether the CPU debug has issues with CPUIdle on"
				       " the platform. Non-zero value implies CPUIdle has to be"
				       " explicitly disabled.",);

> +

> +static bool debug_enable;

> +module_param_named(enable, debug_enable, bool, 0600);

> +MODULE_PARM_DESC(enable, "Knob to enable debug functionality "

> +		 "(default is 0, which means is disabled by default)");


> +static void debug_force_cpu_powered_up(struct debug_drvdata *drvdata)

> +{

> +	int timeout = DEBUG_WAIT_TIMEOUT;

> +

> +	drvdata->edprsr = readl_relaxed(drvdata->base + EDPRSR);

> +

> +	CS_UNLOCK(drvdata->base);

> +

> +	/* Bail out if CPU is powered up yet */

> +	if (drvdata->edprsr & EDPRSR_PU)

> +		goto out_powered_up;

> +

> +	/*

> +	 * Send request to power management controller and assert

> +	 * DBGPWRUPREQ signal; if power management controller has

> +	 * sane implementation, it should enable CPU power domain

> +	 * in case CPU is in low power state.

> +	 */

> +	drvdata->edprsr = readl(drvdata->base + EDPRCR);

> +	drvdata->edprsr |= EDPRCR_COREPURQ;


You seem to be overloading the edprsr member here with EDPRCR by mistake.
Since we don't need a cached value of EDPRCR, you might as well use a local
variable here.

> +	writel(drvdata->edprsr, drvdata->base + EDPRCR);

> +

> +	/* Wait for CPU to be powered up (timeout~=32ms) */

> +	while (timeout--) {

> +		drvdata->edprsr = readl_relaxed(drvdata->base + EDPRSR);

> +		if (drvdata->edprsr & EDPRSR_PU)

> +			break;

> +

> +		usleep_range(1000, 2000);

> +	}


We have coresight_timeout() already, but not in a reusable shape (regarding
the timeout). We could possibly reuse it in the future.

> +

> +	/*

> +	 * Unfortunately the CPU cannot be powered up, so return

> +	 * back and later has no permission to access other

> +	 * registers. For this case, should set 'idle_constraint'

> +	 * to ensure CPU power domain is enabled!

> +	 */

> +	if (!(drvdata->edprsr & EDPRSR_PU)) {

> +		pr_err("%s: power up request for CPU%d failed\n",

> +			__func__, drvdata->cpu);

> +		goto out;

> +	}

> +

> +out_powered_up:

> +	debug_os_unlock(drvdata);


Question: Do we need a matching debug_os_lock() once we are done ?

> +

> +	/*

> +	 * At this point the CPU is powered up, so set the no powerdown

> +	 * request bit so we don't lose power and emulate power down.

> +	 */

> +	drvdata->edprsr = readl(drvdata->base + EDPRCR);

> +	drvdata->edprsr |= EDPRCR_COREPURQ | EDPRCR_CORENPDRQ;

> +	writel(drvdata->edprsr, drvdata->base + EDPRCR);

> +

> +out:

> +	CS_LOCK(drvdata->base);

> +}

> +

> +static void debug_read_regs(struct debug_drvdata *drvdata)

> +{

> +	/*

> +	 * Ensure CPU power domain is enabled to let registers

> +	 * are accessiable.

> +	 */

> +	debug_force_cpu_powered_up(drvdata);

> +

> +	if (!debug_access_permitted(drvdata))

> +		return;

> +

> +	CS_UNLOCK(drvdata->base);

> +

> +	debug_os_unlock(drvdata);

> +

> +	drvdata->edpcsr = readl_relaxed(drvdata->base + EDPCSR);

> +

> +	/*

> +	 * As described in ARM DDI 0487A.k, if the processing

> +	 * element (PE) is in debug state, or sample-based

> +	 * profiling is prohibited, EDPCSR reads as 0xFFFFFFFF;

> +	 * EDCIDSR, EDVIDSR and EDPCSR_HI registers also become

> +	 * UNKNOWN state. So directly bail out for this case.

> +	 */

> +	if (drvdata->edpcsr == EDPCSR_PROHIBITED)

> +		goto out;

> +

> +	/*

> +	 * A read of the EDPCSR normally has the side-effect of

> +	 * indirectly writing to EDCIDSR, EDVIDSR and EDPCSR_HI;

> +	 * at this point it's safe to read value from them.

> +	 */

> +	if (IS_ENABLED(CONFIG_64BIT))

> +		drvdata->edpcsr_hi = readl_relaxed(drvdata->base + EDPCSR_HI);

> +

> +	if (drvdata->edcidsr_present)

> +		drvdata->edcidsr = readl_relaxed(drvdata->base + EDCIDSR);

> +

> +	if (drvdata->edvidsr_present)

> +		drvdata->edvidsr = readl_relaxed(drvdata->base + EDVIDSR);

> +

> +out:

> +	CS_LOCK(drvdata->base);

> +}

> +


> +#ifndef CONFIG_64BIT


Instead of using this #ifndef/ifdef check twice (here and in the caller), we could :

> +static unsigned long debug_adjust_pc(struct debug_drvdata *drvdata,

> +				     unsigned long pc)

> +{

> +	unsigned long arm_inst_offset = 0, thumb_inst_offset = 0;

> +

	if (IS_ENABLED(CONFIG_64BIT))
		return drvdata->edpcsr_hi << 32 | drvdata->edpcsr;

> +	if (drvdata->pc_has_offset) {

> +		arm_inst_offset = 8;

> +		thumb_inst_offset = 4;

> +	}

> +

> +	/* Handle thumb instruction */

> +	if (pc & EDPCSR_THUMB) {

> +		pc = (pc & EDPCSR_THUMB_INST_MASK) - thumb_inst_offset;

> +		return pc;

> +	}

> +

> +	/*

> +	 * Handle arm instruction offset, if the arm instruction

> +	 * is not 4 byte alignment then it's possible the case

> +	 * for implementation defined; keep original value for this

> +	 * case and print info for notice.

> +	 */

> +	if (pc & BIT(1))

> +		pr_emerg("Instruction offset is implementation defined\n");

> +	else

> +		pc = (pc & EDPCSR_ARM_INST_MASK) - arm_inst_offset;

> +

> +	return pc;

> +}


> +#endif

> +

> +static void debug_dump_regs(struct debug_drvdata *drvdata)

> +{

> +	unsigned long pc;

> +

> +	pr_emerg("\tEDPRSR:  %08x (Power:%s DLK:%s)\n", drvdata->edprsr,

> +		 drvdata->edprsr & EDPRSR_PU ? "On" : "Off",

> +		 drvdata->edprsr & EDPRSR_DLK ? "Lock" : "Unlock");

> +

> +	if (!debug_access_permitted(drvdata)) {

> +		pr_emerg("No permission to access debug registers!\n");

> +		return;

> +	}

> +

> +	if (drvdata->edpcsr == EDPCSR_PROHIBITED) {

> +		pr_emerg("CPU is in Debug state or profiling is prohibited!\n");

> +		return;

> +	}

> +


> +#ifdef CONFIG_64BIT

> +	pc = (unsigned long)drvdata->edpcsr_hi << 32 |

> +	     (unsigned long)drvdata->edpcsr;

> +#else

> +	pc = debug_adjust_pc(drvdata, (unsigned long)drvdata->edpcsr);

> +#endif


nit: see above, comment for debug_adjust_pc().


> +

> +	pr_emerg("\tEDPCSR:  [<%p>] %pS\n", (void *)pc, (void *)pc);

> +

> +	if (drvdata->edcidsr_present)

> +		pr_emerg("\tEDCIDSR: %08x\n", drvdata->edcidsr);

> +

> +	if (drvdata->edvidsr_present)

> +		pr_emerg("\tEDVIDSR: %08x (State:%s Mode:%s Width:%dbits VMID:%x)\n",

> +			 drvdata->edvidsr,

> +			 drvdata->edvidsr & EDVIDSR_NS ? "Non-secure" : "Secure",

> +			 drvdata->edvidsr & EDVIDSR_E3 ? "EL3" :

> +				(drvdata->edvidsr & EDVIDSR_E2 ? "EL2" : "EL1/0"),

> +			 drvdata->edvidsr & EDVIDSR_HV ? 64 : 32,

> +			 drvdata->edvidsr & (u32)EDVIDSR_VMID);

> +}

> +

> +static void debug_init_arch_data(void *info)

> +{

> +	struct debug_drvdata *drvdata = info;

> +	u32 mode, pcsr_offset;

> +

> +	CS_UNLOCK(drvdata->base);

> +

> +	debug_os_unlock(drvdata);

> +

> +	/* Read device info */

> +	drvdata->eddevid  = readl_relaxed(drvdata->base + EDDEVID);

> +	drvdata->eddevid1 = readl_relaxed(drvdata->base + EDDEVID1);


As mentioned above, both of these registers are only need at init time to
figure out the flags we set here. So we could remove them.

> +

> +	CS_LOCK(drvdata->base);

> +

> +	/* Parse implementation feature */

> +	mode = drvdata->eddevid & EDDEVID_PCSAMPLE_MODE;

> +	pcsr_offset = drvdata->eddevid1 & EDDEVID1_PCSR_OFFSET_MASK;



> +

> +	if (mode == EDDEVID_IMPL_NONE) {

> +		drvdata->edpcsr_present  = false;

> +		drvdata->edcidsr_present = false;

> +		drvdata->edvidsr_present = false;

> +	} else if (mode == EDDEVID_IMPL_EDPCSR) {

> +		drvdata->edpcsr_present  = true;

> +		drvdata->edcidsr_present = false;

> +		drvdata->edvidsr_present = false;

> +	} else if (mode == EDDEVID_IMPL_EDPCSR_EDCIDSR) {

> +		if (!IS_ENABLED(CONFIG_64BIT) &&

> +			(pcsr_offset == EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32))

> +			drvdata->edpcsr_present = false;

> +		else

> +			drvdata->edpcsr_present = true;


Sorry, I forgot why we do this check only in this mode. Shouldn't this be
common to all modes (of course which implies PCSR is present) ?

> +

> +		drvdata->edcidsr_present = true;

> +		drvdata->edvidsr_present = false;

> +	} else if (mode == EDDEVID_IMPL_FULL) {

> +		drvdata->edpcsr_present  = true;

> +		drvdata->edcidsr_present = true;

> +		drvdata->edvidsr_present = true;

> +	}

> +

> +	if (IS_ENABLED(CONFIG_64BIT))

> +		drvdata->pc_has_offset = false;

> +	else

> +		drvdata->pc_has_offset =

> +			(pcsr_offset == EDDEVID1_PCSR_OFFSET_INS_SET);

> +


nit: This if-else chain could be replaced by :

	
	drvdata->edpcsr_present = false;
	drvdata->edcidsr_present = false;
	drvdata->edvidsr_present = false;

	switch(mode) {
	case EDDEVID_IMPL_FULL:
		drvdata->edvidsr_present = true;
		/* Fall through */
	case EDDEVID_IMPL_EDPCSR_EDCIDSR:
		drvdata->edcidsr_present = true;
		/* Fall through */
	case EDDEVID_IMPL_EDPCSR:
		/*
		 * In ARM DDI 0487A.k, the EDDEVID1.PCSROffset is used to
		 * define if has the offset for PC sampling value; if read
		 * back EDDEVID1.PCSROffset == 0x2, then this means the debug
		 * module does not sample the instruction set state when
		 * armv8 CPU in AArch32 state.
		 */
		drvdata->edpcsr_present = (IS_ENABLED(CONFIG_64BIT) ||
					   (pcsr_offset != EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32));
		drvdata->pc_has_offset = (pcsr_offset == EDDEVID1_PCSR_OFFSET_INS_SET);
	default:
		break;
	}

> +	return;

> +}

> +

> +/*

> + * Dump out information on panic.

> + */

> +static int debug_notifier_call(struct notifier_block *self,

> +			       unsigned long v, void *p)

> +{

> +	int cpu;

> +	struct debug_drvdata *drvdata;

> +

> +	pr_emerg("ARM external debug module:\n");

> +

> +	for_each_possible_cpu(cpu) {


Shouldn't this be for_each_online_cpu() ? If the user has turned off a CPU,
we shouldn't try to dump its registers.

> +		drvdata = per_cpu(debug_drvdata, cpu);

> +		if (!drvdata)

> +			continue;

> +

> +		pr_emerg("CPU[%d]:\n", drvdata->cpu);

> +

> +		debug_read_regs(drvdata);

> +		debug_dump_regs(drvdata);

> +	}

> +

> +	return 0;

> +}

> +

> +static struct notifier_block debug_notifier = {

> +	.notifier_call = debug_notifier_call,

> +};

> +

> +static int debug_enable_func(void)

> +{

> +	int ret;

> +


I think we should request the power domain here, now that the user
has explicitly requested the feature to be turned on and released
in debug_disable_func().

> +	pm_qos_add_request(&debug_qos_req,

> +		PM_QOS_CPU_DMA_LATENCY, idle_constraint);

> +

> +	ret = atomic_notifier_chain_register(&panic_notifier_list,

> +					     &debug_notifier);


...

> +	if (ret)

> +		goto err;

> +

> +	return 0;

> +

> +err:

> +	pm_qos_remove_request(&debug_qos_req);

> +	return ret;


nit : this could be :

	if (ret)
		pm_qos_remove_request(&debug_qos_req);
	return ret;

> +}

> +

> +static void debug_disable_func(void)

> +{


As noted above, we could drop the power domain here.

> +	atomic_notifier_chain_unregister(&panic_notifier_list,

> +					 &debug_notifier);

> +	pm_qos_remove_request(&debug_qos_req);

> +}

> +

> +static ssize_t debug_func_knob_write(struct file *f,

> +		const char __user *buf, size_t count, loff_t *ppos)

> +{


> +	if (on) {

> +		ret = debug_enable_func();

> +		if (ret) {

> +			pr_err("%s: unable to disable debug function: %d\n",

> +			       __func__, ret);

> +			goto err;

> +		}

> +	} else

> +		debug_disable_func();


As per Linux codingstyle rules, you should use {} for the else section.
See Documentation/process/coding-style.rst: Section 3.


> +static ssize_t debug_idle_constraint_write(struct file *f,

> +		const char __user *buf, size_t count, loff_t *ppos)

> +{

> +	int val;

> +	ssize_t ret;

> +

> +	ret = kstrtoint_from_user(buf, count, 10, &val);

> +	if (ret)

> +		return ret;

> +

> +	mutex_lock(&debug_lock);

> +	idle_constraint = val;

> +

> +	if (debug_enable)

> +		pm_qos_update_request(&debug_qos_req, idle_constraint);

> +

> +	mutex_unlock(&debug_lock);

> +	return count;

> +}

> +

> +static ssize_t debug_idle_constraint_read(struct file *f,

> +		char __user *ubuf, size_t count, loff_t *ppos)

> +{

> +	char buf[32];

> +	int len;

> +

> +	if (*ppos)

> +		return 0;


It would be better if we do :

> +

> +	len = sprintf(buf, "%d\n", idle_constraint);


	if (*ppos > len)
		return 0;

> +	return simple_read_from_buffer(ubuf, count, ppos, buf, len);


	return simple_read_from_buffer(ubuf, count, ppos, buf + *ppos, len - *ppos);

> +

> +static int debug_func_init(void)

> +{

> +	struct dentry *file;

> +	int ret;


...

> +	/* Enable debug module at boot time */

> +	ret = debug_enable_func();

> +	if (ret) {

> +		pr_err("%s: unable to disable debug function: %d\n",


s/disable/enable ?

> +		       __func__, ret);

> +		goto err;

> +	}

> +

> +	return 0;

> +

> +err:

> +	debugfs_remove_recursive(debug_debugfs_dir);

> +	return ret;

> +}

> +

> +static void debug_func_exit(void)

> +{

> +	debugfs_remove_recursive(debug_debugfs_dir);

> +

> +	/* Disable functionality if has enabled */

> +	if (debug_enable)

> +		debug_disable_func();

> +}

> +

> +static int debug_probe(struct amba_device *adev, const struct amba_id *id)

> +{

> +	void __iomem *base;

> +	struct device *dev = &adev->dev;

> +	struct debug_drvdata *drvdata;

> +	struct resource *res = &adev->res;

> +	struct device_node *np = adev->dev.of_node;

> +	int ret;

> +

> +	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);

> +	if (!drvdata)

> +		return -ENOMEM;

> +

> +	drvdata->cpu = np ? of_coresight_get_cpu(np) : 0;

> +	if (per_cpu(debug_drvdata, drvdata->cpu)) {

> +		dev_err(dev, "CPU's drvdata has been initialized\n");

> +		return -EBUSY;

> +	}

> +

> +	drvdata->dev = &adev->dev;

> +	amba_set_drvdata(adev, drvdata);

> +

> +	/* Validity for the resource is already checked by the AMBA core */

> +	base = devm_ioremap_resource(dev, res);

> +	if (IS_ERR(base))

> +		return PTR_ERR(base);

> +

> +	drvdata->base = base;

> +

> +	get_online_cpus();

> +	per_cpu(debug_drvdata, drvdata->cpu) = drvdata;

> +	ret = smp_call_function_single(drvdata->cpu,

> +				debug_init_arch_data, drvdata, 1);

> +	put_online_cpus();


Now that we have dynamic enable/disable of the feature, we should do a pm_runtime_put()
as expected and try to get_ the power domain when we enable the debug.


Suzuki
--
To unsubscribe from this list: send the line "unsubscribe linux-arm-msm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Leo Yan March 29, 2017, 1:54 a.m. UTC | #2
Hi Mathieu,

On Tue, Mar 28, 2017 at 10:50:10AM -0600, Mathieu Poirier wrote:
> On Sun, Mar 26, 2017 at 02:23:14AM +0800, Leo Yan wrote:


[...]

> > +static void debug_force_cpu_powered_up(struct debug_drvdata *drvdata)

> > +{

> > +	int timeout = DEBUG_WAIT_TIMEOUT;

> > +

> > +	drvdata->edprsr = readl_relaxed(drvdata->base + EDPRSR);

> > +

> > +	CS_UNLOCK(drvdata->base);

> > +

> > +	/* Bail out if CPU is powered up yet */

> > +	if (drvdata->edprsr & EDPRSR_PU)

> > +		goto out_powered_up;

> > +

> > +	/*

> > +	 * Send request to power management controller and assert

> > +	 * DBGPWRUPREQ signal; if power management controller has

> > +	 * sane implementation, it should enable CPU power domain

> > +	 * in case CPU is in low power state.

> > +	 */

> > +	drvdata->edprsr = readl(drvdata->base + EDPRCR);

> > +	drvdata->edprsr |= EDPRCR_COREPURQ;

> > +	writel(drvdata->edprsr, drvdata->base + EDPRCR);

> 

> Here ->edprsr is used but EDPRCR is accessed.  Is this intentional or a

> copy/paste error?  The same is true for accesses in the out_powered_up section.


Thanks for pointing this out. This is a typo and I will fix it.

> > +

> > +	/* Wait for CPU to be powered up (timeout~=32ms) */

> > +	while (timeout--) {

> > +		drvdata->edprsr = readl_relaxed(drvdata->base + EDPRSR);

> > +		if (drvdata->edprsr & EDPRSR_PU)

> > +			break;

> > +

> > +		usleep_range(1000, 2000);

> > +	}

> 

> See if function readx_poll_timeout() can be used.


Will use it.

> > +

> > +	/*

> > +	 * Unfortunately the CPU cannot be powered up, so return

> > +	 * back and later has no permission to access other

> > +	 * registers. For this case, should set 'idle_constraint'

> > +	 * to ensure CPU power domain is enabled!

> > +	 */

> > +	if (!(drvdata->edprsr & EDPRSR_PU)) {

> > +		pr_err("%s: power up request for CPU%d failed\n",

> > +			__func__, drvdata->cpu);

> > +		goto out;

> > +	}

> > +

> > +out_powered_up:

> > +	debug_os_unlock(drvdata);

> > +

> > +	/*

> > +	 * At this point the CPU is powered up, so set the no powerdown

> > +	 * request bit so we don't lose power and emulate power down.

> > +	 */

> > +	drvdata->edprsr = readl(drvdata->base + EDPRCR);

> > +	drvdata->edprsr |= EDPRCR_COREPURQ | EDPRCR_CORENPDRQ;

> 

> If we are here the core is already up.  Shouldn't we need to set

> EDPRCR_CORENPDRQ only?


Yeah. Will fix.

> > +	writel(drvdata->edprsr, drvdata->base + EDPRCR);

> 

> This section is a little racy - between the time the PU bit has been

> checked and the time COREPDRQ has been flipped, the state of PU may have

> changed.  You can probably get around this by checking edprsr.PU rigth here.  If

> it is not set you go through the process again.  Note that doing this will

> probably force a refactoring of the whole function.  


Agree. Will handle this.

[...]

> > +static ssize_t debug_func_knob_write(struct file *f,

> > +		const char __user *buf, size_t count, loff_t *ppos)

> > +{

> > e	u8 on;

> > +	int ret;

> > +

> > +	ret = kstrtou8_from_user(buf, count, 2, &on);

> > +	if (ret)

> > +		return ret;

> > +

> > +	mutex_lock(&debug_lock);

> > +

> > +	if (!on ^ debug_enable)

> > +		goto out;

> 

> I had to read this condition too many times - please refactor.


Will do it.

> > +

> > +	if (on) {

> > +		ret = debug_enable_func();

> > +		if (ret) {

> > +			pr_err("%s: unable to disable debug function: %d\n",

> > +			       __func__, ret);

> 

> Based on the semantic this is the wrong error message.


Yeah. Will fix.

> > +			goto err;

> > +		}

> > +	} else

> > +		debug_disable_func();

> 

> Did checkpatch.pl complain about extra curly braces?  If not please add them.


checkpatch.pl doesn't report for this. Will add.

> > +

> > +	debug_enable = on;

> 

> Here we can't set debug_enable if we just called debug_disable_func().  Maybe

> I'm missing something.  If that's the case a comment in the code would be worth

> it.


After calling debug_disable_func(), debug_enable is set to 0 (on = 0).

> > +

> > +out:

> > +	ret = count;

> > +err:

> > +	mutex_unlock(&debug_lock);

> > +	return ret;

> > +}

> > +

> > +static ssize_t debug_func_knob_read(struct file *f,

> > +		char __user *ubuf, size_t count, loff_t *ppos)

> > +{

> > +	char val[] = { '0' + debug_enable, '\n' };

> > +

> > +	return simple_read_from_buffer(ubuf, count, ppos, val, sizeof(val));

> 

> Use the debug_lock to avoid race conditions.


Will do it.

> > +}

> > +

> > +static ssize_t debug_idle_constraint_write(struct file *f,

> > +		const char __user *buf, size_t count, loff_t *ppos)

> > +{

> > +	int val;

> > +	ssize_t ret;

> > +

> > +	ret = kstrtoint_from_user(buf, count, 10, &val);

> > +	if (ret)

> > +		return ret;

> > +

> > +	mutex_lock(&debug_lock);

> > +	idle_constraint = val;

> > +

> > +	if (debug_enable)

> > +		pm_qos_update_request(&debug_qos_req, idle_constraint);

> > +

> > +	mutex_unlock(&debug_lock);

> > +	return count;

> > +}

> > +

> > +static ssize_t debug_idle_constraint_read(struct file *f,

> > +		char __user *ubuf, size_t count, loff_t *ppos)

> > +{

> > +	char buf[32];

> > +	int len;

> > +

> > +	if (*ppos)

> > +		return 0;

> > +

> > +	len = sprintf(buf, "%d\n", idle_constraint);

> > +	return simple_read_from_buffer(ubuf, count, ppos, buf, len);

> 

> Use the debug_lock to avoid race conditions.


Will do it.

> > +}

> > +

> > +static const struct file_operations debug_func_knob_fops = {

> > +	.open	= simple_open,

> > +	.read	= debug_func_knob_read,

> > +	.write	= debug_func_knob_write,

> > +};

> > +

> > +static const struct file_operations debug_idle_constraint_fops = {

> > +	.open	= simple_open,

> > +	.read	= debug_idle_constraint_read,

> > +	.write	= debug_idle_constraint_write,

> > +};

> > +

> > +static int debug_func_init(void)

> > +{

> > +	struct dentry *file;

> > +	int ret;

> > +

> > +	/* Create debugfs node */

> > +	debug_debugfs_dir = debugfs_create_dir("coresight_cpu_debug", NULL);

> > +	if (!debug_debugfs_dir) {

> > +		pr_err("%s: unable to create debugfs directory\n", __func__);

> > +		return -ENOMEM;

> 

> return PTR_ERR(debug_debugfs_dir);


Here we cannot use PTR_ERR(debug_debugfs_dir). If creating the debugfs
directory fails, the pointer is NULL, so PTR_ERR(debug_debugfs_dir)
would end up returning zero.

[...]

> > +	}

> > +

> > +	file = debugfs_create_file("enable", S_IRUGO | S_IWUSR,

> > +			debug_debugfs_dir, NULL, &debug_func_knob_fops);

> > +	if (!file) {

> > +		pr_err("%s: unable to create enable knob file\n", __func__);

> > +		ret = -ENOMEM;

> 

> Same as above.

> 

> > +		goto err;

> > +	}

> > +

> > +	file = debugfs_create_file("idle_constraint", S_IRUGO | S_IWUSR,

> > +			debug_debugfs_dir, NULL, &debug_idle_constraint_fops);

> > +	if (!file) {

> > +		pr_err("%s: unable to create idle constraint file\n", __func__);

> > +		ret = -ENOMEM;

> 

> Same as above.

> 

> > +		goto err;

> > +	}

> > +

> > +	/* Use sysfs node to enable functionality */

> > +	if (!debug_enable)

> > +		return 0;

> > +

> > +	/* Enable debug module at boot time */

> > +	ret = debug_enable_func();

> > +	if (ret) {

> > +		pr_err("%s: unable to disable debug function: %d\n",

> > +		       __func__, ret);

> > +		goto err;

> > +	}

> 

> Use the debug_lock to avoid race conditions.


I'm struggling to understand what the race condition is here. The
function pair debug_func_init()/debug_func_exit() is used for the
module's probing and removing, so the module's probing and removing
are naturally sequential, right?

> > +

> > +	return 0;

> > +

> > +err:

> > +	debugfs_remove_recursive(debug_debugfs_dir);

> > +	return ret;

> > +}

> > +

> > +static void debug_func_exit(void)

> > +{

> > +	debugfs_remove_recursive(debug_debugfs_dir);

> > +

> > +	/* Disable functionality if has enabled */

> > +	if (debug_enable)

> > +		debug_disable_func();

> > +}

> > +

> > +static int debug_probe(struct amba_device *adev, const struct amba_id *id)

> > +{

> > +	void __iomem *base;

> > +	struct device *dev = &adev->dev;

> > +	struct debug_drvdata *drvdata;

> > +	struct resource *res = &adev->res;

> > +	struct device_node *np = adev->dev.of_node;

> > +	int ret;

> > +

> > +	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);

> > +	if (!drvdata)

> > +		return -ENOMEM;

> > +

> > +	drvdata->cpu = np ? of_coresight_get_cpu(np) : 0;

> > +	if (per_cpu(debug_drvdata, drvdata->cpu)) {

> > +		dev_err(dev, "CPU's drvdata has been initialized\n");

> 

> Might be worth adding the CPU number in the error message.


Yeah, will add it.

[...]

> This driver doesn't call the pm_runtime_put/get() operations needed to handle the

> debug power domain.  See the other CoreSight drivers for details. 


Sure, will do it.

> Also, from the conversation that followed the previous post we agreed that we wouldn't

> deal with CPUidle issues in this driver.  We deal with the CPU power domain

> using the EDPRCR register (like you did) and that's it.  System that don't honor that register

> can use other (external) means to solve this.  As such please remove the

> pm_qos_xyz() functions. 


From the previous discussion, Mike reminded us that the CPU power
domain design is quite SoC specific and usually the SoC has many
different low power states, e.g. besides CPU level and cluster level
low power states, vendors can also define SoC level low power states.
Any of these power states may ultimately impact the CPU power domain,
which is why I added this interface: to let users have the final
decision based on their platform.

We can rely on "nohlt" and "cpuidle.off=1" in the kernel command line
to disable all SoC low power states at boot time; or we can use the
sysfs node "echo 1 > /sys/devices/system/cpu/cpuX/cpuidle/stateX/disable"
to disable CPU low power states at runtime. But that means we need to
use different interfaces to control the CPU power domain at boot time
and at runtime, which is not nice for usability.

So this is why I added "idle_constraint" as a central place to control
the power domain for CPU debug purposes, and I also think this is more
friendly for hardware design, e.g. some platforms can keep partial
low power states enabled to save power and avoid overheating while
using this driver.

What do you think about this?

Thanks,
Leo Yan
--
To unsubscribe from this list: send the line "unsubscribe linux-arm-msm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Leo Yan March 29, 2017, 3:07 a.m. UTC | #3
Hi Suzuki,

On Mon, Mar 27, 2017 at 05:34:57PM +0100, Suzuki K Poulose wrote:
> On 25/03/17 18:23, Leo Yan wrote:


[...]

> Leo,

> 

> Thanks a lot for the quick rework. I don't fully understand (yet!) why we need the

> idle_constraint. I will leave it for Sudeep to comment on it, as he is the expert

> in that area. Some minor comments below.


Thanks a lot for quick reviewing :)

> >Signed-off-by: Leo Yan <leo.yan@linaro.org>

> >---

> > drivers/hwtracing/coresight/Kconfig               |  11 +

> > drivers/hwtracing/coresight/Makefile              |   1 +

> > drivers/hwtracing/coresight/coresight-cpu-debug.c | 704 ++++++++++++++++++++++

> > 3 files changed, 716 insertions(+)

> > create mode 100644 drivers/hwtracing/coresight/coresight-cpu-debug.c

> >

> >diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig

> >index 130cb21..18d7931 100644

> >--- a/drivers/hwtracing/coresight/Kconfig

> >+++ b/drivers/hwtracing/coresight/Kconfig

> >@@ -89,4 +89,15 @@ config CORESIGHT_STM

> > 	  logging useful software events or data coming from various entities

> > 	  in the system, possibly running different OSs

> >

> >+config CORESIGHT_CPU_DEBUG

> >+	tristate "CoreSight CPU Debug driver"

> >+	depends on ARM || ARM64

> >+	depends on DEBUG_FS

> >+	help

> >+	  This driver provides support for coresight debugging module. This

> >+	  is primarily used to dump sample-based profiling registers when

> >+	  system triggers panic, the driver will parse context registers so

> >+	  can quickly get to know program counter (PC), secure state,

> >+	  exception level, etc.

> 

> May be we should mention/warn the user about the possible caveats of using

> this feature to help him make a better decision ? And / Or we should add a documentation

> for it. We have collected some real good information over the discussions and

> it is a good idea to capture it somewhere.


Sure, I will add a documentation for this.

[...]

> >+static struct pm_qos_request debug_qos_req;

> >+static int idle_constraint = PM_QOS_DEFAULT_VALUE;

> >+module_param(idle_constraint, int, 0600);

> >+MODULE_PARM_DESC(idle_constraint, "Latency requirement in microseconds for CPU "

> >+		 "idle states (default is -1, which means have no limiation "

> >+		 "to CPU idle states; 0 means disabling all idle states; user "

> >+		 "can choose other platform dependent values so can disable "

> >+		 "specific idle states for the platform)");

> 

> Correct me if I am wrong,

> 

> All we want to do is disable the CPUIdle explicitly if the user knows that this

> could be a problem to use CPU debug on his platform. So, in effect, we should

> only be using idle_constraint = 0 or -1.

> 

> In which case, we could make it easier for the user to tell us, either

> 

>  0 - Don't do anything with CPUIdle (default)

>  1 - Disable CPUIdle for me as I know the platform has issues with CPU debug and CPUidle.


The reason for not using a bool flag is: an SoC usually has many idle
states, so if users want to partially enable some states they can set
the latency constraint accordingly.

But of course, we can change this to a binary value as you suggested,
which means turning on or turning off all states. The only reason to
use a latency value is that it is more friendly for hardware design,
e.g. some platforms can keep partial states enabled to save power and
avoid overheating while using this driver.

If you guys think this is a bit over design, I will follow up your
suggestion. I also have some replying in Mathieu's reviewing, please
help review as well.

> than explaining the miscrosecond latency etc and make the appropriate calls underneath.

> something like (not necessarily the same name) :

> 

> module_param(broken_with_cpuidle, bool, 0600);

> MODULE_PARAM_DESC(broken_with_cpuidle, "Specifies whether the CPU debug has issues with CPUIdle on"

> 				       " the platform. Non-zero value implies CPUIdle has to be"

> 				       " explicitly disabled.",);


[...]

> >+	/*

> >+	 * Unfortunately the CPU cannot be powered up, so return

> >+	 * back and later has no permission to access other

> >+	 * registers. For this case, should set 'idle_constraint'

> >+	 * to ensure CPU power domain is enabled!

> >+	 */

> >+	if (!(drvdata->edprsr & EDPRSR_PU)) {

> >+		pr_err("%s: power up request for CPU%d failed\n",

> >+			__func__, drvdata->cpu);

> >+		goto out;

> >+	}

> >+

> >+out_powered_up:

> >+	debug_os_unlock(drvdata);

> 

> Question: Do we need a matching debug_os_lock() once we are done ?


I have checked the ARMv8 ARM, but there is no detailed description for
this. I referred to the coresight-etmv4 code and Mike's pseudo code;
there are no debug_os_lock() related operations.

Mike, Mathieu, could you also help confirm this?

[...]

> >+static void debug_init_arch_data(void *info)

> >+{

> >+	struct debug_drvdata *drvdata = info;

> >+	u32 mode, pcsr_offset;

> >+

> >+	CS_UNLOCK(drvdata->base);

> >+

> >+	debug_os_unlock(drvdata);

> >+

> >+	/* Read device info */

> >+	drvdata->eddevid  = readl_relaxed(drvdata->base + EDDEVID);

> >+	drvdata->eddevid1 = readl_relaxed(drvdata->base + EDDEVID1);

> 

> As mentioned above, both of these registers are only need at init time to

> figure out the flags we set here. So we could remove them.

> 

> >+

> >+	CS_LOCK(drvdata->base);

> >+

> >+	/* Parse implementation feature */

> >+	mode = drvdata->eddevid & EDDEVID_PCSAMPLE_MODE;

> >+	pcsr_offset = drvdata->eddevid1 & EDDEVID1_PCSR_OFFSET_MASK;

> 

> 

> >+

> >+	if (mode == EDDEVID_IMPL_NONE) {

> >+		drvdata->edpcsr_present  = false;

> >+		drvdata->edcidsr_present = false;

> >+		drvdata->edvidsr_present = false;

> >+	} else if (mode == EDDEVID_IMPL_EDPCSR) {

> >+		drvdata->edpcsr_present  = true;

> >+		drvdata->edcidsr_present = false;

> >+		drvdata->edvidsr_present = false;

> >+	} else if (mode == EDDEVID_IMPL_EDPCSR_EDCIDSR) {

> >+		if (!IS_ENABLED(CONFIG_64BIT) &&

> >+			(pcsr_offset == EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32))

> >+			drvdata->edpcsr_present = false;

> >+		else

> >+			drvdata->edpcsr_present = true;

> 

> Sorry, I forgot why we do this check only in this mode. Shouldn't this be

> common to all modes (of course which implies PCSR is present) ?


No. PCSROffset is defined differently in ARMv7 and ARMv8; so finally we
simplify the PCSROffset values to:
0000 - Sample offset applies based on the instruction state (indicated by PCSR[0])
0001 - No offset applies.
0010 - No offset applies, but do not use in AArch32 mode!

So the corner case we need to handle is when the CPU runs in AArch32
mode and PCSROffset = 'b0010. In the other cases the PCSR should be present.

[...]

Other suggestions are good for me, will take them in next version.

Thanks,
Leo Yan
--
To unsubscribe from this list: send the line "unsubscribe linux-arm-msm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Suzuki K Poulose March 29, 2017, 9:07 a.m. UTC | #4
On 29/03/17 04:07, Leo Yan wrote:
> Hi Suzuki,

>

> On Mon, Mar 27, 2017 at 05:34:57PM +0100, Suzuki K Poulose wrote:

>> On 25/03/17 18:23, Leo Yan wrote:

>

> [...]

>

>> Leo,

>>

>> Thanks a lot for the quick rework. I don't fully understand (yet!) why we need the

>> idle_constraint. I will leave it for Sudeep to comment on it, as he is the expert

>> in that area. Some minor comments below.

>

> Thanks a lot for quick reviewing :)

>

>>> Signed-off-by: Leo Yan <leo.yan@linaro.org>

>>> ---

>>> drivers/hwtracing/coresight/Kconfig               |  11 +

>>> drivers/hwtracing/coresight/Makefile              |   1 +

>>> drivers/hwtracing/coresight/coresight-cpu-debug.c | 704 ++++++++++++++++++++++

>>> 3 files changed, 716 insertions(+)

>>> create mode 100644 drivers/hwtracing/coresight/coresight-cpu-debug.c

>>>

>>> diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig

>>> index 130cb21..18d7931 100644

>>> --- a/drivers/hwtracing/coresight/Kconfig

>>> +++ b/drivers/hwtracing/coresight/Kconfig

>>> @@ -89,4 +89,15 @@ config CORESIGHT_STM

>>> 	  logging useful software events or data coming from various entities

>>> 	  in the system, possibly running different OSs

>>>

>>> +config CORESIGHT_CPU_DEBUG

>>> +	tristate "CoreSight CPU Debug driver"

>>> +	depends on ARM || ARM64

>>> +	depends on DEBUG_FS

>>> +	help

>>> +	  This driver provides support for coresight debugging module. This

>>> +	  is primarily used to dump sample-based profiling registers when

>>> +	  system triggers panic, the driver will parse context registers so

>>> +	  can quickly get to know program counter (PC), secure state,

>>> +	  exception level, etc.

>>

>> May be we should mention/warn the user about the possible caveats of using

>> this feature to help him make a better decision ? And / Or we should add a documentation

>> for it. We have collected some real good information over the discussions and

>> it is a good idea to capture it somewhere.

>

> Sure, I will add a documentation for this.

>

> [...]

>

>>> +static struct pm_qos_request debug_qos_req;

>>> +static int idle_constraint = PM_QOS_DEFAULT_VALUE;

>>> +module_param(idle_constraint, int, 0600);

>>> +MODULE_PARM_DESC(idle_constraint, "Latency requirement in microseconds for CPU "

>>> +		 "idle states (default is -1, which means have no limiation "

>>> +		 "to CPU idle states; 0 means disabling all idle states; user "

>>> +		 "can choose other platform dependent values so can disable "

>>> +		 "specific idle states for the platform)");

>>

>> Correct me if I am wrong,

>>

>> All we want to do is disable the CPUIdle explicitly if the user knows that this

>> could be a problem to use CPU debug on his platform. So, in effect, we should

>> only be using idle_constraint = 0 or -1.

>>

>> In which case, we could make it easier for the user to tell us, either

>>

>>  0 - Don't do anything with CPUIdle (default)

>>  1 - Disable CPUIdle for me as I know the platform has issues with CPU debug and CPUidle.

>

> The reason for not using bool flag is: usually SoC may have many idle

> states, so if user wants to partially enable some states then can set

> the latency to constraint.

>

> But of course, we can change this to binary value as you suggested,

> this means turn on of turn off all states. The only one reason to use

> latency value is it is more friendly for hardware design, e.g. some

> platforms can enable partial states to save power and avoid overheat

> after using this driver.

>

> If you guys think this is a bit over design, I will follow up your

> suggestion. I also have some replying in Mathieu's reviewing, please

> help review as well.

>

>> than explaining the miscrosecond latency etc and make the appropriate calls underneath.

>> something like (not necessarily the same name) :

>>

>> module_param(broken_with_cpuidle, bool, 0600);

>> MODULE_PARAM_DESC(broken_with_cpuidle, "Specifies whether the CPU debug has issues with CPUIdle on"

>> 				       " the platform. Non-zero value implies CPUIdle has to be"

>> 				       " explicitly disabled.",);

>

> [...]

>

>>> +	/*

>>> +	 * Unfortunately the CPU cannot be powered up, so return

>>> +	 * back and later has no permission to access other

>>> +	 * registers. For this case, should set 'idle_constraint'

>>> +	 * to ensure CPU power domain is enabled!

>>> +	 */

>>> +	if (!(drvdata->edprsr & EDPRSR_PU)) {

>>> +		pr_err("%s: power up request for CPU%d failed\n",

>>> +			__func__, drvdata->cpu);

>>> +		goto out;

>>> +	}

>>> +

>>> +out_powered_up:

>>> +	debug_os_unlock(drvdata);

>>

>> Question: Do we need a matching debug_os_lock() once we are done ?

>

> I have checked ARM ARMv8, but there have no detailed description for

> this. I refered coresight-etmv4 code and Mike's pseudo code, ther have

> no debug_os_lock() related operations.

>

> Mike, Mathieu, could you also help confirm this?

>

> [...]

>

>>> +static void debug_init_arch_data(void *info)

>>> +{

>>> +	struct debug_drvdata *drvdata = info;

>>> +	u32 mode, pcsr_offset;

>>> +

>>> +	CS_UNLOCK(drvdata->base);

>>> +

>>> +	debug_os_unlock(drvdata);

>>> +

>>> +	/* Read device info */

>>> +	drvdata->eddevid  = readl_relaxed(drvdata->base + EDDEVID);

>>> +	drvdata->eddevid1 = readl_relaxed(drvdata->base + EDDEVID1);

>>

>> As mentioned above, both of these registers are only need at init time to

>> figure out the flags we set here. So we could remove them.

>>

>>> +

>>> +	CS_LOCK(drvdata->base);

>>> +

>>> +	/* Parse implementation feature */

>>> +	mode = drvdata->eddevid & EDDEVID_PCSAMPLE_MODE;

>>> +	pcsr_offset = drvdata->eddevid1 & EDDEVID1_PCSR_OFFSET_MASK;

>>

>>

>>> +

>>> +	if (mode == EDDEVID_IMPL_NONE) {

>>> +		drvdata->edpcsr_present  = false;

>>> +		drvdata->edcidsr_present = false;

>>> +		drvdata->edvidsr_present = false;

>>> +	} else if (mode == EDDEVID_IMPL_EDPCSR) {

>>> +		drvdata->edpcsr_present  = true;

>>> +		drvdata->edcidsr_present = false;

>>> +		drvdata->edvidsr_present = false;

>>> +	} else if (mode == EDDEVID_IMPL_EDPCSR_EDCIDSR) {

>>> +		if (!IS_ENABLED(CONFIG_64BIT) &&

>>> +			(pcsr_offset == EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32))

>>> +			drvdata->edpcsr_present = false;

>>> +		else

>>> +			drvdata->edpcsr_present = true;

>>

>> Sorry, I forgot why we do this check only in this mode. Shouldn't this be

>> common to all modes (of course which implies PCSR is present) ?

>

> No. PCSROffset is defined differently in ARMv7 and ARMv8; So finally we

> simplize PCSROffset value :

> 0000 - Sample offset applies based on the instruction state (indicated by PCSR[0])

> 0001 - No offset applies.

> 0010 - No offset applies, but do not use in AArch32 mode!

>

> So we need handle the corner case is when CPU runs AArch32 mode and

> PCSRoffset = 'b0010. Other cases the pcsr should be present.


I understand that reasoning. But my question is, why do we check for PCSROffset
only when mode == EDDEVID_IMPL_EDPCSR_EDCIDSR and not for say mode == EDDEVID_IMPL_EDPCSR or
any other mode where PCSR is present.

Suzuki
--
To unsubscribe from this list: send the line "unsubscribe linux-arm-msm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Leo Yan March 29, 2017, 10:27 a.m. UTC | #5
On Wed, Mar 29, 2017 at 10:07:07AM +0100, Suzuki K Poulose wrote:

[...]

> >>>+	if (mode == EDDEVID_IMPL_NONE) {

> >>>+		drvdata->edpcsr_present  = false;

> >>>+		drvdata->edcidsr_present = false;

> >>>+		drvdata->edvidsr_present = false;

> >>>+	} else if (mode == EDDEVID_IMPL_EDPCSR) {

> >>>+		drvdata->edpcsr_present  = true;

> >>>+		drvdata->edcidsr_present = false;

> >>>+		drvdata->edvidsr_present = false;

> >>>+	} else if (mode == EDDEVID_IMPL_EDPCSR_EDCIDSR) {

> >>>+		if (!IS_ENABLED(CONFIG_64BIT) &&

> >>>+			(pcsr_offset == EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32))

> >>>+			drvdata->edpcsr_present = false;

> >>>+		else

> >>>+			drvdata->edpcsr_present = true;

> >>

> >>Sorry, I forgot why we do this check only in this mode. Shouldn't this be

> >>common to all modes (of course which implies PCSR is present) ?

> >

> >No. PCSROffset is defined differently in ARMv7 and ARMv8; So finally we

> >simplize PCSROffset value :

> >0000 - Sample offset applies based on the instruction state (indicated by PCSR[0])

> >0001 - No offset applies.

> >0010 - No offset applies, but do not use in AArch32 mode!

> >

> >So we need handle the corner case is when CPU runs AArch32 mode and

> >PCSRoffset = 'b0010. Other cases the pcsr should be present.

> 

> I understand that reasoning. But my question is, why do we check for PCSROffset

> only when mode == EDDEVID_IMPL_EDPCSR_EDCIDSR and not for say mode == EDDEVID_IMPL_EDPCSR or

> any other mode where PCSR is present.


Sorry I misunderstood your question.

I made a mistake when I analyzed the possible combinations of mode and
PCSROffset, so I thought this was the only case to handle:
{ EDDEVID_IMPL_EDPCSR_EDCIDSR, EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32 }

The three combinations below can possibly exist; so you are right, I
should move this check out:
{ EDDEVID_IMPL_NONE,           EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32 }
{ EDDEVID_IMPL_EDPCSR_EDCIDSR, EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32 }
{ EDDEVID_IMPL_FULL,           EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32 }

Thanks,
Leo Yan
--
To unsubscribe from this list: send the line "unsubscribe linux-arm-msm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Suzuki K Poulose March 29, 2017, 10:31 a.m. UTC | #6
On 29/03/17 11:27, Leo Yan wrote:
> On Wed, Mar 29, 2017 at 10:07:07AM +0100, Suzuki K Poulose wrote:

>

> [...]

>

>>>>> +	if (mode == EDDEVID_IMPL_NONE) {

>>>>> +		drvdata->edpcsr_present  = false;

>>>>> +		drvdata->edcidsr_present = false;

>>>>> +		drvdata->edvidsr_present = false;

>>>>> +	} else if (mode == EDDEVID_IMPL_EDPCSR) {

>>>>> +		drvdata->edpcsr_present  = true;

>>>>> +		drvdata->edcidsr_present = false;

>>>>> +		drvdata->edvidsr_present = false;

>>>>> +	} else if (mode == EDDEVID_IMPL_EDPCSR_EDCIDSR) {

>>>>> +		if (!IS_ENABLED(CONFIG_64BIT) &&

>>>>> +			(pcsr_offset == EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32))

>>>>> +			drvdata->edpcsr_present = false;

>>>>> +		else

>>>>> +			drvdata->edpcsr_present = true;

>>>>

>>>> Sorry, I forgot why we do this check only in this mode. Shouldn't this be

>>>> common to all modes (of course which implies PCSR is present) ?

>>>

>>> No. PCSROffset is defined differently in ARMv7 and ARMv8; So finally we

>>> simplize PCSROffset value :

>>> 0000 - Sample offset applies based on the instruction state (indicated by PCSR[0])

>>> 0001 - No offset applies.

>>> 0010 - No offset applies, but do not use in AArch32 mode!

>>>

>>> So we need handle the corner case is when CPU runs AArch32 mode and

>>> PCSRoffset = 'b0010. Other cases the pcsr should be present.

>>

>> I understand that reasoning. But my question is, why do we check for PCSROffset

>> only when mode == EDDEVID_IMPL_EDPCSR_EDCIDSR and not for say mode == EDDEVID_IMPL_EDPCSR or

>> any other mode where PCSR is present.

>

> Sorry I misunderstood your question.

>

> I made mistake when I analyzed the possbile combination for mode and

> PCSROffset so I thought it's the only case should handle:

> { EDDEVID_IMPL_EDPCSR_EDCIDSR, EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32 }

>

> Below three combinations are possible to exist; so you are right, I

> should move this out for the checking:

> { EDDEVID_IMPL_NONE,           EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32 }


That need not be covered, as IMPL_NONE says PCSR is not implemented hence you
don't worry about anything as the functionality is missing. This should rather be:
EDDEVID_IMPL_EDPCSR, where only PCSR is implemented.

My switch...case suggestion makes it easier to do all this checking.


Thanks
Suzuki
--
To unsubscribe from this list: send the line "unsubscribe linux-arm-msm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Mike Leach March 29, 2017, 3:17 p.m. UTC | #7
On 29 March 2017 at 04:07, Leo Yan <leo.yan@linaro.org> wrote:
> Hi Suzuki,

>

> On Mon, Mar 27, 2017 at 05:34:57PM +0100, Suzuki K Poulose wrote:

>> On 25/03/17 18:23, Leo Yan wrote:

>

> [...]

>

>> Leo,

>>

>> Thanks a lot for the quick rework. I don't fully understand (yet!) why we need the

>> idle_constraint. I will leave it for Sudeep to comment on it, as he is the expert

>> in that area. Some minor comments below.

>

> Thanks a lot for quick reviewing :)

>

>> >Signed-off-by: Leo Yan <leo.yan@linaro.org>

>> >---

>> > drivers/hwtracing/coresight/Kconfig               |  11 +

>> > drivers/hwtracing/coresight/Makefile              |   1 +

>> > drivers/hwtracing/coresight/coresight-cpu-debug.c | 704 ++++++++++++++++++++++

>> > 3 files changed, 716 insertions(+)

>> > create mode 100644 drivers/hwtracing/coresight/coresight-cpu-debug.c

>> >

>> >diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig

>> >index 130cb21..18d7931 100644

>> >--- a/drivers/hwtracing/coresight/Kconfig

>> >+++ b/drivers/hwtracing/coresight/Kconfig

>> >@@ -89,4 +89,15 @@ config CORESIGHT_STM

>> >       logging useful software events or data coming from various entities

>> >       in the system, possibly running different OSs

>> >

>> >+config CORESIGHT_CPU_DEBUG

>> >+    tristate "CoreSight CPU Debug driver"

>> >+    depends on ARM || ARM64

>> >+    depends on DEBUG_FS

>> >+    help

>> >+      This driver provides support for coresight debugging module. This

>> >+      is primarily used to dump sample-based profiling registers when

>> >+      system triggers panic, the driver will parse context registers so

>> >+      can quickly get to know program counter (PC), secure state,

>> >+      exception level, etc.

>>

>> May be we should mention/warn the user about the possible caveats of using

>> this feature to help him make a better decision ? And / Or we should add a documentation

>> for it. We have collected some real good information over the discussions and

>> it is a good idea to capture it somewhere.

>

> Sure, I will add a documentation for this.

>

> [...]

>

>> >+static struct pm_qos_request debug_qos_req;

>> >+static int idle_constraint = PM_QOS_DEFAULT_VALUE;

>> >+module_param(idle_constraint, int, 0600);

>> >+MODULE_PARM_DESC(idle_constraint, "Latency requirement in microseconds for CPU "

>> >+             "idle states (default is -1, which means have no limiation "

>> >+             "to CPU idle states; 0 means disabling all idle states; user "

>> >+             "can choose other platform dependent values so can disable "

>> >+             "specific idle states for the platform)");

>>

>> Correct me if I am wrong,

>>

>> All we want to do is disable the CPUIdle explicitly if the user knows that this

>> could be a problem to use CPU debug on his platform. So, in effect, we should

>> only be using idle_constraint = 0 or -1.

>>

>> In which case, we could make it easier for the user to tell us, either

>>

>>  0 - Don't do anything with CPUIdle (default)

>>  1 - Disable CPUIdle for me as I know the platform has issues with CPU debug and CPUidle.

>

> The reason for not using bool flag is: usually SoC may have many idle

> states, so if user wants to partially enable some states then can set

> the latency to constraint.

>

> But of course, we can change this to binary value as you suggested,

> this means turn on of turn off all states. The only one reason to use

> latency value is it is more friendly for hardware design, e.g. some

> platforms can enable partial states to save power and avoid overheat

> after using this driver.

>

> If you guys think this is a bit over design, I will follow up your

> suggestion. I also have some replying in Mathieu's reviewing, please

> help review as well.

>

>> than explaining the miscrosecond latency etc and make the appropriate calls underneath.

>> something like (not necessarily the same name) :

>>

>> module_param(broken_with_cpuidle, bool, 0600);

>> MODULE_PARAM_DESC(broken_with_cpuidle, "Specifies whether the CPU debug has issues with CPUIdle on"

>>                                      " the platform. Non-zero value implies CPUIdle has to be"

>>                                      " explicitly disabled.",);

>

> [...]

>

>> >+    /*

>> >+     * Unfortunately the CPU cannot be powered up, so return

>> >+     * back and later has no permission to access other

>> >+     * registers. For this case, should set 'idle_constraint'

>> >+     * to ensure CPU power domain is enabled!

>> >+     */

>> >+    if (!(drvdata->edprsr & EDPRSR_PU)) {

>> >+            pr_err("%s: power up request for CPU%d failed\n",

>> >+                    __func__, drvdata->cpu);

>> >+            goto out;

>> >+    }

>> >+

>> >+out_powered_up:

>> >+    debug_os_unlock(drvdata);

>>

>> Question: Do we need a matching debug_os_lock() once we are done ?

>

> I have checked ARM ARMv8, but there have no detailed description for

> this. I refered coresight-etmv4 code and Mike's pseudo code, ther have

> no debug_os_lock() related operations.

>

> Mike, Mathieu, could you also help confirm this?

>


Debug OS lock / unlock allows the power management code running on the
core to lock out the external debugger while the debug registers are
saved/restored during a core power event.

e.g. A sequence such as this might occur in a correctly programmed system....

debug_os_lock()
save_debug_regs() // visible from core power domain - incl breakpoints etc
save_etm_regs()
... // other stuff prior to core power down,
<power_down_core>

Followed by...

<power_up_core>
restore_etm_regs()
restore_debug_regs() // visible from core power domain - incl breakpoints etc
debug_os_unlock()

The value is 1 (locked) if cold resetting into AArch64 - it is
expected that some system software will set this to 0 as part of the
boot process.
The lock prevents write access to the external debug registers so we
need to clear it to set up the external debug registers we are using.
This suggests that it should be restored as we found it when done.

Mike

> [...]

>

>> >+static void debug_init_arch_data(void *info)

>> >+{

>> >+    struct debug_drvdata *drvdata = info;

>> >+    u32 mode, pcsr_offset;

>> >+

>> >+    CS_UNLOCK(drvdata->base);

>> >+

>> >+    debug_os_unlock(drvdata);

>> >+

>> >+    /* Read device info */

>> >+    drvdata->eddevid  = readl_relaxed(drvdata->base + EDDEVID);

>> >+    drvdata->eddevid1 = readl_relaxed(drvdata->base + EDDEVID1);

>>

>> As mentioned above, both of these registers are only need at init time to

>> figure out the flags we set here. So we could remove them.

>>

>> >+

>> >+    CS_LOCK(drvdata->base);

>> >+

>> >+    /* Parse implementation feature */

>> >+    mode = drvdata->eddevid & EDDEVID_PCSAMPLE_MODE;

>> >+    pcsr_offset = drvdata->eddevid1 & EDDEVID1_PCSR_OFFSET_MASK;

>>

>>

>> >+

>> >+    if (mode == EDDEVID_IMPL_NONE) {

>> >+            drvdata->edpcsr_present  = false;

>> >+            drvdata->edcidsr_present = false;

>> >+            drvdata->edvidsr_present = false;

>> >+    } else if (mode == EDDEVID_IMPL_EDPCSR) {

>> >+            drvdata->edpcsr_present  = true;

>> >+            drvdata->edcidsr_present = false;

>> >+            drvdata->edvidsr_present = false;

>> >+    } else if (mode == EDDEVID_IMPL_EDPCSR_EDCIDSR) {

>> >+            if (!IS_ENABLED(CONFIG_64BIT) &&

>> >+                    (pcsr_offset == EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32))

>> >+                    drvdata->edpcsr_present = false;

>> >+            else

>> >+                    drvdata->edpcsr_present = true;

>>

>> Sorry, I forgot why we do this check only in this mode. Shouldn't this be

>> common to all modes (of course which implies PCSR is present) ?

>

> No. PCSROffset is defined differently in ARMv7 and ARMv8; So finally we

> simplize PCSROffset value :

> 0000 - Sample offset applies based on the instruction state (indicated by PCSR[0])

> 0001 - No offset applies.

> 0010 - No offset applies, but do not use in AArch32 mode!

>

> So we need handle the corner case is when CPU runs AArch32 mode and

> PCSRoffset = 'b0010. Other cases the pcsr should be present.

>

> [...]

>

> Other suggestions are good for me, will take them in next version.

>

> Thanks,

> Leo Yan




-- 
Mike Leach
Principal Engineer, ARM Ltd.
Blackburn Design Centre. UK
--
To unsubscribe from this list: send the line "unsubscribe linux-arm-msm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Leo Yan March 30, 2017, 1:03 a.m. UTC | #8
On Wed, Mar 29, 2017 at 03:56:23PM +0100, Mike Leach wrote:

[...]

> >> > +   /*

> >> > +    * Unfortunately the CPU cannot be powered up, so return

> >> > +    * back and later has no permission to access other

> >> > +    * registers. For this case, should set 'idle_constraint'

> >> > +    * to ensure CPU power domain is enabled!

> >> > +    */

> >> > +   if (!(drvdata->edprsr & EDPRSR_PU)) {

> >> > +           pr_err("%s: power up request for CPU%d failed\n",

> >> > +                   __func__, drvdata->cpu);

> >> > +           goto out;

> >> > +   }

> >> > +

> >> > +out_powered_up:

> >> > +   debug_os_unlock(drvdata);

> >> > +

> >> > +   /*

> >> > +    * At this point the CPU is powered up, so set the no powerdown

> >> > +    * request bit so we don't lose power and emulate power down.

> >> > +    */

> >> > +   drvdata->edprsr = readl(drvdata->base + EDPRCR);

> >> > +   drvdata->edprsr |= EDPRCR_COREPURQ | EDPRCR_CORENPDRQ;

> >>

> >> If we are here the core is already up.  Shouldn't we need to set

> >> EDPRCR_CORENPDRQ only?

> >

> > Yeah. Will fix.

> 

> No - EDPRCR_COREPURQ and EDPRCR_CORENPDRQ have different semantics and purposes

> 

> EDPRCR_COREPURQ is in the debug power domain an is tied to an external

> debug request that should be an input to the external (to the PE)

> system power controller.

> The requirement is that the system power controller powers up the core

> domain and does not power it down while it remains asserted.

> 

> EDPRCR_CORENPDRQ is in the core power domain and thus to the specific

> core only. This ensures that any power control software running on

> that core should emulate a power down if this is set to one.


I'm curious about the exact meaning of "power control software".

Does it mean EDPRCR_CORENPDRQ should be checked by the kernel or by PSCI-like
code in ARM Trusted Firmware, to avoid running the CPU power-off flow?

Or will EDPRCR_CORENPDRQ assert an external CPU signal to notify the power
controller, so that the power controller emulates a power down?

> We cannot know the power control design of the system, so the safe

> solution is to set both bits.


Thanks a lot for the suggestion. Will set both bits.

Thanks,
Leo Yan
--
To unsubscribe from this list: send the line "unsubscribe linux-arm-msm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Leo Yan March 30, 2017, 1:18 a.m. UTC | #9
On Wed, Mar 29, 2017 at 04:17:19PM +0100, Mike Leach wrote:

[...]

> >> >+out_powered_up:

> >> >+    debug_os_unlock(drvdata);

> >>

> >> Question: Do we need a matching debug_os_lock() once we are done ?

> >

> > I have checked ARM ARMv8, but there have no detailed description for

> > this. I refered coresight-etmv4 code and Mike's pseudo code, ther have

> > no debug_os_lock() related operations.

> >

> > Mike, Mathieu, could you also help confirm this?

> >

> 

> Debug OS lock / unlock allows the power management code running on the

> core to lock out the external debugger while the debug registers are

> saved/restored during a core power event.

> 

> e.g. A sequence such as this might occur in a correctly programmed system....

> 

> debug_os_lock()

> save_debug_regs() // visible from core power domain - incl breakpoints etc

> save_etm_regs()

> ... // other stuff prior to core power down,

> <power_down_core>

> 

> Followed by...

> 

> <power_up_core>

> restore_etm_regs()

> restore_debug_regs() // visible from core power domain - incl breakpoints etc

> debug_os_unlock()

> 

> The value is 1 (locked) if cold resetting into AArch64 - it is

> expected that some system software will set this to 0 as part of the

> boot process.

> The lock prevents write access to the external debug registers so we

> need to clear it to set up the external debug registers we are using.


This description conflicts with the restore flow above. During
restore_debug_regs(), the OS lock is still locked, so how can it write the
external debug registers to restore the context?

> This suggests that it should be restored as we found it when done.


Thanks,
Leo Yan
--
To unsubscribe from this list: send the line "unsubscribe linux-arm-msm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Sudeep Holla March 30, 2017, 3:47 p.m. UTC | #10
On 29/03/17 15:56, Mike Leach wrote:

[...]
> 

> No - EDPRCR_COREPURQ and EDPRCR_CORENPDRQ have different semantics and purposes

> 

> EDPRCR_COREPURQ is in the debug power domain an is tied to an external

> debug request that should be an input to the external (to the PE)

> system power controller.

> The requirement is that the system power controller powers up the core

> domain and does not power it down while it remains asserted.

> 

> EDPRCR_CORENPDRQ is in the core power domain and thus to the specific

> core only. This ensures that any power control software running on

> that core should emulate a power down if this is set to one.

> 

> We cannot know the power control design of the system, so the safe

> solution is to set both bits.

> 


+1

I agree that's the safe bet.

-- 
Regards,
Sudeep
--
To unsubscribe from this list: send the line "unsubscribe linux-arm-msm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
diff mbox series

Patch

diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index 130cb21..18d7931 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -89,4 +89,15 @@  config CORESIGHT_STM
 	  logging useful software events or data coming from various entities
 	  in the system, possibly running different OSs
 
+config CORESIGHT_CPU_DEBUG
+	tristate "CoreSight CPU Debug driver"
+	depends on ARM || ARM64
+	depends on DEBUG_FS
+	help
+	  This driver provides support for the CoreSight CPU debug module.
+	  It is primarily used to dump sample-based profiling registers
+	  when the system triggers a panic; the driver parses the context
+	  registers so it can quickly determine the program counter (PC),
+	  secure state, exception level, etc.
+
 endif
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index af480d9..433d590 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -16,3 +16,4 @@  obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o \
 					coresight-etm4x-sysfs.o
 obj-$(CONFIG_CORESIGHT_QCOM_REPLICATOR) += coresight-replicator-qcom.o
 obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o
+obj-$(CONFIG_CORESIGHT_CPU_DEBUG) += coresight-cpu-debug.o
diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
new file mode 100644
index 0000000..fbec1d1
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
@@ -0,0 +1,704 @@ 
+/*
+ * Copyright (c) 2017 Linaro Limited. All rights reserved.
+ *
+ * Author: Leo Yan <leo.yan@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#include <linux/amba/bus.h>
+#include <linux/coresight.h>
+#include <linux/cpu.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pm_qos.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#include "coresight-priv.h"
+
+#define EDPCSR				0x0A0
+#define EDCIDSR				0x0A4
+#define EDVIDSR				0x0A8
+#define EDPCSR_HI			0x0AC
+#define EDOSLAR				0x300
+#define EDPRCR				0x310
+#define EDPRSR				0x314
+#define EDDEVID1			0xFC4
+#define EDDEVID				0xFC8
+
+#define EDPCSR_PROHIBITED		0xFFFFFFFF
+
+/* bits definition for EDPCSR */
+#ifndef CONFIG_64BIT
+#define EDPCSR_THUMB			BIT(0)
+#define EDPCSR_ARM_INST_MASK		GENMASK(31, 2)
+#define EDPCSR_THUMB_INST_MASK		GENMASK(31, 1)
+#endif
+
+/* bits definition for EDPRCR */
+#define EDPRCR_COREPURQ			BIT(3)
+#define EDPRCR_CORENPDRQ		BIT(0)
+
+/* bits definition for EDPRSR */
+#define EDPRSR_DLK			BIT(6)
+#define EDPRSR_PU			BIT(0)
+
+/* bits definition for EDVIDSR */
+#define EDVIDSR_NS			BIT(31)
+#define EDVIDSR_E2			BIT(30)
+#define EDVIDSR_E3			BIT(29)
+#define EDVIDSR_HV			BIT(28)
+#define EDVIDSR_VMID			GENMASK(7, 0)
+
+/*
+ * bits definition for EDDEVID1:PSCROffset
+ *
+ * NOTE: armv8 and armv7 have different definition for the register,
+ * so consolidate the bits definition as below:
+ *
+ * 0b0000 - Sample offset applies based on the instruction state, we
+ *          rely on EDDEVID to check if EDPCSR is implemented or not
+ * 0b0001 - No offset applies.
+ * 0b0010 - No offset applies, but do not use in AArch32 mode
+ *
+ */
+#define EDDEVID1_PCSR_OFFSET_MASK	GENMASK(3, 0)
+#define EDDEVID1_PCSR_OFFSET_INS_SET	(0x0)
+#define EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32	(0x2)
+
+/* bits definition for EDDEVID */
+#define EDDEVID_PCSAMPLE_MODE		GENMASK(3, 0)
+#define EDDEVID_IMPL_NONE		(0x0)
+#define EDDEVID_IMPL_EDPCSR		(0x1)
+#define EDDEVID_IMPL_EDPCSR_EDCIDSR	(0x2)
+#define EDDEVID_IMPL_FULL		(0x3)
+
+#define DEBUG_WAIT_TIMEOUT		32
+
+struct debug_drvdata {
+	void __iomem	*base;
+	struct device	*dev;
+	int		cpu;
+
+	bool		edpcsr_present;
+	bool		edcidsr_present;
+	bool		edvidsr_present;
+	bool		pc_has_offset;
+
+	u32		eddevid;
+	u32		eddevid1;
+
+	u32		edpcsr;
+	u32		edpcsr_hi;
+	u32		edprcr;
+	u32		edprsr;
+	u32		edvidsr;
+	u32		edcidsr;
+};
+
+/* Serializes the debugfs knob handlers against each other */
+static DEFINE_MUTEX(debug_lock);
+/* Per-CPU pointer to the probed debug module, NULL if not probed */
+static DEFINE_PER_CPU(struct debug_drvdata *, debug_drvdata);
+/* Number of successfully probed devices; shared state lives while > 0 */
+static int debug_count;
+static struct dentry *debug_debugfs_dir;
+
+static struct pm_qos_request debug_qos_req;
+static int idle_constraint = PM_QOS_DEFAULT_VALUE;
+module_param(idle_constraint, int, 0600);
+MODULE_PARM_DESC(idle_constraint, "Latency requirement in microseconds for CPU "
+		 "idle states (default is -1, which means have no limitation "
+		 "to CPU idle states; 0 means disabling all idle states; user "
+		 "can choose other platform dependent values so can disable "
+		 "specific idle states for the platform)");
+
+static bool debug_enable;
+module_param_named(enable, debug_enable, bool, 0600);
+MODULE_PARM_DESC(enable, "Knob to enable debug functionality "
+		 "(default is 0, which means is disabled by default)");
+
+/*
+ * Clear the OS Lock (EDOSLAR) so software can write the external debug
+ * registers; the lock may be set after a cold reset into AArch64.
+ */
+static void debug_os_unlock(struct debug_drvdata *drvdata)
+{
+	/* Unlocks the debug registers */
+	writel_relaxed(0x0, drvdata->base + EDOSLAR);
+	wmb();	/* Ensure the unlock lands before later register accesses */
+}
+
+/*
+ * According to ARM DDI 0487A.k, before access external debug
+ * registers should firstly check the access permission; if any
+ * below condition has been met then cannot access debug
+ * registers to avoid lockup issue:
+ *
+ * - CPU power domain is powered off;
+ * - The OS Double Lock is locked;
+ *
+ * By checking EDPRSR can get to know if meet these conditions.
+ */
+static bool debug_access_permitted(struct debug_drvdata *drvdata)
+{
+	/*
+	 * Access is permitted only when the CPU power domain is up and
+	 * the OS Double Lock is clear, per the cached EDPRSR snapshot.
+	 */
+	return (drvdata->edprsr & EDPRSR_PU) &&
+	       !(drvdata->edprsr & EDPRSR_DLK);
+}
+
+/*
+ * Try to ensure the CPU power domain stays up: if the core is down,
+ * assert EDPRCR.COREPURQ toward the power controller and poll EDPRSR.PU;
+ * once powered, set EDPRCR.CORENPDRQ so software emulates power down.
+ *
+ * Note: EDPRCR values are cached in drvdata->edprcr (the original code
+ * mistakenly reused drvdata->edprsr for this, clobbering the EDPRSR
+ * snapshot consumed by debug_access_permitted()/debug_dump_regs()).
+ */
+static void debug_force_cpu_powered_up(struct debug_drvdata *drvdata)
+{
+	int timeout = DEBUG_WAIT_TIMEOUT;
+
+	drvdata->edprsr = readl_relaxed(drvdata->base + EDPRSR);
+
+	CS_UNLOCK(drvdata->base);
+
+	/* Bail out early if the CPU is already powered up */
+	if (drvdata->edprsr & EDPRSR_PU)
+		goto out_powered_up;
+
+	/*
+	 * Send request to power management controller and assert
+	 * DBGPWRUPREQ signal; if power management controller has
+	 * sane implementation, it should enable CPU power domain
+	 * in case CPU is in low power state.
+	 */
+	drvdata->edprcr = readl(drvdata->base + EDPRCR);
+	drvdata->edprcr |= EDPRCR_COREPURQ;
+	writel(drvdata->edprcr, drvdata->base + EDPRCR);
+
+	/* Wait for CPU to be powered up (timeout~=32ms) */
+	while (timeout--) {
+		drvdata->edprsr = readl_relaxed(drvdata->base + EDPRSR);
+		if (drvdata->edprsr & EDPRSR_PU)
+			break;
+
+		usleep_range(1000, 2000);
+	}
+
+	/*
+	 * Unfortunately the CPU cannot be powered up, so return
+	 * back and later has no permission to access other
+	 * registers. For this case, should set 'idle_constraint'
+	 * to ensure CPU power domain is enabled!
+	 */
+	if (!(drvdata->edprsr & EDPRSR_PU)) {
+		pr_err("%s: power up request for CPU%d failed\n",
+			__func__, drvdata->cpu);
+		goto out;
+	}
+
+out_powered_up:
+	debug_os_unlock(drvdata);
+
+	/*
+	 * At this point the CPU is powered up, so set the no powerdown
+	 * request bit so we don't lose power and emulate power down.
+	 * COREPURQ is kept asserted as well: the two bits live in
+	 * different power domains and have different semantics, so
+	 * setting both is the safe choice on any power controller design.
+	 */
+	drvdata->edprcr = readl(drvdata->base + EDPRCR);
+	drvdata->edprcr |= EDPRCR_COREPURQ | EDPRCR_CORENPDRQ;
+	writel(drvdata->edprcr, drvdata->base + EDPRCR);
+
+out:
+	CS_LOCK(drvdata->base);
+}
+
+/*
+ * Snapshot the sample-based profiling registers for one CPU into
+ * drvdata, taking care of power/lock preconditions first. Called from
+ * the panic notifier path.
+ */
+static void debug_read_regs(struct debug_drvdata *drvdata)
+{
+	/*
+	 * Ensure the CPU power domain is enabled so that the debug
+	 * registers are accessible.
+	 */
+	debug_force_cpu_powered_up(drvdata);
+
+	if (!debug_access_permitted(drvdata))
+		return;
+
+	CS_UNLOCK(drvdata->base);
+
+	debug_os_unlock(drvdata);
+
+	drvdata->edpcsr = readl_relaxed(drvdata->base + EDPCSR);
+
+	/*
+	 * As described in ARM DDI 0487A.k, if the processing
+	 * element (PE) is in debug state, or sample-based
+	 * profiling is prohibited, EDPCSR reads as 0xFFFFFFFF;
+	 * EDCIDSR, EDVIDSR and EDPCSR_HI registers also become
+	 * UNKNOWN state. So directly bail out for this case.
+	 */
+	if (drvdata->edpcsr == EDPCSR_PROHIBITED)
+		goto out;
+
+	/*
+	 * A read of the EDPCSR normally has the side-effect of
+	 * indirectly writing to EDCIDSR, EDVIDSR and EDPCSR_HI;
+	 * at this point it's safe to read value from them.
+	 */
+	if (IS_ENABLED(CONFIG_64BIT))
+		drvdata->edpcsr_hi = readl_relaxed(drvdata->base + EDPCSR_HI);
+
+	if (drvdata->edcidsr_present)
+		drvdata->edcidsr = readl_relaxed(drvdata->base + EDCIDSR);
+
+	if (drvdata->edvidsr_present)
+		drvdata->edvidsr = readl_relaxed(drvdata->base + EDVIDSR);
+
+out:
+	CS_LOCK(drvdata->base);
+}
+
+#ifndef CONFIG_64BIT
+/*
+ * Convert a raw EDPCSR sample into the sampled PC on AArch32: strip
+ * the instruction-set flag bits and, when the implementation reports
+ * an offset (EDDEVID1.PCSROffset indicates instruction-state based
+ * sampling), undo the fixed 8-byte (ARM) / 4-byte (Thumb) skid.
+ */
+static unsigned long debug_adjust_pc(struct debug_drvdata *drvdata,
+				     unsigned long pc)
+{
+	unsigned long off_arm = drvdata->pc_has_offset ? 8 : 0;
+	unsigned long off_thumb = drvdata->pc_has_offset ? 4 : 0;
+
+	/* Thumb state: bit 0 flags it and is not part of the address */
+	if (pc & EDPCSR_THUMB)
+		return (pc & EDPCSR_THUMB_INST_MASK) - off_thumb;
+
+	/*
+	 * ARM state: a sample that is not 4-byte aligned means the
+	 * offset is IMPLEMENTATION DEFINED, so keep the raw value and
+	 * print a notice instead of adjusting it.
+	 */
+	if (pc & BIT(1)) {
+		pr_emerg("Instruction offset is implementation defined\n");
+		return pc;
+	}
+
+	return (pc & EDPCSR_ARM_INST_MASK) - off_arm;
+}
+#endif
+
+/*
+ * Print the register snapshot gathered by debug_read_regs() for one
+ * CPU: power/lock status, sampled PC (symbolized), and — when
+ * implemented — context ID and virtual context (EL, security state,
+ * VMID) information.
+ */
+static void debug_dump_regs(struct debug_drvdata *drvdata)
+{
+	unsigned long pc;
+
+	pr_emerg("\tEDPRSR:  %08x (Power:%s DLK:%s)\n", drvdata->edprsr,
+		 drvdata->edprsr & EDPRSR_PU ? "On" : "Off",
+		 drvdata->edprsr & EDPRSR_DLK ? "Lock" : "Unlock");
+
+	if (!debug_access_permitted(drvdata)) {
+		pr_emerg("No permission to access debug registers!\n");
+		return;
+	}
+
+	if (drvdata->edpcsr == EDPCSR_PROHIBITED) {
+		pr_emerg("CPU is in Debug state or profiling is prohibited!\n");
+		return;
+	}
+
+#ifdef CONFIG_64BIT
+	/* 64-bit PC is split across EDPCSR (low) and EDPCSR_HI (high) */
+	pc = (unsigned long)drvdata->edpcsr_hi << 32 |
+	     (unsigned long)drvdata->edpcsr;
+#else
+	/* 32-bit PC may need the sampling-offset adjustment */
+	pc = debug_adjust_pc(drvdata, (unsigned long)drvdata->edpcsr);
+#endif
+
+	pr_emerg("\tEDPCSR:  [<%p>] %pS\n", (void *)pc, (void *)pc);
+
+	if (drvdata->edcidsr_present)
+		pr_emerg("\tEDCIDSR: %08x\n", drvdata->edcidsr);
+
+	if (drvdata->edvidsr_present)
+		pr_emerg("\tEDVIDSR: %08x (State:%s Mode:%s Width:%dbits VMID:%x)\n",
+			 drvdata->edvidsr,
+			 drvdata->edvidsr & EDVIDSR_NS ? "Non-secure" : "Secure",
+			 drvdata->edvidsr & EDVIDSR_E3 ? "EL3" :
+				(drvdata->edvidsr & EDVIDSR_E2 ? "EL2" : "EL1/0"),
+			 drvdata->edvidsr & EDVIDSR_HV ? 64 : 32,
+			 drvdata->edvidsr & (u32)EDVIDSR_VMID);
+}
+
+/*
+ * Runs on the probed CPU via smp_call_function_single(): read EDDEVID
+ * and EDDEVID1 and decode which sample-based profiling registers
+ * (EDPCSR/EDCIDSR/EDVIDSR) this debug module implements.
+ */
+static void debug_init_arch_data(void *info)
+{
+	struct debug_drvdata *drvdata = info;
+	u32 mode, pcsr_offset;
+
+	CS_UNLOCK(drvdata->base);
+
+	debug_os_unlock(drvdata);
+
+	/* Read device info */
+	drvdata->eddevid  = readl_relaxed(drvdata->base + EDDEVID);
+	drvdata->eddevid1 = readl_relaxed(drvdata->base + EDDEVID1);
+
+	CS_LOCK(drvdata->base);
+
+	/* Parse implementation feature */
+	mode = drvdata->eddevid & EDDEVID_PCSAMPLE_MODE;
+	pcsr_offset = drvdata->eddevid1 & EDDEVID1_PCSR_OFFSET_MASK;
+
+	/*
+	 * The presence flags default to false (drvdata is zero
+	 * allocated); each case falls through to pick up the registers
+	 * implied by the lower sampling modes.
+	 */
+	switch (mode) {
+	case EDDEVID_IMPL_FULL:
+		drvdata->edvidsr_present = true;
+		/* Fall through */
+	case EDDEVID_IMPL_EDPCSR_EDCIDSR:
+		drvdata->edcidsr_present = true;
+		/* Fall through */
+	case EDDEVID_IMPL_EDPCSR:
+		/*
+		 * In ARM DDI 0487A.k, EDDEVID1.PCSROffset == 0x2 means
+		 * the debug module does not sample the instruction set
+		 * state when an armv8 CPU runs in AArch32 state, so
+		 * EDPCSR cannot be used there. Apply this check for
+		 * every mode that implements EDPCSR, not only the
+		 * EDPCSR+EDCIDSR mode as before.
+		 */
+		if (!IS_ENABLED(CONFIG_64BIT) &&
+		    (pcsr_offset == EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32))
+			drvdata->edpcsr_present = false;
+		else
+			drvdata->edpcsr_present = true;
+		break;
+	default:
+		break;
+	}
+
+	if (IS_ENABLED(CONFIG_64BIT))
+		drvdata->pc_has_offset = false;
+	else
+		drvdata->pc_has_offset =
+			(pcsr_offset == EDDEVID1_PCSR_OFFSET_INS_SET);
+}
+
+/*
+ * Dump out information on panic.
+ *
+ * Runs in atomic (panic) context from the panic notifier chain; it
+ * walks every possible CPU and dumps the debug registers of each CPU
+ * whose debug module was probed.
+ */
+static int debug_notifier_call(struct notifier_block *self,
+			       unsigned long v, void *p)
+{
+	int cpu;
+	struct debug_drvdata *drvdata;
+
+	pr_emerg("ARM external debug module:\n");
+
+	for_each_possible_cpu(cpu) {
+		/* Skip CPUs whose debug module was never probed */
+		drvdata = per_cpu(debug_drvdata, cpu);
+		if (!drvdata)
+			continue;
+
+		pr_emerg("CPU[%d]:\n", drvdata->cpu);
+
+		debug_read_regs(drvdata);
+		debug_dump_regs(drvdata);
+	}
+
+	return 0;
+}
+
+/* Hooked into the panic notifier chain while the feature is enabled */
+static struct notifier_block debug_notifier = {
+	.notifier_call = debug_notifier_call,
+};
+
+/*
+ * Enable the panic-dump functionality: pin CPU idle states according
+ * to 'idle_constraint' and register the panic notifier. On failure the
+ * PM QoS request is withdrawn again.
+ */
+static int debug_enable_func(void)
+{
+	int ret;
+
+	pm_qos_add_request(&debug_qos_req,
+		PM_QOS_CPU_DMA_LATENCY, idle_constraint);
+
+	ret = atomic_notifier_chain_register(&panic_notifier_list,
+					     &debug_notifier);
+	if (ret)
+		pm_qos_remove_request(&debug_qos_req);
+
+	return ret;
+}
+
+/* Undo debug_enable_func(): drop the notifier and the QoS request */
+static void debug_disable_func(void)
+{
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+					 &debug_notifier);
+	pm_qos_remove_request(&debug_qos_req);
+}
+
+/*
+ * debugfs "enable" knob write handler: parse a 0/1 value and switch
+ * the panic-dump functionality on or off; a write that does not change
+ * the current state is a no-op. Returns the consumed byte count on
+ * success or a negative errno.
+ */
+static ssize_t debug_func_knob_write(struct file *f,
+		const char __user *buf, size_t count, loff_t *ppos)
+{
+	u8 on;
+	int ret;
+
+	ret = kstrtou8_from_user(buf, count, 2, &on);
+	if (ret)
+		return ret;
+
+	mutex_lock(&debug_lock);
+
+	/* Nothing to do if the requested state matches the current one */
+	if (!!on == debug_enable)
+		goto out;
+
+	if (on) {
+		ret = debug_enable_func();
+		if (ret) {
+			pr_err("%s: unable to enable debug function: %d\n",
+			       __func__, ret);
+			goto err;
+		}
+	} else {
+		debug_disable_func();
+	}
+
+	debug_enable = on;
+
+out:
+	ret = count;
+err:
+	mutex_unlock(&debug_lock);
+	return ret;
+}
+
+/*
+ * debugfs "enable" knob read handler: report '0' or '1' plus a newline
+ * (no NUL terminator is needed; simple_read_from_buffer copies raw
+ * bytes).
+ */
+static ssize_t debug_func_knob_read(struct file *f,
+		char __user *ubuf, size_t count, loff_t *ppos)
+{
+	char val[] = { '0' + debug_enable, '\n' };
+
+	return simple_read_from_buffer(ubuf, count, ppos, val, sizeof(val));
+}
+
+/*
+ * debugfs "idle_constraint" write handler: update the latency
+ * constraint (microseconds) and, when the feature is active, push the
+ * new value into the existing PM QoS request.
+ */
+static ssize_t debug_idle_constraint_write(struct file *f,
+		const char __user *buf, size_t count, loff_t *ppos)
+{
+	int val;
+	ssize_t ret;
+
+	ret = kstrtoint_from_user(buf, count, 10, &val);
+	if (ret)
+		return ret;
+
+	mutex_lock(&debug_lock);
+	idle_constraint = val;
+
+	/* Only update QoS if the request was added by debug_enable_func() */
+	if (debug_enable)
+		pm_qos_update_request(&debug_qos_req, idle_constraint);
+
+	mutex_unlock(&debug_lock);
+	return count;
+}
+
+/*
+ * debugfs "idle_constraint" read handler: report the current latency
+ * constraint in microseconds (-1 means no constraint).
+ */
+static ssize_t debug_idle_constraint_read(struct file *f,
+		char __user *ubuf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	int len;
+
+	if (*ppos)
+		return 0;
+
+	/* Bounded formatting; a decimal int always fits in 32 bytes */
+	len = scnprintf(buf, sizeof(buf), "%d\n", idle_constraint);
+	return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+/* Operations for the debugfs "enable" knob */
+static const struct file_operations debug_func_knob_fops = {
+	.open	= simple_open,
+	.read	= debug_func_knob_read,
+	.write	= debug_func_knob_write,
+};
+
+/* Operations for the debugfs "idle_constraint" knob */
+static const struct file_operations debug_idle_constraint_fops = {
+	.open	= simple_open,
+	.read	= debug_idle_constraint_read,
+	.write	= debug_idle_constraint_write,
+};
+
+/*
+ * Create the debugfs interface ("enable" and "idle_constraint" knobs)
+ * and, when the 'enable' module parameter was set, activate the
+ * functionality at boot. Called once, from the first successful probe.
+ */
+static int debug_func_init(void)
+{
+	struct dentry *file;
+	int ret;
+
+	/* Create debugfs node */
+	debug_debugfs_dir = debugfs_create_dir("coresight_cpu_debug", NULL);
+	if (!debug_debugfs_dir) {
+		pr_err("%s: unable to create debugfs directory\n", __func__);
+		return -ENOMEM;
+	}
+
+	file = debugfs_create_file("enable", S_IRUGO | S_IWUSR,
+			debug_debugfs_dir, NULL, &debug_func_knob_fops);
+	if (!file) {
+		pr_err("%s: unable to create enable knob file\n", __func__);
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	file = debugfs_create_file("idle_constraint", S_IRUGO | S_IWUSR,
+			debug_debugfs_dir, NULL, &debug_idle_constraint_fops);
+	if (!file) {
+		pr_err("%s: unable to create idle constraint file\n", __func__);
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	/* Functionality can also be enabled later via the debugfs knob */
+	if (!debug_enable)
+		return 0;
+
+	/* Enable debug module at boot time */
+	ret = debug_enable_func();
+	if (ret) {
+		pr_err("%s: unable to enable debug function: %d\n",
+		       __func__, ret);
+		goto err;
+	}
+
+	return 0;
+
+err:
+	debugfs_remove_recursive(debug_debugfs_dir);
+	return ret;
+}
+
+/* Tear down the debugfs interface; called when the last device goes */
+static void debug_func_exit(void)
+{
+	debugfs_remove_recursive(debug_debugfs_dir);
+
+	/* Disable functionality if has enabled */
+	if (debug_enable)
+		debug_disable_func();
+}
+
+/*
+ * AMBA probe for one per-CPU debug module: map its registers, read the
+ * implementation features on the owning CPU, and set up the shared
+ * debugfs/panic-notifier state on the first successful probe.
+ */
+static int debug_probe(struct amba_device *adev, const struct amba_id *id)
+{
+	void __iomem *base;
+	struct device *dev = &adev->dev;
+	struct debug_drvdata *drvdata;
+	struct resource *res = &adev->res;
+	struct device_node *np = adev->dev.of_node;
+	int ret;
+
+	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+
+	/* Without a DT node the module is assumed to belong to CPU0 */
+	drvdata->cpu = np ? of_coresight_get_cpu(np) : 0;
+	if (per_cpu(debug_drvdata, drvdata->cpu)) {
+		dev_err(dev, "CPU's drvdata has been initialized\n");
+		return -EBUSY;
+	}
+
+	drvdata->dev = &adev->dev;
+	amba_set_drvdata(adev, drvdata);
+
+	/* Validity for the resource is already checked by the AMBA core */
+	base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	drvdata->base = base;
+
+	/*
+	 * The device info must be read on the CPU the module belongs
+	 * to; hold off CPU hotplug while the cross call is in flight.
+	 */
+	get_online_cpus();
+	per_cpu(debug_drvdata, drvdata->cpu) = drvdata;
+	ret = smp_call_function_single(drvdata->cpu,
+				debug_init_arch_data, drvdata, 1);
+	put_online_cpus();
+
+	if (ret) {
+		dev_err(dev, "Debug arch init failed\n");
+		goto err;
+	}
+
+	if (!drvdata->edpcsr_present) {
+		ret = -ENXIO;
+		dev_err(dev, "Sample-based profiling is not implemented\n");
+		goto err;
+	}
+
+	/* First successful probe sets up the shared debugfs interface */
+	if (!debug_count++) {
+		ret = debug_func_init();
+		if (ret)
+			goto err_func_init;
+	}
+
+	dev_info(dev, "Coresight debug-CPU%d initialized\n", drvdata->cpu);
+	return 0;
+
+err_func_init:
+	debug_count--;
+err:
+	per_cpu(debug_drvdata, drvdata->cpu) = NULL;
+	return ret;
+}
+
+/*
+ * AMBA remove: clear this CPU's drvdata slot and tear down the shared
+ * state when the last probed device goes away.
+ */
+static int debug_remove(struct amba_device *adev)
+{
+	struct debug_drvdata *drvdata = amba_get_drvdata(adev);
+
+	per_cpu(debug_drvdata, drvdata->cpu) = NULL;
+
+	if (!--debug_count)
+		debug_func_exit();
+
+	return 0;
+}
+
+/* Peripheral IDs of the per-core external debug units we support */
+static struct amba_id debug_ids[] = {
+	{       /* Debug for Cortex-A53 */
+		.id	= 0x000bbd03,
+		.mask	= 0x000fffff,
+	},
+	{       /* Debug for Cortex-A57 */
+		.id	= 0x000bbd07,
+		.mask	= 0x000fffff,
+	},
+	{       /* Debug for Cortex-A72 */
+		.id	= 0x000bbd08,
+		.mask	= 0x000fffff,
+	},
+	{ 0, 0 },
+};
+
+static struct amba_driver debug_driver = {
+	.drv = {
+		.name   = "coresight-cpu-debug",
+		.suppress_bind_attrs = true,
+	},
+	.probe		= debug_probe,
+	.remove		= debug_remove,
+	.id_table	= debug_ids,
+};
+
+module_amba_driver(debug_driver);
+
+MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
+MODULE_DESCRIPTION("ARM Coresight CPU Debug Driver");
+MODULE_LICENSE("GPL");