@@ -23,7 +23,14 @@
#include <linux/types.h>
+struct outer_cache_info { /* outer (L2) cache geometry reported by the cache driver */
+ unsigned int num_ways; /* associativity: number of ways */
+ unsigned int num_sets; /* number of sets */
+ unsigned int line_size; /* cache line size in bytes; total size = ways * sets * line_size */
+};
+
struct outer_cache_fns {
+ void (*get_info)(struct outer_cache_info *info);
void (*inv_range)(unsigned long, unsigned long);
void (*clean_range)(unsigned long, unsigned long);
void (*flush_range)(unsigned long, unsigned long);
@@ -112,6 +119,11 @@ static inline void outer_resume(void)
outer_cache.resume();
}
+static inline void outer_get_info(struct outer_cache_info *info) /* query geometry from the registered outer cache */
+{
+ if (outer_cache.get_info) /* hook is optional; NOTE(review): *info is left unmodified when it is absent — callers must not rely on it then */
+ outer_cache.get_info(info);
+}
#else
static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)
@@ -123,6 +135,7 @@ static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
static inline void outer_flush_all(void) { }
static inline void outer_disable(void) { }
static inline void outer_resume(void) { }
+static inline void outer_get_info(struct outer_cache_info *info) { } /* !CONFIG_OUTER_CACHE: no-op, *info is left untouched */
#endif
@@ -16,6 +16,7 @@
#include <linux/of.h>
#include <asm/cputype.h>
+#include <asm/outercache.h>
#include <asm/processor.h>
#if __LINUX_ARM_ARCH__ < 7 /* pre ARMv7 */
@@ -176,11 +177,27 @@ static void __ci_leaf_init(enum cache_type type, struct cacheinfo *this_leaf)
#endif
+static void __outer_ci_leaf_init(struct cacheinfo *this_leaf) /* describe the outer (L2) cache as a cacheinfo leaf */
+{
+ struct outer_cache_info info; /* NOTE(review): stays uninitialised if no get_info hook is set — confirm this path is only reached when one is registered */
+
+ outer_get_info(&info);
+
+ this_leaf->type = CACHE_TYPE_UNIFIED;/* record it as Unified */
+ this_leaf->ways_of_associativity = info.num_ways;
+ this_leaf->number_of_sets = info.num_sets;
+ this_leaf->coherency_line_size = info.line_size;
+ this_leaf->size = info.num_ways * info.num_sets * info.line_size; /* total bytes = ways * sets * line size */
+}
+
static void ci_leaf_init(struct cacheinfo *this_leaf,
 enum cache_type type, unsigned int level)
{
 this_leaf->level = level;
- __ci_leaf_init(type, this_leaf);
+ if (type == CACHE_TYPE_NOCACHE) /* no CPU cache at this level: must be the outer cache leaf */
+ __outer_ci_leaf_init(this_leaf);
+ else
+ __ci_leaf_init(type, this_leaf);
}
int init_cache_level(unsigned int cpu)
@@ -202,6 +219,9 @@ int init_cache_level(unsigned int cpu)
this_cpu_ci->num_levels = level - 1;
this_cpu_ci->num_leaves = leaves;
+ if (IS_ENABLED(CONFIG_OUTER_CACHE) && outer_cache.get_info)
+ this_cpu_ci->num_leaves++, this_cpu_ci->num_levels++;
+
return 0;
}
@@ -105,6 +105,15 @@ static inline void l2c_unlock(void __iomem *base, unsigned num)
}
}
+static void l2x0_getinfo(struct outer_cache_info *info) /* report L2x0/PL310 geometry to the cacheinfo layer */
+{
+ if (!info)
+ return;
+ info->num_ways = get_count_order(l2x0_way_mask); /* way_mask is (1 << ways) - 1, so its count order equals the way count */
+ info->line_size = CACHE_LINE_SIZE;
+ info->num_sets = l2x0_size / (info->num_ways * CACHE_LINE_SIZE); /* sets = total size / (ways * line size) */
+}
+
/*
* Enable the L2 cache controller. This function must only be
* called when the cache controller is known to be disabled.
@@ -894,6 +903,7 @@ static void __init __l2c_init(const struct l2c_init_data *data,
data->enable(l2x0_base, aux, data->num_lock);
outer_cache = fns;
+ outer_cache.get_info = l2x0_getinfo;
/*
* It is strange to save the register state before initialisation,
@@ -60,6 +60,7 @@ static inline void tauros2_inv_pa(unsigned long addr)
* noninclusive.
*/
#define CACHE_LINE_SIZE 32
+#define CACHE_LINE_SHIFT 5
static void tauros2_inv_range(unsigned long start, unsigned long end)
{
@@ -131,6 +132,38 @@ static void tauros2_resume(void)
"mcr p15, 0, %0, c1, c0, 0 @Enable L2 Cache\n\t"
: : "r" (0x0));
}
+
+/*
+ * +----------------------------------------+
+ * | 11 10 9 8 | 7 6 5 4 3 | 2 | 1 0 |
+ * +----------------------------------------+
+ * | way size | associativity | - |line_sz|
+ * +----------------------------------------+
+ */
+#define L2CTR_ASSOCIAT_SHIFT 3
+#define L2CTR_ASSOCIAT_MASK 0x1F
+#define L2CTR_WAYSIZE_SHIFT 8
+#define L2CTR_WAYSIZE_MASK 0xF
+#define CACHE_WAY_PER_SET(l2ctr) \
+ (((l2ctr) >> L2CTR_ASSOCIAT_SHIFT) & L2CTR_ASSOCIAT_MASK)
+#define CACHE_WAY_SIZE(l2ctr) \
+ (8192 << (((l2ctr) >> L2CTR_WAYSIZE_SHIFT) & L2CTR_WAYSIZE_MASK))
+#define CACHE_SET_SIZE(l2ctr) (CACHE_WAY_SIZE(l2ctr) >> CACHE_LINE_SHIFT) /* lines per way == number of sets */
+
+static void tauros2_getinfo(struct outer_cache_info *info) /* report Tauros2 L2 geometry to the cacheinfo layer */
+{
+ unsigned int l2_ctr;
+
+ if (!info)
+ return;
+
+ __asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2_ctr)); /* read the L2 cache characteristics register (layout documented above) */
+
+ info->line_size = CACHE_LINE_SIZE;
+ info->num_ways = CACHE_WAY_PER_SET(l2_ctr); /* associativity field, bits 7:3 */
+ info->num_sets = CACHE_SET_SIZE(l2_ctr); /* way size / line size */
+}
+
#endif
static inline u32 __init read_extra_features(void)
@@ -226,6 +259,7 @@ static void __init tauros2_internal_init(unsigned int features)
outer_cache.flush_range = tauros2_flush_range;
outer_cache.disable = tauros2_disable;
outer_cache.resume = tauros2_resume;
+ outer_cache.get_info = tauros2_getinfo;
}
#endif
@@ -201,6 +201,20 @@ static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
dsb();
}
+static void xsc3_l2_getinfo(struct outer_cache_info *info) /* report XSC3 L2 geometry to the cacheinfo layer */
+{
+ unsigned long l2ctype;
+
+ if (!info)
+ return;
+
+ __asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype)); /* read the L2CTYPE register */
+
+ info->num_ways = CACHE_WAY_PER_SET; /* fixed associativity — constant presumably defined earlier in this file; confirm */
+ info->line_size = CACHE_LINE_SIZE;
+ info->num_sets = CACHE_SET_SIZE(l2ctype); /* NOTE(review): assumes this file's CACHE_SET_SIZE() derives set count from L2CTYPE — verify against its definition */
+}
+
static int __init xsc3_l2_init(void)
{
if (!cpu_is_xsc3() || !xsc3_l2_present())
@@ -213,6 +227,7 @@ static int __init xsc3_l2_init(void)
outer_cache.inv_range = xsc3_l2_inv_range;
outer_cache.clean_range = xsc3_l2_clean_range;
outer_cache.flush_range = xsc3_l2_flush_range;
+ outer_cache.get_info = xsc3_l2_getinfo;
}
return 0;