/*
 * arch/arm/mm/cache-tauros2.c - Tauros2 L2 cache controller support
 *
 * Copyright (C) 2008 Marvell Semiconductor
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 *
 * References:
 * - PJ1 CPU Core Datasheet,
 *   Document ID MV-S104837-01, Rev 0.7, January 24 2008.
 * - PJ4 CPU Core Datasheet,
 *   Document ID MV-S105190-00, Rev 0.7, March 14 2008.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/hardware/cache-tauros2.h>


/*
 * When Tauros2 is used on a CPU that supports the v7 hierarchical
 * cache operations, the cache handling code in proc-v7.S takes care
 * of everything, including handling DMA coherency.
 *
 * So, we only need to register outer cache operations here if we're
 * being used on a pre-v7 CPU, and we only need to build support for
 * outer cache operations into the kernel image if the kernel has been
 * configured to support a pre-v7 CPU.
 */
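
/*
 * For reference, generic kernel code does not call the operations
 * registered below directly; it reaches them through the outer_*()
 * helpers (outer_inv_range() and friends) from <asm/outercache.h>.
 */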
#if __LINUX_ARM_ARCH__ < 7
/*
 * Low-level cache maintenance operations.
 */
static inline void tauros2_clean_pa(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c11, 3" : : "r" (addr));
}

static inline void tauros2_clean_inv_pa(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c15, 3" : : "r" (addr));
}

static inline void tauros2_inv_pa(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c7, 3" : : "r" (addr));
}


/*
 * Linux primitives.
 *
 * Note that the end addresses passed to Linux primitives are
 * noninclusive.
 */
#define CACHE_LINE_SIZE		32
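
/*
 * A worked example of the line arithmetic in tauros2_inv_range()
 * (illustrative only): with 32-byte lines, a call with the range
 * [0x1004, 0x2010) computes
 *
 *	0x1004 & ~31		= 0x1000  (partial first line: clean+inv)
 *	(0x1004 | 31) + 1	= 0x1020  (first full line)
 *	0x2010 & ~31		= 0x2000  (partial last line: clean+inv)
 *
 * and then invalidates the full lines 0x1020, 0x1040, ..., 0x1fe0.
 */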

static void tauros2_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * Clean and invalidate partial first cache line.
	 */
	if (start & (CACHE_LINE_SIZE - 1)) {
		tauros2_clean_inv_pa(start & ~(CACHE_LINE_SIZE - 1));
		start = (start | (CACHE_LINE_SIZE - 1)) + 1;
	}

	/*
	 * Clean and invalidate partial last cache line.
	 */
	if (end & (CACHE_LINE_SIZE - 1)) {
		tauros2_clean_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
		end &= ~(CACHE_LINE_SIZE - 1);
	}

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		tauros2_inv_pa(start);
		start += CACHE_LINE_SIZE;
	}

	dsb();
}

static void tauros2_clean_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		tauros2_clean_pa(start);
		start += CACHE_LINE_SIZE;
	}

	dsb();
}

static void tauros2_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		tauros2_clean_inv_pa(start);
		start += CACHE_LINE_SIZE;
	}

	dsb();
}

static void tauros2_disable(void)
{
	u32 tmp = 0;

	/*
	 * %0 is both read (it supplies the data for the clean-all op)
	 * and overwritten by the mrc, so it must be a read-write
	 * operand rather than an input-only one.
	 */
	__asm__ __volatile__ (
	"mcr	p15, 1, %0, c7, c11, 0 @ L2 Cache Clean All\n\t"
	"mrc	p15, 0, %0, c1, c0, 0\n\t"
	"bic	%0, %0, #(1 << 26)\n\t"
	"mcr	p15, 0, %0, c1, c0, 0 @ Disable L2 Cache\n\t"
	: "+r" (tmp));
}

static void tauros2_resume(void)
{
	u32 tmp = 0;

	/* As in tauros2_disable(), %0 is overwritten by the mrc. */
	__asm__ __volatile__ (
	"mcr	p15, 1, %0, c7, c7, 0 @ L2 Cache Invalidate All\n\t"
	"mrc	p15, 0, %0, c1, c0, 0\n\t"
	"orr	%0, %0, #(1 << 26)\n\t"
	"mcr	p15, 0, %0, c1, c0, 0 @ Enable L2 Cache\n\t"
	: "+r" (tmp));
}
#endif /* __LINUX_ARM_ARCH__ < 7 */
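
/*
 * Illustrative use of the primitives above (hypothetical call site,
 * assuming the standard outer-cache helpers): before a memory-to-device
 * DMA transfer, the DMA mapping code issues
 *
 *	outer_clean_range(paddr, paddr + size);
 *
 * which dispatches to tauros2_clean_range() through the function
 * pointers registered in tauros2_init() below.
 */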

static inline u32 __init read_extra_features(void)
{
	u32 u;

	__asm__("mrc p15, 1, %0, c15, c1, 0" : "=r" (u));

	return u;
}

static inline void __init write_extra_features(u32 u)
{
	__asm__("mcr p15, 1, %0, c15, c1, 0" : : "r" (u));
}

static void __init disable_l2_prefetch(void)
{
	u32 u;

	/*
	 * Read the CPU Extra Features register and make sure that the
	 * Disable L2 Prefetch bit (bit 24) is set; set it if it isn't.
	 */
	u = read_extra_features();
	if (!(u & 0x01000000)) {
		printk(KERN_INFO "Tauros2: Disabling L2 prefetch.\n");
		write_extra_features(u | 0x01000000);
	}
}
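
/*
 * The "new" CPUID scheme is in use when the architecture field of the
 * Main ID register (bits [19:16]) reads 0xf; only then are the CPUID
 * feature registers, such as MMFR3 below, guaranteed to be implemented.
 */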

static inline int __init cpuid_scheme(void)
{
	extern int processor_id;

	return !!((processor_id & 0x000f0000) == 0x000f0000);
}
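
/*
 * ID_MMFR3 (Memory Model Feature Register 3); tauros2_init() tests its
 * low four bits to tell the v6 and v7 cache-maintenance personalities
 * apart.
 */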

static inline u32 __init read_mmfr3(void)
{
	u32 mmfr3;

	__asm__("mrc p15, 0, %0, c0, c1, 7\n" : "=r" (mmfr3));

	return mmfr3;
}
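
/*
 * Accessors for the Auxiliary Control Register, which on ARMv7 holds
 * the Tauros2 L2 enable bit (bit 1, tested in tauros2_init() below).
 */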

static inline u32 __init read_actlr(void)
{
	u32 actlr;

	__asm__("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));

	return actlr;
}

static inline void __init write_actlr(u32 actlr)
{
	__asm__("mcr p15, 0, %0, c1, c0, 1\n" : : "r" (actlr));
}

void __init tauros2_init(void)
{
	extern int processor_id;
	char *mode = NULL;

	disable_l2_prefetch();

#ifdef CONFIG_CPU_32v5
	if ((processor_id & 0xff0f0000) == 0x56050000) {
		u32 feat;

		/*
		 * v5 CPUs with Tauros2 have the L2 cache enable bit
		 * (bit 22) located in the CPU Extra Features register.
		 */
		feat = read_extra_features();
		if (!(feat & 0x00400000)) {
			printk(KERN_INFO "Tauros2: Enabling L2 cache.\n");
			write_extra_features(feat | 0x00400000);
		}

		mode = "ARMv5";
		outer_cache.inv_range = tauros2_inv_range;
		outer_cache.clean_range = tauros2_clean_range;
		outer_cache.flush_range = tauros2_flush_range;
		outer_cache.disable = tauros2_disable;
		outer_cache.resume = tauros2_resume;
	}
#endif /* CONFIG_CPU_32v5 */

#ifdef CONFIG_CPU_32v6
	/*
	 * Check whether this CPU lacks support for the v7 hierarchical
	 * cache ops.  (PJ4 is in its v6 personality mode if the MMFR3
	 * register indicates no support for the v7 hierarchical cache
	 * ops.)
	 */
	if (cpuid_scheme() && (read_mmfr3() & 0xf) == 0) {
		/*
		 * When Tauros2 is used in an ARMv6 system, the L2
		 * enable bit is in the ARMv6 ARM-mandated position
		 * (bit [26] of the System Control Register).
		 */
		if (!(get_cr() & 0x04000000)) {
			printk(KERN_INFO "Tauros2: Enabling L2 cache.\n");
			adjust_cr(0x04000000, 0x04000000);
		}

		mode = "ARMv6";
		outer_cache.inv_range = tauros2_inv_range;
		outer_cache.clean_range = tauros2_clean_range;
		outer_cache.flush_range = tauros2_flush_range;
		outer_cache.disable = tauros2_disable;
		outer_cache.resume = tauros2_resume;
	}
#endif /* CONFIG_CPU_32v6 */

#ifdef CONFIG_CPU_32v7
	/*
	 * Check whether this CPU has support for the v7 hierarchical
	 * cache ops.  (PJ4 is in its v7 personality mode if the MMFR3
	 * register indicates support for the v7 hierarchical cache
	 * ops.)
	 *
	 * (Strictly speaking, a CPU could implement the v7 cache ops
	 * and yet, by not meeting all of the other ARMv7 requirements,
	 * still be only an ARMv6 CPU; but no real-life example of
	 * Tauros2 being used on such a CPU is known as of yet.)
	 */
	if (cpuid_scheme() && (read_mmfr3() & 0xf) == 1) {
		u32 actlr;

		/*
		 * When Tauros2 is used in an ARMv7 system, the L2
		 * enable bit is located in the Auxiliary System Control
		 * Register (which is the only register allowed by the
		 * ARMv7 spec to contain fine-grained cache control bits).
		 */
		actlr = read_actlr();
		if (!(actlr & 0x00000002)) {
			printk(KERN_INFO "Tauros2: Enabling L2 cache.\n");
			write_actlr(actlr | 0x00000002);
		}

		mode = "ARMv7";
	}
#endif /* CONFIG_CPU_32v7 */

	if (mode == NULL) {
		printk(KERN_CRIT "Tauros2: Unable to detect CPU mode.\n");
		return;
	}

	printk(KERN_INFO "Tauros2: L2 cache support initialised in %s mode.\n",
	       mode);
}