/*
* OMAP44xx CPU low power powerdown and powerup code.
*
* Copyright (C) 2011 Texas Instruments, Inc.
* Written by Santosh Shilimkar <santosh.shilimkar@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/system.h>
#include <asm/smp_scu.h>
#include <asm/memory.h>
#include <asm/hardware/cache-l2x0.h>
#include <plat/omap44xx.h>
#include <mach/omap4-common.h>
#include "omap4-sar-layout.h"
#ifdef CONFIG_SMP
/* Masks used for MMU manipulation */
#define TTRBIT_MASK 0xffffc000
#define TABLE_INDEX_MASK 0xfff00000
#define TABLE_ENTRY 0x00000c02
#define CACHE_DISABLE_MASK 0xffffe7fb
#define TABLE_ADDRESS_OFFSET 0x04
#define CR_VALUE_OFFSET 0x08
#define SCU_POWER_SECURE_INDEX 0x108
/*
* Macro to call PPA svc when MMU is OFF
* Caller must set up r0 and r3 before calling this macro
* @r0: PPA service ID
* @r3: Pointer to params
*/
.macro LM_CALL_PPA_SERVICE_PA
mov r1, #0x0 @ Process ID
mov r2, #0x4 @ Flag
mov r6, #0xff
mov r12, #0x00 @ Secure Service ID
dsb
smc #0
.endm
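/*
* Typical call sequence (see ppa_cp15_cpu1_configure below):
*   mov r0, #PPA_SERVICE_DEFAULT_POR_NS_SMP  @ PPA service ID
*   adr r3, ppa_zero_params                  @ Pointer to params
*   LM_CALL_PPA_SERVICE_PA
*/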
/*
* Macro to check OMAP4 revision
* @a: bits 31:08 of omap_rev
*/
.macro OMAP4_REV_CMP a
ldr r0, =OMAP44XX_SAR_RAM_BASE
ldr r1, [r0, #OMAP_REV_OFFSET]
lsr r2, r1, #8
and r2, r2, #0xffffff
ldr r3, =\a
cmp r2, r3
.endm
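/*
* Typical use (see check_por_gp below): OMAP4_REV_CMP 0x443022 followed
* by a conditional branch (e.g. blt) on the flags left by the cmp.
*/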
/*
* Parameters used to load the POR value saved in SAR RAM
*/
POR_params:
.word 1, 0
ppa_zero_params:
.word 0x0
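/*
* POR_params layout: the first word is presumably the parameter count (1);
* the second word holds the POR value and is filled at runtime from the
* L2 prefetch control value saved in SAR RAM (see check_por below).
*/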
/*
* =============================
* == CPU suspend entry point ==
* =============================
*
* void omap4_cpu_suspend(unsigned int cpu, unsigned int save_state)
*
* This function saves the CPU context and performs the CPU
* power down sequence. Calling WFI effectively changes the CPU
* power domains states to the desired target power state.
*
* @cpu : contains cpu id (r0)
* @save_state : contains context save state (r1)
* 0 - No context lost
* 1 - CPUx L1 and logic lost: MPUSS CSWR
* 2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
* 3 - CPUx L1 and logic lost + GIC + L2 lost: MPUSS OFF
* @return: This function never returns for CPU OFF and DORMANT power states.
* Post WFI, the CPU transitions to the DORMANT or OFF power state and on
* wake-up follows a full CPU reset path via ROM code to the CPU restore code.
* It returns to the caller for CPU INACTIVE and ON power states, or in case
* the CPU failed to transition to the targeted OFF/DORMANT state.
*/
ENTRY(omap4_cpu_suspend)
stmfd sp!, {r0-r12, lr} @ Save registers on stack
cmp r1, #0x0
beq do_WFI @ Nothing to save, jump to WFI
mov r5, r0
mov r6, r1
bl omap4_get_sar_ram_base
mov r8, r0
ands r5, r5, #0x0f
streq r6, [r8, #L2X0_SAVE_OFFSET0] @ Store save state
strne r6, [r8, #L2X0_SAVE_OFFSET1]
orreq r8, r8, #CPU0_SAVE_OFFSET
orrne r8, r8, #CPU1_SAVE_OFFSET
/*
* Save only the needed CPU CP15 registers. VFP, breakpoint and
* performance monitor registers are not saved; generic
* code is supposed to take care of those.
*/
mov r4, sp @ Store sp
mrs r5, spsr @ Store spsr
mov r6, lr @ Store lr
stmia r8!, {r4-r6}
/* c1 and c2 registers */
mrc p15, 0, r4, c1, c0, 2 @ CPACR
mrc p15, 0, r5, c2, c0, 0 @ TTBR0
mrc p15, 0, r6, c2, c0, 1 @ TTBR1
mrc p15, 0, r7, c2, c0, 2 @ TTBCR
stmia r8!, {r4-r7}
/* c3 and c10 registers */
mrc p15, 0, r4, c3, c0, 0 @ DACR
mrc p15, 0, r5, c10, c2, 0 @ PRRR
mrc p15, 0, r6, c10, c2, 1 @ NMRR
stmia r8!,{r4-r6}
/* c12, c13 and CPSR registers */
mrc p15, 0, r4, c13, c0, 1 @ Context ID
mrc p15, 0, r5, c13, c0, 2 @ User r/w thread ID
mrc p15, 0, r6, c12, c0, 0 @ Secure or NS VBAR
mrs r7, cpsr @ Store CPSR
stmia r8!, {r4-r7}
/* c1 control register */
mrc p15, 0, r4, c1, c0, 0 @ Save control register
stmia r8!, {r4}
/*
* Flush all data from the L1 data cache before disabling
* the SCTLR.C bit.
*/
bl v7_flush_dcache_all
bl omap4_get_sar_ram_base
ldr r9, [r0, #OMAP_TYPE_OFFSET]
cmp r9, #0x1 @ Check for HS device
bne skip_secure_l1_flush
mov r0, #SCU_PM_NORMAL
mov r1, #0xFF @ Clean secure L1
stmfd r13!, {r4-r12, r14}
ldr r12, =SCU_POWER_SECURE_INDEX
dsb
smc #0
dsb
ldmfd r13!, {r4-r12, r14}
skip_secure_l1_flush:
/*
* Clear the SCTLR.C bit to prevent further data cache
* allocation. Clearing SCTLR.C makes all data accesses
* strongly ordered, so they no longer hit the cache.
*/
mrc p15, 0, r0, c1, c0, 0
bic r0, r0, #(1 << 2) @ Disable the C bit
mcr p15, 0, r0, c1, c0, 0
isb
/*
* Invalidate the L1 data cache. Even though only an invalidate is
* necessary, the exported flush API is used here; doing a clean
* on an already clean cache is almost a NOP.
*/
bl v7_flush_dcache_all
/*
* Switch the CPU from Symmetric Multiprocessing (SMP) mode
* to Asymmetric Multiprocessing (AMP) mode by programming
* the SCU power status to DORMANT or OFF mode.
* This enables the CPU to be taken out of coherency by
* preventing the CPU from receiving cache, TLB, or BTB
* maintenance operations broadcast by other CPUs in the cluster.
*/
bl omap4_get_sar_ram_base
mov r8, r0
ldr r9, [r8, #OMAP_TYPE_OFFSET]
cmp r9, #0x1 @ Check for HS device
bne scu_gp_set
mrc p15, 0, r0, c0, c0, 5 @ Read MPIDR
ands r0, r0, #0x0f
ldreq r0, [r8, #SCU_OFFSET0]
ldrne r0, [r8, #SCU_OFFSET1]
mov r1, #0x00 @ Secure L1 is clean already
stmfd r13!, {r4-r12, r14}
ldr r12, =SCU_POWER_SECURE_INDEX
dsb
smc #0
dsb
ldmfd r13!, {r4-r12, r14}
b skip_scu_gp_set
scu_gp_set:
mrc p15, 0, r0, c0, c0, 5 @ Read MPIDR
ands r0, r0, #0x0f
ldreq r1, [r8, #SCU_OFFSET0]
ldrne r1, [r8, #SCU_OFFSET1]
bl omap4_get_scu_base
bl scu_power_mode
skip_scu_gp_set:
isb
dsb
mrc p15, 0, r0, c1, c1, 2 @ Read NSACR
tst r0, #(1 << 18) @ NS write access to the ACTLR.SMP bit?
mrcne p15, 0, r0, c1, c0, 1 @ Read ACTLR
bicne r0, r0, #(1 << 6) @ Clear the SMP bit
mcrne p15, 0, r0, c1, c0, 1 @ Write back ACTLR
isb
#ifdef CONFIG_CACHE_L2X0
/*
* Clean and invalidate the L2 cache.
* The common cache-l2x0.c functions can't be used here since they
* use spinlocks. We are out of coherency here, with the data cache
* disabled. The spinlock implementation uses exclusive load/store
* instructions, which can fail when the data cache is disabled.
* OMAP4 hardware doesn't have an external exclusive monitor that
* could overcome this, so the CPU could end up deadlocking.
*/
l2x_clean_inv:
bl omap4_get_sar_ram_base
mov r8, r0
mrc p15, 0, r5, c0, c0, 5 @ Read MPIDR
ands r5, r5, #0x0f
ldreq r0, [r8, #L2X0_SAVE_OFFSET0]
ldrne r0, [r8, #L2X0_SAVE_OFFSET1]
cmp r0, #3
bne do_WFI
#ifdef CONFIG_PL310_ERRATA_727915
mov r0, #0x03
mov r12, #0x100
dsb
smc #0
dsb
#endif
bl omap4_get_l2cache_base
mov r2, r0
ldr r0, =0xffff
str r0, [r2, #L2X0_CLEAN_INV_WAY]
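/*
* Writing 0xffff to CLEAN_INV_WAY starts a background clean and invalidate
* of all PL310 ways; the loop below polls the register until the
* operation completes.
*/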
wait:
ldr r0, [r2, #L2X0_CLEAN_INV_WAY]
ands r0, r0, #0xff
bne wait
#ifdef CONFIG_PL310_ERRATA_727915
mov r0, #0x00
mov r12, #0x100
dsb
smc #0
dsb
#endif
l2x_sync:
bl omap4_get_l2cache_base
mov r2, r0
mov r0, #0x0
str r0, [r2, #L2X0_CACHE_SYNC]
sync:
ldr r0, [r2, #L2X0_CACHE_SYNC]
ands r0, r0, #0x1
bne sync
#endif
do_WFI:
bl omap_do_wfi
/*
* The CPU reaches this point when it failed to enter OFF/DORMANT
* or when no low power state was attempted.
*/
mrc p15, 0, r0, c1, c0, 0
tst r0, #(1 << 2) @ Check C bit enabled?
orreq r0, r0, #(1 << 2) @ Enable the C bit
mcreq p15, 0, r0, c1, c0, 0
isb
/* Enable SMP bit if it's being disabled */
mrc p15, 0, r0, c1, c0, 1
tst r0, #(1 << 6) @ Check SMP bit enabled?
orreq r0, r0, #(1 << 6)
mcreq p15, 0, r0, c1, c0, 1
isb
/*
* Ensure the CPU power state is set back to NORMAL in the
* SCU so that the CPU is back in coherency.
* In non-coherent mode the CPU can lock up and lead to a
* system deadlock.
*/
bl omap4_get_sar_ram_base
mov r8, r0
ldr r9, [r8, #OMAP_TYPE_OFFSET]
cmp r9, #0x1 @ Check for HS device
bne scu_gp_clear
mov r0, #SCU_PM_NORMAL
mov r1, #0x00
stmfd r13!, {r4-r12, r14}
ldr r12, =SCU_POWER_SECURE_INDEX
dsb
smc #0
dsb
ldmfd r13!, {r4-r12, r14}
b skip_scu_gp_clear
scu_gp_clear:
bl omap4_get_scu_base
mov r1, #SCU_PM_NORMAL
bl scu_power_mode
skip_scu_gp_clear:
isb
dsb
ldmfd sp!, {r0-r12, pc} @ Restore regs and return
ENDPROC(omap4_cpu_suspend)
/*
* ============================
* == CPU resume entry point ==
* ============================
*
* void omap4_cpu_resume(void)
*
* ROM code jumps to this function while waking up from CPU
* OFF or DORMANT state. The physical address of this function is
* stored in the SAR RAM while entering OFF or DORMANT mode.
*/
ENTRY(omap4_cpu_resume)
/*
* Each CPU gets the device type from SAR RAM and stores it in r9
*/
ldr r8, =OMAP44XX_SAR_RAM_BASE
ldr r9, [r8, #OMAP_TYPE_OFFSET]
/*
* CPU1 must check if CPU0 is alive/awake:
* if the PL310 is OFF, the MPUSS was OFF and CPU0 is still off,
* so CPU1 must go back to sleep and wait for CPU0.
* Only CPU0 should enable the cache controller.
*/
mrc p15, 0, r0, c0, c0, 5 @ Get cpuID
ands r0, r0, #0x0f @ Continue boot if CPU0
bne is_cpu0_up @ CPU1: Must check if CPU0 is up
cmp r9, #0x1 @ CPU0: Check for HS device in r9
beq continue_boot
bne gp_cp15_configure
is_cpu0_up:
ldr r2, =OMAP44XX_L2CACHE_BASE
ldr r0, [r2, #L2X0_CTRL]
and r0, #0x0f
cmp r0, #1 @ is CPU0 already UP?
beq cpu1_configure_cp15
/*
* When CPU1 is released to HLOS control in the case of OSWR
* and OFF mode, PPA versions below v1.7.3[1] do not perform all
* the memory coherency and TLB operations required.
*
* For GP devices (PPA never present) this WA path is also taken if
* CPU1 wakes up first; the mechanism is used to synchronize
* booting by shutting off CPU1, thus allowing CPU0 to boot first
* and restore the OMAP context.
*
* A WA to recover cleanly from this scenario is to switch CPU1 back to
* its previous OFF state. This forces a reset of CPU1, which in turn
* forces CPU1 not to override MMU descriptors already in place in
* internal RAM set up by CPU0. CPU1 will also sync to the in-place
* descriptors on the next wakeup. CPU1 wakeup is done by
* later kernel subsystems depending on suspend or cpuidle path
* being exercised.
* NOTE: for OSWR the state provided is 2, and for OFF it is 3.
* Since the bug impacts OFF and OSWR, we need to force 0x3 to
* shut off CPU1.
*
* Since many distributions may not be able to update the PPA, or would
* like to support platforms with an older PPA, we provide a config option.
* This is simpler and keeps the current code cleaner compared to
* flag-based handling in CPU1 recovery for
* board + PPA revision combinations.
*
* Having this config option enabled even on platforms with a fixed PPA
* should not impact stability; however, the ability to make CPU1
* available for operations a little earlier is curtailed.
*
* Footnote [1]:
* v1.7.3 is the official TI PPA version. Custom PPA could have
* the relevant changes ported over to it.
*/
#ifdef CONFIG_OMAP4_PPA_CPU1_ONLINE_BUG
mov r0, #0x03 @ Target CPU1 to OFF (MPUSS PD = OSWR/OFF)
mov r1, #0x00 @ Secure L1 is already clean
ldr r12, =SCU_POWER_SECURE_INDEX
dsb
smc #0
isb @ Necessary barriers before wfi
dsb
dmb
wfi @ wait for interrupt
nop
nop
/*
* If we came out of WFI immediately, something unknown happened.
* Fall through and loop back to the checks, failing which we retry WFI.
*/
#endif
/*
* CPU0 and CPU1 are released together from OFF mode; however,
* CPU0 can be busy doing restore operations while waking
* from OFF mode. Many PPA services need
* CPU0, so we ask CPU1 to loop back, staggering CPU1 behind CPU0.
*/
b omap4_cpu_resume
cpu1_configure_cp15:
/*
* Select the right API to set up CP15 depending on the device type
*/
cmp r9, #0x1 @ Check for HS device in r9
bne gp_cp15_configure @ Jump to GP API
ppa_cp15_cpu1_configure:
/*
* On HS devices CPU0's CP15 is configured at wakeup by the PPA; CPU1 must
* call the PPA to configure it, and CPU0 must be online for any PPA API to
* work. On 4430 devices this call also enables CPU1's access to the SMP
* bit; on 4460 devices CPU1 has SMP bit access by default.
*/
mov r0, #PPA_SERVICE_DEFAULT_POR_NS_SMP
adr r3, ppa_zero_params @ Pointer to parameters
LM_CALL_PPA_SERVICE_PA
isb
dsb
cmp r0, #0x0 @ API returns 0 on success.
bne ppa_cp15_cpu1_configure @ retry if we did not succeed
/* HS device cp15 done, jump to continue_boot */
b continue_boot
gp_cp15_configure:
/* In GP devices, both CPUs must configure their CP15 */
/* Compute the ARM revision */
mov r3, #0
mrc p15, 0, r1, c0, c0, 0 @ read main ID register
and r2, r1, #0x00f00000 @ variant
and r3, r1, #0x0000000f @ revision
orr r3, r3, r2, lsr #20-4 @ r3: has variant and revision
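/* e.g. a Cortex-A9 r2p2 yields r3 = 0x22, r1p0 yields r3 = 0x10 */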
mov r0, #0
#ifdef CONFIG_OMAP4_ARM_ERRATA_742230
cmp r3, #0x10 @ present in r1p0 onwards
blt ignore_742230
cmp r3, #0x22 @ not present after r2p2
orrle r0, r0, #0x10 @ Set bit 4
ignore_742230:
#endif
#ifdef CONFIG_OMAP4_ARM_ERRATA_751472
cmp r3, #0x30 @ present prior to r3p0
orrlt r0, r0, #0x800 @ Set bit 11
#endif
#ifdef CONFIG_OMAP4_ARM_ERRATA_743622
cmp r3, #0x20 @ present in r2p0 onwards
blt ignore_743622
cmp r3, #0x30 @ not present in r3p0 onwards
orrlt r0, r0, #0x40 @ Set bit 6
ignore_743622:
#endif
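/*
* r0 now holds the diagnostic register bits required by the enabled errata
* workarounds (bit 4 for 742230, bit 11 for 751472, bit 6 for 743622).
* The HAL_DIAGREG_0 monitor call below asks the secure side to program
* them into the CP15 diagnostic register.
*/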
cmp r0, #0
beq continue_boot
mov r12, #HAL_DIAGREG_0
dsb
smc #0
dsb
continue_boot:
#ifdef CONFIG_CACHE_L2X0
/*
* Restore the L2 AUXCTRL and enable the L2 cache.
* 0x109 = program the L2X0 AUXCTRL register
* 0x102 = enable the L2 using the L2X0 CTRL register
* Register r0 contains the value to be programmed.
* The L2 cache is already invalidated by ROM code as part
* of the MPUSS OFF wakeup path.
*/
ldr r2, =OMAP44XX_L2CACHE_BASE
ldr r0, [r2, #L2X0_CTRL]
and r0, #0x0f
cmp r0, #1
beq skip_l2en @ Skip if already enabled
check_por:
ldr r0, =OMAP44XX_SAR_RAM_BASE @ Check DEVICE type
ldr r1, [r0, #OMAP_TYPE_OFFSET]
cmp r1, #0x1 @ Check for HS device
bne check_por_gp
ldr r0, =PPA_SERVICE_PL310_POR @ Setup PPA HAL call
ldr r1, =OMAP44XX_SAR_RAM_BASE
ldr r4, [r1, #L2X0_PREFETCHCTRL_OFFSET]
adr r3, POR_params
str r4, [r3, #0x04]
LM_CALL_PPA_SERVICE_PA
b skip_por
check_por_gp:
OMAP4_REV_CMP 0x443022
blt skip_por @ On GP, POR setup is possible only on >= OMAP4430 ES2.2
ldr r3, =OMAP44XX_SAR_RAM_BASE
ldr r0, [r3, #L2X0_PREFETCHCTRL_OFFSET]
ldr r12, =0x113 @ Setup L2 POR value
dsb
smc #0
skip_por:
ldr r3, =OMAP44XX_SAR_RAM_BASE
ldr r0, [r3, #L2X0_AUXCTRL_OFFSET]
ldr r12, =0x109 @ Setup L2 AUXCTRL value
dsb
smc #0
ldr r2, =OMAP44XX_L2CACHE_BASE
ldr r4, =OMAP44XX_SAR_RAM_BASE
ldr r9, [r4, #L2X0_LOCKDOWN_OFFSET0]
str r9, [r2, #L2X0_LOCKDOWN_WAY_D0]
str r9, [r2, #L2X0_LOCKDOWN_WAY_D1]
str r9, [r2, #L2X0_LOCKDOWN_WAY_I0]
str r9, [r2, #L2X0_LOCKDOWN_WAY_I1]
dsb
mov r0, #0x1
ldr r12, =0x102 @ Enable L2 Cache controller
dsb
smc #0
dsb
skip_l2en:
#endif
/* Check if we have Public access to SMP bit */
mrc p15, 0, r0, c1, c1, 2 @ Read NSACR data
tst r0, #(1 << 18)
beq skip_ns_smp_enable @ Skip if still no access
/* Set the SMP bit if it is not already set */
mrc p15, 0, r0, c1, c0, 1
tst r0, #(1 << 6) @ Check SMP bit enabled?
orreq r0, r0, #(1 << 6)
mcreq p15, 0, r0, c1, c0, 1
isb
skip_ns_smp_enable:
/*
* Check the wakeup CPU id and use the appropriate
* SAR bank location for context restore.
*/
ldr r3, =OMAP44XX_SAR_RAM_BASE
mov r1, #0
mcr p15, 0, r1, c7, c5, 0 @ Invalidate L1 I
mrc p15, 0, r0, c0, c0, 5 @ MPIDR
ands r0, r0, #0x0f
orreq r3, r3, #CPU0_SAVE_OFFSET
orrne r3, r3, #CPU1_SAVE_OFFSET
/* Restore cp15 registers */
ldmia r3!, {r4-r6}
mov sp, r4 @ Restore sp
msr spsr_cxsf, r5 @ Restore spsr
mov lr, r6 @ Restore lr
/* c1 and c2 registers */
ldmia r3!, {r4-r7}
mcr p15, 0, r4, c1, c0, 2 @ CPACR
mcr p15, 0, r5, c2, c0, 0 @ TTBR0
mcr p15, 0, r6, c2, c0, 1 @ TTBR1
mcr p15, 0, r7, c2, c0, 2 @ TTBCR
/* c3 and c10 registers */
ldmia r3!,{r4-r6}
mcr p15, 0, r4, c3, c0, 0 @ DACR
mcr p15, 0, r5, c10, c2, 0 @ PRRR
mcr p15, 0, r6, c10, c2, 1 @ NMRR
/* c12, c13 and CPSR registers */
ldmia r3!,{r4-r7}
mcr p15, 0, r4, c13, c0, 1 @ Context ID
mcr p15, 0, r5, c13, c0, 2 @ User r/w thread ID
mcr p15, 0, r6, c12, c0, 0 @ Secure or NS VBAR
msr cpsr, r7 @ Restore cpsr
/*
* Enable the MMU here. A page table entry needs to be altered
* to create a temporary 1:1 map; the entry is then restored
* once the MMU is enabled.
*/
mrc p15, 0, r7, c2, c0, 2 @ Read TTBCR
and r7, #0x7 @ Extract N (0:2) to decide
cmp r7, #0x0 @ TTBR0/TTBR1
beq use_ttbr0
ttbr_error:
b ttbr_error @ Only N = 0 supported
use_ttbr0:
mrc p15, 0, r2, c2, c0, 0 @ Read TTBR0
ldr r5, =TTRBIT_MASK
and r2, r5
mov r4, pc
ldr r5, =TABLE_INDEX_MASK
and r4, r5 @ r4 = 31 to 20 bits of pc
ldr r1, =TABLE_ENTRY
add r1, r1, r4 @ r1 has value of table entry
lsr r4, #18 @ Address of table entry
add r2, r4 @ r2 - location to be modified
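/*
* At this point (ARMv7 short-descriptor format assumed):
* r1 = 1MB section descriptor for the region containing this code
*      (TABLE_ENTRY 0xc02: section, AP full access, C/B = 0)
* r2 = physical address of the matching first-level entry in the
*      table pointed to by TTBR0 (table base + (pc >> 20) * 4)
* Storing r1 at [r2] below creates the temporary 1:1 mapping.
*/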
/* Ensure the modified entry makes it to main memory */
#ifdef CONFIG_CACHE_L2X0
ldr r5, =OMAP44XX_L2CACHE_BASE
str r2, [r5, #L2X0_CLEAN_INV_LINE_PA]
wait_l2:
ldr r0, [r5, #L2X0_CLEAN_INV_LINE_PA]
ands r0, #1
bne wait_l2
#endif
/* Store the previous contents of the location being modified */
ldr r5, =OMAP44XX_SAR_RAM_BASE
ldr r4, [r2]
mrc p15, 0, r0, c0, c0, 5 @ Read MPIDR
ands r0, r0, #0x0f
streq r4, [r5, #MMU_OFFSET0] @ Save the current table entry
strne r4, [r5, #MMU_OFFSET1]
str r1, [r2] @ Install the temporary 1:1 section entry
/*
* Store the address of the entry being modified.
* It will be restored after enabling the MMU.
*/
ldr r5, =OMAP44XX_SAR_RAM_BASE
mrc p15, 0, r0, c0, c0, 5 @ Read MPIDR
ands r0, r0, #0x0f
orreq r5, r5, #MMU_OFFSET0
orrne r5, r5, #MMU_OFFSET1
str r2, [r5, #TABLE_ADDRESS_OFFSET]
mov r0, #0
mcr p15, 0, r0, c7, c5, 4 @ Flush prefetch buffer
mcr p15, 0, r0, c7, c5, 6 @ Invalidate BTB
mcr p15, 0, r0, c8, c5, 0 @ Invalidate ITLB
mcr p15, 0, r0, c8, c6, 0 @ Invalidate DTLB
/*
* Restore the control register but don't enable the data cache here.
* The cache will be enabled after restoring the MMU table entry.
*/
ldmia r3!, {r4}
str r4, [r5, #CR_VALUE_OFFSET] @ Store previous value of CR
ldr r2, =CACHE_DISABLE_MASK
and r4, r2
mcr p15, 0, r4, c1, c0, 0
isb
dsb
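/*
* Branch via the absolute (virtual) address of mmu_on_label. Up to this
* point the CPU has executed from the identity-mapped physical addresses;
* after this branch it runs from the normal kernel virtual mapping.
*/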
ldr r0, =mmu_on_label
bx r0
mmu_on_label:
/* Set up the per-CPU stacks */
bl cpu_init
/*
* Restore the MMU table entry that was modified for
* enabling MMU.
*/
bl omap4_get_sar_ram_base
mov r8, r0
mrc p15, 0, r0, c0, c0, 5 @ Read MPIDR
ands r0, r0, #0x0f
orreq r8, r8, #MMU_OFFSET0 @ Get address of entry that..
orrne r8, r8, #MMU_OFFSET1 @ was modified
ldr r2, [r8, #TABLE_ADDRESS_OFFSET]
ldr r3, =local_va2pa_offet
add r2, r2, r3
ldr r0, [r8] @ Get the previous value..
str r0, [r2] @ which needs to be restored
mov r0, #0
mcr p15, 0, r0, c7, c1, 6 @ Invalidate branch predictor (inner shareable)
mcr p15, 0, r0, c7, c5, 4 @ Flush prefetch buffer
mcr p15, 0, r0, c7, c5, 6 @ Invalidate BTB
mcr p15, 0, r0, c8, c5, 0 @ Invalidate ITLB
mcr p15, 0, r0, c8, c6, 0 @ Invalidate DTLB
dsb
isb
ldr r0, [r8, #CR_VALUE_OFFSET] @ Restore the Control register
mcr p15, 0, r0, c1, c0, 0 @ with caches enabled.
isb
ldmfd sp!, {r0-r12, pc} @ restore regs and return
.equ local_va2pa_offet, (PLAT_PHYS_OFFSET + PAGE_OFFSET)
ENDPROC(omap4_cpu_resume)
ENTRY(omap_bus_sync)
stmfd sp!, {r9, lr}
/* SO write to drain the MPU-2-DDR T2ASYNC FIFO */
bl omap_get_dram_barrier_base
ldr r2, [r0]
str r2, [r0]
/* SO write to drain the MPU-2-L3 T2ASYNC FIFO */
bl omap_get_sram_barrier_base
ldr r2, [r0]
str r2, [r0]
isb
ldmfd sp!, {r9, pc}
ENDPROC(omap_bus_sync)
ENTRY(omap_do_wfi)
stmfd sp!, {lr}
/* Drain interconnect write buffers. */
bl omap_bus_sync
/*
* Execute an ISB instruction to ensure that all of the
* CP15 register changes have been committed.
*/
isb
/*
* Execute a barrier instruction to ensure that all cache,
* TLB and branch predictor maintenance operations issued
* by any CPU in the cluster have completed.
*/
dsb
dmb
/*
* Execute a WFI instruction and wait until the
* STANDBYWFI output is asserted to indicate that the
* CPU is in the idle and low power state. The CPU can speculatively
* prefetch instructions, so add NOPs after WFI: sixteen
* NOPs as per the Cortex-A9 pipeline.
*/
wfi @ Wait For Interrupt
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
ldmfd sp!, {pc}
ENDPROC(omap_do_wfi)
/*
* ============================
* == ARM get revision id ==
* ============================
*
* unsigned int omap_get_arm_rev()
*
* This function returns the ARM revision id,
* e.g. 0x20 for ARM r2p0, 0x21 for r2p1, 0x30 for r3p0.
*/
ENTRY(omap_get_arm_rev)
stmfd sp!, {lr}
mov r0, #0
mrc p15, 0, r1, c0, c0, 0 @ read main ID register
and r2, r1, #0xff000000 @ ARM?
teq r2, #0x41000000
bne not_arm
and r2, r1, #0x00f00000 @ variant
and r3, r1, #0x0000000f @ revision
orr r3, r3, r2, lsr #20-4 @ combine variant and revision
mov r0, r3
not_arm:
ldmfd sp!, {pc}
ENDPROC(omap_get_arm_rev)
#endif