//===---------------------------------------------------------------------===//
// Random ideas for the ARM backend (Thumb specific).
//===---------------------------------------------------------------------===//

* Add support for compiling functions in both ARM and Thumb mode, then taking
  the smaller of the two.
* Add support for compiling individual basic blocks in Thumb mode within a
  larger ARM function.  This can be used for presumed cold code, like paths
  to abort (the failure paths of asserts), EH handling code, etc.

* Thumb doesn't have normal pre/post-increment addressing modes, but you can
  load/store 32-bit integers with post-increment by using load/store multiple
  instructions with a single register.
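
  A minimal sketch of the trick (register choices are illustrative):

	ldmia	r0!, {r1}	@ r1 = *r0; r0 += 4 (post-increment load)
	stmia	r2!, {r1}	@ *r2 = r1; r2 += 4 (post-increment store)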

* Make better use of the high registers r8, r10, r11, and r12 (ip). Some
  variants of the add and cmp instructions can use high registers. Also, we
  can use them as temporaries to spill values into.
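
  A minimal sketch (register choices are illustrative):

	mov	r8, r4		@ stash r4 in a high register instead of the stack
	...
	cmp	r0, r8		@ cmp can compare a low register with a high one
	mov	r4, r8		@ reload r4 without touching memory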

* In Thumb mode, the preferred alignments of short, byte, and bool are
  currently set to 4 to accommodate an ISA restriction (i.e. in add sp, #imm,
  the immediate must be a multiple of 4).
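
  For illustration, the Thumb encoding of add sp only takes a 7-bit immediate
  scaled by 4:

	add	sp, #508	@ encodable: 508 = 127 * 4
	add	sp, #6		@ not encodable: not a multiple of 4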

//===---------------------------------------------------------------------===//

Potential jumptable improvements:

* If we know the function size is less than (1 << 16) * 2 bytes, we can use
  16-bit jumptable entries (e.g. (L1 - L2) >> 1), or even smaller entries if
  the function is smaller still. This also applies to ARM.
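
  A minimal sketch of such a table (label names are illustrative); each entry
  is the halfword distance from the table base to the target block:

	.align	1
LJTI_small:
	.short	(LBB1_3-LJTI_small)>>1
	.short	(LBB1_4-LJTI_small)>>1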

* Thumb jumptable codegen could be improved with some help from the assembler.
  This is what we generate right now:

	.set PCRELV0, (LJTI1_0_0-(LPCRELL0+4))
LPCRELL0:
	mov r1, #PCRELV0
	add r1, pc
	ldr r0, [r0, r1]
	cpy pc, r0 
	.align	2
LJTI1_0_0:
	.long	 LBB1_3
	...

Note there is another pc-relative add that we can take advantage of:
     add r1, pc, #imm_8 * 4

We should be able to generate:

LPCRELL0:
	add r1, LJTI1_0_0
	ldr r0, [r0, r1]
	cpy pc, r0 
	.align	2
LJTI1_0_0:
	.long	 LBB1_3

if the assembler can translate the add to:
       add r1, pc, #((LJTI1_0_0-(LPCRELL0+4))&0xfffffffc)

Note the assembler can also do something similar for constant-pool loads:
LPCRELL0:
     ldr r0, LCPI1_0
=>
     ldr r0, [pc, #((LCPI1_0-(LPCRELL0+4))&0xfffffffc)]


//===---------------------------------------------------------------------===//

We compile the following using a jump table.

define i16 @func_entry_2E_ce(i32 %i) {
newFuncRoot:
        br label %entry.ce

bb12.exitStub:          ; preds = %entry.ce
        ret i16 0

bb4.exitStub:           ; preds = %entry.ce, %entry.ce, %entry.ce
        ret i16 1

bb9.exitStub:           ; preds = %entry.ce, %entry.ce, %entry.ce
        ret i16 2

bb.exitStub:            ; preds = %entry.ce
        ret i16 3

entry.ce:               ; preds = %newFuncRoot
        switch i32 %i, label %bb12.exitStub [
                 i32 0, label %bb4.exitStub
                 i32 1, label %bb9.exitStub
                 i32 2, label %bb4.exitStub
                 i32 3, label %bb4.exitStub
                 i32 7, label %bb9.exitStub
                 i32 8, label %bb.exitStub
                 i32 9, label %bb9.exitStub
        ]
}

gcc instead compiles it using bit tests against case masks (note 642 =
0b1010000010, i.e. bits 1, 7 and 9 set: exactly the cases that return 2):

	cmp	r0, #9
	@ lr needed for prologue
	bhi	L2
	ldr	r3, L11
	mov	r2, #1
	mov	r1, r2, asl r0
	ands	r0, r3, r2, asl r0
	movne	r0, #2
	bxne	lr
	tst	r1, #13
	beq	L9
L3:
	mov	r0, r2
	bx	lr
L9:
	tst	r1, #256
	movne	r0, #3
	bxne	lr
L2:
	mov	r0, #0
	bx	lr
L12:
	.align 2
L11:
	.long	642

//===---------------------------------------------------------------------===//

When spilling in Thumb mode and the sp offset is too large to fit in the ldr /
str offset field, we load the offset from a constpool entry and add it to sp:

ldr r2, LCPI            @ load the large stack offset from a constpool entry
add r2, sp              @ this form of add does not set the condition codes
ldr r2, [r2]            @ reload the spilled value

These instructions preserve the condition codes, which is important if the
spill is between a cmp and a bcc instruction. However, we can use the
(potentially) cheaper sequence below if we know it's ok to clobber the
condition register:

add r2, sp, #255 * 4    @ largest sp-relative add immediate (1020)
add r2, #132            @ this add sets the condition codes
ldr r2, [r2, #7 * 4]    @ fold the remaining offset into the load