author     Martin Storsjo <martin@martin.st>   2012-04-13 14:16:54 +0300
committer  Martin Storsjo <martin@martin.st>   2012-08-20 23:47:00 +0100
commit     ccde1257952d2c073e51ecba6180060570ffa41f (patch)
tree       6c4a4527855885d8f4d337836612df5372db8942 /media/libstagefright/codecs
parent     46b7eeb8be7449515082ecb1a61700affb44115f (diff)
avcenc: Properly indent assembly blocks
Also line break multiline assembly blocks - previously they were virtually unreadable.

Change-Id: Icb269909b78746e26b28ab7dcb6979c4655a0b0c
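(For reference, not part of the commit: a minimal standalone sketch of the formatting convention the change applies - one instruction per quoted line ending in "\n\t", with the output/input operand lists on their own lines. The function name sub_sad_sketch and the non-ARM fallback are illustrative only, it assumes GCC-style extended asm on an ARM (non-Thumb) target, and the constraints/clobbers are tightened relative to the original source so the sketch is correct on its own.)

    #include <stdint.h>

    static inline int32_t sub_sad_sketch(int32_t sad, int32_t tmp, int32_t tmp2)
    {
    #if defined(__arm__) && !defined(__thumb__)
        /* One instruction per quoted line; operand lists on separate lines. */
        __asm__ volatile(
            "rsbs %1, %1, %2\n\t"   /* tmp = tmp2 - tmp, setting flags      */
            "rsbmi %1, %1, #0\n\t"  /* negate if the result was negative    */
            "add %0, %0, %1"        /* accumulate the absolute difference   */
            : "+r"(sad), "+r"(tmp)
            : "r"(tmp2)
            : "cc"
        );
        return sad;
    #else
        /* Portable fallback so the sketch also builds on non-ARM hosts. */
        int32_t d = tmp2 - tmp;
        return sad + (d < 0 ? -d : d);
    #endif
    }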
Diffstat (limited to 'media/libstagefright/codecs')
-rw-r--r--  media/libstagefright/codecs/avc/enc/src/sad_halfpel_inline.h   16
-rw-r--r--  media/libstagefright/codecs/avc/enc/src/sad_inline.h           50
-rw-r--r--  media/libstagefright/codecs/avc/enc/src/sad_mb_offset.h        60
3 files changed, 108 insertions, 18 deletions
diff --git a/media/libstagefright/codecs/avc/enc/src/sad_halfpel_inline.h b/media/libstagefright/codecs/avc/enc/src/sad_halfpel_inline.h
index bb4a510..4eb9b00 100644
--- a/media/libstagefright/codecs/avc/enc/src/sad_halfpel_inline.h
+++ b/media/libstagefright/codecs/avc/enc/src/sad_halfpel_inline.h
@@ -77,14 +77,26 @@ extern "C"
__inline int32 INTERP1_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
{
-__asm__ volatile("rsbs %1, %1, %2, asr #1\n\trsbmi %1, %1, #0\n\tadd %0, %0, %1": "=r"(sad), "=r"(tmp): "r"(tmp2));
+ __asm__ volatile(
+ "rsbs %1, %1, %2, asr #1\n\t"
+ "rsbmi %1, %1, #0\n\t"
+ "add %0, %0, %1"
+ : "=r"(sad), "=r"(tmp)
+ : "r"(tmp2)
+ );
return sad;
}
__inline int32 INTERP2_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
{
-__asm__ volatile("rsbs %1, %2, %1, asr #2\n\trsbmi %1, %1, #0\n\tadd %0, %0, %1": "=r"(sad), "=r"(tmp): "r"(tmp2));
+ __asm__ volatile(
+ "rsbs %1, %2, %1, asr #2\n\t"
+ "rsbmi %1, %1, #0\n\t"
+ "add %0, %0, %1"
+ : "=r"(sad), "=r"(tmp)
+ : "r"(tmp2)
+ );
return sad;
}
diff --git a/media/libstagefright/codecs/avc/enc/src/sad_inline.h b/media/libstagefright/codecs/avc/enc/src/sad_inline.h
index f6c3554..6695d63 100644
--- a/media/libstagefright/codecs/avc/enc/src/sad_inline.h
+++ b/media/libstagefright/codecs/avc/enc/src/sad_inline.h
@@ -343,7 +343,13 @@ SadMBOffset1:
__inline int32 SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
{
-__asm__ volatile("rsbs %1, %1, %2\n\trsbmi %1, %1, #0\n\tadd %0, %0, %1": "=r"(sad): "r"(tmp), "r"(tmp2));
+ __asm__ volatile(
+ "rsbs %1, %1, %2\n\t"
+ "rsbmi %1, %1, #0\n\t"
+ "add %0, %0, %1"
+ : "=r"(sad)
+ : "r"(tmp), "r"(tmp2)
+ );
return sad;
}
@@ -351,7 +357,18 @@ __asm__ volatile("rsbs %1, %1, %2\n\trsbmi %1, %1, #0\n\tadd %0, %0, %1": "=r"(s
{
int32 x7;
-__asm__ volatile("EOR %1, %2, %0\n\tSUBS %0, %2, %0\n\tEOR %1, %1, %0\n\tAND %1, %3, %1, lsr #1\n\tORRCC %1, %1, #0x80000000\n\tRSB %1, %1, %1, lsl #8\n\tADD %0, %0, %1, asr #7\n\tEOR %0, %0, %1, asr #7": "=r"(src1), "=&r"(x7): "r"(src2), "r"(mask));
+ __asm__ volatile(
+ "EOR %1, %2, %0\n\t"
+ "SUBS %0, %2, %0\n\t"
+ "EOR %1, %1, %0\n\t"
+ "AND %1, %3, %1, lsr #1\n\t"
+ "ORRCC %1, %1, #0x80000000\n\t"
+ "RSB %1, %1, %1, lsl #8\n\t"
+ "ADD %0, %0, %1, asr #7\n\t"
+ "EOR %0, %0, %1, asr #7"
+ : "=r"(src1), "=&r"(x7)
+ : "r"(src2), "r"(mask)
+ );
return src1;
}
@@ -360,12 +377,31 @@ __asm__ volatile("EOR %1, %2, %0\n\tSUBS %0, %2, %0\n\tEOR %1, %1, %0\n\tAND %
{
int32 x7;
-__asm__ volatile("EOR %1, %2, %0\n\tADDS %0, %2, %0\n\tEOR %1, %1, %0\n\tANDS %1, %3, %1, rrx\n\tRSB %1, %1, %1, lsl #8\n\tSUB %0, %0, %1, asr #7\n\tEOR %0, %0, %1, asr #7": "=r"(src1), "=&r"(x7): "r"(src2), "r"(mask));
+ __asm__ volatile(
+ "EOR %1, %2, %0\n\t"
+ "ADDS %0, %2, %0\n\t"
+ "EOR %1, %1, %0\n\t"
+ "ANDS %1, %3, %1, rrx\n\t"
+ "RSB %1, %1, %1, lsl #8\n\t"
+ "SUB %0, %0, %1, asr #7\n\t"
+ "EOR %0, %0, %1, asr #7"
+ : "=r"(src1), "=&r"(x7)
+ : "r"(src2), "r"(mask)
+ );
return src1;
}
-#define sum_accumulate __asm__ volatile("SBC %0, %0, %1\n\tBIC %1, %4, %1\n\tADD %2, %2, %1, lsr #8\n\tSBC %0, %0, %3\n\tBIC %3, %4, %3\n\tADD %2, %2, %3, lsr #8": "=&r" (x5), "=&r" (x10), "=&r" (x4), "=&r" (x11): "r" (x6));
+#define sum_accumulate __asm__ volatile( \
+ "SBC %0, %0, %1\n\t" \
+ "BIC %1, %4, %1\n\t" \
+ "ADD %2, %2, %1, lsr #8\n\t" \
+ "SBC %0, %0, %3\n\t" \
+ "BIC %3, %4, %3\n\t" \
+ "ADD %2, %2, %3, lsr #8" \
+ : "=&r" (x5), "=&r" (x10), "=&r" (x4), "=&r" (x11) \
+ : "r" (x6) \
+ );
#define NUMBER 3
#define SHIFT 24
@@ -407,7 +443,7 @@ __asm__ volatile("EOR %1, %2, %0\n\tADDS %0, %2, %0\n\tEOR %1, %1, %0\n\tANDS
x8 = 16;
///
-__asm__ volatile("MVN %0, #0xFF00": "=r"(x6));
+ __asm__ volatile("MVN %0, #0xFF00": "=r"(x6));
LOOP_SAD0:
/****** process 8 pixels ******/
@@ -431,10 +467,10 @@ LOOP_SAD0:
/****** process 8 pixels ******/
x11 = *((int32*)(ref + 4));
-__asm__ volatile("LDR %0, [%1], %2": "=&r"(x10), "=r"(ref): "r"(lx));
+ __asm__ volatile("LDR %0, [%1], %2": "=&r"(x10), "=r"(ref): "r"(lx));
//x10 = *((int32*)ref); ref+=lx;
x14 = *((int32*)(blk + 4));
-__asm__ volatile("LDR %0, [%1], #16": "=&r"(x12), "=r"(blk));
+ __asm__ volatile("LDR %0, [%1], #16": "=&r"(x12), "=r"(blk));
/* process x11 & x14 */
x11 = sad_4pixel(x11, x14, x9);
diff --git a/media/libstagefright/codecs/avc/enc/src/sad_mb_offset.h b/media/libstagefright/codecs/avc/enc/src/sad_mb_offset.h
index 8a7fe22..0165360 100644
--- a/media/libstagefright/codecs/avc/enc/src/sad_mb_offset.h
+++ b/media/libstagefright/codecs/avc/enc/src/sad_mb_offset.h
@@ -230,7 +230,7 @@ __inline int32 sad_mb_offset1(uint8 *ref, uint8 *blk, int lx, int dmin)
x4 = x5 = 0;
x8 = 16; //<<===========*******
-__asm__ volatile("MVN %0, #0xFF0000": "=r"(x6));
+ __asm__ volatile("MVN %0, #0xFF0000": "=r"(x6));
#if (NUMBER==3)
LOOP_SAD3:
@@ -239,7 +239,7 @@ LOOP_SAD2:
#elif (NUMBER==1)
LOOP_SAD1:
#endif
-__asm__ volatile("BIC %0, %0, #3": "=r"(ref));
+ __asm__ volatile("BIC %0, %0, #3": "=r"(ref));
/****** process 8 pixels ******/
x11 = *((int32*)(ref + 12));
x12 = *((int32*)(ref + 16));
@@ -247,11 +247,32 @@ __asm__ volatile("BIC %0, %0, #3": "=r"(ref));
x14 = *((int32*)(blk + 12));
#if (SHIFT==8)
-__asm__ volatile("MVN %0, %0, lsr #8\n\tBIC %0, %0, %1,lsl #24\n\tMVN %1, %1,lsr #8\n\tBIC %1, %1, %2,lsl #24": "=&r"(x10), "=&r"(x11): "r"(x12));
+ __asm__ volatile(
+ "MVN %0, %0, lsr #8\n\t"
+ "BIC %0, %0, %1, lsl #24\n\t"
+ "MVN %1, %1, lsr #8\n\t"
+ "BIC %1, %1, %2, lsl #24"
+ : "=&r"(x10), "=&r"(x11)
+ : "r"(x12)
+ );
#elif (SHIFT==16)
-__asm__ volatile("MVN %0, %0, lsr #16\n\tBIC %0, %0, %1,lsl #16\n\tMVN %1, %1,lsr #16\n\tBIC %1, %1, %2,lsl #16": "=&r"(x10), "=&r"(x11): "r"(x12));
+ __asm__ volatile(
+ "MVN %0, %0, lsr #16\n\t"
+ "BIC %0, %0, %1, lsl #16\n\t"
+ "MVN %1, %1, lsr #16\n\t"
+ "BIC %1, %1, %2, lsl #16"
+ : "=&r"(x10), "=&r"(x11)
+ : "r"(x12)
+ );
#elif (SHIFT==24)
-__asm__ volatile("MVN %0, %0, lsr #24\n\tBIC %0, %0, %1,lsl #8\n\tMVN %1, %1,lsr #24\n\tBIC %1, %1, %2,lsl #8": "=&r"(x10), "=&r"(x11): "r"(x12));
+ __asm__ volatile(
+ "MVN %0, %0, lsr #24\n\t"
+ "BIC %0, %0, %1, lsl #8\n\t"
+ "MVN %1, %1, lsr #24\n\t"
+ "BIC %1, %1, %2, lsl #8"
+ : "=&r"(x10), "=&r"(x11)
+ : "r"(x12)
+ );
#endif
x12 = *((int32*)(blk + 8));
@@ -271,13 +292,34 @@ __asm__ volatile("MVN %0, %0, lsr #24\n\tBIC %0, %0, %1,lsl #8\n\tMVN %1,
x14 = *((int32*)(blk + 4));
#if (SHIFT==8)
-__asm__ volatile("MVN %0, %0, lsr #8\n\tBIC %0, %0, %1,lsl #24\n\tMVN %1, %1,lsr #8\n\tBIC %1, %1, %2,lsl #24": "=&r"(x10), "=&r"(x11): "r"(x12));
+ __asm__ volatile(
+ "MVN %0, %0, lsr #8\n\t"
+ "BIC %0, %0, %1, lsl #24\n\t"
+ "MVN %1, %1, lsr #8\n\t"
+ "BIC %1, %1, %2, lsl #24"
+ : "=&r"(x10), "=&r"(x11)
+ : "r"(x12)
+ );
#elif (SHIFT==16)
-__asm__ volatile("MVN %0, %0, lsr #16\n\tBIC %0, %0, %1,lsl #16\n\tMVN %1, %1,lsr #16\n\tBIC %1, %1, %2,lsl #16": "=&r"(x10), "=&r"(x11): "r"(x12));
+ __asm__ volatile(
+ "MVN %0, %0, lsr #16\n\t"
+ "BIC %0, %0, %1, lsl #16\n\t"
+ "MVN %1, %1, lsr #16\n\t"
+ "BIC %1, %1, %2, lsl #16"
+ : "=&r"(x10), "=&r"(x11)
+ : "r"(x12)
+ );
#elif (SHIFT==24)
-__asm__ volatile("MVN %0, %0, lsr #24\n\tBIC %0, %0, %1,lsl #8\n\tMVN %1, %1,lsr #24\n\tBIC %1, %1, %2,lsl #8": "=&r"(x10), "=&r"(x11): "r"(x12));
+ __asm__ volatile(
+ "MVN %0, %0, lsr #24\n\t"
+ "BIC %0, %0, %1, lsl #8\n\t"
+ "MVN %1, %1, lsr #24\n\t"
+ "BIC %1, %1, %2, lsl #8"
+ : "=&r"(x10), "=&r"(x11)
+ : "r"(x12)
+ );
#endif
-__asm__ volatile("LDR %0, [%1], #16": "=&r"(x12), "=r"(blk));
+ __asm__ volatile("LDR %0, [%1], #16": "=&r"(x12), "=r"(blk));
/* process x11 & x14 */
x11 = sad_4pixelN(x11, x14, x9);