ref: e50c842755993b183c1c56d72000be7918bf0bfb
parent: 22ae1403e99e417dac4f6432915d852c824a94c9
author: Timothy B. Terriberry <tterribe@xiph.org>
date: Wed May 2 06:11:36 EDT 2012
Fix TEXTRELs in the ARM asm.

Besides imposing a performance penalty at startup in most configurations,
these relocations break the dynamic linker for native Fennec, since it
does not support them at all.

Change-Id: Id5dc768609354ebb4379966eb61a7313e6fd18de
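The TEXTRELs come from how these routines pick up table addresses: the
address is assembled as a data word (a DCD of a label) inside the code
section and read with a PC-relative ldr, so the dynamic linker has to
patch an absolute relocation into .text at load time. Replacing the ldr
with adr computes the address PC-relatively and needs no relocation.
A minimal sketch of the two patterns, using an illustrative label name
that does not appear in the patch:

        ; before: load an absolute address stored as data in .text
        ldr     r12, _table_ptr_    ; word below needs an absolute reloc -> TEXTREL
        ;...
_table_ptr_
        DCD     table               ; absolute address of the table, lives in .text

        ; after: compute the address PC-relatively, no relocation needed
        adr     r12, table          ; table must sit within adr's limited range

Since adr can only reach targets close to the instruction, the
bilinear_taps_coeff table in the 16x16 file is presumably moved ahead of
the code so that it stays within range.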
--- a/vp8/common/arm/neon/vp8_subpixelvariance16x16_neon.asm
+++ b/vp8/common/arm/neon/vp8_subpixelvariance16x16_neon.asm
@@ -9,6 +9,11 @@
;
+bilinear_taps_coeff
+ DCD 128, 0, 112, 16, 96, 32, 80, 48, 64, 64, 48, 80, 32, 96, 16, 112
+
+;-----------------
+
EXPORT |vp8_sub_pixel_variance16x16_neon_func|
ARM
REQUIRE8
@@ -27,7 +32,7 @@
|vp8_sub_pixel_variance16x16_neon_func| PROC
push {r4-r6, lr}
- ldr r12, _BilinearTaps_coeff_
+ adr r12, bilinear_taps_coeff
ldr r4, [sp, #16] ;load *dst_ptr from stack
ldr r5, [sp, #20] ;load dst_pixels_per_line from stack
ldr r6, [sp, #24] ;load *sse from stack
@@ -414,12 +419,5 @@
pop {r4-r6,pc}
ENDP
-
-;-----------------
-
-_BilinearTaps_coeff_
- DCD bilinear_taps_coeff
-bilinear_taps_coeff
- DCD 128, 0, 112, 16, 96, 32, 80, 48, 64, 64, 48, 80, 32, 96, 16, 112
END
--- a/vp8/common/arm/neon/vp8_subpixelvariance8x8_neon.asm
+++ b/vp8/common/arm/neon/vp8_subpixelvariance8x8_neon.asm
@@ -27,7 +27,7 @@
|vp8_sub_pixel_variance8x8_neon| PROC
push {r4-r5, lr}
- ldr r12, _BilinearTaps_coeff_
+ adr r12, bilinear_taps_coeff
ldr r4, [sp, #12] ;load *dst_ptr from stack
ldr r5, [sp, #16] ;load dst_pixels_per_line from stack
ldr lr, [sp, #20] ;load *sse from stack
@@ -216,8 +216,6 @@
;-----------------
-_BilinearTaps_coeff_
- DCD bilinear_taps_coeff
bilinear_taps_coeff
DCD 128, 0, 112, 16, 96, 32, 80, 48, 64, 64, 48, 80, 32, 96, 16, 112
--- a/vp8/encoder/arm/neon/fastquantizeb_neon.asm
+++ b/vp8/encoder/arm/neon/fastquantizeb_neon.asm
@@ -98,7 +98,7 @@
vmul.s16 q2, q6, q4 ; x * Dequant
vmul.s16 q3, q7, q5
- ldr r0, _inv_zig_zag_ ; load ptr of inverse zigzag table
+ adr r0, inv_zig_zag ; load ptr of inverse zigzag table
vceq.s16 q8, q8 ; set q8 to all 1
@@ -181,7 +181,7 @@
vadd.s16 q12, q14 ; x + Round
vadd.s16 q13, q15
- ldr r0, _inv_zig_zag_ ; load ptr of inverse zigzag table
+ adr r0, inv_zig_zag ; load ptr of inverse zigzag table
vqdmulh.s16 q12, q8 ; y = ((Round+abs(z)) * Quant) >> 16
vqdmulh.s16 q13, q9
@@ -247,9 +247,6 @@
ENDP
; default inverse zigzag table is defined in vp8/common/entropy.c
-_inv_zig_zag_
- DCD inv_zig_zag
-
ALIGN 16 ; enable use of @128 bit aligned loads
inv_zig_zag
DCW 0x0001, 0x0002, 0x0006, 0x0007
--