ref: f91c3bb3ab000940ac4792dab25ba8ef2f005132
parent: e0b4c4d1ae15c1b83249f4eebcd0e63a35a600ea
author: James Zern <jzern@google.com>
date: Mon Mar 20 18:46:53 EDT 2017
idct_neon: prefix non-static functions w/'vpx_'

Helpers with external linkage that are shared through idct_neon.h take the
library-wide 'vpx_' prefix so their exported symbols stay namespaced; the
file-local (static) high bitdepth helpers in the same files are renamed to
match, along with their call sites.

Change-Id: I94fcdeae18468e6ef0cb7119b8142d982a048031
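For illustration only, a minimal standalone sketch of the naming rule being applied here; the file and function names below are hypothetical and are not part of this patch:

  /* Hypothetical sketch of the naming rule (not from this patch). */
  #include <stdint.h>

  /* External linkage: would be declared in a shared header (cf. idct_neon.h)
   * and called from other translation units, so the symbol carries the
   * library-wide "vpx_" prefix. */
  void vpx_example_half1d(const int16_t *input, int16_t *output);

  /* Internal linkage: visible only inside this translation unit, produces no
   * exported symbol, so no prefix is required. */
  static void example_butterfly(const int16_t *in, int16_t *out) {
    out[0] = (int16_t)(in[0] + in[1]);
    out[1] = (int16_t)(in[0] - in[1]);
  }

  void vpx_example_half1d(const int16_t *input, int16_t *output) {
    example_butterfly(input, output);
  }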
--- a/vpx_dsp/arm/highbd_idct16x16_add_neon.c
+++ b/vpx_dsp/arm/highbd_idct16x16_add_neon.c
@@ -592,9 +592,10 @@
vst1q_s32(output + 4, out[15].val[1]);
}
-static void highbd_idct16x16_256_add_half1d(const int32_t *input,
- int32_t *output, uint16_t *dest,
- const int stride, const int bd) {
+static void vpx_highbd_idct16x16_256_add_half1d(const int32_t *input,
+ int32_t *output, uint16_t *dest,
+ const int stride,
+ const int bd) {
const int32x4_t cospi_0_8_16_24 = vld1q_s32(kCospi32 + 0);
const int32x4_t cospi_4_12_20N_28 = vld1q_s32(kCospi32 + 4);
const int32x4_t cospi_2_30_10_22 = vld1q_s32(kCospi32 + 8);
@@ -846,9 +847,9 @@
return highbd_idct16x16_add_wrap_low_4x1(t);
}
-static void highbd_idct16x16_38_add_half1d(const int32_t *input,
- int32_t *output, uint16_t *dest,
- const int stride, const int bd) {
+static void vpx_highbd_idct16x16_38_add_half1d(const int32_t *input,
+ int32_t *output, uint16_t *dest,
+ const int stride, const int bd) {
const int32x4_t cospi_0_8_16_24 = vld1q_s32(kCospi32 + 0);
const int32x4_t cospi_4_12_20N_28 = vld1q_s32(kCospi32 + 4);
const int32x4_t cospi_2_30_10_22 = vld1q_s32(kCospi32 + 8);
@@ -1002,8 +1003,8 @@
}
}
-void highbd_idct16x16_10_add_half1d_pass1(const tran_low_t *input,
- int32_t *output) {
+void vpx_highbd_idct16x16_10_add_half1d_pass1(const tran_low_t *input,
+ int32_t *output) {
const int32x4_t cospi_0_8_16_24 = vld1q_s32(kCospi32 + 0);
const int32x4_t cospi_4_12_20N_28 = vld1q_s32(kCospi32 + 4);
const int32x4_t cospi_2_30_10_22 = vld1q_s32(kCospi32 + 8);
@@ -1141,10 +1142,10 @@
vst1q_s32(output, out[15]);
}
-void highbd_idct16x16_10_add_half1d_pass2(const int32_t *input,
- int32_t *const output,
- uint16_t *const dest,
- const int stride, const int bd) {
+void vpx_highbd_idct16x16_10_add_half1d_pass2(const int32_t *input,
+ int32_t *const output,
+ uint16_t *const dest,
+ const int stride, const int bd) {
const int32x4_t cospi_0_8_16_24 = vld1q_s32(kCospi32 + 0);
const int32x4_t cospi_4_12_20N_28 = vld1q_s32(kCospi32 + 4);
const int32x4_t cospi_2_30_10_22 = vld1q_s32(kCospi32 + 8);
@@ -1276,37 +1277,39 @@
// pass 1
// Parallel idct on the upper 8 rows
- idct16x16_256_add_half1d(input, row_idct_output, dest, stride, 1);
+ vpx_idct16x16_256_add_half1d(input, row_idct_output, dest, stride, 1);
// Parallel idct on the lower 8 rows
- idct16x16_256_add_half1d(input + 8 * 16, row_idct_output + 8, dest, stride,
- 1);
+ vpx_idct16x16_256_add_half1d(input + 8 * 16, row_idct_output + 8, dest,
+ stride, 1);
// pass 2
// Parallel idct to get the left 8 columns
- idct16x16_256_add_half1d(row_idct_output, NULL, dest, stride, 1);
+ vpx_idct16x16_256_add_half1d(row_idct_output, NULL, dest, stride, 1);
// Parallel idct to get the right 8 columns
- idct16x16_256_add_half1d(row_idct_output + 8 * 16, NULL, dest + 8, stride,
- 1);
+ vpx_idct16x16_256_add_half1d(row_idct_output + 8 * 16, NULL, dest + 8,
+ stride, 1);
} else {
int32_t row_idct_output[16 * 16];
// pass 1
// Parallel idct on the upper 8 rows
- highbd_idct16x16_256_add_half1d(input, row_idct_output, dest, stride, bd);
+ vpx_highbd_idct16x16_256_add_half1d(input, row_idct_output, dest, stride,
+ bd);
// Parallel idct on the lower 8 rows
- highbd_idct16x16_256_add_half1d(input + 8 * 16, row_idct_output + 8, dest,
- stride, bd);
+ vpx_highbd_idct16x16_256_add_half1d(input + 8 * 16, row_idct_output + 8,
+ dest, stride, bd);
// pass 2
// Parallel idct to get the left 8 columns
- highbd_idct16x16_256_add_half1d(row_idct_output, NULL, dest, stride, bd);
+ vpx_highbd_idct16x16_256_add_half1d(row_idct_output, NULL, dest, stride,
+ bd);
// Parallel idct to get the right 8 columns
- highbd_idct16x16_256_add_half1d(row_idct_output + 8 * 16, NULL, dest + 8,
- stride, bd);
+ vpx_highbd_idct16x16_256_add_half1d(row_idct_output + 8 * 16, NULL,
+ dest + 8, stride, bd);
}
}
@@ -1319,29 +1322,30 @@
// pass 1
// Parallel idct on the upper 8 rows
- idct16x16_38_add_half1d(input, row_idct_output, dest, stride, 1);
+ vpx_idct16x16_38_add_half1d(input, row_idct_output, dest, stride, 1);
// pass 2
// Parallel idct to get the left 8 columns
- idct16x16_38_add_half1d(row_idct_output, NULL, dest, stride, 1);
+ vpx_idct16x16_38_add_half1d(row_idct_output, NULL, dest, stride, 1);
// Parallel idct to get the right 8 columns
- idct16x16_38_add_half1d(row_idct_output + 16 * 8, NULL, dest + 8, stride,
- 1);
+ vpx_idct16x16_38_add_half1d(row_idct_output + 16 * 8, NULL, dest + 8,
+ stride, 1);
} else {
int32_t row_idct_output[16 * 16];
// pass 1
// Parallel idct on the upper 8 rows
- highbd_idct16x16_38_add_half1d(input, row_idct_output, dest, stride, bd);
+ vpx_highbd_idct16x16_38_add_half1d(input, row_idct_output, dest, stride,
+ bd);
// pass 2
// Parallel idct to get the left 8 columns
- highbd_idct16x16_38_add_half1d(row_idct_output, NULL, dest, stride, bd);
+ vpx_highbd_idct16x16_38_add_half1d(row_idct_output, NULL, dest, stride, bd);
// Parallel idct to get the right 8 columns
- highbd_idct16x16_38_add_half1d(row_idct_output + 16 * 8, NULL, dest + 8,
- stride, bd);
+ vpx_highbd_idct16x16_38_add_half1d(row_idct_output + 16 * 8, NULL, dest + 8,
+ stride, bd);
}
}
@@ -1354,30 +1358,30 @@
// pass 1
// Parallel idct on the upper 8 rows
- idct16x16_10_add_half1d_pass1(input, row_idct_output);
+ vpx_idct16x16_10_add_half1d_pass1(input, row_idct_output);
// pass 2
// Parallel idct to get the left 8 columns
- idct16x16_10_add_half1d_pass2(row_idct_output, NULL, dest, stride, 1);
+ vpx_idct16x16_10_add_half1d_pass2(row_idct_output, NULL, dest, stride, 1);
// Parallel idct to get the right 8 columns
- idct16x16_10_add_half1d_pass2(row_idct_output + 4 * 8, NULL, dest + 8,
- stride, 1);
+ vpx_idct16x16_10_add_half1d_pass2(row_idct_output + 4 * 8, NULL, dest + 8,
+ stride, 1);
} else {
int32_t row_idct_output[4 * 16];
// pass 1
// Parallel idct on the upper 8 rows
- highbd_idct16x16_10_add_half1d_pass1(input, row_idct_output);
+ vpx_highbd_idct16x16_10_add_half1d_pass1(input, row_idct_output);
// pass 2
// Parallel idct to get the left 8 columns
- highbd_idct16x16_10_add_half1d_pass2(row_idct_output, NULL, dest, stride,
- bd);
+ vpx_highbd_idct16x16_10_add_half1d_pass2(row_idct_output, NULL, dest,
+ stride, bd);
// Parallel idct to get the right 8 columns
- highbd_idct16x16_10_add_half1d_pass2(row_idct_output + 4 * 8, NULL,
- dest + 8, stride, bd);
+ vpx_highbd_idct16x16_10_add_half1d_pass2(row_idct_output + 4 * 8, NULL,
+ dest + 8, stride, bd);
}
}
--- a/vpx_dsp/arm/highbd_idct32x32_1024_add_neon.c
+++ b/vpx_dsp/arm/highbd_idct32x32_1024_add_neon.c
@@ -385,9 +385,9 @@
max);
}
-static INLINE void highbd_idct32_32_neon(const tran_low_t *input,
- uint8_t *const dest, const int stride,
- const int bd) {
+static INLINE void vpx_highbd_idct32_32_neon(const tran_low_t *input,
+ uint8_t *const dest,
+ const int stride, const int bd) {
int i, idct32_pass_loop;
int32_t trans_buf[32 * 8];
int32_t pass1[32 * 32];
@@ -640,8 +640,8 @@
void vpx_highbd_idct32x32_1024_add_neon(const tran_low_t *input, uint8_t *dest,
int stride, int bd) {
if (bd == 8) {
- idct32_32_neon(input, dest, stride, 1);
+ vpx_idct32_32_neon(input, dest, stride, 1);
} else {
- highbd_idct32_32_neon(input, dest, stride, bd);
+ vpx_highbd_idct32_32_neon(input, dest, stride, bd);
}
}
--- a/vpx_dsp/arm/highbd_idct32x32_135_add_neon.c
+++ b/vpx_dsp/arm/highbd_idct32x32_135_add_neon.c
@@ -95,8 +95,8 @@
// 13 84 93 103 110 125
// 14 98 106 115 127
// 15 117 128
-static void highbd_idct32_12_neon(const tran_low_t *const input,
- int32_t *output) {
+static void vpx_highbd_idct32_12_neon(const tran_low_t *const input,
+ int32_t *output) {
int32x4x2_t in[12], s1[32], s2[32], s3[32], s4[32], s5[32], s6[32], s7[32],
s8[32];
@@ -447,9 +447,9 @@
vst1q_s32(output + 4, s8[31].val[1]);
}
-static void highbd_idct32_16_neon(const int32_t *const input,
- uint16_t *const output, const int stride,
- const int bd) {
+static void vpx_highbd_idct32_16_neon(const int32_t *const input,
+ uint16_t *const output, const int stride,
+ const int bd) {
int32x4x2_t in[16], s1[32], s2[32], s3[32], s4[32], s5[32], s6[32], s7[32],
out[32];
@@ -733,11 +733,11 @@
if (bd == 8) {
int16_t temp[32 * 16];
int16_t *t = temp;
- idct32_12_neon(input, temp);
- idct32_12_neon(input + 32 * 8, temp + 8);
+ vpx_idct32_12_neon(input, temp);
+ vpx_idct32_12_neon(input + 32 * 8, temp + 8);
for (i = 0; i < 32; i += 8) {
- idct32_16_neon(t, dest, stride, 1);
+ vpx_idct32_16_neon(t, dest, stride, 1);
t += (16 * 8);
dest += 8;
}
@@ -745,11 +745,11 @@
uint16_t *dst = CONVERT_TO_SHORTPTR(dest);
int32_t temp[32 * 16];
int32_t *t = temp;
- highbd_idct32_12_neon(input, temp);
- highbd_idct32_12_neon(input + 32 * 8, temp + 8);
+ vpx_highbd_idct32_12_neon(input, temp);
+ vpx_highbd_idct32_12_neon(input + 32 * 8, temp + 8);
for (i = 0; i < 32; i += 8) {
- highbd_idct32_16_neon(t, dst, stride, bd);
+ vpx_highbd_idct32_16_neon(t, dst, stride, bd);
t += (16 * 8);
dst += 8;
}
--- a/vpx_dsp/arm/highbd_idct32x32_34_add_neon.c
+++ b/vpx_dsp/arm/highbd_idct32x32_34_add_neon.c
@@ -35,7 +35,7 @@
// 5 13 20 26
// 6 21 27 33
// 7 24 32
-static void highbd_idct32_6_neon(const tran_low_t *input, int32_t *output) {
+static void vpx_highbd_idct32_6_neon(const tran_low_t *input, int32_t *output) {
int32x4x2_t in[8], s1[32], s2[32], s3[32];
in[0].val[0] = vld1q_s32(input);
@@ -370,8 +370,8 @@
vst1q_s32(output, s3[31].val[1]);
}
-static void highbd_idct32_8_neon(const int32_t *input, uint16_t *output,
- int stride, const int bd) {
+static void vpx_highbd_idct32_8_neon(const int32_t *input, uint16_t *output,
+ int stride, const int bd) {
int32x4x2_t in[8], s1[32], s2[32], s3[32], out[32];
load_and_transpose_s32_8x8(input, 8, &in[0], &in[1], &in[2], &in[3], &in[4],
@@ -602,10 +602,10 @@
int16_t temp[32 * 8];
int16_t *t = temp;
- idct32_6_neon(input, t);
+ vpx_idct32_6_neon(input, t);
for (i = 0; i < 32; i += 8) {
- idct32_8_neon(t, dest, stride, 1);
+ vpx_idct32_8_neon(t, dest, stride, 1);
t += (8 * 8);
dest += 8;
}
@@ -614,10 +614,10 @@
int32_t temp[32 * 8];
int32_t *t = temp;
- highbd_idct32_6_neon(input, t);
+ vpx_highbd_idct32_6_neon(input, t);
for (i = 0; i < 32; i += 8) {
- highbd_idct32_8_neon(t, dst, stride, bd);
+ vpx_highbd_idct32_8_neon(t, dst, stride, bd);
t += (8 * 8);
dst += 8;
}
--- a/vpx_dsp/arm/idct16x16_add_neon.c
+++ b/vpx_dsp/arm/idct16x16_add_neon.c
@@ -121,9 +121,9 @@
highbd_idct16x16_add8x1(out[15], max, &dest, stride);
}
-void idct16x16_256_add_half1d(const void *const input, int16_t *output,
- void *const dest, const int stride,
- const int highbd_flag) {
+void vpx_idct16x16_256_add_half1d(const void *const input, int16_t *output,
+ void *const dest, const int stride,
+ const int highbd_flag) {
const int16x8_t cospis0 = vld1q_s16(kCospi);
const int16x8_t cospis1 = vld1q_s16(kCospi + 8);
const int16x4_t cospi_0_8_16_24 = vget_low_s16(cospis0);
@@ -323,9 +323,9 @@
}
}
-void idct16x16_38_add_half1d(const void *const input, int16_t *const output,
- void *const dest, const int stride,
- const int highbd_flag) {
+void vpx_idct16x16_38_add_half1d(const void *const input, int16_t *const output,
+ void *const dest, const int stride,
+ const int highbd_flag) {
const int16x8_t cospis0 = vld1q_s16(kCospi);
const int16x8_t cospis1 = vld1q_s16(kCospi + 8);
const int16x8_t cospisd0 = vaddq_s16(cospis0, cospis0);
@@ -484,7 +484,8 @@
}
}
-void idct16x16_10_add_half1d_pass1(const tran_low_t *input, int16_t *output) {
+void vpx_idct16x16_10_add_half1d_pass1(const tran_low_t *input,
+ int16_t *output) {
const int16x8_t cospis0 = vld1q_s16(kCospi);
const int16x8_t cospis1 = vld1q_s16(kCospi + 8);
const int16x8_t cospisd0 = vaddq_s16(cospis0, cospis0);
@@ -637,9 +638,10 @@
vst1_s16(output, out[15]);
}
-void idct16x16_10_add_half1d_pass2(const int16_t *input, int16_t *const output,
- void *const dest, const int stride,
- const int highbd_flag) {
+void vpx_idct16x16_10_add_half1d_pass2(const int16_t *input,
+ int16_t *const output, void *const dest,
+ const int stride,
+ const int highbd_flag) {
const int16x8_t cospis0 = vld1q_s16(kCospi);
const int16x8_t cospis1 = vld1q_s16(kCospi + 8);
const int16x8_t cospisd0 = vaddq_s16(cospis0, cospis0);
@@ -770,18 +772,19 @@
// pass 1
// Parallel idct on the upper 8 rows
- idct16x16_256_add_half1d(input, row_idct_output, dest, stride, 0);
+ vpx_idct16x16_256_add_half1d(input, row_idct_output, dest, stride, 0);
// Parallel idct on the lower 8 rows
- idct16x16_256_add_half1d(input + 8 * 16, row_idct_output + 8, dest, stride,
- 0);
+ vpx_idct16x16_256_add_half1d(input + 8 * 16, row_idct_output + 8, dest,
+ stride, 0);
// pass 2
// Parallel idct to get the left 8 columns
- idct16x16_256_add_half1d(row_idct_output, NULL, dest, stride, 0);
+ vpx_idct16x16_256_add_half1d(row_idct_output, NULL, dest, stride, 0);
// Parallel idct to get the right 8 columns
- idct16x16_256_add_half1d(row_idct_output + 16 * 8, NULL, dest + 8, stride, 0);
+ vpx_idct16x16_256_add_half1d(row_idct_output + 16 * 8, NULL, dest + 8, stride,
+ 0);
}
void vpx_idct16x16_38_add_neon(const tran_low_t *input, uint8_t *dest,
@@ -790,14 +793,15 @@
// pass 1
// Parallel idct on the upper 8 rows
- idct16x16_38_add_half1d(input, row_idct_output, dest, stride, 0);
+ vpx_idct16x16_38_add_half1d(input, row_idct_output, dest, stride, 0);
// pass 2
// Parallel idct to get the left 8 columns
- idct16x16_38_add_half1d(row_idct_output, NULL, dest, stride, 0);
+ vpx_idct16x16_38_add_half1d(row_idct_output, NULL, dest, stride, 0);
// Parallel idct to get the right 8 columns
- idct16x16_38_add_half1d(row_idct_output + 16 * 8, NULL, dest + 8, stride, 0);
+ vpx_idct16x16_38_add_half1d(row_idct_output + 16 * 8, NULL, dest + 8, stride,
+ 0);
}
void vpx_idct16x16_10_add_neon(const tran_low_t *input, uint8_t *dest,
@@ -806,13 +810,13 @@
// pass 1
// Parallel idct on the upper 8 rows
- idct16x16_10_add_half1d_pass1(input, row_idct_output);
+ vpx_idct16x16_10_add_half1d_pass1(input, row_idct_output);
// pass 2
// Parallel idct to get the left 8 columns
- idct16x16_10_add_half1d_pass2(row_idct_output, NULL, dest, stride, 0);
+ vpx_idct16x16_10_add_half1d_pass2(row_idct_output, NULL, dest, stride, 0);
// Parallel idct to get the right 8 columns
- idct16x16_10_add_half1d_pass2(row_idct_output + 4 * 8, NULL, dest + 8, stride,
- 0);
+ vpx_idct16x16_10_add_half1d_pass2(row_idct_output + 4 * 8, NULL, dest + 8,
+ stride, 0);
}
--- a/vpx_dsp/arm/idct32x32_135_add_neon.c
+++ b/vpx_dsp/arm/idct32x32_135_add_neon.c
@@ -87,7 +87,7 @@
// 13 84 93 103 110 125
// 14 98 106 115 127
// 15 117 128
-void idct32_12_neon(const tran_low_t *const input, int16_t *output) {
+void vpx_idct32_12_neon(const tran_low_t *const input, int16_t *output) {
int16x4_t tmp[8];
int16x8_t in[12], s1[32], s2[32], s3[32], s4[32], s5[32], s6[32], s7[32];
@@ -371,8 +371,8 @@
vst1q_s16(output, vsubq_s16(s7[0], s6[31]));
}
-void idct32_16_neon(const int16_t *const input, uint8_t *const output,
- const int stride, const int highbd_flag) {
+void vpx_idct32_16_neon(const int16_t *const input, uint8_t *const output,
+ const int stride, const int highbd_flag) {
int16x8_t in[16], s1[32], s2[32], s3[32], s4[32], s5[32], s6[32], s7[32],
out[32];
@@ -666,11 +666,11 @@
int16_t temp[32 * 16];
int16_t *t = temp;
- idct32_12_neon(input, temp);
- idct32_12_neon(input + 32 * 8, temp + 8);
+ vpx_idct32_12_neon(input, temp);
+ vpx_idct32_12_neon(input + 32 * 8, temp + 8);
for (i = 0; i < 32; i += 8) {
- idct32_16_neon(t, dest, stride, 0);
+ vpx_idct32_16_neon(t, dest, stride, 0);
t += (16 * 8);
dest += 8;
}
--- a/vpx_dsp/arm/idct32x32_34_add_neon.c
+++ b/vpx_dsp/arm/idct32x32_34_add_neon.c
@@ -35,7 +35,7 @@
// 5 13 20 26
// 6 21 27 33
// 7 24 32
-void idct32_6_neon(const tran_low_t *input, int16_t *output) {
+void vpx_idct32_6_neon(const tran_low_t *input, int16_t *output) {
int16x8_t in[8], s1[32], s2[32], s3[32];
in[0] = load_tran_low_to_s16q(input);
@@ -265,8 +265,8 @@
vst1q_s16(output, vsubq_s16(s1[0], s2[31]));
}
-void idct32_8_neon(const int16_t *input, uint8_t *output, int stride,
- const int highbd_flag) {
+void vpx_idct32_8_neon(const int16_t *input, uint8_t *output, int stride,
+ const int highbd_flag) {
int16x8_t in[8], s1[32], s2[32], s3[32], out[32];
load_and_transpose_s16_8x8(input, 8, &in[0], &in[1], &in[2], &in[3], &in[4],
@@ -506,10 +506,10 @@
int16_t temp[32 * 8];
int16_t *t = temp;
- idct32_6_neon(input, t);
+ vpx_idct32_6_neon(input, t);
for (i = 0; i < 32; i += 8) {
- idct32_8_neon(t, dest, stride, 0);
+ vpx_idct32_8_neon(t, dest, stride, 0);
t += (8 * 8);
dest += 8;
}
--- a/vpx_dsp/arm/idct32x32_add_neon.c
+++ b/vpx_dsp/arm/idct32x32_add_neon.c
@@ -508,8 +508,8 @@
q[7]);
}
-void idct32_32_neon(const tran_low_t *input, uint8_t *dest, const int stride,
- const int highbd_flag) {
+void vpx_idct32_32_neon(const tran_low_t *input, uint8_t *dest,
+ const int stride, const int highbd_flag) {
int i, idct32_pass_loop;
int16_t trans_buf[32 * 8];
int16_t pass1[32 * 32];
@@ -771,5 +771,5 @@
void vpx_idct32x32_1024_add_neon(const tran_low_t *input, uint8_t *dest,
int stride) {
- idct32_32_neon(input, dest, stride, 0);
+ vpx_idct32_32_neon(input, dest, stride, 0);
}
--- a/vpx_dsp/arm/idct_neon.h
+++ b/vpx_dsp/arm/idct_neon.h
@@ -963,29 +963,30 @@
highbd_idct16x16_add8x1(o[15], max, &dest, stride);
}
-void idct16x16_256_add_half1d(const void *const input, int16_t *output,
- void *const dest, const int stride,
- const int highbd_flag);
+void vpx_idct16x16_256_add_half1d(const void *const input, int16_t *output,
+ void *const dest, const int stride,
+ const int highbd_flag);
-void idct16x16_38_add_half1d(const void *const input, int16_t *const output,
- void *const dest, const int stride,
- const int highbd_flag);
+void vpx_idct16x16_38_add_half1d(const void *const input, int16_t *const output,
+ void *const dest, const int stride,
+ const int highbd_flag);
-void idct16x16_10_add_half1d_pass1(const tran_low_t *input, int16_t *output);
+void vpx_idct16x16_10_add_half1d_pass1(const tran_low_t *input,
+ int16_t *output);
-void idct16x16_10_add_half1d_pass2(const int16_t *input, int16_t *const output,
- void *const dest, const int stride,
- const int highbd_flag);
+void vpx_idct16x16_10_add_half1d_pass2(const int16_t *input,
+ int16_t *const output, void *const dest,
+ const int stride, const int highbd_flag);
-void idct32_32_neon(const tran_low_t *input, uint8_t *dest, const int stride,
- const int highbd_flag);
+void vpx_idct32_32_neon(const tran_low_t *input, uint8_t *dest,
+ const int stride, const int highbd_flag);
-void idct32_12_neon(const tran_low_t *const input, int16_t *output);
-void idct32_16_neon(const int16_t *const input, uint8_t *const output,
- const int stride, const int highbd_flag);
+void vpx_idct32_12_neon(const tran_low_t *const input, int16_t *output);
+void vpx_idct32_16_neon(const int16_t *const input, uint8_t *const output,
+ const int stride, const int highbd_flag);
-void idct32_6_neon(const tran_low_t *input, int16_t *output);
-void idct32_8_neon(const int16_t *input, uint8_t *output, int stride,
- const int highbd_flag);
+void vpx_idct32_6_neon(const tran_low_t *input, int16_t *output);
+void vpx_idct32_8_neon(const int16_t *input, uint8_t *output, int stride,
+ const int highbd_flag);
#endif // VPX_DSP_ARM_IDCT_NEON_H_