ref: 97ec51233d1068e010e119dbf8db2300df63de04
parent: f0f00251ea9fa7675f5e155c6a85a9c852d627cd
author: Jingning Han <[email protected]>
date: Wed Jul 22 05:39:17 EDT 2015
Take out VP9_ prefix from mips/msa macros

    The msa macros are locally used and should not be named with a VP9
    prefix.

    Change-Id: I2c9c746c4027383c16b9ab12b77b4e70e7e7d206
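    For reference, a rename of this shape can also be applied mechanically.
    The script below is a minimal Python sketch, not part of the patch; it
    assumes it is run from the libvpx repository root and lists only the
    macros and files visible in the hunks below.

        #!/usr/bin/env python3
        # Illustrative sketch only -- the actual change is the diff below.
        import re
        from pathlib import Path

        # Macro names taken from the hunks below (prefix dropped, bodies untouched).
        MACROS = [
            "DOTP_CONST_PAIR", "DOTP_CONST_PAIR_W", "DOT_ADD_SUB_SRARI_PCK",
            "DOT_SHIFT_RIGHT_PCK_H", "FDCT8x16_EVEN", "FDCT8x16_ODD",
            "FDCT32_POSTPROC_2V_POS_H", "FDCT32_POSTPROC_NEG_W",
            "FDCT_POSTPROC_2V_NEG_H", "LD_HADD", "MADD_BF", "MADD_SHORT",
            "SRLI_AVE_S_4V_H",
        ]
        # Longest names first so VP9_DOTP_CONST_PAIR_W is not matched by the
        # shorter VP9_DOTP_CONST_PAIR alternative.
        PATTERN = re.compile(
            r"\bVP9_(" + "|".join(sorted(MACROS, key=len, reverse=True)) + r")\b")

        FILES = [
            "vp9/encoder/mips/msa/vp9_fdct16x16_msa.c",
            "vp9/encoder/mips/msa/vp9_fdct32x32_msa.c",
            "vp9/encoder/mips/msa/vp9_fdct8x8_msa.c",
            "vp9/encoder/mips/msa/vp9_fdct_msa.h",
        ]

        for name in FILES:
            path = Path(name)
            path.write_text(PATTERN.sub(r"\1", path.read_text()))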
--- a/vp9/encoder/mips/msa/vp9_fdct16x16_msa.c
+++ b/vp9/encoder/mips/msa/vp9_fdct16x16_msa.c
@@ -37,8 +37,8 @@
SLLI_4V(in12, in13, in14, in15, 2);
ADD4(in0, in15, in1, in14, in2, in13, in3, in12, tmp0, tmp1, tmp2, tmp3);
ADD4(in4, in11, in5, in10, in6, in9, in7, in8, tmp4, tmp5, tmp6, tmp7);
- VP9_FDCT8x16_EVEN(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7,
- tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
+ FDCT8x16_EVEN(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7,
+ tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
ST_SH8(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp_ptr, 32);
SUB4(in0, in15, in1, in14, in2, in13, in3, in12, in15, in14, in13, in12);
SUB4(in4, in11, in5, in10, in6, in9, in7, in8, in11, in10, in9, in8);
@@ -50,13 +50,13 @@
ILVR_H2_SH(in10, in13, in11, in12, vec3, vec5);
cnst4 = __msa_splati_h(coeff, 0);
- stp25 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec2, vec3, cnst4);
+ stp25 = DOT_SHIFT_RIGHT_PCK_H(vec2, vec3, cnst4);
cnst5 = __msa_splati_h(coeff, 1);
cnst5 = __msa_ilvev_h(cnst5, cnst4);
- stp22 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec2, vec3, cnst5);
- stp24 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec4, vec5, cnst4);
- stp23 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec4, vec5, cnst5);
+ stp22 = DOT_SHIFT_RIGHT_PCK_H(vec2, vec3, cnst5);
+ stp24 = DOT_SHIFT_RIGHT_PCK_H(vec4, vec5, cnst4);
+ stp23 = DOT_SHIFT_RIGHT_PCK_H(vec4, vec5, cnst5);
/* stp2 */
BUTTERFLY_4(in8, in9, stp22, stp23, stp30, stp31, stp32, stp33);
@@ -65,11 +65,11 @@
ILVR_H2_SH(stp36, stp31, stp35, stp32, vec3, vec5);
SPLATI_H2_SH(coeff, 2, 3, cnst0, cnst1);
cnst0 = __msa_ilvev_h(cnst0, cnst1);
- stp26 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec2, vec3, cnst0);
+ stp26 = DOT_SHIFT_RIGHT_PCK_H(vec2, vec3, cnst0);
cnst0 = __msa_splati_h(coeff, 4);
cnst1 = __msa_ilvev_h(cnst1, cnst0);
- stp21 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec2, vec3, cnst1);
+ stp21 = DOT_SHIFT_RIGHT_PCK_H(vec2, vec3, cnst1);
BUTTERFLY_4(stp30, stp37, stp26, stp21, in8, in15, in14, in9);
ILVRL_H2_SH(in15, in8, vec1, vec0);
@@ -76,12 +76,12 @@
SPLATI_H2_SH(coeff1, 0, 1, cnst0, cnst1);
cnst0 = __msa_ilvev_h(cnst0, cnst1);
- in8 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0);
+ in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0);
ST_SH(in8, tmp_ptr);
cnst0 = __msa_splati_h(coeff2, 0);
cnst0 = __msa_ilvev_h(cnst1, cnst0);
- in8 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0);
+ in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0);
ST_SH(in8, tmp_ptr + 224);
ILVRL_H2_SH(in14, in9, vec1, vec0);
@@ -88,22 +88,22 @@
SPLATI_H2_SH(coeff1, 2, 3, cnst0, cnst1);
cnst1 = __msa_ilvev_h(cnst1, cnst0);
- in8 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst1);
+ in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst1);
ST_SH(in8, tmp_ptr + 128);
cnst1 = __msa_splati_h(coeff2, 2);
cnst0 = __msa_ilvev_h(cnst0, cnst1);
- in8 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0);
+ in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0);
ST_SH(in8, tmp_ptr + 96);
SPLATI_H2_SH(coeff, 2, 5, cnst0, cnst1);
cnst1 = __msa_ilvev_h(cnst1, cnst0);
- stp25 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec4, vec5, cnst1);
+ stp25 = DOT_SHIFT_RIGHT_PCK_H(vec4, vec5, cnst1);
cnst1 = __msa_splati_h(coeff, 3);
cnst1 = __msa_ilvev_h(cnst0, cnst1);
- stp22 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec4, vec5, cnst1);
+ stp22 = DOT_SHIFT_RIGHT_PCK_H(vec4, vec5, cnst1);
/* stp4 */
ADD2(stp34, stp25, stp33, stp22, in13, in10);
@@ -111,12 +111,12 @@
ILVRL_H2_SH(in13, in10, vec1, vec0);
SPLATI_H2_SH(coeff1, 4, 5, cnst0, cnst1);
cnst0 = __msa_ilvev_h(cnst0, cnst1);
- in8 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0);
+ in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0);
ST_SH(in8, tmp_ptr + 64);
cnst0 = __msa_splati_h(coeff2, 1);
cnst0 = __msa_ilvev_h(cnst1, cnst0);
- in8 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0);
+ in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0);
ST_SH(in8, tmp_ptr + 160);
SUB2(stp34, stp25, stp33, stp22, in12, in11);
@@ -124,12 +124,12 @@
SPLATI_H2_SH(coeff1, 6, 7, cnst0, cnst1);
cnst1 = __msa_ilvev_h(cnst1, cnst0);
- in8 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst1);
+ in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst1);
ST_SH(in8, tmp_ptr + 192);
cnst1 = __msa_splati_h(coeff2, 3);
cnst0 = __msa_ilvev_h(cnst0, cnst1);
- in8 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0);
+ in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0);
ST_SH(in8, tmp_ptr + 32);
}
@@ -156,11 +156,11 @@
in12, in13, in14, in15, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5,
tmp6, tmp7, in8, in9, in10, in11, in12, in13, in14, in15);
ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, input, 16);
- VP9_FDCT8x16_EVEN(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7,
- tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
+ FDCT8x16_EVEN(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7,
+ tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
LD_SH8(input, 16, in8, in9, in10, in11, in12, in13, in14, in15);
- VP9_FDCT8x16_ODD(in8, in9, in10, in11, in12, in13, in14, in15,
- in0, in1, in2, in3, in4, in5, in6, in7);
+ FDCT8x16_ODD(in8, in9, in10, in11, in12, in13, in14, in15,
+ in0, in1, in2, in3, in4, in5, in6, in7);
TRANSPOSE8x8_SH_SH(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3,
tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3);
ST_SH8(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3, output, 16);
@@ -188,10 +188,10 @@
void vp9_fdct16x16_1_msa(const int16_t *input, int16_t *out, int32_t stride) {
out[1] = 0;
- out[0] = VP9_LD_HADD(input, stride);
- out[0] += VP9_LD_HADD(input + 8, stride);
- out[0] += VP9_LD_HADD(input + 16 * 8, stride);
- out[0] += VP9_LD_HADD(input + 16 * 8 + 8, stride);
+ out[0] = LD_HADD(input, stride);
+ out[0] += LD_HADD(input + 8, stride);
+ out[0] += LD_HADD(input + 16 * 8, stride);
+ out[0] += LD_HADD(input + 16 * 8 + 8, stride);
out[0] >>= 1;
}
@@ -211,7 +211,7 @@
/* stage 1 */
LD_SW2(const0, 4, k0, k1);
LD_SW2(const0 + 8, 4, k2, k3);
- VP9_MADD_BF(r15, r0, r7, r8, k0, k1, k2, k3, g0, g1, g2, g3);
+ MADD_BF(r15, r0, r7, r8, k0, k1, k2, k3, g0, g1, g2, g3);
r3 = LD_SH(input + 3 * stride);
r4 = LD_SH(input + 4 * stride);
@@ -221,7 +221,7 @@
LD_SW2(const0 + 4 * 4, 4, k0, k1);
LD_SW2(const0 + 4 * 6, 4, k2, k3);
- VP9_MADD_BF(r11, r4, r3, r12, k0, k1, k2, k3, g8, g9, g10, g11);
+ MADD_BF(r11, r4, r3, r12, k0, k1, k2, k3, g8, g9, g10, g11);
/* stage 2 */
BUTTERFLY_4(g0, g2, g10, g8, tp0, tp2, tp3, tp1);
@@ -230,7 +230,7 @@
LD_SW2(const0 + 4 * 8, 4, k0, k1);
k2 = LD_SW(const0 + 4 * 10);
- VP9_MADD_BF(g1, g3, g9, g11, k0, k1, k2, k0, h0, h1, h2, h3);
+ MADD_BF(g1, g3, g9, g11, k0, k1, k2, k0, h0, h1, h2, h3);
ST_SH2(h0, h1, int_buf + 8 * 8, 8);
ST_SH2(h3, h2, int_buf + 12 * 8, 8);
@@ -243,7 +243,7 @@
LD_SW2(const0 + 4 * 11, 4, k0, k1);
LD_SW2(const0 + 4 * 13, 4, k2, k3);
- VP9_MADD_BF(r9, r6, r1, r14, k0, k1, k2, k3, g0, g1, g2, g3);
+ MADD_BF(r9, r6, r1, r14, k0, k1, k2, k3, g0, g1, g2, g3);
ST_SH2(g1, g3, int_buf + 3 * 8, 4 * 8);
@@ -255,7 +255,7 @@
LD_SW2(const0 + 4 * 15, 4, k0, k1);
LD_SW2(const0 + 4 * 17, 4, k2, k3);
- VP9_MADD_BF(r13, r2, r5, r10, k0, k1, k2, k3, h0, h1, h2, h3);
+ MADD_BF(r13, r2, r5, r10, k0, k1, k2, k3, h0, h1, h2, h3);
ST_SH2(h1, h3, int_buf + 11 * 8, 4 * 8);
@@ -276,7 +276,7 @@
LD_SH2(int_buf + 11 * 8, 4 * 8, g5, g7);
LD_SW2(const0 + 4 * 19, 4, k0, k1);
k2 = LD_SW(const0 + 4 * 21);
- VP9_MADD_BF(g7, g5, g15, g13, k0, k1, k2, k0, h4, h5, h6, h7);
+ MADD_BF(g7, g5, g15, g13, k0, k1, k2, k0, h4, h5, h6, h7);
tp0 = LD_SH(int_buf + 4 * 8);
tp1 = LD_SH(int_buf + 5 * 8);
@@ -284,7 +284,7 @@
tp2 = LD_SH(int_buf + 14 * 8);
LD_SW2(const0 + 4 * 22, 4, k0, k1);
k2 = LD_SW(const0 + 4 * 24);
- VP9_MADD_BF(tp0, tp1, tp2, tp3, k0, k1, k2, k0, out4, out6, out5, out7);
+ MADD_BF(tp0, tp1, tp2, tp3, k0, k1, k2, k0, out4, out6, out5, out7);
out4 = -out4;
ST_SH(out4, (out + 3 * 16));
ST_SH(out5, (out_ptr + 4 * 16));
@@ -291,7 +291,7 @@
h1 = LD_SH(int_buf + 9 * 8);
h3 = LD_SH(int_buf + 12 * 8);
- VP9_MADD_BF(h1, h3, h5, h7, k0, k1, k2, k0, out12, out14, out13, out15);
+ MADD_BF(h1, h3, h5, h7, k0, k1, k2, k0, out12, out14, out13, out15);
out13 = -out13;
ST_SH(out12, (out + 2 * 16));
ST_SH(out13, (out_ptr + 5 * 16));
@@ -317,19 +317,19 @@
/* stage 4 */
LD_SW2(const0 + 4 * 25, 4, k0, k1);
LD_SW2(const0 + 4 * 27, 4, k2, k3);
- VP9_MADD_SHORT(h10, h11, k1, k2, out2, out3);
+ MADD_SHORT(h10, h11, k1, k2, out2, out3);
ST_SH(out2, (out + 7 * 16));
ST_SH(out3, (out_ptr));
- VP9_MADD_SHORT(out6, out7, k0, k3, out6, out7);
+ MADD_SHORT(out6, out7, k0, k3, out6, out7);
ST_SH(out6, (out + 4 * 16));
ST_SH(out7, (out_ptr + 3 * 16));
- VP9_MADD_SHORT(out10, out11, k0, k3, out10, out11);
+ MADD_SHORT(out10, out11, k0, k3, out10, out11);
ST_SH(out10, (out + 6 * 16));
ST_SH(out11, (out_ptr + 16));
- VP9_MADD_SHORT(out14, out15, k1, k2, out14, out15);
+ MADD_SHORT(out14, out15, k1, k2, out14, out15);
ST_SH(out14, (out + 5 * 16));
ST_SH(out15, (out_ptr + 2 * 16));
}
@@ -342,10 +342,10 @@
LD_SH8(input, 16, l0, l1, l2, l3, l4, l5, l6, l7);
TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7,
r0, r1, r2, r3, r4, r5, r6, r7);
- VP9_FDCT_POSTPROC_2V_NEG_H(r0, r1);
- VP9_FDCT_POSTPROC_2V_NEG_H(r2, r3);
- VP9_FDCT_POSTPROC_2V_NEG_H(r4, r5);
- VP9_FDCT_POSTPROC_2V_NEG_H(r6, r7);
+ FDCT_POSTPROC_2V_NEG_H(r0, r1);
+ FDCT_POSTPROC_2V_NEG_H(r2, r3);
+ FDCT_POSTPROC_2V_NEG_H(r4, r5);
+ FDCT_POSTPROC_2V_NEG_H(r6, r7);
ST_SH8(r0, r1, r2, r3, r4, r5, r6, r7, out, 8);
out += 64;
@@ -352,10 +352,10 @@
LD_SH8(input + 8, 16, l8, l9, l10, l11, l12, l13, l14, l15);
TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15,
r8, r9, r10, r11, r12, r13, r14, r15);
- VP9_FDCT_POSTPROC_2V_NEG_H(r8, r9);
- VP9_FDCT_POSTPROC_2V_NEG_H(r10, r11);
- VP9_FDCT_POSTPROC_2V_NEG_H(r12, r13);
- VP9_FDCT_POSTPROC_2V_NEG_H(r14, r15);
+ FDCT_POSTPROC_2V_NEG_H(r8, r9);
+ FDCT_POSTPROC_2V_NEG_H(r10, r11);
+ FDCT_POSTPROC_2V_NEG_H(r12, r13);
+ FDCT_POSTPROC_2V_NEG_H(r14, r15);
ST_SH8(r8, r9, r10, r11, r12, r13, r14, r15, out, 8);
out += 64;
@@ -364,10 +364,10 @@
LD_SH8(input, 16, l0, l1, l2, l3, l4, l5, l6, l7);
TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7,
r0, r1, r2, r3, r4, r5, r6, r7);
- VP9_FDCT_POSTPROC_2V_NEG_H(r0, r1);
- VP9_FDCT_POSTPROC_2V_NEG_H(r2, r3);
- VP9_FDCT_POSTPROC_2V_NEG_H(r4, r5);
- VP9_FDCT_POSTPROC_2V_NEG_H(r6, r7);
+ FDCT_POSTPROC_2V_NEG_H(r0, r1);
+ FDCT_POSTPROC_2V_NEG_H(r2, r3);
+ FDCT_POSTPROC_2V_NEG_H(r4, r5);
+ FDCT_POSTPROC_2V_NEG_H(r6, r7);
ST_SH8(r0, r1, r2, r3, r4, r5, r6, r7, out, 8);
out += 64;
@@ -374,10 +374,10 @@
LD_SH8(input + 8, 16, l8, l9, l10, l11, l12, l13, l14, l15);
TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15,
r8, r9, r10, r11, r12, r13, r14, r15);
- VP9_FDCT_POSTPROC_2V_NEG_H(r8, r9);
- VP9_FDCT_POSTPROC_2V_NEG_H(r10, r11);
- VP9_FDCT_POSTPROC_2V_NEG_H(r12, r13);
- VP9_FDCT_POSTPROC_2V_NEG_H(r14, r15);
+ FDCT_POSTPROC_2V_NEG_H(r8, r9);
+ FDCT_POSTPROC_2V_NEG_H(r10, r11);
+ FDCT_POSTPROC_2V_NEG_H(r12, r13);
+ FDCT_POSTPROC_2V_NEG_H(r14, r15);
ST_SH8(r8, r9, r10, r11, r12, r13, r14, r15, out, 8);
}
@@ -396,7 +396,7 @@
/* stage 1 */
LD_SW2(const0, 4, k0, k1);
LD_SW2(const0 + 4 * 2, 4, k2, k3);
- VP9_MADD_BF(r15, r0, r7, r8, k0, k1, k2, k3, g0, g1, g2, g3);
+ MADD_BF(r15, r0, r7, r8, k0, k1, k2, k3, g0, g1, g2, g3);
r3 = LD_SH(input + 3 * 8);
r4 = LD_SH(input + 4 * 8);
@@ -405,7 +405,7 @@
LD_SW2(const0 + 4 * 4, 4, k0, k1);
LD_SW2(const0 + 4 * 6, 4, k2, k3);
- VP9_MADD_BF(r11, r4, r3, r12, k0, k1, k2, k3, g8, g9, g10, g11);
+ MADD_BF(r11, r4, r3, r12, k0, k1, k2, k3, g8, g9, g10, g11);
/* stage 2 */
BUTTERFLY_4(g0, g2, g10, g8, tp0, tp2, tp3, tp1);
@@ -414,7 +414,7 @@
LD_SW2(const0 + 4 * 8, 4, k0, k1);
k2 = LD_SW(const0 + 4 * 10);
- VP9_MADD_BF(g1, g3, g9, g11, k0, k1, k2, k0, h0, h1, h2, h3);
+ MADD_BF(g1, g3, g9, g11, k0, k1, k2, k0, h0, h1, h2, h3);
ST_SH2(h0, h3, int_buf + 8 * 8, 4 * 8);
ST_SH2(h1, h2, int_buf + 9 * 8, 4 * 8);
@@ -425,7 +425,7 @@
LD_SW2(const0 + 4 * 11, 4, k0, k1);
LD_SW2(const0 + 4 * 13, 4, k2, k3);
- VP9_MADD_BF(r9, r6, r1, r14, k0, k1, k2, k3, g0, g1, g2, g3);
+ MADD_BF(r9, r6, r1, r14, k0, k1, k2, k3, g0, g1, g2, g3);
ST_SH2(g1, g3, int_buf + 3 * 8, 4 * 8);
r2 = LD_SH(input + 2 * 8);
@@ -435,7 +435,7 @@
LD_SW2(const0 + 4 * 15, 4, k0, k1);
LD_SW2(const0 + 4 * 17, 4, k2, k3);
- VP9_MADD_BF(r13, r2, r5, r10, k0, k1, k2, k3, h0, h1, h2, h3);
+ MADD_BF(r13, r2, r5, r10, k0, k1, k2, k3, h0, h1, h2, h3);
ST_SH2(h1, h3, int_buf + 11 * 8, 4 * 8);
BUTTERFLY_4(h0, h2, g2, g0, tp0, tp1, tp2, tp3);
ST_SH4(tp0, tp1, tp2, tp3, int_buf + 2 * 8, 4 * 8);
@@ -457,7 +457,7 @@
LD_SW2(const0 + 4 * 19, 4, k0, k1);
k2 = LD_SW(const0 + 4 * 21);
- VP9_MADD_BF(g7, g5, g15, g13, k0, k1, k2, k0, h4, h5, h6, h7);
+ MADD_BF(g7, g5, g15, g13, k0, k1, k2, k0, h4, h5, h6, h7);
tp0 = LD_SH(int_buf + 4 * 8);
tp1 = LD_SH(int_buf + 5 * 8);
@@ -466,7 +466,7 @@
LD_SW2(const0 + 4 * 22, 4, k0, k1);
k2 = LD_SW(const0 + 4 * 24);
- VP9_MADD_BF(tp0, tp1, tp2, tp3, k0, k1, k2, k0, out4, out6, out5, out7);
+ MADD_BF(tp0, tp1, tp2, tp3, k0, k1, k2, k0, out4, out6, out5, out7);
out4 = -out4;
ST_SH(out4, (out + 3 * 16));
ST_SH(out5, (out_ptr + 4 * 16));
@@ -473,7 +473,7 @@
h1 = LD_SH(int_buf + 9 * 8);
h3 = LD_SH(int_buf + 12 * 8);
- VP9_MADD_BF(h1, h3, h5, h7, k0, k1, k2, k0, out12, out14, out13, out15);
+ MADD_BF(h1, h3, h5, h7, k0, k1, k2, k0, out12, out14, out13, out15);
out13 = -out13;
ST_SH(out12, (out + 2 * 16));
ST_SH(out13, (out_ptr + 5 * 16));
@@ -498,19 +498,19 @@
/* stage 4 */
LD_SW2(const0 + 4 * 25, 4, k0, k1);
LD_SW2(const0 + 4 * 27, 4, k2, k3);
- VP9_MADD_SHORT(h10, h11, k1, k2, out2, out3);
+ MADD_SHORT(h10, h11, k1, k2, out2, out3);
ST_SH(out2, (out + 7 * 16));
ST_SH(out3, (out_ptr));
- VP9_MADD_SHORT(out6, out7, k0, k3, out6, out7);
+ MADD_SHORT(out6, out7, k0, k3, out6, out7);
ST_SH(out6, (out + 4 * 16));
ST_SH(out7, (out_ptr + 3 * 16));
- VP9_MADD_SHORT(out10, out11, k0, k3, out10, out11);
+ MADD_SHORT(out10, out11, k0, k3, out10, out11);
ST_SH(out10, (out + 6 * 16));
ST_SH(out11, (out_ptr + 16));
- VP9_MADD_SHORT(out14, out15, k1, k2, out14, out15);
+ MADD_SHORT(out14, out15, k1, k2, out14, out15);
ST_SH(out14, (out + 5 * 16));
ST_SH(out15, (out_ptr + 2 * 16));
}
@@ -556,14 +556,14 @@
in0, in1, in2, in3, in4, in5, in6, in7);
TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15,
in8, in9, in10, in11, in12, in13, in14, in15);
- VP9_FDCT_POSTPROC_2V_NEG_H(in0, in1);
- VP9_FDCT_POSTPROC_2V_NEG_H(in2, in3);
- VP9_FDCT_POSTPROC_2V_NEG_H(in4, in5);
- VP9_FDCT_POSTPROC_2V_NEG_H(in6, in7);
- VP9_FDCT_POSTPROC_2V_NEG_H(in8, in9);
- VP9_FDCT_POSTPROC_2V_NEG_H(in10, in11);
- VP9_FDCT_POSTPROC_2V_NEG_H(in12, in13);
- VP9_FDCT_POSTPROC_2V_NEG_H(in14, in15);
+ FDCT_POSTPROC_2V_NEG_H(in0, in1);
+ FDCT_POSTPROC_2V_NEG_H(in2, in3);
+ FDCT_POSTPROC_2V_NEG_H(in4, in5);
+ FDCT_POSTPROC_2V_NEG_H(in6, in7);
+ FDCT_POSTPROC_2V_NEG_H(in8, in9);
+ FDCT_POSTPROC_2V_NEG_H(in10, in11);
+ FDCT_POSTPROC_2V_NEG_H(in12, in13);
+ FDCT_POSTPROC_2V_NEG_H(in14, in15);
BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7,
in8, in9, in10, in11, in12, in13, in14, in15,
tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7,
@@ -570,12 +570,12 @@
in8, in9, in10, in11, in12, in13, in14, in15);
temp = intermediate;
ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, temp, 16);
- VP9_FDCT8x16_EVEN(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7,
- tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
+ FDCT8x16_EVEN(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7,
+ tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
temp = intermediate;
LD_SH8(temp, 16, in8, in9, in10, in11, in12, in13, in14, in15);
- VP9_FDCT8x16_ODD(in8, in9, in10, in11, in12, in13, in14, in15,
- in0, in1, in2, in3, in4, in5, in6, in7);
+ FDCT8x16_ODD(in8, in9, in10, in11, in12, in13, in14, in15,
+ in0, in1, in2, in3, in4, in5, in6, in7);
TRANSPOSE8x8_SH_SH(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3,
tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3);
ST_SH8(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3, out, 16);
--- a/vp9/encoder/mips/msa/vp9_fdct32x32_msa.c
+++ b/vp9/encoder/mips/msa/vp9_fdct32x32_msa.c
@@ -75,57 +75,57 @@
/* Stage 3 */
ADD4(vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, in0, in1, in2, in3);
BUTTERFLY_4(in0, in1, in2, in3, temp0, in4, in1, in0);
- VP9_DOTP_CONST_PAIR(temp0, in4, cospi_16_64, cospi_16_64, temp1, temp0);
- VP9_FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
+ DOTP_CONST_PAIR(temp0, in4, cospi_16_64, cospi_16_64, temp1, temp0);
+ FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
ST_SH(temp0, temp);
ST_SH(temp1, temp + 512);
- VP9_DOTP_CONST_PAIR(in0, in1, cospi_24_64, cospi_8_64, temp1, temp0);
- VP9_FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
+ DOTP_CONST_PAIR(in0, in1, cospi_24_64, cospi_8_64, temp1, temp0);
+ FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
ST_SH(temp0, temp + 256);
ST_SH(temp1, temp + 768);
SUB4(vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, vec7, vec6, vec5, vec4);
- VP9_DOTP_CONST_PAIR(vec6, vec5, cospi_16_64, cospi_16_64, vec5, vec6);
+ DOTP_CONST_PAIR(vec6, vec5, cospi_16_64, cospi_16_64, vec5, vec6);
ADD2(vec4, vec5, vec7, vec6, vec0, vec1);
- VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_28_64, cospi_4_64, temp1, temp0);
- VP9_FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
+ DOTP_CONST_PAIR(vec1, vec0, cospi_28_64, cospi_4_64, temp1, temp0);
+ FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
ST_SH(temp0, temp + 128);
ST_SH(temp1, temp + 896);
SUB2(vec4, vec5, vec7, vec6, vec4, vec7);
- VP9_DOTP_CONST_PAIR(vec7, vec4, cospi_12_64, cospi_20_64, temp1, temp0);
- VP9_FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
+ DOTP_CONST_PAIR(vec7, vec4, cospi_12_64, cospi_20_64, temp1, temp0);
+ FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
ST_SH(temp0, temp + 640);
ST_SH(temp1, temp + 384);
- VP9_DOTP_CONST_PAIR(in13, in10, cospi_16_64, cospi_16_64, vec2, vec5);
- VP9_DOTP_CONST_PAIR(in12, in11, cospi_16_64, cospi_16_64, vec3, vec4);
+ DOTP_CONST_PAIR(in13, in10, cospi_16_64, cospi_16_64, vec2, vec5);
+ DOTP_CONST_PAIR(in12, in11, cospi_16_64, cospi_16_64, vec3, vec4);
ADD4(in8, vec3, in9, vec2, in14, vec5, in15, vec4, in0, vec1, vec6, in2);
- VP9_DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3);
+ DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3);
ADD2(in0, in1, in2, in3, vec0, vec7);
- VP9_DOTP_CONST_PAIR(vec7, vec0, cospi_30_64, cospi_2_64, temp1, temp0);
- VP9_FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
+ DOTP_CONST_PAIR(vec7, vec0, cospi_30_64, cospi_2_64, temp1, temp0);
+ FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
ST_SH(temp0, temp + 64);
ST_SH(temp1, temp + 960);
SUB2(in0, in1, in2, in3, in0, in2);
- VP9_DOTP_CONST_PAIR(in2, in0, cospi_14_64, cospi_18_64, temp1, temp0);
- VP9_FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
+ DOTP_CONST_PAIR(in2, in0, cospi_14_64, cospi_18_64, temp1, temp0);
+ FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
ST_SH(temp0, temp + 576);
ST_SH(temp1, temp + 448);
SUB2(in9, vec2, in14, vec5, vec2, vec5);
- VP9_DOTP_CONST_PAIR((-vec2), vec5, cospi_24_64, cospi_8_64, in2, in1);
+ DOTP_CONST_PAIR((-vec2), vec5, cospi_24_64, cospi_8_64, in2, in1);
SUB4(in8, vec3, in15, vec4, in3, in2, in0, in1, in3, in0, vec2, vec5);
- VP9_DOTP_CONST_PAIR(vec5, vec2, cospi_22_64, cospi_10_64, temp1, temp0);
- VP9_FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
+ DOTP_CONST_PAIR(vec5, vec2, cospi_22_64, cospi_10_64, temp1, temp0);
+ FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
ST_SH(temp0, temp + 320);
ST_SH(temp1, temp + 704);
ADD2(in3, in2, in0, in1, vec3, vec4);
- VP9_DOTP_CONST_PAIR(vec4, vec3, cospi_6_64, cospi_26_64, temp0, temp1);
- VP9_FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
+ DOTP_CONST_PAIR(vec4, vec3, cospi_6_64, cospi_26_64, temp0, temp1);
+ FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
ST_SH(temp0, temp + 192);
ST_SH(temp1, temp + 832);
}
@@ -139,8 +139,8 @@
in26 = LD_SH(input + 80);
in27 = LD_SH(input + 88);
- VP9_DOTP_CONST_PAIR(in27, in20, cospi_16_64, cospi_16_64, in20, in27);
- VP9_DOTP_CONST_PAIR(in26, in21, cospi_16_64, cospi_16_64, in21, in26);
+ DOTP_CONST_PAIR(in27, in20, cospi_16_64, cospi_16_64, in20, in27);
+ DOTP_CONST_PAIR(in26, in21, cospi_16_64, cospi_16_64, in21, in26);
in18 = LD_SH(input + 16);
in19 = LD_SH(input + 24);
@@ -162,8 +162,8 @@
in26 = in29 + in26;
LD_SH4(input + 48, 8, in22, in23, in24, in25);
- VP9_DOTP_CONST_PAIR(in25, in22, cospi_16_64, cospi_16_64, in22, in25);
- VP9_DOTP_CONST_PAIR(in24, in23, cospi_16_64, cospi_16_64, in23, in24);
+ DOTP_CONST_PAIR(in25, in22, cospi_16_64, cospi_16_64, in22, in25);
+ DOTP_CONST_PAIR(in24, in23, cospi_16_64, cospi_16_64, in23, in24);
in16 = LD_SH(input);
in17 = LD_SH(input + 8);
@@ -180,33 +180,33 @@
ST_SH(vec4, input + 104);
ADD4(in16, in23, in17, in22, in30, in25, in31, in24, in16, in17, in30, in31);
- VP9_DOTP_CONST_PAIR(in26, in21, cospi_24_64, cospi_8_64, in18, in29);
- VP9_DOTP_CONST_PAIR(in27, in20, cospi_24_64, cospi_8_64, in19, in28);
+ DOTP_CONST_PAIR(in26, in21, cospi_24_64, cospi_8_64, in18, in29);
+ DOTP_CONST_PAIR(in27, in20, cospi_24_64, cospi_8_64, in19, in28);
ADD4(in16, in19, in17, in18, in30, in29, in31, in28, in27, in22, in21, in25);
- VP9_DOTP_CONST_PAIR(in21, in22, cospi_28_64, cospi_4_64, in26, in24);
+ DOTP_CONST_PAIR(in21, in22, cospi_28_64, cospi_4_64, in26, in24);
ADD2(in27, in26, in25, in24, in23, in20);
- VP9_DOTP_CONST_PAIR(in20, in23, cospi_31_64, cospi_1_64, vec4, vec5);
- VP9_FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
+ DOTP_CONST_PAIR(in20, in23, cospi_31_64, cospi_1_64, vec4, vec5);
+ FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
ST_SH(vec5, temp_ptr);
ST_SH(vec4, temp_ptr + 960);
SUB2(in27, in26, in25, in24, in22, in21);
- VP9_DOTP_CONST_PAIR(in21, in22, cospi_15_64, cospi_17_64, vec5, vec4);
- VP9_FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
+ DOTP_CONST_PAIR(in21, in22, cospi_15_64, cospi_17_64, vec5, vec4);
+ FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
ST_SH(vec5, temp_ptr + 448);
ST_SH(vec4, temp_ptr + 512);
SUB4(in17, in18, in16, in19, in31, in28, in30, in29, in23, in26, in24, in20);
- VP9_DOTP_CONST_PAIR((-in23), in20, cospi_28_64, cospi_4_64, in27, in25);
+ DOTP_CONST_PAIR((-in23), in20, cospi_28_64, cospi_4_64, in27, in25);
SUB2(in26, in27, in24, in25, in23, in20);
- VP9_DOTP_CONST_PAIR(in20, in23, cospi_23_64, cospi_9_64, vec4, vec5);
- VP9_FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
+ DOTP_CONST_PAIR(in20, in23, cospi_23_64, cospi_9_64, vec4, vec5);
+ FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
ST_SH(vec4, temp_ptr + 704);
ST_SH(vec5, temp_ptr + 256);
ADD2(in26, in27, in24, in25, in22, in21);
- VP9_DOTP_CONST_PAIR(in21, in22, cospi_7_64, cospi_25_64, vec4, vec5);
- VP9_FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
+ DOTP_CONST_PAIR(in21, in22, cospi_7_64, cospi_25_64, vec4, vec5);
+ FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
ST_SH(vec4, temp_ptr + 192);
ST_SH(vec5, temp_ptr + 768);
@@ -214,32 +214,32 @@
LD_SH4(input + 80, 8, in26, in27, in24, in25);
in16 = in20;
in17 = in21;
- VP9_DOTP_CONST_PAIR(-in16, in27, cospi_24_64, cospi_8_64, in20, in27);
- VP9_DOTP_CONST_PAIR(-in17, in26, cospi_24_64, cospi_8_64, in21, in26);
+ DOTP_CONST_PAIR(-in16, in27, cospi_24_64, cospi_8_64, in20, in27);
+ DOTP_CONST_PAIR(-in17, in26, cospi_24_64, cospi_8_64, in21, in26);
SUB4(in23, in20, in22, in21, in25, in26, in24, in27, in28, in17, in18, in31);
- VP9_DOTP_CONST_PAIR(in18, in17, cospi_12_64, cospi_20_64, in29, in30);
+ DOTP_CONST_PAIR(in18, in17, cospi_12_64, cospi_20_64, in29, in30);
ADD2(in28, in29, in31, in30, in16, in19);
- VP9_DOTP_CONST_PAIR(in19, in16, cospi_27_64, cospi_5_64, vec5, vec4);
- VP9_FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
+ DOTP_CONST_PAIR(in19, in16, cospi_27_64, cospi_5_64, vec5, vec4);
+ FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
ST_SH(vec5, temp_ptr + 832);
ST_SH(vec4, temp_ptr + 128);
SUB2(in28, in29, in31, in30, in17, in18);
- VP9_DOTP_CONST_PAIR(in18, in17, cospi_11_64, cospi_21_64, vec5, vec4);
- VP9_FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
+ DOTP_CONST_PAIR(in18, in17, cospi_11_64, cospi_21_64, vec5, vec4);
+ FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
ST_SH(vec5, temp_ptr + 320);
ST_SH(vec4, temp_ptr + 640);
ADD4(in22, in21, in23, in20, in24, in27, in25, in26, in16, in29, in30, in19);
- VP9_DOTP_CONST_PAIR(-in16, in19, cospi_12_64, cospi_20_64, in28, in31);
+ DOTP_CONST_PAIR(-in16, in19, cospi_12_64, cospi_20_64, in28, in31);
SUB2(in29, in28, in30, in31, in16, in19);
- VP9_DOTP_CONST_PAIR(in19, in16, cospi_19_64, cospi_13_64, vec5, vec4);
- VP9_FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
+ DOTP_CONST_PAIR(in19, in16, cospi_19_64, cospi_13_64, vec5, vec4);
+ FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
ST_SH(vec5, temp_ptr + 576);
ST_SH(vec4, temp_ptr + 384);
ADD2(in29, in28, in30, in31, in17, in18);
- VP9_DOTP_CONST_PAIR(in18, in17, cospi_3_64, cospi_29_64, vec5, vec4);
- VP9_FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
+ DOTP_CONST_PAIR(in18, in17, cospi_3_64, cospi_29_64, vec5, vec4);
+ FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
ST_SH(vec5, temp_ptr + 64);
ST_SH(vec4, temp_ptr + 896);
}
@@ -327,67 +327,67 @@
vec3_r = vec1_r + vec2_r;
vec1_r = vec1_r - vec2_r;
- VP9_DOTP_CONST_PAIR_W(vec4_r, vec6_r, tmp3_w, vec3_r, cospi_16_64,
- cospi_16_64, vec4_r, tmp3_w, vec6_r, vec3_r);
- VP9_FDCT32_POSTPROC_NEG_W(vec4_r);
- VP9_FDCT32_POSTPROC_NEG_W(tmp3_w);
- VP9_FDCT32_POSTPROC_NEG_W(vec6_r);
- VP9_FDCT32_POSTPROC_NEG_W(vec3_r);
+ DOTP_CONST_PAIR_W(vec4_r, vec6_r, tmp3_w, vec3_r, cospi_16_64,
+ cospi_16_64, vec4_r, tmp3_w, vec6_r, vec3_r);
+ FDCT32_POSTPROC_NEG_W(vec4_r);
+ FDCT32_POSTPROC_NEG_W(tmp3_w);
+ FDCT32_POSTPROC_NEG_W(vec6_r);
+ FDCT32_POSTPROC_NEG_W(vec3_r);
PCKEV_H2_SH(vec4_r, tmp3_w, vec6_r, vec3_r, vec4, vec5);
ST_SH2(vec5, vec4, out, 8);
- VP9_DOTP_CONST_PAIR_W(vec5_r, vec7_r, vec0_r, vec1_r, cospi_24_64,
- cospi_8_64, vec4_r, tmp3_w, vec6_r, vec3_r);
- VP9_FDCT32_POSTPROC_NEG_W(vec4_r);
- VP9_FDCT32_POSTPROC_NEG_W(tmp3_w);
- VP9_FDCT32_POSTPROC_NEG_W(vec6_r);
- VP9_FDCT32_POSTPROC_NEG_W(vec3_r);
+ DOTP_CONST_PAIR_W(vec5_r, vec7_r, vec0_r, vec1_r, cospi_24_64,
+ cospi_8_64, vec4_r, tmp3_w, vec6_r, vec3_r);
+ FDCT32_POSTPROC_NEG_W(vec4_r);
+ FDCT32_POSTPROC_NEG_W(tmp3_w);
+ FDCT32_POSTPROC_NEG_W(vec6_r);
+ FDCT32_POSTPROC_NEG_W(vec3_r);
PCKEV_H2_SH(vec4_r, tmp3_w, vec6_r, vec3_r, vec4, vec5);
ST_SH2(vec5, vec4, out + 16, 8);
LD_SH8(interm_ptr, 8, vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7);
SUB4(vec3, vec4, vec2, vec5, vec1, vec6, vec0, vec7, vec4, vec5, vec6, vec7);
- VP9_DOTP_CONST_PAIR(vec6, vec5, cospi_16_64, cospi_16_64, vec5, vec6);
+ DOTP_CONST_PAIR(vec6, vec5, cospi_16_64, cospi_16_64, vec5, vec6);
ADD2(vec4, vec5, vec7, vec6, vec0, vec1);
- VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_28_64, cospi_4_64, in5, in4);
- VP9_FDCT_POSTPROC_2V_NEG_H(in4, in5);
+ DOTP_CONST_PAIR(vec1, vec0, cospi_28_64, cospi_4_64, in5, in4);
+ FDCT_POSTPROC_2V_NEG_H(in4, in5);
ST_SH(in4, out + 32);
ST_SH(in5, out + 56);
SUB2(vec4, vec5, vec7, vec6, vec4, vec7);
- VP9_DOTP_CONST_PAIR(vec7, vec4, cospi_12_64, cospi_20_64, in5, in4);
- VP9_FDCT_POSTPROC_2V_NEG_H(in4, in5);
+ DOTP_CONST_PAIR(vec7, vec4, cospi_12_64, cospi_20_64, in5, in4);
+ FDCT_POSTPROC_2V_NEG_H(in4, in5);
ST_SH(in4, out + 40);
ST_SH(in5, out + 48);
LD_SH8(interm_ptr + 64, 8, in8, in9, in10, in11, in12, in13, in14, in15);
- VP9_DOTP_CONST_PAIR(in13, in10, cospi_16_64, cospi_16_64, vec2, vec5);
- VP9_DOTP_CONST_PAIR(in12, in11, cospi_16_64, cospi_16_64, vec3, vec4);
+ DOTP_CONST_PAIR(in13, in10, cospi_16_64, cospi_16_64, vec2, vec5);
+ DOTP_CONST_PAIR(in12, in11, cospi_16_64, cospi_16_64, vec3, vec4);
ADD4(in8, vec3, in9, vec2, in14, vec5, in15, vec4, in0, vec1, vec6, in2);
- VP9_DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3);
+ DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3);
ADD2(in0, in1, in2, in3, vec0, vec7);
- VP9_DOTP_CONST_PAIR(vec7, vec0, cospi_30_64, cospi_2_64, in5, in4);
- VP9_FDCT_POSTPROC_2V_NEG_H(in4, in5);
+ DOTP_CONST_PAIR(vec7, vec0, cospi_30_64, cospi_2_64, in5, in4);
+ FDCT_POSTPROC_2V_NEG_H(in4, in5);
ST_SH(in4, out + 64);
ST_SH(in5, out + 120);
SUB2(in0, in1, in2, in3, in0, in2);
- VP9_DOTP_CONST_PAIR(in2, in0, cospi_14_64, cospi_18_64, in5, in4);
- VP9_FDCT_POSTPROC_2V_NEG_H(in4, in5);
+ DOTP_CONST_PAIR(in2, in0, cospi_14_64, cospi_18_64, in5, in4);
+ FDCT_POSTPROC_2V_NEG_H(in4, in5);
ST_SH(in4, out + 72);
ST_SH(in5, out + 112);
SUB2(in9, vec2, in14, vec5, vec2, vec5);
- VP9_DOTP_CONST_PAIR((-vec2), vec5, cospi_24_64, cospi_8_64, in2, in1);
+ DOTP_CONST_PAIR((-vec2), vec5, cospi_24_64, cospi_8_64, in2, in1);
SUB4(in8, vec3, in15, vec4, in3, in2, in0, in1, in3, in0, vec2, vec5);
- VP9_DOTP_CONST_PAIR(vec5, vec2, cospi_22_64, cospi_10_64, in5, in4);
- VP9_FDCT_POSTPROC_2V_NEG_H(in4, in5);
+ DOTP_CONST_PAIR(vec5, vec2, cospi_22_64, cospi_10_64, in5, in4);
+ FDCT_POSTPROC_2V_NEG_H(in4, in5);
ST_SH(in4, out + 80);
ST_SH(in5, out + 104);
ADD2(in3, in2, in0, in1, vec3, vec4);
- VP9_DOTP_CONST_PAIR(vec4, vec3, cospi_6_64, cospi_26_64, in4, in5);
- VP9_FDCT_POSTPROC_2V_NEG_H(in4, in5);
+ DOTP_CONST_PAIR(vec4, vec3, cospi_6_64, cospi_26_64, in4, in5);
+ FDCT_POSTPROC_2V_NEG_H(in4, in5);
ST_SH(in4, out + 96);
ST_SH(in5, out + 88);
}
@@ -410,57 +410,57 @@
/* Stage 3 */
ADD4(vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, in0, in1, in2, in3);
BUTTERFLY_4(in0, in1, in2, in3, temp0, in4, in1, in0);
- VP9_DOTP_CONST_PAIR(temp0, in4, cospi_16_64, cospi_16_64, temp1, temp0);
- VP9_FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
+ DOTP_CONST_PAIR(temp0, in4, cospi_16_64, cospi_16_64, temp1, temp0);
+ FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
ST_SH(temp0, out);
ST_SH(temp1, out + 8);
- VP9_DOTP_CONST_PAIR(in0, in1, cospi_24_64, cospi_8_64, temp1, temp0);
- VP9_FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
+ DOTP_CONST_PAIR(in0, in1, cospi_24_64, cospi_8_64, temp1, temp0);
+ FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
ST_SH(temp0, out + 16);
ST_SH(temp1, out + 24);
SUB4(vec3, vec4, vec2, vec5, vec1, vec6, vec0, vec7, vec4, vec5, vec6, vec7);
- VP9_DOTP_CONST_PAIR(vec6, vec5, cospi_16_64, cospi_16_64, vec5, vec6);
+ DOTP_CONST_PAIR(vec6, vec5, cospi_16_64, cospi_16_64, vec5, vec6);
ADD2(vec4, vec5, vec7, vec6, vec0, vec1);
- VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_28_64, cospi_4_64, temp1, temp0);
- VP9_FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
+ DOTP_CONST_PAIR(vec1, vec0, cospi_28_64, cospi_4_64, temp1, temp0);
+ FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
ST_SH(temp0, out + 32);
ST_SH(temp1, out + 56);
SUB2(vec4, vec5, vec7, vec6, vec4, vec7);
- VP9_DOTP_CONST_PAIR(vec7, vec4, cospi_12_64, cospi_20_64, temp1, temp0);
- VP9_FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
+ DOTP_CONST_PAIR(vec7, vec4, cospi_12_64, cospi_20_64, temp1, temp0);
+ FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
ST_SH(temp0, out + 40);
ST_SH(temp1, out + 48);
- VP9_DOTP_CONST_PAIR(in13, in10, cospi_16_64, cospi_16_64, vec2, vec5);
- VP9_DOTP_CONST_PAIR(in12, in11, cospi_16_64, cospi_16_64, vec3, vec4);
+ DOTP_CONST_PAIR(in13, in10, cospi_16_64, cospi_16_64, vec2, vec5);
+ DOTP_CONST_PAIR(in12, in11, cospi_16_64, cospi_16_64, vec3, vec4);
ADD4(in8, vec3, in9, vec2, in14, vec5, in15, vec4, in0, vec1, vec6, in2);
- VP9_DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3);
+ DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3);
ADD2(in0, in1, in2, in3, vec0, vec7);
- VP9_DOTP_CONST_PAIR(vec7, vec0, cospi_30_64, cospi_2_64, temp1, temp0);
- VP9_FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
+ DOTP_CONST_PAIR(vec7, vec0, cospi_30_64, cospi_2_64, temp1, temp0);
+ FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
ST_SH(temp0, out + 64);
ST_SH(temp1, out + 120);
SUB2(in0, in1, in2, in3, in0, in2);
- VP9_DOTP_CONST_PAIR(in2, in0, cospi_14_64, cospi_18_64, temp1, temp0);
- VP9_FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
+ DOTP_CONST_PAIR(in2, in0, cospi_14_64, cospi_18_64, temp1, temp0);
+ FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
ST_SH(temp0, out + 72);
ST_SH(temp1, out + 112);
SUB2(in9, vec2, in14, vec5, vec2, vec5);
- VP9_DOTP_CONST_PAIR((-vec2), vec5, cospi_24_64, cospi_8_64, in2, in1);
+ DOTP_CONST_PAIR((-vec2), vec5, cospi_24_64, cospi_8_64, in2, in1);
SUB4(in8, vec3, in15, vec4, in3, in2, in0, in1, in3, in0, vec2, vec5)
- VP9_DOTP_CONST_PAIR(vec5, vec2, cospi_22_64, cospi_10_64, temp1, temp0);
- VP9_FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
+ DOTP_CONST_PAIR(vec5, vec2, cospi_22_64, cospi_10_64, temp1, temp0);
+ FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
ST_SH(temp0, out + 80);
ST_SH(temp1, out + 104);
ADD2(in3, in2, in0, in1, vec3, vec4);
- VP9_DOTP_CONST_PAIR(vec4, vec3, cospi_6_64, cospi_26_64, temp0, temp1);
- VP9_FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
+ DOTP_CONST_PAIR(vec4, vec3, cospi_6_64, cospi_26_64, temp0, temp1);
+ FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
ST_SH(temp0, out + 96);
ST_SH(temp1, out + 88);
}
@@ -475,8 +475,8 @@
in26 = LD_SH(temp + 80);
in27 = LD_SH(temp + 88);
- VP9_DOTP_CONST_PAIR(in27, in20, cospi_16_64, cospi_16_64, in20, in27);
- VP9_DOTP_CONST_PAIR(in26, in21, cospi_16_64, cospi_16_64, in21, in26);
+ DOTP_CONST_PAIR(in27, in20, cospi_16_64, cospi_16_64, in20, in27);
+ DOTP_CONST_PAIR(in26, in21, cospi_16_64, cospi_16_64, in21, in26);
in18 = LD_SH(temp + 16);
in19 = LD_SH(temp + 24);
@@ -499,8 +499,8 @@
in24 = LD_SH(temp + 64);
in25 = LD_SH(temp + 72);
- VP9_DOTP_CONST_PAIR(in25, in22, cospi_16_64, cospi_16_64, in22, in25);
- VP9_DOTP_CONST_PAIR(in24, in23, cospi_16_64, cospi_16_64, in23, in24);
+ DOTP_CONST_PAIR(in25, in22, cospi_16_64, cospi_16_64, in22, in25);
+ DOTP_CONST_PAIR(in24, in23, cospi_16_64, cospi_16_64, in23, in24);
in16 = LD_SH(temp);
in17 = LD_SH(temp + 8);
@@ -517,37 +517,37 @@
ST_SH(vec4, interm_ptr + 80);
ADD4(in16, in23, in17, in22, in30, in25, in31, in24, in16, in17, in30, in31);
- VP9_DOTP_CONST_PAIR(in26, in21, cospi_24_64, cospi_8_64, in18, in29);
- VP9_DOTP_CONST_PAIR(in27, in20, cospi_24_64, cospi_8_64, in19, in28);
+ DOTP_CONST_PAIR(in26, in21, cospi_24_64, cospi_8_64, in18, in29);
+ DOTP_CONST_PAIR(in27, in20, cospi_24_64, cospi_8_64, in19, in28);
ADD4(in16, in19, in17, in18, in30, in29, in31, in28, in27, in22, in21, in25);
- VP9_DOTP_CONST_PAIR(in21, in22, cospi_28_64, cospi_4_64, in26, in24);
+ DOTP_CONST_PAIR(in21, in22, cospi_28_64, cospi_4_64, in26, in24);
ADD2(in27, in26, in25, in24, in23, in20);
- VP9_DOTP_CONST_PAIR(in20, in23, cospi_31_64, cospi_1_64, vec4, vec5);
- VP9_FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
+ DOTP_CONST_PAIR(in20, in23, cospi_31_64, cospi_1_64, vec4, vec5);
+ FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
ST_SH(vec5, out);
ST_SH(vec4, out + 120);
SUB2(in27, in26, in25, in24, in22, in21);
- VP9_DOTP_CONST_PAIR(in21, in22, cospi_15_64, cospi_17_64, vec5, vec4);
- VP9_FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
+ DOTP_CONST_PAIR(in21, in22, cospi_15_64, cospi_17_64, vec5, vec4);
+ FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
ST_SH(vec5, out + 112);
ST_SH(vec4, out + 8);
SUB4(in17, in18, in16, in19, in31, in28, in30, in29, in23, in26, in24, in20);
- VP9_DOTP_CONST_PAIR((-in23), in20, cospi_28_64, cospi_4_64, in27, in25);
+ DOTP_CONST_PAIR((-in23), in20, cospi_28_64, cospi_4_64, in27, in25);
SUB2(in26, in27, in24, in25, in23, in20);
- VP9_DOTP_CONST_PAIR(in20, in23, cospi_23_64, cospi_9_64, vec4, vec5);
- VP9_FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
+ DOTP_CONST_PAIR(in20, in23, cospi_23_64, cospi_9_64, vec4, vec5);
+ FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
ST_SH(vec4, out + 16);
ST_SH(vec5, out + 104);
ADD2(in26, in27, in24, in25, in22, in21);
- VP9_DOTP_CONST_PAIR(in21, in22, cospi_7_64, cospi_25_64, vec4, vec5);
- VP9_FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
+ DOTP_CONST_PAIR(in21, in22, cospi_7_64, cospi_25_64, vec4, vec5);
+ FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
ST_SH(vec4, out + 24);
ST_SH(vec5, out + 96);
@@ -558,8 +558,8 @@
in16 = in20;
in17 = in21;
- VP9_DOTP_CONST_PAIR(-in16, in27, cospi_24_64, cospi_8_64, in20, in27);
- VP9_DOTP_CONST_PAIR(-in17, in26, cospi_24_64, cospi_8_64, in21, in26);
+ DOTP_CONST_PAIR(-in16, in27, cospi_24_64, cospi_8_64, in20, in27);
+ DOTP_CONST_PAIR(-in17, in26, cospi_24_64, cospi_8_64, in21, in26);
in22 = LD_SH(interm_ptr + 40);
in25 = LD_SH(interm_ptr + 48);
@@ -567,32 +567,32 @@
in23 = LD_SH(interm_ptr + 80);
SUB4(in23, in20, in22, in21, in25, in26, in24, in27, in28, in17, in18, in31);
- VP9_DOTP_CONST_PAIR(in18, in17, cospi_12_64, cospi_20_64, in29, in30);
+ DOTP_CONST_PAIR(in18, in17, cospi_12_64, cospi_20_64, in29, in30);
ADD2(in28, in29, in31, in30, in16, in19);
- VP9_DOTP_CONST_PAIR(in19, in16, cospi_27_64, cospi_5_64, vec5, vec4);
- VP9_FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
+ DOTP_CONST_PAIR(in19, in16, cospi_27_64, cospi_5_64, vec5, vec4);
+ FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
ST_SH(vec5, out + 32);
ST_SH(vec4, out + 88);
SUB2(in28, in29, in31, in30, in17, in18);
- VP9_DOTP_CONST_PAIR(in18, in17, cospi_11_64, cospi_21_64, vec5, vec4);
- VP9_FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
+ DOTP_CONST_PAIR(in18, in17, cospi_11_64, cospi_21_64, vec5, vec4);
+ FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
ST_SH(vec5, out + 40);
ST_SH(vec4, out + 80);
ADD4(in22, in21, in23, in20, in24, in27, in25, in26, in16, in29, in30, in19);
- VP9_DOTP_CONST_PAIR(-in16, in19, cospi_12_64, cospi_20_64, in28, in31);
+ DOTP_CONST_PAIR(-in16, in19, cospi_12_64, cospi_20_64, in28, in31);
SUB2(in29, in28, in30, in31, in16, in19);
- VP9_DOTP_CONST_PAIR(in19, in16, cospi_19_64, cospi_13_64, vec5, vec4);
- VP9_FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
+ DOTP_CONST_PAIR(in19, in16, cospi_19_64, cospi_13_64, vec5, vec4);
+ FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
ST_SH(vec5, out + 72);
ST_SH(vec4, out + 48);
ADD2(in29, in28, in30, in31, in17, in18);
- VP9_DOTP_CONST_PAIR(in18, in17, cospi_3_64, cospi_29_64, vec5, vec4);
- VP9_FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
+ DOTP_CONST_PAIR(in18, in17, cospi_3_64, cospi_29_64, vec5, vec4);
+ FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
ST_SH(vec4, out + 56);
ST_SH(vec5, out + 64);
}
@@ -700,22 +700,22 @@
void vp9_fdct32x32_1_msa(const int16_t *input, int16_t *out, int32_t stride) {
out[1] = 0;
- out[0] = VP9_LD_HADD(input, stride);
- out[0] += VP9_LD_HADD(input + 8, stride);
- out[0] += VP9_LD_HADD(input + 16, stride);
- out[0] += VP9_LD_HADD(input + 24, stride);
- out[0] += VP9_LD_HADD(input + 32 * 8, stride);
- out[0] += VP9_LD_HADD(input + 32 * 8 + 8, stride);
- out[0] += VP9_LD_HADD(input + 32 * 8 + 16, stride);
- out[0] += VP9_LD_HADD(input + 32 * 8 + 24, stride);
- out[0] += VP9_LD_HADD(input + 32 * 16, stride);
- out[0] += VP9_LD_HADD(input + 32 * 16 + 8, stride);
- out[0] += VP9_LD_HADD(input + 32 * 16 + 16, stride);
- out[0] += VP9_LD_HADD(input + 32 * 16 + 24, stride);
- out[0] += VP9_LD_HADD(input + 32 * 24, stride);
- out[0] += VP9_LD_HADD(input + 32 * 24 + 8, stride);
- out[0] += VP9_LD_HADD(input + 32 * 24 + 16, stride);
- out[0] += VP9_LD_HADD(input + 32 * 24 + 24, stride);
+ out[0] = LD_HADD(input, stride);
+ out[0] += LD_HADD(input + 8, stride);
+ out[0] += LD_HADD(input + 16, stride);
+ out[0] += LD_HADD(input + 24, stride);
+ out[0] += LD_HADD(input + 32 * 8, stride);
+ out[0] += LD_HADD(input + 32 * 8 + 8, stride);
+ out[0] += LD_HADD(input + 32 * 8 + 16, stride);
+ out[0] += LD_HADD(input + 32 * 8 + 24, stride);
+ out[0] += LD_HADD(input + 32 * 16, stride);
+ out[0] += LD_HADD(input + 32 * 16 + 8, stride);
+ out[0] += LD_HADD(input + 32 * 16 + 16, stride);
+ out[0] += LD_HADD(input + 32 * 16 + 24, stride);
+ out[0] += LD_HADD(input + 32 * 24, stride);
+ out[0] += LD_HADD(input + 32 * 24 + 8, stride);
+ out[0] += LD_HADD(input + 32 * 24 + 16, stride);
+ out[0] += LD_HADD(input + 32 * 24 + 24, stride);
out[0] >>= 3;
}
@@ -733,14 +733,14 @@
in8, in9, in10, in11, in12, in13, in14, in15,
vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7,
in8, in9, in10, in11, in12, in13, in14, in15);
- VP9_FDCT_POSTPROC_2V_NEG_H(vec0, vec1);
- VP9_FDCT_POSTPROC_2V_NEG_H(vec2, vec3);
- VP9_FDCT_POSTPROC_2V_NEG_H(vec4, vec5);
- VP9_FDCT_POSTPROC_2V_NEG_H(vec6, vec7);
- VP9_FDCT_POSTPROC_2V_NEG_H(in8, in9);
- VP9_FDCT_POSTPROC_2V_NEG_H(in10, in11);
- VP9_FDCT_POSTPROC_2V_NEG_H(in12, in13);
- VP9_FDCT_POSTPROC_2V_NEG_H(in14, in15);
+ FDCT_POSTPROC_2V_NEG_H(vec0, vec1);
+ FDCT_POSTPROC_2V_NEG_H(vec2, vec3);
+ FDCT_POSTPROC_2V_NEG_H(vec4, vec5);
+ FDCT_POSTPROC_2V_NEG_H(vec6, vec7);
+ FDCT_POSTPROC_2V_NEG_H(in8, in9);
+ FDCT_POSTPROC_2V_NEG_H(in10, in11);
+ FDCT_POSTPROC_2V_NEG_H(in12, in13);
+ FDCT_POSTPROC_2V_NEG_H(in14, in15);
/* Stage 3 */
ADD4(vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, in0, in1, in2, in3);
@@ -750,49 +750,49 @@
in3 = in1 + in2;
in1 = in1 - in2;
- VP9_DOTP_CONST_PAIR(temp0, in3, cospi_16_64, cospi_16_64, temp1, temp0);
+ DOTP_CONST_PAIR(temp0, in3, cospi_16_64, cospi_16_64, temp1, temp0);
ST_SH(temp0, out);
ST_SH(temp1, out + 8);
- VP9_DOTP_CONST_PAIR(in0, in1, cospi_24_64, cospi_8_64, temp1, temp0);
+ DOTP_CONST_PAIR(in0, in1, cospi_24_64, cospi_8_64, temp1, temp0);
ST_SH(temp0, out + 16);
ST_SH(temp1, out + 24);
SUB4(vec3, vec4, vec2, vec5, vec1, vec6, vec0, vec7, vec4, vec5, vec6, vec7);
- VP9_DOTP_CONST_PAIR(vec6, vec5, cospi_16_64, cospi_16_64, vec5, vec6);
+ DOTP_CONST_PAIR(vec6, vec5, cospi_16_64, cospi_16_64, vec5, vec6);
ADD2(vec4, vec5, vec7, vec6, vec0, vec1);
- VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_28_64, cospi_4_64, temp1, temp0);
+ DOTP_CONST_PAIR(vec1, vec0, cospi_28_64, cospi_4_64, temp1, temp0);
ST_SH(temp0, out + 32);
ST_SH(temp1, out + 56);
SUB2(vec4, vec5, vec7, vec6, vec4, vec7);
- VP9_DOTP_CONST_PAIR(vec7, vec4, cospi_12_64, cospi_20_64, temp1, temp0);
+ DOTP_CONST_PAIR(vec7, vec4, cospi_12_64, cospi_20_64, temp1, temp0);
ST_SH(temp0, out + 40);
ST_SH(temp1, out + 48);
- VP9_DOTP_CONST_PAIR(in13, in10, cospi_16_64, cospi_16_64, vec2, vec5);
- VP9_DOTP_CONST_PAIR(in12, in11, cospi_16_64, cospi_16_64, vec3, vec4);
+ DOTP_CONST_PAIR(in13, in10, cospi_16_64, cospi_16_64, vec2, vec5);
+ DOTP_CONST_PAIR(in12, in11, cospi_16_64, cospi_16_64, vec3, vec4);
ADD4(in8, vec3, in9, vec2, in14, vec5, in15, vec4, in0, vec1, vec6, in2);
- VP9_DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3);
+ DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3);
ADD2(in0, in1, in2, in3, vec0, vec7);
- VP9_DOTP_CONST_PAIR(vec7, vec0, cospi_30_64, cospi_2_64, temp1, temp0);
+ DOTP_CONST_PAIR(vec7, vec0, cospi_30_64, cospi_2_64, temp1, temp0);
ST_SH(temp0, out + 64);
ST_SH(temp1, out + 120);
SUB2(in0, in1, in2, in3, in0, in2);
- VP9_DOTP_CONST_PAIR(in2, in0, cospi_14_64, cospi_18_64, temp1, temp0);
+ DOTP_CONST_PAIR(in2, in0, cospi_14_64, cospi_18_64, temp1, temp0);
ST_SH(temp0, out + 72);
ST_SH(temp1, out + 112);
SUB2(in9, vec2, in14, vec5, vec2, vec5);
- VP9_DOTP_CONST_PAIR((-vec2), vec5, cospi_24_64, cospi_8_64, in2, in1);
+ DOTP_CONST_PAIR((-vec2), vec5, cospi_24_64, cospi_8_64, in2, in1);
SUB4(in8, vec3, in15, vec4, in3, in2, in0, in1, in3, in0, vec2, vec5);
- VP9_DOTP_CONST_PAIR(vec5, vec2, cospi_22_64, cospi_10_64, temp1, temp0);
+ DOTP_CONST_PAIR(vec5, vec2, cospi_22_64, cospi_10_64, temp1, temp0);
ST_SH(temp0, out + 80);
ST_SH(temp1, out + 104);
ADD2(in3, in2, in0, in1, vec3, vec4);
- VP9_DOTP_CONST_PAIR(vec4, vec3, cospi_6_64, cospi_26_64, temp0, temp1);
+ DOTP_CONST_PAIR(vec4, vec3, cospi_6_64, cospi_26_64, temp0, temp1);
ST_SH(temp0, out + 96);
ST_SH(temp1, out + 88);
}
@@ -808,11 +808,11 @@
in26 = LD_SH(temp + 80);
in27 = LD_SH(temp + 88);
- VP9_DOTP_CONST_PAIR(in27, in20, cospi_16_64, cospi_16_64, in20, in27);
- VP9_DOTP_CONST_PAIR(in26, in21, cospi_16_64, cospi_16_64, in21, in26);
+ DOTP_CONST_PAIR(in27, in20, cospi_16_64, cospi_16_64, in20, in27);
+ DOTP_CONST_PAIR(in26, in21, cospi_16_64, cospi_16_64, in21, in26);
- VP9_FDCT_POSTPROC_2V_NEG_H(in20, in21);
- VP9_FDCT_POSTPROC_2V_NEG_H(in26, in27);
+ FDCT_POSTPROC_2V_NEG_H(in20, in21);
+ FDCT_POSTPROC_2V_NEG_H(in26, in27);
in18 = LD_SH(temp + 16);
in19 = LD_SH(temp + 24);
@@ -819,8 +819,8 @@
in28 = LD_SH(temp + 96);
in29 = LD_SH(temp + 104);
- VP9_FDCT_POSTPROC_2V_NEG_H(in18, in19);
- VP9_FDCT_POSTPROC_2V_NEG_H(in28, in29);
+ FDCT_POSTPROC_2V_NEG_H(in18, in19);
+ FDCT_POSTPROC_2V_NEG_H(in28, in29);
vec4 = in19 - in20;
ST_SH(vec4, interm_ptr + 32);
@@ -838,10 +838,10 @@
in24 = LD_SH(temp + 64);
in25 = LD_SH(temp + 72);
- VP9_DOTP_CONST_PAIR(in25, in22, cospi_16_64, cospi_16_64, in22, in25);
- VP9_DOTP_CONST_PAIR(in24, in23, cospi_16_64, cospi_16_64, in23, in24);
- VP9_FDCT_POSTPROC_2V_NEG_H(in22, in23);
- VP9_FDCT_POSTPROC_2V_NEG_H(in24, in25);
+ DOTP_CONST_PAIR(in25, in22, cospi_16_64, cospi_16_64, in22, in25);
+ DOTP_CONST_PAIR(in24, in23, cospi_16_64, cospi_16_64, in23, in24);
+ FDCT_POSTPROC_2V_NEG_H(in22, in23);
+ FDCT_POSTPROC_2V_NEG_H(in24, in25);
in16 = LD_SH(temp);
in17 = LD_SH(temp + 8);
@@ -848,8 +848,8 @@
in30 = LD_SH(temp + 112);
in31 = LD_SH(temp + 120);
- VP9_FDCT_POSTPROC_2V_NEG_H(in16, in17);
- VP9_FDCT_POSTPROC_2V_NEG_H(in30, in31);
+ FDCT_POSTPROC_2V_NEG_H(in16, in17);
+ FDCT_POSTPROC_2V_NEG_H(in30, in31);
vec4 = in17 - in22;
ST_SH(vec4, interm_ptr + 40);
@@ -861,29 +861,29 @@
ST_SH(vec4, interm_ptr + 80);
ADD4(in16, in23, in17, in22, in30, in25, in31, in24, in16, in17, in30, in31);
- VP9_DOTP_CONST_PAIR(in26, in21, cospi_24_64, cospi_8_64, in18, in29);
- VP9_DOTP_CONST_PAIR(in27, in20, cospi_24_64, cospi_8_64, in19, in28);
+ DOTP_CONST_PAIR(in26, in21, cospi_24_64, cospi_8_64, in18, in29);
+ DOTP_CONST_PAIR(in27, in20, cospi_24_64, cospi_8_64, in19, in28);
ADD4(in16, in19, in17, in18, in30, in29, in31, in28, in27, in22, in21, in25);
- VP9_DOTP_CONST_PAIR(in21, in22, cospi_28_64, cospi_4_64, in26, in24);
+ DOTP_CONST_PAIR(in21, in22, cospi_28_64, cospi_4_64, in26, in24);
ADD2(in27, in26, in25, in24, in23, in20);
- VP9_DOTP_CONST_PAIR(in20, in23, cospi_31_64, cospi_1_64, vec4, vec5);
+ DOTP_CONST_PAIR(in20, in23, cospi_31_64, cospi_1_64, vec4, vec5);
ST_SH(vec5, out);
ST_SH(vec4, out + 120);
SUB2(in27, in26, in25, in24, in22, in21);
- VP9_DOTP_CONST_PAIR(in21, in22, cospi_15_64, cospi_17_64, vec5, vec4);
+ DOTP_CONST_PAIR(in21, in22, cospi_15_64, cospi_17_64, vec5, vec4);
ST_SH(vec5, out + 112);
ST_SH(vec4, out + 8);
SUB4(in17, in18, in16, in19, in31, in28, in30, in29, in23, in26, in24, in20);
- VP9_DOTP_CONST_PAIR((-in23), in20, cospi_28_64, cospi_4_64, in27, in25);
+ DOTP_CONST_PAIR((-in23), in20, cospi_28_64, cospi_4_64, in27, in25);
SUB2(in26, in27, in24, in25, in23, in20);
- VP9_DOTP_CONST_PAIR(in20, in23, cospi_23_64, cospi_9_64, vec4, vec5);
+ DOTP_CONST_PAIR(in20, in23, cospi_23_64, cospi_9_64, vec4, vec5);
ST_SH(vec4, out + 16);
ST_SH(vec5, out + 104);
ADD2(in26, in27, in24, in25, in22, in21);
- VP9_DOTP_CONST_PAIR(in21, in22, cospi_7_64, cospi_25_64, vec4, vec5);
+ DOTP_CONST_PAIR(in21, in22, cospi_7_64, cospi_25_64, vec4, vec5);
ST_SH(vec4, out + 24);
ST_SH(vec5, out + 96);
@@ -894,8 +894,8 @@
in16 = in20;
in17 = in21;
- VP9_DOTP_CONST_PAIR(-in16, in27, cospi_24_64, cospi_8_64, in20, in27);
- VP9_DOTP_CONST_PAIR(-in17, in26, cospi_24_64, cospi_8_64, in21, in26);
+ DOTP_CONST_PAIR(-in16, in27, cospi_24_64, cospi_8_64, in20, in27);
+ DOTP_CONST_PAIR(-in17, in26, cospi_24_64, cospi_8_64, in21, in26);
in22 = LD_SH(interm_ptr + 40);
in25 = LD_SH(interm_ptr + 48);
@@ -903,27 +903,27 @@
in23 = LD_SH(interm_ptr + 80);
SUB4(in23, in20, in22, in21, in25, in26, in24, in27, in28, in17, in18, in31);
- VP9_DOTP_CONST_PAIR(in18, in17, cospi_12_64, cospi_20_64, in29, in30);
+ DOTP_CONST_PAIR(in18, in17, cospi_12_64, cospi_20_64, in29, in30);
in16 = in28 + in29;
in19 = in31 + in30;
- VP9_DOTP_CONST_PAIR(in19, in16, cospi_27_64, cospi_5_64, vec5, vec4);
+ DOTP_CONST_PAIR(in19, in16, cospi_27_64, cospi_5_64, vec5, vec4);
ST_SH(vec5, out + 32);
ST_SH(vec4, out + 88);
SUB2(in28, in29, in31, in30, in17, in18);
- VP9_DOTP_CONST_PAIR(in18, in17, cospi_11_64, cospi_21_64, vec5, vec4);
+ DOTP_CONST_PAIR(in18, in17, cospi_11_64, cospi_21_64, vec5, vec4);
ST_SH(vec5, out + 40);
ST_SH(vec4, out + 80);
ADD4(in22, in21, in23, in20, in24, in27, in25, in26, in16, in29, in30, in19);
- VP9_DOTP_CONST_PAIR(-in16, in19, cospi_12_64, cospi_20_64, in28, in31);
+ DOTP_CONST_PAIR(-in16, in19, cospi_12_64, cospi_20_64, in28, in31);
SUB2(in29, in28, in30, in31, in16, in19);
- VP9_DOTP_CONST_PAIR(in19, in16, cospi_19_64, cospi_13_64, vec5, vec4);
+ DOTP_CONST_PAIR(in19, in16, cospi_19_64, cospi_13_64, vec5, vec4);
ST_SH(vec5, out + 72);
ST_SH(vec4, out + 48);
ADD2(in29, in28, in30, in31, in17, in18);
- VP9_DOTP_CONST_PAIR(in18, in17, cospi_3_64, cospi_29_64, vec5, vec4);
+ DOTP_CONST_PAIR(in18, in17, cospi_3_64, cospi_29_64, vec5, vec4);
ST_SH(vec4, out + 56);
ST_SH(vec5, out + 64);
}
--- a/vp9/encoder/mips/msa/vp9_fdct8x8_msa.c
+++ b/vp9/encoder/mips/msa/vp9_fdct8x8_msa.c
@@ -28,12 +28,12 @@
in0, in1, in2, in3, in4, in5, in6, in7);
TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
in0, in1, in2, in3, in4, in5, in6, in7);
- VP9_SRLI_AVE_S_4V_H(in0, in1, in2, in3, in4, in5, in6, in7);
+ SRLI_AVE_S_4V_H(in0, in1, in2, in3, in4, in5, in6, in7);
ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, output, 8);
}
void vp9_fdct8x8_1_msa(const int16_t *input, int16_t *out, int32_t stride) {
- out[0] = VP9_LD_HADD(input, stride);
+ out[0] = LD_HADD(input, stride);
out[1] = 0;
}
@@ -85,6 +85,6 @@
TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
in0, in1, in2, in3, in4, in5, in6, in7);
- VP9_SRLI_AVE_S_4V_H(in0, in1, in2, in3, in4, in5, in6, in7);
+ SRLI_AVE_S_4V_H(in0, in1, in2, in3, in4, in5, in6, in7);
ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, output, 8);
}
--- a/vp9/encoder/mips/msa/vp9_fdct_msa.h
+++ b/vp9/encoder/mips/msa/vp9_fdct_msa.h
@@ -15,7 +15,7 @@
#include "vp9/common/vp9_idct.h"
#include "vpx_dsp/mips/macros_msa.h"
-#define VP9_DOTP_CONST_PAIR(reg0, reg1, cnst0, cnst1, out0, out1) { \
+#define DOTP_CONST_PAIR(reg0, reg1, cnst0, cnst1, out0, out1) { \
v8i16 k0_m = __msa_fill_h(cnst0); \
v4i32 s0_m, s1_m, s2_m, s3_m; \
\
@@ -33,8 +33,8 @@
out1 = __msa_pckev_h((v8i16)s0_m, (v8i16)s1_m); \
}
-#define VP9_DOT_ADD_SUB_SRARI_PCK(in0, in1, in2, in3, in4, in5, in6, in7, \
- dst0, dst1, dst2, dst3) { \
+#define DOT_ADD_SUB_SRARI_PCK(in0, in1, in2, in3, in4, in5, in6, in7, \
+ dst0, dst1, dst2, dst3) { \
v4i32 tp0_m, tp1_m, tp2_m, tp3_m, tp4_m; \
v4i32 tp5_m, tp6_m, tp7_m, tp8_m, tp9_m; \
\
@@ -50,7 +50,7 @@
dst0, dst1, dst2, dst3); \
}
-#define VP9_DOT_SHIFT_RIGHT_PCK_H(in0, in1, in2) ({ \
+#define DOT_SHIFT_RIGHT_PCK_H(in0, in1, in2) ({ \
v8i16 dst_m; \
v4i32 tp0_m, tp1_m; \
\
@@ -79,9 +79,9 @@
\
ILVRL_H2_SH(in0, in7, vec1_m, vec0_m); \
ILVRL_H2_SH(in4, in3, vec3_m, vec2_m); \
- VP9_DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, \
- cnst1_m, cnst2_m, cnst3_m, in7, in0, \
- in4, in3); \
+ DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, \
+ cnst1_m, cnst2_m, cnst3_m, in7, in0, \
+ in4, in3); \
\
SPLATI_H2_SH(coeff0_m, 2, 5, cnst0_m, cnst1_m); \
cnst2_m = -cnst0_m; \
@@ -93,9 +93,9 @@
ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \
ILVRL_H2_SH(in6, in1, vec3_m, vec2_m); \
\
- VP9_DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, \
- cnst1_m, cnst2_m, cnst3_m, in5, in2, \
- in6, in1); \
+ DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, \
+ cnst1_m, cnst2_m, cnst3_m, in5, in2, \
+ in6, in1); \
BUTTERFLY_4(in7, in0, in2, in5, s1_m, s0_m, in2, in5); \
out7 = -s0_m; \
out0 = s1_m; \
@@ -108,9 +108,9 @@
\
ILVRL_H2_SH(in4, in3, vec1_m, vec0_m); \
ILVRL_H2_SH(in6, in1, vec3_m, vec2_m); \
- VP9_DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, \
- cnst2_m, cnst3_m, cnst1_m, out1, out6, \
- s0_m, s1_m); \
+ DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, \
+ cnst2_m, cnst3_m, cnst1_m, out1, out6, \
+ s0_m, s1_m); \
\
SPLATI_H2_SH(coeff1_m, 2, 3, cnst0_m, cnst1_m); \
cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m); \
@@ -117,10 +117,10 @@
\
ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \
ILVRL_H2_SH(s0_m, s1_m, vec3_m, vec2_m); \
- out3 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \
- out4 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m); \
- out2 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst0_m); \
- out5 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst1_m); \
+ out3 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \
+ out4 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m); \
+ out2 = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst0_m); \
+ out5 = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst1_m); \
\
out1 = -out1; \
out3 = -out3; \
@@ -127,7 +127,7 @@
out5 = -out5; \
}
-#define VP9_MADD_SHORT(m0, m1, c0, c1, res0, res1) { \
+#define MADD_SHORT(m0, m1, c0, c1, res0, res1) { \
v4i32 madd0_m, madd1_m, madd2_m, madd3_m; \
v8i16 madd_s0_m, madd_s1_m; \
\
@@ -138,8 +138,8 @@
PCKEV_H2_SH(madd1_m, madd0_m, madd3_m, madd2_m, res0, res1); \
}
-#define VP9_MADD_BF(inp0, inp1, inp2, inp3, cst0, cst1, cst2, cst3, \
- out0, out1, out2, out3) { \
+#define MADD_BF(inp0, inp1, inp2, inp3, cst0, cst1, cst2, cst3, \
+ out0, out1, out2, out3) { \
v8i16 madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m; \
v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m, m4_m, m5_m; \
\
@@ -159,7 +159,7 @@
PCKEV_H2_SH(m5_m, m4_m, tmp3_m, tmp2_m, out2, out3); \
}
-#define VP9_LD_HADD(psrc, stride) ({ \
+#define LD_HADD(psrc, stride) ({ \
v8i16 in0_m, in1_m, in2_m, in3_m, in4_m, in5_m, in6_m, in7_m; \
v4i32 vec_w_m; \
\
@@ -174,7 +174,7 @@
HADD_SW_S32(vec_w_m); \
})
-#define VP9_FDCT_POSTPROC_2V_NEG_H(vec0, vec1) { \
+#define FDCT_POSTPROC_2V_NEG_H(vec0, vec1) { \
v8i16 tp0_m, tp1_m; \
v8i16 one_m = __msa_ldi_h(1); \
\
@@ -267,23 +267,23 @@
ILVR_H2_SH(x1_m, x0_m, x3_m, x2_m, s1_m, s3_m); \
SPLATI_H2_SH(coeff_m, 0, 1, x0_m, x1_m); \
x1_m = __msa_ilvev_h(x1_m, x0_m); \
- out4 = VP9_DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x1_m); \
+ out4 = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x1_m); \
\
SPLATI_H2_SH(coeff_m, 2, 3, x2_m, x3_m); \
x2_m = -x2_m; \
x2_m = __msa_ilvev_h(x3_m, x2_m); \
- out6 = VP9_DOT_SHIFT_RIGHT_PCK_H(s2_m, s3_m, x2_m); \
+ out6 = DOT_SHIFT_RIGHT_PCK_H(s2_m, s3_m, x2_m); \
\
- out0 = VP9_DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x0_m); \
+ out0 = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x0_m); \
x2_m = __msa_splati_h(coeff_m, 2); \
x2_m = __msa_ilvev_h(x2_m, x3_m); \
- out2 = VP9_DOT_SHIFT_RIGHT_PCK_H(s2_m, s3_m, x2_m); \
+ out2 = DOT_SHIFT_RIGHT_PCK_H(s2_m, s3_m, x2_m); \
\
/* stage2 */ \
ILVRL_H2_SH(s5_m, s6_m, s1_m, s0_m); \
\
- s6_m = VP9_DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x0_m); \
- s5_m = VP9_DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x1_m); \
+ s6_m = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x0_m); \
+ s5_m = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x1_m); \
\
/* stage3 */ \
BUTTERFLY_4(s4_m, s7_m, s6_m, s5_m, x0_m, x3_m, x2_m, x1_m); \
@@ -294,24 +294,24 @@
\
SPLATI_H2_SH(coeff_m, 4, 5, x0_m, x1_m); \
x1_m = __msa_ilvev_h(x0_m, x1_m); \
- out1 = VP9_DOT_SHIFT_RIGHT_PCK_H(s4_m, s5_m, x1_m); \
+ out1 = DOT_SHIFT_RIGHT_PCK_H(s4_m, s5_m, x1_m); \
\
SPLATI_H2_SH(coeff_m, 6, 7, x2_m, x3_m); \
x2_m = __msa_ilvev_h(x3_m, x2_m); \
- out5 = VP9_DOT_SHIFT_RIGHT_PCK_H(s6_m, s7_m, x2_m); \
+ out5 = DOT_SHIFT_RIGHT_PCK_H(s6_m, s7_m, x2_m); \
\
x1_m = __msa_splati_h(coeff_m, 5); \
x0_m = -x0_m; \
x0_m = __msa_ilvev_h(x1_m, x0_m); \
- out7 = VP9_DOT_SHIFT_RIGHT_PCK_H(s4_m, s5_m, x0_m); \
+ out7 = DOT_SHIFT_RIGHT_PCK_H(s4_m, s5_m, x0_m); \
\
x2_m = __msa_splati_h(coeff_m, 6); \
x3_m = -x3_m; \
x2_m = __msa_ilvev_h(x2_m, x3_m); \
- out3 = VP9_DOT_SHIFT_RIGHT_PCK_H(s6_m, s7_m, x2_m); \
+ out3 = DOT_SHIFT_RIGHT_PCK_H(s6_m, s7_m, x2_m); \
}
-#define VP9_SRLI_AVE_S_4V_H(in0, in1, in2, in3, in4, in5, in6, in7) { \
+#define SRLI_AVE_S_4V_H(in0, in1, in2, in3, in4, in5, in6, in7) { \
v8i16 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m; \
\
SRLI_H4_SH(in0, in1, in2, in3, vec0_m, vec1_m, vec2_m, vec3_m, 15); \
@@ -322,8 +322,8 @@
in4, in5, in6, in7); \
}
-#define VP9_FDCT8x16_EVEN(in0, in1, in2, in3, in4, in5, in6, in7, \
- out0, out1, out2, out3, out4, out5, out6, out7) { \
+#define FDCT8x16_EVEN(in0, in1, in2, in3, in4, in5, in6, in7, \
+ out0, out1, out2, out3, out4, out5, out6, out7) { \
v8i16 s0_m, s1_m, s2_m, s3_m, s4_m, s5_m, s6_m, s7_m; \
v8i16 x0_m, x1_m, x2_m, x3_m; \
v8i16 coeff_m = { cospi_16_64, -cospi_16_64, cospi_8_64, cospi_24_64, \
@@ -337,23 +337,23 @@
ILVR_H2_SH(x1_m, x0_m, x3_m, x2_m, s1_m, s3_m); \
SPLATI_H2_SH(coeff_m, 0, 1, x0_m, x1_m); \
x1_m = __msa_ilvev_h(x1_m, x0_m); \
- out4 = VP9_DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x1_m); \
+ out4 = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x1_m); \
\
SPLATI_H2_SH(coeff_m, 2, 3, x2_m, x3_m); \
x2_m = -x2_m; \
x2_m = __msa_ilvev_h(x3_m, x2_m); \
- out6 = VP9_DOT_SHIFT_RIGHT_PCK_H(s2_m, s3_m, x2_m); \
+ out6 = DOT_SHIFT_RIGHT_PCK_H(s2_m, s3_m, x2_m); \
\
- out0 = VP9_DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x0_m); \
+ out0 = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x0_m); \
x2_m = __msa_splati_h(coeff_m, 2); \
x2_m = __msa_ilvev_h(x2_m, x3_m); \
- out2 = VP9_DOT_SHIFT_RIGHT_PCK_H(s2_m, s3_m, x2_m); \
+ out2 = DOT_SHIFT_RIGHT_PCK_H(s2_m, s3_m, x2_m); \
\
/* stage2 */ \
ILVRL_H2_SH(s5_m, s6_m, s1_m, s0_m); \
\
- s6_m = VP9_DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x0_m); \
- s5_m = VP9_DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x1_m); \
+ s6_m = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x0_m); \
+ s5_m = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x1_m); \
\
/* stage3 */ \
BUTTERFLY_4(s4_m, s7_m, s6_m, s5_m, x0_m, x3_m, x2_m, x1_m); \
@@ -364,27 +364,27 @@
\
SPLATI_H2_SH(coeff_m, 4, 5, x0_m, x1_m); \
x1_m = __msa_ilvev_h(x0_m, x1_m); \
- out1 = VP9_DOT_SHIFT_RIGHT_PCK_H(s4_m, s5_m, x1_m); \
+ out1 = DOT_SHIFT_RIGHT_PCK_H(s4_m, s5_m, x1_m); \
\
SPLATI_H2_SH(coeff_m, 6, 7, x2_m, x3_m); \
x2_m = __msa_ilvev_h(x3_m, x2_m); \
- out5 = VP9_DOT_SHIFT_RIGHT_PCK_H(s6_m, s7_m, x2_m); \
+ out5 = DOT_SHIFT_RIGHT_PCK_H(s6_m, s7_m, x2_m); \
\
x1_m = __msa_splati_h(coeff_m, 5); \
x0_m = -x0_m; \
x0_m = __msa_ilvev_h(x1_m, x0_m); \
- out7 = VP9_DOT_SHIFT_RIGHT_PCK_H(s4_m, s5_m, x0_m); \
+ out7 = DOT_SHIFT_RIGHT_PCK_H(s4_m, s5_m, x0_m); \
\
x2_m = __msa_splati_h(coeff_m, 6); \
x3_m = -x3_m; \
x2_m = __msa_ilvev_h(x2_m, x3_m); \
- out3 = VP9_DOT_SHIFT_RIGHT_PCK_H(s6_m, s7_m, x2_m); \
+ out3 = DOT_SHIFT_RIGHT_PCK_H(s6_m, s7_m, x2_m); \
}
-#define VP9_FDCT8x16_ODD(input0, input1, input2, input3, \
- input4, input5, input6, input7, \
- out1, out3, out5, out7, \
- out9, out11, out13, out15) { \
+#define FDCT8x16_ODD(input0, input1, input2, input3, \
+ input4, input5, input6, input7, \
+ out1, out3, out5, out7, \
+ out9, out11, out13, out15) { \
v8i16 stp21_m, stp22_m, stp23_m, stp24_m, stp25_m, stp26_m; \
v8i16 stp30_m, stp31_m, stp32_m, stp33_m, stp34_m, stp35_m; \
v8i16 stp36_m, stp37_m, vec0_m, vec1_m; \
@@ -404,13 +404,13 @@
ILVR_H2_SH(input2, input5, input3, input4, vec3_m, vec5_m); \
\
cnst4_m = __msa_splati_h(coeff_m, 0); \
- stp25_m = VP9_DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst4_m); \
+ stp25_m = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst4_m); \
\
cnst5_m = __msa_splati_h(coeff_m, 1); \
cnst5_m = __msa_ilvev_h(cnst5_m, cnst4_m); \
- stp22_m = VP9_DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst5_m); \
- stp24_m = VP9_DOT_SHIFT_RIGHT_PCK_H(vec4_m, vec5_m, cnst4_m); \
- stp23_m = VP9_DOT_SHIFT_RIGHT_PCK_H(vec4_m, vec5_m, cnst5_m); \
+ stp22_m = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst5_m); \
+ stp24_m = DOT_SHIFT_RIGHT_PCK_H(vec4_m, vec5_m, cnst4_m); \
+ stp23_m = DOT_SHIFT_RIGHT_PCK_H(vec4_m, vec5_m, cnst5_m); \
\
/* stp2 */ \
BUTTERFLY_4(input0, input1, stp22_m, stp23_m, \
@@ -423,19 +423,19 @@
\
SPLATI_H2_SH(coeff_m, 2, 3, cnst0_m, cnst1_m); \
cnst0_m = __msa_ilvev_h(cnst0_m, cnst1_m); \
- stp26_m = VP9_DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst0_m); \
+ stp26_m = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst0_m); \
\
cnst0_m = __msa_splati_h(coeff_m, 4); \
cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m); \
- stp21_m = VP9_DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst1_m); \
+ stp21_m = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst1_m); \
\
SPLATI_H2_SH(coeff_m, 5, 2, cnst0_m, cnst1_m); \
cnst1_m = __msa_ilvev_h(cnst0_m, cnst1_m); \
- stp25_m = VP9_DOT_SHIFT_RIGHT_PCK_H(vec4_m, vec5_m, cnst1_m); \
+ stp25_m = DOT_SHIFT_RIGHT_PCK_H(vec4_m, vec5_m, cnst1_m); \
\
cnst0_m = __msa_splati_h(coeff_m, 3); \
cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m); \
- stp22_m = VP9_DOT_SHIFT_RIGHT_PCK_H(vec4_m, vec5_m, cnst1_m); \
+ stp22_m = DOT_SHIFT_RIGHT_PCK_H(vec4_m, vec5_m, cnst1_m); \
\
/* stp4 */ \
BUTTERFLY_4(stp30_m, stp37_m, stp26_m, stp21_m, \
@@ -447,43 +447,43 @@
SPLATI_H2_SH(coeff1_m, 0, 1, cnst0_m, cnst1_m); \
cnst0_m = __msa_ilvev_h(cnst0_m, cnst1_m); \
\
- out1 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \
+ out1 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \
\
cnst0_m = __msa_splati_h(coeff2_m, 0); \
cnst0_m = __msa_ilvev_h(cnst1_m, cnst0_m); \
- out15 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \
+ out15 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \
\
ILVRL_H2_SH(vec4_m, vec5_m, vec1_m, vec0_m); \
SPLATI_H2_SH(coeff1_m, 2, 3, cnst0_m, cnst1_m); \
cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m); \
\
- out9 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m); \
+ out9 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m); \
\
cnst1_m = __msa_splati_h(coeff2_m, 2); \
cnst0_m = __msa_ilvev_h(cnst0_m, cnst1_m); \
- out7 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \
+ out7 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \
\
ILVRL_H2_SH(stp23_m, stp21_m, vec1_m, vec0_m); \
SPLATI_H2_SH(coeff1_m, 4, 5, cnst0_m, cnst1_m); \
cnst0_m = __msa_ilvev_h(cnst0_m, cnst1_m); \
- out5 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \
+ out5 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \
\
cnst0_m = __msa_splati_h(coeff2_m, 1); \
cnst0_m = __msa_ilvev_h(cnst1_m, cnst0_m); \
- out11 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \
+ out11 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \
\
ILVRL_H2_SH(stp24_m, stp31_m, vec1_m, vec0_m); \
SPLATI_H2_SH(coeff1_m, 6, 7, cnst0_m, cnst1_m); \
cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m); \
\
- out13 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m); \
+ out13 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m); \
\
cnst1_m = __msa_splati_h(coeff2_m, 3); \
cnst0_m = __msa_ilvev_h(cnst0_m, cnst1_m); \
- out3 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \
+ out3 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \
}
-#define VP9_FDCT32_POSTPROC_NEG_W(vec) { \
+#define FDCT32_POSTPROC_NEG_W(vec) { \
v4i32 temp_m; \
v4i32 one_m = __msa_ldi_w(1); \
\
@@ -494,7 +494,7 @@
vec >>= 2; \
}
-#define VP9_FDCT32_POSTPROC_2V_POS_H(vec0, vec1) { \
+#define FDCT32_POSTPROC_2V_POS_H(vec0, vec1) { \
v8i16 tp0_m, tp1_m; \
v8i16 one = __msa_ldi_h(1); \
\
@@ -512,9 +512,9 @@
vec1 >>= 2; \
}
-#define VP9_DOTP_CONST_PAIR_W(reg0_left, reg1_left, reg0_right, \
- reg1_right, const0, const1, \
- out0, out1, out2, out3) { \
+#define DOTP_CONST_PAIR_W(reg0_left, reg1_left, reg0_right, \
+ reg1_right, const0, const1, \
+ out0, out1, out2, out3) { \
v4i32 s0_m, s1_m, s2_m, s3_m, s4_m, s5_m, s6_m, s7_m; \
v2i64 tp0_m, tp1_m, tp2_m, tp3_m; \
v4i32 k0_m = __msa_fill_w((int32_t) const0); \