ref: 29b6a30cd971f0ad1d99dbeb081cda9f011d4b2a
parent: 93da1ba2dc1c8b8af5c266d502fdbafa5dfef55a
author: Linfeng Zhang <[email protected]>
date: Tue Feb 20 12:21:31 EST 2018
Add vp9_highbd_iht8x8_64_add_neon()

BUG=webm:1403

Change-Id: I11efb652f1aee371c71eee2d29e33793e4736832
--- a/test/dct_test.cc
+++ b/test/dct_test.cc
@@ -792,6 +792,43 @@
make_tuple(&vp9_highbd_fht4x4_c,
&highbd_iht_wrapper<vp9_highbd_iht4x4_16_add_neon>, 4, 3,
VPX_BITS_12, 2),
+
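+  // 8x8 high bitdepth tuples: forward C transform, NEON inverse wrapper,
+  // transform size, tx_type (0: DCT_DCT, 1: ADST_DCT, 2: DCT_ADST,
+  // 3: ADST_ADST), bit depth, and pixel size in bytes (2 for uint16_t
+  // buffers).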
+ make_tuple(&vp9_highbd_fht8x8_c,
+ &highbd_iht_wrapper<vp9_highbd_iht8x8_64_add_neon>, 8, 0,
+ VPX_BITS_8, 2),
+ make_tuple(&vp9_highbd_fht8x8_c,
+ &highbd_iht_wrapper<vp9_highbd_iht8x8_64_add_neon>, 8, 1,
+ VPX_BITS_8, 2),
+ make_tuple(&vp9_highbd_fht8x8_c,
+ &highbd_iht_wrapper<vp9_highbd_iht8x8_64_add_neon>, 8, 2,
+ VPX_BITS_8, 2),
+ make_tuple(&vp9_highbd_fht8x8_c,
+ &highbd_iht_wrapper<vp9_highbd_iht8x8_64_add_neon>, 8, 3,
+ VPX_BITS_8, 2),
+ make_tuple(&vp9_highbd_fht8x8_c,
+ &highbd_iht_wrapper<vp9_highbd_iht8x8_64_add_neon>, 8, 0,
+ VPX_BITS_10, 2),
+ make_tuple(&vp9_highbd_fht8x8_c,
+ &highbd_iht_wrapper<vp9_highbd_iht8x8_64_add_neon>, 8, 1,
+ VPX_BITS_10, 2),
+ make_tuple(&vp9_highbd_fht8x8_c,
+ &highbd_iht_wrapper<vp9_highbd_iht8x8_64_add_neon>, 8, 2,
+ VPX_BITS_10, 2),
+ make_tuple(&vp9_highbd_fht8x8_c,
+ &highbd_iht_wrapper<vp9_highbd_iht8x8_64_add_neon>, 8, 3,
+ VPX_BITS_10, 2),
+ make_tuple(&vp9_highbd_fht8x8_c,
+ &highbd_iht_wrapper<vp9_highbd_iht8x8_64_add_neon>, 8, 0,
+ VPX_BITS_12, 2),
+ make_tuple(&vp9_highbd_fht8x8_c,
+ &highbd_iht_wrapper<vp9_highbd_iht8x8_64_add_neon>, 8, 1,
+ VPX_BITS_12, 2),
+ make_tuple(&vp9_highbd_fht8x8_c,
+ &highbd_iht_wrapper<vp9_highbd_iht8x8_64_add_neon>, 8, 2,
+ VPX_BITS_12, 2),
+ make_tuple(&vp9_highbd_fht8x8_c,
+ &highbd_iht_wrapper<vp9_highbd_iht8x8_64_add_neon>, 8, 3,
+ VPX_BITS_12, 2),
#endif // CONFIG_VP9_HIGHBITDEPTH
make_tuple(&vp9_fht8x8_c, &iht_wrapper<vp9_iht8x8_64_add_neon>, 8, 0,
VPX_BITS_8, 1),
--- /dev/null
+++ b/vp9/common/arm/neon/vp9_highbd_iht8x8_add_neon.c
@@ -0,0 +1,505 @@
+/*
+ * Copyright (c) 2018 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+#include <assert.h>
+
+#include "./vpx_dsp_rtcd.h"
+#include "vp9/common/vp9_enums.h"
+#include "vp9/common/arm/neon/vp9_iht_neon.h"
+#include "vpx_dsp/arm/highbd_idct_neon.h"
+#include "vpx_dsp/arm/idct_neon.h"
+#include "vpx_dsp/arm/transpose_neon.h"
+#include "vpx_dsp/inv_txfm.h"
+
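+// High bitdepth inverse ADST helpers. For bd == 10, products of the
+// coefficients and the 14-bit cospi constants still fit in 32 bits, so the
+// _bd10 variants use plain 32-bit vmulq/vmlaq arithmetic followed by a
+// DCT_CONST_BITS rounding shift. For bd == 12 those products can overflow
+// 32 bits, so the _bd12 variants widen to 64 bits with vmull/vmlal and
+// narrow back with vrshrn_n_s64.
+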
+static INLINE void iadst_half_butterfly_bd10_neon(int32x4_t *const x,
+ const int32x2_t c) {
+ const int32x4_t sum = vaddq_s32(x[0], x[1]);
+ const int32x4_t sub = vsubq_s32(x[0], x[1]);
+
+ x[0] = vmulq_lane_s32(sum, c, 0);
+ x[1] = vmulq_lane_s32(sub, c, 0);
+ x[0] = vrshrq_n_s32(x[0], DCT_CONST_BITS);
+ x[1] = vrshrq_n_s32(x[1], DCT_CONST_BITS);
+}
+
+static INLINE void iadst_half_butterfly_bd12_neon(int32x4_t *const x,
+ const int32x2_t c) {
+ const int32x4_t sum = vaddq_s32(x[0], x[1]);
+ const int32x4_t sub = vsubq_s32(x[0], x[1]);
+ const int64x2_t t0_lo = vmull_lane_s32(vget_low_s32(sum), c, 0);
+ const int64x2_t t1_lo = vmull_lane_s32(vget_low_s32(sub), c, 0);
+ const int64x2_t t0_hi = vmull_lane_s32(vget_high_s32(sum), c, 0);
+ const int64x2_t t1_hi = vmull_lane_s32(vget_high_s32(sub), c, 0);
+ const int32x2_t out0_lo = vrshrn_n_s64(t0_lo, DCT_CONST_BITS);
+ const int32x2_t out1_lo = vrshrn_n_s64(t1_lo, DCT_CONST_BITS);
+ const int32x2_t out0_hi = vrshrn_n_s64(t0_hi, DCT_CONST_BITS);
+ const int32x2_t out1_hi = vrshrn_n_s64(t1_hi, DCT_CONST_BITS);
+
+ x[0] = vcombine_s32(out0_lo, out0_hi);
+ x[1] = vcombine_s32(out1_lo, out1_hi);
+}
+
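+// iadst_butterfly_lane_0_1 computes
+//   s0 = in0 * c[0] + in1 * c[1]
+//   s1 = in0 * c[1] - in1 * c[0]
+// and the _lane_1_0 variant swaps which constant lane multiplies which
+// term.
+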
+static INLINE void iadst_butterfly_lane_0_1_bd10_neon(const int32x4_t in0,
+ const int32x4_t in1,
+ const int32x2_t c,
+ int32x4_t *const s0,
+ int32x4_t *const s1) {
+ const int32x4_t t0 = vmulq_lane_s32(in0, c, 0);
+ const int32x4_t t1 = vmulq_lane_s32(in0, c, 1);
+
+ *s0 = vmlaq_lane_s32(t0, in1, c, 1);
+ *s1 = vmlsq_lane_s32(t1, in1, c, 0);
+}
+
+static INLINE void iadst_butterfly_lane_1_0_bd10_neon(const int32x4_t in0,
+ const int32x4_t in1,
+ const int32x2_t c,
+ int32x4_t *const s0,
+ int32x4_t *const s1) {
+ const int32x4_t t0 = vmulq_lane_s32(in0, c, 1);
+ const int32x4_t t1 = vmulq_lane_s32(in0, c, 0);
+
+ *s0 = vmlaq_lane_s32(t0, in1, c, 0);
+ *s1 = vmlsq_lane_s32(t1, in1, c, 1);
+}
+
+static INLINE void iadst_butterfly_lane_0_1_bd12_neon(const int32x4_t in0,
+ const int32x4_t in1,
+ const int32x2_t c,
+ int64x2_t *const s0,
+ int64x2_t *const s1) {
+ const int64x2_t t0_lo = vmull_lane_s32(vget_low_s32(in0), c, 0);
+ const int64x2_t t1_lo = vmull_lane_s32(vget_low_s32(in0), c, 1);
+ const int64x2_t t0_hi = vmull_lane_s32(vget_high_s32(in0), c, 0);
+ const int64x2_t t1_hi = vmull_lane_s32(vget_high_s32(in0), c, 1);
+
+ s0[0] = vmlal_lane_s32(t0_lo, vget_low_s32(in1), c, 1);
+ s1[0] = vmlsl_lane_s32(t1_lo, vget_low_s32(in1), c, 0);
+ s0[1] = vmlal_lane_s32(t0_hi, vget_high_s32(in1), c, 1);
+ s1[1] = vmlsl_lane_s32(t1_hi, vget_high_s32(in1), c, 0);
+}
+
+static INLINE void iadst_butterfly_lane_1_0_bd12_neon(const int32x4_t in0,
+ const int32x4_t in1,
+ const int32x2_t c,
+ int64x2_t *const s0,
+ int64x2_t *const s1) {
+ const int64x2_t t0_lo = vmull_lane_s32(vget_low_s32(in0), c, 1);
+ const int64x2_t t1_lo = vmull_lane_s32(vget_low_s32(in0), c, 0);
+ const int64x2_t t0_hi = vmull_lane_s32(vget_high_s32(in0), c, 1);
+ const int64x2_t t1_hi = vmull_lane_s32(vget_high_s32(in0), c, 0);
+
+ s0[0] = vmlal_lane_s32(t0_lo, vget_low_s32(in1), c, 0);
+ s1[0] = vmlsl_lane_s32(t1_lo, vget_low_s32(in1), c, 1);
+ s0[1] = vmlal_lane_s32(t0_hi, vget_high_s32(in1), c, 0);
+ s1[1] = vmlsl_lane_s32(t1_hi, vget_high_s32(in1), c, 1);
+}
+
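+// dct_const_round_shift() of a vector sum or difference, i.e.
+// ROUND_POWER_OF_TWO(in0 +/- in1, DCT_CONST_BITS).
+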
+static INLINE int32x4_t
+add_dct_const_round_shift_low_8_bd10(const int32x4_t in0, const int32x4_t in1) {
+ const int32x4_t sum = vaddq_s32(in0, in1);
+ return vrshrq_n_s32(sum, DCT_CONST_BITS);
+}
+
+static INLINE int32x4_t
+sub_dct_const_round_shift_low_8_bd10(const int32x4_t in0, const int32x4_t in1) {
+ const int32x4_t sub = vsubq_s32(in0, in1);
+ return vrshrq_n_s32(sub, DCT_CONST_BITS);
+}
+
+static INLINE int32x4_t add_dct_const_round_shift_low_8_bd12(
+ const int64x2_t *const in0, const int64x2_t *const in1) {
+ const int64x2_t sum_lo = vaddq_s64(in0[0], in1[0]);
+ const int64x2_t sum_hi = vaddq_s64(in0[1], in1[1]);
+ const int32x2_t out_lo = vrshrn_n_s64(sum_lo, DCT_CONST_BITS);
+ const int32x2_t out_hi = vrshrn_n_s64(sum_hi, DCT_CONST_BITS);
+ return vcombine_s32(out_lo, out_hi);
+}
+
+static INLINE int32x4_t sub_dct_const_round_shift_low_8_bd12(
+ const int64x2_t *const in0, const int64x2_t *const in1) {
+ const int64x2_t sub_lo = vsubq_s64(in0[0], in1[0]);
+ const int64x2_t sub_hi = vsubq_s64(in0[1], in1[1]);
+ const int32x2_t out_lo = vrshrn_n_s64(sub_lo, DCT_CONST_BITS);
+ const int32x2_t out_hi = vrshrn_n_s64(sub_hi, DCT_CONST_BITS);
+ return vcombine_s32(out_lo, out_hi);
+}
+
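+// 8-point inverse ADST over four columns, following the scalar iadst8_c():
+// permute the inputs, run two butterfly stages and one half-butterfly
+// stage with intermediate rounding, then negate the odd outputs.
+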
+static INLINE void iadst8_bd10(int32x4_t *const io0, int32x4_t *const io1,
+ int32x4_t *const io2, int32x4_t *const io3,
+ int32x4_t *const io4, int32x4_t *const io5,
+ int32x4_t *const io6, int32x4_t *const io7) {
+ const int32x4_t c0 =
+ create_s32x4_neon(cospi_2_64, cospi_30_64, cospi_10_64, cospi_22_64);
+ const int32x4_t c1 =
+ create_s32x4_neon(cospi_18_64, cospi_14_64, cospi_26_64, cospi_6_64);
+ const int32x4_t c2 =
+ create_s32x4_neon(cospi_16_64, 0, cospi_8_64, cospi_24_64);
+ int32x4_t x[8], t[4];
+ int32x4_t s[8];
+
+ x[0] = *io7;
+ x[1] = *io0;
+ x[2] = *io5;
+ x[3] = *io2;
+ x[4] = *io3;
+ x[5] = *io4;
+ x[6] = *io1;
+ x[7] = *io6;
+
+ // stage 1
+ iadst_butterfly_lane_0_1_bd10_neon(x[0], x[1], vget_low_s32(c0), &s[0],
+ &s[1]);
+ iadst_butterfly_lane_0_1_bd10_neon(x[2], x[3], vget_high_s32(c0), &s[2],
+ &s[3]);
+ iadst_butterfly_lane_0_1_bd10_neon(x[4], x[5], vget_low_s32(c1), &s[4],
+ &s[5]);
+ iadst_butterfly_lane_0_1_bd10_neon(x[6], x[7], vget_high_s32(c1), &s[6],
+ &s[7]);
+
+ x[0] = add_dct_const_round_shift_low_8_bd10(s[0], s[4]);
+ x[1] = add_dct_const_round_shift_low_8_bd10(s[1], s[5]);
+ x[2] = add_dct_const_round_shift_low_8_bd10(s[2], s[6]);
+ x[3] = add_dct_const_round_shift_low_8_bd10(s[3], s[7]);
+ x[4] = sub_dct_const_round_shift_low_8_bd10(s[0], s[4]);
+ x[5] = sub_dct_const_round_shift_low_8_bd10(s[1], s[5]);
+ x[6] = sub_dct_const_round_shift_low_8_bd10(s[2], s[6]);
+ x[7] = sub_dct_const_round_shift_low_8_bd10(s[3], s[7]);
+
+ // stage 2
+ t[0] = x[0];
+ t[1] = x[1];
+ t[2] = x[2];
+ t[3] = x[3];
+ iadst_butterfly_lane_0_1_bd10_neon(x[4], x[5], vget_high_s32(c2), &s[4],
+ &s[5]);
+ iadst_butterfly_lane_1_0_bd10_neon(x[7], x[6], vget_high_s32(c2), &s[7],
+ &s[6]);
+
+ x[0] = vaddq_s32(t[0], t[2]);
+ x[1] = vaddq_s32(t[1], t[3]);
+ x[2] = vsubq_s32(t[0], t[2]);
+ x[3] = vsubq_s32(t[1], t[3]);
+ x[4] = add_dct_const_round_shift_low_8_bd10(s[4], s[6]);
+ x[5] = add_dct_const_round_shift_low_8_bd10(s[5], s[7]);
+ x[6] = sub_dct_const_round_shift_low_8_bd10(s[4], s[6]);
+ x[7] = sub_dct_const_round_shift_low_8_bd10(s[5], s[7]);
+
+ // stage 3
+ iadst_half_butterfly_bd10_neon(x + 2, vget_low_s32(c2));
+ iadst_half_butterfly_bd10_neon(x + 6, vget_low_s32(c2));
+
+ *io0 = x[0];
+ *io1 = vnegq_s32(x[4]);
+ *io2 = x[6];
+ *io3 = vnegq_s32(x[2]);
+ *io4 = x[3];
+ *io5 = vnegq_s32(x[7]);
+ *io6 = x[5];
+ *io7 = vnegq_s32(x[1]);
+}
+
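+// Identical flow to iadst8_bd10, but with 64-bit intermediates to avoid
+// overflow at 12-bit depth.
+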
+static INLINE void iadst8_bd12(int32x4_t *const io0, int32x4_t *const io1,
+ int32x4_t *const io2, int32x4_t *const io3,
+ int32x4_t *const io4, int32x4_t *const io5,
+ int32x4_t *const io6, int32x4_t *const io7) {
+ const int32x4_t c0 =
+ create_s32x4_neon(cospi_2_64, cospi_30_64, cospi_10_64, cospi_22_64);
+ const int32x4_t c1 =
+ create_s32x4_neon(cospi_18_64, cospi_14_64, cospi_26_64, cospi_6_64);
+ const int32x4_t c2 =
+ create_s32x4_neon(cospi_16_64, 0, cospi_8_64, cospi_24_64);
+ int32x4_t x[8], t[4];
+ int64x2_t s[8][2];
+
+ x[0] = *io7;
+ x[1] = *io0;
+ x[2] = *io5;
+ x[3] = *io2;
+ x[4] = *io3;
+ x[5] = *io4;
+ x[6] = *io1;
+ x[7] = *io6;
+
+ // stage 1
+ iadst_butterfly_lane_0_1_bd12_neon(x[0], x[1], vget_low_s32(c0), s[0], s[1]);
+ iadst_butterfly_lane_0_1_bd12_neon(x[2], x[3], vget_high_s32(c0), s[2], s[3]);
+ iadst_butterfly_lane_0_1_bd12_neon(x[4], x[5], vget_low_s32(c1), s[4], s[5]);
+ iadst_butterfly_lane_0_1_bd12_neon(x[6], x[7], vget_high_s32(c1), s[6], s[7]);
+
+ x[0] = add_dct_const_round_shift_low_8_bd12(s[0], s[4]);
+ x[1] = add_dct_const_round_shift_low_8_bd12(s[1], s[5]);
+ x[2] = add_dct_const_round_shift_low_8_bd12(s[2], s[6]);
+ x[3] = add_dct_const_round_shift_low_8_bd12(s[3], s[7]);
+ x[4] = sub_dct_const_round_shift_low_8_bd12(s[0], s[4]);
+ x[5] = sub_dct_const_round_shift_low_8_bd12(s[1], s[5]);
+ x[6] = sub_dct_const_round_shift_low_8_bd12(s[2], s[6]);
+ x[7] = sub_dct_const_round_shift_low_8_bd12(s[3], s[7]);
+
+ // stage 2
+ t[0] = x[0];
+ t[1] = x[1];
+ t[2] = x[2];
+ t[3] = x[3];
+ iadst_butterfly_lane_0_1_bd12_neon(x[4], x[5], vget_high_s32(c2), s[4], s[5]);
+ iadst_butterfly_lane_1_0_bd12_neon(x[7], x[6], vget_high_s32(c2), s[7], s[6]);
+
+ x[0] = vaddq_s32(t[0], t[2]);
+ x[1] = vaddq_s32(t[1], t[3]);
+ x[2] = vsubq_s32(t[0], t[2]);
+ x[3] = vsubq_s32(t[1], t[3]);
+ x[4] = add_dct_const_round_shift_low_8_bd12(s[4], s[6]);
+ x[5] = add_dct_const_round_shift_low_8_bd12(s[5], s[7]);
+ x[6] = sub_dct_const_round_shift_low_8_bd12(s[4], s[6]);
+ x[7] = sub_dct_const_round_shift_low_8_bd12(s[5], s[7]);
+
+ // stage 3
+ iadst_half_butterfly_bd12_neon(x + 2, vget_low_s32(c2));
+ iadst_half_butterfly_bd12_neon(x + 6, vget_low_s32(c2));
+
+ *io0 = x[0];
+ *io1 = vnegq_s32(x[4]);
+ *io2 = x[6];
+ *io3 = vnegq_s32(x[2]);
+ *io4 = x[3];
+ *io5 = vnegq_s32(x[7]);
+ *io6 = x[5];
+ *io7 = vnegq_s32(x[1]);
+}
+
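+// 2-D 8x8 inverse hybrid transform: each pass is an IDCT or an IADST
+// depending on tx_type. bd == 8 narrows the coefficients to 16 bits and
+// reuses the non-highbd kernels; 10- and 12-bit input stays in 32-bit
+// lanes throughout.
+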
+void vp9_highbd_iht8x8_64_add_neon(const tran_low_t *input, uint16_t *dest,
+ int stride, int tx_type, int bd) {
+ int32x4_t a[16];
+ int16x8_t c[8];
+
+ a[0] = vld1q_s32(input);
+ a[1] = vld1q_s32(input + 4);
+ a[2] = vld1q_s32(input + 8);
+ a[3] = vld1q_s32(input + 12);
+ a[4] = vld1q_s32(input + 16);
+ a[5] = vld1q_s32(input + 20);
+ a[6] = vld1q_s32(input + 24);
+ a[7] = vld1q_s32(input + 28);
+ a[8] = vld1q_s32(input + 32);
+ a[9] = vld1q_s32(input + 36);
+ a[10] = vld1q_s32(input + 40);
+ a[11] = vld1q_s32(input + 44);
+ a[12] = vld1q_s32(input + 48);
+ a[13] = vld1q_s32(input + 52);
+ a[14] = vld1q_s32(input + 56);
+ a[15] = vld1q_s32(input + 60);
+
+ if (bd == 8) {
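+    // In CONFIG_VP9_HIGHBITDEPTH builds tran_low_t is 32 bits wide, but
+    // 8-bit content fits in 16 bits, so narrow here and reuse the int16
+    // kernels.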
+ c[0] = vcombine_s16(vmovn_s32(a[0]), vmovn_s32(a[1]));
+ c[1] = vcombine_s16(vmovn_s32(a[2]), vmovn_s32(a[3]));
+ c[2] = vcombine_s16(vmovn_s32(a[4]), vmovn_s32(a[5]));
+ c[3] = vcombine_s16(vmovn_s32(a[6]), vmovn_s32(a[7]));
+ c[4] = vcombine_s16(vmovn_s32(a[8]), vmovn_s32(a[9]));
+ c[5] = vcombine_s16(vmovn_s32(a[10]), vmovn_s32(a[11]));
+ c[6] = vcombine_s16(vmovn_s32(a[12]), vmovn_s32(a[13]));
+ c[7] = vcombine_s16(vmovn_s32(a[14]), vmovn_s32(a[15]));
+
+ switch (tx_type) {
+ case DCT_DCT: {
+ const int16x8_t cospis = vld1q_s16(kCospi);
+ const int16x4_t cospis0 = vget_low_s16(cospis); // cospi 0, 8, 16, 24
+ const int16x4_t cospis1 = vget_high_s16(cospis); // cospi 4, 12, 20, 28
+
+ idct8x8_64_1d_bd8(cospis0, cospis1, c);
+ idct8x8_64_1d_bd8(cospis0, cospis1, c);
+ break;
+ }
+
+ case ADST_DCT: {
+ const int16x8_t cospis = vld1q_s16(kCospi);
+ const int16x4_t cospis0 = vget_low_s16(cospis); // cospi 0, 8, 16, 24
+ const int16x4_t cospis1 = vget_high_s16(cospis); // cospi 4, 12, 20, 28
+
+ idct8x8_64_1d_bd8(cospis0, cospis1, c);
+ transpose_s16_8x8(&c[0], &c[1], &c[2], &c[3], &c[4], &c[5], &c[6],
+ &c[7]);
+ iadst8(c);
+ break;
+ }
+
+ case DCT_ADST: {
+ const int16x8_t cospis = vld1q_s16(kCospi);
+ const int16x4_t cospis0 = vget_low_s16(cospis); // cospi 0, 8, 16, 24
+ const int16x4_t cospis1 = vget_high_s16(cospis); // cospi 4, 12, 20, 28
+
+ transpose_s16_8x8(&c[0], &c[1], &c[2], &c[3], &c[4], &c[5], &c[6],
+ &c[7]);
+ iadst8(c);
+ idct8x8_64_1d_bd8(cospis0, cospis1, c);
+ break;
+ }
+
+ default: {
+ transpose_s16_8x8(&c[0], &c[1], &c[2], &c[3], &c[4], &c[5], &c[6],
+ &c[7]);
+ iadst8(c);
+ transpose_s16_8x8(&c[0], &c[1], &c[2], &c[3], &c[4], &c[5], &c[6],
+ &c[7]);
+ iadst8(c);
+ break;
+ }
+ }
+
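+    // Scale the 2-D transform output down with a rounding shift by 5,
+    // matching ROUND_POWER_OF_TWO(x, 5) in the C reference code.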
+ c[0] = vrshrq_n_s16(c[0], 5);
+ c[1] = vrshrq_n_s16(c[1], 5);
+ c[2] = vrshrq_n_s16(c[2], 5);
+ c[3] = vrshrq_n_s16(c[3], 5);
+ c[4] = vrshrq_n_s16(c[4], 5);
+ c[5] = vrshrq_n_s16(c[5], 5);
+ c[6] = vrshrq_n_s16(c[6], 5);
+ c[7] = vrshrq_n_s16(c[7], 5);
+ } else {
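+    // 10/12-bit path: coefficients stay in 32-bit lanes. Each *_half1d
+    // call transforms an 8x4 half of the block, so every pass takes two
+    // calls.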
+ switch (tx_type) {
+ case DCT_DCT: {
+ const int32x4_t cospis0 = vld1q_s32(kCospi32); // cospi 0, 8, 16, 24
+ const int32x4_t cospis1 =
+ vld1q_s32(kCospi32 + 4); // cospi 4, 12, 20, 28
+
+ if (bd == 10) {
+ idct8x8_64_half1d_bd10(cospis0, cospis1, &a[0], &a[1], &a[2], &a[3],
+ &a[4], &a[5], &a[6], &a[7]);
+ idct8x8_64_half1d_bd10(cospis0, cospis1, &a[8], &a[9], &a[10], &a[11],
+ &a[12], &a[13], &a[14], &a[15]);
+ idct8x8_64_half1d_bd10(cospis0, cospis1, &a[0], &a[8], &a[1], &a[9],
+ &a[2], &a[10], &a[3], &a[11]);
+ idct8x8_64_half1d_bd10(cospis0, cospis1, &a[4], &a[12], &a[5], &a[13],
+ &a[6], &a[14], &a[7], &a[15]);
+ } else {
+ idct8x8_64_half1d_bd12(cospis0, cospis1, &a[0], &a[1], &a[2], &a[3],
+ &a[4], &a[5], &a[6], &a[7]);
+ idct8x8_64_half1d_bd12(cospis0, cospis1, &a[8], &a[9], &a[10], &a[11],
+ &a[12], &a[13], &a[14], &a[15]);
+ idct8x8_64_half1d_bd12(cospis0, cospis1, &a[0], &a[8], &a[1], &a[9],
+ &a[2], &a[10], &a[3], &a[11]);
+ idct8x8_64_half1d_bd12(cospis0, cospis1, &a[4], &a[12], &a[5], &a[13],
+ &a[6], &a[14], &a[7], &a[15]);
+ }
+ break;
+ }
+
+ case ADST_DCT: {
+ const int32x4_t cospis0 = vld1q_s32(kCospi32); // cospi 0, 8, 16, 24
+ const int32x4_t cospis1 =
+ vld1q_s32(kCospi32 + 4); // cospi 4, 12, 20, 28
+
+ if (bd == 10) {
+ idct8x8_64_half1d_bd10(cospis0, cospis1, &a[0], &a[1], &a[2], &a[3],
+ &a[4], &a[5], &a[6], &a[7]);
+ idct8x8_64_half1d_bd10(cospis0, cospis1, &a[8], &a[9], &a[10], &a[11],
+ &a[12], &a[13], &a[14], &a[15]);
+ transpose_s32_8x4(&a[0], &a[8], &a[1], &a[9], &a[2], &a[10], &a[3],
+ &a[11]);
+ iadst8_bd10(&a[0], &a[8], &a[1], &a[9], &a[2], &a[10], &a[3], &a[11]);
+ transpose_s32_8x4(&a[4], &a[12], &a[5], &a[13], &a[6], &a[14], &a[7],
+ &a[15]);
+ iadst8_bd10(&a[4], &a[12], &a[5], &a[13], &a[6], &a[14], &a[7],
+ &a[15]);
+ } else {
+ idct8x8_64_half1d_bd12(cospis0, cospis1, &a[0], &a[1], &a[2], &a[3],
+ &a[4], &a[5], &a[6], &a[7]);
+ idct8x8_64_half1d_bd12(cospis0, cospis1, &a[8], &a[9], &a[10], &a[11],
+ &a[12], &a[13], &a[14], &a[15]);
+ transpose_s32_8x4(&a[0], &a[8], &a[1], &a[9], &a[2], &a[10], &a[3],
+ &a[11]);
+ iadst8_bd12(&a[0], &a[8], &a[1], &a[9], &a[2], &a[10], &a[3], &a[11]);
+ transpose_s32_8x4(&a[4], &a[12], &a[5], &a[13], &a[6], &a[14], &a[7],
+ &a[15]);
+ iadst8_bd12(&a[4], &a[12], &a[5], &a[13], &a[6], &a[14], &a[7],
+ &a[15]);
+ }
+ break;
+ }
+
+ case DCT_ADST: {
+ const int32x4_t cospis0 = vld1q_s32(kCospi32); // cospi 0, 8, 16, 24
+ const int32x4_t cospis1 =
+ vld1q_s32(kCospi32 + 4); // cospi 4, 12, 20, 28
+
+ if (bd == 10) {
+ transpose_s32_8x4(&a[0], &a[1], &a[2], &a[3], &a[4], &a[5], &a[6],
+ &a[7]);
+ iadst8_bd10(&a[0], &a[1], &a[2], &a[3], &a[4], &a[5], &a[6], &a[7]);
+ transpose_s32_8x4(&a[8], &a[9], &a[10], &a[11], &a[12], &a[13],
+ &a[14], &a[15]);
+ iadst8_bd10(&a[8], &a[9], &a[10], &a[11], &a[12], &a[13], &a[14],
+ &a[15]);
+ idct8x8_64_half1d_bd10(cospis0, cospis1, &a[0], &a[8], &a[1], &a[9],
+ &a[2], &a[10], &a[3], &a[11]);
+ idct8x8_64_half1d_bd10(cospis0, cospis1, &a[4], &a[12], &a[5], &a[13],
+ &a[6], &a[14], &a[7], &a[15]);
+ } else {
+ transpose_s32_8x4(&a[0], &a[1], &a[2], &a[3], &a[4], &a[5], &a[6],
+ &a[7]);
+ iadst8_bd12(&a[0], &a[1], &a[2], &a[3], &a[4], &a[5], &a[6], &a[7]);
+ transpose_s32_8x4(&a[8], &a[9], &a[10], &a[11], &a[12], &a[13],
+ &a[14], &a[15]);
+ iadst8_bd12(&a[8], &a[9], &a[10], &a[11], &a[12], &a[13], &a[14],
+ &a[15]);
+ idct8x8_64_half1d_bd12(cospis0, cospis1, &a[0], &a[8], &a[1], &a[9],
+ &a[2], &a[10], &a[3], &a[11]);
+ idct8x8_64_half1d_bd12(cospis0, cospis1, &a[4], &a[12], &a[5], &a[13],
+ &a[6], &a[14], &a[7], &a[15]);
+ }
+ break;
+ }
+
+ default: {
+ assert(tx_type == ADST_ADST);
+ if (bd == 10) {
+ transpose_s32_8x4(&a[0], &a[1], &a[2], &a[3], &a[4], &a[5], &a[6],
+ &a[7]);
+ iadst8_bd10(&a[0], &a[1], &a[2], &a[3], &a[4], &a[5], &a[6], &a[7]);
+ transpose_s32_8x4(&a[8], &a[9], &a[10], &a[11], &a[12], &a[13],
+ &a[14], &a[15]);
+ iadst8_bd10(&a[8], &a[9], &a[10], &a[11], &a[12], &a[13], &a[14],
+ &a[15]);
+ transpose_s32_8x4(&a[0], &a[8], &a[1], &a[9], &a[2], &a[10], &a[3],
+ &a[11]);
+ iadst8_bd10(&a[0], &a[8], &a[1], &a[9], &a[2], &a[10], &a[3], &a[11]);
+ transpose_s32_8x4(&a[4], &a[12], &a[5], &a[13], &a[6], &a[14], &a[7],
+ &a[15]);
+ iadst8_bd10(&a[4], &a[12], &a[5], &a[13], &a[6], &a[14], &a[7],
+ &a[15]);
+ } else {
+ transpose_s32_8x4(&a[0], &a[1], &a[2], &a[3], &a[4], &a[5], &a[6],
+ &a[7]);
+ iadst8_bd12(&a[0], &a[1], &a[2], &a[3], &a[4], &a[5], &a[6], &a[7]);
+ transpose_s32_8x4(&a[8], &a[9], &a[10], &a[11], &a[12], &a[13],
+ &a[14], &a[15]);
+ iadst8_bd12(&a[8], &a[9], &a[10], &a[11], &a[12], &a[13], &a[14],
+ &a[15]);
+ transpose_s32_8x4(&a[0], &a[8], &a[1], &a[9], &a[2], &a[10], &a[3],
+ &a[11]);
+ iadst8_bd12(&a[0], &a[8], &a[1], &a[9], &a[2], &a[10], &a[3], &a[11]);
+ transpose_s32_8x4(&a[4], &a[12], &a[5], &a[13], &a[6], &a[14], &a[7],
+ &a[15]);
+ iadst8_bd12(&a[4], &a[12], &a[5], &a[13], &a[6], &a[14], &a[7],
+ &a[15]);
+ }
+ break;
+ }
+ }
+
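+    // Narrow back to 16 bits with the same rounding shift by 5, rebuilding
+    // each output row from the half-width column registers.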
+ c[0] = vcombine_s16(vrshrn_n_s32(a[0], 5), vrshrn_n_s32(a[4], 5));
+ c[1] = vcombine_s16(vrshrn_n_s32(a[8], 5), vrshrn_n_s32(a[12], 5));
+ c[2] = vcombine_s16(vrshrn_n_s32(a[1], 5), vrshrn_n_s32(a[5], 5));
+ c[3] = vcombine_s16(vrshrn_n_s32(a[9], 5), vrshrn_n_s32(a[13], 5));
+ c[4] = vcombine_s16(vrshrn_n_s32(a[2], 5), vrshrn_n_s32(a[6], 5));
+ c[5] = vcombine_s16(vrshrn_n_s32(a[10], 5), vrshrn_n_s32(a[14], 5));
+ c[6] = vcombine_s16(vrshrn_n_s32(a[3], 5), vrshrn_n_s32(a[7], 5));
+ c[7] = vcombine_s16(vrshrn_n_s32(a[11], 5), vrshrn_n_s32(a[15], 5));
+ }
+ highbd_add8x8(c, dest, stride, bd);
+}
--- a/vp9/common/arm/neon/vp9_iht8x8_add_neon.c
+++ b/vp9/common/arm/neon/vp9_iht8x8_add_neon.c
@@ -14,154 +14,10 @@
#include "./vp9_rtcd.h"
#include "./vpx_config.h"
#include "vp9/common/vp9_common.h"
+#include "vp9/common/arm/neon/vp9_iht_neon.h"
#include "vpx_dsp/arm/idct_neon.h"
#include "vpx_dsp/arm/mem_neon.h"
#include "vpx_dsp/arm/transpose_neon.h"
-
-static INLINE void iadst_half_butterfly_neon(int16x8_t *const x,
- const int16x4_t c) {
- const int16x8_t sum = vaddq_s16(x[0], x[1]);
- const int16x8_t sub = vsubq_s16(x[0], x[1]);
- int32x4_t t0[2], t1[2];
-
- t0[0] = vmull_lane_s16(vget_low_s16(sum), c, 0);
- t0[1] = vmull_lane_s16(vget_high_s16(sum), c, 0);
- t1[0] = vmull_lane_s16(vget_low_s16(sub), c, 0);
- t1[1] = vmull_lane_s16(vget_high_s16(sub), c, 0);
- x[0] = dct_const_round_shift_low_8(t0);
- x[1] = dct_const_round_shift_low_8(t1);
-}
-
-static INLINE void iadst_butterfly_lane_0_1_neon(const int16x8_t in0,
- const int16x8_t in1,
- const int16x4_t c,
- int32x4_t *const s0,
- int32x4_t *const s1) {
- s0[0] = vmull_lane_s16(vget_low_s16(in0), c, 0);
- s0[1] = vmull_lane_s16(vget_high_s16(in0), c, 0);
- s1[0] = vmull_lane_s16(vget_low_s16(in0), c, 1);
- s1[1] = vmull_lane_s16(vget_high_s16(in0), c, 1);
-
- s0[0] = vmlal_lane_s16(s0[0], vget_low_s16(in1), c, 1);
- s0[1] = vmlal_lane_s16(s0[1], vget_high_s16(in1), c, 1);
- s1[0] = vmlsl_lane_s16(s1[0], vget_low_s16(in1), c, 0);
- s1[1] = vmlsl_lane_s16(s1[1], vget_high_s16(in1), c, 0);
-}
-
-static INLINE void iadst_butterfly_lane_2_3_neon(const int16x8_t in0,
- const int16x8_t in1,
- const int16x4_t c,
- int32x4_t *const s0,
- int32x4_t *const s1) {
- s0[0] = vmull_lane_s16(vget_low_s16(in0), c, 2);
- s0[1] = vmull_lane_s16(vget_high_s16(in0), c, 2);
- s1[0] = vmull_lane_s16(vget_low_s16(in0), c, 3);
- s1[1] = vmull_lane_s16(vget_high_s16(in0), c, 3);
-
- s0[0] = vmlal_lane_s16(s0[0], vget_low_s16(in1), c, 3);
- s0[1] = vmlal_lane_s16(s0[1], vget_high_s16(in1), c, 3);
- s1[0] = vmlsl_lane_s16(s1[0], vget_low_s16(in1), c, 2);
- s1[1] = vmlsl_lane_s16(s1[1], vget_high_s16(in1), c, 2);
-}
-
-static INLINE void iadst_butterfly_lane_3_2_neon(const int16x8_t in0,
- const int16x8_t in1,
- const int16x4_t c,
- int32x4_t *const s0,
- int32x4_t *const s1) {
- s0[0] = vmull_lane_s16(vget_low_s16(in0), c, 3);
- s0[1] = vmull_lane_s16(vget_high_s16(in0), c, 3);
- s1[0] = vmull_lane_s16(vget_low_s16(in0), c, 2);
- s1[1] = vmull_lane_s16(vget_high_s16(in0), c, 2);
-
- s0[0] = vmlal_lane_s16(s0[0], vget_low_s16(in1), c, 2);
- s0[1] = vmlal_lane_s16(s0[1], vget_high_s16(in1), c, 2);
- s1[0] = vmlsl_lane_s16(s1[0], vget_low_s16(in1), c, 3);
- s1[1] = vmlsl_lane_s16(s1[1], vget_high_s16(in1), c, 3);
-}
-
-static INLINE int16x8_t add_dct_const_round_shift_low_8(
- const int32x4_t *const in0, const int32x4_t *const in1) {
- int32x4_t sum[2];
-
- sum[0] = vaddq_s32(in0[0], in1[0]);
- sum[1] = vaddq_s32(in0[1], in1[1]);
- return dct_const_round_shift_low_8(sum);
-}
-
-static INLINE int16x8_t sub_dct_const_round_shift_low_8(
- const int32x4_t *const in0, const int32x4_t *const in1) {
- int32x4_t sum[2];
-
- sum[0] = vsubq_s32(in0[0], in1[0]);
- sum[1] = vsubq_s32(in0[1], in1[1]);
- return dct_const_round_shift_low_8(sum);
-}
-
-static INLINE void iadst8(int16x8_t *const io) {
- const int16x4_t c0 =
- create_s16x4_neon(cospi_2_64, cospi_30_64, cospi_10_64, cospi_22_64);
- const int16x4_t c1 =
- create_s16x4_neon(cospi_18_64, cospi_14_64, cospi_26_64, cospi_6_64);
- const int16x4_t c2 =
- create_s16x4_neon(cospi_16_64, 0, cospi_8_64, cospi_24_64);
- int16x8_t x[8], t[4];
- int32x4_t s0[2], s1[2], s2[2], s3[2], s4[2], s5[2], s6[2], s7[2];
-
- x[0] = io[7];
- x[1] = io[0];
- x[2] = io[5];
- x[3] = io[2];
- x[4] = io[3];
- x[5] = io[4];
- x[6] = io[1];
- x[7] = io[6];
-
- // stage 1
- iadst_butterfly_lane_0_1_neon(x[0], x[1], c0, s0, s1);
- iadst_butterfly_lane_2_3_neon(x[2], x[3], c0, s2, s3);
- iadst_butterfly_lane_0_1_neon(x[4], x[5], c1, s4, s5);
- iadst_butterfly_lane_2_3_neon(x[6], x[7], c1, s6, s7);
-
- x[0] = add_dct_const_round_shift_low_8(s0, s4);
- x[1] = add_dct_const_round_shift_low_8(s1, s5);
- x[2] = add_dct_const_round_shift_low_8(s2, s6);
- x[3] = add_dct_const_round_shift_low_8(s3, s7);
- x[4] = sub_dct_const_round_shift_low_8(s0, s4);
- x[5] = sub_dct_const_round_shift_low_8(s1, s5);
- x[6] = sub_dct_const_round_shift_low_8(s2, s6);
- x[7] = sub_dct_const_round_shift_low_8(s3, s7);
-
- // stage 2
- t[0] = x[0];
- t[1] = x[1];
- t[2] = x[2];
- t[3] = x[3];
- iadst_butterfly_lane_2_3_neon(x[4], x[5], c2, s4, s5);
- iadst_butterfly_lane_3_2_neon(x[7], x[6], c2, s7, s6);
-
- x[0] = vaddq_s16(t[0], t[2]);
- x[1] = vaddq_s16(t[1], t[3]);
- x[2] = vsubq_s16(t[0], t[2]);
- x[3] = vsubq_s16(t[1], t[3]);
- x[4] = add_dct_const_round_shift_low_8(s4, s6);
- x[5] = add_dct_const_round_shift_low_8(s5, s7);
- x[6] = sub_dct_const_round_shift_low_8(s4, s6);
- x[7] = sub_dct_const_round_shift_low_8(s5, s7);
-
- // stage 3
- iadst_half_butterfly_neon(x + 2, c2);
- iadst_half_butterfly_neon(x + 6, c2);
-
- io[0] = x[0];
- io[1] = vnegq_s16(x[4]);
- io[2] = x[6];
- io[3] = vnegq_s16(x[2]);
- io[4] = x[3];
- io[5] = vnegq_s16(x[7]);
- io[6] = x[5];
- io[7] = vnegq_s16(x[1]);
-}

void vp9_iht8x8_64_add_neon(const tran_low_t *input, uint8_t *dest, int stride,
int tx_type) {
--- a/vp9/common/arm/neon/vp9_iht_neon.h
+++ b/vp9/common/arm/neon/vp9_iht_neon.h
@@ -57,4 +57,149 @@
dct_const_round_shift_low_8_dual(output, &io[0], &io[1]);
}

+static INLINE void iadst_half_butterfly_neon(int16x8_t *const x,
+ const int16x4_t c) {
+ const int16x8_t sum = vaddq_s16(x[0], x[1]);
+ const int16x8_t sub = vsubq_s16(x[0], x[1]);
+ int32x4_t t0[2], t1[2];
+
+ t0[0] = vmull_lane_s16(vget_low_s16(sum), c, 0);
+ t0[1] = vmull_lane_s16(vget_high_s16(sum), c, 0);
+ t1[0] = vmull_lane_s16(vget_low_s16(sub), c, 0);
+ t1[1] = vmull_lane_s16(vget_high_s16(sub), c, 0);
+ x[0] = dct_const_round_shift_low_8(t0);
+ x[1] = dct_const_round_shift_low_8(t1);
+}
+
+static INLINE void iadst_butterfly_lane_0_1_neon(const int16x8_t in0,
+ const int16x8_t in1,
+ const int16x4_t c,
+ int32x4_t *const s0,
+ int32x4_t *const s1) {
+ s0[0] = vmull_lane_s16(vget_low_s16(in0), c, 0);
+ s0[1] = vmull_lane_s16(vget_high_s16(in0), c, 0);
+ s1[0] = vmull_lane_s16(vget_low_s16(in0), c, 1);
+ s1[1] = vmull_lane_s16(vget_high_s16(in0), c, 1);
+
+ s0[0] = vmlal_lane_s16(s0[0], vget_low_s16(in1), c, 1);
+ s0[1] = vmlal_lane_s16(s0[1], vget_high_s16(in1), c, 1);
+ s1[0] = vmlsl_lane_s16(s1[0], vget_low_s16(in1), c, 0);
+ s1[1] = vmlsl_lane_s16(s1[1], vget_high_s16(in1), c, 0);
+}
+
+static INLINE void iadst_butterfly_lane_2_3_neon(const int16x8_t in0,
+ const int16x8_t in1,
+ const int16x4_t c,
+ int32x4_t *const s0,
+ int32x4_t *const s1) {
+ s0[0] = vmull_lane_s16(vget_low_s16(in0), c, 2);
+ s0[1] = vmull_lane_s16(vget_high_s16(in0), c, 2);
+ s1[0] = vmull_lane_s16(vget_low_s16(in0), c, 3);
+ s1[1] = vmull_lane_s16(vget_high_s16(in0), c, 3);
+
+ s0[0] = vmlal_lane_s16(s0[0], vget_low_s16(in1), c, 3);
+ s0[1] = vmlal_lane_s16(s0[1], vget_high_s16(in1), c, 3);
+ s1[0] = vmlsl_lane_s16(s1[0], vget_low_s16(in1), c, 2);
+ s1[1] = vmlsl_lane_s16(s1[1], vget_high_s16(in1), c, 2);
+}
+
+static INLINE void iadst_butterfly_lane_3_2_neon(const int16x8_t in0,
+ const int16x8_t in1,
+ const int16x4_t c,
+ int32x4_t *const s0,
+ int32x4_t *const s1) {
+ s0[0] = vmull_lane_s16(vget_low_s16(in0), c, 3);
+ s0[1] = vmull_lane_s16(vget_high_s16(in0), c, 3);
+ s1[0] = vmull_lane_s16(vget_low_s16(in0), c, 2);
+ s1[1] = vmull_lane_s16(vget_high_s16(in0), c, 2);
+
+ s0[0] = vmlal_lane_s16(s0[0], vget_low_s16(in1), c, 2);
+ s0[1] = vmlal_lane_s16(s0[1], vget_high_s16(in1), c, 2);
+ s1[0] = vmlsl_lane_s16(s1[0], vget_low_s16(in1), c, 3);
+ s1[1] = vmlsl_lane_s16(s1[1], vget_high_s16(in1), c, 3);
+}
+
+static INLINE int16x8_t add_dct_const_round_shift_low_8(
+ const int32x4_t *const in0, const int32x4_t *const in1) {
+ int32x4_t sum[2];
+
+ sum[0] = vaddq_s32(in0[0], in1[0]);
+ sum[1] = vaddq_s32(in0[1], in1[1]);
+ return dct_const_round_shift_low_8(sum);
+}
+
+static INLINE int16x8_t sub_dct_const_round_shift_low_8(
+ const int32x4_t *const in0, const int32x4_t *const in1) {
+ int32x4_t sum[2];
+
+ sum[0] = vsubq_s32(in0[0], in1[0]);
+ sum[1] = vsubq_s32(in0[1], in1[1]);
+ return dct_const_round_shift_low_8(sum);
+}
+
+static INLINE void iadst8(int16x8_t *const io) {
+ const int16x4_t c0 =
+ create_s16x4_neon(cospi_2_64, cospi_30_64, cospi_10_64, cospi_22_64);
+ const int16x4_t c1 =
+ create_s16x4_neon(cospi_18_64, cospi_14_64, cospi_26_64, cospi_6_64);
+ const int16x4_t c2 =
+ create_s16x4_neon(cospi_16_64, 0, cospi_8_64, cospi_24_64);
+ int16x8_t x[8], t[4];
+ int32x4_t s0[2], s1[2], s2[2], s3[2], s4[2], s5[2], s6[2], s7[2];
+
+ x[0] = io[7];
+ x[1] = io[0];
+ x[2] = io[5];
+ x[3] = io[2];
+ x[4] = io[3];
+ x[5] = io[4];
+ x[6] = io[1];
+ x[7] = io[6];
+
+ // stage 1
+ iadst_butterfly_lane_0_1_neon(x[0], x[1], c0, s0, s1);
+ iadst_butterfly_lane_2_3_neon(x[2], x[3], c0, s2, s3);
+ iadst_butterfly_lane_0_1_neon(x[4], x[5], c1, s4, s5);
+ iadst_butterfly_lane_2_3_neon(x[6], x[7], c1, s6, s7);
+
+ x[0] = add_dct_const_round_shift_low_8(s0, s4);
+ x[1] = add_dct_const_round_shift_low_8(s1, s5);
+ x[2] = add_dct_const_round_shift_low_8(s2, s6);
+ x[3] = add_dct_const_round_shift_low_8(s3, s7);
+ x[4] = sub_dct_const_round_shift_low_8(s0, s4);
+ x[5] = sub_dct_const_round_shift_low_8(s1, s5);
+ x[6] = sub_dct_const_round_shift_low_8(s2, s6);
+ x[7] = sub_dct_const_round_shift_low_8(s3, s7);
+
+ // stage 2
+ t[0] = x[0];
+ t[1] = x[1];
+ t[2] = x[2];
+ t[3] = x[3];
+ iadst_butterfly_lane_2_3_neon(x[4], x[5], c2, s4, s5);
+ iadst_butterfly_lane_3_2_neon(x[7], x[6], c2, s7, s6);
+
+ x[0] = vaddq_s16(t[0], t[2]);
+ x[1] = vaddq_s16(t[1], t[3]);
+ x[2] = vsubq_s16(t[0], t[2]);
+ x[3] = vsubq_s16(t[1], t[3]);
+ x[4] = add_dct_const_round_shift_low_8(s4, s6);
+ x[5] = add_dct_const_round_shift_low_8(s5, s7);
+ x[6] = sub_dct_const_round_shift_low_8(s4, s6);
+ x[7] = sub_dct_const_round_shift_low_8(s5, s7);
+
+ // stage 3
+ iadst_half_butterfly_neon(x + 2, c2);
+ iadst_half_butterfly_neon(x + 6, c2);
+
+ io[0] = x[0];
+ io[1] = vnegq_s16(x[4]);
+ io[2] = x[6];
+ io[3] = vnegq_s16(x[2]);
+ io[4] = x[3];
+ io[5] = vnegq_s16(x[7]);
+ io[6] = x[5];
+ io[7] = vnegq_s16(x[1]);
+}
+
#endif // VP9_COMMON_ARM_NEON_VP9_IHT_NEON_H_
--- a/vp9/common/vp9_rtcd_defs.pl
+++ b/vp9/common/vp9_rtcd_defs.pl
@@ -104,7 +104,7 @@
if (vpx_config("CONFIG_EMULATE_HARDWARE") ne "yes") {
specialize qw/vp9_highbd_iht4x4_16_add neon sse4_1/;
- specialize qw/vp9_highbd_iht8x8_64_add sse4_1/;
+ specialize qw/vp9_highbd_iht8x8_64_add neon sse4_1/;
specialize qw/vp9_highbd_iht16x16_256_add sse4_1/;
}
}
--- a/vp9/vp9_common.mk
+++ b/vp9/vp9_common.mk
@@ -83,6 +83,7 @@
VP9_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/vp9_itrans16_dspr2.c
else
VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_highbd_iht4x4_add_neon.c
+VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_highbd_iht8x8_add_neon.c
VP9_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/vp9_highbd_iht4x4_add_sse4.c
VP9_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/vp9_highbd_iht8x8_add_sse4.c
VP9_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/vp9_highbd_iht16x16_add_sse4.c
--- a/vpx_dsp/arm/highbd_idct8x8_add_neon.c
+++ b/vpx_dsp/arm/highbd_idct8x8_add_neon.c
@@ -11,6 +11,7 @@
#include <arm_neon.h>
#include "./vpx_dsp_rtcd.h"
+#include "vpx_dsp/arm/highbd_idct_neon.h"
#include "vpx_dsp/arm/idct_neon.h"
#include "vpx_dsp/arm/transpose_neon.h"
#include "vpx_dsp/inv_txfm.h"
@@ -222,73 +223,6 @@
*io7 = vsubq_s32(step1[0], step2[7]);
}

-static INLINE void highbd_add8x8(int16x8_t *const a, uint16_t *dest,
- const int stride, const int bd) {
- const int16x8_t max = vdupq_n_s16((1 << bd) - 1);
- const uint16_t *dst = dest;
- uint16x8_t d0, d1, d2, d3, d4, d5, d6, d7;
- uint16x8_t d0_u16, d1_u16, d2_u16, d3_u16, d4_u16, d5_u16, d6_u16, d7_u16;
- int16x8_t d0_s16, d1_s16, d2_s16, d3_s16, d4_s16, d5_s16, d6_s16, d7_s16;
-
- d0 = vld1q_u16(dst);
- dst += stride;
- d1 = vld1q_u16(dst);
- dst += stride;
- d2 = vld1q_u16(dst);
- dst += stride;
- d3 = vld1q_u16(dst);
- dst += stride;
- d4 = vld1q_u16(dst);
- dst += stride;
- d5 = vld1q_u16(dst);
- dst += stride;
- d6 = vld1q_u16(dst);
- dst += stride;
- d7 = vld1q_u16(dst);
-
- d0_s16 = vqaddq_s16(a[0], vreinterpretq_s16_u16(d0));
- d1_s16 = vqaddq_s16(a[1], vreinterpretq_s16_u16(d1));
- d2_s16 = vqaddq_s16(a[2], vreinterpretq_s16_u16(d2));
- d3_s16 = vqaddq_s16(a[3], vreinterpretq_s16_u16(d3));
- d4_s16 = vqaddq_s16(a[4], vreinterpretq_s16_u16(d4));
- d5_s16 = vqaddq_s16(a[5], vreinterpretq_s16_u16(d5));
- d6_s16 = vqaddq_s16(a[6], vreinterpretq_s16_u16(d6));
- d7_s16 = vqaddq_s16(a[7], vreinterpretq_s16_u16(d7));
-
- d0_s16 = vminq_s16(d0_s16, max);
- d1_s16 = vminq_s16(d1_s16, max);
- d2_s16 = vminq_s16(d2_s16, max);
- d3_s16 = vminq_s16(d3_s16, max);
- d4_s16 = vminq_s16(d4_s16, max);
- d5_s16 = vminq_s16(d5_s16, max);
- d6_s16 = vminq_s16(d6_s16, max);
- d7_s16 = vminq_s16(d7_s16, max);
- d0_u16 = vqshluq_n_s16(d0_s16, 0);
- d1_u16 = vqshluq_n_s16(d1_s16, 0);
- d2_u16 = vqshluq_n_s16(d2_s16, 0);
- d3_u16 = vqshluq_n_s16(d3_s16, 0);
- d4_u16 = vqshluq_n_s16(d4_s16, 0);
- d5_u16 = vqshluq_n_s16(d5_s16, 0);
- d6_u16 = vqshluq_n_s16(d6_s16, 0);
- d7_u16 = vqshluq_n_s16(d7_s16, 0);
-
- vst1q_u16(dest, d0_u16);
- dest += stride;
- vst1q_u16(dest, d1_u16);
- dest += stride;
- vst1q_u16(dest, d2_u16);
- dest += stride;
- vst1q_u16(dest, d3_u16);
- dest += stride;
- vst1q_u16(dest, d4_u16);
- dest += stride;
- vst1q_u16(dest, d5_u16);
- dest += stride;
- vst1q_u16(dest, d6_u16);
- dest += stride;
- vst1q_u16(dest, d7_u16);
-}
-
void vpx_highbd_idct8x8_12_add_neon(const tran_low_t *input, uint16_t *dest,
int stride, int bd) {
int32x4_t a[16];
@@ -351,202 +285,6 @@
c[7] = vcombine_s16(vrshrn_n_s32(a[11], 5), vrshrn_n_s32(a[15], 5));
}
highbd_add8x8(c, dest, stride, bd);
-}
-
-static INLINE void idct8x8_64_half1d_bd10(
- const int32x4_t cospis0, const int32x4_t cospis1, int32x4_t *const io0,
- int32x4_t *const io1, int32x4_t *const io2, int32x4_t *const io3,
- int32x4_t *const io4, int32x4_t *const io5, int32x4_t *const io6,
- int32x4_t *const io7) {
- int32x4_t step1[8], step2[8];
-
- transpose_s32_8x4(io0, io1, io2, io3, io4, io5, io6, io7);
-
- // stage 1
- step1[4] = vmulq_lane_s32(*io1, vget_high_s32(cospis1), 1);
- step1[5] = vmulq_lane_s32(*io3, vget_high_s32(cospis1), 0);
- step1[6] = vmulq_lane_s32(*io3, vget_low_s32(cospis1), 1);
- step1[7] = vmulq_lane_s32(*io1, vget_low_s32(cospis1), 0);
-
- step1[4] = vmlsq_lane_s32(step1[4], *io7, vget_low_s32(cospis1), 0);
- step1[5] = vmlaq_lane_s32(step1[5], *io5, vget_low_s32(cospis1), 1);
- step1[6] = vmlsq_lane_s32(step1[6], *io5, vget_high_s32(cospis1), 0);
- step1[7] = vmlaq_lane_s32(step1[7], *io7, vget_high_s32(cospis1), 1);
-
- step1[4] = vrshrq_n_s32(step1[4], DCT_CONST_BITS);
- step1[5] = vrshrq_n_s32(step1[5], DCT_CONST_BITS);
- step1[6] = vrshrq_n_s32(step1[6], DCT_CONST_BITS);
- step1[7] = vrshrq_n_s32(step1[7], DCT_CONST_BITS);
-
- // stage 2
- step2[1] = vmulq_lane_s32(*io0, vget_high_s32(cospis0), 0);
- step2[2] = vmulq_lane_s32(*io2, vget_high_s32(cospis0), 1);
- step2[3] = vmulq_lane_s32(*io2, vget_low_s32(cospis0), 1);
-
- step2[0] = vmlaq_lane_s32(step2[1], *io4, vget_high_s32(cospis0), 0);
- step2[1] = vmlsq_lane_s32(step2[1], *io4, vget_high_s32(cospis0), 0);
- step2[2] = vmlsq_lane_s32(step2[2], *io6, vget_low_s32(cospis0), 1);
- step2[3] = vmlaq_lane_s32(step2[3], *io6, vget_high_s32(cospis0), 1);
-
- step2[0] = vrshrq_n_s32(step2[0], DCT_CONST_BITS);
- step2[1] = vrshrq_n_s32(step2[1], DCT_CONST_BITS);
- step2[2] = vrshrq_n_s32(step2[2], DCT_CONST_BITS);
- step2[3] = vrshrq_n_s32(step2[3], DCT_CONST_BITS);
-
- step2[4] = vaddq_s32(step1[4], step1[5]);
- step2[5] = vsubq_s32(step1[4], step1[5]);
- step2[6] = vsubq_s32(step1[7], step1[6]);
- step2[7] = vaddq_s32(step1[7], step1[6]);
-
- // stage 3
- step1[0] = vaddq_s32(step2[0], step2[3]);
- step1[1] = vaddq_s32(step2[1], step2[2]);
- step1[2] = vsubq_s32(step2[1], step2[2]);
- step1[3] = vsubq_s32(step2[0], step2[3]);
-
- step1[6] = vmulq_lane_s32(step2[6], vget_high_s32(cospis0), 0);
- step1[5] = vmlsq_lane_s32(step1[6], step2[5], vget_high_s32(cospis0), 0);
- step1[6] = vmlaq_lane_s32(step1[6], step2[5], vget_high_s32(cospis0), 0);
- step1[5] = vrshrq_n_s32(step1[5], DCT_CONST_BITS);
- step1[6] = vrshrq_n_s32(step1[6], DCT_CONST_BITS);
-
- // stage 4
- *io0 = vaddq_s32(step1[0], step2[7]);
- *io1 = vaddq_s32(step1[1], step1[6]);
- *io2 = vaddq_s32(step1[2], step1[5]);
- *io3 = vaddq_s32(step1[3], step2[4]);
- *io4 = vsubq_s32(step1[3], step2[4]);
- *io5 = vsubq_s32(step1[2], step1[5]);
- *io6 = vsubq_s32(step1[1], step1[6]);
- *io7 = vsubq_s32(step1[0], step2[7]);
-}
-
-static INLINE void idct8x8_64_half1d_bd12(
- const int32x4_t cospis0, const int32x4_t cospis1, int32x4_t *const io0,
- int32x4_t *const io1, int32x4_t *const io2, int32x4_t *const io3,
- int32x4_t *const io4, int32x4_t *const io5, int32x4_t *const io6,
- int32x4_t *const io7) {
- int32x2_t input1l, input1h, input3l, input3h, input5l, input5h, input7l,
- input7h;
- int32x2_t step1l[4], step1h[4];
- int32x4_t step1[8], step2[8];
- int64x2_t t64[8];
- int32x2_t t32[8];
-
- transpose_s32_8x4(io0, io1, io2, io3, io4, io5, io6, io7);
-
- // stage 1
- input1l = vget_low_s32(*io1);
- input1h = vget_high_s32(*io1);
- input3l = vget_low_s32(*io3);
- input3h = vget_high_s32(*io3);
- input5l = vget_low_s32(*io5);
- input5h = vget_high_s32(*io5);
- input7l = vget_low_s32(*io7);
- input7h = vget_high_s32(*io7);
- step1l[0] = vget_low_s32(*io0);
- step1h[0] = vget_high_s32(*io0);
- step1l[1] = vget_low_s32(*io2);
- step1h[1] = vget_high_s32(*io2);
- step1l[2] = vget_low_s32(*io4);
- step1h[2] = vget_high_s32(*io4);
- step1l[3] = vget_low_s32(*io6);
- step1h[3] = vget_high_s32(*io6);
-
- t64[0] = vmull_lane_s32(input1l, vget_high_s32(cospis1), 1);
- t64[1] = vmull_lane_s32(input1h, vget_high_s32(cospis1), 1);
- t64[2] = vmull_lane_s32(input3l, vget_high_s32(cospis1), 0);
- t64[3] = vmull_lane_s32(input3h, vget_high_s32(cospis1), 0);
- t64[4] = vmull_lane_s32(input3l, vget_low_s32(cospis1), 1);
- t64[5] = vmull_lane_s32(input3h, vget_low_s32(cospis1), 1);
- t64[6] = vmull_lane_s32(input1l, vget_low_s32(cospis1), 0);
- t64[7] = vmull_lane_s32(input1h, vget_low_s32(cospis1), 0);
- t64[0] = vmlsl_lane_s32(t64[0], input7l, vget_low_s32(cospis1), 0);
- t64[1] = vmlsl_lane_s32(t64[1], input7h, vget_low_s32(cospis1), 0);
- t64[2] = vmlal_lane_s32(t64[2], input5l, vget_low_s32(cospis1), 1);
- t64[3] = vmlal_lane_s32(t64[3], input5h, vget_low_s32(cospis1), 1);
- t64[4] = vmlsl_lane_s32(t64[4], input5l, vget_high_s32(cospis1), 0);
- t64[5] = vmlsl_lane_s32(t64[5], input5h, vget_high_s32(cospis1), 0);
- t64[6] = vmlal_lane_s32(t64[6], input7l, vget_high_s32(cospis1), 1);
- t64[7] = vmlal_lane_s32(t64[7], input7h, vget_high_s32(cospis1), 1);
- t32[0] = vrshrn_n_s64(t64[0], DCT_CONST_BITS);
- t32[1] = vrshrn_n_s64(t64[1], DCT_CONST_BITS);
- t32[2] = vrshrn_n_s64(t64[2], DCT_CONST_BITS);
- t32[3] = vrshrn_n_s64(t64[3], DCT_CONST_BITS);
- t32[4] = vrshrn_n_s64(t64[4], DCT_CONST_BITS);
- t32[5] = vrshrn_n_s64(t64[5], DCT_CONST_BITS);
- t32[6] = vrshrn_n_s64(t64[6], DCT_CONST_BITS);
- t32[7] = vrshrn_n_s64(t64[7], DCT_CONST_BITS);
- step1[4] = vcombine_s32(t32[0], t32[1]);
- step1[5] = vcombine_s32(t32[2], t32[3]);
- step1[6] = vcombine_s32(t32[4], t32[5]);
- step1[7] = vcombine_s32(t32[6], t32[7]);
-
- // stage 2
- t64[2] = vmull_lane_s32(step1l[0], vget_high_s32(cospis0), 0);
- t64[3] = vmull_lane_s32(step1h[0], vget_high_s32(cospis0), 0);
- t64[4] = vmull_lane_s32(step1l[1], vget_high_s32(cospis0), 1);
- t64[5] = vmull_lane_s32(step1h[1], vget_high_s32(cospis0), 1);
- t64[6] = vmull_lane_s32(step1l[1], vget_low_s32(cospis0), 1);
- t64[7] = vmull_lane_s32(step1h[1], vget_low_s32(cospis0), 1);
- t64[0] = vmlal_lane_s32(t64[2], step1l[2], vget_high_s32(cospis0), 0);
- t64[1] = vmlal_lane_s32(t64[3], step1h[2], vget_high_s32(cospis0), 0);
- t64[2] = vmlsl_lane_s32(t64[2], step1l[2], vget_high_s32(cospis0), 0);
- t64[3] = vmlsl_lane_s32(t64[3], step1h[2], vget_high_s32(cospis0), 0);
- t64[4] = vmlsl_lane_s32(t64[4], step1l[3], vget_low_s32(cospis0), 1);
- t64[5] = vmlsl_lane_s32(t64[5], step1h[3], vget_low_s32(cospis0), 1);
- t64[6] = vmlal_lane_s32(t64[6], step1l[3], vget_high_s32(cospis0), 1);
- t64[7] = vmlal_lane_s32(t64[7], step1h[3], vget_high_s32(cospis0), 1);
- t32[0] = vrshrn_n_s64(t64[0], DCT_CONST_BITS);
- t32[1] = vrshrn_n_s64(t64[1], DCT_CONST_BITS);
- t32[2] = vrshrn_n_s64(t64[2], DCT_CONST_BITS);
- t32[3] = vrshrn_n_s64(t64[3], DCT_CONST_BITS);
- t32[4] = vrshrn_n_s64(t64[4], DCT_CONST_BITS);
- t32[5] = vrshrn_n_s64(t64[5], DCT_CONST_BITS);
- t32[6] = vrshrn_n_s64(t64[6], DCT_CONST_BITS);
- t32[7] = vrshrn_n_s64(t64[7], DCT_CONST_BITS);
- step2[0] = vcombine_s32(t32[0], t32[1]);
- step2[1] = vcombine_s32(t32[2], t32[3]);
- step2[2] = vcombine_s32(t32[4], t32[5]);
- step2[3] = vcombine_s32(t32[6], t32[7]);
-
- step2[4] = vaddq_s32(step1[4], step1[5]);
- step2[5] = vsubq_s32(step1[4], step1[5]);
- step2[6] = vsubq_s32(step1[7], step1[6]);
- step2[7] = vaddq_s32(step1[7], step1[6]);
-
- // stage 3
- step1[0] = vaddq_s32(step2[0], step2[3]);
- step1[1] = vaddq_s32(step2[1], step2[2]);
- step1[2] = vsubq_s32(step2[1], step2[2]);
- step1[3] = vsubq_s32(step2[0], step2[3]);
-
- t64[2] = vmull_lane_s32(vget_low_s32(step2[6]), vget_high_s32(cospis0), 0);
- t64[3] = vmull_lane_s32(vget_high_s32(step2[6]), vget_high_s32(cospis0), 0);
- t64[0] =
- vmlsl_lane_s32(t64[2], vget_low_s32(step2[5]), vget_high_s32(cospis0), 0);
- t64[1] = vmlsl_lane_s32(t64[3], vget_high_s32(step2[5]),
- vget_high_s32(cospis0), 0);
- t64[2] =
- vmlal_lane_s32(t64[2], vget_low_s32(step2[5]), vget_high_s32(cospis0), 0);
- t64[3] = vmlal_lane_s32(t64[3], vget_high_s32(step2[5]),
- vget_high_s32(cospis0), 0);
- t32[0] = vrshrn_n_s64(t64[0], DCT_CONST_BITS);
- t32[1] = vrshrn_n_s64(t64[1], DCT_CONST_BITS);
- t32[2] = vrshrn_n_s64(t64[2], DCT_CONST_BITS);
- t32[3] = vrshrn_n_s64(t64[3], DCT_CONST_BITS);
- step1[5] = vcombine_s32(t32[0], t32[1]);
- step1[6] = vcombine_s32(t32[2], t32[3]);
-
- // stage 4
- *io0 = vaddq_s32(step1[0], step2[7]);
- *io1 = vaddq_s32(step1[1], step1[6]);
- *io2 = vaddq_s32(step1[2], step1[5]);
- *io3 = vaddq_s32(step1[3], step2[4]);
- *io4 = vsubq_s32(step1[3], step2[4]);
- *io5 = vsubq_s32(step1[2], step1[5]);
- *io6 = vsubq_s32(step1[1], step1[6]);
- *io7 = vsubq_s32(step1[0], step2[7]);
}

void vpx_highbd_idct8x8_64_add_neon(const tran_low_t *input, uint16_t *dest,
--- a/vpx_dsp/arm/highbd_idct_neon.h
+++ b/vpx_dsp/arm/highbd_idct_neon.h
@@ -96,4 +96,267 @@
a[3] = vsubq_s32(b0, b3);
}

+static INLINE void highbd_add8x8(int16x8_t *const a, uint16_t *dest,
+ const int stride, const int bd) {
+ const int16x8_t max = vdupq_n_s16((1 << bd) - 1);
+ const uint16_t *dst = dest;
+ uint16x8_t d0, d1, d2, d3, d4, d5, d6, d7;
+ uint16x8_t d0_u16, d1_u16, d2_u16, d3_u16, d4_u16, d5_u16, d6_u16, d7_u16;
+ int16x8_t d0_s16, d1_s16, d2_s16, d3_s16, d4_s16, d5_s16, d6_s16, d7_s16;
+
+ d0 = vld1q_u16(dst);
+ dst += stride;
+ d1 = vld1q_u16(dst);
+ dst += stride;
+ d2 = vld1q_u16(dst);
+ dst += stride;
+ d3 = vld1q_u16(dst);
+ dst += stride;
+ d4 = vld1q_u16(dst);
+ dst += stride;
+ d5 = vld1q_u16(dst);
+ dst += stride;
+ d6 = vld1q_u16(dst);
+ dst += stride;
+ d7 = vld1q_u16(dst);
+
+ d0_s16 = vqaddq_s16(a[0], vreinterpretq_s16_u16(d0));
+ d1_s16 = vqaddq_s16(a[1], vreinterpretq_s16_u16(d1));
+ d2_s16 = vqaddq_s16(a[2], vreinterpretq_s16_u16(d2));
+ d3_s16 = vqaddq_s16(a[3], vreinterpretq_s16_u16(d3));
+ d4_s16 = vqaddq_s16(a[4], vreinterpretq_s16_u16(d4));
+ d5_s16 = vqaddq_s16(a[5], vreinterpretq_s16_u16(d5));
+ d6_s16 = vqaddq_s16(a[6], vreinterpretq_s16_u16(d6));
+ d7_s16 = vqaddq_s16(a[7], vreinterpretq_s16_u16(d7));
+
+ d0_s16 = vminq_s16(d0_s16, max);
+ d1_s16 = vminq_s16(d1_s16, max);
+ d2_s16 = vminq_s16(d2_s16, max);
+ d3_s16 = vminq_s16(d3_s16, max);
+ d4_s16 = vminq_s16(d4_s16, max);
+ d5_s16 = vminq_s16(d5_s16, max);
+ d6_s16 = vminq_s16(d6_s16, max);
+ d7_s16 = vminq_s16(d7_s16, max);
+ d0_u16 = vqshluq_n_s16(d0_s16, 0);
+ d1_u16 = vqshluq_n_s16(d1_s16, 0);
+ d2_u16 = vqshluq_n_s16(d2_s16, 0);
+ d3_u16 = vqshluq_n_s16(d3_s16, 0);
+ d4_u16 = vqshluq_n_s16(d4_s16, 0);
+ d5_u16 = vqshluq_n_s16(d5_s16, 0);
+ d6_u16 = vqshluq_n_s16(d6_s16, 0);
+ d7_u16 = vqshluq_n_s16(d7_s16, 0);
+
+ vst1q_u16(dest, d0_u16);
+ dest += stride;
+ vst1q_u16(dest, d1_u16);
+ dest += stride;
+ vst1q_u16(dest, d2_u16);
+ dest += stride;
+ vst1q_u16(dest, d3_u16);
+ dest += stride;
+ vst1q_u16(dest, d4_u16);
+ dest += stride;
+ vst1q_u16(dest, d5_u16);
+ dest += stride;
+ vst1q_u16(dest, d6_u16);
+ dest += stride;
+ vst1q_u16(dest, d7_u16);
+}
+
+static INLINE void idct8x8_64_half1d_bd10(
+ const int32x4_t cospis0, const int32x4_t cospis1, int32x4_t *const io0,
+ int32x4_t *const io1, int32x4_t *const io2, int32x4_t *const io3,
+ int32x4_t *const io4, int32x4_t *const io5, int32x4_t *const io6,
+ int32x4_t *const io7) {
+ int32x4_t step1[8], step2[8];
+
+ transpose_s32_8x4(io0, io1, io2, io3, io4, io5, io6, io7);
+
+ // stage 1
+ step1[4] = vmulq_lane_s32(*io1, vget_high_s32(cospis1), 1);
+ step1[5] = vmulq_lane_s32(*io3, vget_high_s32(cospis1), 0);
+ step1[6] = vmulq_lane_s32(*io3, vget_low_s32(cospis1), 1);
+ step1[7] = vmulq_lane_s32(*io1, vget_low_s32(cospis1), 0);
+
+ step1[4] = vmlsq_lane_s32(step1[4], *io7, vget_low_s32(cospis1), 0);
+ step1[5] = vmlaq_lane_s32(step1[5], *io5, vget_low_s32(cospis1), 1);
+ step1[6] = vmlsq_lane_s32(step1[6], *io5, vget_high_s32(cospis1), 0);
+ step1[7] = vmlaq_lane_s32(step1[7], *io7, vget_high_s32(cospis1), 1);
+
+ step1[4] = vrshrq_n_s32(step1[4], DCT_CONST_BITS);
+ step1[5] = vrshrq_n_s32(step1[5], DCT_CONST_BITS);
+ step1[6] = vrshrq_n_s32(step1[6], DCT_CONST_BITS);
+ step1[7] = vrshrq_n_s32(step1[7], DCT_CONST_BITS);
+
+ // stage 2
+ step2[1] = vmulq_lane_s32(*io0, vget_high_s32(cospis0), 0);
+ step2[2] = vmulq_lane_s32(*io2, vget_high_s32(cospis0), 1);
+ step2[3] = vmulq_lane_s32(*io2, vget_low_s32(cospis0), 1);
+
+ step2[0] = vmlaq_lane_s32(step2[1], *io4, vget_high_s32(cospis0), 0);
+ step2[1] = vmlsq_lane_s32(step2[1], *io4, vget_high_s32(cospis0), 0);
+ step2[2] = vmlsq_lane_s32(step2[2], *io6, vget_low_s32(cospis0), 1);
+ step2[3] = vmlaq_lane_s32(step2[3], *io6, vget_high_s32(cospis0), 1);
+
+ step2[0] = vrshrq_n_s32(step2[0], DCT_CONST_BITS);
+ step2[1] = vrshrq_n_s32(step2[1], DCT_CONST_BITS);
+ step2[2] = vrshrq_n_s32(step2[2], DCT_CONST_BITS);
+ step2[3] = vrshrq_n_s32(step2[3], DCT_CONST_BITS);
+
+ step2[4] = vaddq_s32(step1[4], step1[5]);
+ step2[5] = vsubq_s32(step1[4], step1[5]);
+ step2[6] = vsubq_s32(step1[7], step1[6]);
+ step2[7] = vaddq_s32(step1[7], step1[6]);
+
+ // stage 3
+ step1[0] = vaddq_s32(step2[0], step2[3]);
+ step1[1] = vaddq_s32(step2[1], step2[2]);
+ step1[2] = vsubq_s32(step2[1], step2[2]);
+ step1[3] = vsubq_s32(step2[0], step2[3]);
+
+ step1[6] = vmulq_lane_s32(step2[6], vget_high_s32(cospis0), 0);
+ step1[5] = vmlsq_lane_s32(step1[6], step2[5], vget_high_s32(cospis0), 0);
+ step1[6] = vmlaq_lane_s32(step1[6], step2[5], vget_high_s32(cospis0), 0);
+ step1[5] = vrshrq_n_s32(step1[5], DCT_CONST_BITS);
+ step1[6] = vrshrq_n_s32(step1[6], DCT_CONST_BITS);
+
+ // stage 4
+ *io0 = vaddq_s32(step1[0], step2[7]);
+ *io1 = vaddq_s32(step1[1], step1[6]);
+ *io2 = vaddq_s32(step1[2], step1[5]);
+ *io3 = vaddq_s32(step1[3], step2[4]);
+ *io4 = vsubq_s32(step1[3], step2[4]);
+ *io5 = vsubq_s32(step1[2], step1[5]);
+ *io6 = vsubq_s32(step1[1], step1[6]);
+ *io7 = vsubq_s32(step1[0], step2[7]);
+}
+
+static INLINE void idct8x8_64_half1d_bd12(
+ const int32x4_t cospis0, const int32x4_t cospis1, int32x4_t *const io0,
+ int32x4_t *const io1, int32x4_t *const io2, int32x4_t *const io3,
+ int32x4_t *const io4, int32x4_t *const io5, int32x4_t *const io6,
+ int32x4_t *const io7) {
+ int32x2_t input1l, input1h, input3l, input3h, input5l, input5h, input7l,
+ input7h;
+ int32x2_t step1l[4], step1h[4];
+ int32x4_t step1[8], step2[8];
+ int64x2_t t64[8];
+ int32x2_t t32[8];
+
+ transpose_s32_8x4(io0, io1, io2, io3, io4, io5, io6, io7);
+
+ // stage 1
+ input1l = vget_low_s32(*io1);
+ input1h = vget_high_s32(*io1);
+ input3l = vget_low_s32(*io3);
+ input3h = vget_high_s32(*io3);
+ input5l = vget_low_s32(*io5);
+ input5h = vget_high_s32(*io5);
+ input7l = vget_low_s32(*io7);
+ input7h = vget_high_s32(*io7);
+ step1l[0] = vget_low_s32(*io0);
+ step1h[0] = vget_high_s32(*io0);
+ step1l[1] = vget_low_s32(*io2);
+ step1h[1] = vget_high_s32(*io2);
+ step1l[2] = vget_low_s32(*io4);
+ step1h[2] = vget_high_s32(*io4);
+ step1l[3] = vget_low_s32(*io6);
+ step1h[3] = vget_high_s32(*io6);
+
+ t64[0] = vmull_lane_s32(input1l, vget_high_s32(cospis1), 1);
+ t64[1] = vmull_lane_s32(input1h, vget_high_s32(cospis1), 1);
+ t64[2] = vmull_lane_s32(input3l, vget_high_s32(cospis1), 0);
+ t64[3] = vmull_lane_s32(input3h, vget_high_s32(cospis1), 0);
+ t64[4] = vmull_lane_s32(input3l, vget_low_s32(cospis1), 1);
+ t64[5] = vmull_lane_s32(input3h, vget_low_s32(cospis1), 1);
+ t64[6] = vmull_lane_s32(input1l, vget_low_s32(cospis1), 0);
+ t64[7] = vmull_lane_s32(input1h, vget_low_s32(cospis1), 0);
+ t64[0] = vmlsl_lane_s32(t64[0], input7l, vget_low_s32(cospis1), 0);
+ t64[1] = vmlsl_lane_s32(t64[1], input7h, vget_low_s32(cospis1), 0);
+ t64[2] = vmlal_lane_s32(t64[2], input5l, vget_low_s32(cospis1), 1);
+ t64[3] = vmlal_lane_s32(t64[3], input5h, vget_low_s32(cospis1), 1);
+ t64[4] = vmlsl_lane_s32(t64[4], input5l, vget_high_s32(cospis1), 0);
+ t64[5] = vmlsl_lane_s32(t64[5], input5h, vget_high_s32(cospis1), 0);
+ t64[6] = vmlal_lane_s32(t64[6], input7l, vget_high_s32(cospis1), 1);
+ t64[7] = vmlal_lane_s32(t64[7], input7h, vget_high_s32(cospis1), 1);
+ t32[0] = vrshrn_n_s64(t64[0], DCT_CONST_BITS);
+ t32[1] = vrshrn_n_s64(t64[1], DCT_CONST_BITS);
+ t32[2] = vrshrn_n_s64(t64[2], DCT_CONST_BITS);
+ t32[3] = vrshrn_n_s64(t64[3], DCT_CONST_BITS);
+ t32[4] = vrshrn_n_s64(t64[4], DCT_CONST_BITS);
+ t32[5] = vrshrn_n_s64(t64[5], DCT_CONST_BITS);
+ t32[6] = vrshrn_n_s64(t64[6], DCT_CONST_BITS);
+ t32[7] = vrshrn_n_s64(t64[7], DCT_CONST_BITS);
+ step1[4] = vcombine_s32(t32[0], t32[1]);
+ step1[5] = vcombine_s32(t32[2], t32[3]);
+ step1[6] = vcombine_s32(t32[4], t32[5]);
+ step1[7] = vcombine_s32(t32[6], t32[7]);
+
+ // stage 2
+ t64[2] = vmull_lane_s32(step1l[0], vget_high_s32(cospis0), 0);
+ t64[3] = vmull_lane_s32(step1h[0], vget_high_s32(cospis0), 0);
+ t64[4] = vmull_lane_s32(step1l[1], vget_high_s32(cospis0), 1);
+ t64[5] = vmull_lane_s32(step1h[1], vget_high_s32(cospis0), 1);
+ t64[6] = vmull_lane_s32(step1l[1], vget_low_s32(cospis0), 1);
+ t64[7] = vmull_lane_s32(step1h[1], vget_low_s32(cospis0), 1);
+ t64[0] = vmlal_lane_s32(t64[2], step1l[2], vget_high_s32(cospis0), 0);
+ t64[1] = vmlal_lane_s32(t64[3], step1h[2], vget_high_s32(cospis0), 0);
+ t64[2] = vmlsl_lane_s32(t64[2], step1l[2], vget_high_s32(cospis0), 0);
+ t64[3] = vmlsl_lane_s32(t64[3], step1h[2], vget_high_s32(cospis0), 0);
+ t64[4] = vmlsl_lane_s32(t64[4], step1l[3], vget_low_s32(cospis0), 1);
+ t64[5] = vmlsl_lane_s32(t64[5], step1h[3], vget_low_s32(cospis0), 1);
+ t64[6] = vmlal_lane_s32(t64[6], step1l[3], vget_high_s32(cospis0), 1);
+ t64[7] = vmlal_lane_s32(t64[7], step1h[3], vget_high_s32(cospis0), 1);
+ t32[0] = vrshrn_n_s64(t64[0], DCT_CONST_BITS);
+ t32[1] = vrshrn_n_s64(t64[1], DCT_CONST_BITS);
+ t32[2] = vrshrn_n_s64(t64[2], DCT_CONST_BITS);
+ t32[3] = vrshrn_n_s64(t64[3], DCT_CONST_BITS);
+ t32[4] = vrshrn_n_s64(t64[4], DCT_CONST_BITS);
+ t32[5] = vrshrn_n_s64(t64[5], DCT_CONST_BITS);
+ t32[6] = vrshrn_n_s64(t64[6], DCT_CONST_BITS);
+ t32[7] = vrshrn_n_s64(t64[7], DCT_CONST_BITS);
+ step2[0] = vcombine_s32(t32[0], t32[1]);
+ step2[1] = vcombine_s32(t32[2], t32[3]);
+ step2[2] = vcombine_s32(t32[4], t32[5]);
+ step2[3] = vcombine_s32(t32[6], t32[7]);
+
+ step2[4] = vaddq_s32(step1[4], step1[5]);
+ step2[5] = vsubq_s32(step1[4], step1[5]);
+ step2[6] = vsubq_s32(step1[7], step1[6]);
+ step2[7] = vaddq_s32(step1[7], step1[6]);
+
+ // stage 3
+ step1[0] = vaddq_s32(step2[0], step2[3]);
+ step1[1] = vaddq_s32(step2[1], step2[2]);
+ step1[2] = vsubq_s32(step2[1], step2[2]);
+ step1[3] = vsubq_s32(step2[0], step2[3]);
+
+ t64[2] = vmull_lane_s32(vget_low_s32(step2[6]), vget_high_s32(cospis0), 0);
+ t64[3] = vmull_lane_s32(vget_high_s32(step2[6]), vget_high_s32(cospis0), 0);
+ t64[0] =
+ vmlsl_lane_s32(t64[2], vget_low_s32(step2[5]), vget_high_s32(cospis0), 0);
+ t64[1] = vmlsl_lane_s32(t64[3], vget_high_s32(step2[5]),
+ vget_high_s32(cospis0), 0);
+ t64[2] =
+ vmlal_lane_s32(t64[2], vget_low_s32(step2[5]), vget_high_s32(cospis0), 0);
+ t64[3] = vmlal_lane_s32(t64[3], vget_high_s32(step2[5]),
+ vget_high_s32(cospis0), 0);
+ t32[0] = vrshrn_n_s64(t64[0], DCT_CONST_BITS);
+ t32[1] = vrshrn_n_s64(t64[1], DCT_CONST_BITS);
+ t32[2] = vrshrn_n_s64(t64[2], DCT_CONST_BITS);
+ t32[3] = vrshrn_n_s64(t64[3], DCT_CONST_BITS);
+ step1[5] = vcombine_s32(t32[0], t32[1]);
+ step1[6] = vcombine_s32(t32[2], t32[3]);
+
+ // stage 4
+ *io0 = vaddq_s32(step1[0], step2[7]);
+ *io1 = vaddq_s32(step1[1], step1[6]);
+ *io2 = vaddq_s32(step1[2], step1[5]);
+ *io3 = vaddq_s32(step1[3], step2[4]);
+ *io4 = vsubq_s32(step1[3], step2[4]);
+ *io5 = vsubq_s32(step1[2], step1[5]);
+ *io6 = vsubq_s32(step1[1], step1[6]);
+ *io7 = vsubq_s32(step1[0], step2[7]);
+}
+
#endif // VPX_DSP_ARM_HIGHBD_IDCT_NEON_H_
--- a/vpx_dsp/arm/mem_neon.h
+++ b/vpx_dsp/arm/mem_neon.h
@@ -26,6 +26,15 @@
((int64_t)(uint16_t)c3 << 48));
}
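
+// Assemble s32 vectors from scalar constants, mirroring the s16 helpers
+// above; used to load cospi values into lanes for vmull_lane_s32-style
+// multiplies.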
+static INLINE int32x2_t create_s32x2_neon(const int32_t c0, const int32_t c1) {
+ return vcreate_s32((uint32_t)c0 | ((int64_t)(uint32_t)c1 << 32));
+}
+
+static INLINE int32x4_t create_s32x4_neon(const int32_t c0, const int32_t c1,
+ const int32_t c2, const int32_t c3) {
+ return vcombine_s32(create_s32x2_neon(c0, c1), create_s32x2_neon(c2, c3));
+}
+
// Helper functions used to load tran_low_t into int16, narrowing if necessary.
static INLINE int16x8x2_t load_tran_low_to_s16x2q(const tran_low_t *buf) {
#if CONFIG_VP9_HIGHBITDEPTH