shithub: libvpx

ref: a73f0f4170b54ef5f8ab82f4801b39afd43be903
parent: b1fb6e0365fc00eeddfa69dff3087726db230202
parent: a6a4659bea0859dfa663cd8b0e85cb7c7bf3a50e
author: Jingning Han <[email protected]>
date: Tue Jul 28 16:36:59 EDT 2015

Merge "Factor 32x32 fwd DCT to vpx_dsp folder"

--- a/test/dct32x32_test.cc
+++ b/test/dct32x32_test.cc
@@ -12,14 +12,15 @@
 #include <stdlib.h>
 #include <string.h>
 
-#include "third_party/googletest/src/include/gtest/gtest.h"
 #include "test/acm_random.h"
 #include "test/clear_system_state.h"
 #include "test/register_state_check.h"
 #include "test/util.h"
+#include "third_party/googletest/src/include/gtest/gtest.h"
 
-#include "./vpx_config.h"
 #include "./vp9_rtcd.h"
+#include "./vpx_config.h"
+#include "./vpx_dsp_rtcd.h"
 #include "vp9/common/vp9_entropy.h"
 #include "vpx/vpx_codec.h"
 #include "vpx/vpx_integer.h"
--- a/vp9/common/vp9_rtcd_defs.pl
+++ b/vp9/common/vp9_rtcd_defs.pl
@@ -837,12 +837,6 @@
 
   add_proto qw/void vp9_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
   specialize qw/vp9_fdct32x32_1 sse2/;
-
-  add_proto qw/void vp9_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vp9_fdct32x32 sse2/;
-
-  add_proto qw/void vp9_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vp9_fdct32x32_rd sse2/;
 } else {
   add_proto qw/void vp9_fht4x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
   specialize qw/vp9_fht4x4 sse2 msa/;
@@ -867,12 +861,6 @@
 
   add_proto qw/void vp9_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
   specialize qw/vp9_fdct32x32_1 sse2 msa/;
-
-  add_proto qw/void vp9_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vp9_fdct32x32 sse2 avx2 msa/;
-
-  add_proto qw/void vp9_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vp9_fdct32x32_rd sse2 avx2 msa/;
 }
 
 #
@@ -934,12 +922,6 @@
 
   add_proto qw/void vp9_highbd_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
   specialize qw/vp9_highbd_fdct32x32_1/;
-
-  add_proto qw/void vp9_highbd_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vp9_highbd_fdct32x32 sse2/;
-
-  add_proto qw/void vp9_highbd_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vp9_highbd_fdct32x32_rd sse2/;
 
   add_proto qw/void vp9_highbd_temporal_filter_apply/, "uint8_t *frame1, unsigned int stride, uint8_t *frame2, unsigned int block_width, unsigned int block_height, int strength, int filter_weight, unsigned int *accumulator, uint16_t *count";
   specialize qw/vp9_highbd_temporal_filter_apply/;
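
The add_proto/specialize pairs deleted above are rtcd (run-time CPU detection)
declarations: add_proto fixes the C signature and specialize lists the SIMD
flavours, from which the build generates a dispatch header. A sketch of what
the generated vpx_dsp_rtcd.h is assumed to expose for one of the moved kernels
(vpx_ names assumed, mirroring the removed vp9_ protos):

    /* Roughly what rtcd emits when several specializations exist (sketch;
     * the vpx_dsp_rtcd.h contents are not shown in this patch). */
    void vpx_fdct32x32_c(const int16_t *input, tran_low_t *output, int stride);
    void vpx_fdct32x32_sse2(const int16_t *input, tran_low_t *output, int stride);
    void vpx_fdct32x32_avx2(const int16_t *input, tran_low_t *output, int stride);
    void vpx_fdct32x32_msa(const int16_t *input, tran_low_t *output, int stride);
    RTCD_EXTERN void (*vpx_fdct32x32)(const int16_t *input, tran_low_t *output,
                                      int stride);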
--- a/vp9/encoder/mips/msa/vp9_fdct32x32_msa.c
+++ b/vp9/encoder/mips/msa/vp9_fdct32x32_msa.c
@@ -8,695 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "./vp9_rtcd.h"
 #include "vp9/encoder/mips/msa/vp9_fdct_msa.h"
 
-static void fdct8x32_1d_column_load_butterfly(const int16_t *input,
-                                              int32_t src_stride,
-                                              int16_t *temp_buff) {
-  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
-  v8i16 step0, step1, step2, step3;
-  v8i16 in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1;
-  v8i16 step0_1, step1_1, step2_1, step3_1;
-
-  /* 1st and 2nd set */
-  LD_SH4(input, src_stride, in0, in1, in2, in3);
-  LD_SH4(input + (28 * src_stride), src_stride, in4, in5, in6, in7);
-  LD_SH4(input + (4 * src_stride), src_stride, in0_1, in1_1, in2_1, in3_1);
-  LD_SH4(input + (24 * src_stride), src_stride, in4_1, in5_1, in6_1, in7_1);
-  SLLI_4V(in0, in1, in2, in3, 2);
-  SLLI_4V(in4, in5, in6, in7, 2);
-  SLLI_4V(in0_1, in1_1, in2_1, in3_1, 2);
-  SLLI_4V(in4_1, in5_1, in6_1, in7_1, 2);
-  BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7,
-              step0, step1, step2, step3, in4, in5, in6, in7);
-  BUTTERFLY_8(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1,
-              step0_1, step1_1, step2_1, step3_1, in4_1, in5_1, in6_1, in7_1);
-  ST_SH4(step0, step1, step2, step3, temp_buff, 8);
-  ST_SH4(in4, in5, in6, in7, temp_buff + (28 * 8), 8);
-  ST_SH4(step0_1, step1_1, step2_1, step3_1, temp_buff + (4 * 8), 8);
-  ST_SH4(in4_1, in5_1, in6_1, in7_1, temp_buff + (24 * 8), 8);
-
-  /* 3rd and 4th set */
-  LD_SH4(input + (8 * src_stride), src_stride, in0, in1, in2, in3);
-  LD_SH4(input + (20 * src_stride), src_stride, in4, in5, in6, in7);
-  LD_SH4(input + (12 * src_stride), src_stride, in0_1, in1_1, in2_1, in3_1);
-  LD_SH4(input + (16 * src_stride), src_stride, in4_1, in5_1, in6_1, in7_1);
-  SLLI_4V(in0, in1, in2, in3, 2);
-  SLLI_4V(in4, in5, in6, in7, 2);
-  SLLI_4V(in0_1, in1_1, in2_1, in3_1, 2);
-  SLLI_4V(in4_1, in5_1, in6_1, in7_1, 2);
-  BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7,
-              step0, step1, step2, step3, in4, in5, in6, in7);
-  BUTTERFLY_8(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1,
-              step0_1, step1_1, step2_1, step3_1, in4_1, in5_1, in6_1, in7_1);
-  ST_SH4(step0, step1, step2, step3, temp_buff + (8 * 8), 8);
-  ST_SH4(in4, in5, in6, in7, temp_buff + (20 * 8), 8);
-  ST_SH4(step0_1, step1_1, step2_1, step3_1, temp_buff + (12 * 8), 8);
-  ST_SH4(in4_1, in5_1, in6_1, in7_1, temp_buff + (15 * 8) + 8, 8);
-}
-
-static void fdct8x32_1d_column_even_store(int16_t *input, int16_t *temp) {
-  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
-  v8i16 in8, in9, in10, in11, in12, in13, in14, in15;
-  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
-  v8i16 temp0, temp1;
-
-  /* fdct even */
-  LD_SH4(input, 8, in0, in1, in2, in3);
-  LD_SH4(input + 96, 8, in12, in13, in14, in15);
-  BUTTERFLY_8(in0, in1, in2, in3, in12, in13, in14, in15,
-              vec0, vec1, vec2, vec3, in12, in13, in14, in15);
-  LD_SH4(input + 32, 8, in4, in5, in6, in7);
-  LD_SH4(input + 64, 8, in8, in9, in10, in11);
-  BUTTERFLY_8(in4, in5, in6, in7, in8, in9, in10, in11,
-              vec4, vec5, vec6, vec7, in8, in9, in10, in11);
-
-  /* Stage 3 */
-  ADD4(vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, in0, in1, in2, in3);
-  BUTTERFLY_4(in0, in1, in2, in3, temp0, in4, in1, in0);
-  DOTP_CONST_PAIR(temp0, in4, cospi_16_64, cospi_16_64, temp1, temp0);
-  FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
-  ST_SH(temp0, temp);
-  ST_SH(temp1, temp + 512);
-
-  DOTP_CONST_PAIR(in0, in1, cospi_24_64, cospi_8_64, temp1, temp0);
-  FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
-  ST_SH(temp0, temp + 256);
-  ST_SH(temp1, temp + 768);
-
-  SUB4(vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, vec7, vec6, vec5, vec4);
-  DOTP_CONST_PAIR(vec6, vec5, cospi_16_64, cospi_16_64, vec5, vec6);
-  ADD2(vec4, vec5, vec7, vec6, vec0, vec1);
-  DOTP_CONST_PAIR(vec1, vec0, cospi_28_64, cospi_4_64, temp1, temp0);
-  FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
-  ST_SH(temp0, temp + 128);
-  ST_SH(temp1, temp + 896);
-
-  SUB2(vec4, vec5, vec7, vec6, vec4, vec7);
-  DOTP_CONST_PAIR(vec7, vec4, cospi_12_64, cospi_20_64, temp1, temp0);
-  FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
-  ST_SH(temp0, temp + 640);
-  ST_SH(temp1, temp + 384);
-
-  DOTP_CONST_PAIR(in13, in10, cospi_16_64, cospi_16_64, vec2, vec5);
-  DOTP_CONST_PAIR(in12, in11, cospi_16_64, cospi_16_64, vec3, vec4);
-  ADD4(in8, vec3, in9, vec2, in14, vec5, in15, vec4, in0, vec1, vec6, in2);
-  DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3);
-  ADD2(in0, in1, in2, in3, vec0, vec7);
-  DOTP_CONST_PAIR(vec7, vec0, cospi_30_64, cospi_2_64, temp1, temp0);
-  FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
-  ST_SH(temp0, temp + 64);
-  ST_SH(temp1, temp + 960);
-
-  SUB2(in0, in1, in2, in3, in0, in2);
-  DOTP_CONST_PAIR(in2, in0, cospi_14_64, cospi_18_64, temp1, temp0);
-  FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
-  ST_SH(temp0, temp + 576);
-  ST_SH(temp1, temp + 448);
-
-  SUB2(in9, vec2, in14, vec5, vec2, vec5);
-  DOTP_CONST_PAIR((-vec2), vec5, cospi_24_64, cospi_8_64, in2, in1);
-  SUB4(in8, vec3, in15, vec4, in3, in2, in0, in1, in3, in0, vec2, vec5);
-  DOTP_CONST_PAIR(vec5, vec2, cospi_22_64, cospi_10_64, temp1, temp0);
-  FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
-  ST_SH(temp0, temp + 320);
-  ST_SH(temp1, temp + 704);
-
-  ADD2(in3, in2, in0, in1, vec3, vec4);
-  DOTP_CONST_PAIR(vec4, vec3, cospi_6_64, cospi_26_64, temp0, temp1);
-  FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
-  ST_SH(temp0, temp + 192);
-  ST_SH(temp1, temp + 832);
-}
-
-static void fdct8x32_1d_column_odd_store(int16_t *input, int16_t *temp_ptr) {
-  v8i16 in16, in17, in18, in19, in20, in21, in22, in23;
-  v8i16 in24, in25, in26, in27, in28, in29, in30, in31, vec4, vec5;
-
-  in20 = LD_SH(input + 32);
-  in21 = LD_SH(input + 40);
-  in26 = LD_SH(input + 80);
-  in27 = LD_SH(input + 88);
-
-  DOTP_CONST_PAIR(in27, in20, cospi_16_64, cospi_16_64, in20, in27);
-  DOTP_CONST_PAIR(in26, in21, cospi_16_64, cospi_16_64, in21, in26);
-
-  in18 = LD_SH(input + 16);
-  in19 = LD_SH(input + 24);
-  in28 = LD_SH(input + 96);
-  in29 = LD_SH(input + 104);
-
-  vec4 = in19 - in20;
-  ST_SH(vec4, input + 32);
-  vec4 = in18 - in21;
-  ST_SH(vec4, input + 40);
-  vec4 = in29 - in26;
-  ST_SH(vec4, input + 80);
-  vec4 = in28 - in27;
-  ST_SH(vec4, input + 88);
-
-  in21 = in18 + in21;
-  in20 = in19 + in20;
-  in27 = in28 + in27;
-  in26 = in29 + in26;
-
-  LD_SH4(input + 48, 8, in22, in23, in24, in25);
-  DOTP_CONST_PAIR(in25, in22, cospi_16_64, cospi_16_64, in22, in25);
-  DOTP_CONST_PAIR(in24, in23, cospi_16_64, cospi_16_64, in23, in24);
-
-  in16 = LD_SH(input);
-  in17 = LD_SH(input + 8);
-  in30 = LD_SH(input + 112);
-  in31 = LD_SH(input + 120);
-
-  vec4 = in17 - in22;
-  ST_SH(vec4, input + 16);
-  vec4 = in16 - in23;
-  ST_SH(vec4, input + 24);
-  vec4 = in31 - in24;
-  ST_SH(vec4, input + 96);
-  vec4 = in30 - in25;
-  ST_SH(vec4, input + 104);
-
-  ADD4(in16, in23, in17, in22, in30, in25, in31, in24, in16, in17, in30, in31);
-  DOTP_CONST_PAIR(in26, in21, cospi_24_64, cospi_8_64, in18, in29);
-  DOTP_CONST_PAIR(in27, in20, cospi_24_64, cospi_8_64, in19, in28);
-  ADD4(in16, in19, in17, in18, in30, in29, in31, in28, in27, in22, in21, in25);
-  DOTP_CONST_PAIR(in21, in22, cospi_28_64, cospi_4_64, in26, in24);
-  ADD2(in27, in26, in25, in24, in23, in20);
-  DOTP_CONST_PAIR(in20, in23, cospi_31_64, cospi_1_64, vec4, vec5);
-  FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
-  ST_SH(vec5, temp_ptr);
-  ST_SH(vec4, temp_ptr + 960);
-
-  SUB2(in27, in26, in25, in24, in22, in21);
-  DOTP_CONST_PAIR(in21, in22, cospi_15_64, cospi_17_64, vec5, vec4);
-  FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
-  ST_SH(vec5, temp_ptr + 448);
-  ST_SH(vec4, temp_ptr + 512);
-
-  SUB4(in17, in18, in16, in19, in31, in28, in30, in29, in23, in26, in24, in20);
-  DOTP_CONST_PAIR((-in23), in20, cospi_28_64, cospi_4_64, in27, in25);
-  SUB2(in26, in27, in24, in25, in23, in20);
-  DOTP_CONST_PAIR(in20, in23, cospi_23_64, cospi_9_64, vec4, vec5);
-  FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
-  ST_SH(vec4, temp_ptr + 704);
-  ST_SH(vec5, temp_ptr + 256);
-
-  ADD2(in26, in27, in24, in25, in22, in21);
-  DOTP_CONST_PAIR(in21, in22, cospi_7_64, cospi_25_64, vec4, vec5);
-  FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
-  ST_SH(vec4, temp_ptr + 192);
-  ST_SH(vec5, temp_ptr + 768);
-
-  LD_SH4(input + 16, 8, in22, in23, in20, in21);
-  LD_SH4(input + 80, 8, in26, in27, in24, in25);
-  in16 = in20;
-  in17 = in21;
-  DOTP_CONST_PAIR(-in16, in27, cospi_24_64, cospi_8_64, in20, in27);
-  DOTP_CONST_PAIR(-in17, in26, cospi_24_64, cospi_8_64, in21, in26);
-  SUB4(in23, in20, in22, in21, in25, in26, in24, in27, in28, in17, in18, in31);
-  DOTP_CONST_PAIR(in18, in17, cospi_12_64, cospi_20_64, in29, in30);
-  ADD2(in28, in29, in31, in30, in16, in19);
-  DOTP_CONST_PAIR(in19, in16, cospi_27_64, cospi_5_64, vec5, vec4);
-  FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
-  ST_SH(vec5, temp_ptr + 832);
-  ST_SH(vec4, temp_ptr + 128);
-
-  SUB2(in28, in29, in31, in30, in17, in18);
-  DOTP_CONST_PAIR(in18, in17, cospi_11_64, cospi_21_64, vec5, vec4);
-  FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
-  ST_SH(vec5, temp_ptr + 320);
-  ST_SH(vec4, temp_ptr + 640);
-  ADD4(in22, in21, in23, in20, in24, in27, in25, in26, in16, in29, in30, in19);
-  DOTP_CONST_PAIR(-in16, in19, cospi_12_64, cospi_20_64, in28, in31);
-  SUB2(in29, in28, in30, in31, in16, in19);
-  DOTP_CONST_PAIR(in19, in16, cospi_19_64, cospi_13_64, vec5, vec4);
-  FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
-  ST_SH(vec5, temp_ptr + 576);
-  ST_SH(vec4, temp_ptr + 384);
-
-  ADD2(in29, in28, in30, in31, in17, in18);
-  DOTP_CONST_PAIR(in18, in17, cospi_3_64, cospi_29_64, vec5, vec4);
-  FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
-  ST_SH(vec5, temp_ptr + 64);
-  ST_SH(vec4, temp_ptr + 896);
-}
-
-static void fdct8x32_1d_column(const int16_t *input, int32_t src_stride,
-                               int16_t *tmp_buf, int16_t *tmp_buf_big) {
-  fdct8x32_1d_column_load_butterfly(input, src_stride, tmp_buf);
-  fdct8x32_1d_column_even_store(tmp_buf, tmp_buf_big);
-  fdct8x32_1d_column_odd_store(tmp_buf + 128, (tmp_buf_big + 32));
-}
-
-static void fdct8x32_1d_row_load_butterfly(int16_t *temp_buff,
-                                           int16_t *output) {
-  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
-  v8i16 in8, in9, in10, in11, in12, in13, in14, in15;
-  v8i16 step0, step1, step2, step3, step4, step5, step6, step7;
-
-  LD_SH8(temp_buff, 32, in0, in1, in2, in3, in4, in5, in6, in7);
-  LD_SH8(temp_buff + 24, 32, in8, in9, in10, in11, in12, in13, in14, in15);
-  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
-                     in0, in1, in2, in3, in4, in5, in6, in7);
-  TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15,
-                     in8, in9, in10, in11, in12, in13, in14, in15);
-  BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7,
-               in8, in9, in10, in11, in12, in13, in14, in15,
-               step0, step1, step2, step3, step4, step5, step6, step7,
-               in8, in9, in10, in11, in12, in13, in14, in15);
-  ST_SH8(step0, step1, step2, step3, step4, step5, step6, step7, output, 8);
-  ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, (output + 24 * 8), 8);
-
-  /* 2nd set */
-  LD_SH8(temp_buff + 8, 32, in0, in1, in2, in3, in4, in5, in6, in7);
-  LD_SH8(temp_buff + 16, 32, in8, in9, in10, in11, in12, in13, in14, in15);
-  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
-                     in0, in1, in2, in3, in4, in5, in6, in7);
-  TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15,
-                     in8, in9, in10, in11, in12, in13, in14, in15);
-  BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7,
-               in8, in9, in10, in11, in12, in13, in14, in15,
-               step0, step1, step2, step3, step4, step5, step6, step7,
-               in8, in9, in10, in11, in12, in13, in14, in15);
-  ST_SH8(step0, step1, step2, step3, step4, step5, step6, step7,
-         (output + 8 * 8), 8);
-  ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, (output + 16 * 8), 8);
-}
-
-static void fdct8x32_1d_row_even_4x(int16_t *input, int16_t *interm_ptr,
-                                    int16_t *out) {
-  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
-  v8i16 in8, in9, in10, in11, in12, in13, in14, in15;
-  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
-  v4i32 vec0_l, vec1_l, vec2_l, vec3_l, vec4_l, vec5_l, vec6_l, vec7_l;
-  v4i32 vec0_r, vec1_r, vec2_r, vec3_r, vec4_r, vec5_r, vec6_r, vec7_r;
-  v4i32 tmp0_w, tmp1_w, tmp2_w, tmp3_w;
-
-  /* fdct32 even */
-  /* stage 2 */
-  LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
-  LD_SH8(input + 64, 8, in8, in9, in10, in11, in12, in13, in14, in15);
-
-  BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7,
-               in8, in9, in10, in11, in12, in13, in14, in15,
-               vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7,
-               in8, in9, in10, in11, in12, in13, in14, in15);
-  ST_SH8(vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, interm_ptr, 8);
-  ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, interm_ptr + 64, 8);
-
-  /* Stage 3 */
-  UNPCK_SH_SW(vec0, vec0_l, vec0_r);
-  UNPCK_SH_SW(vec1, vec1_l, vec1_r);
-  UNPCK_SH_SW(vec2, vec2_l, vec2_r);
-  UNPCK_SH_SW(vec3, vec3_l, vec3_r);
-  UNPCK_SH_SW(vec4, vec4_l, vec4_r);
-  UNPCK_SH_SW(vec5, vec5_l, vec5_r);
-  UNPCK_SH_SW(vec6, vec6_l, vec6_r);
-  UNPCK_SH_SW(vec7, vec7_l, vec7_r);
-  ADD4(vec0_r, vec7_r, vec1_r, vec6_r, vec2_r, vec5_r, vec3_r, vec4_r,
-       tmp0_w, tmp1_w, tmp2_w, tmp3_w);
-  BUTTERFLY_4(tmp0_w, tmp1_w, tmp2_w, tmp3_w, vec4_r, vec6_r, vec7_r, vec5_r);
-  ADD4(vec0_l, vec7_l, vec1_l, vec6_l, vec2_l, vec5_l, vec3_l, vec4_l,
-       vec0_r, vec1_r, vec2_r, vec3_r);
-
-  tmp3_w = vec0_r + vec3_r;
-  vec0_r = vec0_r - vec3_r;
-  vec3_r = vec1_r + vec2_r;
-  vec1_r = vec1_r - vec2_r;
-
-  DOTP_CONST_PAIR_W(vec4_r, vec6_r, tmp3_w, vec3_r, cospi_16_64,
-                    cospi_16_64, vec4_r, tmp3_w, vec6_r, vec3_r);
-  FDCT32_POSTPROC_NEG_W(vec4_r);
-  FDCT32_POSTPROC_NEG_W(tmp3_w);
-  FDCT32_POSTPROC_NEG_W(vec6_r);
-  FDCT32_POSTPROC_NEG_W(vec3_r);
-  PCKEV_H2_SH(vec4_r, tmp3_w, vec6_r, vec3_r, vec4, vec5);
-  ST_SH2(vec5, vec4, out, 8);
-
-  DOTP_CONST_PAIR_W(vec5_r, vec7_r, vec0_r, vec1_r, cospi_24_64,
-                    cospi_8_64, vec4_r, tmp3_w, vec6_r, vec3_r);
-  FDCT32_POSTPROC_NEG_W(vec4_r);
-  FDCT32_POSTPROC_NEG_W(tmp3_w);
-  FDCT32_POSTPROC_NEG_W(vec6_r);
-  FDCT32_POSTPROC_NEG_W(vec3_r);
-  PCKEV_H2_SH(vec4_r, tmp3_w, vec6_r, vec3_r, vec4, vec5);
-  ST_SH2(vec5, vec4, out + 16, 8);
-
-  LD_SH8(interm_ptr, 8, vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7);
-  SUB4(vec3, vec4, vec2, vec5, vec1, vec6, vec0, vec7, vec4, vec5, vec6, vec7);
-  DOTP_CONST_PAIR(vec6, vec5, cospi_16_64, cospi_16_64, vec5, vec6);
-  ADD2(vec4, vec5, vec7, vec6, vec0, vec1);
-  DOTP_CONST_PAIR(vec1, vec0, cospi_28_64, cospi_4_64, in5, in4);
-  FDCT_POSTPROC_2V_NEG_H(in4, in5);
-  ST_SH(in4, out + 32);
-  ST_SH(in5, out + 56);
-
-  SUB2(vec4, vec5, vec7, vec6, vec4, vec7);
-  DOTP_CONST_PAIR(vec7, vec4, cospi_12_64, cospi_20_64, in5, in4);
-  FDCT_POSTPROC_2V_NEG_H(in4, in5);
-  ST_SH(in4, out + 40);
-  ST_SH(in5, out + 48);
-
-  LD_SH8(interm_ptr + 64, 8, in8, in9, in10, in11, in12, in13, in14, in15);
-  DOTP_CONST_PAIR(in13, in10, cospi_16_64, cospi_16_64, vec2, vec5);
-  DOTP_CONST_PAIR(in12, in11, cospi_16_64, cospi_16_64, vec3, vec4);
-  ADD4(in8, vec3, in9, vec2, in14, vec5, in15, vec4, in0, vec1, vec6, in2);
-  DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3);
-  ADD2(in0, in1, in2, in3, vec0, vec7);
-  DOTP_CONST_PAIR(vec7, vec0, cospi_30_64, cospi_2_64, in5, in4);
-  FDCT_POSTPROC_2V_NEG_H(in4, in5);
-  ST_SH(in4, out + 64);
-  ST_SH(in5, out + 120);
-
-  SUB2(in0, in1, in2, in3, in0, in2);
-  DOTP_CONST_PAIR(in2, in0, cospi_14_64, cospi_18_64, in5, in4);
-  FDCT_POSTPROC_2V_NEG_H(in4, in5);
-  ST_SH(in4, out + 72);
-  ST_SH(in5, out + 112);
-
-  SUB2(in9, vec2, in14, vec5, vec2, vec5);
-  DOTP_CONST_PAIR((-vec2), vec5, cospi_24_64, cospi_8_64, in2, in1);
-  SUB4(in8, vec3, in15, vec4, in3, in2, in0, in1, in3, in0, vec2, vec5);
-  DOTP_CONST_PAIR(vec5, vec2, cospi_22_64, cospi_10_64, in5, in4);
-  FDCT_POSTPROC_2V_NEG_H(in4, in5);
-  ST_SH(in4, out + 80);
-  ST_SH(in5, out + 104);
-
-  ADD2(in3, in2, in0, in1, vec3, vec4);
-  DOTP_CONST_PAIR(vec4, vec3, cospi_6_64, cospi_26_64, in4, in5);
-  FDCT_POSTPROC_2V_NEG_H(in4, in5);
-  ST_SH(in4, out + 96);
-  ST_SH(in5, out + 88);
-}
-
-static void fdct8x32_1d_row_even(int16_t *temp, int16_t *out) {
-  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
-  v8i16 in8, in9, in10, in11, in12, in13, in14, in15;
-  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, temp0, temp1;
-
-  /* fdct32 even */
-  /* stage 2 */
-  LD_SH8(temp, 8, in0, in1, in2, in3, in4, in5, in6, in7);
-  LD_SH8(temp + 64, 8, in8, in9, in10, in11, in12, in13, in14, in15);
-
-  BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7,
-               in8, in9, in10, in11, in12, in13, in14, in15,
-               vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7,
-               in8, in9, in10, in11, in12, in13, in14, in15);
-
-  /* Stage 3 */
-  ADD4(vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, in0, in1, in2, in3);
-  BUTTERFLY_4(in0, in1, in2, in3, temp0, in4, in1, in0);
-  DOTP_CONST_PAIR(temp0, in4, cospi_16_64, cospi_16_64, temp1, temp0);
-  FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
-  ST_SH(temp0, out);
-  ST_SH(temp1, out + 8);
-
-  DOTP_CONST_PAIR(in0, in1, cospi_24_64, cospi_8_64, temp1, temp0);
-  FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
-  ST_SH(temp0, out + 16);
-  ST_SH(temp1, out + 24);
-
-  SUB4(vec3, vec4, vec2, vec5, vec1, vec6, vec0, vec7, vec4, vec5, vec6, vec7);
-  DOTP_CONST_PAIR(vec6, vec5, cospi_16_64, cospi_16_64, vec5, vec6);
-  ADD2(vec4, vec5, vec7, vec6, vec0, vec1);
-  DOTP_CONST_PAIR(vec1, vec0, cospi_28_64, cospi_4_64, temp1, temp0);
-  FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
-  ST_SH(temp0, out + 32);
-  ST_SH(temp1, out + 56);
-
-  SUB2(vec4, vec5, vec7, vec6, vec4, vec7);
-  DOTP_CONST_PAIR(vec7, vec4, cospi_12_64, cospi_20_64, temp1, temp0);
-  FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
-  ST_SH(temp0, out + 40);
-  ST_SH(temp1, out + 48);
-
-  DOTP_CONST_PAIR(in13, in10, cospi_16_64, cospi_16_64, vec2, vec5);
-  DOTP_CONST_PAIR(in12, in11, cospi_16_64, cospi_16_64, vec3, vec4);
-  ADD4(in8, vec3, in9, vec2, in14, vec5, in15, vec4, in0, vec1, vec6, in2);
-  DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3);
-  ADD2(in0, in1, in2, in3, vec0, vec7);
-  DOTP_CONST_PAIR(vec7, vec0, cospi_30_64, cospi_2_64, temp1, temp0);
-  FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
-  ST_SH(temp0, out + 64);
-  ST_SH(temp1, out + 120);
-
-  SUB2(in0, in1, in2, in3, in0, in2);
-  DOTP_CONST_PAIR(in2, in0, cospi_14_64, cospi_18_64, temp1, temp0);
-  FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
-  ST_SH(temp0, out + 72);
-  ST_SH(temp1, out + 112);
-
-  SUB2(in9, vec2, in14, vec5, vec2, vec5);
-  DOTP_CONST_PAIR((-vec2), vec5, cospi_24_64, cospi_8_64, in2, in1);
-  SUB4(in8, vec3, in15, vec4, in3, in2, in0, in1, in3, in0, vec2, vec5);
-  DOTP_CONST_PAIR(vec5, vec2, cospi_22_64, cospi_10_64, temp1, temp0);
-  FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
-  ST_SH(temp0, out + 80);
-  ST_SH(temp1, out + 104);
-
-  ADD2(in3, in2, in0, in1, vec3, vec4);
-  DOTP_CONST_PAIR(vec4, vec3, cospi_6_64, cospi_26_64, temp0, temp1);
-  FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
-  ST_SH(temp0, out + 96);
-  ST_SH(temp1, out + 88);
-}
-
-static void fdct8x32_1d_row_odd(int16_t *temp, int16_t *interm_ptr,
-                                int16_t *out) {
-  v8i16 in16, in17, in18, in19, in20, in21, in22, in23;
-  v8i16 in24, in25, in26, in27, in28, in29, in30, in31, vec4, vec5;
-
-  in20 = LD_SH(temp + 32);
-  in21 = LD_SH(temp + 40);
-  in26 = LD_SH(temp + 80);
-  in27 = LD_SH(temp + 88);
-
-  DOTP_CONST_PAIR(in27, in20, cospi_16_64, cospi_16_64, in20, in27);
-  DOTP_CONST_PAIR(in26, in21, cospi_16_64, cospi_16_64, in21, in26);
-
-  in18 = LD_SH(temp + 16);
-  in19 = LD_SH(temp + 24);
-  in28 = LD_SH(temp + 96);
-  in29 = LD_SH(temp + 104);
-
-  vec4 = in19 - in20;
-  ST_SH(vec4, interm_ptr + 32);
-  vec4 = in18 - in21;
-  ST_SH(vec4, interm_ptr + 88);
-  vec4 = in28 - in27;
-  ST_SH(vec4, interm_ptr + 56);
-  vec4 = in29 - in26;
-  ST_SH(vec4, interm_ptr + 64);
-
-  ADD4(in18, in21, in19, in20, in28, in27, in29, in26, in21, in20, in27, in26);
-
-  in22 = LD_SH(temp + 48);
-  in23 = LD_SH(temp + 56);
-  in24 = LD_SH(temp + 64);
-  in25 = LD_SH(temp + 72);
-
-  DOTP_CONST_PAIR(in25, in22, cospi_16_64, cospi_16_64, in22, in25);
-  DOTP_CONST_PAIR(in24, in23, cospi_16_64, cospi_16_64, in23, in24);
-
-  in16 = LD_SH(temp);
-  in17 = LD_SH(temp + 8);
-  in30 = LD_SH(temp + 112);
-  in31 = LD_SH(temp + 120);
-
-  vec4 = in17 - in22;
-  ST_SH(vec4, interm_ptr + 40);
-  vec4 = in30 - in25;
-  ST_SH(vec4, interm_ptr + 48);
-  vec4 = in31 - in24;
-  ST_SH(vec4, interm_ptr + 72);
-  vec4 = in16 - in23;
-  ST_SH(vec4, interm_ptr + 80);
-
-  ADD4(in16, in23, in17, in22, in30, in25, in31, in24, in16, in17, in30, in31);
-  DOTP_CONST_PAIR(in26, in21, cospi_24_64, cospi_8_64, in18, in29);
-  DOTP_CONST_PAIR(in27, in20, cospi_24_64, cospi_8_64, in19, in28);
-
-  ADD4(in16, in19, in17, in18, in30, in29, in31, in28, in27, in22, in21, in25);
-  DOTP_CONST_PAIR(in21, in22, cospi_28_64, cospi_4_64, in26, in24);
-  ADD2(in27, in26, in25, in24, in23, in20);
-
-  DOTP_CONST_PAIR(in20, in23, cospi_31_64, cospi_1_64, vec4, vec5);
-  FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
-  ST_SH(vec5, out);
-  ST_SH(vec4, out + 120);
-
-  SUB2(in27, in26, in25, in24, in22, in21);
-
-  DOTP_CONST_PAIR(in21, in22, cospi_15_64, cospi_17_64, vec5, vec4);
-  FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
-  ST_SH(vec5, out + 112);
-  ST_SH(vec4, out + 8);
-
-  SUB4(in17, in18, in16, in19, in31, in28, in30, in29, in23, in26, in24, in20);
-  DOTP_CONST_PAIR((-in23), in20, cospi_28_64, cospi_4_64, in27, in25);
-  SUB2(in26, in27, in24, in25, in23, in20);
-
-  DOTP_CONST_PAIR(in20, in23, cospi_23_64, cospi_9_64, vec4, vec5);
-  FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
-  ST_SH(vec4, out + 16);
-  ST_SH(vec5, out + 104);
-
-  ADD2(in26, in27, in24, in25, in22, in21);
-  DOTP_CONST_PAIR(in21, in22, cospi_7_64, cospi_25_64, vec4, vec5);
-  FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
-  ST_SH(vec4, out + 24);
-  ST_SH(vec5, out + 96);
-
-  in20 = LD_SH(interm_ptr + 32);
-  in21 = LD_SH(interm_ptr + 88);
-  in27 = LD_SH(interm_ptr + 56);
-  in26 = LD_SH(interm_ptr + 64);
-
-  in16 = in20;
-  in17 = in21;
-  DOTP_CONST_PAIR(-in16, in27, cospi_24_64, cospi_8_64, in20, in27);
-  DOTP_CONST_PAIR(-in17, in26, cospi_24_64, cospi_8_64, in21, in26);
-
-  in22 = LD_SH(interm_ptr + 40);
-  in25 = LD_SH(interm_ptr + 48);
-  in24 = LD_SH(interm_ptr + 72);
-  in23 = LD_SH(interm_ptr + 80);
-
-  SUB4(in23, in20, in22, in21, in25, in26, in24, in27, in28, in17, in18, in31);
-  DOTP_CONST_PAIR(in18, in17, cospi_12_64, cospi_20_64, in29, in30);
-  ADD2(in28, in29, in31, in30, in16, in19);
-  DOTP_CONST_PAIR(in19, in16, cospi_27_64, cospi_5_64, vec5, vec4);
-  FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
-  ST_SH(vec5, out + 32);
-  ST_SH(vec4, out + 88);
-
-  SUB2(in28, in29, in31, in30, in17, in18);
-  DOTP_CONST_PAIR(in18, in17, cospi_11_64, cospi_21_64, vec5, vec4);
-  FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
-  ST_SH(vec5, out + 40);
-  ST_SH(vec4, out + 80);
-
-  ADD4(in22, in21, in23, in20, in24, in27, in25, in26, in16, in29, in30, in19);
-  DOTP_CONST_PAIR(-in16, in19, cospi_12_64, cospi_20_64, in28, in31);
-  SUB2(in29, in28, in30, in31, in16, in19);
-
-  DOTP_CONST_PAIR(in19, in16, cospi_19_64, cospi_13_64, vec5, vec4);
-  FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
-  ST_SH(vec5, out + 72);
-  ST_SH(vec4, out + 48);
-
-  ADD2(in29, in28, in30, in31, in17, in18);
-
-  DOTP_CONST_PAIR(in18, in17, cospi_3_64, cospi_29_64, vec5, vec4);
-  FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
-  ST_SH(vec4, out + 56);
-  ST_SH(vec5, out + 64);
-}
-
-static void fdct8x32_1d_row_transpose_store(int16_t *temp, int16_t *output) {
-  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
-  v8i16 in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1;
-
-  /* 1st set */
-  in0 = LD_SH(temp);
-  in4 = LD_SH(temp + 32);
-  in2 = LD_SH(temp + 64);
-  in6 = LD_SH(temp + 96);
-  in1 = LD_SH(temp + 128);
-  in7 = LD_SH(temp + 152);
-  in3 = LD_SH(temp + 192);
-  in5 = LD_SH(temp + 216);
-
-  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
-                     in0, in1, in2, in3, in4, in5, in6, in7);
-
-  /* 2nd set */
-  in0_1 = LD_SH(temp + 16);
-  in1_1 = LD_SH(temp + 232);
-  in2_1 = LD_SH(temp + 80);
-  in3_1 = LD_SH(temp + 168);
-  in4_1 = LD_SH(temp + 48);
-  in5_1 = LD_SH(temp + 176);
-  in6_1 = LD_SH(temp + 112);
-  in7_1 = LD_SH(temp + 240);
-
-  ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, output, 32);
-  TRANSPOSE8x8_SH_SH(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1,
-                     in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1);
-
-  /* 3rd set */
-  in0 = LD_SH(temp + 8);
-  in1 = LD_SH(temp + 136);
-  in2 = LD_SH(temp + 72);
-  in3 = LD_SH(temp + 200);
-  in4 = LD_SH(temp + 40);
-  in5 = LD_SH(temp + 208);
-  in6 = LD_SH(temp + 104);
-  in7 = LD_SH(temp + 144);
-
-  ST_SH8(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1,
-         output + 8, 32);
-  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
-                     in0, in1, in2, in3, in4, in5, in6, in7);
-  ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, output + 16, 32);
-
-  /* 4th set */
-  in0_1 = LD_SH(temp + 24);
-  in1_1 = LD_SH(temp + 224);
-  in2_1 = LD_SH(temp + 88);
-  in3_1 = LD_SH(temp + 160);
-  in4_1 = LD_SH(temp + 56);
-  in5_1 = LD_SH(temp + 184);
-  in6_1 = LD_SH(temp + 120);
-  in7_1 = LD_SH(temp + 248);
-
-  TRANSPOSE8x8_SH_SH(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1,
-                     in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1);
-  ST_SH8(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1,
-         output + 24, 32);
-}
-
-static void fdct32x8_1d_row(int16_t *temp, int16_t *temp_buf,
-                            int16_t *output) {
-  fdct8x32_1d_row_load_butterfly(temp, temp_buf);
-  fdct8x32_1d_row_even(temp_buf, temp_buf);
-  fdct8x32_1d_row_odd(temp_buf + 128, temp, temp_buf + 128);
-  fdct8x32_1d_row_transpose_store(temp_buf, output);
-}
-
-static void fdct32x8_1d_row_4x(int16_t *tmp_buf_big, int16_t *tmp_buf,
-                               int16_t *output) {
-  fdct8x32_1d_row_load_butterfly(tmp_buf_big, tmp_buf);
-  fdct8x32_1d_row_even_4x(tmp_buf, tmp_buf_big, tmp_buf);
-  fdct8x32_1d_row_odd(tmp_buf + 128, tmp_buf_big, tmp_buf + 128);
-  fdct8x32_1d_row_transpose_store(tmp_buf, output);
-}
-
-void vp9_fdct32x32_msa(const int16_t *input, int16_t *output,
-                       int32_t src_stride) {
-  int32_t i;
-  DECLARE_ALIGNED(32, int16_t, tmp_buf_big[1024]);
-  DECLARE_ALIGNED(32, int16_t, tmp_buf[256]);
-
-  /* column transform */
-  for (i = 0; i < 4; ++i) {
-    fdct8x32_1d_column(input + (8 * i), src_stride, tmp_buf,
-                       tmp_buf_big + (8 * i));
-  }
-
-  /* row transform: first 8-row slice (32-bit intermediates in the even path) */
-  fdct32x8_1d_row_4x(tmp_buf_big, tmp_buf, output);
-
-  /* row transform: remaining three 8-row slices */
-  for (i = 1; i < 4; ++i) {
-    fdct32x8_1d_row(tmp_buf_big + (i * 256), tmp_buf, output + (i * 256));
-  }
-}
-
 void vp9_fdct32x32_1_msa(const int16_t *input, int16_t *out, int32_t stride) {
   out[1] = 0;
 
@@ -717,240 +30,4 @@
   out[0] += LD_HADD(input + 32 * 24 + 16, stride);
   out[0] += LD_HADD(input + 32 * 24 + 24, stride);
   out[0] >>= 3;
-}
-
-static void fdct8x32_1d_row_even_rd(int16_t *temp, int16_t *out) {
-  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
-  v8i16 in8, in9, in10, in11, in12, in13, in14, in15;
-  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, temp0, temp1;
-
-  /* fdct32 even */
-  /* stage 2 */
-  LD_SH8(temp, 8, in0, in1, in2, in3, in4, in5, in6, in7);
-  LD_SH8(temp + 64, 8, in8, in9, in10, in11, in12, in13, in14, in15);
-
-  BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7,
-               in8, in9, in10, in11, in12, in13, in14, in15,
-               vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7,
-               in8, in9, in10, in11, in12, in13, in14, in15);
-  FDCT_POSTPROC_2V_NEG_H(vec0, vec1);
-  FDCT_POSTPROC_2V_NEG_H(vec2, vec3);
-  FDCT_POSTPROC_2V_NEG_H(vec4, vec5);
-  FDCT_POSTPROC_2V_NEG_H(vec6, vec7);
-  FDCT_POSTPROC_2V_NEG_H(in8, in9);
-  FDCT_POSTPROC_2V_NEG_H(in10, in11);
-  FDCT_POSTPROC_2V_NEG_H(in12, in13);
-  FDCT_POSTPROC_2V_NEG_H(in14, in15);
-
-  /* Stage 3 */
-  ADD4(vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, in0, in1, in2, in3);
-
-  temp0 = in0 + in3;
-  in0 = in0 - in3;
-  in3 = in1 + in2;
-  in1 = in1 - in2;
-
-  DOTP_CONST_PAIR(temp0, in3, cospi_16_64, cospi_16_64, temp1, temp0);
-  ST_SH(temp0, out);
-  ST_SH(temp1, out + 8);
-
-  DOTP_CONST_PAIR(in0, in1, cospi_24_64, cospi_8_64, temp1, temp0);
-  ST_SH(temp0, out + 16);
-  ST_SH(temp1, out + 24);
-
-  SUB4(vec3, vec4, vec2, vec5, vec1, vec6, vec0, vec7, vec4, vec5, vec6, vec7);
-  DOTP_CONST_PAIR(vec6, vec5, cospi_16_64, cospi_16_64, vec5, vec6);
-  ADD2(vec4, vec5, vec7, vec6, vec0, vec1);
-  DOTP_CONST_PAIR(vec1, vec0, cospi_28_64, cospi_4_64, temp1, temp0);
-  ST_SH(temp0, out + 32);
-  ST_SH(temp1, out + 56);
-
-  SUB2(vec4, vec5, vec7, vec6, vec4, vec7);
-  DOTP_CONST_PAIR(vec7, vec4, cospi_12_64, cospi_20_64, temp1, temp0);
-  ST_SH(temp0, out + 40);
-  ST_SH(temp1, out + 48);
-
-  DOTP_CONST_PAIR(in13, in10, cospi_16_64, cospi_16_64, vec2, vec5);
-  DOTP_CONST_PAIR(in12, in11, cospi_16_64, cospi_16_64, vec3, vec4);
-  ADD4(in8, vec3, in9, vec2, in14, vec5, in15, vec4, in0, vec1, vec6, in2);
-  DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3);
-  ADD2(in0, in1, in2, in3, vec0, vec7);
-  DOTP_CONST_PAIR(vec7, vec0, cospi_30_64, cospi_2_64, temp1, temp0);
-  ST_SH(temp0, out + 64);
-  ST_SH(temp1, out + 120);
-
-  SUB2(in0, in1, in2, in3, in0, in2);
-  DOTP_CONST_PAIR(in2, in0, cospi_14_64, cospi_18_64, temp1, temp0);
-  ST_SH(temp0, out + 72);
-  ST_SH(temp1, out + 112);
-
-  SUB2(in9, vec2, in14, vec5, vec2, vec5);
-  DOTP_CONST_PAIR((-vec2), vec5, cospi_24_64, cospi_8_64, in2, in1);
-  SUB4(in8, vec3, in15, vec4, in3, in2, in0, in1, in3, in0, vec2, vec5);
-  DOTP_CONST_PAIR(vec5, vec2, cospi_22_64, cospi_10_64, temp1, temp0);
-  ST_SH(temp0, out + 80);
-  ST_SH(temp1, out + 104);
-
-  ADD2(in3, in2, in0, in1, vec3, vec4);
-  DOTP_CONST_PAIR(vec4, vec3, cospi_6_64, cospi_26_64, temp0, temp1);
-  ST_SH(temp0, out + 96);
-  ST_SH(temp1, out + 88);
-}
-
-static void fdct8x32_1d_row_odd_rd(int16_t *temp, int16_t *interm_ptr,
-                                   int16_t *out) {
-  v8i16 in16, in17, in18, in19, in20, in21, in22, in23;
-  v8i16 in24, in25, in26, in27, in28, in29, in30, in31;
-  v8i16 vec4, vec5;
-
-  in20 = LD_SH(temp + 32);
-  in21 = LD_SH(temp + 40);
-  in26 = LD_SH(temp + 80);
-  in27 = LD_SH(temp + 88);
-
-  DOTP_CONST_PAIR(in27, in20, cospi_16_64, cospi_16_64, in20, in27);
-  DOTP_CONST_PAIR(in26, in21, cospi_16_64, cospi_16_64, in21, in26);
-
-  FDCT_POSTPROC_2V_NEG_H(in20, in21);
-  FDCT_POSTPROC_2V_NEG_H(in26, in27);
-
-  in18 = LD_SH(temp + 16);
-  in19 = LD_SH(temp + 24);
-  in28 = LD_SH(temp + 96);
-  in29 = LD_SH(temp + 104);
-
-  FDCT_POSTPROC_2V_NEG_H(in18, in19);
-  FDCT_POSTPROC_2V_NEG_H(in28, in29);
-
-  vec4 = in19 - in20;
-  ST_SH(vec4, interm_ptr + 32);
-  vec4 = in18 - in21;
-  ST_SH(vec4, interm_ptr + 88);
-  vec4 = in29 - in26;
-  ST_SH(vec4, interm_ptr + 64);
-  vec4 = in28 - in27;
-  ST_SH(vec4, interm_ptr + 56);
-
-  ADD4(in18, in21, in19, in20, in28, in27, in29, in26, in21, in20, in27, in26);
-
-  in22 = LD_SH(temp + 48);
-  in23 = LD_SH(temp + 56);
-  in24 = LD_SH(temp + 64);
-  in25 = LD_SH(temp + 72);
-
-  DOTP_CONST_PAIR(in25, in22, cospi_16_64, cospi_16_64, in22, in25);
-  DOTP_CONST_PAIR(in24, in23, cospi_16_64, cospi_16_64, in23, in24);
-  FDCT_POSTPROC_2V_NEG_H(in22, in23);
-  FDCT_POSTPROC_2V_NEG_H(in24, in25);
-
-  in16 = LD_SH(temp);
-  in17 = LD_SH(temp + 8);
-  in30 = LD_SH(temp + 112);
-  in31 = LD_SH(temp + 120);
-
-  FDCT_POSTPROC_2V_NEG_H(in16, in17);
-  FDCT_POSTPROC_2V_NEG_H(in30, in31);
-
-  vec4 = in17 - in22;
-  ST_SH(vec4, interm_ptr + 40);
-  vec4 = in30 - in25;
-  ST_SH(vec4, interm_ptr + 48);
-  vec4 = in31 - in24;
-  ST_SH(vec4, interm_ptr + 72);
-  vec4 = in16 - in23;
-  ST_SH(vec4, interm_ptr + 80);
-
-  ADD4(in16, in23, in17, in22, in30, in25, in31, in24, in16, in17, in30, in31);
-  DOTP_CONST_PAIR(in26, in21, cospi_24_64, cospi_8_64, in18, in29);
-  DOTP_CONST_PAIR(in27, in20, cospi_24_64, cospi_8_64, in19, in28);
-  ADD4(in16, in19, in17, in18, in30, in29, in31, in28, in27, in22, in21, in25);
-  DOTP_CONST_PAIR(in21, in22, cospi_28_64, cospi_4_64, in26, in24);
-  ADD2(in27, in26, in25, in24, in23, in20);
-  DOTP_CONST_PAIR(in20, in23, cospi_31_64, cospi_1_64, vec4, vec5);
-  ST_SH(vec5, out);
-  ST_SH(vec4, out + 120);
-
-  SUB2(in27, in26, in25, in24, in22, in21);
-  DOTP_CONST_PAIR(in21, in22, cospi_15_64, cospi_17_64, vec5, vec4);
-  ST_SH(vec5, out + 112);
-  ST_SH(vec4, out + 8);
-
-  SUB4(in17, in18, in16, in19, in31, in28, in30, in29, in23, in26, in24, in20);
-  DOTP_CONST_PAIR((-in23), in20, cospi_28_64, cospi_4_64, in27, in25);
-  SUB2(in26, in27, in24, in25, in23, in20);
-  DOTP_CONST_PAIR(in20, in23, cospi_23_64, cospi_9_64, vec4, vec5);
-  ST_SH(vec4, out + 16);
-  ST_SH(vec5, out + 104);
-
-  ADD2(in26, in27, in24, in25, in22, in21);
-  DOTP_CONST_PAIR(in21, in22, cospi_7_64, cospi_25_64, vec4, vec5);
-  ST_SH(vec4, out + 24);
-  ST_SH(vec5, out + 96);
-
-  in20 = LD_SH(interm_ptr + 32);
-  in21 = LD_SH(interm_ptr + 88);
-  in27 = LD_SH(interm_ptr + 56);
-  in26 = LD_SH(interm_ptr + 64);
-
-  in16 = in20;
-  in17 = in21;
-  DOTP_CONST_PAIR(-in16, in27, cospi_24_64, cospi_8_64, in20, in27);
-  DOTP_CONST_PAIR(-in17, in26, cospi_24_64, cospi_8_64, in21, in26);
-
-  in22 = LD_SH(interm_ptr + 40);
-  in25 = LD_SH(interm_ptr + 48);
-  in24 = LD_SH(interm_ptr + 72);
-  in23 = LD_SH(interm_ptr + 80);
-
-  SUB4(in23, in20, in22, in21, in25, in26, in24, in27, in28, in17, in18, in31);
-  DOTP_CONST_PAIR(in18, in17, cospi_12_64, cospi_20_64, in29, in30);
-  in16 = in28 + in29;
-  in19 = in31 + in30;
-  DOTP_CONST_PAIR(in19, in16, cospi_27_64, cospi_5_64, vec5, vec4);
-  ST_SH(vec5, out + 32);
-  ST_SH(vec4, out + 88);
-
-  SUB2(in28, in29, in31, in30, in17, in18);
-  DOTP_CONST_PAIR(in18, in17, cospi_11_64, cospi_21_64, vec5, vec4);
-  ST_SH(vec5, out + 40);
-  ST_SH(vec4, out + 80);
-
-  ADD4(in22, in21, in23, in20, in24, in27, in25, in26, in16, in29, in30, in19);
-  DOTP_CONST_PAIR(-in16, in19, cospi_12_64, cospi_20_64, in28, in31);
-  SUB2(in29, in28, in30, in31, in16, in19);
-  DOTP_CONST_PAIR(in19, in16, cospi_19_64, cospi_13_64, vec5, vec4);
-  ST_SH(vec5, out + 72);
-  ST_SH(vec4, out + 48);
-
-  ADD2(in29, in28, in30, in31, in17, in18);
-  DOTP_CONST_PAIR(in18, in17, cospi_3_64, cospi_29_64, vec5, vec4);
-  ST_SH(vec4, out + 56);
-  ST_SH(vec5, out + 64);
-}
-
-static void fdct32x8_1d_row_rd(int16_t *tmp_buf_big, int16_t *tmp_buf,
-                               int16_t *output) {
-  fdct8x32_1d_row_load_butterfly(tmp_buf_big, tmp_buf);
-  fdct8x32_1d_row_even_rd(tmp_buf, tmp_buf);
-  fdct8x32_1d_row_odd_rd((tmp_buf + 128), tmp_buf_big, (tmp_buf + 128));
-  fdct8x32_1d_row_transpose_store(tmp_buf, output);
-}
-
-void vp9_fdct32x32_rd_msa(const int16_t *input, int16_t *out,
-                          int32_t src_stride) {
-  int32_t i;
-  DECLARE_ALIGNED(32, int16_t, tmp_buf_big[1024]);
-  DECLARE_ALIGNED(32, int16_t, tmp_buf[256]);
-
-  /* column transform */
-  for (i = 0; i < 4; ++i) {
-    fdct8x32_1d_column(input + (8 * i), src_stride, &tmp_buf[0],
-                       &tmp_buf_big[0] + (8 * i));
-  }
-
-  /* row transform */
-  for (i = 0; i < 4; ++i) {
-    fdct32x8_1d_row_rd(&tmp_buf_big[0] + (8 * i * 32), &tmp_buf[0],
-                       out + (8 * i * 32));
-  }
 }
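
Of the MSA code above, only vp9_fdct32x32_1_msa, the DC-only shortcut, stays
behind; the full transform (a column pass over four 8-column slices into a
32x32 int16 scratch buffer, then a row pass) moves out with the rest. The
surviving path has a simple scalar meaning, matching vp9_fdct32x32_1_c removed
from vp9_dct.c further down; a minimal sketch:

    /* Scalar model of the DC-only 32x32 forward transform (sketch). */
    static void fdct32x32_dc_only(const int16_t *input, int stride,
                                  int16_t *out) {
      int32_t sum = 0;
      int r, c;
      for (r = 0; r < 32; ++r)
        for (c = 0; c < 32; ++c)
          sum += input[r * stride + c];
      out[0] = (int16_t)(sum >> 3);  /* DC coefficient */
      out[1] = 0;                    /* only the DC term is produced */
    }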
--- a/vp9/encoder/mips/msa/vp9_fdct_msa.h
+++ b/vp9/encoder/mips/msa/vp9_fdct_msa.h
@@ -96,22 +96,6 @@
   HADD_SW_S32(vec_w_m);                                               \
 })
 
-#define FDCT_POSTPROC_2V_NEG_H(vec0, vec1) {      \
-  v8i16 tp0_m, tp1_m;                             \
-  v8i16 one_m = __msa_ldi_h(1);                   \
-                                                  \
-  tp0_m = __msa_clti_s_h(vec0, 0);                \
-  tp1_m = __msa_clti_s_h(vec1, 0);                \
-  vec0 += 1;                                      \
-  vec1 += 1;                                      \
-  tp0_m = one_m & tp0_m;                          \
-  tp1_m = one_m & tp1_m;                          \
-  vec0 += tp0_m;                                  \
-  vec1 += tp1_m;                                  \
-  vec0 >>= 2;                                     \
-  vec1 >>= 2;                                     \
-}
-
 #define VP9_FADST4(in0, in1, in2, in3, out0, out1, out2, out3) {  \
   v4i32 s0_m, s1_m, s2_m, s3_m, constant_m;                       \
   v4i32 in0_r_m, in1_r_m, in2_r_m, in3_r_m;                       \
@@ -144,68 +128,5 @@
   SRARI_W4_SW(in0_r_m, in1_r_m, s2_m, s3_m, DCT_CONST_BITS);      \
   PCKEV_H4_SH(in0_r_m, in0_r_m, in1_r_m, in1_r_m, s2_m, s2_m,     \
               s3_m, s3_m, out0, out1, out2, out3);                \
-}
-
-#define FDCT32_POSTPROC_NEG_W(vec) {      \
-  v4i32 temp_m;                           \
-  v4i32 one_m = __msa_ldi_w(1);           \
-                                          \
-  temp_m = __msa_clti_s_w(vec, 0);        \
-  vec += 1;                               \
-  temp_m = one_m & temp_m;                \
-  vec += temp_m;                          \
-  vec >>= 2;                              \
-}
-
-#define FDCT32_POSTPROC_2V_POS_H(vec0, vec1) {      \
-  v8i16 tp0_m, tp1_m;                               \
-  v8i16 one = __msa_ldi_h(1);                       \
-                                                    \
-  tp0_m = __msa_clei_s_h(vec0, 0);                  \
-  tp1_m = __msa_clei_s_h(vec1, 0);                  \
-  tp0_m = (v8i16)__msa_xori_b((v16u8)tp0_m, 255);   \
-  tp1_m = (v8i16)__msa_xori_b((v16u8)tp1_m, 255);   \
-  vec0 += 1;                                        \
-  vec1 += 1;                                        \
-  tp0_m = one & tp0_m;                              \
-  tp1_m = one & tp1_m;                              \
-  vec0 += tp0_m;                                    \
-  vec1 += tp1_m;                                    \
-  vec0 >>= 2;                                       \
-  vec1 >>= 2;                                       \
-}
-
-#define DOTP_CONST_PAIR_W(reg0_left, reg1_left, reg0_right,      \
-                          reg1_right, const0, const1,            \
-                          out0, out1, out2, out3) {              \
-  v4i32 s0_m, s1_m, s2_m, s3_m, s4_m, s5_m, s6_m, s7_m;          \
-  v2i64 tp0_m, tp1_m, tp2_m, tp3_m;                              \
-  v4i32 k0_m = __msa_fill_w((int32_t) const0);                   \
-                                                                 \
-  s0_m = __msa_fill_w((int32_t) const1);                         \
-  k0_m = __msa_ilvev_w(s0_m, k0_m);                              \
-                                                                 \
-  ILVRL_W2_SW(-reg1_left, reg0_left, s1_m, s0_m);                \
-  ILVRL_W2_SW(reg0_left, reg1_left, s3_m, s2_m);                 \
-  ILVRL_W2_SW(-reg1_right, reg0_right, s5_m, s4_m);              \
-  ILVRL_W2_SW(reg0_right, reg1_right, s7_m, s6_m);               \
-                                                                 \
-  DOTP_SW2_SD(s0_m, s1_m, k0_m, k0_m, tp0_m, tp1_m);             \
-  DOTP_SW2_SD(s4_m, s5_m, k0_m, k0_m, tp2_m, tp3_m);             \
-  tp0_m = __msa_srari_d(tp0_m, DCT_CONST_BITS);                  \
-  tp1_m = __msa_srari_d(tp1_m, DCT_CONST_BITS);                  \
-  tp2_m = __msa_srari_d(tp2_m, DCT_CONST_BITS);                  \
-  tp3_m = __msa_srari_d(tp3_m, DCT_CONST_BITS);                  \
-  out0 = __msa_pckev_w((v4i32)tp0_m, (v4i32)tp1_m);              \
-  out1 = __msa_pckev_w((v4i32)tp2_m, (v4i32)tp3_m);              \
-                                                                 \
-  DOTP_SW2_SD(s2_m, s3_m, k0_m, k0_m, tp0_m, tp1_m);             \
-  DOTP_SW2_SD(s6_m, s7_m, k0_m, k0_m, tp2_m, tp3_m);             \
-  tp0_m = __msa_srari_d(tp0_m, DCT_CONST_BITS);                  \
-  tp1_m = __msa_srari_d(tp1_m, DCT_CONST_BITS);                  \
-  tp2_m = __msa_srari_d(tp2_m, DCT_CONST_BITS);                  \
-  tp3_m = __msa_srari_d(tp3_m, DCT_CONST_BITS);                  \
-  out2 = __msa_pckev_w((v4i32)tp0_m, (v4i32)tp1_m);              \
-  out3 = __msa_pckev_w((v4i32)tp2_m, (v4i32)tp3_m);              \
 }
 #endif  /* VP9_ENCODER_MIPS_MSA_VP9_FDCT_MSA_H_ */
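
The postproc macros deleted above are vector forms of the per-coefficient
rounding that the scalar code in vp9_dct.c (next file) applies between the
column and row passes: FDCT_POSTPROC_2V_NEG_H rounds each lane as
(x + 1 + (x < 0)) >> 2, and FDCT32_POSTPROC_2V_POS_H as (x + 1 + (x > 0)) >> 2.
A scalar sketch of the two, assuming plain int16_t lanes:

    /* Lane-wise meaning of the deleted rounding macros (sketch). */
    static inline int16_t postproc_neg_h(int16_t x) {  /* FDCT_POSTPROC_2V_NEG_H */
      return (int16_t)((x + 1 + (x < 0)) >> 2);
    }
    static inline int16_t postproc_pos_h(int16_t x) {  /* FDCT32_POSTPROC_2V_POS_H */
      return (int16_t)((x + 1 + (x > 0)) >> 2);
    }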
--- a/vp9/encoder/vp9_dct.c
+++ b/vp9/encoder/vp9_dct.c
@@ -822,410 +822,6 @@
   }
 }
 
-static INLINE tran_high_t dct_32_round(tran_high_t input) {
-  tran_high_t rv = ROUND_POWER_OF_TWO(input, DCT_CONST_BITS);
-  // TODO(debargha, peter.derivaz): Find new bounds for this assert,
-  // and make the bounds consts.
-  // assert(-131072 <= rv && rv <= 131071);
-  return rv;
-}
-
-static INLINE tran_high_t half_round_shift(tran_high_t input) {
-  tran_high_t rv = (input + 1 + (input < 0)) >> 2;
-  return rv;
-}
-
-void vp9_fdct32(const tran_high_t *input, tran_high_t *output, int round) {
-  tran_high_t step[32];
-  // Stage 1
-  step[0] = input[0] + input[(32 - 1)];
-  step[1] = input[1] + input[(32 - 2)];
-  step[2] = input[2] + input[(32 - 3)];
-  step[3] = input[3] + input[(32 - 4)];
-  step[4] = input[4] + input[(32 - 5)];
-  step[5] = input[5] + input[(32 - 6)];
-  step[6] = input[6] + input[(32 - 7)];
-  step[7] = input[7] + input[(32 - 8)];
-  step[8] = input[8] + input[(32 - 9)];
-  step[9] = input[9] + input[(32 - 10)];
-  step[10] = input[10] + input[(32 - 11)];
-  step[11] = input[11] + input[(32 - 12)];
-  step[12] = input[12] + input[(32 - 13)];
-  step[13] = input[13] + input[(32 - 14)];
-  step[14] = input[14] + input[(32 - 15)];
-  step[15] = input[15] + input[(32 - 16)];
-  step[16] = -input[16] + input[(32 - 17)];
-  step[17] = -input[17] + input[(32 - 18)];
-  step[18] = -input[18] + input[(32 - 19)];
-  step[19] = -input[19] + input[(32 - 20)];
-  step[20] = -input[20] + input[(32 - 21)];
-  step[21] = -input[21] + input[(32 - 22)];
-  step[22] = -input[22] + input[(32 - 23)];
-  step[23] = -input[23] + input[(32 - 24)];
-  step[24] = -input[24] + input[(32 - 25)];
-  step[25] = -input[25] + input[(32 - 26)];
-  step[26] = -input[26] + input[(32 - 27)];
-  step[27] = -input[27] + input[(32 - 28)];
-  step[28] = -input[28] + input[(32 - 29)];
-  step[29] = -input[29] + input[(32 - 30)];
-  step[30] = -input[30] + input[(32 - 31)];
-  step[31] = -input[31] + input[(32 - 32)];
-
-  // Stage 2
-  output[0] = step[0] + step[16 - 1];
-  output[1] = step[1] + step[16 - 2];
-  output[2] = step[2] + step[16 - 3];
-  output[3] = step[3] + step[16 - 4];
-  output[4] = step[4] + step[16 - 5];
-  output[5] = step[5] + step[16 - 6];
-  output[6] = step[6] + step[16 - 7];
-  output[7] = step[7] + step[16 - 8];
-  output[8] = -step[8] + step[16 - 9];
-  output[9] = -step[9] + step[16 - 10];
-  output[10] = -step[10] + step[16 - 11];
-  output[11] = -step[11] + step[16 - 12];
-  output[12] = -step[12] + step[16 - 13];
-  output[13] = -step[13] + step[16 - 14];
-  output[14] = -step[14] + step[16 - 15];
-  output[15] = -step[15] + step[16 - 16];
-
-  output[16] = step[16];
-  output[17] = step[17];
-  output[18] = step[18];
-  output[19] = step[19];
-
-  output[20] = dct_32_round((-step[20] + step[27]) * cospi_16_64);
-  output[21] = dct_32_round((-step[21] + step[26]) * cospi_16_64);
-  output[22] = dct_32_round((-step[22] + step[25]) * cospi_16_64);
-  output[23] = dct_32_round((-step[23] + step[24]) * cospi_16_64);
-
-  output[24] = dct_32_round((step[24] + step[23]) * cospi_16_64);
-  output[25] = dct_32_round((step[25] + step[22]) * cospi_16_64);
-  output[26] = dct_32_round((step[26] + step[21]) * cospi_16_64);
-  output[27] = dct_32_round((step[27] + step[20]) * cospi_16_64);
-
-  output[28] = step[28];
-  output[29] = step[29];
-  output[30] = step[30];
-  output[31] = step[31];
-
-  // damp the magnitude by 4 so that the intermediate values stay within
-  // the range of 16 bits.
-  if (round) {
-    output[0] = half_round_shift(output[0]);
-    output[1] = half_round_shift(output[1]);
-    output[2] = half_round_shift(output[2]);
-    output[3] = half_round_shift(output[3]);
-    output[4] = half_round_shift(output[4]);
-    output[5] = half_round_shift(output[5]);
-    output[6] = half_round_shift(output[6]);
-    output[7] = half_round_shift(output[7]);
-    output[8] = half_round_shift(output[8]);
-    output[9] = half_round_shift(output[9]);
-    output[10] = half_round_shift(output[10]);
-    output[11] = half_round_shift(output[11]);
-    output[12] = half_round_shift(output[12]);
-    output[13] = half_round_shift(output[13]);
-    output[14] = half_round_shift(output[14]);
-    output[15] = half_round_shift(output[15]);
-
-    output[16] = half_round_shift(output[16]);
-    output[17] = half_round_shift(output[17]);
-    output[18] = half_round_shift(output[18]);
-    output[19] = half_round_shift(output[19]);
-    output[20] = half_round_shift(output[20]);
-    output[21] = half_round_shift(output[21]);
-    output[22] = half_round_shift(output[22]);
-    output[23] = half_round_shift(output[23]);
-    output[24] = half_round_shift(output[24]);
-    output[25] = half_round_shift(output[25]);
-    output[26] = half_round_shift(output[26]);
-    output[27] = half_round_shift(output[27]);
-    output[28] = half_round_shift(output[28]);
-    output[29] = half_round_shift(output[29]);
-    output[30] = half_round_shift(output[30]);
-    output[31] = half_round_shift(output[31]);
-  }
-
-  // Stage 3
-  step[0] = output[0] + output[(8 - 1)];
-  step[1] = output[1] + output[(8 - 2)];
-  step[2] = output[2] + output[(8 - 3)];
-  step[3] = output[3] + output[(8 - 4)];
-  step[4] = -output[4] + output[(8 - 5)];
-  step[5] = -output[5] + output[(8 - 6)];
-  step[6] = -output[6] + output[(8 - 7)];
-  step[7] = -output[7] + output[(8 - 8)];
-  step[8] = output[8];
-  step[9] = output[9];
-  step[10] = dct_32_round((-output[10] + output[13]) * cospi_16_64);
-  step[11] = dct_32_round((-output[11] + output[12]) * cospi_16_64);
-  step[12] = dct_32_round((output[12] + output[11]) * cospi_16_64);
-  step[13] = dct_32_round((output[13] + output[10]) * cospi_16_64);
-  step[14] = output[14];
-  step[15] = output[15];
-
-  step[16] = output[16] + output[23];
-  step[17] = output[17] + output[22];
-  step[18] = output[18] + output[21];
-  step[19] = output[19] + output[20];
-  step[20] = -output[20] + output[19];
-  step[21] = -output[21] + output[18];
-  step[22] = -output[22] + output[17];
-  step[23] = -output[23] + output[16];
-  step[24] = -output[24] + output[31];
-  step[25] = -output[25] + output[30];
-  step[26] = -output[26] + output[29];
-  step[27] = -output[27] + output[28];
-  step[28] = output[28] + output[27];
-  step[29] = output[29] + output[26];
-  step[30] = output[30] + output[25];
-  step[31] = output[31] + output[24];
-
-  // Stage 4
-  output[0] = step[0] + step[3];
-  output[1] = step[1] + step[2];
-  output[2] = -step[2] + step[1];
-  output[3] = -step[3] + step[0];
-  output[4] = step[4];
-  output[5] = dct_32_round((-step[5] + step[6]) * cospi_16_64);
-  output[6] = dct_32_round((step[6] + step[5]) * cospi_16_64);
-  output[7] = step[7];
-  output[8] = step[8] + step[11];
-  output[9] = step[9] + step[10];
-  output[10] = -step[10] + step[9];
-  output[11] = -step[11] + step[8];
-  output[12] = -step[12] + step[15];
-  output[13] = -step[13] + step[14];
-  output[14] = step[14] + step[13];
-  output[15] = step[15] + step[12];
-
-  output[16] = step[16];
-  output[17] = step[17];
-  output[18] = dct_32_round(step[18] * -cospi_8_64 + step[29] * cospi_24_64);
-  output[19] = dct_32_round(step[19] * -cospi_8_64 + step[28] * cospi_24_64);
-  output[20] = dct_32_round(step[20] * -cospi_24_64 + step[27] * -cospi_8_64);
-  output[21] = dct_32_round(step[21] * -cospi_24_64 + step[26] * -cospi_8_64);
-  output[22] = step[22];
-  output[23] = step[23];
-  output[24] = step[24];
-  output[25] = step[25];
-  output[26] = dct_32_round(step[26] * cospi_24_64 + step[21] * -cospi_8_64);
-  output[27] = dct_32_round(step[27] * cospi_24_64 + step[20] * -cospi_8_64);
-  output[28] = dct_32_round(step[28] * cospi_8_64 + step[19] * cospi_24_64);
-  output[29] = dct_32_round(step[29] * cospi_8_64 + step[18] * cospi_24_64);
-  output[30] = step[30];
-  output[31] = step[31];
-
-  // Stage 5
-  step[0] = dct_32_round((output[0] + output[1]) * cospi_16_64);
-  step[1] = dct_32_round((-output[1] + output[0]) * cospi_16_64);
-  step[2] = dct_32_round(output[2] * cospi_24_64 + output[3] * cospi_8_64);
-  step[3] = dct_32_round(output[3] * cospi_24_64 - output[2] * cospi_8_64);
-  step[4] = output[4] + output[5];
-  step[5] = -output[5] + output[4];
-  step[6] = -output[6] + output[7];
-  step[7] = output[7] + output[6];
-  step[8] = output[8];
-  step[9] = dct_32_round(output[9] * -cospi_8_64 + output[14] * cospi_24_64);
-  step[10] = dct_32_round(output[10] * -cospi_24_64 + output[13] * -cospi_8_64);
-  step[11] = output[11];
-  step[12] = output[12];
-  step[13] = dct_32_round(output[13] * cospi_24_64 + output[10] * -cospi_8_64);
-  step[14] = dct_32_round(output[14] * cospi_8_64 + output[9] * cospi_24_64);
-  step[15] = output[15];
-
-  step[16] = output[16] + output[19];
-  step[17] = output[17] + output[18];
-  step[18] = -output[18] + output[17];
-  step[19] = -output[19] + output[16];
-  step[20] = -output[20] + output[23];
-  step[21] = -output[21] + output[22];
-  step[22] = output[22] + output[21];
-  step[23] = output[23] + output[20];
-  step[24] = output[24] + output[27];
-  step[25] = output[25] + output[26];
-  step[26] = -output[26] + output[25];
-  step[27] = -output[27] + output[24];
-  step[28] = -output[28] + output[31];
-  step[29] = -output[29] + output[30];
-  step[30] = output[30] + output[29];
-  step[31] = output[31] + output[28];
-
-  // Stage 6
-  output[0] = step[0];
-  output[1] = step[1];
-  output[2] = step[2];
-  output[3] = step[3];
-  output[4] = dct_32_round(step[4] * cospi_28_64 + step[7] * cospi_4_64);
-  output[5] = dct_32_round(step[5] * cospi_12_64 + step[6] * cospi_20_64);
-  output[6] = dct_32_round(step[6] * cospi_12_64 + step[5] * -cospi_20_64);
-  output[7] = dct_32_round(step[7] * cospi_28_64 + step[4] * -cospi_4_64);
-  output[8] = step[8] + step[9];
-  output[9] = -step[9] + step[8];
-  output[10] = -step[10] + step[11];
-  output[11] = step[11] + step[10];
-  output[12] = step[12] + step[13];
-  output[13] = -step[13] + step[12];
-  output[14] = -step[14] + step[15];
-  output[15] = step[15] + step[14];
-
-  output[16] = step[16];
-  output[17] = dct_32_round(step[17] * -cospi_4_64 + step[30] * cospi_28_64);
-  output[18] = dct_32_round(step[18] * -cospi_28_64 + step[29] * -cospi_4_64);
-  output[19] = step[19];
-  output[20] = step[20];
-  output[21] = dct_32_round(step[21] * -cospi_20_64 + step[26] * cospi_12_64);
-  output[22] = dct_32_round(step[22] * -cospi_12_64 + step[25] * -cospi_20_64);
-  output[23] = step[23];
-  output[24] = step[24];
-  output[25] = dct_32_round(step[25] * cospi_12_64 + step[22] * -cospi_20_64);
-  output[26] = dct_32_round(step[26] * cospi_20_64 + step[21] * cospi_12_64);
-  output[27] = step[27];
-  output[28] = step[28];
-  output[29] = dct_32_round(step[29] * cospi_28_64 + step[18] * -cospi_4_64);
-  output[30] = dct_32_round(step[30] * cospi_4_64 + step[17] * cospi_28_64);
-  output[31] = step[31];
-
-  // Stage 7
-  step[0] = output[0];
-  step[1] = output[1];
-  step[2] = output[2];
-  step[3] = output[3];
-  step[4] = output[4];
-  step[5] = output[5];
-  step[6] = output[6];
-  step[7] = output[7];
-  step[8] = dct_32_round(output[8] * cospi_30_64 + output[15] * cospi_2_64);
-  step[9] = dct_32_round(output[9] * cospi_14_64 + output[14] * cospi_18_64);
-  step[10] = dct_32_round(output[10] * cospi_22_64 + output[13] * cospi_10_64);
-  step[11] = dct_32_round(output[11] * cospi_6_64 + output[12] * cospi_26_64);
-  step[12] = dct_32_round(output[12] * cospi_6_64 + output[11] * -cospi_26_64);
-  step[13] = dct_32_round(output[13] * cospi_22_64 + output[10] * -cospi_10_64);
-  step[14] = dct_32_round(output[14] * cospi_14_64 + output[9] * -cospi_18_64);
-  step[15] = dct_32_round(output[15] * cospi_30_64 + output[8] * -cospi_2_64);
-
-  step[16] = output[16] + output[17];
-  step[17] = -output[17] + output[16];
-  step[18] = -output[18] + output[19];
-  step[19] = output[19] + output[18];
-  step[20] = output[20] + output[21];
-  step[21] = -output[21] + output[20];
-  step[22] = -output[22] + output[23];
-  step[23] = output[23] + output[22];
-  step[24] = output[24] + output[25];
-  step[25] = -output[25] + output[24];
-  step[26] = -output[26] + output[27];
-  step[27] = output[27] + output[26];
-  step[28] = output[28] + output[29];
-  step[29] = -output[29] + output[28];
-  step[30] = -output[30] + output[31];
-  step[31] = output[31] + output[30];
-
-  // Final stage: output indices are bit-reversed.
-  output[0]  = step[0];
-  output[16] = step[1];
-  output[8]  = step[2];
-  output[24] = step[3];
-  output[4]  = step[4];
-  output[20] = step[5];
-  output[12] = step[6];
-  output[28] = step[7];
-  output[2]  = step[8];
-  output[18] = step[9];
-  output[10] = step[10];
-  output[26] = step[11];
-  output[6]  = step[12];
-  output[22] = step[13];
-  output[14] = step[14];
-  output[30] = step[15];
-
-  output[1]  = dct_32_round(step[16] * cospi_31_64 + step[31] * cospi_1_64);
-  output[17] = dct_32_round(step[17] * cospi_15_64 + step[30] * cospi_17_64);
-  output[9]  = dct_32_round(step[18] * cospi_23_64 + step[29] * cospi_9_64);
-  output[25] = dct_32_round(step[19] * cospi_7_64 + step[28] * cospi_25_64);
-  output[5]  = dct_32_round(step[20] * cospi_27_64 + step[27] * cospi_5_64);
-  output[21] = dct_32_round(step[21] * cospi_11_64 + step[26] * cospi_21_64);
-  output[13] = dct_32_round(step[22] * cospi_19_64 + step[25] * cospi_13_64);
-  output[29] = dct_32_round(step[23] * cospi_3_64 + step[24] * cospi_29_64);
-  output[3]  = dct_32_round(step[24] * cospi_3_64 + step[23] * -cospi_29_64);
-  output[19] = dct_32_round(step[25] * cospi_19_64 + step[22] * -cospi_13_64);
-  output[11] = dct_32_round(step[26] * cospi_11_64 + step[21] * -cospi_21_64);
-  output[27] = dct_32_round(step[27] * cospi_27_64 + step[20] * -cospi_5_64);
-  output[7]  = dct_32_round(step[28] * cospi_7_64 + step[19] * -cospi_25_64);
-  output[23] = dct_32_round(step[29] * cospi_23_64 + step[18] * -cospi_9_64);
-  output[15] = dct_32_round(step[30] * cospi_15_64 + step[17] * -cospi_17_64);
-  output[31] = dct_32_round(step[31] * cospi_31_64 + step[16] * -cospi_1_64);
-}
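
Every dct_32_round() above folds a Q14 cospi product back to transform precision. For reference, a minimal sketch of that helper under the usual txfm_common.h conventions (DCT_CONST_BITS == 14, so the rounding constant is 1 << 13); this is a sketch, not the hunk's own text:

    // Round-to-nearest for Q14 fixed point: add half the divisor,
    // then arithmetic-shift right by DCT_CONST_BITS.
    static INLINE tran_high_t dct_32_round(tran_high_t input) {
      // Equivalent to ROUND_POWER_OF_TWO(input, DCT_CONST_BITS).
      return (input + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS;
    }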
-
-void vp9_fdct32x32_1_c(const int16_t *input, tran_low_t *output, int stride) {
-  int r, c;
-  tran_low_t sum = 0;
-  for (r = 0; r < 32; ++r)
-    for (c = 0; c < 32; ++c)
-      sum += input[r * stride + c];
-
-  output[0] = sum >> 3;
-  output[1] = 0;
-}
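
A quick sanity check of the DC-only estimate above, with a hypothetical constant block (the harness below is illustrative, not from the tree): a block filled with v sums to 32 * 32 * v = 1024 * v, so output[0] becomes 128 * v.

    #include <assert.h>
    static void check_dc_only(void) {
      int16_t block[32 * 32];
      tran_low_t dc[2];
      int i;
      for (i = 0; i < 32 * 32; ++i) block[i] = 7;  // constant block
      vp9_fdct32x32_1_c(block, dc, 32);            // stride == width
      assert(dc[0] == 128 * 7);                    // (32 * 32 * 7) >> 3
      assert(dc[1] == 0);
    }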
-
-void vp9_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
-  int i, j;
-  tran_high_t output[32 * 32];
-
-  // Columns
-  for (i = 0; i < 32; ++i) {
-    tran_high_t temp_in[32], temp_out[32];
-    for (j = 0; j < 32; ++j)
-      temp_in[j] = input[j * stride + i] * 4;
-    vp9_fdct32(temp_in, temp_out, 0);
-    for (j = 0; j < 32; ++j)
-      output[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2;
-  }
-
-  // Rows
-  for (i = 0; i < 32; ++i) {
-    tran_high_t temp_in[32], temp_out[32];
-    for (j = 0; j < 32; ++j)
-      temp_in[j] = output[j + i * 32];
-    vp9_fdct32(temp_in, temp_out, 0);
-    for (j = 0; j < 32; ++j)
-      out[j + i * 32] =
-          (tran_low_t)((temp_out[j] + 1 + (temp_out[j] < 0)) >> 2);
-  }
-}
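
Note the asymmetry between the two passes above: columns round with (x + 1 + (x > 0)) >> 2, rows with (x + 1 + (x < 0)) >> 2. In scalar terms (my reading of the shifts, shown for illustration only):

    // Column pass rounds halves away from zero; row pass rounds them
    // toward zero:
    //   x ==  6:  (6 + 1 + 1) >> 2 ==  2   vs   (6 + 1 + 0) >> 2 ==  1
    //   x == -6: (-6 + 1 + 0) >> 2 == -2   vs  (-6 + 1 + 1) >> 2 == -1
    static tran_high_t round_col(tran_high_t x) { return (x + 1 + (x > 0)) >> 2; }
    static tran_high_t round_row(tran_high_t x) { return (x + 1 + (x < 0)) >> 2; }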
-
-// Note that although dct_32_round is used in the dct32 computation flow,
-// this 2-D fdct32x32 for the rate-distortion optimization loop operates
-// within 16-bit precision.
-void vp9_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, int stride) {
-  int i, j;
-  tran_high_t output[32 * 32];
-
-  // Columns
-  for (i = 0; i < 32; ++i) {
-    tran_high_t temp_in[32], temp_out[32];
-    for (j = 0; j < 32; ++j)
-      temp_in[j] = input[j * stride + i] * 4;
-    vp9_fdct32(temp_in, temp_out, 0);
-    for (j = 0; j < 32; ++j)
-      // TODO(cd): see quality impact of only doing
-      //           output[j * 32 + i] = (temp_out[j] + 1) >> 2;
-      //           PS: also change code in vp9/encoder/x86/vp9_dct_sse2.c
-      output[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2;
-  }
-
-  // Rows
-  for (i = 0; i < 32; ++i) {
-    tran_high_t temp_in[32], temp_out[32];
-    for (j = 0; j < 32; ++j)
-      temp_in[j] = output[j + i * 32];
-    vp9_fdct32(temp_in, temp_out, 1);
-    for (j = 0; j < 32; ++j)
-      out[j + i * 32] = (tran_low_t)temp_out[j];
-  }
-}
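
The only differences from vp9_fdct32x32_c are the round flag on the row-pass vp9_fdct32 call and the absence of a final shift: with round != 0, the transform halves its intermediates in-flow so everything stays within 16 bits. A sketch of that helper as I understand it (half_round_shift() is not reproduced in this hunk; same pattern as the row rounding above):

    static INLINE tran_high_t half_round_shift(tran_high_t input) {
      return (input + 1 + (input < 0)) >> 2;  // rounds halves toward zero
    }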
-
 #if CONFIG_VP9_HIGHBITDEPTH
 void vp9_highbd_fht4x4_c(const int16_t *input, tran_low_t *output,
                          int stride, int tx_type) {
@@ -1260,14 +856,5 @@
 void vp9_highbd_fdct32x32_1_c(const int16_t *input, tran_low_t *out,
                               int stride) {
   vp9_fdct32x32_1_c(input, out, stride);
-}
-
-void vp9_highbd_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
-  vp9_fdct32x32_c(input, out, stride);
-}
-
-void vp9_highbd_fdct32x32_rd_c(const int16_t *input, tran_low_t *out,
-                               int stride) {
-  vp9_fdct32x32_rd_c(input, out, stride);
 }
 #endif  // CONFIG_VP9_HIGHBITDEPTH
--- a/vp9/encoder/x86/vp9_dct32x32_avx2_impl.h
+++ /dev/null
@@ -1,2713 +1,0 @@
-/*
- *  Copyright (c) 2012 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <immintrin.h>  // AVX2
-
-#include "./vp9_rtcd.h"
-#include "vpx_dsp/txfm_common.h"
-#include "vpx_ports/mem.h"
-
-#define pair256_set_epi16(a, b) \
-  _mm256_set_epi16((int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a), \
-                   (int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a), \
-                   (int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a), \
-                   (int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a))
-
-#define pair256_set_epi32(a, b) \
-  _mm256_set_epi32((int)(b), (int)(a), (int)(b), (int)(a), \
-                   (int)(b), (int)(a), (int)(b), (int)(a))
-
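These pair macros exist to feed _mm256_madd_epi16: interleave two vectors, multiply-accumulate against a repeated (a, b) pair, and each 32-bit lane comes out as one rotation term. A minimal sketch of the pattern used throughout the function below (x, y, a, b stand for arbitrary vectors and constants):

    // t holds x0,y0,x1,y1,... after the unpack; the madd against the
    // repeated (a, b) pair yields 32-bit lanes x0*a + y0*b, x1*a + y1*b, ...
    const __m256i t = _mm256_unpacklo_epi16(x, y);
    const __m256i r = _mm256_madd_epi16(t, pair256_set_epi16(a, b));
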
-#if FDCT32x32_HIGH_PRECISION
-static INLINE __m256i k_madd_epi32_avx2(__m256i a, __m256i b) {
-  __m256i buf0, buf1;
-  buf0 = _mm256_mul_epu32(a, b);
-  a = _mm256_srli_epi64(a, 32);
-  b = _mm256_srli_epi64(b, 32);
-  buf1 = _mm256_mul_epu32(a, b);
-  return _mm256_add_epi64(buf0, buf1);
-}
-
-static INLINE __m256i k_packs_epi64_avx2(__m256i a, __m256i b) {
-  __m256i buf0 = _mm256_shuffle_epi32(a, _MM_SHUFFLE(0, 0, 2, 0));
-  __m256i buf1 = _mm256_shuffle_epi32(b, _MM_SHUFFLE(0, 0, 2, 0));
-  return _mm256_unpacklo_epi64(buf0, buf1);
-}
-#endif
-
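A note on the two high-precision helpers above, per my reading of the lane mechanics: _mm256_mul_epu32 multiplies the even 32-bit lanes into 64-bit products, the srli-by-32 moves the odd lanes into even position for the second multiply, and the add gives a[2i]*b[2i] + a[2i+1]*b[2i+1] per 64-bit lane; k_packs_epi64_avx2 then keeps only the low 32 bits of each lane. The multiplies are unsigned, but products agree with the signed ones modulo 2^32, so the packed result is correct whenever the true signed sum fits in 32 bits.

    // Scalar model of the pair, valid when the signed sum fits in 32 bits:
    static int32_t k_madd_lane(const int32_t *a, const int32_t *b, int i) {
      return (int32_t)((int64_t)a[2 * i] * b[2 * i] +
                       (int64_t)a[2 * i + 1] * b[2 * i + 1]);
    }
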
-void FDCT32x32_2D_AVX2(const int16_t *input,
-                  int16_t *output_org, int stride) {
-  // Calculate pre-multiplied strides
-  const int str1 = stride;
-  const int str2 = 2 * stride;
-  const int str3 = 2 * stride + str1;
-  // We need an intermediate buffer between passes.
-  DECLARE_ALIGNED(32, int16_t, intermediate[32 * 32]);
-  // Constants
-  //    In one case all lanes hold the same constant. In all other cases a
-  //    pair of constants repeats across the register; this is done by
-  //    constructing the 32-bit constant corresponding to that pair.
-  const __m256i k__cospi_p16_p16 = _mm256_set1_epi16((int16_t)cospi_16_64);
-  const __m256i k__cospi_p16_m16 = pair256_set_epi16(+cospi_16_64, -cospi_16_64);
-  const __m256i k__cospi_m08_p24 = pair256_set_epi16(-cospi_8_64,   cospi_24_64);
-  const __m256i k__cospi_m24_m08 = pair256_set_epi16(-cospi_24_64, -cospi_8_64);
-  const __m256i k__cospi_p24_p08 = pair256_set_epi16(+cospi_24_64,  cospi_8_64);
-  const __m256i k__cospi_p12_p20 = pair256_set_epi16(+cospi_12_64,  cospi_20_64);
-  const __m256i k__cospi_m20_p12 = pair256_set_epi16(-cospi_20_64,  cospi_12_64);
-  const __m256i k__cospi_m04_p28 = pair256_set_epi16(-cospi_4_64,   cospi_28_64);
-  const __m256i k__cospi_p28_p04 = pair256_set_epi16(+cospi_28_64,  cospi_4_64);
-  const __m256i k__cospi_m28_m04 = pair256_set_epi16(-cospi_28_64, -cospi_4_64);
-  const __m256i k__cospi_m12_m20 = pair256_set_epi16(-cospi_12_64, -cospi_20_64);
-  const __m256i k__cospi_p30_p02 = pair256_set_epi16(+cospi_30_64,  cospi_2_64);
-  const __m256i k__cospi_p14_p18 = pair256_set_epi16(+cospi_14_64,  cospi_18_64);
-  const __m256i k__cospi_p22_p10 = pair256_set_epi16(+cospi_22_64,  cospi_10_64);
-  const __m256i k__cospi_p06_p26 = pair256_set_epi16(+cospi_6_64,   cospi_26_64);
-  const __m256i k__cospi_m26_p06 = pair256_set_epi16(-cospi_26_64,  cospi_6_64);
-  const __m256i k__cospi_m10_p22 = pair256_set_epi16(-cospi_10_64,  cospi_22_64);
-  const __m256i k__cospi_m18_p14 = pair256_set_epi16(-cospi_18_64,  cospi_14_64);
-  const __m256i k__cospi_m02_p30 = pair256_set_epi16(-cospi_2_64,   cospi_30_64);
-  const __m256i k__cospi_p31_p01 = pair256_set_epi16(+cospi_31_64,  cospi_1_64);
-  const __m256i k__cospi_p15_p17 = pair256_set_epi16(+cospi_15_64,  cospi_17_64);
-  const __m256i k__cospi_p23_p09 = pair256_set_epi16(+cospi_23_64,  cospi_9_64);
-  const __m256i k__cospi_p07_p25 = pair256_set_epi16(+cospi_7_64,   cospi_25_64);
-  const __m256i k__cospi_m25_p07 = pair256_set_epi16(-cospi_25_64,  cospi_7_64);
-  const __m256i k__cospi_m09_p23 = pair256_set_epi16(-cospi_9_64,   cospi_23_64);
-  const __m256i k__cospi_m17_p15 = pair256_set_epi16(-cospi_17_64,  cospi_15_64);
-  const __m256i k__cospi_m01_p31 = pair256_set_epi16(-cospi_1_64,   cospi_31_64);
-  const __m256i k__cospi_p27_p05 = pair256_set_epi16(+cospi_27_64,  cospi_5_64);
-  const __m256i k__cospi_p11_p21 = pair256_set_epi16(+cospi_11_64,  cospi_21_64);
-  const __m256i k__cospi_p19_p13 = pair256_set_epi16(+cospi_19_64,  cospi_13_64);
-  const __m256i k__cospi_p03_p29 = pair256_set_epi16(+cospi_3_64,   cospi_29_64);
-  const __m256i k__cospi_m29_p03 = pair256_set_epi16(-cospi_29_64,  cospi_3_64);
-  const __m256i k__cospi_m13_p19 = pair256_set_epi16(-cospi_13_64,  cospi_19_64);
-  const __m256i k__cospi_m21_p11 = pair256_set_epi16(-cospi_21_64,  cospi_11_64);
-  const __m256i k__cospi_m05_p27 = pair256_set_epi16(-cospi_5_64,   cospi_27_64);
-  const __m256i k__DCT_CONST_ROUNDING = _mm256_set1_epi32(DCT_CONST_ROUNDING);
-  const __m256i kZero = _mm256_set1_epi16(0);
-  const __m256i kOne  = _mm256_set1_epi16(1);
-  // Do the two transform/transpose passes
-  int pass;
-  for (pass = 0; pass < 2; ++pass) {
-    // We process sixteen columns (transposed rows in second pass) at a time.
-    int column_start;
-    for (column_start = 0; column_start < 32; column_start += 16) {
-      __m256i step1[32];
-      __m256i step2[32];
-      __m256i step3[32];
-      __m256i out[32];
-      // Stage 1
-      // Note: even though all the loads below are aligned, using the aligned
-      //       intrinsic makes the code slightly slower.
-      if (0 == pass) {
-        const int16_t *in  = &input[column_start];
-        // step1[i] =  (in[ 0 * stride] + in[(32 -  1) * stride]) << 2;
-        // Note: the next four blocks could be in a loop. That would help the
-        //       instruction cache but is actually slower.
-        {
-          const int16_t *ina =  in +  0 * str1;
-          const int16_t *inb =  in + 31 * str1;
-          __m256i *step1a = &step1[ 0];
-          __m256i *step1b = &step1[31];
-          const __m256i ina0  = _mm256_loadu_si256((const __m256i *)(ina));
-          const __m256i ina1  = _mm256_loadu_si256((const __m256i *)(ina + str1));
-          const __m256i ina2  = _mm256_loadu_si256((const __m256i *)(ina + str2));
-          const __m256i ina3  = _mm256_loadu_si256((const __m256i *)(ina + str3));
-          const __m256i inb3  = _mm256_loadu_si256((const __m256i *)(inb - str3));
-          const __m256i inb2  = _mm256_loadu_si256((const __m256i *)(inb - str2));
-          const __m256i inb1  = _mm256_loadu_si256((const __m256i *)(inb - str1));
-          const __m256i inb0  = _mm256_loadu_si256((const __m256i *)(inb));
-          step1a[ 0] = _mm256_add_epi16(ina0, inb0);
-          step1a[ 1] = _mm256_add_epi16(ina1, inb1);
-          step1a[ 2] = _mm256_add_epi16(ina2, inb2);
-          step1a[ 3] = _mm256_add_epi16(ina3, inb3);
-          step1b[-3] = _mm256_sub_epi16(ina3, inb3);
-          step1b[-2] = _mm256_sub_epi16(ina2, inb2);
-          step1b[-1] = _mm256_sub_epi16(ina1, inb1);
-          step1b[-0] = _mm256_sub_epi16(ina0, inb0);
-          step1a[ 0] = _mm256_slli_epi16(step1a[ 0], 2);
-          step1a[ 1] = _mm256_slli_epi16(step1a[ 1], 2);
-          step1a[ 2] = _mm256_slli_epi16(step1a[ 2], 2);
-          step1a[ 3] = _mm256_slli_epi16(step1a[ 3], 2);
-          step1b[-3] = _mm256_slli_epi16(step1b[-3], 2);
-          step1b[-2] = _mm256_slli_epi16(step1b[-2], 2);
-          step1b[-1] = _mm256_slli_epi16(step1b[-1], 2);
-          step1b[-0] = _mm256_slli_epi16(step1b[-0], 2);
-        }
-        {
-          const int16_t *ina =  in +  4 * str1;
-          const int16_t *inb =  in + 27 * str1;
-          __m256i *step1a = &step1[ 4];
-          __m256i *step1b = &step1[27];
-          const __m256i ina0  = _mm256_loadu_si256((const __m256i *)(ina));
-          const __m256i ina1  = _mm256_loadu_si256((const __m256i *)(ina + str1));
-          const __m256i ina2  = _mm256_loadu_si256((const __m256i *)(ina + str2));
-          const __m256i ina3  = _mm256_loadu_si256((const __m256i *)(ina + str3));
-          const __m256i inb3  = _mm256_loadu_si256((const __m256i *)(inb - str3));
-          const __m256i inb2  = _mm256_loadu_si256((const __m256i *)(inb - str2));
-          const __m256i inb1  = _mm256_loadu_si256((const __m256i *)(inb - str1));
-          const __m256i inb0  = _mm256_loadu_si256((const __m256i *)(inb));
-          step1a[ 0] = _mm256_add_epi16(ina0, inb0);
-          step1a[ 1] = _mm256_add_epi16(ina1, inb1);
-          step1a[ 2] = _mm256_add_epi16(ina2, inb2);
-          step1a[ 3] = _mm256_add_epi16(ina3, inb3);
-          step1b[-3] = _mm256_sub_epi16(ina3, inb3);
-          step1b[-2] = _mm256_sub_epi16(ina2, inb2);
-          step1b[-1] = _mm256_sub_epi16(ina1, inb1);
-          step1b[-0] = _mm256_sub_epi16(ina0, inb0);
-          step1a[ 0] = _mm256_slli_epi16(step1a[ 0], 2);
-          step1a[ 1] = _mm256_slli_epi16(step1a[ 1], 2);
-          step1a[ 2] = _mm256_slli_epi16(step1a[ 2], 2);
-          step1a[ 3] = _mm256_slli_epi16(step1a[ 3], 2);
-          step1b[-3] = _mm256_slli_epi16(step1b[-3], 2);
-          step1b[-2] = _mm256_slli_epi16(step1b[-2], 2);
-          step1b[-1] = _mm256_slli_epi16(step1b[-1], 2);
-          step1b[-0] = _mm256_slli_epi16(step1b[-0], 2);
-        }
-        {
-          const int16_t *ina =  in +  8 * str1;
-          const int16_t *inb =  in + 23 * str1;
-          __m256i *step1a = &step1[ 8];
-          __m256i *step1b = &step1[23];
-          const __m256i ina0  = _mm256_loadu_si256((const __m256i *)(ina));
-          const __m256i ina1  = _mm256_loadu_si256((const __m256i *)(ina + str1));
-          const __m256i ina2  = _mm256_loadu_si256((const __m256i *)(ina + str2));
-          const __m256i ina3  = _mm256_loadu_si256((const __m256i *)(ina + str3));
-          const __m256i inb3  = _mm256_loadu_si256((const __m256i *)(inb - str3));
-          const __m256i inb2  = _mm256_loadu_si256((const __m256i *)(inb - str2));
-          const __m256i inb1  = _mm256_loadu_si256((const __m256i *)(inb - str1));
-          const __m256i inb0  = _mm256_loadu_si256((const __m256i *)(inb));
-          step1a[ 0] = _mm256_add_epi16(ina0, inb0);
-          step1a[ 1] = _mm256_add_epi16(ina1, inb1);
-          step1a[ 2] = _mm256_add_epi16(ina2, inb2);
-          step1a[ 3] = _mm256_add_epi16(ina3, inb3);
-          step1b[-3] = _mm256_sub_epi16(ina3, inb3);
-          step1b[-2] = _mm256_sub_epi16(ina2, inb2);
-          step1b[-1] = _mm256_sub_epi16(ina1, inb1);
-          step1b[-0] = _mm256_sub_epi16(ina0, inb0);
-          step1a[ 0] = _mm256_slli_epi16(step1a[ 0], 2);
-          step1a[ 1] = _mm256_slli_epi16(step1a[ 1], 2);
-          step1a[ 2] = _mm256_slli_epi16(step1a[ 2], 2);
-          step1a[ 3] = _mm256_slli_epi16(step1a[ 3], 2);
-          step1b[-3] = _mm256_slli_epi16(step1b[-3], 2);
-          step1b[-2] = _mm256_slli_epi16(step1b[-2], 2);
-          step1b[-1] = _mm256_slli_epi16(step1b[-1], 2);
-          step1b[-0] = _mm256_slli_epi16(step1b[-0], 2);
-        }
-        {
-          const int16_t *ina =  in + 12 * str1;
-          const int16_t *inb =  in + 19 * str1;
-          __m256i *step1a = &step1[12];
-          __m256i *step1b = &step1[19];
-          const __m256i ina0  = _mm256_loadu_si256((const __m256i *)(ina));
-          const __m256i ina1  = _mm256_loadu_si256((const __m256i *)(ina + str1));
-          const __m256i ina2  = _mm256_loadu_si256((const __m256i *)(ina + str2));
-          const __m256i ina3  = _mm256_loadu_si256((const __m256i *)(ina + str3));
-          const __m256i inb3  = _mm256_loadu_si256((const __m256i *)(inb - str3));
-          const __m256i inb2  = _mm256_loadu_si256((const __m256i *)(inb - str2));
-          const __m256i inb1  = _mm256_loadu_si256((const __m256i *)(inb - str1));
-          const __m256i inb0  = _mm256_loadu_si256((const __m256i *)(inb));
-          step1a[ 0] = _mm256_add_epi16(ina0, inb0);
-          step1a[ 1] = _mm256_add_epi16(ina1, inb1);
-          step1a[ 2] = _mm256_add_epi16(ina2, inb2);
-          step1a[ 3] = _mm256_add_epi16(ina3, inb3);
-          step1b[-3] = _mm256_sub_epi16(ina3, inb3);
-          step1b[-2] = _mm256_sub_epi16(ina2, inb2);
-          step1b[-1] = _mm256_sub_epi16(ina1, inb1);
-          step1b[-0] = _mm256_sub_epi16(ina0, inb0);
-          step1a[ 0] = _mm256_slli_epi16(step1a[ 0], 2);
-          step1a[ 1] = _mm256_slli_epi16(step1a[ 1], 2);
-          step1a[ 2] = _mm256_slli_epi16(step1a[ 2], 2);
-          step1a[ 3] = _mm256_slli_epi16(step1a[ 3], 2);
-          step1b[-3] = _mm256_slli_epi16(step1b[-3], 2);
-          step1b[-2] = _mm256_slli_epi16(step1b[-2], 2);
-          step1b[-1] = _mm256_slli_epi16(step1b[-1], 2);
-          step1b[-0] = _mm256_slli_epi16(step1b[-0], 2);
-        }
-      } else {
-        int16_t *in = &intermediate[column_start];
-        // step1[i] =  in[ 0 * 32] + in[(32 -  1) * 32];
-        // Note: using the same approach as above to share a common offset
-        //       is counter-productive, as all offsets can be calculated at
-        //       compile time.
-        // Note: the next four blocks could be in a loop. That would help the
-        //       instruction cache but is actually slower.
-        {
-          __m256i in00  = _mm256_loadu_si256((const __m256i *)(in +  0 * 32));
-          __m256i in01  = _mm256_loadu_si256((const __m256i *)(in +  1 * 32));
-          __m256i in02  = _mm256_loadu_si256((const __m256i *)(in +  2 * 32));
-          __m256i in03  = _mm256_loadu_si256((const __m256i *)(in +  3 * 32));
-          __m256i in28  = _mm256_loadu_si256((const __m256i *)(in + 28 * 32));
-          __m256i in29  = _mm256_loadu_si256((const __m256i *)(in + 29 * 32));
-          __m256i in30  = _mm256_loadu_si256((const __m256i *)(in + 30 * 32));
-          __m256i in31  = _mm256_loadu_si256((const __m256i *)(in + 31 * 32));
-          step1[ 0] = _mm256_add_epi16(in00, in31);
-          step1[ 1] = _mm256_add_epi16(in01, in30);
-          step1[ 2] = _mm256_add_epi16(in02, in29);
-          step1[ 3] = _mm256_add_epi16(in03, in28);
-          step1[28] = _mm256_sub_epi16(in03, in28);
-          step1[29] = _mm256_sub_epi16(in02, in29);
-          step1[30] = _mm256_sub_epi16(in01, in30);
-          step1[31] = _mm256_sub_epi16(in00, in31);
-        }
-        {
-          __m256i in04  = _mm256_loadu_si256((const __m256i *)(in +  4 * 32));
-          __m256i in05  = _mm256_loadu_si256((const __m256i *)(in +  5 * 32));
-          __m256i in06  = _mm256_loadu_si256((const __m256i *)(in +  6 * 32));
-          __m256i in07  = _mm256_loadu_si256((const __m256i *)(in +  7 * 32));
-          __m256i in24  = _mm256_loadu_si256((const __m256i *)(in + 24 * 32));
-          __m256i in25  = _mm256_loadu_si256((const __m256i *)(in + 25 * 32));
-          __m256i in26  = _mm256_loadu_si256((const __m256i *)(in + 26 * 32));
-          __m256i in27  = _mm256_loadu_si256((const __m256i *)(in + 27 * 32));
-          step1[ 4] = _mm256_add_epi16(in04, in27);
-          step1[ 5] = _mm256_add_epi16(in05, in26);
-          step1[ 6] = _mm256_add_epi16(in06, in25);
-          step1[ 7] = _mm256_add_epi16(in07, in24);
-          step1[24] = _mm256_sub_epi16(in07, in24);
-          step1[25] = _mm256_sub_epi16(in06, in25);
-          step1[26] = _mm256_sub_epi16(in05, in26);
-          step1[27] = _mm256_sub_epi16(in04, in27);
-        }
-        {
-          __m256i in08  = _mm256_loadu_si256((const __m256i *)(in +  8 * 32));
-          __m256i in09  = _mm256_loadu_si256((const __m256i *)(in +  9 * 32));
-          __m256i in10  = _mm256_loadu_si256((const __m256i *)(in + 10 * 32));
-          __m256i in11  = _mm256_loadu_si256((const __m256i *)(in + 11 * 32));
-          __m256i in20  = _mm256_loadu_si256((const __m256i *)(in + 20 * 32));
-          __m256i in21  = _mm256_loadu_si256((const __m256i *)(in + 21 * 32));
-          __m256i in22  = _mm256_loadu_si256((const __m256i *)(in + 22 * 32));
-          __m256i in23  = _mm256_loadu_si256((const __m256i *)(in + 23 * 32));
-          step1[ 8] = _mm256_add_epi16(in08, in23);
-          step1[ 9] = _mm256_add_epi16(in09, in22);
-          step1[10] = _mm256_add_epi16(in10, in21);
-          step1[11] = _mm256_add_epi16(in11, in20);
-          step1[20] = _mm256_sub_epi16(in11, in20);
-          step1[21] = _mm256_sub_epi16(in10, in21);
-          step1[22] = _mm256_sub_epi16(in09, in22);
-          step1[23] = _mm256_sub_epi16(in08, in23);
-        }
-        {
-          __m256i in12  = _mm256_loadu_si256((const __m256i *)(in + 12 * 32));
-          __m256i in13  = _mm256_loadu_si256((const __m256i *)(in + 13 * 32));
-          __m256i in14  = _mm256_loadu_si256((const __m256i *)(in + 14 * 32));
-          __m256i in15  = _mm256_loadu_si256((const __m256i *)(in + 15 * 32));
-          __m256i in16  = _mm256_loadu_si256((const __m256i *)(in + 16 * 32));
-          __m256i in17  = _mm256_loadu_si256((const __m256i *)(in + 17 * 32));
-          __m256i in18  = _mm256_loadu_si256((const __m256i *)(in + 18 * 32));
-          __m256i in19  = _mm256_loadu_si256((const __m256i *)(in + 19 * 32));
-          step1[12] = _mm256_add_epi16(in12, in19);
-          step1[13] = _mm256_add_epi16(in13, in18);
-          step1[14] = _mm256_add_epi16(in14, in17);
-          step1[15] = _mm256_add_epi16(in15, in16);
-          step1[16] = _mm256_sub_epi16(in15, in16);
-          step1[17] = _mm256_sub_epi16(in14, in17);
-          step1[18] = _mm256_sub_epi16(in13, in18);
-          step1[19] = _mm256_sub_epi16(in12, in19);
-        }
-      }
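
Stage 1 above is, in scalar form, the classic first butterfly (pass 0 shown; pass 1 performs the same add/sub on rows of the intermediate buffer, without the shift):

    // For i = 0..15, processed above in four groups of four rows:
    //   step1[i]      = (in[i * stride] + in[(31 - i) * stride]) << 2;
    //   step1[31 - i] = (in[i * stride] - in[(31 - i) * stride]) << 2;
    // The << 2 is the 4x input up-scaling the C column pass applies.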
-      // Stage 2
-      {
-        step2[ 0] = _mm256_add_epi16(step1[0], step1[15]);
-        step2[ 1] = _mm256_add_epi16(step1[1], step1[14]);
-        step2[ 2] = _mm256_add_epi16(step1[2], step1[13]);
-        step2[ 3] = _mm256_add_epi16(step1[3], step1[12]);
-        step2[ 4] = _mm256_add_epi16(step1[4], step1[11]);
-        step2[ 5] = _mm256_add_epi16(step1[5], step1[10]);
-        step2[ 6] = _mm256_add_epi16(step1[6], step1[ 9]);
-        step2[ 7] = _mm256_add_epi16(step1[7], step1[ 8]);
-        step2[ 8] = _mm256_sub_epi16(step1[7], step1[ 8]);
-        step2[ 9] = _mm256_sub_epi16(step1[6], step1[ 9]);
-        step2[10] = _mm256_sub_epi16(step1[5], step1[10]);
-        step2[11] = _mm256_sub_epi16(step1[4], step1[11]);
-        step2[12] = _mm256_sub_epi16(step1[3], step1[12]);
-        step2[13] = _mm256_sub_epi16(step1[2], step1[13]);
-        step2[14] = _mm256_sub_epi16(step1[1], step1[14]);
-        step2[15] = _mm256_sub_epi16(step1[0], step1[15]);
-      }
-      {
-        const __m256i s2_20_0 = _mm256_unpacklo_epi16(step1[27], step1[20]);
-        const __m256i s2_20_1 = _mm256_unpackhi_epi16(step1[27], step1[20]);
-        const __m256i s2_21_0 = _mm256_unpacklo_epi16(step1[26], step1[21]);
-        const __m256i s2_21_1 = _mm256_unpackhi_epi16(step1[26], step1[21]);
-        const __m256i s2_22_0 = _mm256_unpacklo_epi16(step1[25], step1[22]);
-        const __m256i s2_22_1 = _mm256_unpackhi_epi16(step1[25], step1[22]);
-        const __m256i s2_23_0 = _mm256_unpacklo_epi16(step1[24], step1[23]);
-        const __m256i s2_23_1 = _mm256_unpackhi_epi16(step1[24], step1[23]);
-        const __m256i s2_20_2 = _mm256_madd_epi16(s2_20_0, k__cospi_p16_m16);
-        const __m256i s2_20_3 = _mm256_madd_epi16(s2_20_1, k__cospi_p16_m16);
-        const __m256i s2_21_2 = _mm256_madd_epi16(s2_21_0, k__cospi_p16_m16);
-        const __m256i s2_21_3 = _mm256_madd_epi16(s2_21_1, k__cospi_p16_m16);
-        const __m256i s2_22_2 = _mm256_madd_epi16(s2_22_0, k__cospi_p16_m16);
-        const __m256i s2_22_3 = _mm256_madd_epi16(s2_22_1, k__cospi_p16_m16);
-        const __m256i s2_23_2 = _mm256_madd_epi16(s2_23_0, k__cospi_p16_m16);
-        const __m256i s2_23_3 = _mm256_madd_epi16(s2_23_1, k__cospi_p16_m16);
-        const __m256i s2_24_2 = _mm256_madd_epi16(s2_23_0, k__cospi_p16_p16);
-        const __m256i s2_24_3 = _mm256_madd_epi16(s2_23_1, k__cospi_p16_p16);
-        const __m256i s2_25_2 = _mm256_madd_epi16(s2_22_0, k__cospi_p16_p16);
-        const __m256i s2_25_3 = _mm256_madd_epi16(s2_22_1, k__cospi_p16_p16);
-        const __m256i s2_26_2 = _mm256_madd_epi16(s2_21_0, k__cospi_p16_p16);
-        const __m256i s2_26_3 = _mm256_madd_epi16(s2_21_1, k__cospi_p16_p16);
-        const __m256i s2_27_2 = _mm256_madd_epi16(s2_20_0, k__cospi_p16_p16);
-        const __m256i s2_27_3 = _mm256_madd_epi16(s2_20_1, k__cospi_p16_p16);
-        // dct_const_round_shift
-        const __m256i s2_20_4 = _mm256_add_epi32(s2_20_2, k__DCT_CONST_ROUNDING);
-        const __m256i s2_20_5 = _mm256_add_epi32(s2_20_3, k__DCT_CONST_ROUNDING);
-        const __m256i s2_21_4 = _mm256_add_epi32(s2_21_2, k__DCT_CONST_ROUNDING);
-        const __m256i s2_21_5 = _mm256_add_epi32(s2_21_3, k__DCT_CONST_ROUNDING);
-        const __m256i s2_22_4 = _mm256_add_epi32(s2_22_2, k__DCT_CONST_ROUNDING);
-        const __m256i s2_22_5 = _mm256_add_epi32(s2_22_3, k__DCT_CONST_ROUNDING);
-        const __m256i s2_23_4 = _mm256_add_epi32(s2_23_2, k__DCT_CONST_ROUNDING);
-        const __m256i s2_23_5 = _mm256_add_epi32(s2_23_3, k__DCT_CONST_ROUNDING);
-        const __m256i s2_24_4 = _mm256_add_epi32(s2_24_2, k__DCT_CONST_ROUNDING);
-        const __m256i s2_24_5 = _mm256_add_epi32(s2_24_3, k__DCT_CONST_ROUNDING);
-        const __m256i s2_25_4 = _mm256_add_epi32(s2_25_2, k__DCT_CONST_ROUNDING);
-        const __m256i s2_25_5 = _mm256_add_epi32(s2_25_3, k__DCT_CONST_ROUNDING);
-        const __m256i s2_26_4 = _mm256_add_epi32(s2_26_2, k__DCT_CONST_ROUNDING);
-        const __m256i s2_26_5 = _mm256_add_epi32(s2_26_3, k__DCT_CONST_ROUNDING);
-        const __m256i s2_27_4 = _mm256_add_epi32(s2_27_2, k__DCT_CONST_ROUNDING);
-        const __m256i s2_27_5 = _mm256_add_epi32(s2_27_3, k__DCT_CONST_ROUNDING);
-        const __m256i s2_20_6 = _mm256_srai_epi32(s2_20_4, DCT_CONST_BITS);
-        const __m256i s2_20_7 = _mm256_srai_epi32(s2_20_5, DCT_CONST_BITS);
-        const __m256i s2_21_6 = _mm256_srai_epi32(s2_21_4, DCT_CONST_BITS);
-        const __m256i s2_21_7 = _mm256_srai_epi32(s2_21_5, DCT_CONST_BITS);
-        const __m256i s2_22_6 = _mm256_srai_epi32(s2_22_4, DCT_CONST_BITS);
-        const __m256i s2_22_7 = _mm256_srai_epi32(s2_22_5, DCT_CONST_BITS);
-        const __m256i s2_23_6 = _mm256_srai_epi32(s2_23_4, DCT_CONST_BITS);
-        const __m256i s2_23_7 = _mm256_srai_epi32(s2_23_5, DCT_CONST_BITS);
-        const __m256i s2_24_6 = _mm256_srai_epi32(s2_24_4, DCT_CONST_BITS);
-        const __m256i s2_24_7 = _mm256_srai_epi32(s2_24_5, DCT_CONST_BITS);
-        const __m256i s2_25_6 = _mm256_srai_epi32(s2_25_4, DCT_CONST_BITS);
-        const __m256i s2_25_7 = _mm256_srai_epi32(s2_25_5, DCT_CONST_BITS);
-        const __m256i s2_26_6 = _mm256_srai_epi32(s2_26_4, DCT_CONST_BITS);
-        const __m256i s2_26_7 = _mm256_srai_epi32(s2_26_5, DCT_CONST_BITS);
-        const __m256i s2_27_6 = _mm256_srai_epi32(s2_27_4, DCT_CONST_BITS);
-        const __m256i s2_27_7 = _mm256_srai_epi32(s2_27_5, DCT_CONST_BITS);
-        // Combine
-        step2[20] = _mm256_packs_epi32(s2_20_6, s2_20_7);
-        step2[21] = _mm256_packs_epi32(s2_21_6, s2_21_7);
-        step2[22] = _mm256_packs_epi32(s2_22_6, s2_22_7);
-        step2[23] = _mm256_packs_epi32(s2_23_6, s2_23_7);
-        step2[24] = _mm256_packs_epi32(s2_24_6, s2_24_7);
-        step2[25] = _mm256_packs_epi32(s2_25_6, s2_25_7);
-        step2[26] = _mm256_packs_epi32(s2_26_6, s2_26_7);
-        step2[27] = _mm256_packs_epi32(s2_27_6, s2_27_7);
-      }
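
The unpack/madd/add/srai/packs sequence above is the vector form of dct_const_round_shift on each rotated pair, and it recurs for every rotation in this file. Its scalar shape, assuming the txfm_common.h constants (DCT_CONST_BITS == 14, DCT_CONST_ROUNDING == 1 << 13):

    // For each 32-bit lane built from the (x, y) interleave:
    //   t = x * a + y * b;                               // madd
    //   t = (t + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;  // round + shift
    //   r = saturate t to int16;                         // packs_epi32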
-
-#if !FDCT32x32_HIGH_PRECISION
-      // Damp the magnitude by half so that the intermediate values stay
-      // within the range of 16 bits.
-      if (1 == pass) {
-        __m256i s3_00_0 = _mm256_cmpgt_epi16(kZero,step2[ 0]);
-        __m256i s3_01_0 = _mm256_cmpgt_epi16(kZero,step2[ 1]);
-        __m256i s3_02_0 = _mm256_cmpgt_epi16(kZero,step2[ 2]);
-        __m256i s3_03_0 = _mm256_cmpgt_epi16(kZero,step2[ 3]);
-        __m256i s3_04_0 = _mm256_cmpgt_epi16(kZero,step2[ 4]);
-        __m256i s3_05_0 = _mm256_cmpgt_epi16(kZero,step2[ 5]);
-        __m256i s3_06_0 = _mm256_cmpgt_epi16(kZero,step2[ 6]);
-        __m256i s3_07_0 = _mm256_cmpgt_epi16(kZero,step2[ 7]);
-        __m256i s2_08_0 = _mm256_cmpgt_epi16(kZero,step2[ 8]);
-        __m256i s2_09_0 = _mm256_cmpgt_epi16(kZero,step2[ 9]);
-        __m256i s3_10_0 = _mm256_cmpgt_epi16(kZero,step2[10]);
-        __m256i s3_11_0 = _mm256_cmpgt_epi16(kZero,step2[11]);
-        __m256i s3_12_0 = _mm256_cmpgt_epi16(kZero,step2[12]);
-        __m256i s3_13_0 = _mm256_cmpgt_epi16(kZero,step2[13]);
-        __m256i s2_14_0 = _mm256_cmpgt_epi16(kZero,step2[14]);
-        __m256i s2_15_0 = _mm256_cmpgt_epi16(kZero,step2[15]);
-        __m256i s3_16_0 = _mm256_cmpgt_epi16(kZero,step1[16]);
-        __m256i s3_17_0 = _mm256_cmpgt_epi16(kZero,step1[17]);
-        __m256i s3_18_0 = _mm256_cmpgt_epi16(kZero,step1[18]);
-        __m256i s3_19_0 = _mm256_cmpgt_epi16(kZero,step1[19]);
-        __m256i s3_20_0 = _mm256_cmpgt_epi16(kZero,step2[20]);
-        __m256i s3_21_0 = _mm256_cmpgt_epi16(kZero,step2[21]);
-        __m256i s3_22_0 = _mm256_cmpgt_epi16(kZero,step2[22]);
-        __m256i s3_23_0 = _mm256_cmpgt_epi16(kZero,step2[23]);
-        __m256i s3_24_0 = _mm256_cmpgt_epi16(kZero,step2[24]);
-        __m256i s3_25_0 = _mm256_cmpgt_epi16(kZero,step2[25]);
-        __m256i s3_26_0 = _mm256_cmpgt_epi16(kZero,step2[26]);
-        __m256i s3_27_0 = _mm256_cmpgt_epi16(kZero,step2[27]);
-        __m256i s3_28_0 = _mm256_cmpgt_epi16(kZero,step1[28]);
-        __m256i s3_29_0 = _mm256_cmpgt_epi16(kZero,step1[29]);
-        __m256i s3_30_0 = _mm256_cmpgt_epi16(kZero,step1[30]);
-        __m256i s3_31_0 = _mm256_cmpgt_epi16(kZero,step1[31]);
-
-        step2[ 0] = _mm256_sub_epi16(step2[ 0], s3_00_0);
-        step2[ 1] = _mm256_sub_epi16(step2[ 1], s3_01_0);
-        step2[ 2] = _mm256_sub_epi16(step2[ 2], s3_02_0);
-        step2[ 3] = _mm256_sub_epi16(step2[ 3], s3_03_0);
-        step2[ 4] = _mm256_sub_epi16(step2[ 4], s3_04_0);
-        step2[ 5] = _mm256_sub_epi16(step2[ 5], s3_05_0);
-        step2[ 6] = _mm256_sub_epi16(step2[ 6], s3_06_0);
-        step2[ 7] = _mm256_sub_epi16(step2[ 7], s3_07_0);
-        step2[ 8] = _mm256_sub_epi16(step2[ 8], s2_08_0);
-        step2[ 9] = _mm256_sub_epi16(step2[ 9], s2_09_0);
-        step2[10] = _mm256_sub_epi16(step2[10], s3_10_0);
-        step2[11] = _mm256_sub_epi16(step2[11], s3_11_0);
-        step2[12] = _mm256_sub_epi16(step2[12], s3_12_0);
-        step2[13] = _mm256_sub_epi16(step2[13], s3_13_0);
-        step2[14] = _mm256_sub_epi16(step2[14], s2_14_0);
-        step2[15] = _mm256_sub_epi16(step2[15], s2_15_0);
-        step1[16] = _mm256_sub_epi16(step1[16], s3_16_0);
-        step1[17] = _mm256_sub_epi16(step1[17], s3_17_0);
-        step1[18] = _mm256_sub_epi16(step1[18], s3_18_0);
-        step1[19] = _mm256_sub_epi16(step1[19], s3_19_0);
-        step2[20] = _mm256_sub_epi16(step2[20], s3_20_0);
-        step2[21] = _mm256_sub_epi16(step2[21], s3_21_0);
-        step2[22] = _mm256_sub_epi16(step2[22], s3_22_0);
-        step2[23] = _mm256_sub_epi16(step2[23], s3_23_0);
-        step2[24] = _mm256_sub_epi16(step2[24], s3_24_0);
-        step2[25] = _mm256_sub_epi16(step2[25], s3_25_0);
-        step2[26] = _mm256_sub_epi16(step2[26], s3_26_0);
-        step2[27] = _mm256_sub_epi16(step2[27], s3_27_0);
-        step1[28] = _mm256_sub_epi16(step1[28], s3_28_0);
-        step1[29] = _mm256_sub_epi16(step1[29], s3_29_0);
-        step1[30] = _mm256_sub_epi16(step1[30], s3_30_0);
-        step1[31] = _mm256_sub_epi16(step1[31], s3_31_0);
-
-        step2[ 0] = _mm256_add_epi16(step2[ 0], kOne);
-        step2[ 1] = _mm256_add_epi16(step2[ 1], kOne);
-        step2[ 2] = _mm256_add_epi16(step2[ 2], kOne);
-        step2[ 3] = _mm256_add_epi16(step2[ 3], kOne);
-        step2[ 4] = _mm256_add_epi16(step2[ 4], kOne);
-        step2[ 5] = _mm256_add_epi16(step2[ 5], kOne);
-        step2[ 6] = _mm256_add_epi16(step2[ 6], kOne);
-        step2[ 7] = _mm256_add_epi16(step2[ 7], kOne);
-        step2[ 8] = _mm256_add_epi16(step2[ 8], kOne);
-        step2[ 9] = _mm256_add_epi16(step2[ 9], kOne);
-        step2[10] = _mm256_add_epi16(step2[10], kOne);
-        step2[11] = _mm256_add_epi16(step2[11], kOne);
-        step2[12] = _mm256_add_epi16(step2[12], kOne);
-        step2[13] = _mm256_add_epi16(step2[13], kOne);
-        step2[14] = _mm256_add_epi16(step2[14], kOne);
-        step2[15] = _mm256_add_epi16(step2[15], kOne);
-        step1[16] = _mm256_add_epi16(step1[16], kOne);
-        step1[17] = _mm256_add_epi16(step1[17], kOne);
-        step1[18] = _mm256_add_epi16(step1[18], kOne);
-        step1[19] = _mm256_add_epi16(step1[19], kOne);
-        step2[20] = _mm256_add_epi16(step2[20], kOne);
-        step2[21] = _mm256_add_epi16(step2[21], kOne);
-        step2[22] = _mm256_add_epi16(step2[22], kOne);
-        step2[23] = _mm256_add_epi16(step2[23], kOne);
-        step2[24] = _mm256_add_epi16(step2[24], kOne);
-        step2[25] = _mm256_add_epi16(step2[25], kOne);
-        step2[26] = _mm256_add_epi16(step2[26], kOne);
-        step2[27] = _mm256_add_epi16(step2[27], kOne);
-        step1[28] = _mm256_add_epi16(step1[28], kOne);
-        step1[29] = _mm256_add_epi16(step1[29], kOne);
-        step1[30] = _mm256_add_epi16(step1[30], kOne);
-        step1[31] = _mm256_add_epi16(step1[31], kOne);
-
-        step2[ 0] = _mm256_srai_epi16(step2[ 0], 2);
-        step2[ 1] = _mm256_srai_epi16(step2[ 1], 2);
-        step2[ 2] = _mm256_srai_epi16(step2[ 2], 2);
-        step2[ 3] = _mm256_srai_epi16(step2[ 3], 2);
-        step2[ 4] = _mm256_srai_epi16(step2[ 4], 2);
-        step2[ 5] = _mm256_srai_epi16(step2[ 5], 2);
-        step2[ 6] = _mm256_srai_epi16(step2[ 6], 2);
-        step2[ 7] = _mm256_srai_epi16(step2[ 7], 2);
-        step2[ 8] = _mm256_srai_epi16(step2[ 8], 2);
-        step2[ 9] = _mm256_srai_epi16(step2[ 9], 2);
-        step2[10] = _mm256_srai_epi16(step2[10], 2);
-        step2[11] = _mm256_srai_epi16(step2[11], 2);
-        step2[12] = _mm256_srai_epi16(step2[12], 2);
-        step2[13] = _mm256_srai_epi16(step2[13], 2);
-        step2[14] = _mm256_srai_epi16(step2[14], 2);
-        step2[15] = _mm256_srai_epi16(step2[15], 2);
-        step1[16] = _mm256_srai_epi16(step1[16], 2);
-        step1[17] = _mm256_srai_epi16(step1[17], 2);
-        step1[18] = _mm256_srai_epi16(step1[18], 2);
-        step1[19] = _mm256_srai_epi16(step1[19], 2);
-        step2[20] = _mm256_srai_epi16(step2[20], 2);
-        step2[21] = _mm256_srai_epi16(step2[21], 2);
-        step2[22] = _mm256_srai_epi16(step2[22], 2);
-        step2[23] = _mm256_srai_epi16(step2[23], 2);
-        step2[24] = _mm256_srai_epi16(step2[24], 2);
-        step2[25] = _mm256_srai_epi16(step2[25], 2);
-        step2[26] = _mm256_srai_epi16(step2[26], 2);
-        step2[27] = _mm256_srai_epi16(step2[27], 2);
-        step1[28] = _mm256_srai_epi16(step1[28], 2);
-        step1[29] = _mm256_srai_epi16(step1[29], 2);
-        step1[30] = _mm256_srai_epi16(step1[30], 2);
-        step1[31] = _mm256_srai_epi16(step1[31], 2);
-      }
-#endif
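
The cmpgt/sub/add/srai sequence above is branch-free rounding: the compare mask is -1 exactly in the negative lanes, so subtracting it adds 1 there. Per lane this computes

    x = (x + 1 + (x < 0)) >> 2;

i.e. the same row-pass rounding as the C version, keeping the pass-2 intermediates within 16 bits.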
-
-#if FDCT32x32_HIGH_PRECISION
-      if (pass == 0) {
-#endif
-      // Stage 3
-      {
-        step3[0] = _mm256_add_epi16(step2[(8 - 1)], step2[0]);
-        step3[1] = _mm256_add_epi16(step2[(8 - 2)], step2[1]);
-        step3[2] = _mm256_add_epi16(step2[(8 - 3)], step2[2]);
-        step3[3] = _mm256_add_epi16(step2[(8 - 4)], step2[3]);
-        step3[4] = _mm256_sub_epi16(step2[(8 - 5)], step2[4]);
-        step3[5] = _mm256_sub_epi16(step2[(8 - 6)], step2[5]);
-        step3[6] = _mm256_sub_epi16(step2[(8 - 7)], step2[6]);
-        step3[7] = _mm256_sub_epi16(step2[(8 - 8)], step2[7]);
-      }
-      {
-        const __m256i s3_10_0 = _mm256_unpacklo_epi16(step2[13], step2[10]);
-        const __m256i s3_10_1 = _mm256_unpackhi_epi16(step2[13], step2[10]);
-        const __m256i s3_11_0 = _mm256_unpacklo_epi16(step2[12], step2[11]);
-        const __m256i s3_11_1 = _mm256_unpackhi_epi16(step2[12], step2[11]);
-        const __m256i s3_10_2 = _mm256_madd_epi16(s3_10_0, k__cospi_p16_m16);
-        const __m256i s3_10_3 = _mm256_madd_epi16(s3_10_1, k__cospi_p16_m16);
-        const __m256i s3_11_2 = _mm256_madd_epi16(s3_11_0, k__cospi_p16_m16);
-        const __m256i s3_11_3 = _mm256_madd_epi16(s3_11_1, k__cospi_p16_m16);
-        const __m256i s3_12_2 = _mm256_madd_epi16(s3_11_0, k__cospi_p16_p16);
-        const __m256i s3_12_3 = _mm256_madd_epi16(s3_11_1, k__cospi_p16_p16);
-        const __m256i s3_13_2 = _mm256_madd_epi16(s3_10_0, k__cospi_p16_p16);
-        const __m256i s3_13_3 = _mm256_madd_epi16(s3_10_1, k__cospi_p16_p16);
-        // dct_const_round_shift
-        const __m256i s3_10_4 = _mm256_add_epi32(s3_10_2, k__DCT_CONST_ROUNDING);
-        const __m256i s3_10_5 = _mm256_add_epi32(s3_10_3, k__DCT_CONST_ROUNDING);
-        const __m256i s3_11_4 = _mm256_add_epi32(s3_11_2, k__DCT_CONST_ROUNDING);
-        const __m256i s3_11_5 = _mm256_add_epi32(s3_11_3, k__DCT_CONST_ROUNDING);
-        const __m256i s3_12_4 = _mm256_add_epi32(s3_12_2, k__DCT_CONST_ROUNDING);
-        const __m256i s3_12_5 = _mm256_add_epi32(s3_12_3, k__DCT_CONST_ROUNDING);
-        const __m256i s3_13_4 = _mm256_add_epi32(s3_13_2, k__DCT_CONST_ROUNDING);
-        const __m256i s3_13_5 = _mm256_add_epi32(s3_13_3, k__DCT_CONST_ROUNDING);
-        const __m256i s3_10_6 = _mm256_srai_epi32(s3_10_4, DCT_CONST_BITS);
-        const __m256i s3_10_7 = _mm256_srai_epi32(s3_10_5, DCT_CONST_BITS);
-        const __m256i s3_11_6 = _mm256_srai_epi32(s3_11_4, DCT_CONST_BITS);
-        const __m256i s3_11_7 = _mm256_srai_epi32(s3_11_5, DCT_CONST_BITS);
-        const __m256i s3_12_6 = _mm256_srai_epi32(s3_12_4, DCT_CONST_BITS);
-        const __m256i s3_12_7 = _mm256_srai_epi32(s3_12_5, DCT_CONST_BITS);
-        const __m256i s3_13_6 = _mm256_srai_epi32(s3_13_4, DCT_CONST_BITS);
-        const __m256i s3_13_7 = _mm256_srai_epi32(s3_13_5, DCT_CONST_BITS);
-        // Combine
-        step3[10] = _mm256_packs_epi32(s3_10_6, s3_10_7);
-        step3[11] = _mm256_packs_epi32(s3_11_6, s3_11_7);
-        step3[12] = _mm256_packs_epi32(s3_12_6, s3_12_7);
-        step3[13] = _mm256_packs_epi32(s3_13_6, s3_13_7);
-      }
-      {
-        step3[16] = _mm256_add_epi16(step2[23], step1[16]);
-        step3[17] = _mm256_add_epi16(step2[22], step1[17]);
-        step3[18] = _mm256_add_epi16(step2[21], step1[18]);
-        step3[19] = _mm256_add_epi16(step2[20], step1[19]);
-        step3[20] = _mm256_sub_epi16(step1[19], step2[20]);
-        step3[21] = _mm256_sub_epi16(step1[18], step2[21]);
-        step3[22] = _mm256_sub_epi16(step1[17], step2[22]);
-        step3[23] = _mm256_sub_epi16(step1[16], step2[23]);
-        step3[24] = _mm256_sub_epi16(step1[31], step2[24]);
-        step3[25] = _mm256_sub_epi16(step1[30], step2[25]);
-        step3[26] = _mm256_sub_epi16(step1[29], step2[26]);
-        step3[27] = _mm256_sub_epi16(step1[28], step2[27]);
-        step3[28] = _mm256_add_epi16(step2[27], step1[28]);
-        step3[29] = _mm256_add_epi16(step2[26], step1[29]);
-        step3[30] = _mm256_add_epi16(step2[25], step1[30]);
-        step3[31] = _mm256_add_epi16(step2[24], step1[31]);
-      }
-
-      // Stage 4
-      {
-        step1[ 0] = _mm256_add_epi16(step3[ 3], step3[ 0]);
-        step1[ 1] = _mm256_add_epi16(step3[ 2], step3[ 1]);
-        step1[ 2] = _mm256_sub_epi16(step3[ 1], step3[ 2]);
-        step1[ 3] = _mm256_sub_epi16(step3[ 0], step3[ 3]);
-        step1[ 8] = _mm256_add_epi16(step3[11], step2[ 8]);
-        step1[ 9] = _mm256_add_epi16(step3[10], step2[ 9]);
-        step1[10] = _mm256_sub_epi16(step2[ 9], step3[10]);
-        step1[11] = _mm256_sub_epi16(step2[ 8], step3[11]);
-        step1[12] = _mm256_sub_epi16(step2[15], step3[12]);
-        step1[13] = _mm256_sub_epi16(step2[14], step3[13]);
-        step1[14] = _mm256_add_epi16(step3[13], step2[14]);
-        step1[15] = _mm256_add_epi16(step3[12], step2[15]);
-      }
-      {
-        const __m256i s1_05_0 = _mm256_unpacklo_epi16(step3[6], step3[5]);
-        const __m256i s1_05_1 = _mm256_unpackhi_epi16(step3[6], step3[5]);
-        const __m256i s1_05_2 = _mm256_madd_epi16(s1_05_0, k__cospi_p16_m16);
-        const __m256i s1_05_3 = _mm256_madd_epi16(s1_05_1, k__cospi_p16_m16);
-        const __m256i s1_06_2 = _mm256_madd_epi16(s1_05_0, k__cospi_p16_p16);
-        const __m256i s1_06_3 = _mm256_madd_epi16(s1_05_1, k__cospi_p16_p16);
-        // dct_const_round_shift
-        const __m256i s1_05_4 = _mm256_add_epi32(s1_05_2, k__DCT_CONST_ROUNDING);
-        const __m256i s1_05_5 = _mm256_add_epi32(s1_05_3, k__DCT_CONST_ROUNDING);
-        const __m256i s1_06_4 = _mm256_add_epi32(s1_06_2, k__DCT_CONST_ROUNDING);
-        const __m256i s1_06_5 = _mm256_add_epi32(s1_06_3, k__DCT_CONST_ROUNDING);
-        const __m256i s1_05_6 = _mm256_srai_epi32(s1_05_4, DCT_CONST_BITS);
-        const __m256i s1_05_7 = _mm256_srai_epi32(s1_05_5, DCT_CONST_BITS);
-        const __m256i s1_06_6 = _mm256_srai_epi32(s1_06_4, DCT_CONST_BITS);
-        const __m256i s1_06_7 = _mm256_srai_epi32(s1_06_5, DCT_CONST_BITS);
-        // Combine
-        step1[5] = _mm256_packs_epi32(s1_05_6, s1_05_7);
-        step1[6] = _mm256_packs_epi32(s1_06_6, s1_06_7);
-      }
-      {
-        const __m256i s1_18_0 = _mm256_unpacklo_epi16(step3[18], step3[29]);
-        const __m256i s1_18_1 = _mm256_unpackhi_epi16(step3[18], step3[29]);
-        const __m256i s1_19_0 = _mm256_unpacklo_epi16(step3[19], step3[28]);
-        const __m256i s1_19_1 = _mm256_unpackhi_epi16(step3[19], step3[28]);
-        const __m256i s1_20_0 = _mm256_unpacklo_epi16(step3[20], step3[27]);
-        const __m256i s1_20_1 = _mm256_unpackhi_epi16(step3[20], step3[27]);
-        const __m256i s1_21_0 = _mm256_unpacklo_epi16(step3[21], step3[26]);
-        const __m256i s1_21_1 = _mm256_unpackhi_epi16(step3[21], step3[26]);
-        const __m256i s1_18_2 = _mm256_madd_epi16(s1_18_0, k__cospi_m08_p24);
-        const __m256i s1_18_3 = _mm256_madd_epi16(s1_18_1, k__cospi_m08_p24);
-        const __m256i s1_19_2 = _mm256_madd_epi16(s1_19_0, k__cospi_m08_p24);
-        const __m256i s1_19_3 = _mm256_madd_epi16(s1_19_1, k__cospi_m08_p24);
-        const __m256i s1_20_2 = _mm256_madd_epi16(s1_20_0, k__cospi_m24_m08);
-        const __m256i s1_20_3 = _mm256_madd_epi16(s1_20_1, k__cospi_m24_m08);
-        const __m256i s1_21_2 = _mm256_madd_epi16(s1_21_0, k__cospi_m24_m08);
-        const __m256i s1_21_3 = _mm256_madd_epi16(s1_21_1, k__cospi_m24_m08);
-        const __m256i s1_26_2 = _mm256_madd_epi16(s1_21_0, k__cospi_m08_p24);
-        const __m256i s1_26_3 = _mm256_madd_epi16(s1_21_1, k__cospi_m08_p24);
-        const __m256i s1_27_2 = _mm256_madd_epi16(s1_20_0, k__cospi_m08_p24);
-        const __m256i s1_27_3 = _mm256_madd_epi16(s1_20_1, k__cospi_m08_p24);
-        const __m256i s1_28_2 = _mm256_madd_epi16(s1_19_0, k__cospi_p24_p08);
-        const __m256i s1_28_3 = _mm256_madd_epi16(s1_19_1, k__cospi_p24_p08);
-        const __m256i s1_29_2 = _mm256_madd_epi16(s1_18_0, k__cospi_p24_p08);
-        const __m256i s1_29_3 = _mm256_madd_epi16(s1_18_1, k__cospi_p24_p08);
-        // dct_const_round_shift
-        const __m256i s1_18_4 = _mm256_add_epi32(s1_18_2, k__DCT_CONST_ROUNDING);
-        const __m256i s1_18_5 = _mm256_add_epi32(s1_18_3, k__DCT_CONST_ROUNDING);
-        const __m256i s1_19_4 = _mm256_add_epi32(s1_19_2, k__DCT_CONST_ROUNDING);
-        const __m256i s1_19_5 = _mm256_add_epi32(s1_19_3, k__DCT_CONST_ROUNDING);
-        const __m256i s1_20_4 = _mm256_add_epi32(s1_20_2, k__DCT_CONST_ROUNDING);
-        const __m256i s1_20_5 = _mm256_add_epi32(s1_20_3, k__DCT_CONST_ROUNDING);
-        const __m256i s1_21_4 = _mm256_add_epi32(s1_21_2, k__DCT_CONST_ROUNDING);
-        const __m256i s1_21_5 = _mm256_add_epi32(s1_21_3, k__DCT_CONST_ROUNDING);
-        const __m256i s1_26_4 = _mm256_add_epi32(s1_26_2, k__DCT_CONST_ROUNDING);
-        const __m256i s1_26_5 = _mm256_add_epi32(s1_26_3, k__DCT_CONST_ROUNDING);
-        const __m256i s1_27_4 = _mm256_add_epi32(s1_27_2, k__DCT_CONST_ROUNDING);
-        const __m256i s1_27_5 = _mm256_add_epi32(s1_27_3, k__DCT_CONST_ROUNDING);
-        const __m256i s1_28_4 = _mm256_add_epi32(s1_28_2, k__DCT_CONST_ROUNDING);
-        const __m256i s1_28_5 = _mm256_add_epi32(s1_28_3, k__DCT_CONST_ROUNDING);
-        const __m256i s1_29_4 = _mm256_add_epi32(s1_29_2, k__DCT_CONST_ROUNDING);
-        const __m256i s1_29_5 = _mm256_add_epi32(s1_29_3, k__DCT_CONST_ROUNDING);
-        const __m256i s1_18_6 = _mm256_srai_epi32(s1_18_4, DCT_CONST_BITS);
-        const __m256i s1_18_7 = _mm256_srai_epi32(s1_18_5, DCT_CONST_BITS);
-        const __m256i s1_19_6 = _mm256_srai_epi32(s1_19_4, DCT_CONST_BITS);
-        const __m256i s1_19_7 = _mm256_srai_epi32(s1_19_5, DCT_CONST_BITS);
-        const __m256i s1_20_6 = _mm256_srai_epi32(s1_20_4, DCT_CONST_BITS);
-        const __m256i s1_20_7 = _mm256_srai_epi32(s1_20_5, DCT_CONST_BITS);
-        const __m256i s1_21_6 = _mm256_srai_epi32(s1_21_4, DCT_CONST_BITS);
-        const __m256i s1_21_7 = _mm256_srai_epi32(s1_21_5, DCT_CONST_BITS);
-        const __m256i s1_26_6 = _mm256_srai_epi32(s1_26_4, DCT_CONST_BITS);
-        const __m256i s1_26_7 = _mm256_srai_epi32(s1_26_5, DCT_CONST_BITS);
-        const __m256i s1_27_6 = _mm256_srai_epi32(s1_27_4, DCT_CONST_BITS);
-        const __m256i s1_27_7 = _mm256_srai_epi32(s1_27_5, DCT_CONST_BITS);
-        const __m256i s1_28_6 = _mm256_srai_epi32(s1_28_4, DCT_CONST_BITS);
-        const __m256i s1_28_7 = _mm256_srai_epi32(s1_28_5, DCT_CONST_BITS);
-        const __m256i s1_29_6 = _mm256_srai_epi32(s1_29_4, DCT_CONST_BITS);
-        const __m256i s1_29_7 = _mm256_srai_epi32(s1_29_5, DCT_CONST_BITS);
-        // Combine
-        step1[18] = _mm256_packs_epi32(s1_18_6, s1_18_7);
-        step1[19] = _mm256_packs_epi32(s1_19_6, s1_19_7);
-        step1[20] = _mm256_packs_epi32(s1_20_6, s1_20_7);
-        step1[21] = _mm256_packs_epi32(s1_21_6, s1_21_7);
-        step1[26] = _mm256_packs_epi32(s1_26_6, s1_26_7);
-        step1[27] = _mm256_packs_epi32(s1_27_6, s1_27_7);
-        step1[28] = _mm256_packs_epi32(s1_28_6, s1_28_7);
-        step1[29] = _mm256_packs_epi32(s1_29_6, s1_29_7);
-      }
-      // Stage 5
-      {
-        step2[4] = _mm256_add_epi16(step1[5], step3[4]);
-        step2[5] = _mm256_sub_epi16(step3[4], step1[5]);
-        step2[6] = _mm256_sub_epi16(step3[7], step1[6]);
-        step2[7] = _mm256_add_epi16(step1[6], step3[7]);
-      }
-      {
-        const __m256i out_00_0 = _mm256_unpacklo_epi16(step1[0], step1[1]);
-        const __m256i out_00_1 = _mm256_unpackhi_epi16(step1[0], step1[1]);
-        const __m256i out_08_0 = _mm256_unpacklo_epi16(step1[2], step1[3]);
-        const __m256i out_08_1 = _mm256_unpackhi_epi16(step1[2], step1[3]);
-        const __m256i out_00_2 = _mm256_madd_epi16(out_00_0, k__cospi_p16_p16);
-        const __m256i out_00_3 = _mm256_madd_epi16(out_00_1, k__cospi_p16_p16);
-        const __m256i out_16_2 = _mm256_madd_epi16(out_00_0, k__cospi_p16_m16);
-        const __m256i out_16_3 = _mm256_madd_epi16(out_00_1, k__cospi_p16_m16);
-        const __m256i out_08_2 = _mm256_madd_epi16(out_08_0, k__cospi_p24_p08);
-        const __m256i out_08_3 = _mm256_madd_epi16(out_08_1, k__cospi_p24_p08);
-        const __m256i out_24_2 = _mm256_madd_epi16(out_08_0, k__cospi_m08_p24);
-        const __m256i out_24_3 = _mm256_madd_epi16(out_08_1, k__cospi_m08_p24);
-        // dct_const_round_shift
-        const __m256i out_00_4 = _mm256_add_epi32(out_00_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_00_5 = _mm256_add_epi32(out_00_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_16_4 = _mm256_add_epi32(out_16_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_16_5 = _mm256_add_epi32(out_16_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_08_4 = _mm256_add_epi32(out_08_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_08_5 = _mm256_add_epi32(out_08_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_24_4 = _mm256_add_epi32(out_24_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_24_5 = _mm256_add_epi32(out_24_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_00_6 = _mm256_srai_epi32(out_00_4, DCT_CONST_BITS);
-        const __m256i out_00_7 = _mm256_srai_epi32(out_00_5, DCT_CONST_BITS);
-        const __m256i out_16_6 = _mm256_srai_epi32(out_16_4, DCT_CONST_BITS);
-        const __m256i out_16_7 = _mm256_srai_epi32(out_16_5, DCT_CONST_BITS);
-        const __m256i out_08_6 = _mm256_srai_epi32(out_08_4, DCT_CONST_BITS);
-        const __m256i out_08_7 = _mm256_srai_epi32(out_08_5, DCT_CONST_BITS);
-        const __m256i out_24_6 = _mm256_srai_epi32(out_24_4, DCT_CONST_BITS);
-        const __m256i out_24_7 = _mm256_srai_epi32(out_24_5, DCT_CONST_BITS);
-        // Combine
-        out[ 0] = _mm256_packs_epi32(out_00_6, out_00_7);
-        out[16] = _mm256_packs_epi32(out_16_6, out_16_7);
-        out[ 8] = _mm256_packs_epi32(out_08_6, out_08_7);
-        out[24] = _mm256_packs_epi32(out_24_6, out_24_7);
-      }
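
Read back in scalar terms, this block emits the four lowest-frequency outputs of the 1-D transform (dct_const_round_shift abbreviated to round_shift):

    out[ 0] = round_shift((step1[0] + step1[1]) * cospi_16_64);
    out[16] = round_shift((step1[0] - step1[1]) * cospi_16_64);
    out[ 8] = round_shift(step1[2] * cospi_24_64 + step1[3] * cospi_8_64);
    out[24] = round_shift(step1[3] * cospi_24_64 - step1[2] * cospi_8_64);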
-      {
-        const __m256i s2_09_0 = _mm256_unpacklo_epi16(step1[ 9], step1[14]);
-        const __m256i s2_09_1 = _mm256_unpackhi_epi16(step1[ 9], step1[14]);
-        const __m256i s2_10_0 = _mm256_unpacklo_epi16(step1[10], step1[13]);
-        const __m256i s2_10_1 = _mm256_unpackhi_epi16(step1[10], step1[13]);
-        const __m256i s2_09_2 = _mm256_madd_epi16(s2_09_0, k__cospi_m08_p24);
-        const __m256i s2_09_3 = _mm256_madd_epi16(s2_09_1, k__cospi_m08_p24);
-        const __m256i s2_10_2 = _mm256_madd_epi16(s2_10_0, k__cospi_m24_m08);
-        const __m256i s2_10_3 = _mm256_madd_epi16(s2_10_1, k__cospi_m24_m08);
-        const __m256i s2_13_2 = _mm256_madd_epi16(s2_10_0, k__cospi_m08_p24);
-        const __m256i s2_13_3 = _mm256_madd_epi16(s2_10_1, k__cospi_m08_p24);
-        const __m256i s2_14_2 = _mm256_madd_epi16(s2_09_0, k__cospi_p24_p08);
-        const __m256i s2_14_3 = _mm256_madd_epi16(s2_09_1, k__cospi_p24_p08);
-        // dct_const_round_shift
-        const __m256i s2_09_4 = _mm256_add_epi32(s2_09_2, k__DCT_CONST_ROUNDING);
-        const __m256i s2_09_5 = _mm256_add_epi32(s2_09_3, k__DCT_CONST_ROUNDING);
-        const __m256i s2_10_4 = _mm256_add_epi32(s2_10_2, k__DCT_CONST_ROUNDING);
-        const __m256i s2_10_5 = _mm256_add_epi32(s2_10_3, k__DCT_CONST_ROUNDING);
-        const __m256i s2_13_4 = _mm256_add_epi32(s2_13_2, k__DCT_CONST_ROUNDING);
-        const __m256i s2_13_5 = _mm256_add_epi32(s2_13_3, k__DCT_CONST_ROUNDING);
-        const __m256i s2_14_4 = _mm256_add_epi32(s2_14_2, k__DCT_CONST_ROUNDING);
-        const __m256i s2_14_5 = _mm256_add_epi32(s2_14_3, k__DCT_CONST_ROUNDING);
-        const __m256i s2_09_6 = _mm256_srai_epi32(s2_09_4, DCT_CONST_BITS);
-        const __m256i s2_09_7 = _mm256_srai_epi32(s2_09_5, DCT_CONST_BITS);
-        const __m256i s2_10_6 = _mm256_srai_epi32(s2_10_4, DCT_CONST_BITS);
-        const __m256i s2_10_7 = _mm256_srai_epi32(s2_10_5, DCT_CONST_BITS);
-        const __m256i s2_13_6 = _mm256_srai_epi32(s2_13_4, DCT_CONST_BITS);
-        const __m256i s2_13_7 = _mm256_srai_epi32(s2_13_5, DCT_CONST_BITS);
-        const __m256i s2_14_6 = _mm256_srai_epi32(s2_14_4, DCT_CONST_BITS);
-        const __m256i s2_14_7 = _mm256_srai_epi32(s2_14_5, DCT_CONST_BITS);
-        // Combine
-        step2[ 9] = _mm256_packs_epi32(s2_09_6, s2_09_7);
-        step2[10] = _mm256_packs_epi32(s2_10_6, s2_10_7);
-        step2[13] = _mm256_packs_epi32(s2_13_6, s2_13_7);
-        step2[14] = _mm256_packs_epi32(s2_14_6, s2_14_7);
-      }
-      {
-        step2[16] = _mm256_add_epi16(step1[19], step3[16]);
-        step2[17] = _mm256_add_epi16(step1[18], step3[17]);
-        step2[18] = _mm256_sub_epi16(step3[17], step1[18]);
-        step2[19] = _mm256_sub_epi16(step3[16], step1[19]);
-        step2[20] = _mm256_sub_epi16(step3[23], step1[20]);
-        step2[21] = _mm256_sub_epi16(step3[22], step1[21]);
-        step2[22] = _mm256_add_epi16(step1[21], step3[22]);
-        step2[23] = _mm256_add_epi16(step1[20], step3[23]);
-        step2[24] = _mm256_add_epi16(step1[27], step3[24]);
-        step2[25] = _mm256_add_epi16(step1[26], step3[25]);
-        step2[26] = _mm256_sub_epi16(step3[25], step1[26]);
-        step2[27] = _mm256_sub_epi16(step3[24], step1[27]);
-        step2[28] = _mm256_sub_epi16(step3[31], step1[28]);
-        step2[29] = _mm256_sub_epi16(step3[30], step1[29]);
-        step2[30] = _mm256_add_epi16(step1[29], step3[30]);
-        step2[31] = _mm256_add_epi16(step1[28], step3[31]);
-      }
-      // Stage 6
-      {
-        const __m256i out_04_0 = _mm256_unpacklo_epi16(step2[4], step2[7]);
-        const __m256i out_04_1 = _mm256_unpackhi_epi16(step2[4], step2[7]);
-        const __m256i out_20_0 = _mm256_unpacklo_epi16(step2[5], step2[6]);
-        const __m256i out_20_1 = _mm256_unpackhi_epi16(step2[5], step2[6]);
-        const __m256i out_12_0 = _mm256_unpacklo_epi16(step2[5], step2[6]);
-        const __m256i out_12_1 = _mm256_unpackhi_epi16(step2[5], step2[6]);
-        const __m256i out_28_0 = _mm256_unpacklo_epi16(step2[4], step2[7]);
-        const __m256i out_28_1 = _mm256_unpackhi_epi16(step2[4], step2[7]);
-        const __m256i out_04_2 = _mm256_madd_epi16(out_04_0, k__cospi_p28_p04);
-        const __m256i out_04_3 = _mm256_madd_epi16(out_04_1, k__cospi_p28_p04);
-        const __m256i out_20_2 = _mm256_madd_epi16(out_20_0, k__cospi_p12_p20);
-        const __m256i out_20_3 = _mm256_madd_epi16(out_20_1, k__cospi_p12_p20);
-        const __m256i out_12_2 = _mm256_madd_epi16(out_12_0, k__cospi_m20_p12);
-        const __m256i out_12_3 = _mm256_madd_epi16(out_12_1, k__cospi_m20_p12);
-        const __m256i out_28_2 = _mm256_madd_epi16(out_28_0, k__cospi_m04_p28);
-        const __m256i out_28_3 = _mm256_madd_epi16(out_28_1, k__cospi_m04_p28);
-        // dct_const_round_shift
-        const __m256i out_04_4 = _mm256_add_epi32(out_04_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_04_5 = _mm256_add_epi32(out_04_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_20_4 = _mm256_add_epi32(out_20_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_20_5 = _mm256_add_epi32(out_20_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_12_4 = _mm256_add_epi32(out_12_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_12_5 = _mm256_add_epi32(out_12_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_28_4 = _mm256_add_epi32(out_28_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_28_5 = _mm256_add_epi32(out_28_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_04_6 = _mm256_srai_epi32(out_04_4, DCT_CONST_BITS);
-        const __m256i out_04_7 = _mm256_srai_epi32(out_04_5, DCT_CONST_BITS);
-        const __m256i out_20_6 = _mm256_srai_epi32(out_20_4, DCT_CONST_BITS);
-        const __m256i out_20_7 = _mm256_srai_epi32(out_20_5, DCT_CONST_BITS);
-        const __m256i out_12_6 = _mm256_srai_epi32(out_12_4, DCT_CONST_BITS);
-        const __m256i out_12_7 = _mm256_srai_epi32(out_12_5, DCT_CONST_BITS);
-        const __m256i out_28_6 = _mm256_srai_epi32(out_28_4, DCT_CONST_BITS);
-        const __m256i out_28_7 = _mm256_srai_epi32(out_28_5, DCT_CONST_BITS);
-        // Combine
-        out[ 4] = _mm256_packs_epi32(out_04_6, out_04_7);
-        out[20] = _mm256_packs_epi32(out_20_6, out_20_7);
-        out[12] = _mm256_packs_epi32(out_12_6, out_12_7);
-        out[28] = _mm256_packs_epi32(out_28_6, out_28_7);
-      }
-      {
-        step3[ 8] = _mm256_add_epi16(step2[ 9], step1[ 8]);
-        step3[ 9] = _mm256_sub_epi16(step1[ 8], step2[ 9]);
-        step3[10] = _mm256_sub_epi16(step1[11], step2[10]);
-        step3[11] = _mm256_add_epi16(step2[10], step1[11]);
-        step3[12] = _mm256_add_epi16(step2[13], step1[12]);
-        step3[13] = _mm256_sub_epi16(step1[12], step2[13]);
-        step3[14] = _mm256_sub_epi16(step1[15], step2[14]);
-        step3[15] = _mm256_add_epi16(step2[14], step1[15]);
-      }
-      {
-        const __m256i s3_17_0 = _mm256_unpacklo_epi16(step2[17], step2[30]);
-        const __m256i s3_17_1 = _mm256_unpackhi_epi16(step2[17], step2[30]);
-        const __m256i s3_18_0 = _mm256_unpacklo_epi16(step2[18], step2[29]);
-        const __m256i s3_18_1 = _mm256_unpackhi_epi16(step2[18], step2[29]);
-        const __m256i s3_21_0 = _mm256_unpacklo_epi16(step2[21], step2[26]);
-        const __m256i s3_21_1 = _mm256_unpackhi_epi16(step2[21], step2[26]);
-        const __m256i s3_22_0 = _mm256_unpacklo_epi16(step2[22], step2[25]);
-        const __m256i s3_22_1 = _mm256_unpackhi_epi16(step2[22], step2[25]);
-        const __m256i s3_17_2 = _mm256_madd_epi16(s3_17_0, k__cospi_m04_p28);
-        const __m256i s3_17_3 = _mm256_madd_epi16(s3_17_1, k__cospi_m04_p28);
-        const __m256i s3_18_2 = _mm256_madd_epi16(s3_18_0, k__cospi_m28_m04);
-        const __m256i s3_18_3 = _mm256_madd_epi16(s3_18_1, k__cospi_m28_m04);
-        const __m256i s3_21_2 = _mm256_madd_epi16(s3_21_0, k__cospi_m20_p12);
-        const __m256i s3_21_3 = _mm256_madd_epi16(s3_21_1, k__cospi_m20_p12);
-        const __m256i s3_22_2 = _mm256_madd_epi16(s3_22_0, k__cospi_m12_m20);
-        const __m256i s3_22_3 = _mm256_madd_epi16(s3_22_1, k__cospi_m12_m20);
-        const __m256i s3_25_2 = _mm256_madd_epi16(s3_22_0, k__cospi_m20_p12);
-        const __m256i s3_25_3 = _mm256_madd_epi16(s3_22_1, k__cospi_m20_p12);
-        const __m256i s3_26_2 = _mm256_madd_epi16(s3_21_0, k__cospi_p12_p20);
-        const __m256i s3_26_3 = _mm256_madd_epi16(s3_21_1, k__cospi_p12_p20);
-        const __m256i s3_29_2 = _mm256_madd_epi16(s3_18_0, k__cospi_m04_p28);
-        const __m256i s3_29_3 = _mm256_madd_epi16(s3_18_1, k__cospi_m04_p28);
-        const __m256i s3_30_2 = _mm256_madd_epi16(s3_17_0, k__cospi_p28_p04);
-        const __m256i s3_30_3 = _mm256_madd_epi16(s3_17_1, k__cospi_p28_p04);
-        // dct_const_round_shift
-        const __m256i s3_17_4 = _mm256_add_epi32(s3_17_2, k__DCT_CONST_ROUNDING);
-        const __m256i s3_17_5 = _mm256_add_epi32(s3_17_3, k__DCT_CONST_ROUNDING);
-        const __m256i s3_18_4 = _mm256_add_epi32(s3_18_2, k__DCT_CONST_ROUNDING);
-        const __m256i s3_18_5 = _mm256_add_epi32(s3_18_3, k__DCT_CONST_ROUNDING);
-        const __m256i s3_21_4 = _mm256_add_epi32(s3_21_2, k__DCT_CONST_ROUNDING);
-        const __m256i s3_21_5 = _mm256_add_epi32(s3_21_3, k__DCT_CONST_ROUNDING);
-        const __m256i s3_22_4 = _mm256_add_epi32(s3_22_2, k__DCT_CONST_ROUNDING);
-        const __m256i s3_22_5 = _mm256_add_epi32(s3_22_3, k__DCT_CONST_ROUNDING);
-        const __m256i s3_17_6 = _mm256_srai_epi32(s3_17_4, DCT_CONST_BITS);
-        const __m256i s3_17_7 = _mm256_srai_epi32(s3_17_5, DCT_CONST_BITS);
-        const __m256i s3_18_6 = _mm256_srai_epi32(s3_18_4, DCT_CONST_BITS);
-        const __m256i s3_18_7 = _mm256_srai_epi32(s3_18_5, DCT_CONST_BITS);
-        const __m256i s3_21_6 = _mm256_srai_epi32(s3_21_4, DCT_CONST_BITS);
-        const __m256i s3_21_7 = _mm256_srai_epi32(s3_21_5, DCT_CONST_BITS);
-        const __m256i s3_22_6 = _mm256_srai_epi32(s3_22_4, DCT_CONST_BITS);
-        const __m256i s3_22_7 = _mm256_srai_epi32(s3_22_5, DCT_CONST_BITS);
-        const __m256i s3_25_4 = _mm256_add_epi32(s3_25_2, k__DCT_CONST_ROUNDING);
-        const __m256i s3_25_5 = _mm256_add_epi32(s3_25_3, k__DCT_CONST_ROUNDING);
-        const __m256i s3_26_4 = _mm256_add_epi32(s3_26_2, k__DCT_CONST_ROUNDING);
-        const __m256i s3_26_5 = _mm256_add_epi32(s3_26_3, k__DCT_CONST_ROUNDING);
-        const __m256i s3_29_4 = _mm256_add_epi32(s3_29_2, k__DCT_CONST_ROUNDING);
-        const __m256i s3_29_5 = _mm256_add_epi32(s3_29_3, k__DCT_CONST_ROUNDING);
-        const __m256i s3_30_4 = _mm256_add_epi32(s3_30_2, k__DCT_CONST_ROUNDING);
-        const __m256i s3_30_5 = _mm256_add_epi32(s3_30_3, k__DCT_CONST_ROUNDING);
-        const __m256i s3_25_6 = _mm256_srai_epi32(s3_25_4, DCT_CONST_BITS);
-        const __m256i s3_25_7 = _mm256_srai_epi32(s3_25_5, DCT_CONST_BITS);
-        const __m256i s3_26_6 = _mm256_srai_epi32(s3_26_4, DCT_CONST_BITS);
-        const __m256i s3_26_7 = _mm256_srai_epi32(s3_26_5, DCT_CONST_BITS);
-        const __m256i s3_29_6 = _mm256_srai_epi32(s3_29_4, DCT_CONST_BITS);
-        const __m256i s3_29_7 = _mm256_srai_epi32(s3_29_5, DCT_CONST_BITS);
-        const __m256i s3_30_6 = _mm256_srai_epi32(s3_30_4, DCT_CONST_BITS);
-        const __m256i s3_30_7 = _mm256_srai_epi32(s3_30_5, DCT_CONST_BITS);
-        // Combine
-        step3[17] = _mm256_packs_epi32(s3_17_6, s3_17_7);
-        step3[18] = _mm256_packs_epi32(s3_18_6, s3_18_7);
-        step3[21] = _mm256_packs_epi32(s3_21_6, s3_21_7);
-        step3[22] = _mm256_packs_epi32(s3_22_6, s3_22_7);
-        // Combine
-        step3[25] = _mm256_packs_epi32(s3_25_6, s3_25_7);
-        step3[26] = _mm256_packs_epi32(s3_26_6, s3_26_7);
-        step3[29] = _mm256_packs_epi32(s3_29_6, s3_29_7);
-        step3[30] = _mm256_packs_epi32(s3_30_6, s3_30_7);
-      }
-      // Stage 7
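-      // (Stage 7 rotates step3 pairs by the odd cosine constants to produce
-      // the outputs with index 2 mod 4, and forms step1[16..31] for the
-      // final stage.)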
-      {
-        const __m256i out_02_0 = _mm256_unpacklo_epi16(step3[ 8], step3[15]);
-        const __m256i out_02_1 = _mm256_unpackhi_epi16(step3[ 8], step3[15]);
-        const __m256i out_18_0 = _mm256_unpacklo_epi16(step3[ 9], step3[14]);
-        const __m256i out_18_1 = _mm256_unpackhi_epi16(step3[ 9], step3[14]);
-        const __m256i out_10_0 = _mm256_unpacklo_epi16(step3[10], step3[13]);
-        const __m256i out_10_1 = _mm256_unpackhi_epi16(step3[10], step3[13]);
-        const __m256i out_26_0 = _mm256_unpacklo_epi16(step3[11], step3[12]);
-        const __m256i out_26_1 = _mm256_unpackhi_epi16(step3[11], step3[12]);
-        const __m256i out_02_2 = _mm256_madd_epi16(out_02_0, k__cospi_p30_p02);
-        const __m256i out_02_3 = _mm256_madd_epi16(out_02_1, k__cospi_p30_p02);
-        const __m256i out_18_2 = _mm256_madd_epi16(out_18_0, k__cospi_p14_p18);
-        const __m256i out_18_3 = _mm256_madd_epi16(out_18_1, k__cospi_p14_p18);
-        const __m256i out_10_2 = _mm256_madd_epi16(out_10_0, k__cospi_p22_p10);
-        const __m256i out_10_3 = _mm256_madd_epi16(out_10_1, k__cospi_p22_p10);
-        const __m256i out_26_2 = _mm256_madd_epi16(out_26_0, k__cospi_p06_p26);
-        const __m256i out_26_3 = _mm256_madd_epi16(out_26_1, k__cospi_p06_p26);
-        const __m256i out_06_2 = _mm256_madd_epi16(out_26_0, k__cospi_m26_p06);
-        const __m256i out_06_3 = _mm256_madd_epi16(out_26_1, k__cospi_m26_p06);
-        const __m256i out_22_2 = _mm256_madd_epi16(out_10_0, k__cospi_m10_p22);
-        const __m256i out_22_3 = _mm256_madd_epi16(out_10_1, k__cospi_m10_p22);
-        const __m256i out_14_2 = _mm256_madd_epi16(out_18_0, k__cospi_m18_p14);
-        const __m256i out_14_3 = _mm256_madd_epi16(out_18_1, k__cospi_m18_p14);
-        const __m256i out_30_2 = _mm256_madd_epi16(out_02_0, k__cospi_m02_p30);
-        const __m256i out_30_3 = _mm256_madd_epi16(out_02_1, k__cospi_m02_p30);
-        // dct_const_round_shift
-        const __m256i out_02_4 = _mm256_add_epi32(out_02_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_02_5 = _mm256_add_epi32(out_02_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_18_4 = _mm256_add_epi32(out_18_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_18_5 = _mm256_add_epi32(out_18_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_10_4 = _mm256_add_epi32(out_10_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_10_5 = _mm256_add_epi32(out_10_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_26_4 = _mm256_add_epi32(out_26_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_26_5 = _mm256_add_epi32(out_26_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_06_4 = _mm256_add_epi32(out_06_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_06_5 = _mm256_add_epi32(out_06_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_22_4 = _mm256_add_epi32(out_22_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_22_5 = _mm256_add_epi32(out_22_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_14_4 = _mm256_add_epi32(out_14_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_14_5 = _mm256_add_epi32(out_14_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_30_4 = _mm256_add_epi32(out_30_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_30_5 = _mm256_add_epi32(out_30_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_02_6 = _mm256_srai_epi32(out_02_4, DCT_CONST_BITS);
-        const __m256i out_02_7 = _mm256_srai_epi32(out_02_5, DCT_CONST_BITS);
-        const __m256i out_18_6 = _mm256_srai_epi32(out_18_4, DCT_CONST_BITS);
-        const __m256i out_18_7 = _mm256_srai_epi32(out_18_5, DCT_CONST_BITS);
-        const __m256i out_10_6 = _mm256_srai_epi32(out_10_4, DCT_CONST_BITS);
-        const __m256i out_10_7 = _mm256_srai_epi32(out_10_5, DCT_CONST_BITS);
-        const __m256i out_26_6 = _mm256_srai_epi32(out_26_4, DCT_CONST_BITS);
-        const __m256i out_26_7 = _mm256_srai_epi32(out_26_5, DCT_CONST_BITS);
-        const __m256i out_06_6 = _mm256_srai_epi32(out_06_4, DCT_CONST_BITS);
-        const __m256i out_06_7 = _mm256_srai_epi32(out_06_5, DCT_CONST_BITS);
-        const __m256i out_22_6 = _mm256_srai_epi32(out_22_4, DCT_CONST_BITS);
-        const __m256i out_22_7 = _mm256_srai_epi32(out_22_5, DCT_CONST_BITS);
-        const __m256i out_14_6 = _mm256_srai_epi32(out_14_4, DCT_CONST_BITS);
-        const __m256i out_14_7 = _mm256_srai_epi32(out_14_5, DCT_CONST_BITS);
-        const __m256i out_30_6 = _mm256_srai_epi32(out_30_4, DCT_CONST_BITS);
-        const __m256i out_30_7 = _mm256_srai_epi32(out_30_5, DCT_CONST_BITS);
-        // Combine
-        out[ 2] = _mm256_packs_epi32(out_02_6, out_02_7);
-        out[18] = _mm256_packs_epi32(out_18_6, out_18_7);
-        out[10] = _mm256_packs_epi32(out_10_6, out_10_7);
-        out[26] = _mm256_packs_epi32(out_26_6, out_26_7);
-        out[ 6] = _mm256_packs_epi32(out_06_6, out_06_7);
-        out[22] = _mm256_packs_epi32(out_22_6, out_22_7);
-        out[14] = _mm256_packs_epi32(out_14_6, out_14_7);
-        out[30] = _mm256_packs_epi32(out_30_6, out_30_7);
-      }
-      {
-        step1[16] = _mm256_add_epi16(step3[17], step2[16]);
-        step1[17] = _mm256_sub_epi16(step2[16], step3[17]);
-        step1[18] = _mm256_sub_epi16(step2[19], step3[18]);
-        step1[19] = _mm256_add_epi16(step3[18], step2[19]);
-        step1[20] = _mm256_add_epi16(step3[21], step2[20]);
-        step1[21] = _mm256_sub_epi16(step2[20], step3[21]);
-        step1[22] = _mm256_sub_epi16(step2[23], step3[22]);
-        step1[23] = _mm256_add_epi16(step3[22], step2[23]);
-        step1[24] = _mm256_add_epi16(step3[25], step2[24]);
-        step1[25] = _mm256_sub_epi16(step2[24], step3[25]);
-        step1[26] = _mm256_sub_epi16(step2[27], step3[26]);
-        step1[27] = _mm256_add_epi16(step3[26], step2[27]);
-        step1[28] = _mm256_add_epi16(step3[29], step2[28]);
-        step1[29] = _mm256_sub_epi16(step2[28], step3[29]);
-        step1[30] = _mm256_sub_epi16(step2[31], step3[30]);
-        step1[31] = _mm256_add_epi16(step3[30], step2[31]);
-      }
-      // Final stage --- output indices are bit-reversed.
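-      // (Each odd-indexed output is a 2-point rotation of the pair
-      // (step1[16 + i], step1[31 - i]) by an odd cosine constant pair.)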
-      {
-        const __m256i out_01_0 = _mm256_unpacklo_epi16(step1[16], step1[31]);
-        const __m256i out_01_1 = _mm256_unpackhi_epi16(step1[16], step1[31]);
-        const __m256i out_17_0 = _mm256_unpacklo_epi16(step1[17], step1[30]);
-        const __m256i out_17_1 = _mm256_unpackhi_epi16(step1[17], step1[30]);
-        const __m256i out_09_0 = _mm256_unpacklo_epi16(step1[18], step1[29]);
-        const __m256i out_09_1 = _mm256_unpackhi_epi16(step1[18], step1[29]);
-        const __m256i out_25_0 = _mm256_unpacklo_epi16(step1[19], step1[28]);
-        const __m256i out_25_1 = _mm256_unpackhi_epi16(step1[19], step1[28]);
-        const __m256i out_01_2 = _mm256_madd_epi16(out_01_0, k__cospi_p31_p01);
-        const __m256i out_01_3 = _mm256_madd_epi16(out_01_1, k__cospi_p31_p01);
-        const __m256i out_17_2 = _mm256_madd_epi16(out_17_0, k__cospi_p15_p17);
-        const __m256i out_17_3 = _mm256_madd_epi16(out_17_1, k__cospi_p15_p17);
-        const __m256i out_09_2 = _mm256_madd_epi16(out_09_0, k__cospi_p23_p09);
-        const __m256i out_09_3 = _mm256_madd_epi16(out_09_1, k__cospi_p23_p09);
-        const __m256i out_25_2 = _mm256_madd_epi16(out_25_0, k__cospi_p07_p25);
-        const __m256i out_25_3 = _mm256_madd_epi16(out_25_1, k__cospi_p07_p25);
-        const __m256i out_07_2 = _mm256_madd_epi16(out_25_0, k__cospi_m25_p07);
-        const __m256i out_07_3 = _mm256_madd_epi16(out_25_1, k__cospi_m25_p07);
-        const __m256i out_23_2 = _mm256_madd_epi16(out_09_0, k__cospi_m09_p23);
-        const __m256i out_23_3 = _mm256_madd_epi16(out_09_1, k__cospi_m09_p23);
-        const __m256i out_15_2 = _mm256_madd_epi16(out_17_0, k__cospi_m17_p15);
-        const __m256i out_15_3 = _mm256_madd_epi16(out_17_1, k__cospi_m17_p15);
-        const __m256i out_31_2 = _mm256_madd_epi16(out_01_0, k__cospi_m01_p31);
-        const __m256i out_31_3 = _mm256_madd_epi16(out_01_1, k__cospi_m01_p31);
-        // dct_const_round_shift
-        const __m256i out_01_4 = _mm256_add_epi32(out_01_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_01_5 = _mm256_add_epi32(out_01_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_17_4 = _mm256_add_epi32(out_17_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_17_5 = _mm256_add_epi32(out_17_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_09_4 = _mm256_add_epi32(out_09_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_09_5 = _mm256_add_epi32(out_09_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_25_4 = _mm256_add_epi32(out_25_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_25_5 = _mm256_add_epi32(out_25_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_07_4 = _mm256_add_epi32(out_07_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_07_5 = _mm256_add_epi32(out_07_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_23_4 = _mm256_add_epi32(out_23_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_23_5 = _mm256_add_epi32(out_23_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_15_4 = _mm256_add_epi32(out_15_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_15_5 = _mm256_add_epi32(out_15_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_31_4 = _mm256_add_epi32(out_31_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_31_5 = _mm256_add_epi32(out_31_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_01_6 = _mm256_srai_epi32(out_01_4, DCT_CONST_BITS);
-        const __m256i out_01_7 = _mm256_srai_epi32(out_01_5, DCT_CONST_BITS);
-        const __m256i out_17_6 = _mm256_srai_epi32(out_17_4, DCT_CONST_BITS);
-        const __m256i out_17_7 = _mm256_srai_epi32(out_17_5, DCT_CONST_BITS);
-        const __m256i out_09_6 = _mm256_srai_epi32(out_09_4, DCT_CONST_BITS);
-        const __m256i out_09_7 = _mm256_srai_epi32(out_09_5, DCT_CONST_BITS);
-        const __m256i out_25_6 = _mm256_srai_epi32(out_25_4, DCT_CONST_BITS);
-        const __m256i out_25_7 = _mm256_srai_epi32(out_25_5, DCT_CONST_BITS);
-        const __m256i out_07_6 = _mm256_srai_epi32(out_07_4, DCT_CONST_BITS);
-        const __m256i out_07_7 = _mm256_srai_epi32(out_07_5, DCT_CONST_BITS);
-        const __m256i out_23_6 = _mm256_srai_epi32(out_23_4, DCT_CONST_BITS);
-        const __m256i out_23_7 = _mm256_srai_epi32(out_23_5, DCT_CONST_BITS);
-        const __m256i out_15_6 = _mm256_srai_epi32(out_15_4, DCT_CONST_BITS);
-        const __m256i out_15_7 = _mm256_srai_epi32(out_15_5, DCT_CONST_BITS);
-        const __m256i out_31_6 = _mm256_srai_epi32(out_31_4, DCT_CONST_BITS);
-        const __m256i out_31_7 = _mm256_srai_epi32(out_31_5, DCT_CONST_BITS);
-        // Combine
-        out[ 1] = _mm256_packs_epi32(out_01_6, out_01_7);
-        out[17] = _mm256_packs_epi32(out_17_6, out_17_7);
-        out[ 9] = _mm256_packs_epi32(out_09_6, out_09_7);
-        out[25] = _mm256_packs_epi32(out_25_6, out_25_7);
-        out[ 7] = _mm256_packs_epi32(out_07_6, out_07_7);
-        out[23] = _mm256_packs_epi32(out_23_6, out_23_7);
-        out[15] = _mm256_packs_epi32(out_15_6, out_15_7);
-        out[31] = _mm256_packs_epi32(out_31_6, out_31_7);
-      }
-      {
-        const __m256i out_05_0 = _mm256_unpacklo_epi16(step1[20], step1[27]);
-        const __m256i out_05_1 = _mm256_unpackhi_epi16(step1[20], step1[27]);
-        const __m256i out_21_0 = _mm256_unpacklo_epi16(step1[21], step1[26]);
-        const __m256i out_21_1 = _mm256_unpackhi_epi16(step1[21], step1[26]);
-        const __m256i out_13_0 = _mm256_unpacklo_epi16(step1[22], step1[25]);
-        const __m256i out_13_1 = _mm256_unpackhi_epi16(step1[22], step1[25]);
-        const __m256i out_29_0 = _mm256_unpacklo_epi16(step1[23], step1[24]);
-        const __m256i out_29_1 = _mm256_unpackhi_epi16(step1[23], step1[24]);
-        const __m256i out_05_2 = _mm256_madd_epi16(out_05_0, k__cospi_p27_p05);
-        const __m256i out_05_3 = _mm256_madd_epi16(out_05_1, k__cospi_p27_p05);
-        const __m256i out_21_2 = _mm256_madd_epi16(out_21_0, k__cospi_p11_p21);
-        const __m256i out_21_3 = _mm256_madd_epi16(out_21_1, k__cospi_p11_p21);
-        const __m256i out_13_2 = _mm256_madd_epi16(out_13_0, k__cospi_p19_p13);
-        const __m256i out_13_3 = _mm256_madd_epi16(out_13_1, k__cospi_p19_p13);
-        const __m256i out_29_2 = _mm256_madd_epi16(out_29_0, k__cospi_p03_p29);
-        const __m256i out_29_3 = _mm256_madd_epi16(out_29_1, k__cospi_p03_p29);
-        const __m256i out_03_2 = _mm256_madd_epi16(out_29_0, k__cospi_m29_p03);
-        const __m256i out_03_3 = _mm256_madd_epi16(out_29_1, k__cospi_m29_p03);
-        const __m256i out_19_2 = _mm256_madd_epi16(out_13_0, k__cospi_m13_p19);
-        const __m256i out_19_3 = _mm256_madd_epi16(out_13_1, k__cospi_m13_p19);
-        const __m256i out_11_2 = _mm256_madd_epi16(out_21_0, k__cospi_m21_p11);
-        const __m256i out_11_3 = _mm256_madd_epi16(out_21_1, k__cospi_m21_p11);
-        const __m256i out_27_2 = _mm256_madd_epi16(out_05_0, k__cospi_m05_p27);
-        const __m256i out_27_3 = _mm256_madd_epi16(out_05_1, k__cospi_m05_p27);
-        // dct_const_round_shift
-        const __m256i out_05_4 = _mm256_add_epi32(out_05_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_05_5 = _mm256_add_epi32(out_05_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_21_4 = _mm256_add_epi32(out_21_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_21_5 = _mm256_add_epi32(out_21_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_13_4 = _mm256_add_epi32(out_13_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_13_5 = _mm256_add_epi32(out_13_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_29_4 = _mm256_add_epi32(out_29_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_29_5 = _mm256_add_epi32(out_29_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_03_4 = _mm256_add_epi32(out_03_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_03_5 = _mm256_add_epi32(out_03_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_19_4 = _mm256_add_epi32(out_19_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_19_5 = _mm256_add_epi32(out_19_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_11_4 = _mm256_add_epi32(out_11_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_11_5 = _mm256_add_epi32(out_11_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_27_4 = _mm256_add_epi32(out_27_2, k__DCT_CONST_ROUNDING);
-        const __m256i out_27_5 = _mm256_add_epi32(out_27_3, k__DCT_CONST_ROUNDING);
-        const __m256i out_05_6 = _mm256_srai_epi32(out_05_4, DCT_CONST_BITS);
-        const __m256i out_05_7 = _mm256_srai_epi32(out_05_5, DCT_CONST_BITS);
-        const __m256i out_21_6 = _mm256_srai_epi32(out_21_4, DCT_CONST_BITS);
-        const __m256i out_21_7 = _mm256_srai_epi32(out_21_5, DCT_CONST_BITS);
-        const __m256i out_13_6 = _mm256_srai_epi32(out_13_4, DCT_CONST_BITS);
-        const __m256i out_13_7 = _mm256_srai_epi32(out_13_5, DCT_CONST_BITS);
-        const __m256i out_29_6 = _mm256_srai_epi32(out_29_4, DCT_CONST_BITS);
-        const __m256i out_29_7 = _mm256_srai_epi32(out_29_5, DCT_CONST_BITS);
-        const __m256i out_03_6 = _mm256_srai_epi32(out_03_4, DCT_CONST_BITS);
-        const __m256i out_03_7 = _mm256_srai_epi32(out_03_5, DCT_CONST_BITS);
-        const __m256i out_19_6 = _mm256_srai_epi32(out_19_4, DCT_CONST_BITS);
-        const __m256i out_19_7 = _mm256_srai_epi32(out_19_5, DCT_CONST_BITS);
-        const __m256i out_11_6 = _mm256_srai_epi32(out_11_4, DCT_CONST_BITS);
-        const __m256i out_11_7 = _mm256_srai_epi32(out_11_5, DCT_CONST_BITS);
-        const __m256i out_27_6 = _mm256_srai_epi32(out_27_4, DCT_CONST_BITS);
-        const __m256i out_27_7 = _mm256_srai_epi32(out_27_5, DCT_CONST_BITS);
-        // Combine
-        out[ 5] = _mm256_packs_epi32(out_05_6, out_05_7);
-        out[21] = _mm256_packs_epi32(out_21_6, out_21_7);
-        out[13] = _mm256_packs_epi32(out_13_6, out_13_7);
-        out[29] = _mm256_packs_epi32(out_29_6, out_29_7);
-        out[ 3] = _mm256_packs_epi32(out_03_6, out_03_7);
-        out[19] = _mm256_packs_epi32(out_19_6, out_19_7);
-        out[11] = _mm256_packs_epi32(out_11_6, out_11_7);
-        out[27] = _mm256_packs_epi32(out_27_6, out_27_7);
-      }
-#if FDCT32x32_HIGH_PRECISION
-      } else {
-        __m256i lstep1[64], lstep2[64], lstep3[64];
-        __m256i u[32], v[32], sign[16];
-        const __m256i K32One = _mm256_set_epi32(1, 1, 1, 1, 1, 1, 1, 1);
-        // start using 32-bit operations
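-        // (This path trades speed for accuracy: the remaining stages run in
-        // 32-bit lanes so later butterflies cannot overflow the 16-bit
-        // intermediates, and results are packed back to 16 bits only when a
-        // group of outputs is finished.)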
-        // stage 3
-        {
-          // expanding to 32-bit length prior to addition operations
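-          // (unpacklo/hi with kZero zero-extends each 16-bit value into a
-          // 32-bit lane; the following madd against kOne sums each
-          // (value, 0) pair as signed 16-bit terms, turning the zero
-          // extension into a proper sign extension.)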
-          lstep2[ 0] = _mm256_unpacklo_epi16(step2[ 0], kZero);
-          lstep2[ 1] = _mm256_unpackhi_epi16(step2[ 0], kZero);
-          lstep2[ 2] = _mm256_unpacklo_epi16(step2[ 1], kZero);
-          lstep2[ 3] = _mm256_unpackhi_epi16(step2[ 1], kZero);
-          lstep2[ 4] = _mm256_unpacklo_epi16(step2[ 2], kZero);
-          lstep2[ 5] = _mm256_unpackhi_epi16(step2[ 2], kZero);
-          lstep2[ 6] = _mm256_unpacklo_epi16(step2[ 3], kZero);
-          lstep2[ 7] = _mm256_unpackhi_epi16(step2[ 3], kZero);
-          lstep2[ 8] = _mm256_unpacklo_epi16(step2[ 4], kZero);
-          lstep2[ 9] = _mm256_unpackhi_epi16(step2[ 4], kZero);
-          lstep2[10] = _mm256_unpacklo_epi16(step2[ 5], kZero);
-          lstep2[11] = _mm256_unpackhi_epi16(step2[ 5], kZero);
-          lstep2[12] = _mm256_unpacklo_epi16(step2[ 6], kZero);
-          lstep2[13] = _mm256_unpackhi_epi16(step2[ 6], kZero);
-          lstep2[14] = _mm256_unpacklo_epi16(step2[ 7], kZero);
-          lstep2[15] = _mm256_unpackhi_epi16(step2[ 7], kZero);
-          lstep2[ 0] = _mm256_madd_epi16(lstep2[ 0], kOne);
-          lstep2[ 1] = _mm256_madd_epi16(lstep2[ 1], kOne);
-          lstep2[ 2] = _mm256_madd_epi16(lstep2[ 2], kOne);
-          lstep2[ 3] = _mm256_madd_epi16(lstep2[ 3], kOne);
-          lstep2[ 4] = _mm256_madd_epi16(lstep2[ 4], kOne);
-          lstep2[ 5] = _mm256_madd_epi16(lstep2[ 5], kOne);
-          lstep2[ 6] = _mm256_madd_epi16(lstep2[ 6], kOne);
-          lstep2[ 7] = _mm256_madd_epi16(lstep2[ 7], kOne);
-          lstep2[ 8] = _mm256_madd_epi16(lstep2[ 8], kOne);
-          lstep2[ 9] = _mm256_madd_epi16(lstep2[ 9], kOne);
-          lstep2[10] = _mm256_madd_epi16(lstep2[10], kOne);
-          lstep2[11] = _mm256_madd_epi16(lstep2[11], kOne);
-          lstep2[12] = _mm256_madd_epi16(lstep2[12], kOne);
-          lstep2[13] = _mm256_madd_epi16(lstep2[13], kOne);
-          lstep2[14] = _mm256_madd_epi16(lstep2[14], kOne);
-          lstep2[15] = _mm256_madd_epi16(lstep2[15], kOne);
-
-          lstep3[ 0] = _mm256_add_epi32(lstep2[14], lstep2[ 0]);
-          lstep3[ 1] = _mm256_add_epi32(lstep2[15], lstep2[ 1]);
-          lstep3[ 2] = _mm256_add_epi32(lstep2[12], lstep2[ 2]);
-          lstep3[ 3] = _mm256_add_epi32(lstep2[13], lstep2[ 3]);
-          lstep3[ 4] = _mm256_add_epi32(lstep2[10], lstep2[ 4]);
-          lstep3[ 5] = _mm256_add_epi32(lstep2[11], lstep2[ 5]);
-          lstep3[ 6] = _mm256_add_epi32(lstep2[ 8], lstep2[ 6]);
-          lstep3[ 7] = _mm256_add_epi32(lstep2[ 9], lstep2[ 7]);
-          lstep3[ 8] = _mm256_sub_epi32(lstep2[ 6], lstep2[ 8]);
-          lstep3[ 9] = _mm256_sub_epi32(lstep2[ 7], lstep2[ 9]);
-          lstep3[10] = _mm256_sub_epi32(lstep2[ 4], lstep2[10]);
-          lstep3[11] = _mm256_sub_epi32(lstep2[ 5], lstep2[11]);
-          lstep3[12] = _mm256_sub_epi32(lstep2[ 2], lstep2[12]);
-          lstep3[13] = _mm256_sub_epi32(lstep2[ 3], lstep2[13]);
-          lstep3[14] = _mm256_sub_epi32(lstep2[ 0], lstep2[14]);
-          lstep3[15] = _mm256_sub_epi32(lstep2[ 1], lstep2[15]);
-        }
-        {
-          const __m256i s3_10_0 = _mm256_unpacklo_epi16(step2[13], step2[10]);
-          const __m256i s3_10_1 = _mm256_unpackhi_epi16(step2[13], step2[10]);
-          const __m256i s3_11_0 = _mm256_unpacklo_epi16(step2[12], step2[11]);
-          const __m256i s3_11_1 = _mm256_unpackhi_epi16(step2[12], step2[11]);
-          const __m256i s3_10_2 = _mm256_madd_epi16(s3_10_0, k__cospi_p16_m16);
-          const __m256i s3_10_3 = _mm256_madd_epi16(s3_10_1, k__cospi_p16_m16);
-          const __m256i s3_11_2 = _mm256_madd_epi16(s3_11_0, k__cospi_p16_m16);
-          const __m256i s3_11_3 = _mm256_madd_epi16(s3_11_1, k__cospi_p16_m16);
-          const __m256i s3_12_2 = _mm256_madd_epi16(s3_11_0, k__cospi_p16_p16);
-          const __m256i s3_12_3 = _mm256_madd_epi16(s3_11_1, k__cospi_p16_p16);
-          const __m256i s3_13_2 = _mm256_madd_epi16(s3_10_0, k__cospi_p16_p16);
-          const __m256i s3_13_3 = _mm256_madd_epi16(s3_10_1, k__cospi_p16_p16);
-          // dct_const_round_shift
-          const __m256i s3_10_4 = _mm256_add_epi32(s3_10_2, k__DCT_CONST_ROUNDING);
-          const __m256i s3_10_5 = _mm256_add_epi32(s3_10_3, k__DCT_CONST_ROUNDING);
-          const __m256i s3_11_4 = _mm256_add_epi32(s3_11_2, k__DCT_CONST_ROUNDING);
-          const __m256i s3_11_5 = _mm256_add_epi32(s3_11_3, k__DCT_CONST_ROUNDING);
-          const __m256i s3_12_4 = _mm256_add_epi32(s3_12_2, k__DCT_CONST_ROUNDING);
-          const __m256i s3_12_5 = _mm256_add_epi32(s3_12_3, k__DCT_CONST_ROUNDING);
-          const __m256i s3_13_4 = _mm256_add_epi32(s3_13_2, k__DCT_CONST_ROUNDING);
-          const __m256i s3_13_5 = _mm256_add_epi32(s3_13_3, k__DCT_CONST_ROUNDING);
-          lstep3[20] = _mm256_srai_epi32(s3_10_4, DCT_CONST_BITS);
-          lstep3[21] = _mm256_srai_epi32(s3_10_5, DCT_CONST_BITS);
-          lstep3[22] = _mm256_srai_epi32(s3_11_4, DCT_CONST_BITS);
-          lstep3[23] = _mm256_srai_epi32(s3_11_5, DCT_CONST_BITS);
-          lstep3[24] = _mm256_srai_epi32(s3_12_4, DCT_CONST_BITS);
-          lstep3[25] = _mm256_srai_epi32(s3_12_5, DCT_CONST_BITS);
-          lstep3[26] = _mm256_srai_epi32(s3_13_4, DCT_CONST_BITS);
-          lstep3[27] = _mm256_srai_epi32(s3_13_5, DCT_CONST_BITS);
-        }
-        {
-          lstep2[40] = _mm256_unpacklo_epi16(step2[20], kZero);
-          lstep2[41] = _mm256_unpackhi_epi16(step2[20], kZero);
-          lstep2[42] = _mm256_unpacklo_epi16(step2[21], kZero);
-          lstep2[43] = _mm256_unpackhi_epi16(step2[21], kZero);
-          lstep2[44] = _mm256_unpacklo_epi16(step2[22], kZero);
-          lstep2[45] = _mm256_unpackhi_epi16(step2[22], kZero);
-          lstep2[46] = _mm256_unpacklo_epi16(step2[23], kZero);
-          lstep2[47] = _mm256_unpackhi_epi16(step2[23], kZero);
-          lstep2[48] = _mm256_unpacklo_epi16(step2[24], kZero);
-          lstep2[49] = _mm256_unpackhi_epi16(step2[24], kZero);
-          lstep2[50] = _mm256_unpacklo_epi16(step2[25], kZero);
-          lstep2[51] = _mm256_unpackhi_epi16(step2[25], kZero);
-          lstep2[52] = _mm256_unpacklo_epi16(step2[26], kZero);
-          lstep2[53] = _mm256_unpackhi_epi16(step2[26], kZero);
-          lstep2[54] = _mm256_unpacklo_epi16(step2[27], kZero);
-          lstep2[55] = _mm256_unpackhi_epi16(step2[27], kZero);
-          lstep2[40] = _mm256_madd_epi16(lstep2[40], kOne);
-          lstep2[41] = _mm256_madd_epi16(lstep2[41], kOne);
-          lstep2[42] = _mm256_madd_epi16(lstep2[42], kOne);
-          lstep2[43] = _mm256_madd_epi16(lstep2[43], kOne);
-          lstep2[44] = _mm256_madd_epi16(lstep2[44], kOne);
-          lstep2[45] = _mm256_madd_epi16(lstep2[45], kOne);
-          lstep2[46] = _mm256_madd_epi16(lstep2[46], kOne);
-          lstep2[47] = _mm256_madd_epi16(lstep2[47], kOne);
-          lstep2[48] = _mm256_madd_epi16(lstep2[48], kOne);
-          lstep2[49] = _mm256_madd_epi16(lstep2[49], kOne);
-          lstep2[50] = _mm256_madd_epi16(lstep2[50], kOne);
-          lstep2[51] = _mm256_madd_epi16(lstep2[51], kOne);
-          lstep2[52] = _mm256_madd_epi16(lstep2[52], kOne);
-          lstep2[53] = _mm256_madd_epi16(lstep2[53], kOne);
-          lstep2[54] = _mm256_madd_epi16(lstep2[54], kOne);
-          lstep2[55] = _mm256_madd_epi16(lstep2[55], kOne);
-
-          lstep1[32] = _mm256_unpacklo_epi16(step1[16], kZero);
-          lstep1[33] = _mm256_unpackhi_epi16(step1[16], kZero);
-          lstep1[34] = _mm256_unpacklo_epi16(step1[17], kZero);
-          lstep1[35] = _mm256_unpackhi_epi16(step1[17], kZero);
-          lstep1[36] = _mm256_unpacklo_epi16(step1[18], kZero);
-          lstep1[37] = _mm256_unpackhi_epi16(step1[18], kZero);
-          lstep1[38] = _mm256_unpacklo_epi16(step1[19], kZero);
-          lstep1[39] = _mm256_unpackhi_epi16(step1[19], kZero);
-          lstep1[56] = _mm256_unpacklo_epi16(step1[28], kZero);
-          lstep1[57] = _mm256_unpackhi_epi16(step1[28], kZero);
-          lstep1[58] = _mm256_unpacklo_epi16(step1[29], kZero);
-          lstep1[59] = _mm256_unpackhi_epi16(step1[29], kZero);
-          lstep1[60] = _mm256_unpacklo_epi16(step1[30], kZero);
-          lstep1[61] = _mm256_unpackhi_epi16(step1[30], kZero);
-          lstep1[62] = _mm256_unpacklo_epi16(step1[31], kZero);
-          lstep1[63] = _mm256_unpackhi_epi16(step1[31], kZero);
-          lstep1[32] = _mm256_madd_epi16(lstep1[32], kOne);
-          lstep1[33] = _mm256_madd_epi16(lstep1[33], kOne);
-          lstep1[34] = _mm256_madd_epi16(lstep1[34], kOne);
-          lstep1[35] = _mm256_madd_epi16(lstep1[35], kOne);
-          lstep1[36] = _mm256_madd_epi16(lstep1[36], kOne);
-          lstep1[37] = _mm256_madd_epi16(lstep1[37], kOne);
-          lstep1[38] = _mm256_madd_epi16(lstep1[38], kOne);
-          lstep1[39] = _mm256_madd_epi16(lstep1[39], kOne);
-          lstep1[56] = _mm256_madd_epi16(lstep1[56], kOne);
-          lstep1[57] = _mm256_madd_epi16(lstep1[57], kOne);
-          lstep1[58] = _mm256_madd_epi16(lstep1[58], kOne);
-          lstep1[59] = _mm256_madd_epi16(lstep1[59], kOne);
-          lstep1[60] = _mm256_madd_epi16(lstep1[60], kOne);
-          lstep1[61] = _mm256_madd_epi16(lstep1[61], kOne);
-          lstep1[62] = _mm256_madd_epi16(lstep1[62], kOne);
-          lstep1[63] = _mm256_madd_epi16(lstep1[63], kOne);
-
-          lstep3[32] = _mm256_add_epi32(lstep2[46], lstep1[32]);
-          lstep3[33] = _mm256_add_epi32(lstep2[47], lstep1[33]);
-          lstep3[34] = _mm256_add_epi32(lstep2[44], lstep1[34]);
-          lstep3[35] = _mm256_add_epi32(lstep2[45], lstep1[35]);
-          lstep3[36] = _mm256_add_epi32(lstep2[42], lstep1[36]);
-          lstep3[37] = _mm256_add_epi32(lstep2[43], lstep1[37]);
-          lstep3[38] = _mm256_add_epi32(lstep2[40], lstep1[38]);
-          lstep3[39] = _mm256_add_epi32(lstep2[41], lstep1[39]);
-          lstep3[40] = _mm256_sub_epi32(lstep1[38], lstep2[40]);
-          lstep3[41] = _mm256_sub_epi32(lstep1[39], lstep2[41]);
-          lstep3[42] = _mm256_sub_epi32(lstep1[36], lstep2[42]);
-          lstep3[43] = _mm256_sub_epi32(lstep1[37], lstep2[43]);
-          lstep3[44] = _mm256_sub_epi32(lstep1[34], lstep2[44]);
-          lstep3[45] = _mm256_sub_epi32(lstep1[35], lstep2[45]);
-          lstep3[46] = _mm256_sub_epi32(lstep1[32], lstep2[46]);
-          lstep3[47] = _mm256_sub_epi32(lstep1[33], lstep2[47]);
-          lstep3[48] = _mm256_sub_epi32(lstep1[62], lstep2[48]);
-          lstep3[49] = _mm256_sub_epi32(lstep1[63], lstep2[49]);
-          lstep3[50] = _mm256_sub_epi32(lstep1[60], lstep2[50]);
-          lstep3[51] = _mm256_sub_epi32(lstep1[61], lstep2[51]);
-          lstep3[52] = _mm256_sub_epi32(lstep1[58], lstep2[52]);
-          lstep3[53] = _mm256_sub_epi32(lstep1[59], lstep2[53]);
-          lstep3[54] = _mm256_sub_epi32(lstep1[56], lstep2[54]);
-          lstep3[55] = _mm256_sub_epi32(lstep1[57], lstep2[55]);
-          lstep3[56] = _mm256_add_epi32(lstep2[54], lstep1[56]);
-          lstep3[57] = _mm256_add_epi32(lstep2[55], lstep1[57]);
-          lstep3[58] = _mm256_add_epi32(lstep2[52], lstep1[58]);
-          lstep3[59] = _mm256_add_epi32(lstep2[53], lstep1[59]);
-          lstep3[60] = _mm256_add_epi32(lstep2[50], lstep1[60]);
-          lstep3[61] = _mm256_add_epi32(lstep2[51], lstep1[61]);
-          lstep3[62] = _mm256_add_epi32(lstep2[48], lstep1[62]);
-          lstep3[63] = _mm256_add_epi32(lstep2[49], lstep1[63]);
-        }
-
-        // stage 4
-        {
-          // expanding to 32-bit length prior to addition operations
-          lstep2[16] = _mm256_unpacklo_epi16(step2[ 8], kZero);
-          lstep2[17] = _mm256_unpackhi_epi16(step2[ 8], kZero);
-          lstep2[18] = _mm256_unpacklo_epi16(step2[ 9], kZero);
-          lstep2[19] = _mm256_unpackhi_epi16(step2[ 9], kZero);
-          lstep2[28] = _mm256_unpacklo_epi16(step2[14], kZero);
-          lstep2[29] = _mm256_unpackhi_epi16(step2[14], kZero);
-          lstep2[30] = _mm256_unpacklo_epi16(step2[15], kZero);
-          lstep2[31] = _mm256_unpackhi_epi16(step2[15], kZero);
-          lstep2[16] = _mm256_madd_epi16(lstep2[16], kOne);
-          lstep2[17] = _mm256_madd_epi16(lstep2[17], kOne);
-          lstep2[18] = _mm256_madd_epi16(lstep2[18], kOne);
-          lstep2[19] = _mm256_madd_epi16(lstep2[19], kOne);
-          lstep2[28] = _mm256_madd_epi16(lstep2[28], kOne);
-          lstep2[29] = _mm256_madd_epi16(lstep2[29], kOne);
-          lstep2[30] = _mm256_madd_epi16(lstep2[30], kOne);
-          lstep2[31] = _mm256_madd_epi16(lstep2[31], kOne);
-
-          lstep1[ 0] = _mm256_add_epi32(lstep3[ 6], lstep3[ 0]);
-          lstep1[ 1] = _mm256_add_epi32(lstep3[ 7], lstep3[ 1]);
-          lstep1[ 2] = _mm256_add_epi32(lstep3[ 4], lstep3[ 2]);
-          lstep1[ 3] = _mm256_add_epi32(lstep3[ 5], lstep3[ 3]);
-          lstep1[ 4] = _mm256_sub_epi32(lstep3[ 2], lstep3[ 4]);
-          lstep1[ 5] = _mm256_sub_epi32(lstep3[ 3], lstep3[ 5]);
-          lstep1[ 6] = _mm256_sub_epi32(lstep3[ 0], lstep3[ 6]);
-          lstep1[ 7] = _mm256_sub_epi32(lstep3[ 1], lstep3[ 7]);
-          lstep1[16] = _mm256_add_epi32(lstep3[22], lstep2[16]);
-          lstep1[17] = _mm256_add_epi32(lstep3[23], lstep2[17]);
-          lstep1[18] = _mm256_add_epi32(lstep3[20], lstep2[18]);
-          lstep1[19] = _mm256_add_epi32(lstep3[21], lstep2[19]);
-          lstep1[20] = _mm256_sub_epi32(lstep2[18], lstep3[20]);
-          lstep1[21] = _mm256_sub_epi32(lstep2[19], lstep3[21]);
-          lstep1[22] = _mm256_sub_epi32(lstep2[16], lstep3[22]);
-          lstep1[23] = _mm256_sub_epi32(lstep2[17], lstep3[23]);
-          lstep1[24] = _mm256_sub_epi32(lstep2[30], lstep3[24]);
-          lstep1[25] = _mm256_sub_epi32(lstep2[31], lstep3[25]);
-          lstep1[26] = _mm256_sub_epi32(lstep2[28], lstep3[26]);
-          lstep1[27] = _mm256_sub_epi32(lstep2[29], lstep3[27]);
-          lstep1[28] = _mm256_add_epi32(lstep3[26], lstep2[28]);
-          lstep1[29] = _mm256_add_epi32(lstep3[27], lstep2[29]);
-          lstep1[30] = _mm256_add_epi32(lstep3[24], lstep2[30]);
-          lstep1[31] = _mm256_add_epi32(lstep3[25], lstep2[31]);
-        }
-        {
-          // to be continued...
-          //
-          const __m256i k32_p16_p16 = pair256_set_epi32(cospi_16_64, cospi_16_64);
-          const __m256i k32_p16_m16 = pair256_set_epi32(cospi_16_64, -cospi_16_64);
-
-          u[0] = _mm256_unpacklo_epi32(lstep3[12], lstep3[10]);
-          u[1] = _mm256_unpackhi_epi32(lstep3[12], lstep3[10]);
-          u[2] = _mm256_unpacklo_epi32(lstep3[13], lstep3[11]);
-          u[3] = _mm256_unpackhi_epi32(lstep3[13], lstep3[11]);
-
-          // TODO(jingning): manually inline k_madd_epi32_avx2 to further hide
-          // instruction latency.
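-          // (k_madd_epi32_avx2(u, k) sums the two 32x32 -> 64-bit products in
-          // each 64-bit lane -- the 32-bit analogue of _mm256_madd_epi16 --
-          // and k_packs_epi64_avx2 narrows a pair of such results back to
-          // 32-bit lanes.)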
-          v[ 0] = k_madd_epi32_avx2(u[0], k32_p16_m16);
-          v[ 1] = k_madd_epi32_avx2(u[1], k32_p16_m16);
-          v[ 2] = k_madd_epi32_avx2(u[2], k32_p16_m16);
-          v[ 3] = k_madd_epi32_avx2(u[3], k32_p16_m16);
-          v[ 4] = k_madd_epi32_avx2(u[0], k32_p16_p16);
-          v[ 5] = k_madd_epi32_avx2(u[1], k32_p16_p16);
-          v[ 6] = k_madd_epi32_avx2(u[2], k32_p16_p16);
-          v[ 7] = k_madd_epi32_avx2(u[3], k32_p16_p16);
-
-          u[0] = k_packs_epi64_avx2(v[0], v[1]);
-          u[1] = k_packs_epi64_avx2(v[2], v[3]);
-          u[2] = k_packs_epi64_avx2(v[4], v[5]);
-          u[3] = k_packs_epi64_avx2(v[6], v[7]);
-
-          v[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING);
-          v[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING);
-          v[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING);
-          v[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING);
-
-          lstep1[10] = _mm256_srai_epi32(v[0], DCT_CONST_BITS);
-          lstep1[11] = _mm256_srai_epi32(v[1], DCT_CONST_BITS);
-          lstep1[12] = _mm256_srai_epi32(v[2], DCT_CONST_BITS);
-          lstep1[13] = _mm256_srai_epi32(v[3], DCT_CONST_BITS);
-        }
-        {
-          const __m256i k32_m08_p24 = pair256_set_epi32(-cospi_8_64, cospi_24_64);
-          const __m256i k32_m24_m08 = pair256_set_epi32(-cospi_24_64, -cospi_8_64);
-          const __m256i k32_p24_p08 = pair256_set_epi32(cospi_24_64, cospi_8_64);
-
-          u[ 0] = _mm256_unpacklo_epi32(lstep3[36], lstep3[58]);
-          u[ 1] = _mm256_unpackhi_epi32(lstep3[36], lstep3[58]);
-          u[ 2] = _mm256_unpacklo_epi32(lstep3[37], lstep3[59]);
-          u[ 3] = _mm256_unpackhi_epi32(lstep3[37], lstep3[59]);
-          u[ 4] = _mm256_unpacklo_epi32(lstep3[38], lstep3[56]);
-          u[ 5] = _mm256_unpackhi_epi32(lstep3[38], lstep3[56]);
-          u[ 6] = _mm256_unpacklo_epi32(lstep3[39], lstep3[57]);
-          u[ 7] = _mm256_unpackhi_epi32(lstep3[39], lstep3[57]);
-          u[ 8] = _mm256_unpacklo_epi32(lstep3[40], lstep3[54]);
-          u[ 9] = _mm256_unpackhi_epi32(lstep3[40], lstep3[54]);
-          u[10] = _mm256_unpacklo_epi32(lstep3[41], lstep3[55]);
-          u[11] = _mm256_unpackhi_epi32(lstep3[41], lstep3[55]);
-          u[12] = _mm256_unpacklo_epi32(lstep3[42], lstep3[52]);
-          u[13] = _mm256_unpackhi_epi32(lstep3[42], lstep3[52]);
-          u[14] = _mm256_unpacklo_epi32(lstep3[43], lstep3[53]);
-          u[15] = _mm256_unpackhi_epi32(lstep3[43], lstep3[53]);
-
-          v[ 0] = k_madd_epi32_avx2(u[ 0], k32_m08_p24);
-          v[ 1] = k_madd_epi32_avx2(u[ 1], k32_m08_p24);
-          v[ 2] = k_madd_epi32_avx2(u[ 2], k32_m08_p24);
-          v[ 3] = k_madd_epi32_avx2(u[ 3], k32_m08_p24);
-          v[ 4] = k_madd_epi32_avx2(u[ 4], k32_m08_p24);
-          v[ 5] = k_madd_epi32_avx2(u[ 5], k32_m08_p24);
-          v[ 6] = k_madd_epi32_avx2(u[ 6], k32_m08_p24);
-          v[ 7] = k_madd_epi32_avx2(u[ 7], k32_m08_p24);
-          v[ 8] = k_madd_epi32_avx2(u[ 8], k32_m24_m08);
-          v[ 9] = k_madd_epi32_avx2(u[ 9], k32_m24_m08);
-          v[10] = k_madd_epi32_avx2(u[10], k32_m24_m08);
-          v[11] = k_madd_epi32_avx2(u[11], k32_m24_m08);
-          v[12] = k_madd_epi32_avx2(u[12], k32_m24_m08);
-          v[13] = k_madd_epi32_avx2(u[13], k32_m24_m08);
-          v[14] = k_madd_epi32_avx2(u[14], k32_m24_m08);
-          v[15] = k_madd_epi32_avx2(u[15], k32_m24_m08);
-          v[16] = k_madd_epi32_avx2(u[12], k32_m08_p24);
-          v[17] = k_madd_epi32_avx2(u[13], k32_m08_p24);
-          v[18] = k_madd_epi32_avx2(u[14], k32_m08_p24);
-          v[19] = k_madd_epi32_avx2(u[15], k32_m08_p24);
-          v[20] = k_madd_epi32_avx2(u[ 8], k32_m08_p24);
-          v[21] = k_madd_epi32_avx2(u[ 9], k32_m08_p24);
-          v[22] = k_madd_epi32_avx2(u[10], k32_m08_p24);
-          v[23] = k_madd_epi32_avx2(u[11], k32_m08_p24);
-          v[24] = k_madd_epi32_avx2(u[ 4], k32_p24_p08);
-          v[25] = k_madd_epi32_avx2(u[ 5], k32_p24_p08);
-          v[26] = k_madd_epi32_avx2(u[ 6], k32_p24_p08);
-          v[27] = k_madd_epi32_avx2(u[ 7], k32_p24_p08);
-          v[28] = k_madd_epi32_avx2(u[ 0], k32_p24_p08);
-          v[29] = k_madd_epi32_avx2(u[ 1], k32_p24_p08);
-          v[30] = k_madd_epi32_avx2(u[ 2], k32_p24_p08);
-          v[31] = k_madd_epi32_avx2(u[ 3], k32_p24_p08);
-
-          u[ 0] = k_packs_epi64_avx2(v[ 0], v[ 1]);
-          u[ 1] = k_packs_epi64_avx2(v[ 2], v[ 3]);
-          u[ 2] = k_packs_epi64_avx2(v[ 4], v[ 5]);
-          u[ 3] = k_packs_epi64_avx2(v[ 6], v[ 7]);
-          u[ 4] = k_packs_epi64_avx2(v[ 8], v[ 9]);
-          u[ 5] = k_packs_epi64_avx2(v[10], v[11]);
-          u[ 6] = k_packs_epi64_avx2(v[12], v[13]);
-          u[ 7] = k_packs_epi64_avx2(v[14], v[15]);
-          u[ 8] = k_packs_epi64_avx2(v[16], v[17]);
-          u[ 9] = k_packs_epi64_avx2(v[18], v[19]);
-          u[10] = k_packs_epi64_avx2(v[20], v[21]);
-          u[11] = k_packs_epi64_avx2(v[22], v[23]);
-          u[12] = k_packs_epi64_avx2(v[24], v[25]);
-          u[13] = k_packs_epi64_avx2(v[26], v[27]);
-          u[14] = k_packs_epi64_avx2(v[28], v[29]);
-          u[15] = k_packs_epi64_avx2(v[30], v[31]);
-
-          v[ 0] = _mm256_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
-          v[ 1] = _mm256_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
-          v[ 2] = _mm256_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
-          v[ 3] = _mm256_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
-          v[ 4] = _mm256_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
-          v[ 5] = _mm256_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
-          v[ 6] = _mm256_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
-          v[ 7] = _mm256_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
-          v[ 8] = _mm256_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
-          v[ 9] = _mm256_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
-          v[10] = _mm256_add_epi32(u[10], k__DCT_CONST_ROUNDING);
-          v[11] = _mm256_add_epi32(u[11], k__DCT_CONST_ROUNDING);
-          v[12] = _mm256_add_epi32(u[12], k__DCT_CONST_ROUNDING);
-          v[13] = _mm256_add_epi32(u[13], k__DCT_CONST_ROUNDING);
-          v[14] = _mm256_add_epi32(u[14], k__DCT_CONST_ROUNDING);
-          v[15] = _mm256_add_epi32(u[15], k__DCT_CONST_ROUNDING);
-
-          lstep1[36] = _mm256_srai_epi32(v[ 0], DCT_CONST_BITS);
-          lstep1[37] = _mm256_srai_epi32(v[ 1], DCT_CONST_BITS);
-          lstep1[38] = _mm256_srai_epi32(v[ 2], DCT_CONST_BITS);
-          lstep1[39] = _mm256_srai_epi32(v[ 3], DCT_CONST_BITS);
-          lstep1[40] = _mm256_srai_epi32(v[ 4], DCT_CONST_BITS);
-          lstep1[41] = _mm256_srai_epi32(v[ 5], DCT_CONST_BITS);
-          lstep1[42] = _mm256_srai_epi32(v[ 6], DCT_CONST_BITS);
-          lstep1[43] = _mm256_srai_epi32(v[ 7], DCT_CONST_BITS);
-          lstep1[52] = _mm256_srai_epi32(v[ 8], DCT_CONST_BITS);
-          lstep1[53] = _mm256_srai_epi32(v[ 9], DCT_CONST_BITS);
-          lstep1[54] = _mm256_srai_epi32(v[10], DCT_CONST_BITS);
-          lstep1[55] = _mm256_srai_epi32(v[11], DCT_CONST_BITS);
-          lstep1[56] = _mm256_srai_epi32(v[12], DCT_CONST_BITS);
-          lstep1[57] = _mm256_srai_epi32(v[13], DCT_CONST_BITS);
-          lstep1[58] = _mm256_srai_epi32(v[14], DCT_CONST_BITS);
-          lstep1[59] = _mm256_srai_epi32(v[15], DCT_CONST_BITS);
-        }
-        // stage 5
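-        // (Stage 5 onward mirrors the 16-bit branch above in 32-bit lanes;
-        // the first outputs out[0], out[16], out[8], out[24] are produced
-        // here.)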
-        {
-          lstep2[ 8] = _mm256_add_epi32(lstep1[10], lstep3[ 8]);
-          lstep2[ 9] = _mm256_add_epi32(lstep1[11], lstep3[ 9]);
-          lstep2[10] = _mm256_sub_epi32(lstep3[ 8], lstep1[10]);
-          lstep2[11] = _mm256_sub_epi32(lstep3[ 9], lstep1[11]);
-          lstep2[12] = _mm256_sub_epi32(lstep3[14], lstep1[12]);
-          lstep2[13] = _mm256_sub_epi32(lstep3[15], lstep1[13]);
-          lstep2[14] = _mm256_add_epi32(lstep1[12], lstep3[14]);
-          lstep2[15] = _mm256_add_epi32(lstep1[13], lstep3[15]);
-        }
-        {
-          const __m256i k32_p16_p16 = pair256_set_epi32(cospi_16_64, cospi_16_64);
-          const __m256i k32_p16_m16 = pair256_set_epi32(cospi_16_64, -cospi_16_64);
-          const __m256i k32_p24_p08 = pair256_set_epi32(cospi_24_64, cospi_8_64);
-          const __m256i k32_m08_p24 = pair256_set_epi32(-cospi_8_64, cospi_24_64);
-
-          u[0] = _mm256_unpacklo_epi32(lstep1[0], lstep1[2]);
-          u[1] = _mm256_unpackhi_epi32(lstep1[0], lstep1[2]);
-          u[2] = _mm256_unpacklo_epi32(lstep1[1], lstep1[3]);
-          u[3] = _mm256_unpackhi_epi32(lstep1[1], lstep1[3]);
-          u[4] = _mm256_unpacklo_epi32(lstep1[4], lstep1[6]);
-          u[5] = _mm256_unpackhi_epi32(lstep1[4], lstep1[6]);
-          u[6] = _mm256_unpacklo_epi32(lstep1[5], lstep1[7]);
-          u[7] = _mm256_unpackhi_epi32(lstep1[5], lstep1[7]);
-
-          // TODO(jingning): manually inline k_madd_epi32_avx2 to further hide
-          // instruction latency.
-          v[ 0] = k_madd_epi32_avx2(u[0], k32_p16_p16);
-          v[ 1] = k_madd_epi32_avx2(u[1], k32_p16_p16);
-          v[ 2] = k_madd_epi32_avx2(u[2], k32_p16_p16);
-          v[ 3] = k_madd_epi32_avx2(u[3], k32_p16_p16);
-          v[ 4] = k_madd_epi32_avx2(u[0], k32_p16_m16);
-          v[ 5] = k_madd_epi32_avx2(u[1], k32_p16_m16);
-          v[ 6] = k_madd_epi32_avx2(u[2], k32_p16_m16);
-          v[ 7] = k_madd_epi32_avx2(u[3], k32_p16_m16);
-          v[ 8] = k_madd_epi32_avx2(u[4], k32_p24_p08);
-          v[ 9] = k_madd_epi32_avx2(u[5], k32_p24_p08);
-          v[10] = k_madd_epi32_avx2(u[6], k32_p24_p08);
-          v[11] = k_madd_epi32_avx2(u[7], k32_p24_p08);
-          v[12] = k_madd_epi32_avx2(u[4], k32_m08_p24);
-          v[13] = k_madd_epi32_avx2(u[5], k32_m08_p24);
-          v[14] = k_madd_epi32_avx2(u[6], k32_m08_p24);
-          v[15] = k_madd_epi32_avx2(u[7], k32_m08_p24);
-
-          u[0] = k_packs_epi64_avx2(v[0], v[1]);
-          u[1] = k_packs_epi64_avx2(v[2], v[3]);
-          u[2] = k_packs_epi64_avx2(v[4], v[5]);
-          u[3] = k_packs_epi64_avx2(v[6], v[7]);
-          u[4] = k_packs_epi64_avx2(v[8], v[9]);
-          u[5] = k_packs_epi64_avx2(v[10], v[11]);
-          u[6] = k_packs_epi64_avx2(v[12], v[13]);
-          u[7] = k_packs_epi64_avx2(v[14], v[15]);
-
-          v[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING);
-          v[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING);
-          v[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING);
-          v[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING);
-          v[4] = _mm256_add_epi32(u[4], k__DCT_CONST_ROUNDING);
-          v[5] = _mm256_add_epi32(u[5], k__DCT_CONST_ROUNDING);
-          v[6] = _mm256_add_epi32(u[6], k__DCT_CONST_ROUNDING);
-          v[7] = _mm256_add_epi32(u[7], k__DCT_CONST_ROUNDING);
-
-          u[0] = _mm256_srai_epi32(v[0], DCT_CONST_BITS);
-          u[1] = _mm256_srai_epi32(v[1], DCT_CONST_BITS);
-          u[2] = _mm256_srai_epi32(v[2], DCT_CONST_BITS);
-          u[3] = _mm256_srai_epi32(v[3], DCT_CONST_BITS);
-          u[4] = _mm256_srai_epi32(v[4], DCT_CONST_BITS);
-          u[5] = _mm256_srai_epi32(v[5], DCT_CONST_BITS);
-          u[6] = _mm256_srai_epi32(v[6], DCT_CONST_BITS);
-          u[7] = _mm256_srai_epi32(v[7], DCT_CONST_BITS);
-
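-          // (cmpgt yields all-ones, i.e. -1, in negative lanes, so the
-          // subtract adds 1 there; with the +1 from K32One and the arithmetic
-          // shift this computes (u + 1 + (u < 0)) >> 2, the rounding used by
-          // the scalar reference.)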
-          sign[0] = _mm256_cmpgt_epi32(kZero, u[0]);
-          sign[1] = _mm256_cmpgt_epi32(kZero, u[1]);
-          sign[2] = _mm256_cmpgt_epi32(kZero, u[2]);
-          sign[3] = _mm256_cmpgt_epi32(kZero, u[3]);
-          sign[4] = _mm256_cmpgt_epi32(kZero, u[4]);
-          sign[5] = _mm256_cmpgt_epi32(kZero, u[5]);
-          sign[6] = _mm256_cmpgt_epi32(kZero, u[6]);
-          sign[7] = _mm256_cmpgt_epi32(kZero, u[7]);
-
-          u[0] = _mm256_sub_epi32(u[0], sign[0]);
-          u[1] = _mm256_sub_epi32(u[1], sign[1]);
-          u[2] = _mm256_sub_epi32(u[2], sign[2]);
-          u[3] = _mm256_sub_epi32(u[3], sign[3]);
-          u[4] = _mm256_sub_epi32(u[4], sign[4]);
-          u[5] = _mm256_sub_epi32(u[5], sign[5]);
-          u[6] = _mm256_sub_epi32(u[6], sign[6]);
-          u[7] = _mm256_sub_epi32(u[7], sign[7]);
-
-          u[0] = _mm256_add_epi32(u[0], K32One);
-          u[1] = _mm256_add_epi32(u[1], K32One);
-          u[2] = _mm256_add_epi32(u[2], K32One);
-          u[3] = _mm256_add_epi32(u[3], K32One);
-          u[4] = _mm256_add_epi32(u[4], K32One);
-          u[5] = _mm256_add_epi32(u[5], K32One);
-          u[6] = _mm256_add_epi32(u[6], K32One);
-          u[7] = _mm256_add_epi32(u[7], K32One);
-
-          u[0] = _mm256_srai_epi32(u[0], 2);
-          u[1] = _mm256_srai_epi32(u[1], 2);
-          u[2] = _mm256_srai_epi32(u[2], 2);
-          u[3] = _mm256_srai_epi32(u[3], 2);
-          u[4] = _mm256_srai_epi32(u[4], 2);
-          u[5] = _mm256_srai_epi32(u[5], 2);
-          u[6] = _mm256_srai_epi32(u[6], 2);
-          u[7] = _mm256_srai_epi32(u[7], 2);
-
-          // Combine
-          out[ 0] = _mm256_packs_epi32(u[0], u[1]);
-          out[16] = _mm256_packs_epi32(u[2], u[3]);
-          out[ 8] = _mm256_packs_epi32(u[4], u[5]);
-          out[24] = _mm256_packs_epi32(u[6], u[7]);
-        }
-        {
-          const __m256i k32_m08_p24 = pair256_set_epi32(-cospi_8_64, cospi_24_64);
-          const __m256i k32_m24_m08 = pair256_set_epi32(-cospi_24_64, -cospi_8_64);
-          const __m256i k32_p24_p08 = pair256_set_epi32(cospi_24_64, cospi_8_64);
-
-          u[0] = _mm256_unpacklo_epi32(lstep1[18], lstep1[28]);
-          u[1] = _mm256_unpackhi_epi32(lstep1[18], lstep1[28]);
-          u[2] = _mm256_unpacklo_epi32(lstep1[19], lstep1[29]);
-          u[3] = _mm256_unpackhi_epi32(lstep1[19], lstep1[29]);
-          u[4] = _mm256_unpacklo_epi32(lstep1[20], lstep1[26]);
-          u[5] = _mm256_unpackhi_epi32(lstep1[20], lstep1[26]);
-          u[6] = _mm256_unpacklo_epi32(lstep1[21], lstep1[27]);
-          u[7] = _mm256_unpackhi_epi32(lstep1[21], lstep1[27]);
-
-          v[0] = k_madd_epi32_avx2(u[0], k32_m08_p24);
-          v[1] = k_madd_epi32_avx2(u[1], k32_m08_p24);
-          v[2] = k_madd_epi32_avx2(u[2], k32_m08_p24);
-          v[3] = k_madd_epi32_avx2(u[3], k32_m08_p24);
-          v[4] = k_madd_epi32_avx2(u[4], k32_m24_m08);
-          v[5] = k_madd_epi32_avx2(u[5], k32_m24_m08);
-          v[6] = k_madd_epi32_avx2(u[6], k32_m24_m08);
-          v[7] = k_madd_epi32_avx2(u[7], k32_m24_m08);
-          v[ 8] = k_madd_epi32_avx2(u[4], k32_m08_p24);
-          v[ 9] = k_madd_epi32_avx2(u[5], k32_m08_p24);
-          v[10] = k_madd_epi32_avx2(u[6], k32_m08_p24);
-          v[11] = k_madd_epi32_avx2(u[7], k32_m08_p24);
-          v[12] = k_madd_epi32_avx2(u[0], k32_p24_p08);
-          v[13] = k_madd_epi32_avx2(u[1], k32_p24_p08);
-          v[14] = k_madd_epi32_avx2(u[2], k32_p24_p08);
-          v[15] = k_madd_epi32_avx2(u[3], k32_p24_p08);
-
-          u[0] = k_packs_epi64_avx2(v[0], v[1]);
-          u[1] = k_packs_epi64_avx2(v[2], v[3]);
-          u[2] = k_packs_epi64_avx2(v[4], v[5]);
-          u[3] = k_packs_epi64_avx2(v[6], v[7]);
-          u[4] = k_packs_epi64_avx2(v[8], v[9]);
-          u[5] = k_packs_epi64_avx2(v[10], v[11]);
-          u[6] = k_packs_epi64_avx2(v[12], v[13]);
-          u[7] = k_packs_epi64_avx2(v[14], v[15]);
-
-          u[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING);
-          u[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING);
-          u[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING);
-          u[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING);
-          u[4] = _mm256_add_epi32(u[4], k__DCT_CONST_ROUNDING);
-          u[5] = _mm256_add_epi32(u[5], k__DCT_CONST_ROUNDING);
-          u[6] = _mm256_add_epi32(u[6], k__DCT_CONST_ROUNDING);
-          u[7] = _mm256_add_epi32(u[7], k__DCT_CONST_ROUNDING);
-
-          lstep2[18] = _mm256_srai_epi32(u[0], DCT_CONST_BITS);
-          lstep2[19] = _mm256_srai_epi32(u[1], DCT_CONST_BITS);
-          lstep2[20] = _mm256_srai_epi32(u[2], DCT_CONST_BITS);
-          lstep2[21] = _mm256_srai_epi32(u[3], DCT_CONST_BITS);
-          lstep2[26] = _mm256_srai_epi32(u[4], DCT_CONST_BITS);
-          lstep2[27] = _mm256_srai_epi32(u[5], DCT_CONST_BITS);
-          lstep2[28] = _mm256_srai_epi32(u[6], DCT_CONST_BITS);
-          lstep2[29] = _mm256_srai_epi32(u[7], DCT_CONST_BITS);
-        }
-        {
-          lstep2[32] = _mm256_add_epi32(lstep1[38], lstep3[32]);
-          lstep2[33] = _mm256_add_epi32(lstep1[39], lstep3[33]);
-          lstep2[34] = _mm256_add_epi32(lstep1[36], lstep3[34]);
-          lstep2[35] = _mm256_add_epi32(lstep1[37], lstep3[35]);
-          lstep2[36] = _mm256_sub_epi32(lstep3[34], lstep1[36]);
-          lstep2[37] = _mm256_sub_epi32(lstep3[35], lstep1[37]);
-          lstep2[38] = _mm256_sub_epi32(lstep3[32], lstep1[38]);
-          lstep2[39] = _mm256_sub_epi32(lstep3[33], lstep1[39]);
-          lstep2[40] = _mm256_sub_epi32(lstep3[46], lstep1[40]);
-          lstep2[41] = _mm256_sub_epi32(lstep3[47], lstep1[41]);
-          lstep2[42] = _mm256_sub_epi32(lstep3[44], lstep1[42]);
-          lstep2[43] = _mm256_sub_epi32(lstep3[45], lstep1[43]);
-          lstep2[44] = _mm256_add_epi32(lstep1[42], lstep3[44]);
-          lstep2[45] = _mm256_add_epi32(lstep1[43], lstep3[45]);
-          lstep2[46] = _mm256_add_epi32(lstep1[40], lstep3[46]);
-          lstep2[47] = _mm256_add_epi32(lstep1[41], lstep3[47]);
-          lstep2[48] = _mm256_add_epi32(lstep1[54], lstep3[48]);
-          lstep2[49] = _mm256_add_epi32(lstep1[55], lstep3[49]);
-          lstep2[50] = _mm256_add_epi32(lstep1[52], lstep3[50]);
-          lstep2[51] = _mm256_add_epi32(lstep1[53], lstep3[51]);
-          lstep2[52] = _mm256_sub_epi32(lstep3[50], lstep1[52]);
-          lstep2[53] = _mm256_sub_epi32(lstep3[51], lstep1[53]);
-          lstep2[54] = _mm256_sub_epi32(lstep3[48], lstep1[54]);
-          lstep2[55] = _mm256_sub_epi32(lstep3[49], lstep1[55]);
-          lstep2[56] = _mm256_sub_epi32(lstep3[62], lstep1[56]);
-          lstep2[57] = _mm256_sub_epi32(lstep3[63], lstep1[57]);
-          lstep2[58] = _mm256_sub_epi32(lstep3[60], lstep1[58]);
-          lstep2[59] = _mm256_sub_epi32(lstep3[61], lstep1[59]);
-          lstep2[60] = _mm256_add_epi32(lstep1[58], lstep3[60]);
-          lstep2[61] = _mm256_add_epi32(lstep1[59], lstep3[61]);
-          lstep2[62] = _mm256_add_epi32(lstep1[56], lstep3[62]);
-          lstep2[63] = _mm256_add_epi32(lstep1[57], lstep3[63]);
-        }
-        // stage 6
-        {
-          const __m256i k32_p28_p04 = pair256_set_epi32(cospi_28_64, cospi_4_64);
-          const __m256i k32_p12_p20 = pair256_set_epi32(cospi_12_64, cospi_20_64);
-          const __m256i k32_m20_p12 = pair256_set_epi32(-cospi_20_64, cospi_12_64);
-          const __m256i k32_m04_p28 = pair256_set_epi32(-cospi_4_64, cospi_28_64);
-
-          u[0] = _mm256_unpacklo_epi32(lstep2[ 8], lstep2[14]);
-          u[1] = _mm256_unpackhi_epi32(lstep2[ 8], lstep2[14]);
-          u[2] = _mm256_unpacklo_epi32(lstep2[ 9], lstep2[15]);
-          u[3] = _mm256_unpackhi_epi32(lstep2[ 9], lstep2[15]);
-          u[4] = _mm256_unpacklo_epi32(lstep2[10], lstep2[12]);
-          u[5] = _mm256_unpackhi_epi32(lstep2[10], lstep2[12]);
-          u[6] = _mm256_unpacklo_epi32(lstep2[11], lstep2[13]);
-          u[7] = _mm256_unpackhi_epi32(lstep2[11], lstep2[13]);
-          u[8] = _mm256_unpacklo_epi32(lstep2[10], lstep2[12]);
-          u[9] = _mm256_unpackhi_epi32(lstep2[10], lstep2[12]);
-          u[10] = _mm256_unpacklo_epi32(lstep2[11], lstep2[13]);
-          u[11] = _mm256_unpackhi_epi32(lstep2[11], lstep2[13]);
-          u[12] = _mm256_unpacklo_epi32(lstep2[ 8], lstep2[14]);
-          u[13] = _mm256_unpackhi_epi32(lstep2[ 8], lstep2[14]);
-          u[14] = _mm256_unpacklo_epi32(lstep2[ 9], lstep2[15]);
-          u[15] = _mm256_unpackhi_epi32(lstep2[ 9], lstep2[15]);
-
-          v[0] = k_madd_epi32_avx2(u[0], k32_p28_p04);
-          v[1] = k_madd_epi32_avx2(u[1], k32_p28_p04);
-          v[2] = k_madd_epi32_avx2(u[2], k32_p28_p04);
-          v[3] = k_madd_epi32_avx2(u[3], k32_p28_p04);
-          v[4] = k_madd_epi32_avx2(u[4], k32_p12_p20);
-          v[5] = k_madd_epi32_avx2(u[5], k32_p12_p20);
-          v[6] = k_madd_epi32_avx2(u[6], k32_p12_p20);
-          v[7] = k_madd_epi32_avx2(u[7], k32_p12_p20);
-          v[ 8] = k_madd_epi32_avx2(u[ 8], k32_m20_p12);
-          v[ 9] = k_madd_epi32_avx2(u[ 9], k32_m20_p12);
-          v[10] = k_madd_epi32_avx2(u[10], k32_m20_p12);
-          v[11] = k_madd_epi32_avx2(u[11], k32_m20_p12);
-          v[12] = k_madd_epi32_avx2(u[12], k32_m04_p28);
-          v[13] = k_madd_epi32_avx2(u[13], k32_m04_p28);
-          v[14] = k_madd_epi32_avx2(u[14], k32_m04_p28);
-          v[15] = k_madd_epi32_avx2(u[15], k32_m04_p28);
-
-          u[0] = k_packs_epi64_avx2(v[0], v[1]);
-          u[1] = k_packs_epi64_avx2(v[2], v[3]);
-          u[2] = k_packs_epi64_avx2(v[4], v[5]);
-          u[3] = k_packs_epi64_avx2(v[6], v[7]);
-          u[4] = k_packs_epi64_avx2(v[8], v[9]);
-          u[5] = k_packs_epi64_avx2(v[10], v[11]);
-          u[6] = k_packs_epi64_avx2(v[12], v[13]);
-          u[7] = k_packs_epi64_avx2(v[14], v[15]);
-
-          v[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING);
-          v[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING);
-          v[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING);
-          v[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING);
-          v[4] = _mm256_add_epi32(u[4], k__DCT_CONST_ROUNDING);
-          v[5] = _mm256_add_epi32(u[5], k__DCT_CONST_ROUNDING);
-          v[6] = _mm256_add_epi32(u[6], k__DCT_CONST_ROUNDING);
-          v[7] = _mm256_add_epi32(u[7], k__DCT_CONST_ROUNDING);
-
-          u[0] = _mm256_srai_epi32(v[0], DCT_CONST_BITS);
-          u[1] = _mm256_srai_epi32(v[1], DCT_CONST_BITS);
-          u[2] = _mm256_srai_epi32(v[2], DCT_CONST_BITS);
-          u[3] = _mm256_srai_epi32(v[3], DCT_CONST_BITS);
-          u[4] = _mm256_srai_epi32(v[4], DCT_CONST_BITS);
-          u[5] = _mm256_srai_epi32(v[5], DCT_CONST_BITS);
-          u[6] = _mm256_srai_epi32(v[6], DCT_CONST_BITS);
-          u[7] = _mm256_srai_epi32(v[7], DCT_CONST_BITS);
-
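-          // The block below rounds each lane half away from zero, i.e.
-          // u = (u + 1 + (u < 0)) >> 2: cmpgt(0, u) is an all-ones mask (-1)
-          // for negative lanes, so subtracting it adds 1 exactly there.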
-          sign[0] = _mm256_cmpgt_epi32(kZero, u[0]);
-          sign[1] = _mm256_cmpgt_epi32(kZero, u[1]);
-          sign[2] = _mm256_cmpgt_epi32(kZero, u[2]);
-          sign[3] = _mm256_cmpgt_epi32(kZero, u[3]);
-          sign[4] = _mm256_cmpgt_epi32(kZero, u[4]);
-          sign[5] = _mm256_cmpgt_epi32(kZero, u[5]);
-          sign[6] = _mm256_cmpgt_epi32(kZero, u[6]);
-          sign[7] = _mm256_cmpgt_epi32(kZero, u[7]);
-
-          u[0] = _mm256_sub_epi32(u[0], sign[0]);
-          u[1] = _mm256_sub_epi32(u[1], sign[1]);
-          u[2] = _mm256_sub_epi32(u[2], sign[2]);
-          u[3] = _mm256_sub_epi32(u[3], sign[3]);
-          u[4] = _mm256_sub_epi32(u[4], sign[4]);
-          u[5] = _mm256_sub_epi32(u[5], sign[5]);
-          u[6] = _mm256_sub_epi32(u[6], sign[6]);
-          u[7] = _mm256_sub_epi32(u[7], sign[7]);
-
-          u[0] = _mm256_add_epi32(u[0], K32One);
-          u[1] = _mm256_add_epi32(u[1], K32One);
-          u[2] = _mm256_add_epi32(u[2], K32One);
-          u[3] = _mm256_add_epi32(u[3], K32One);
-          u[4] = _mm256_add_epi32(u[4], K32One);
-          u[5] = _mm256_add_epi32(u[5], K32One);
-          u[6] = _mm256_add_epi32(u[6], K32One);
-          u[7] = _mm256_add_epi32(u[7], K32One);
-
-          u[0] = _mm256_srai_epi32(u[0], 2);
-          u[1] = _mm256_srai_epi32(u[1], 2);
-          u[2] = _mm256_srai_epi32(u[2], 2);
-          u[3] = _mm256_srai_epi32(u[3], 2);
-          u[4] = _mm256_srai_epi32(u[4], 2);
-          u[5] = _mm256_srai_epi32(u[5], 2);
-          u[6] = _mm256_srai_epi32(u[6], 2);
-          u[7] = _mm256_srai_epi32(u[7], 2);
-
-          out[ 4] = _mm256_packs_epi32(u[0], u[1]);
-          out[20] = _mm256_packs_epi32(u[2], u[3]);
-          out[12] = _mm256_packs_epi32(u[4], u[5]);
-          out[28] = _mm256_packs_epi32(u[6], u[7]);
-        }
-        {
-          lstep3[16] = _mm256_add_epi32(lstep2[18], lstep1[16]);
-          lstep3[17] = _mm256_add_epi32(lstep2[19], lstep1[17]);
-          lstep3[18] = _mm256_sub_epi32(lstep1[16], lstep2[18]);
-          lstep3[19] = _mm256_sub_epi32(lstep1[17], lstep2[19]);
-          lstep3[20] = _mm256_sub_epi32(lstep1[22], lstep2[20]);
-          lstep3[21] = _mm256_sub_epi32(lstep1[23], lstep2[21]);
-          lstep3[22] = _mm256_add_epi32(lstep2[20], lstep1[22]);
-          lstep3[23] = _mm256_add_epi32(lstep2[21], lstep1[23]);
-          lstep3[24] = _mm256_add_epi32(lstep2[26], lstep1[24]);
-          lstep3[25] = _mm256_add_epi32(lstep2[27], lstep1[25]);
-          lstep3[26] = _mm256_sub_epi32(lstep1[24], lstep2[26]);
-          lstep3[27] = _mm256_sub_epi32(lstep1[25], lstep2[27]);
-          lstep3[28] = _mm256_sub_epi32(lstep1[30], lstep2[28]);
-          lstep3[29] = _mm256_sub_epi32(lstep1[31], lstep2[29]);
-          lstep3[30] = _mm256_add_epi32(lstep2[28], lstep1[30]);
-          lstep3[31] = _mm256_add_epi32(lstep2[29], lstep1[31]);
-        }
-        {
-          const __m256i k32_m04_p28 = pair256_set_epi32(-cospi_4_64, cospi_28_64);
-          const __m256i k32_m28_m04 = pair256_set_epi32(-cospi_28_64, -cospi_4_64);
-          const __m256i k32_m20_p12 = pair256_set_epi32(-cospi_20_64, cospi_12_64);
-          const __m256i k32_m12_m20 = pair256_set_epi32(-cospi_12_64,
-                                                        -cospi_20_64);
-          const __m256i k32_p12_p20 = pair256_set_epi32(cospi_12_64, cospi_20_64);
-          const __m256i k32_p28_p04 = pair256_set_epi32(cospi_28_64, cospi_4_64);
-
-          u[ 0] = _mm256_unpacklo_epi32(lstep2[34], lstep2[60]);
-          u[ 1] = _mm256_unpackhi_epi32(lstep2[34], lstep2[60]);
-          u[ 2] = _mm256_unpacklo_epi32(lstep2[35], lstep2[61]);
-          u[ 3] = _mm256_unpackhi_epi32(lstep2[35], lstep2[61]);
-          u[ 4] = _mm256_unpacklo_epi32(lstep2[36], lstep2[58]);
-          u[ 5] = _mm256_unpackhi_epi32(lstep2[36], lstep2[58]);
-          u[ 6] = _mm256_unpacklo_epi32(lstep2[37], lstep2[59]);
-          u[ 7] = _mm256_unpackhi_epi32(lstep2[37], lstep2[59]);
-          u[ 8] = _mm256_unpacklo_epi32(lstep2[42], lstep2[52]);
-          u[ 9] = _mm256_unpackhi_epi32(lstep2[42], lstep2[52]);
-          u[10] = _mm256_unpacklo_epi32(lstep2[43], lstep2[53]);
-          u[11] = _mm256_unpackhi_epi32(lstep2[43], lstep2[53]);
-          u[12] = _mm256_unpacklo_epi32(lstep2[44], lstep2[50]);
-          u[13] = _mm256_unpackhi_epi32(lstep2[44], lstep2[50]);
-          u[14] = _mm256_unpacklo_epi32(lstep2[45], lstep2[51]);
-          u[15] = _mm256_unpackhi_epi32(lstep2[45], lstep2[51]);
-
-          v[ 0] = k_madd_epi32_avx2(u[ 0], k32_m04_p28);
-          v[ 1] = k_madd_epi32_avx2(u[ 1], k32_m04_p28);
-          v[ 2] = k_madd_epi32_avx2(u[ 2], k32_m04_p28);
-          v[ 3] = k_madd_epi32_avx2(u[ 3], k32_m04_p28);
-          v[ 4] = k_madd_epi32_avx2(u[ 4], k32_m28_m04);
-          v[ 5] = k_madd_epi32_avx2(u[ 5], k32_m28_m04);
-          v[ 6] = k_madd_epi32_avx2(u[ 6], k32_m28_m04);
-          v[ 7] = k_madd_epi32_avx2(u[ 7], k32_m28_m04);
-          v[ 8] = k_madd_epi32_avx2(u[ 8], k32_m20_p12);
-          v[ 9] = k_madd_epi32_avx2(u[ 9], k32_m20_p12);
-          v[10] = k_madd_epi32_avx2(u[10], k32_m20_p12);
-          v[11] = k_madd_epi32_avx2(u[11], k32_m20_p12);
-          v[12] = k_madd_epi32_avx2(u[12], k32_m12_m20);
-          v[13] = k_madd_epi32_avx2(u[13], k32_m12_m20);
-          v[14] = k_madd_epi32_avx2(u[14], k32_m12_m20);
-          v[15] = k_madd_epi32_avx2(u[15], k32_m12_m20);
-          v[16] = k_madd_epi32_avx2(u[12], k32_m20_p12);
-          v[17] = k_madd_epi32_avx2(u[13], k32_m20_p12);
-          v[18] = k_madd_epi32_avx2(u[14], k32_m20_p12);
-          v[19] = k_madd_epi32_avx2(u[15], k32_m20_p12);
-          v[20] = k_madd_epi32_avx2(u[ 8], k32_p12_p20);
-          v[21] = k_madd_epi32_avx2(u[ 9], k32_p12_p20);
-          v[22] = k_madd_epi32_avx2(u[10], k32_p12_p20);
-          v[23] = k_madd_epi32_avx2(u[11], k32_p12_p20);
-          v[24] = k_madd_epi32_avx2(u[ 4], k32_m04_p28);
-          v[25] = k_madd_epi32_avx2(u[ 5], k32_m04_p28);
-          v[26] = k_madd_epi32_avx2(u[ 6], k32_m04_p28);
-          v[27] = k_madd_epi32_avx2(u[ 7], k32_m04_p28);
-          v[28] = k_madd_epi32_avx2(u[ 0], k32_p28_p04);
-          v[29] = k_madd_epi32_avx2(u[ 1], k32_p28_p04);
-          v[30] = k_madd_epi32_avx2(u[ 2], k32_p28_p04);
-          v[31] = k_madd_epi32_avx2(u[ 3], k32_p28_p04);
-
-          u[ 0] = k_packs_epi64_avx2(v[ 0], v[ 1]);
-          u[ 1] = k_packs_epi64_avx2(v[ 2], v[ 3]);
-          u[ 2] = k_packs_epi64_avx2(v[ 4], v[ 5]);
-          u[ 3] = k_packs_epi64_avx2(v[ 6], v[ 7]);
-          u[ 4] = k_packs_epi64_avx2(v[ 8], v[ 9]);
-          u[ 5] = k_packs_epi64_avx2(v[10], v[11]);
-          u[ 6] = k_packs_epi64_avx2(v[12], v[13]);
-          u[ 7] = k_packs_epi64_avx2(v[14], v[15]);
-          u[ 8] = k_packs_epi64_avx2(v[16], v[17]);
-          u[ 9] = k_packs_epi64_avx2(v[18], v[19]);
-          u[10] = k_packs_epi64_avx2(v[20], v[21]);
-          u[11] = k_packs_epi64_avx2(v[22], v[23]);
-          u[12] = k_packs_epi64_avx2(v[24], v[25]);
-          u[13] = k_packs_epi64_avx2(v[26], v[27]);
-          u[14] = k_packs_epi64_avx2(v[28], v[29]);
-          u[15] = k_packs_epi64_avx2(v[30], v[31]);
-
-          v[ 0] = _mm256_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
-          v[ 1] = _mm256_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
-          v[ 2] = _mm256_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
-          v[ 3] = _mm256_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
-          v[ 4] = _mm256_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
-          v[ 5] = _mm256_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
-          v[ 6] = _mm256_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
-          v[ 7] = _mm256_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
-          v[ 8] = _mm256_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
-          v[ 9] = _mm256_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
-          v[10] = _mm256_add_epi32(u[10], k__DCT_CONST_ROUNDING);
-          v[11] = _mm256_add_epi32(u[11], k__DCT_CONST_ROUNDING);
-          v[12] = _mm256_add_epi32(u[12], k__DCT_CONST_ROUNDING);
-          v[13] = _mm256_add_epi32(u[13], k__DCT_CONST_ROUNDING);
-          v[14] = _mm256_add_epi32(u[14], k__DCT_CONST_ROUNDING);
-          v[15] = _mm256_add_epi32(u[15], k__DCT_CONST_ROUNDING);
-
-          lstep3[34] = _mm256_srai_epi32(v[ 0], DCT_CONST_BITS);
-          lstep3[35] = _mm256_srai_epi32(v[ 1], DCT_CONST_BITS);
-          lstep3[36] = _mm256_srai_epi32(v[ 2], DCT_CONST_BITS);
-          lstep3[37] = _mm256_srai_epi32(v[ 3], DCT_CONST_BITS);
-          lstep3[42] = _mm256_srai_epi32(v[ 4], DCT_CONST_BITS);
-          lstep3[43] = _mm256_srai_epi32(v[ 5], DCT_CONST_BITS);
-          lstep3[44] = _mm256_srai_epi32(v[ 6], DCT_CONST_BITS);
-          lstep3[45] = _mm256_srai_epi32(v[ 7], DCT_CONST_BITS);
-          lstep3[50] = _mm256_srai_epi32(v[ 8], DCT_CONST_BITS);
-          lstep3[51] = _mm256_srai_epi32(v[ 9], DCT_CONST_BITS);
-          lstep3[52] = _mm256_srai_epi32(v[10], DCT_CONST_BITS);
-          lstep3[53] = _mm256_srai_epi32(v[11], DCT_CONST_BITS);
-          lstep3[58] = _mm256_srai_epi32(v[12], DCT_CONST_BITS);
-          lstep3[59] = _mm256_srai_epi32(v[13], DCT_CONST_BITS);
-          lstep3[60] = _mm256_srai_epi32(v[14], DCT_CONST_BITS);
-          lstep3[61] = _mm256_srai_epi32(v[15], DCT_CONST_BITS);
-        }
-        // stage 7
-        {
-          const __m256i k32_p30_p02 = pair256_set_epi32(cospi_30_64, cospi_2_64);
-          const __m256i k32_p14_p18 = pair256_set_epi32(cospi_14_64, cospi_18_64);
-          const __m256i k32_p22_p10 = pair256_set_epi32(cospi_22_64, cospi_10_64);
-          const __m256i k32_p06_p26 = pair256_set_epi32(cospi_6_64,  cospi_26_64);
-          const __m256i k32_m26_p06 = pair256_set_epi32(-cospi_26_64, cospi_6_64);
-          const __m256i k32_m10_p22 = pair256_set_epi32(-cospi_10_64, cospi_22_64);
-          const __m256i k32_m18_p14 = pair256_set_epi32(-cospi_18_64, cospi_14_64);
-          const __m256i k32_m02_p30 = pair256_set_epi32(-cospi_2_64, cospi_30_64);
-
-          u[ 0] = _mm256_unpacklo_epi32(lstep3[16], lstep3[30]);
-          u[ 1] = _mm256_unpackhi_epi32(lstep3[16], lstep3[30]);
-          u[ 2] = _mm256_unpacklo_epi32(lstep3[17], lstep3[31]);
-          u[ 3] = _mm256_unpackhi_epi32(lstep3[17], lstep3[31]);
-          u[ 4] = _mm256_unpacklo_epi32(lstep3[18], lstep3[28]);
-          u[ 5] = _mm256_unpackhi_epi32(lstep3[18], lstep3[28]);
-          u[ 6] = _mm256_unpacklo_epi32(lstep3[19], lstep3[29]);
-          u[ 7] = _mm256_unpackhi_epi32(lstep3[19], lstep3[29]);
-          u[ 8] = _mm256_unpacklo_epi32(lstep3[20], lstep3[26]);
-          u[ 9] = _mm256_unpackhi_epi32(lstep3[20], lstep3[26]);
-          u[10] = _mm256_unpacklo_epi32(lstep3[21], lstep3[27]);
-          u[11] = _mm256_unpackhi_epi32(lstep3[21], lstep3[27]);
-          u[12] = _mm256_unpacklo_epi32(lstep3[22], lstep3[24]);
-          u[13] = _mm256_unpackhi_epi32(lstep3[22], lstep3[24]);
-          u[14] = _mm256_unpacklo_epi32(lstep3[23], lstep3[25]);
-          u[15] = _mm256_unpackhi_epi32(lstep3[23], lstep3[25]);
-
-          v[ 0] = k_madd_epi32_avx2(u[ 0], k32_p30_p02);
-          v[ 1] = k_madd_epi32_avx2(u[ 1], k32_p30_p02);
-          v[ 2] = k_madd_epi32_avx2(u[ 2], k32_p30_p02);
-          v[ 3] = k_madd_epi32_avx2(u[ 3], k32_p30_p02);
-          v[ 4] = k_madd_epi32_avx2(u[ 4], k32_p14_p18);
-          v[ 5] = k_madd_epi32_avx2(u[ 5], k32_p14_p18);
-          v[ 6] = k_madd_epi32_avx2(u[ 6], k32_p14_p18);
-          v[ 7] = k_madd_epi32_avx2(u[ 7], k32_p14_p18);
-          v[ 8] = k_madd_epi32_avx2(u[ 8], k32_p22_p10);
-          v[ 9] = k_madd_epi32_avx2(u[ 9], k32_p22_p10);
-          v[10] = k_madd_epi32_avx2(u[10], k32_p22_p10);
-          v[11] = k_madd_epi32_avx2(u[11], k32_p22_p10);
-          v[12] = k_madd_epi32_avx2(u[12], k32_p06_p26);
-          v[13] = k_madd_epi32_avx2(u[13], k32_p06_p26);
-          v[14] = k_madd_epi32_avx2(u[14], k32_p06_p26);
-          v[15] = k_madd_epi32_avx2(u[15], k32_p06_p26);
-          v[16] = k_madd_epi32_avx2(u[12], k32_m26_p06);
-          v[17] = k_madd_epi32_avx2(u[13], k32_m26_p06);
-          v[18] = k_madd_epi32_avx2(u[14], k32_m26_p06);
-          v[19] = k_madd_epi32_avx2(u[15], k32_m26_p06);
-          v[20] = k_madd_epi32_avx2(u[ 8], k32_m10_p22);
-          v[21] = k_madd_epi32_avx2(u[ 9], k32_m10_p22);
-          v[22] = k_madd_epi32_avx2(u[10], k32_m10_p22);
-          v[23] = k_madd_epi32_avx2(u[11], k32_m10_p22);
-          v[24] = k_madd_epi32_avx2(u[ 4], k32_m18_p14);
-          v[25] = k_madd_epi32_avx2(u[ 5], k32_m18_p14);
-          v[26] = k_madd_epi32_avx2(u[ 6], k32_m18_p14);
-          v[27] = k_madd_epi32_avx2(u[ 7], k32_m18_p14);
-          v[28] = k_madd_epi32_avx2(u[ 0], k32_m02_p30);
-          v[29] = k_madd_epi32_avx2(u[ 1], k32_m02_p30);
-          v[30] = k_madd_epi32_avx2(u[ 2], k32_m02_p30);
-          v[31] = k_madd_epi32_avx2(u[ 3], k32_m02_p30);
-
-          u[ 0] = k_packs_epi64_avx2(v[ 0], v[ 1]);
-          u[ 1] = k_packs_epi64_avx2(v[ 2], v[ 3]);
-          u[ 2] = k_packs_epi64_avx2(v[ 4], v[ 5]);
-          u[ 3] = k_packs_epi64_avx2(v[ 6], v[ 7]);
-          u[ 4] = k_packs_epi64_avx2(v[ 8], v[ 9]);
-          u[ 5] = k_packs_epi64_avx2(v[10], v[11]);
-          u[ 6] = k_packs_epi64_avx2(v[12], v[13]);
-          u[ 7] = k_packs_epi64_avx2(v[14], v[15]);
-          u[ 8] = k_packs_epi64_avx2(v[16], v[17]);
-          u[ 9] = k_packs_epi64_avx2(v[18], v[19]);
-          u[10] = k_packs_epi64_avx2(v[20], v[21]);
-          u[11] = k_packs_epi64_avx2(v[22], v[23]);
-          u[12] = k_packs_epi64_avx2(v[24], v[25]);
-          u[13] = k_packs_epi64_avx2(v[26], v[27]);
-          u[14] = k_packs_epi64_avx2(v[28], v[29]);
-          u[15] = k_packs_epi64_avx2(v[30], v[31]);
-
-          v[ 0] = _mm256_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
-          v[ 1] = _mm256_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
-          v[ 2] = _mm256_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
-          v[ 3] = _mm256_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
-          v[ 4] = _mm256_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
-          v[ 5] = _mm256_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
-          v[ 6] = _mm256_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
-          v[ 7] = _mm256_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
-          v[ 8] = _mm256_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
-          v[ 9] = _mm256_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
-          v[10] = _mm256_add_epi32(u[10], k__DCT_CONST_ROUNDING);
-          v[11] = _mm256_add_epi32(u[11], k__DCT_CONST_ROUNDING);
-          v[12] = _mm256_add_epi32(u[12], k__DCT_CONST_ROUNDING);
-          v[13] = _mm256_add_epi32(u[13], k__DCT_CONST_ROUNDING);
-          v[14] = _mm256_add_epi32(u[14], k__DCT_CONST_ROUNDING);
-          v[15] = _mm256_add_epi32(u[15], k__DCT_CONST_ROUNDING);
-
-          u[ 0] = _mm256_srai_epi32(v[ 0], DCT_CONST_BITS);
-          u[ 1] = _mm256_srai_epi32(v[ 1], DCT_CONST_BITS);
-          u[ 2] = _mm256_srai_epi32(v[ 2], DCT_CONST_BITS);
-          u[ 3] = _mm256_srai_epi32(v[ 3], DCT_CONST_BITS);
-          u[ 4] = _mm256_srai_epi32(v[ 4], DCT_CONST_BITS);
-          u[ 5] = _mm256_srai_epi32(v[ 5], DCT_CONST_BITS);
-          u[ 6] = _mm256_srai_epi32(v[ 6], DCT_CONST_BITS);
-          u[ 7] = _mm256_srai_epi32(v[ 7], DCT_CONST_BITS);
-          u[ 8] = _mm256_srai_epi32(v[ 8], DCT_CONST_BITS);
-          u[ 9] = _mm256_srai_epi32(v[ 9], DCT_CONST_BITS);
-          u[10] = _mm256_srai_epi32(v[10], DCT_CONST_BITS);
-          u[11] = _mm256_srai_epi32(v[11], DCT_CONST_BITS);
-          u[12] = _mm256_srai_epi32(v[12], DCT_CONST_BITS);
-          u[13] = _mm256_srai_epi32(v[13], DCT_CONST_BITS);
-          u[14] = _mm256_srai_epi32(v[14], DCT_CONST_BITS);
-          u[15] = _mm256_srai_epi32(v[15], DCT_CONST_BITS);
-
-          v[ 0] = _mm256_cmpgt_epi32(kZero, u[ 0]);
-          v[ 1] = _mm256_cmpgt_epi32(kZero, u[ 1]);
-          v[ 2] = _mm256_cmpgt_epi32(kZero, u[ 2]);
-          v[ 3] = _mm256_cmpgt_epi32(kZero, u[ 3]);
-          v[ 4] = _mm256_cmpgt_epi32(kZero, u[ 4]);
-          v[ 5] = _mm256_cmpgt_epi32(kZero, u[ 5]);
-          v[ 6] = _mm256_cmpgt_epi32(kZero, u[ 6]);
-          v[ 7] = _mm256_cmpgt_epi32(kZero, u[ 7]);
-          v[ 8] = _mm256_cmpgt_epi32(kZero, u[ 8]);
-          v[ 9] = _mm256_cmpgt_epi32(kZero, u[ 9]);
-          v[10] = _mm256_cmpgt_epi32(kZero, u[10]);
-          v[11] = _mm256_cmpgt_epi32(kZero, u[11]);
-          v[12] = _mm256_cmpgt_epi32(kZero, u[12]);
-          v[13] = _mm256_cmpgt_epi32(kZero, u[13]);
-          v[14] = _mm256_cmpgt_epi32(kZero, u[14]);
-          v[15] = _mm256_cmpgt_epi32(kZero, u[15]);
-
-          u[ 0] = _mm256_sub_epi32(u[ 0], v[ 0]);
-          u[ 1] = _mm256_sub_epi32(u[ 1], v[ 1]);
-          u[ 2] = _mm256_sub_epi32(u[ 2], v[ 2]);
-          u[ 3] = _mm256_sub_epi32(u[ 3], v[ 3]);
-          u[ 4] = _mm256_sub_epi32(u[ 4], v[ 4]);
-          u[ 5] = _mm256_sub_epi32(u[ 5], v[ 5]);
-          u[ 6] = _mm256_sub_epi32(u[ 6], v[ 6]);
-          u[ 7] = _mm256_sub_epi32(u[ 7], v[ 7]);
-          u[ 8] = _mm256_sub_epi32(u[ 8], v[ 8]);
-          u[ 9] = _mm256_sub_epi32(u[ 9], v[ 9]);
-          u[10] = _mm256_sub_epi32(u[10], v[10]);
-          u[11] = _mm256_sub_epi32(u[11], v[11]);
-          u[12] = _mm256_sub_epi32(u[12], v[12]);
-          u[13] = _mm256_sub_epi32(u[13], v[13]);
-          u[14] = _mm256_sub_epi32(u[14], v[14]);
-          u[15] = _mm256_sub_epi32(u[15], v[15]);
-
-          v[ 0] = _mm256_add_epi32(u[ 0], K32One);
-          v[ 1] = _mm256_add_epi32(u[ 1], K32One);
-          v[ 2] = _mm256_add_epi32(u[ 2], K32One);
-          v[ 3] = _mm256_add_epi32(u[ 3], K32One);
-          v[ 4] = _mm256_add_epi32(u[ 4], K32One);
-          v[ 5] = _mm256_add_epi32(u[ 5], K32One);
-          v[ 6] = _mm256_add_epi32(u[ 6], K32One);
-          v[ 7] = _mm256_add_epi32(u[ 7], K32One);
-          v[ 8] = _mm256_add_epi32(u[ 8], K32One);
-          v[ 9] = _mm256_add_epi32(u[ 9], K32One);
-          v[10] = _mm256_add_epi32(u[10], K32One);
-          v[11] = _mm256_add_epi32(u[11], K32One);
-          v[12] = _mm256_add_epi32(u[12], K32One);
-          v[13] = _mm256_add_epi32(u[13], K32One);
-          v[14] = _mm256_add_epi32(u[14], K32One);
-          v[15] = _mm256_add_epi32(u[15], K32One);
-
-          u[ 0] = _mm256_srai_epi32(v[ 0], 2);
-          u[ 1] = _mm256_srai_epi32(v[ 1], 2);
-          u[ 2] = _mm256_srai_epi32(v[ 2], 2);
-          u[ 3] = _mm256_srai_epi32(v[ 3], 2);
-          u[ 4] = _mm256_srai_epi32(v[ 4], 2);
-          u[ 5] = _mm256_srai_epi32(v[ 5], 2);
-          u[ 6] = _mm256_srai_epi32(v[ 6], 2);
-          u[ 7] = _mm256_srai_epi32(v[ 7], 2);
-          u[ 8] = _mm256_srai_epi32(v[ 8], 2);
-          u[ 9] = _mm256_srai_epi32(v[ 9], 2);
-          u[10] = _mm256_srai_epi32(v[10], 2);
-          u[11] = _mm256_srai_epi32(v[11], 2);
-          u[12] = _mm256_srai_epi32(v[12], 2);
-          u[13] = _mm256_srai_epi32(v[13], 2);
-          u[14] = _mm256_srai_epi32(v[14], 2);
-          u[15] = _mm256_srai_epi32(v[15], 2);
-
-          out[ 2] = _mm256_packs_epi32(u[0], u[1]);
-          out[18] = _mm256_packs_epi32(u[2], u[3]);
-          out[10] = _mm256_packs_epi32(u[4], u[5]);
-          out[26] = _mm256_packs_epi32(u[6], u[7]);
-          out[ 6] = _mm256_packs_epi32(u[8], u[9]);
-          out[22] = _mm256_packs_epi32(u[10], u[11]);
-          out[14] = _mm256_packs_epi32(u[12], u[13]);
-          out[30] = _mm256_packs_epi32(u[14], u[15]);
-        }
-        {
-          lstep1[32] = _mm256_add_epi32(lstep3[34], lstep2[32]);
-          lstep1[33] = _mm256_add_epi32(lstep3[35], lstep2[33]);
-          lstep1[34] = _mm256_sub_epi32(lstep2[32], lstep3[34]);
-          lstep1[35] = _mm256_sub_epi32(lstep2[33], lstep3[35]);
-          lstep1[36] = _mm256_sub_epi32(lstep2[38], lstep3[36]);
-          lstep1[37] = _mm256_sub_epi32(lstep2[39], lstep3[37]);
-          lstep1[38] = _mm256_add_epi32(lstep3[36], lstep2[38]);
-          lstep1[39] = _mm256_add_epi32(lstep3[37], lstep2[39]);
-          lstep1[40] = _mm256_add_epi32(lstep3[42], lstep2[40]);
-          lstep1[41] = _mm256_add_epi32(lstep3[43], lstep2[41]);
-          lstep1[42] = _mm256_sub_epi32(lstep2[40], lstep3[42]);
-          lstep1[43] = _mm256_sub_epi32(lstep2[41], lstep3[43]);
-          lstep1[44] = _mm256_sub_epi32(lstep2[46], lstep3[44]);
-          lstep1[45] = _mm256_sub_epi32(lstep2[47], lstep3[45]);
-          lstep1[46] = _mm256_add_epi32(lstep3[44], lstep2[46]);
-          lstep1[47] = _mm256_add_epi32(lstep3[45], lstep2[47]);
-          lstep1[48] = _mm256_add_epi32(lstep3[50], lstep2[48]);
-          lstep1[49] = _mm256_add_epi32(lstep3[51], lstep2[49]);
-          lstep1[50] = _mm256_sub_epi32(lstep2[48], lstep3[50]);
-          lstep1[51] = _mm256_sub_epi32(lstep2[49], lstep3[51]);
-          lstep1[52] = _mm256_sub_epi32(lstep2[54], lstep3[52]);
-          lstep1[53] = _mm256_sub_epi32(lstep2[55], lstep3[53]);
-          lstep1[54] = _mm256_add_epi32(lstep3[52], lstep2[54]);
-          lstep1[55] = _mm256_add_epi32(lstep3[53], lstep2[55]);
-          lstep1[56] = _mm256_add_epi32(lstep3[58], lstep2[56]);
-          lstep1[57] = _mm256_add_epi32(lstep3[59], lstep2[57]);
-          lstep1[58] = _mm256_sub_epi32(lstep2[56], lstep3[58]);
-          lstep1[59] = _mm256_sub_epi32(lstep2[57], lstep3[59]);
-          lstep1[60] = _mm256_sub_epi32(lstep2[62], lstep3[60]);
-          lstep1[61] = _mm256_sub_epi32(lstep2[63], lstep3[61]);
-          lstep1[62] = _mm256_add_epi32(lstep3[60], lstep2[62]);
-          lstep1[63] = _mm256_add_epi32(lstep3[61], lstep2[63]);
-        }
-        // stage 8
-        {
-          const __m256i k32_p31_p01 = pair256_set_epi32(cospi_31_64, cospi_1_64);
-          const __m256i k32_p15_p17 = pair256_set_epi32(cospi_15_64, cospi_17_64);
-          const __m256i k32_p23_p09 = pair256_set_epi32(cospi_23_64, cospi_9_64);
-          const __m256i k32_p07_p25 = pair256_set_epi32(cospi_7_64, cospi_25_64);
-          const __m256i k32_m25_p07 = pair256_set_epi32(-cospi_25_64, cospi_7_64);
-          const __m256i k32_m09_p23 = pair256_set_epi32(-cospi_9_64, cospi_23_64);
-          const __m256i k32_m17_p15 = pair256_set_epi32(-cospi_17_64, cospi_15_64);
-          const __m256i k32_m01_p31 = pair256_set_epi32(-cospi_1_64, cospi_31_64);
-
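-          // Stage 8 yields the sixteen odd-indexed coefficients; each result
-          // is written straight to its natural position (out[1], out[17],
-          // out[9], ...), so no separate reordering pass is needed.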
-          u[ 0] = _mm256_unpacklo_epi32(lstep1[32], lstep1[62]);
-          u[ 1] = _mm256_unpackhi_epi32(lstep1[32], lstep1[62]);
-          u[ 2] = _mm256_unpacklo_epi32(lstep1[33], lstep1[63]);
-          u[ 3] = _mm256_unpackhi_epi32(lstep1[33], lstep1[63]);
-          u[ 4] = _mm256_unpacklo_epi32(lstep1[34], lstep1[60]);
-          u[ 5] = _mm256_unpackhi_epi32(lstep1[34], lstep1[60]);
-          u[ 6] = _mm256_unpacklo_epi32(lstep1[35], lstep1[61]);
-          u[ 7] = _mm256_unpackhi_epi32(lstep1[35], lstep1[61]);
-          u[ 8] = _mm256_unpacklo_epi32(lstep1[36], lstep1[58]);
-          u[ 9] = _mm256_unpackhi_epi32(lstep1[36], lstep1[58]);
-          u[10] = _mm256_unpacklo_epi32(lstep1[37], lstep1[59]);
-          u[11] = _mm256_unpackhi_epi32(lstep1[37], lstep1[59]);
-          u[12] = _mm256_unpacklo_epi32(lstep1[38], lstep1[56]);
-          u[13] = _mm256_unpackhi_epi32(lstep1[38], lstep1[56]);
-          u[14] = _mm256_unpacklo_epi32(lstep1[39], lstep1[57]);
-          u[15] = _mm256_unpackhi_epi32(lstep1[39], lstep1[57]);
-
-          v[ 0] = k_madd_epi32_avx2(u[ 0], k32_p31_p01);
-          v[ 1] = k_madd_epi32_avx2(u[ 1], k32_p31_p01);
-          v[ 2] = k_madd_epi32_avx2(u[ 2], k32_p31_p01);
-          v[ 3] = k_madd_epi32_avx2(u[ 3], k32_p31_p01);
-          v[ 4] = k_madd_epi32_avx2(u[ 4], k32_p15_p17);
-          v[ 5] = k_madd_epi32_avx2(u[ 5], k32_p15_p17);
-          v[ 6] = k_madd_epi32_avx2(u[ 6], k32_p15_p17);
-          v[ 7] = k_madd_epi32_avx2(u[ 7], k32_p15_p17);
-          v[ 8] = k_madd_epi32_avx2(u[ 8], k32_p23_p09);
-          v[ 9] = k_madd_epi32_avx2(u[ 9], k32_p23_p09);
-          v[10] = k_madd_epi32_avx2(u[10], k32_p23_p09);
-          v[11] = k_madd_epi32_avx2(u[11], k32_p23_p09);
-          v[12] = k_madd_epi32_avx2(u[12], k32_p07_p25);
-          v[13] = k_madd_epi32_avx2(u[13], k32_p07_p25);
-          v[14] = k_madd_epi32_avx2(u[14], k32_p07_p25);
-          v[15] = k_madd_epi32_avx2(u[15], k32_p07_p25);
-          v[16] = k_madd_epi32_avx2(u[12], k32_m25_p07);
-          v[17] = k_madd_epi32_avx2(u[13], k32_m25_p07);
-          v[18] = k_madd_epi32_avx2(u[14], k32_m25_p07);
-          v[19] = k_madd_epi32_avx2(u[15], k32_m25_p07);
-          v[20] = k_madd_epi32_avx2(u[ 8], k32_m09_p23);
-          v[21] = k_madd_epi32_avx2(u[ 9], k32_m09_p23);
-          v[22] = k_madd_epi32_avx2(u[10], k32_m09_p23);
-          v[23] = k_madd_epi32_avx2(u[11], k32_m09_p23);
-          v[24] = k_madd_epi32_avx2(u[ 4], k32_m17_p15);
-          v[25] = k_madd_epi32_avx2(u[ 5], k32_m17_p15);
-          v[26] = k_madd_epi32_avx2(u[ 6], k32_m17_p15);
-          v[27] = k_madd_epi32_avx2(u[ 7], k32_m17_p15);
-          v[28] = k_madd_epi32_avx2(u[ 0], k32_m01_p31);
-          v[29] = k_madd_epi32_avx2(u[ 1], k32_m01_p31);
-          v[30] = k_madd_epi32_avx2(u[ 2], k32_m01_p31);
-          v[31] = k_madd_epi32_avx2(u[ 3], k32_m01_p31);
-
-          u[ 0] = k_packs_epi64_avx2(v[ 0], v[ 1]);
-          u[ 1] = k_packs_epi64_avx2(v[ 2], v[ 3]);
-          u[ 2] = k_packs_epi64_avx2(v[ 4], v[ 5]);
-          u[ 3] = k_packs_epi64_avx2(v[ 6], v[ 7]);
-          u[ 4] = k_packs_epi64_avx2(v[ 8], v[ 9]);
-          u[ 5] = k_packs_epi64_avx2(v[10], v[11]);
-          u[ 6] = k_packs_epi64_avx2(v[12], v[13]);
-          u[ 7] = k_packs_epi64_avx2(v[14], v[15]);
-          u[ 8] = k_packs_epi64_avx2(v[16], v[17]);
-          u[ 9] = k_packs_epi64_avx2(v[18], v[19]);
-          u[10] = k_packs_epi64_avx2(v[20], v[21]);
-          u[11] = k_packs_epi64_avx2(v[22], v[23]);
-          u[12] = k_packs_epi64_avx2(v[24], v[25]);
-          u[13] = k_packs_epi64_avx2(v[26], v[27]);
-          u[14] = k_packs_epi64_avx2(v[28], v[29]);
-          u[15] = k_packs_epi64_avx2(v[30], v[31]);
-
-          v[ 0] = _mm256_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
-          v[ 1] = _mm256_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
-          v[ 2] = _mm256_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
-          v[ 3] = _mm256_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
-          v[ 4] = _mm256_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
-          v[ 5] = _mm256_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
-          v[ 6] = _mm256_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
-          v[ 7] = _mm256_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
-          v[ 8] = _mm256_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
-          v[ 9] = _mm256_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
-          v[10] = _mm256_add_epi32(u[10], k__DCT_CONST_ROUNDING);
-          v[11] = _mm256_add_epi32(u[11], k__DCT_CONST_ROUNDING);
-          v[12] = _mm256_add_epi32(u[12], k__DCT_CONST_ROUNDING);
-          v[13] = _mm256_add_epi32(u[13], k__DCT_CONST_ROUNDING);
-          v[14] = _mm256_add_epi32(u[14], k__DCT_CONST_ROUNDING);
-          v[15] = _mm256_add_epi32(u[15], k__DCT_CONST_ROUNDING);
-
-          u[ 0] = _mm256_srai_epi32(v[ 0], DCT_CONST_BITS);
-          u[ 1] = _mm256_srai_epi32(v[ 1], DCT_CONST_BITS);
-          u[ 2] = _mm256_srai_epi32(v[ 2], DCT_CONST_BITS);
-          u[ 3] = _mm256_srai_epi32(v[ 3], DCT_CONST_BITS);
-          u[ 4] = _mm256_srai_epi32(v[ 4], DCT_CONST_BITS);
-          u[ 5] = _mm256_srai_epi32(v[ 5], DCT_CONST_BITS);
-          u[ 6] = _mm256_srai_epi32(v[ 6], DCT_CONST_BITS);
-          u[ 7] = _mm256_srai_epi32(v[ 7], DCT_CONST_BITS);
-          u[ 8] = _mm256_srai_epi32(v[ 8], DCT_CONST_BITS);
-          u[ 9] = _mm256_srai_epi32(v[ 9], DCT_CONST_BITS);
-          u[10] = _mm256_srai_epi32(v[10], DCT_CONST_BITS);
-          u[11] = _mm256_srai_epi32(v[11], DCT_CONST_BITS);
-          u[12] = _mm256_srai_epi32(v[12], DCT_CONST_BITS);
-          u[13] = _mm256_srai_epi32(v[13], DCT_CONST_BITS);
-          u[14] = _mm256_srai_epi32(v[14], DCT_CONST_BITS);
-          u[15] = _mm256_srai_epi32(v[15], DCT_CONST_BITS);
-
-          v[ 0] = _mm256_cmpgt_epi32(kZero, u[ 0]);
-          v[ 1] = _mm256_cmpgt_epi32(kZero, u[ 1]);
-          v[ 2] = _mm256_cmpgt_epi32(kZero, u[ 2]);
-          v[ 3] = _mm256_cmpgt_epi32(kZero, u[ 3]);
-          v[ 4] = _mm256_cmpgt_epi32(kZero, u[ 4]);
-          v[ 5] = _mm256_cmpgt_epi32(kZero, u[ 5]);
-          v[ 6] = _mm256_cmpgt_epi32(kZero, u[ 6]);
-          v[ 7] = _mm256_cmpgt_epi32(kZero, u[ 7]);
-          v[ 8] = _mm256_cmpgt_epi32(kZero, u[ 8]);
-          v[ 9] = _mm256_cmpgt_epi32(kZero, u[ 9]);
-          v[10] = _mm256_cmpgt_epi32(kZero, u[10]);
-          v[11] = _mm256_cmpgt_epi32(kZero, u[11]);
-          v[12] = _mm256_cmpgt_epi32(kZero, u[12]);
-          v[13] = _mm256_cmpgt_epi32(kZero, u[13]);
-          v[14] = _mm256_cmpgt_epi32(kZero, u[14]);
-          v[15] = _mm256_cmpgt_epi32(kZero, u[15]);
-
-          u[ 0] = _mm256_sub_epi32(u[ 0], v[ 0]);
-          u[ 1] = _mm256_sub_epi32(u[ 1], v[ 1]);
-          u[ 2] = _mm256_sub_epi32(u[ 2], v[ 2]);
-          u[ 3] = _mm256_sub_epi32(u[ 3], v[ 3]);
-          u[ 4] = _mm256_sub_epi32(u[ 4], v[ 4]);
-          u[ 5] = _mm256_sub_epi32(u[ 5], v[ 5]);
-          u[ 6] = _mm256_sub_epi32(u[ 6], v[ 6]);
-          u[ 7] = _mm256_sub_epi32(u[ 7], v[ 7]);
-          u[ 8] = _mm256_sub_epi32(u[ 8], v[ 8]);
-          u[ 9] = _mm256_sub_epi32(u[ 9], v[ 9]);
-          u[10] = _mm256_sub_epi32(u[10], v[10]);
-          u[11] = _mm256_sub_epi32(u[11], v[11]);
-          u[12] = _mm256_sub_epi32(u[12], v[12]);
-          u[13] = _mm256_sub_epi32(u[13], v[13]);
-          u[14] = _mm256_sub_epi32(u[14], v[14]);
-          u[15] = _mm256_sub_epi32(u[15], v[15]);
-
-          v[0] = _mm256_add_epi32(u[0], K32One);
-          v[1] = _mm256_add_epi32(u[1], K32One);
-          v[2] = _mm256_add_epi32(u[2], K32One);
-          v[3] = _mm256_add_epi32(u[3], K32One);
-          v[4] = _mm256_add_epi32(u[4], K32One);
-          v[5] = _mm256_add_epi32(u[5], K32One);
-          v[6] = _mm256_add_epi32(u[6], K32One);
-          v[7] = _mm256_add_epi32(u[7], K32One);
-          v[8] = _mm256_add_epi32(u[8], K32One);
-          v[9] = _mm256_add_epi32(u[9], K32One);
-          v[10] = _mm256_add_epi32(u[10], K32One);
-          v[11] = _mm256_add_epi32(u[11], K32One);
-          v[12] = _mm256_add_epi32(u[12], K32One);
-          v[13] = _mm256_add_epi32(u[13], K32One);
-          v[14] = _mm256_add_epi32(u[14], K32One);
-          v[15] = _mm256_add_epi32(u[15], K32One);
-
-          u[0] = _mm256_srai_epi32(v[0], 2);
-          u[1] = _mm256_srai_epi32(v[1], 2);
-          u[2] = _mm256_srai_epi32(v[2], 2);
-          u[3] = _mm256_srai_epi32(v[3], 2);
-          u[4] = _mm256_srai_epi32(v[4], 2);
-          u[5] = _mm256_srai_epi32(v[5], 2);
-          u[6] = _mm256_srai_epi32(v[6], 2);
-          u[7] = _mm256_srai_epi32(v[7], 2);
-          u[8] = _mm256_srai_epi32(v[8], 2);
-          u[9] = _mm256_srai_epi32(v[9], 2);
-          u[10] = _mm256_srai_epi32(v[10], 2);
-          u[11] = _mm256_srai_epi32(v[11], 2);
-          u[12] = _mm256_srai_epi32(v[12], 2);
-          u[13] = _mm256_srai_epi32(v[13], 2);
-          u[14] = _mm256_srai_epi32(v[14], 2);
-          u[15] = _mm256_srai_epi32(v[15], 2);
-
-          out[ 1] = _mm256_packs_epi32(u[0], u[1]);
-          out[17] = _mm256_packs_epi32(u[2], u[3]);
-          out[ 9] = _mm256_packs_epi32(u[4], u[5]);
-          out[25] = _mm256_packs_epi32(u[6], u[7]);
-          out[ 7] = _mm256_packs_epi32(u[8], u[9]);
-          out[23] = _mm256_packs_epi32(u[10], u[11]);
-          out[15] = _mm256_packs_epi32(u[12], u[13]);
-          out[31] = _mm256_packs_epi32(u[14], u[15]);
-        }
-        {
-          const __m256i k32_p27_p05 = pair256_set_epi32(cospi_27_64, cospi_5_64);
-          const __m256i k32_p11_p21 = pair256_set_epi32(cospi_11_64, cospi_21_64);
-          const __m256i k32_p19_p13 = pair256_set_epi32(cospi_19_64, cospi_13_64);
-          const __m256i k32_p03_p29 = pair256_set_epi32(cospi_3_64, cospi_29_64);
-          const __m256i k32_m29_p03 = pair256_set_epi32(-cospi_29_64, cospi_3_64);
-          const __m256i k32_m13_p19 = pair256_set_epi32(-cospi_13_64, cospi_19_64);
-          const __m256i k32_m21_p11 = pair256_set_epi32(-cospi_21_64, cospi_11_64);
-          const __m256i k32_m05_p27 = pair256_set_epi32(-cospi_5_64, cospi_27_64);
-
-          u[ 0] = _mm256_unpacklo_epi32(lstep1[40], lstep1[54]);
-          u[ 1] = _mm256_unpackhi_epi32(lstep1[40], lstep1[54]);
-          u[ 2] = _mm256_unpacklo_epi32(lstep1[41], lstep1[55]);
-          u[ 3] = _mm256_unpackhi_epi32(lstep1[41], lstep1[55]);
-          u[ 4] = _mm256_unpacklo_epi32(lstep1[42], lstep1[52]);
-          u[ 5] = _mm256_unpackhi_epi32(lstep1[42], lstep1[52]);
-          u[ 6] = _mm256_unpacklo_epi32(lstep1[43], lstep1[53]);
-          u[ 7] = _mm256_unpackhi_epi32(lstep1[43], lstep1[53]);
-          u[ 8] = _mm256_unpacklo_epi32(lstep1[44], lstep1[50]);
-          u[ 9] = _mm256_unpackhi_epi32(lstep1[44], lstep1[50]);
-          u[10] = _mm256_unpacklo_epi32(lstep1[45], lstep1[51]);
-          u[11] = _mm256_unpackhi_epi32(lstep1[45], lstep1[51]);
-          u[12] = _mm256_unpacklo_epi32(lstep1[46], lstep1[48]);
-          u[13] = _mm256_unpackhi_epi32(lstep1[46], lstep1[48]);
-          u[14] = _mm256_unpacklo_epi32(lstep1[47], lstep1[49]);
-          u[15] = _mm256_unpackhi_epi32(lstep1[47], lstep1[49]);
-
-          v[ 0] = k_madd_epi32_avx2(u[ 0], k32_p27_p05);
-          v[ 1] = k_madd_epi32_avx2(u[ 1], k32_p27_p05);
-          v[ 2] = k_madd_epi32_avx2(u[ 2], k32_p27_p05);
-          v[ 3] = k_madd_epi32_avx2(u[ 3], k32_p27_p05);
-          v[ 4] = k_madd_epi32_avx2(u[ 4], k32_p11_p21);
-          v[ 5] = k_madd_epi32_avx2(u[ 5], k32_p11_p21);
-          v[ 6] = k_madd_epi32_avx2(u[ 6], k32_p11_p21);
-          v[ 7] = k_madd_epi32_avx2(u[ 7], k32_p11_p21);
-          v[ 8] = k_madd_epi32_avx2(u[ 8], k32_p19_p13);
-          v[ 9] = k_madd_epi32_avx2(u[ 9], k32_p19_p13);
-          v[10] = k_madd_epi32_avx2(u[10], k32_p19_p13);
-          v[11] = k_madd_epi32_avx2(u[11], k32_p19_p13);
-          v[12] = k_madd_epi32_avx2(u[12], k32_p03_p29);
-          v[13] = k_madd_epi32_avx2(u[13], k32_p03_p29);
-          v[14] = k_madd_epi32_avx2(u[14], k32_p03_p29);
-          v[15] = k_madd_epi32_avx2(u[15], k32_p03_p29);
-          v[16] = k_madd_epi32_avx2(u[12], k32_m29_p03);
-          v[17] = k_madd_epi32_avx2(u[13], k32_m29_p03);
-          v[18] = k_madd_epi32_avx2(u[14], k32_m29_p03);
-          v[19] = k_madd_epi32_avx2(u[15], k32_m29_p03);
-          v[20] = k_madd_epi32_avx2(u[ 8], k32_m13_p19);
-          v[21] = k_madd_epi32_avx2(u[ 9], k32_m13_p19);
-          v[22] = k_madd_epi32_avx2(u[10], k32_m13_p19);
-          v[23] = k_madd_epi32_avx2(u[11], k32_m13_p19);
-          v[24] = k_madd_epi32_avx2(u[ 4], k32_m21_p11);
-          v[25] = k_madd_epi32_avx2(u[ 5], k32_m21_p11);
-          v[26] = k_madd_epi32_avx2(u[ 6], k32_m21_p11);
-          v[27] = k_madd_epi32_avx2(u[ 7], k32_m21_p11);
-          v[28] = k_madd_epi32_avx2(u[ 0], k32_m05_p27);
-          v[29] = k_madd_epi32_avx2(u[ 1], k32_m05_p27);
-          v[30] = k_madd_epi32_avx2(u[ 2], k32_m05_p27);
-          v[31] = k_madd_epi32_avx2(u[ 3], k32_m05_p27);
-
-          u[ 0] = k_packs_epi64_avx2(v[ 0], v[ 1]);
-          u[ 1] = k_packs_epi64_avx2(v[ 2], v[ 3]);
-          u[ 2] = k_packs_epi64_avx2(v[ 4], v[ 5]);
-          u[ 3] = k_packs_epi64_avx2(v[ 6], v[ 7]);
-          u[ 4] = k_packs_epi64_avx2(v[ 8], v[ 9]);
-          u[ 5] = k_packs_epi64_avx2(v[10], v[11]);
-          u[ 6] = k_packs_epi64_avx2(v[12], v[13]);
-          u[ 7] = k_packs_epi64_avx2(v[14], v[15]);
-          u[ 8] = k_packs_epi64_avx2(v[16], v[17]);
-          u[ 9] = k_packs_epi64_avx2(v[18], v[19]);
-          u[10] = k_packs_epi64_avx2(v[20], v[21]);
-          u[11] = k_packs_epi64_avx2(v[22], v[23]);
-          u[12] = k_packs_epi64_avx2(v[24], v[25]);
-          u[13] = k_packs_epi64_avx2(v[26], v[27]);
-          u[14] = k_packs_epi64_avx2(v[28], v[29]);
-          u[15] = k_packs_epi64_avx2(v[30], v[31]);
-
-          v[ 0] = _mm256_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
-          v[ 1] = _mm256_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
-          v[ 2] = _mm256_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
-          v[ 3] = _mm256_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
-          v[ 4] = _mm256_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
-          v[ 5] = _mm256_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
-          v[ 6] = _mm256_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
-          v[ 7] = _mm256_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
-          v[ 8] = _mm256_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
-          v[ 9] = _mm256_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
-          v[10] = _mm256_add_epi32(u[10], k__DCT_CONST_ROUNDING);
-          v[11] = _mm256_add_epi32(u[11], k__DCT_CONST_ROUNDING);
-          v[12] = _mm256_add_epi32(u[12], k__DCT_CONST_ROUNDING);
-          v[13] = _mm256_add_epi32(u[13], k__DCT_CONST_ROUNDING);
-          v[14] = _mm256_add_epi32(u[14], k__DCT_CONST_ROUNDING);
-          v[15] = _mm256_add_epi32(u[15], k__DCT_CONST_ROUNDING);
-
-          u[ 0] = _mm256_srai_epi32(v[ 0], DCT_CONST_BITS);
-          u[ 1] = _mm256_srai_epi32(v[ 1], DCT_CONST_BITS);
-          u[ 2] = _mm256_srai_epi32(v[ 2], DCT_CONST_BITS);
-          u[ 3] = _mm256_srai_epi32(v[ 3], DCT_CONST_BITS);
-          u[ 4] = _mm256_srai_epi32(v[ 4], DCT_CONST_BITS);
-          u[ 5] = _mm256_srai_epi32(v[ 5], DCT_CONST_BITS);
-          u[ 6] = _mm256_srai_epi32(v[ 6], DCT_CONST_BITS);
-          u[ 7] = _mm256_srai_epi32(v[ 7], DCT_CONST_BITS);
-          u[ 8] = _mm256_srai_epi32(v[ 8], DCT_CONST_BITS);
-          u[ 9] = _mm256_srai_epi32(v[ 9], DCT_CONST_BITS);
-          u[10] = _mm256_srai_epi32(v[10], DCT_CONST_BITS);
-          u[11] = _mm256_srai_epi32(v[11], DCT_CONST_BITS);
-          u[12] = _mm256_srai_epi32(v[12], DCT_CONST_BITS);
-          u[13] = _mm256_srai_epi32(v[13], DCT_CONST_BITS);
-          u[14] = _mm256_srai_epi32(v[14], DCT_CONST_BITS);
-          u[15] = _mm256_srai_epi32(v[15], DCT_CONST_BITS);
-
-          v[ 0] = _mm256_cmpgt_epi32(kZero, u[ 0]);
-          v[ 1] = _mm256_cmpgt_epi32(kZero, u[ 1]);
-          v[ 2] = _mm256_cmpgt_epi32(kZero, u[ 2]);
-          v[ 3] = _mm256_cmpgt_epi32(kZero, u[ 3]);
-          v[ 4] = _mm256_cmpgt_epi32(kZero, u[ 4]);
-          v[ 5] = _mm256_cmpgt_epi32(kZero, u[ 5]);
-          v[ 6] = _mm256_cmpgt_epi32(kZero, u[ 6]);
-          v[ 7] = _mm256_cmpgt_epi32(kZero, u[ 7]);
-          v[ 8] = _mm256_cmpgt_epi32(kZero, u[ 8]);
-          v[ 9] = _mm256_cmpgt_epi32(kZero, u[ 9]);
-          v[10] = _mm256_cmpgt_epi32(kZero, u[10]);
-          v[11] = _mm256_cmpgt_epi32(kZero, u[11]);
-          v[12] = _mm256_cmpgt_epi32(kZero, u[12]);
-          v[13] = _mm256_cmpgt_epi32(kZero, u[13]);
-          v[14] = _mm256_cmpgt_epi32(kZero, u[14]);
-          v[15] = _mm256_cmpgt_epi32(kZero, u[15]);
-
-          u[ 0] = _mm256_sub_epi32(u[ 0], v[ 0]);
-          u[ 1] = _mm256_sub_epi32(u[ 1], v[ 1]);
-          u[ 2] = _mm256_sub_epi32(u[ 2], v[ 2]);
-          u[ 3] = _mm256_sub_epi32(u[ 3], v[ 3]);
-          u[ 4] = _mm256_sub_epi32(u[ 4], v[ 4]);
-          u[ 5] = _mm256_sub_epi32(u[ 5], v[ 5]);
-          u[ 6] = _mm256_sub_epi32(u[ 6], v[ 6]);
-          u[ 7] = _mm256_sub_epi32(u[ 7], v[ 7]);
-          u[ 8] = _mm256_sub_epi32(u[ 8], v[ 8]);
-          u[ 9] = _mm256_sub_epi32(u[ 9], v[ 9]);
-          u[10] = _mm256_sub_epi32(u[10], v[10]);
-          u[11] = _mm256_sub_epi32(u[11], v[11]);
-          u[12] = _mm256_sub_epi32(u[12], v[12]);
-          u[13] = _mm256_sub_epi32(u[13], v[13]);
-          u[14] = _mm256_sub_epi32(u[14], v[14]);
-          u[15] = _mm256_sub_epi32(u[15], v[15]);
-
-          v[0] = _mm256_add_epi32(u[0], K32One);
-          v[1] = _mm256_add_epi32(u[1], K32One);
-          v[2] = _mm256_add_epi32(u[2], K32One);
-          v[3] = _mm256_add_epi32(u[3], K32One);
-          v[4] = _mm256_add_epi32(u[4], K32One);
-          v[5] = _mm256_add_epi32(u[5], K32One);
-          v[6] = _mm256_add_epi32(u[6], K32One);
-          v[7] = _mm256_add_epi32(u[7], K32One);
-          v[8] = _mm256_add_epi32(u[8], K32One);
-          v[9] = _mm256_add_epi32(u[9], K32One);
-          v[10] = _mm256_add_epi32(u[10], K32One);
-          v[11] = _mm256_add_epi32(u[11], K32One);
-          v[12] = _mm256_add_epi32(u[12], K32One);
-          v[13] = _mm256_add_epi32(u[13], K32One);
-          v[14] = _mm256_add_epi32(u[14], K32One);
-          v[15] = _mm256_add_epi32(u[15], K32One);
-
-          u[0] = _mm256_srai_epi32(v[0], 2);
-          u[1] = _mm256_srai_epi32(v[1], 2);
-          u[2] = _mm256_srai_epi32(v[2], 2);
-          u[3] = _mm256_srai_epi32(v[3], 2);
-          u[4] = _mm256_srai_epi32(v[4], 2);
-          u[5] = _mm256_srai_epi32(v[5], 2);
-          u[6] = _mm256_srai_epi32(v[6], 2);
-          u[7] = _mm256_srai_epi32(v[7], 2);
-          u[8] = _mm256_srai_epi32(v[8], 2);
-          u[9] = _mm256_srai_epi32(v[9], 2);
-          u[10] = _mm256_srai_epi32(v[10], 2);
-          u[11] = _mm256_srai_epi32(v[11], 2);
-          u[12] = _mm256_srai_epi32(v[12], 2);
-          u[13] = _mm256_srai_epi32(v[13], 2);
-          u[14] = _mm256_srai_epi32(v[14], 2);
-          u[15] = _mm256_srai_epi32(v[15], 2);
-
-          out[ 5] = _mm256_packs_epi32(u[0], u[1]);
-          out[21] = _mm256_packs_epi32(u[2], u[3]);
-          out[13] = _mm256_packs_epi32(u[4], u[5]);
-          out[29] = _mm256_packs_epi32(u[6], u[7]);
-          out[ 3] = _mm256_packs_epi32(u[8], u[9]);
-          out[19] = _mm256_packs_epi32(u[10], u[11]);
-          out[11] = _mm256_packs_epi32(u[12], u[13]);
-          out[27] = _mm256_packs_epi32(u[14], u[15]);
-        }
-      }
-#endif
-      // Transpose the results, do it as four 8x8 transposes.
-      {
-        int transpose_block;
-        int16_t *output_currStep, *output_nextStep;
-        if (0 == pass) {
-          output_currStep = &intermediate[column_start * 32];
-          output_nextStep = &intermediate[(column_start + 8) * 32];
-        } else {
-          output_currStep = &output_org[column_start * 32];
-          output_nextStep = &output_org[(column_start + 8) * 32];
-        }
-        for (transpose_block = 0; transpose_block < 4; ++transpose_block) {
-          __m256i *this_out = &out[8 * transpose_block];
-          // 00  01  02  03  04  05  06  07  08  09  10  11  12  13  14  15
-          // 20  21  22  23  24  25  26  27  28  29  30  31  32  33  34  35
-          // 40  41  42  43  44  45  46  47  48  49  50  51  52  53  54  55
-          // 60  61  62  63  64  65  66  67  68  69  70  71  72  73  74  75
-          // 80  81  82  83  84  85  86  87  88  89  90  91  92  93  94  95
-          // 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115
-          // 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135
-          // 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155
-          const __m256i tr0_0 = _mm256_unpacklo_epi16(this_out[0], this_out[1]);
-          const __m256i tr0_1 = _mm256_unpacklo_epi16(this_out[2], this_out[3]);
-          const __m256i tr0_2 = _mm256_unpackhi_epi16(this_out[0], this_out[1]);
-          const __m256i tr0_3 = _mm256_unpackhi_epi16(this_out[2], this_out[3]);
-          const __m256i tr0_4 = _mm256_unpacklo_epi16(this_out[4], this_out[5]);
-          const __m256i tr0_5 = _mm256_unpacklo_epi16(this_out[6], this_out[7]);
-          const __m256i tr0_6 = _mm256_unpackhi_epi16(this_out[4], this_out[5]);
-          const __m256i tr0_7 = _mm256_unpackhi_epi16(this_out[6], this_out[7]);
-          // 00  20  01  21  02  22  03  23  08  28  09  29  10  30  11  31
-          // 40  60  41  61  42  62  43  63  48  68  49  69  50  70  51  71
-          // 04  24  05  25  06  26  07  27  12  32  13  33  14  34  15  35
-          // 44  64  45  65  46  66  47  67  52  72  53  73  54  74  55  75
-          // 80  100 81  101 82  102 83  103 88  108 89  109 90  110 91  111
-          // 120 140 121 141 122 142 123 143 128 148 129 149 130 150 131 151
-          // 84  104 85  105 86  106 87  107 92  112 93  113 94  114 95  115
-          // 124 144 125 145 126 146 127 147 132 152 133 153 134 154 135 155
-
-          const __m256i tr1_0 = _mm256_unpacklo_epi32(tr0_0, tr0_1);
-          const __m256i tr1_1 = _mm256_unpacklo_epi32(tr0_2, tr0_3);
-          const __m256i tr1_2 = _mm256_unpackhi_epi32(tr0_0, tr0_1);
-          const __m256i tr1_3 = _mm256_unpackhi_epi32(tr0_2, tr0_3);
-          const __m256i tr1_4 = _mm256_unpacklo_epi32(tr0_4, tr0_5);
-          const __m256i tr1_5 = _mm256_unpacklo_epi32(tr0_6, tr0_7);
-          const __m256i tr1_6 = _mm256_unpackhi_epi32(tr0_4, tr0_5);
-          const __m256i tr1_7 = _mm256_unpackhi_epi32(tr0_6, tr0_7);
-          // 00 20  40  60  01 21  41  61  08 28  48  68  09 29  49  69
-          // 04 24  44  64  05 25  45  65  12 32  52  72  13 33  53  73
-          // 02 22  42  62  03 23  43  63  10 30  50  70  11 31  51  71
-          // 06 26  46  66  07 27  47  67  14 34  54  74  15 35  55  75
-          // 80 100 120 140 81 101 121 141 88 108 128 148 89 109 129 149
-          // 84 104 124 144 85 105 125 145 92 112 132 152 93 113 133 153
-          // 82 102 122 142 83 103 123 143 90 110 130 150 91 111 131 151
-          // 86 106 126 146 87 107 127 147 94 114 134 154 95 115 135 155
-          __m256i tr2_0 = _mm256_unpacklo_epi64(tr1_0, tr1_4);
-          __m256i tr2_1 = _mm256_unpackhi_epi64(tr1_0, tr1_4);
-          __m256i tr2_2 = _mm256_unpacklo_epi64(tr1_2, tr1_6);
-          __m256i tr2_3 = _mm256_unpackhi_epi64(tr1_2, tr1_6);
-          __m256i tr2_4 = _mm256_unpacklo_epi64(tr1_1, tr1_5);
-          __m256i tr2_5 = _mm256_unpackhi_epi64(tr1_1, tr1_5);
-          __m256i tr2_6 = _mm256_unpacklo_epi64(tr1_3, tr1_7);
-          __m256i tr2_7 = _mm256_unpackhi_epi64(tr1_3, tr1_7);
-          // 00 20 40 60 80 100 120 140 08 28 48 68 88 108 128 148
-          // 01 21 41 61 81 101 121 141 09 29 49 69 89 109 129 149
-          // 02 22 42 62 82 102 122 142 10 30 50 70 90 110 130 150
-          // 03 23 43 63 83 103 123 143 11 31 51 71 91 111 131 151
-          // 04 24 44 64 84 104 124 144 12 32 52 72 92 112 132 152
-          // 05 25 45 65 85 105 125 145 13 33 53 73 93 113 133 153
-          // 06 26 46 66 86 106 126 146 14 34 54 74 94 114 134 154
-          // 07 27 47 67 87 107 127 147 15 35 55 75 95 115 135 155
-          if (0 == pass) {
-            // output[j] = (output[j] + 1 + (output[j] > 0)) >> 2;
-            // TODO(cd): see quality impact of only doing
-            //           output[j] = (output[j] + 1) >> 2;
-            //           which would remove the code between here ...
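-            // SIMD form of that rounding: cmpgt(tr2, 0) returns an all-ones
-            // mask (-1) in lanes where tr2 is positive, so the subtraction
-            // below adds 1 exactly where tr2 > 0, e.g.
-            //   x = 5  -> (5 + 1 + 1) >> 2 = 1
-            //   x = -5 -> (-5 + 1) >> 2 = -1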
-            __m256i tr2_0_0 = _mm256_cmpgt_epi16(tr2_0, kZero);
-            __m256i tr2_1_0 = _mm256_cmpgt_epi16(tr2_1, kZero);
-            __m256i tr2_2_0 = _mm256_cmpgt_epi16(tr2_2, kZero);
-            __m256i tr2_3_0 = _mm256_cmpgt_epi16(tr2_3, kZero);
-            __m256i tr2_4_0 = _mm256_cmpgt_epi16(tr2_4, kZero);
-            __m256i tr2_5_0 = _mm256_cmpgt_epi16(tr2_5, kZero);
-            __m256i tr2_6_0 = _mm256_cmpgt_epi16(tr2_6, kZero);
-            __m256i tr2_7_0 = _mm256_cmpgt_epi16(tr2_7, kZero);
-            tr2_0 = _mm256_sub_epi16(tr2_0, tr2_0_0);
-            tr2_1 = _mm256_sub_epi16(tr2_1, tr2_1_0);
-            tr2_2 = _mm256_sub_epi16(tr2_2, tr2_2_0);
-            tr2_3 = _mm256_sub_epi16(tr2_3, tr2_3_0);
-            tr2_4 = _mm256_sub_epi16(tr2_4, tr2_4_0);
-            tr2_5 = _mm256_sub_epi16(tr2_5, tr2_5_0);
-            tr2_6 = _mm256_sub_epi16(tr2_6, tr2_6_0);
-            tr2_7 = _mm256_sub_epi16(tr2_7, tr2_7_0);
-            //           ... and here.
-            //           PS: also change code in vp9/encoder/vp9_dct.c
-            tr2_0 = _mm256_add_epi16(tr2_0, kOne);
-            tr2_1 = _mm256_add_epi16(tr2_1, kOne);
-            tr2_2 = _mm256_add_epi16(tr2_2, kOne);
-            tr2_3 = _mm256_add_epi16(tr2_3, kOne);
-            tr2_4 = _mm256_add_epi16(tr2_4, kOne);
-            tr2_5 = _mm256_add_epi16(tr2_5, kOne);
-            tr2_6 = _mm256_add_epi16(tr2_6, kOne);
-            tr2_7 = _mm256_add_epi16(tr2_7, kOne);
-            tr2_0 = _mm256_srai_epi16(tr2_0, 2);
-            tr2_1 = _mm256_srai_epi16(tr2_1, 2);
-            tr2_2 = _mm256_srai_epi16(tr2_2, 2);
-            tr2_3 = _mm256_srai_epi16(tr2_3, 2);
-            tr2_4 = _mm256_srai_epi16(tr2_4, 2);
-            tr2_5 = _mm256_srai_epi16(tr2_5, 2);
-            tr2_6 = _mm256_srai_epi16(tr2_6, 2);
-            tr2_7 = _mm256_srai_epi16(tr2_7, 2);
-          }
-          // Note: even though all these stores are aligned, using the aligned
-          //       intrinsic makes the code slightly slower.
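-          // After the transpose each 256-bit register carries two 8-element
-          // rows: the low 128 bits belong to the current 8-column block and
-          // the high 128 bits to the block eight columns later, hence the
-          // output_currStep/output_nextStep split.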
-          _mm_storeu_si128((__m128i *)(output_currStep + 0 * 32), _mm256_castsi256_si128(tr2_0));
-          _mm_storeu_si128((__m128i *)(output_currStep + 1 * 32), _mm256_castsi256_si128(tr2_1));
-          _mm_storeu_si128((__m128i *)(output_currStep + 2 * 32), _mm256_castsi256_si128(tr2_2));
-          _mm_storeu_si128((__m128i *)(output_currStep + 3 * 32), _mm256_castsi256_si128(tr2_3));
-          _mm_storeu_si128((__m128i *)(output_currStep + 4 * 32), _mm256_castsi256_si128(tr2_4));
-          _mm_storeu_si128((__m128i *)(output_currStep + 5 * 32), _mm256_castsi256_si128(tr2_5));
-          _mm_storeu_si128((__m128i *)(output_currStep + 6 * 32), _mm256_castsi256_si128(tr2_6));
-          _mm_storeu_si128((__m128i *)(output_currStep + 7 * 32), _mm256_castsi256_si128(tr2_7));
-
-          _mm_storeu_si128((__m128i *)(output_nextStep + 0 * 32), _mm256_extractf128_si256(tr2_0, 1));
-          _mm_storeu_si128((__m128i *)(output_nextStep + 1 * 32), _mm256_extractf128_si256(tr2_1, 1));
-          _mm_storeu_si128((__m128i *)(output_nextStep + 2 * 32), _mm256_extractf128_si256(tr2_2, 1));
-          _mm_storeu_si128((__m128i *)(output_nextStep + 3 * 32), _mm256_extractf128_si256(tr2_3, 1));
-          _mm_storeu_si128((__m128i *)(output_nextStep + 4 * 32), _mm256_extractf128_si256(tr2_4, 1));
-          _mm_storeu_si128((__m128i *)(output_nextStep + 5 * 32), _mm256_extractf128_si256(tr2_5, 1));
-          _mm_storeu_si128((__m128i *)(output_nextStep + 6 * 32), _mm256_extractf128_si256(tr2_6, 1));
-          _mm_storeu_si128((__m128i *)(output_nextStep + 7 * 32), _mm256_extractf128_si256(tr2_7, 1));
-          // Process next 8x8
-          output_currStep += 8;
-          output_nextStep += 8;
-        }
-      }
-    }
-  }
-}  // NOLINT
--- a/vp9/encoder/x86/vp9_dct32x32_sse2_impl.h
+++ /dev/null
@@ -1,3151 +1,0 @@
-/*
- *  Copyright (c) 2012 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <emmintrin.h>  // SSE2
-
-#include "vp9/encoder/vp9_dct.h"
-#include "vpx_dsp/txfm_common.h"
-#include "vpx_dsp/x86/txfm_common_sse2.h"
-#include "vpx_ports/mem.h"
-
-#if DCT_HIGH_BIT_DEPTH
-#define ADD_EPI16 _mm_adds_epi16
-#define SUB_EPI16 _mm_subs_epi16
-#if FDCT32x32_HIGH_PRECISION
-void vp9_fdct32x32_rows_c(const int16_t *intermediate, tran_low_t *out) {
-  int i, j;
-  for (i = 0; i < 32; ++i) {
-    tran_high_t temp_in[32], temp_out[32];
-    for (j = 0; j < 32; ++j)
-      temp_in[j] = intermediate[j * 32 + i];
-    vp9_fdct32(temp_in, temp_out, 0);
-    for (j = 0; j < 32; ++j)
-      out[j + i * 32] =
-          (tran_low_t)((temp_out[j] + 1 + (temp_out[j] < 0)) >> 2);
-  }
-}
-#define HIGH_FDCT32x32_2D_C vp9_highbd_fdct32x32_c
-#define HIGH_FDCT32x32_2D_ROWS_C vp9_fdct32x32_rows_c
-#else
-void vp9_fdct32x32_rd_rows_c(const int16_t *intermediate, tran_low_t *out) {
-  int i, j;
-  for (i = 0; i < 32; ++i) {
-    tran_high_t temp_in[32], temp_out[32];
-    for (j = 0; j < 32; ++j)
-      temp_in[j] = intermediate[j * 32 + i];
-    vp9_fdct32(temp_in, temp_out, 1);
-    for (j = 0; j < 32; ++j)
-      out[j + i * 32] = (tran_low_t)temp_out[j];
-  }
-}
-#define HIGH_FDCT32x32_2D_C vp9_highbd_fdct32x32_rd_c
-#define HIGH_FDCT32x32_2D_ROWS_C vp9_fdct32x32_rd_rows_c
-#endif  // FDCT32x32_HIGH_PRECISION
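-// Of the two row-pass helpers above, the high-precision variant scales its
-// output down by two bits with (x + 1 + (x < 0)) >> 2, while the _rd variant
-// selects vp9_fdct32's reduced-precision mode (third argument 1) and stores
-// the result unscaled; the latter is the cheaper path used for
-// rate-distortion estimation.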
-#else
-#define ADD_EPI16 _mm_add_epi16
-#define SUB_EPI16 _mm_sub_epi16
-#endif  // DCT_HIGH_BIT_DEPTH
-
-
-void FDCT32x32_2D(const int16_t *input,
-                  tran_low_t *output_org, int stride) {
-  // Calculate pre-multiplied strides
-  const int str1 = stride;
-  const int str2 = 2 * stride;
-  const int str3 = 2 * stride + str1;
-  // We need an intermediate buffer between passes.
-  DECLARE_ALIGNED(16, int16_t, intermediate[32 * 32]);
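-  // The 2-D transform is separable: pass 0 runs the 1-D transform down the
-  // input columns and leaves a transposed copy in 'intermediate'; pass 1
-  // then applies the same 1-D transform to that buffer, which amounts to
-  // transforming the rows of the first-pass result.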
-  // Constants.
-  //    In one case all lanes hold the same value; in every other case a
-  //    pair of cosine constants is repeated four times across the register
-  //    by constructing the 32-bit constant corresponding to that pair.
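-  //    For example, pair_set_epi16(a, b) is assumed here to broadcast the
-  //    pair as {a, b, a, b, a, b, a, b}, so that _mm_madd_epi16 against
-  //    interleaved data {x0, y0, x1, y1, ...} yields a*x + b*y in each
-  //    32-bit lane: one butterfly rotation per multiply-add.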
-  const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64);
-  const __m128i k__cospi_p16_m16 = pair_set_epi16(+cospi_16_64, -cospi_16_64);
-  const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64,   cospi_24_64);
-  const __m128i k__cospi_m24_m08 = pair_set_epi16(-cospi_24_64, -cospi_8_64);
-  const __m128i k__cospi_p24_p08 = pair_set_epi16(+cospi_24_64,  cospi_8_64);
-  const __m128i k__cospi_p12_p20 = pair_set_epi16(+cospi_12_64,  cospi_20_64);
-  const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64,  cospi_12_64);
-  const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64,   cospi_28_64);
-  const __m128i k__cospi_p28_p04 = pair_set_epi16(+cospi_28_64,  cospi_4_64);
-  const __m128i k__cospi_m28_m04 = pair_set_epi16(-cospi_28_64, -cospi_4_64);
-  const __m128i k__cospi_m12_m20 = pair_set_epi16(-cospi_12_64, -cospi_20_64);
-  const __m128i k__cospi_p30_p02 = pair_set_epi16(+cospi_30_64,  cospi_2_64);
-  const __m128i k__cospi_p14_p18 = pair_set_epi16(+cospi_14_64,  cospi_18_64);
-  const __m128i k__cospi_p22_p10 = pair_set_epi16(+cospi_22_64,  cospi_10_64);
-  const __m128i k__cospi_p06_p26 = pair_set_epi16(+cospi_6_64,   cospi_26_64);
-  const __m128i k__cospi_m26_p06 = pair_set_epi16(-cospi_26_64,  cospi_6_64);
-  const __m128i k__cospi_m10_p22 = pair_set_epi16(-cospi_10_64,  cospi_22_64);
-  const __m128i k__cospi_m18_p14 = pair_set_epi16(-cospi_18_64,  cospi_14_64);
-  const __m128i k__cospi_m02_p30 = pair_set_epi16(-cospi_2_64,   cospi_30_64);
-  const __m128i k__cospi_p31_p01 = pair_set_epi16(+cospi_31_64,  cospi_1_64);
-  const __m128i k__cospi_p15_p17 = pair_set_epi16(+cospi_15_64,  cospi_17_64);
-  const __m128i k__cospi_p23_p09 = pair_set_epi16(+cospi_23_64,  cospi_9_64);
-  const __m128i k__cospi_p07_p25 = pair_set_epi16(+cospi_7_64,   cospi_25_64);
-  const __m128i k__cospi_m25_p07 = pair_set_epi16(-cospi_25_64,  cospi_7_64);
-  const __m128i k__cospi_m09_p23 = pair_set_epi16(-cospi_9_64,   cospi_23_64);
-  const __m128i k__cospi_m17_p15 = pair_set_epi16(-cospi_17_64,  cospi_15_64);
-  const __m128i k__cospi_m01_p31 = pair_set_epi16(-cospi_1_64,   cospi_31_64);
-  const __m128i k__cospi_p27_p05 = pair_set_epi16(+cospi_27_64,  cospi_5_64);
-  const __m128i k__cospi_p11_p21 = pair_set_epi16(+cospi_11_64,  cospi_21_64);
-  const __m128i k__cospi_p19_p13 = pair_set_epi16(+cospi_19_64,  cospi_13_64);
-  const __m128i k__cospi_p03_p29 = pair_set_epi16(+cospi_3_64,   cospi_29_64);
-  const __m128i k__cospi_m29_p03 = pair_set_epi16(-cospi_29_64,  cospi_3_64);
-  const __m128i k__cospi_m13_p19 = pair_set_epi16(-cospi_13_64,  cospi_19_64);
-  const __m128i k__cospi_m21_p11 = pair_set_epi16(-cospi_21_64,  cospi_11_64);
-  const __m128i k__cospi_m05_p27 = pair_set_epi16(-cospi_5_64,   cospi_27_64);
-  const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
-  const __m128i kZero = _mm_set1_epi16(0);
-  const __m128i kOne  = _mm_set1_epi16(1);
-  // Do the two transform/transpose passes
-  int pass;
-#if DCT_HIGH_BIT_DEPTH
-  int overflow;
-#endif
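-  // In the DCT_HIGH_BIT_DEPTH build, each butterfly below is followed by a
-  // 16-bit saturation check; on the first detected overflow the whole
-  // transform falls back to the C path (HIGH_FDCT32x32_2D_C for pass 0,
-  // HIGH_FDCT32x32_2D_ROWS_C for pass 1) and this function returns.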
-  for (pass = 0; pass < 2; ++pass) {
-    // We process eight columns (transposed rows in second pass) at a time.
-    int column_start;
-    for (column_start = 0; column_start < 32; column_start += 8) {
-      __m128i step1[32];
-      __m128i step2[32];
-      __m128i step3[32];
-      __m128i out[32];
-      // Stage 1
-      // Note: even though all the loads below are aligned, using the aligned
-      //       intrinsic makes the code slightly slower.
-      if (0 == pass) {
-        const int16_t *in  = &input[column_start];
-        // step1[i] =  (in[ 0 * stride] + in[(32 -  1) * stride]) << 2;
-        // Note: the next four blocks could be in a loop. That would help the
-        //       instruction cache but is actually slower.
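-        // Concretely, the block for base row k (k = 0, 4, 8, 12) computes,
-        // for r = 0..3 and eight columns at a time:
-        //   step1[k + r]      = (in[(k + r) * stride] + in[(31 - k - r) * stride]) << 2;
-        //   step1[31 - k - r] = (in[(k + r) * stride] - in[(31 - k - r) * stride]) << 2;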
-        {
-          const int16_t *ina =  in +  0 * str1;
-          const int16_t *inb =  in + 31 * str1;
-          __m128i *step1a = &step1[ 0];
-          __m128i *step1b = &step1[31];
-          const __m128i ina0  = _mm_loadu_si128((const __m128i *)(ina));
-          const __m128i ina1  = _mm_loadu_si128((const __m128i *)(ina + str1));
-          const __m128i ina2  = _mm_loadu_si128((const __m128i *)(ina + str2));
-          const __m128i ina3  = _mm_loadu_si128((const __m128i *)(ina + str3));
-          const __m128i inb3  = _mm_loadu_si128((const __m128i *)(inb - str3));
-          const __m128i inb2  = _mm_loadu_si128((const __m128i *)(inb - str2));
-          const __m128i inb1  = _mm_loadu_si128((const __m128i *)(inb - str1));
-          const __m128i inb0  = _mm_loadu_si128((const __m128i *)(inb));
-          step1a[ 0] = _mm_add_epi16(ina0, inb0);
-          step1a[ 1] = _mm_add_epi16(ina1, inb1);
-          step1a[ 2] = _mm_add_epi16(ina2, inb2);
-          step1a[ 3] = _mm_add_epi16(ina3, inb3);
-          step1b[-3] = _mm_sub_epi16(ina3, inb3);
-          step1b[-2] = _mm_sub_epi16(ina2, inb2);
-          step1b[-1] = _mm_sub_epi16(ina1, inb1);
-          step1b[-0] = _mm_sub_epi16(ina0, inb0);
-          step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2);
-          step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2);
-          step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2);
-          step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2);
-          step1b[-3] = _mm_slli_epi16(step1b[-3], 2);
-          step1b[-2] = _mm_slli_epi16(step1b[-2], 2);
-          step1b[-1] = _mm_slli_epi16(step1b[-1], 2);
-          step1b[-0] = _mm_slli_epi16(step1b[-0], 2);
-        }
-        {
-          const int16_t *ina =  in +  4 * str1;
-          const int16_t *inb =  in + 27 * str1;
-          __m128i *step1a = &step1[ 4];
-          __m128i *step1b = &step1[27];
-          const __m128i ina0  = _mm_loadu_si128((const __m128i *)(ina));
-          const __m128i ina1  = _mm_loadu_si128((const __m128i *)(ina + str1));
-          const __m128i ina2  = _mm_loadu_si128((const __m128i *)(ina + str2));
-          const __m128i ina3  = _mm_loadu_si128((const __m128i *)(ina + str3));
-          const __m128i inb3  = _mm_loadu_si128((const __m128i *)(inb - str3));
-          const __m128i inb2  = _mm_loadu_si128((const __m128i *)(inb - str2));
-          const __m128i inb1  = _mm_loadu_si128((const __m128i *)(inb - str1));
-          const __m128i inb0  = _mm_loadu_si128((const __m128i *)(inb));
-          step1a[ 0] = _mm_add_epi16(ina0, inb0);
-          step1a[ 1] = _mm_add_epi16(ina1, inb1);
-          step1a[ 2] = _mm_add_epi16(ina2, inb2);
-          step1a[ 3] = _mm_add_epi16(ina3, inb3);
-          step1b[-3] = _mm_sub_epi16(ina3, inb3);
-          step1b[-2] = _mm_sub_epi16(ina2, inb2);
-          step1b[-1] = _mm_sub_epi16(ina1, inb1);
-          step1b[-0] = _mm_sub_epi16(ina0, inb0);
-          step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2);
-          step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2);
-          step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2);
-          step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2);
-          step1b[-3] = _mm_slli_epi16(step1b[-3], 2);
-          step1b[-2] = _mm_slli_epi16(step1b[-2], 2);
-          step1b[-1] = _mm_slli_epi16(step1b[-1], 2);
-          step1b[-0] = _mm_slli_epi16(step1b[-0], 2);
-        }
-        {
-          const int16_t *ina =  in +  8 * str1;
-          const int16_t *inb =  in + 23 * str1;
-          __m128i *step1a = &step1[ 8];
-          __m128i *step1b = &step1[23];
-          const __m128i ina0  = _mm_loadu_si128((const __m128i *)(ina));
-          const __m128i ina1  = _mm_loadu_si128((const __m128i *)(ina + str1));
-          const __m128i ina2  = _mm_loadu_si128((const __m128i *)(ina + str2));
-          const __m128i ina3  = _mm_loadu_si128((const __m128i *)(ina + str3));
-          const __m128i inb3  = _mm_loadu_si128((const __m128i *)(inb - str3));
-          const __m128i inb2  = _mm_loadu_si128((const __m128i *)(inb - str2));
-          const __m128i inb1  = _mm_loadu_si128((const __m128i *)(inb - str1));
-          const __m128i inb0  = _mm_loadu_si128((const __m128i *)(inb));
-          step1a[ 0] = _mm_add_epi16(ina0, inb0);
-          step1a[ 1] = _mm_add_epi16(ina1, inb1);
-          step1a[ 2] = _mm_add_epi16(ina2, inb2);
-          step1a[ 3] = _mm_add_epi16(ina3, inb3);
-          step1b[-3] = _mm_sub_epi16(ina3, inb3);
-          step1b[-2] = _mm_sub_epi16(ina2, inb2);
-          step1b[-1] = _mm_sub_epi16(ina1, inb1);
-          step1b[-0] = _mm_sub_epi16(ina0, inb0);
-          step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2);
-          step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2);
-          step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2);
-          step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2);
-          step1b[-3] = _mm_slli_epi16(step1b[-3], 2);
-          step1b[-2] = _mm_slli_epi16(step1b[-2], 2);
-          step1b[-1] = _mm_slli_epi16(step1b[-1], 2);
-          step1b[-0] = _mm_slli_epi16(step1b[-0], 2);
-        }
-        {
-          const int16_t *ina =  in + 12 * str1;
-          const int16_t *inb =  in + 19 * str1;
-          __m128i *step1a = &step1[12];
-          __m128i *step1b = &step1[19];
-          const __m128i ina0  = _mm_loadu_si128((const __m128i *)(ina));
-          const __m128i ina1  = _mm_loadu_si128((const __m128i *)(ina + str1));
-          const __m128i ina2  = _mm_loadu_si128((const __m128i *)(ina + str2));
-          const __m128i ina3  = _mm_loadu_si128((const __m128i *)(ina + str3));
-          const __m128i inb3  = _mm_loadu_si128((const __m128i *)(inb - str3));
-          const __m128i inb2  = _mm_loadu_si128((const __m128i *)(inb - str2));
-          const __m128i inb1  = _mm_loadu_si128((const __m128i *)(inb - str1));
-          const __m128i inb0  = _mm_loadu_si128((const __m128i *)(inb));
-          step1a[ 0] = _mm_add_epi16(ina0, inb0);
-          step1a[ 1] = _mm_add_epi16(ina1, inb1);
-          step1a[ 2] = _mm_add_epi16(ina2, inb2);
-          step1a[ 3] = _mm_add_epi16(ina3, inb3);
-          step1b[-3] = _mm_sub_epi16(ina3, inb3);
-          step1b[-2] = _mm_sub_epi16(ina2, inb2);
-          step1b[-1] = _mm_sub_epi16(ina1, inb1);
-          step1b[-0] = _mm_sub_epi16(ina0, inb0);
-          step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2);
-          step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2);
-          step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2);
-          step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2);
-          step1b[-3] = _mm_slli_epi16(step1b[-3], 2);
-          step1b[-2] = _mm_slli_epi16(step1b[-2], 2);
-          step1b[-1] = _mm_slli_epi16(step1b[-1], 2);
-          step1b[-0] = _mm_slli_epi16(step1b[-0], 2);
-        }
-      } else {
-        int16_t *in = &intermediate[column_start];
-        // step1[i] =  in[ 0 * 32] + in[(32 -  1) * 32];
-        // Note: using the same approach as above to share a common offset is
-        //       counter-productive, as all offsets here can be computed at
-        //       compile time.
-        // Note: the next four blocks could be in a loop. That would help the
-        //       instruction cache but is actually slower.
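-        // Here consecutive rows of the transposed intermediate are a fixed
-        // 32 elements apart, so every load offset is a compile-time constant.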
-        {
-          __m128i in00  = _mm_loadu_si128((const __m128i *)(in +  0 * 32));
-          __m128i in01  = _mm_loadu_si128((const __m128i *)(in +  1 * 32));
-          __m128i in02  = _mm_loadu_si128((const __m128i *)(in +  2 * 32));
-          __m128i in03  = _mm_loadu_si128((const __m128i *)(in +  3 * 32));
-          __m128i in28  = _mm_loadu_si128((const __m128i *)(in + 28 * 32));
-          __m128i in29  = _mm_loadu_si128((const __m128i *)(in + 29 * 32));
-          __m128i in30  = _mm_loadu_si128((const __m128i *)(in + 30 * 32));
-          __m128i in31  = _mm_loadu_si128((const __m128i *)(in + 31 * 32));
-          step1[0] = ADD_EPI16(in00, in31);
-          step1[1] = ADD_EPI16(in01, in30);
-          step1[2] = ADD_EPI16(in02, in29);
-          step1[3] = ADD_EPI16(in03, in28);
-          step1[28] = SUB_EPI16(in03, in28);
-          step1[29] = SUB_EPI16(in02, in29);
-          step1[30] = SUB_EPI16(in01, in30);
-          step1[31] = SUB_EPI16(in00, in31);
-#if DCT_HIGH_BIT_DEPTH
-          overflow = check_epi16_overflow_x8(&step1[0], &step1[1], &step1[2],
-                                             &step1[3], &step1[28], &step1[29],
-                                             &step1[30], &step1[31]);
-          if (overflow) {
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-            return;
-          }
-#endif  // DCT_HIGH_BIT_DEPTH
-        }
-        {
-          __m128i in04  = _mm_loadu_si128((const __m128i *)(in +  4 * 32));
-          __m128i in05  = _mm_loadu_si128((const __m128i *)(in +  5 * 32));
-          __m128i in06  = _mm_loadu_si128((const __m128i *)(in +  6 * 32));
-          __m128i in07  = _mm_loadu_si128((const __m128i *)(in +  7 * 32));
-          __m128i in24  = _mm_loadu_si128((const __m128i *)(in + 24 * 32));
-          __m128i in25  = _mm_loadu_si128((const __m128i *)(in + 25 * 32));
-          __m128i in26  = _mm_loadu_si128((const __m128i *)(in + 26 * 32));
-          __m128i in27  = _mm_loadu_si128((const __m128i *)(in + 27 * 32));
-          step1[4] = ADD_EPI16(in04, in27);
-          step1[5] = ADD_EPI16(in05, in26);
-          step1[6] = ADD_EPI16(in06, in25);
-          step1[7] = ADD_EPI16(in07, in24);
-          step1[24] = SUB_EPI16(in07, in24);
-          step1[25] = SUB_EPI16(in06, in25);
-          step1[26] = SUB_EPI16(in05, in26);
-          step1[27] = SUB_EPI16(in04, in27);
-#if DCT_HIGH_BIT_DEPTH
-          overflow = check_epi16_overflow_x8(&step1[4], &step1[5], &step1[6],
-                                             &step1[7], &step1[24], &step1[25],
-                                             &step1[26], &step1[27]);
-          if (overflow) {
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-            return;
-          }
-#endif  // DCT_HIGH_BIT_DEPTH
-        }
-        {
-          __m128i in08  = _mm_loadu_si128((const __m128i *)(in +  8 * 32));
-          __m128i in09  = _mm_loadu_si128((const __m128i *)(in +  9 * 32));
-          __m128i in10  = _mm_loadu_si128((const __m128i *)(in + 10 * 32));
-          __m128i in11  = _mm_loadu_si128((const __m128i *)(in + 11 * 32));
-          __m128i in20  = _mm_loadu_si128((const __m128i *)(in + 20 * 32));
-          __m128i in21  = _mm_loadu_si128((const __m128i *)(in + 21 * 32));
-          __m128i in22  = _mm_loadu_si128((const __m128i *)(in + 22 * 32));
-          __m128i in23  = _mm_loadu_si128((const __m128i *)(in + 23 * 32));
-          step1[8] = ADD_EPI16(in08, in23);
-          step1[9] = ADD_EPI16(in09, in22);
-          step1[10] = ADD_EPI16(in10, in21);
-          step1[11] = ADD_EPI16(in11, in20);
-          step1[20] = SUB_EPI16(in11, in20);
-          step1[21] = SUB_EPI16(in10, in21);
-          step1[22] = SUB_EPI16(in09, in22);
-          step1[23] = SUB_EPI16(in08, in23);
-#if DCT_HIGH_BIT_DEPTH
-          overflow = check_epi16_overflow_x8(&step1[8], &step1[9], &step1[10],
-                                             &step1[11], &step1[20], &step1[21],
-                                             &step1[22], &step1[23]);
-          if (overflow) {
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-            return;
-          }
-#endif  // DCT_HIGH_BIT_DEPTH
-        }
-        {
-          __m128i in12  = _mm_loadu_si128((const __m128i *)(in + 12 * 32));
-          __m128i in13  = _mm_loadu_si128((const __m128i *)(in + 13 * 32));
-          __m128i in14  = _mm_loadu_si128((const __m128i *)(in + 14 * 32));
-          __m128i in15  = _mm_loadu_si128((const __m128i *)(in + 15 * 32));
-          __m128i in16  = _mm_loadu_si128((const __m128i *)(in + 16 * 32));
-          __m128i in17  = _mm_loadu_si128((const __m128i *)(in + 17 * 32));
-          __m128i in18  = _mm_loadu_si128((const __m128i *)(in + 18 * 32));
-          __m128i in19  = _mm_loadu_si128((const __m128i *)(in + 19 * 32));
-          step1[12] = ADD_EPI16(in12, in19);
-          step1[13] = ADD_EPI16(in13, in18);
-          step1[14] = ADD_EPI16(in14, in17);
-          step1[15] = ADD_EPI16(in15, in16);
-          step1[16] = SUB_EPI16(in15, in16);
-          step1[17] = SUB_EPI16(in14, in17);
-          step1[18] = SUB_EPI16(in13, in18);
-          step1[19] = SUB_EPI16(in12, in19);
-#if DCT_HIGH_BIT_DEPTH
-          overflow = check_epi16_overflow_x8(&step1[12], &step1[13], &step1[14],
-                                             &step1[15], &step1[16], &step1[17],
-                                             &step1[18], &step1[19]);
-          if (overflow) {
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-            return;
-          }
-#endif  // DCT_HIGH_BIT_DEPTH
-        }
-      }
-      // Stage 2
-      {
-        step2[0] = ADD_EPI16(step1[0], step1[15]);
-        step2[1] = ADD_EPI16(step1[1], step1[14]);
-        step2[2] = ADD_EPI16(step1[2], step1[13]);
-        step2[3] = ADD_EPI16(step1[3], step1[12]);
-        step2[4] = ADD_EPI16(step1[4], step1[11]);
-        step2[5] = ADD_EPI16(step1[5], step1[10]);
-        step2[6] = ADD_EPI16(step1[6], step1[ 9]);
-        step2[7] = ADD_EPI16(step1[7], step1[ 8]);
-        step2[8] = SUB_EPI16(step1[7], step1[ 8]);
-        step2[9] = SUB_EPI16(step1[6], step1[ 9]);
-        step2[10] = SUB_EPI16(step1[5], step1[10]);
-        step2[11] = SUB_EPI16(step1[4], step1[11]);
-        step2[12] = SUB_EPI16(step1[3], step1[12]);
-        step2[13] = SUB_EPI16(step1[2], step1[13]);
-        step2[14] = SUB_EPI16(step1[1], step1[14]);
-        step2[15] = SUB_EPI16(step1[0], step1[15]);
-#if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x16(
-            &step2[0], &step2[1], &step2[2], &step2[3],
-            &step2[4], &step2[5], &step2[6], &step2[7],
-            &step2[8], &step2[9], &step2[10], &step2[11],
-            &step2[12], &step2[13], &step2[14], &step2[15]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
-        }
-#endif  // DCT_HIGH_BIT_DEPTH
-      }
-      {
-        const __m128i s2_20_0 = _mm_unpacklo_epi16(step1[27], step1[20]);
-        const __m128i s2_20_1 = _mm_unpackhi_epi16(step1[27], step1[20]);
-        const __m128i s2_21_0 = _mm_unpacklo_epi16(step1[26], step1[21]);
-        const __m128i s2_21_1 = _mm_unpackhi_epi16(step1[26], step1[21]);
-        const __m128i s2_22_0 = _mm_unpacklo_epi16(step1[25], step1[22]);
-        const __m128i s2_22_1 = _mm_unpackhi_epi16(step1[25], step1[22]);
-        const __m128i s2_23_0 = _mm_unpacklo_epi16(step1[24], step1[23]);
-        const __m128i s2_23_1 = _mm_unpackhi_epi16(step1[24], step1[23]);
-        const __m128i s2_20_2 = _mm_madd_epi16(s2_20_0, k__cospi_p16_m16);
-        const __m128i s2_20_3 = _mm_madd_epi16(s2_20_1, k__cospi_p16_m16);
-        const __m128i s2_21_2 = _mm_madd_epi16(s2_21_0, k__cospi_p16_m16);
-        const __m128i s2_21_3 = _mm_madd_epi16(s2_21_1, k__cospi_p16_m16);
-        const __m128i s2_22_2 = _mm_madd_epi16(s2_22_0, k__cospi_p16_m16);
-        const __m128i s2_22_3 = _mm_madd_epi16(s2_22_1, k__cospi_p16_m16);
-        const __m128i s2_23_2 = _mm_madd_epi16(s2_23_0, k__cospi_p16_m16);
-        const __m128i s2_23_3 = _mm_madd_epi16(s2_23_1, k__cospi_p16_m16);
-        const __m128i s2_24_2 = _mm_madd_epi16(s2_23_0, k__cospi_p16_p16);
-        const __m128i s2_24_3 = _mm_madd_epi16(s2_23_1, k__cospi_p16_p16);
-        const __m128i s2_25_2 = _mm_madd_epi16(s2_22_0, k__cospi_p16_p16);
-        const __m128i s2_25_3 = _mm_madd_epi16(s2_22_1, k__cospi_p16_p16);
-        const __m128i s2_26_2 = _mm_madd_epi16(s2_21_0, k__cospi_p16_p16);
-        const __m128i s2_26_3 = _mm_madd_epi16(s2_21_1, k__cospi_p16_p16);
-        const __m128i s2_27_2 = _mm_madd_epi16(s2_20_0, k__cospi_p16_p16);
-        const __m128i s2_27_3 = _mm_madd_epi16(s2_20_1, k__cospi_p16_p16);
-        // dct_const_round_shift
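-        // Scalar form of the add/shift pairs below (vp9's
-        // dct_const_round_shift): (x + DCT_CONST_ROUNDING) >> DCT_CONST_BITS,
-        // with DCT_CONST_ROUNDING == 1 << (DCT_CONST_BITS - 1).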
-        const __m128i s2_20_4 = _mm_add_epi32(s2_20_2, k__DCT_CONST_ROUNDING);
-        const __m128i s2_20_5 = _mm_add_epi32(s2_20_3, k__DCT_CONST_ROUNDING);
-        const __m128i s2_21_4 = _mm_add_epi32(s2_21_2, k__DCT_CONST_ROUNDING);
-        const __m128i s2_21_5 = _mm_add_epi32(s2_21_3, k__DCT_CONST_ROUNDING);
-        const __m128i s2_22_4 = _mm_add_epi32(s2_22_2, k__DCT_CONST_ROUNDING);
-        const __m128i s2_22_5 = _mm_add_epi32(s2_22_3, k__DCT_CONST_ROUNDING);
-        const __m128i s2_23_4 = _mm_add_epi32(s2_23_2, k__DCT_CONST_ROUNDING);
-        const __m128i s2_23_5 = _mm_add_epi32(s2_23_3, k__DCT_CONST_ROUNDING);
-        const __m128i s2_24_4 = _mm_add_epi32(s2_24_2, k__DCT_CONST_ROUNDING);
-        const __m128i s2_24_5 = _mm_add_epi32(s2_24_3, k__DCT_CONST_ROUNDING);
-        const __m128i s2_25_4 = _mm_add_epi32(s2_25_2, k__DCT_CONST_ROUNDING);
-        const __m128i s2_25_5 = _mm_add_epi32(s2_25_3, k__DCT_CONST_ROUNDING);
-        const __m128i s2_26_4 = _mm_add_epi32(s2_26_2, k__DCT_CONST_ROUNDING);
-        const __m128i s2_26_5 = _mm_add_epi32(s2_26_3, k__DCT_CONST_ROUNDING);
-        const __m128i s2_27_4 = _mm_add_epi32(s2_27_2, k__DCT_CONST_ROUNDING);
-        const __m128i s2_27_5 = _mm_add_epi32(s2_27_3, k__DCT_CONST_ROUNDING);
-        const __m128i s2_20_6 = _mm_srai_epi32(s2_20_4, DCT_CONST_BITS);
-        const __m128i s2_20_7 = _mm_srai_epi32(s2_20_5, DCT_CONST_BITS);
-        const __m128i s2_21_6 = _mm_srai_epi32(s2_21_4, DCT_CONST_BITS);
-        const __m128i s2_21_7 = _mm_srai_epi32(s2_21_5, DCT_CONST_BITS);
-        const __m128i s2_22_6 = _mm_srai_epi32(s2_22_4, DCT_CONST_BITS);
-        const __m128i s2_22_7 = _mm_srai_epi32(s2_22_5, DCT_CONST_BITS);
-        const __m128i s2_23_6 = _mm_srai_epi32(s2_23_4, DCT_CONST_BITS);
-        const __m128i s2_23_7 = _mm_srai_epi32(s2_23_5, DCT_CONST_BITS);
-        const __m128i s2_24_6 = _mm_srai_epi32(s2_24_4, DCT_CONST_BITS);
-        const __m128i s2_24_7 = _mm_srai_epi32(s2_24_5, DCT_CONST_BITS);
-        const __m128i s2_25_6 = _mm_srai_epi32(s2_25_4, DCT_CONST_BITS);
-        const __m128i s2_25_7 = _mm_srai_epi32(s2_25_5, DCT_CONST_BITS);
-        const __m128i s2_26_6 = _mm_srai_epi32(s2_26_4, DCT_CONST_BITS);
-        const __m128i s2_26_7 = _mm_srai_epi32(s2_26_5, DCT_CONST_BITS);
-        const __m128i s2_27_6 = _mm_srai_epi32(s2_27_4, DCT_CONST_BITS);
-        const __m128i s2_27_7 = _mm_srai_epi32(s2_27_5, DCT_CONST_BITS);
-        // Combine
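-        // _mm_packs_epi32 narrows each pair of 4x32-bit vectors back to one
-        // 8x16-bit vector with signed saturation.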
-        step2[20] = _mm_packs_epi32(s2_20_6, s2_20_7);
-        step2[21] = _mm_packs_epi32(s2_21_6, s2_21_7);
-        step2[22] = _mm_packs_epi32(s2_22_6, s2_22_7);
-        step2[23] = _mm_packs_epi32(s2_23_6, s2_23_7);
-        step2[24] = _mm_packs_epi32(s2_24_6, s2_24_7);
-        step2[25] = _mm_packs_epi32(s2_25_6, s2_25_7);
-        step2[26] = _mm_packs_epi32(s2_26_6, s2_26_7);
-        step2[27] = _mm_packs_epi32(s2_27_6, s2_27_7);
-#if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x8(&step2[20], &step2[21], &step2[22],
-                                           &step2[23], &step2[24], &step2[25],
-                                           &step2[26], &step2[27]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
-        }
-#endif  // DCT_HIGH_BIT_DEPTH
-      }
-
-#if !FDCT32x32_HIGH_PRECISION
-      // Scale the magnitude down (round, then shift right by two bits) so
-      // that the intermediate values stay within the 16-bit range.
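-      // The cmplt/sub/add/srai sequence below is the vector form of
-      //   out = (x + 1 + (x < 0)) >> 2;
-      // _mm_cmplt_epi16 yields all-ones (-1) in negative lanes, so
-      // subtracting that mask adds one to exactly those lanes.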
-      if (1 == pass) {
-        __m128i s3_00_0 = _mm_cmplt_epi16(step2[ 0], kZero);
-        __m128i s3_01_0 = _mm_cmplt_epi16(step2[ 1], kZero);
-        __m128i s3_02_0 = _mm_cmplt_epi16(step2[ 2], kZero);
-        __m128i s3_03_0 = _mm_cmplt_epi16(step2[ 3], kZero);
-        __m128i s3_04_0 = _mm_cmplt_epi16(step2[ 4], kZero);
-        __m128i s3_05_0 = _mm_cmplt_epi16(step2[ 5], kZero);
-        __m128i s3_06_0 = _mm_cmplt_epi16(step2[ 6], kZero);
-        __m128i s3_07_0 = _mm_cmplt_epi16(step2[ 7], kZero);
-        __m128i s3_08_0 = _mm_cmplt_epi16(step2[ 8], kZero);
-        __m128i s3_09_0 = _mm_cmplt_epi16(step2[ 9], kZero);
-        __m128i s3_10_0 = _mm_cmplt_epi16(step2[10], kZero);
-        __m128i s3_11_0 = _mm_cmplt_epi16(step2[11], kZero);
-        __m128i s3_12_0 = _mm_cmplt_epi16(step2[12], kZero);
-        __m128i s3_13_0 = _mm_cmplt_epi16(step2[13], kZero);
-        __m128i s3_14_0 = _mm_cmplt_epi16(step2[14], kZero);
-        __m128i s3_15_0 = _mm_cmplt_epi16(step2[15], kZero);
-        __m128i s3_16_0 = _mm_cmplt_epi16(step1[16], kZero);
-        __m128i s3_17_0 = _mm_cmplt_epi16(step1[17], kZero);
-        __m128i s3_18_0 = _mm_cmplt_epi16(step1[18], kZero);
-        __m128i s3_19_0 = _mm_cmplt_epi16(step1[19], kZero);
-        __m128i s3_20_0 = _mm_cmplt_epi16(step2[20], kZero);
-        __m128i s3_21_0 = _mm_cmplt_epi16(step2[21], kZero);
-        __m128i s3_22_0 = _mm_cmplt_epi16(step2[22], kZero);
-        __m128i s3_23_0 = _mm_cmplt_epi16(step2[23], kZero);
-        __m128i s3_24_0 = _mm_cmplt_epi16(step2[24], kZero);
-        __m128i s3_25_0 = _mm_cmplt_epi16(step2[25], kZero);
-        __m128i s3_26_0 = _mm_cmplt_epi16(step2[26], kZero);
-        __m128i s3_27_0 = _mm_cmplt_epi16(step2[27], kZero);
-        __m128i s3_28_0 = _mm_cmplt_epi16(step1[28], kZero);
-        __m128i s3_29_0 = _mm_cmplt_epi16(step1[29], kZero);
-        __m128i s3_30_0 = _mm_cmplt_epi16(step1[30], kZero);
-        __m128i s3_31_0 = _mm_cmplt_epi16(step1[31], kZero);
-
-        step2[0] = SUB_EPI16(step2[ 0], s3_00_0);
-        step2[1] = SUB_EPI16(step2[ 1], s3_01_0);
-        step2[2] = SUB_EPI16(step2[ 2], s3_02_0);
-        step2[3] = SUB_EPI16(step2[ 3], s3_03_0);
-        step2[4] = SUB_EPI16(step2[ 4], s3_04_0);
-        step2[5] = SUB_EPI16(step2[ 5], s3_05_0);
-        step2[6] = SUB_EPI16(step2[ 6], s3_06_0);
-        step2[7] = SUB_EPI16(step2[ 7], s3_07_0);
-        step2[8] = SUB_EPI16(step2[ 8], s3_08_0);
-        step2[9] = SUB_EPI16(step2[ 9], s3_09_0);
-        step2[10] = SUB_EPI16(step2[10], s3_10_0);
-        step2[11] = SUB_EPI16(step2[11], s3_11_0);
-        step2[12] = SUB_EPI16(step2[12], s3_12_0);
-        step2[13] = SUB_EPI16(step2[13], s3_13_0);
-        step2[14] = SUB_EPI16(step2[14], s3_14_0);
-        step2[15] = SUB_EPI16(step2[15], s3_15_0);
-        step1[16] = SUB_EPI16(step1[16], s3_16_0);
-        step1[17] = SUB_EPI16(step1[17], s3_17_0);
-        step1[18] = SUB_EPI16(step1[18], s3_18_0);
-        step1[19] = SUB_EPI16(step1[19], s3_19_0);
-        step2[20] = SUB_EPI16(step2[20], s3_20_0);
-        step2[21] = SUB_EPI16(step2[21], s3_21_0);
-        step2[22] = SUB_EPI16(step2[22], s3_22_0);
-        step2[23] = SUB_EPI16(step2[23], s3_23_0);
-        step2[24] = SUB_EPI16(step2[24], s3_24_0);
-        step2[25] = SUB_EPI16(step2[25], s3_25_0);
-        step2[26] = SUB_EPI16(step2[26], s3_26_0);
-        step2[27] = SUB_EPI16(step2[27], s3_27_0);
-        step1[28] = SUB_EPI16(step1[28], s3_28_0);
-        step1[29] = SUB_EPI16(step1[29], s3_29_0);
-        step1[30] = SUB_EPI16(step1[30], s3_30_0);
-        step1[31] = SUB_EPI16(step1[31], s3_31_0);
-#if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x32(
-            &step2[0], &step2[1], &step2[2], &step2[3],
-            &step2[4], &step2[5], &step2[6], &step2[7],
-            &step2[8], &step2[9], &step2[10], &step2[11],
-            &step2[12], &step2[13], &step2[14], &step2[15],
-            &step1[16], &step1[17], &step1[18], &step1[19],
-            &step2[20], &step2[21], &step2[22], &step2[23],
-            &step2[24], &step2[25], &step2[26], &step2[27],
-            &step1[28], &step1[29], &step1[30], &step1[31]);
-        if (overflow) {
-          HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
-        }
-#endif  // DCT_HIGH_BIT_DEPTH
-        step2[0] = _mm_add_epi16(step2[ 0], kOne);
-        step2[1] = _mm_add_epi16(step2[ 1], kOne);
-        step2[2] = _mm_add_epi16(step2[ 2], kOne);
-        step2[3] = _mm_add_epi16(step2[ 3], kOne);
-        step2[4] = _mm_add_epi16(step2[ 4], kOne);
-        step2[5] = _mm_add_epi16(step2[ 5], kOne);
-        step2[6] = _mm_add_epi16(step2[ 6], kOne);
-        step2[7] = _mm_add_epi16(step2[ 7], kOne);
-        step2[8] = _mm_add_epi16(step2[ 8], kOne);
-        step2[9] = _mm_add_epi16(step2[ 9], kOne);
-        step2[10] = _mm_add_epi16(step2[10], kOne);
-        step2[11] = _mm_add_epi16(step2[11], kOne);
-        step2[12] = _mm_add_epi16(step2[12], kOne);
-        step2[13] = _mm_add_epi16(step2[13], kOne);
-        step2[14] = _mm_add_epi16(step2[14], kOne);
-        step2[15] = _mm_add_epi16(step2[15], kOne);
-        step1[16] = _mm_add_epi16(step1[16], kOne);
-        step1[17] = _mm_add_epi16(step1[17], kOne);
-        step1[18] = _mm_add_epi16(step1[18], kOne);
-        step1[19] = _mm_add_epi16(step1[19], kOne);
-        step2[20] = _mm_add_epi16(step2[20], kOne);
-        step2[21] = _mm_add_epi16(step2[21], kOne);
-        step2[22] = _mm_add_epi16(step2[22], kOne);
-        step2[23] = _mm_add_epi16(step2[23], kOne);
-        step2[24] = _mm_add_epi16(step2[24], kOne);
-        step2[25] = _mm_add_epi16(step2[25], kOne);
-        step2[26] = _mm_add_epi16(step2[26], kOne);
-        step2[27] = _mm_add_epi16(step2[27], kOne);
-        step1[28] = _mm_add_epi16(step1[28], kOne);
-        step1[29] = _mm_add_epi16(step1[29], kOne);
-        step1[30] = _mm_add_epi16(step1[30], kOne);
-        step1[31] = _mm_add_epi16(step1[31], kOne);
-
-        step2[0] = _mm_srai_epi16(step2[ 0], 2);
-        step2[1] = _mm_srai_epi16(step2[ 1], 2);
-        step2[2] = _mm_srai_epi16(step2[ 2], 2);
-        step2[3] = _mm_srai_epi16(step2[ 3], 2);
-        step2[4] = _mm_srai_epi16(step2[ 4], 2);
-        step2[5] = _mm_srai_epi16(step2[ 5], 2);
-        step2[6] = _mm_srai_epi16(step2[ 6], 2);
-        step2[7] = _mm_srai_epi16(step2[ 7], 2);
-        step2[8] = _mm_srai_epi16(step2[ 8], 2);
-        step2[9] = _mm_srai_epi16(step2[ 9], 2);
-        step2[10] = _mm_srai_epi16(step2[10], 2);
-        step2[11] = _mm_srai_epi16(step2[11], 2);
-        step2[12] = _mm_srai_epi16(step2[12], 2);
-        step2[13] = _mm_srai_epi16(step2[13], 2);
-        step2[14] = _mm_srai_epi16(step2[14], 2);
-        step2[15] = _mm_srai_epi16(step2[15], 2);
-        step1[16] = _mm_srai_epi16(step1[16], 2);
-        step1[17] = _mm_srai_epi16(step1[17], 2);
-        step1[18] = _mm_srai_epi16(step1[18], 2);
-        step1[19] = _mm_srai_epi16(step1[19], 2);
-        step2[20] = _mm_srai_epi16(step2[20], 2);
-        step2[21] = _mm_srai_epi16(step2[21], 2);
-        step2[22] = _mm_srai_epi16(step2[22], 2);
-        step2[23] = _mm_srai_epi16(step2[23], 2);
-        step2[24] = _mm_srai_epi16(step2[24], 2);
-        step2[25] = _mm_srai_epi16(step2[25], 2);
-        step2[26] = _mm_srai_epi16(step2[26], 2);
-        step2[27] = _mm_srai_epi16(step2[27], 2);
-        step1[28] = _mm_srai_epi16(step1[28], 2);
-        step1[29] = _mm_srai_epi16(step1[29], 2);
-        step1[30] = _mm_srai_epi16(step1[30], 2);
-        step1[31] = _mm_srai_epi16(step1[31], 2);
-      }
-#endif  // !FDCT32x32_HIGH_PRECISION
-
-#if FDCT32x32_HIGH_PRECISION
-      if (pass == 0) {
-#endif
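-      // In the FDCT32x32_HIGH_PRECISION build, stages 3-7 below run only for
-      // pass 0; the second pass is handled by a separate higher-precision
-      // path further down (outside this hunk).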
-      // Stage 3
-      {
-        step3[0] = ADD_EPI16(step2[(8 - 1)], step2[0]);
-        step3[1] = ADD_EPI16(step2[(8 - 2)], step2[1]);
-        step3[2] = ADD_EPI16(step2[(8 - 3)], step2[2]);
-        step3[3] = ADD_EPI16(step2[(8 - 4)], step2[3]);
-        step3[4] = SUB_EPI16(step2[(8 - 5)], step2[4]);
-        step3[5] = SUB_EPI16(step2[(8 - 6)], step2[5]);
-        step3[6] = SUB_EPI16(step2[(8 - 7)], step2[6]);
-        step3[7] = SUB_EPI16(step2[(8 - 8)], step2[7]);
-#if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x8(&step3[0], &step3[1], &step3[2],
-                                           &step3[3], &step3[4], &step3[5],
-                                           &step3[6], &step3[7]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
-        }
-#endif  // DCT_HIGH_BIT_DEPTH
-      }
-      {
-        const __m128i s3_10_0 = _mm_unpacklo_epi16(step2[13], step2[10]);
-        const __m128i s3_10_1 = _mm_unpackhi_epi16(step2[13], step2[10]);
-        const __m128i s3_11_0 = _mm_unpacklo_epi16(step2[12], step2[11]);
-        const __m128i s3_11_1 = _mm_unpackhi_epi16(step2[12], step2[11]);
-        const __m128i s3_10_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_m16);
-        const __m128i s3_10_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_m16);
-        const __m128i s3_11_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_m16);
-        const __m128i s3_11_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_m16);
-        const __m128i s3_12_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_p16);
-        const __m128i s3_12_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_p16);
-        const __m128i s3_13_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_p16);
-        const __m128i s3_13_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_p16);
-        // dct_const_round_shift
-        const __m128i s3_10_4 = _mm_add_epi32(s3_10_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_10_5 = _mm_add_epi32(s3_10_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_11_4 = _mm_add_epi32(s3_11_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_11_5 = _mm_add_epi32(s3_11_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_12_4 = _mm_add_epi32(s3_12_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_12_5 = _mm_add_epi32(s3_12_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_13_4 = _mm_add_epi32(s3_13_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_13_5 = _mm_add_epi32(s3_13_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_10_6 = _mm_srai_epi32(s3_10_4, DCT_CONST_BITS);
-        const __m128i s3_10_7 = _mm_srai_epi32(s3_10_5, DCT_CONST_BITS);
-        const __m128i s3_11_6 = _mm_srai_epi32(s3_11_4, DCT_CONST_BITS);
-        const __m128i s3_11_7 = _mm_srai_epi32(s3_11_5, DCT_CONST_BITS);
-        const __m128i s3_12_6 = _mm_srai_epi32(s3_12_4, DCT_CONST_BITS);
-        const __m128i s3_12_7 = _mm_srai_epi32(s3_12_5, DCT_CONST_BITS);
-        const __m128i s3_13_6 = _mm_srai_epi32(s3_13_4, DCT_CONST_BITS);
-        const __m128i s3_13_7 = _mm_srai_epi32(s3_13_5, DCT_CONST_BITS);
-        // Combine
-        step3[10] = _mm_packs_epi32(s3_10_6, s3_10_7);
-        step3[11] = _mm_packs_epi32(s3_11_6, s3_11_7);
-        step3[12] = _mm_packs_epi32(s3_12_6, s3_12_7);
-        step3[13] = _mm_packs_epi32(s3_13_6, s3_13_7);
-#if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x4(&step3[10], &step3[11],
-                                           &step3[12], &step3[13]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
-        }
-#endif  // DCT_HIGH_BIT_DEPTH
-      }
-      {
-        step3[16] = ADD_EPI16(step2[23], step1[16]);
-        step3[17] = ADD_EPI16(step2[22], step1[17]);
-        step3[18] = ADD_EPI16(step2[21], step1[18]);
-        step3[19] = ADD_EPI16(step2[20], step1[19]);
-        step3[20] = SUB_EPI16(step1[19], step2[20]);
-        step3[21] = SUB_EPI16(step1[18], step2[21]);
-        step3[22] = SUB_EPI16(step1[17], step2[22]);
-        step3[23] = SUB_EPI16(step1[16], step2[23]);
-        step3[24] = SUB_EPI16(step1[31], step2[24]);
-        step3[25] = SUB_EPI16(step1[30], step2[25]);
-        step3[26] = SUB_EPI16(step1[29], step2[26]);
-        step3[27] = SUB_EPI16(step1[28], step2[27]);
-        step3[28] = ADD_EPI16(step2[27], step1[28]);
-        step3[29] = ADD_EPI16(step2[26], step1[29]);
-        step3[30] = ADD_EPI16(step2[25], step1[30]);
-        step3[31] = ADD_EPI16(step2[24], step1[31]);
-#if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x16(
-            &step3[16], &step3[17], &step3[18], &step3[19],
-            &step3[20], &step3[21], &step3[22], &step3[23],
-            &step3[24], &step3[25], &step3[26], &step3[27],
-            &step3[28], &step3[29], &step3[30], &step3[31]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
-        }
-#endif  // DCT_HIGH_BIT_DEPTH
-      }
-
-      // Stage 4
-      {
-        step1[0] = ADD_EPI16(step3[ 3], step3[ 0]);
-        step1[1] = ADD_EPI16(step3[ 2], step3[ 1]);
-        step1[2] = SUB_EPI16(step3[ 1], step3[ 2]);
-        step1[3] = SUB_EPI16(step3[ 0], step3[ 3]);
-        step1[8] = ADD_EPI16(step3[11], step2[ 8]);
-        step1[9] = ADD_EPI16(step3[10], step2[ 9]);
-        step1[10] = SUB_EPI16(step2[ 9], step3[10]);
-        step1[11] = SUB_EPI16(step2[ 8], step3[11]);
-        step1[12] = SUB_EPI16(step2[15], step3[12]);
-        step1[13] = SUB_EPI16(step2[14], step3[13]);
-        step1[14] = ADD_EPI16(step3[13], step2[14]);
-        step1[15] = ADD_EPI16(step3[12], step2[15]);
-#if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x16(
-            &step1[0], &step1[1], &step1[2], &step1[3],
-            &step1[4], &step1[5], &step1[6], &step1[7],
-            &step1[8], &step1[9], &step1[10], &step1[11],
-            &step1[12], &step1[13], &step1[14], &step1[15]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
-        }
-#endif  // DCT_HIGH_BIT_DEPTH
-      }
-      {
-        const __m128i s1_05_0 = _mm_unpacklo_epi16(step3[6], step3[5]);
-        const __m128i s1_05_1 = _mm_unpackhi_epi16(step3[6], step3[5]);
-        const __m128i s1_05_2 = _mm_madd_epi16(s1_05_0, k__cospi_p16_m16);
-        const __m128i s1_05_3 = _mm_madd_epi16(s1_05_1, k__cospi_p16_m16);
-        const __m128i s1_06_2 = _mm_madd_epi16(s1_05_0, k__cospi_p16_p16);
-        const __m128i s1_06_3 = _mm_madd_epi16(s1_05_1, k__cospi_p16_p16);
-        // dct_const_round_shift
-        const __m128i s1_05_4 = _mm_add_epi32(s1_05_2, k__DCT_CONST_ROUNDING);
-        const __m128i s1_05_5 = _mm_add_epi32(s1_05_3, k__DCT_CONST_ROUNDING);
-        const __m128i s1_06_4 = _mm_add_epi32(s1_06_2, k__DCT_CONST_ROUNDING);
-        const __m128i s1_06_5 = _mm_add_epi32(s1_06_3, k__DCT_CONST_ROUNDING);
-        const __m128i s1_05_6 = _mm_srai_epi32(s1_05_4, DCT_CONST_BITS);
-        const __m128i s1_05_7 = _mm_srai_epi32(s1_05_5, DCT_CONST_BITS);
-        const __m128i s1_06_6 = _mm_srai_epi32(s1_06_4, DCT_CONST_BITS);
-        const __m128i s1_06_7 = _mm_srai_epi32(s1_06_5, DCT_CONST_BITS);
-        // Combine
-        step1[5] = _mm_packs_epi32(s1_05_6, s1_05_7);
-        step1[6] = _mm_packs_epi32(s1_06_6, s1_06_7);
-#if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x2(&step1[5], &step1[6]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
-        }
-#endif  // DCT_HIGH_BIT_DEPTH
-      }
-      {
-        const __m128i s1_18_0 = _mm_unpacklo_epi16(step3[18], step3[29]);
-        const __m128i s1_18_1 = _mm_unpackhi_epi16(step3[18], step3[29]);
-        const __m128i s1_19_0 = _mm_unpacklo_epi16(step3[19], step3[28]);
-        const __m128i s1_19_1 = _mm_unpackhi_epi16(step3[19], step3[28]);
-        const __m128i s1_20_0 = _mm_unpacklo_epi16(step3[20], step3[27]);
-        const __m128i s1_20_1 = _mm_unpackhi_epi16(step3[20], step3[27]);
-        const __m128i s1_21_0 = _mm_unpacklo_epi16(step3[21], step3[26]);
-        const __m128i s1_21_1 = _mm_unpackhi_epi16(step3[21], step3[26]);
-        const __m128i s1_18_2 = _mm_madd_epi16(s1_18_0, k__cospi_m08_p24);
-        const __m128i s1_18_3 = _mm_madd_epi16(s1_18_1, k__cospi_m08_p24);
-        const __m128i s1_19_2 = _mm_madd_epi16(s1_19_0, k__cospi_m08_p24);
-        const __m128i s1_19_3 = _mm_madd_epi16(s1_19_1, k__cospi_m08_p24);
-        const __m128i s1_20_2 = _mm_madd_epi16(s1_20_0, k__cospi_m24_m08);
-        const __m128i s1_20_3 = _mm_madd_epi16(s1_20_1, k__cospi_m24_m08);
-        const __m128i s1_21_2 = _mm_madd_epi16(s1_21_0, k__cospi_m24_m08);
-        const __m128i s1_21_3 = _mm_madd_epi16(s1_21_1, k__cospi_m24_m08);
-        const __m128i s1_26_2 = _mm_madd_epi16(s1_21_0, k__cospi_m08_p24);
-        const __m128i s1_26_3 = _mm_madd_epi16(s1_21_1, k__cospi_m08_p24);
-        const __m128i s1_27_2 = _mm_madd_epi16(s1_20_0, k__cospi_m08_p24);
-        const __m128i s1_27_3 = _mm_madd_epi16(s1_20_1, k__cospi_m08_p24);
-        const __m128i s1_28_2 = _mm_madd_epi16(s1_19_0, k__cospi_p24_p08);
-        const __m128i s1_28_3 = _mm_madd_epi16(s1_19_1, k__cospi_p24_p08);
-        const __m128i s1_29_2 = _mm_madd_epi16(s1_18_0, k__cospi_p24_p08);
-        const __m128i s1_29_3 = _mm_madd_epi16(s1_18_1, k__cospi_p24_p08);
-        // dct_const_round_shift
-        const __m128i s1_18_4 = _mm_add_epi32(s1_18_2, k__DCT_CONST_ROUNDING);
-        const __m128i s1_18_5 = _mm_add_epi32(s1_18_3, k__DCT_CONST_ROUNDING);
-        const __m128i s1_19_4 = _mm_add_epi32(s1_19_2, k__DCT_CONST_ROUNDING);
-        const __m128i s1_19_5 = _mm_add_epi32(s1_19_3, k__DCT_CONST_ROUNDING);
-        const __m128i s1_20_4 = _mm_add_epi32(s1_20_2, k__DCT_CONST_ROUNDING);
-        const __m128i s1_20_5 = _mm_add_epi32(s1_20_3, k__DCT_CONST_ROUNDING);
-        const __m128i s1_21_4 = _mm_add_epi32(s1_21_2, k__DCT_CONST_ROUNDING);
-        const __m128i s1_21_5 = _mm_add_epi32(s1_21_3, k__DCT_CONST_ROUNDING);
-        const __m128i s1_26_4 = _mm_add_epi32(s1_26_2, k__DCT_CONST_ROUNDING);
-        const __m128i s1_26_5 = _mm_add_epi32(s1_26_3, k__DCT_CONST_ROUNDING);
-        const __m128i s1_27_4 = _mm_add_epi32(s1_27_2, k__DCT_CONST_ROUNDING);
-        const __m128i s1_27_5 = _mm_add_epi32(s1_27_3, k__DCT_CONST_ROUNDING);
-        const __m128i s1_28_4 = _mm_add_epi32(s1_28_2, k__DCT_CONST_ROUNDING);
-        const __m128i s1_28_5 = _mm_add_epi32(s1_28_3, k__DCT_CONST_ROUNDING);
-        const __m128i s1_29_4 = _mm_add_epi32(s1_29_2, k__DCT_CONST_ROUNDING);
-        const __m128i s1_29_5 = _mm_add_epi32(s1_29_3, k__DCT_CONST_ROUNDING);
-        const __m128i s1_18_6 = _mm_srai_epi32(s1_18_4, DCT_CONST_BITS);
-        const __m128i s1_18_7 = _mm_srai_epi32(s1_18_5, DCT_CONST_BITS);
-        const __m128i s1_19_6 = _mm_srai_epi32(s1_19_4, DCT_CONST_BITS);
-        const __m128i s1_19_7 = _mm_srai_epi32(s1_19_5, DCT_CONST_BITS);
-        const __m128i s1_20_6 = _mm_srai_epi32(s1_20_4, DCT_CONST_BITS);
-        const __m128i s1_20_7 = _mm_srai_epi32(s1_20_5, DCT_CONST_BITS);
-        const __m128i s1_21_6 = _mm_srai_epi32(s1_21_4, DCT_CONST_BITS);
-        const __m128i s1_21_7 = _mm_srai_epi32(s1_21_5, DCT_CONST_BITS);
-        const __m128i s1_26_6 = _mm_srai_epi32(s1_26_4, DCT_CONST_BITS);
-        const __m128i s1_26_7 = _mm_srai_epi32(s1_26_5, DCT_CONST_BITS);
-        const __m128i s1_27_6 = _mm_srai_epi32(s1_27_4, DCT_CONST_BITS);
-        const __m128i s1_27_7 = _mm_srai_epi32(s1_27_5, DCT_CONST_BITS);
-        const __m128i s1_28_6 = _mm_srai_epi32(s1_28_4, DCT_CONST_BITS);
-        const __m128i s1_28_7 = _mm_srai_epi32(s1_28_5, DCT_CONST_BITS);
-        const __m128i s1_29_6 = _mm_srai_epi32(s1_29_4, DCT_CONST_BITS);
-        const __m128i s1_29_7 = _mm_srai_epi32(s1_29_5, DCT_CONST_BITS);
-        // Combine
-        step1[18] = _mm_packs_epi32(s1_18_6, s1_18_7);
-        step1[19] = _mm_packs_epi32(s1_19_6, s1_19_7);
-        step1[20] = _mm_packs_epi32(s1_20_6, s1_20_7);
-        step1[21] = _mm_packs_epi32(s1_21_6, s1_21_7);
-        step1[26] = _mm_packs_epi32(s1_26_6, s1_26_7);
-        step1[27] = _mm_packs_epi32(s1_27_6, s1_27_7);
-        step1[28] = _mm_packs_epi32(s1_28_6, s1_28_7);
-        step1[29] = _mm_packs_epi32(s1_29_6, s1_29_7);
-#if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x8(&step1[18], &step1[19], &step1[20],
-                                           &step1[21], &step1[26], &step1[27],
-                                           &step1[28], &step1[29]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
-        }
-#endif  // DCT_HIGH_BIT_DEPTH
-      }
-      // Stage 5
-      {
-        step2[4] = ADD_EPI16(step1[5], step3[4]);
-        step2[5] = SUB_EPI16(step3[4], step1[5]);
-        step2[6] = SUB_EPI16(step3[7], step1[6]);
-        step2[7] = ADD_EPI16(step1[6], step3[7]);
-#if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x4(&step2[4], &step2[5],
-                                           &step2[6], &step2[7]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
-        }
-#endif  // DCT_HIGH_BIT_DEPTH
-      }
-      {
-        const __m128i out_00_0 = _mm_unpacklo_epi16(step1[0], step1[1]);
-        const __m128i out_00_1 = _mm_unpackhi_epi16(step1[0], step1[1]);
-        const __m128i out_08_0 = _mm_unpacklo_epi16(step1[2], step1[3]);
-        const __m128i out_08_1 = _mm_unpackhi_epi16(step1[2], step1[3]);
-        const __m128i out_00_2 = _mm_madd_epi16(out_00_0, k__cospi_p16_p16);
-        const __m128i out_00_3 = _mm_madd_epi16(out_00_1, k__cospi_p16_p16);
-        const __m128i out_16_2 = _mm_madd_epi16(out_00_0, k__cospi_p16_m16);
-        const __m128i out_16_3 = _mm_madd_epi16(out_00_1, k__cospi_p16_m16);
-        const __m128i out_08_2 = _mm_madd_epi16(out_08_0, k__cospi_p24_p08);
-        const __m128i out_08_3 = _mm_madd_epi16(out_08_1, k__cospi_p24_p08);
-        const __m128i out_24_2 = _mm_madd_epi16(out_08_0, k__cospi_m08_p24);
-        const __m128i out_24_3 = _mm_madd_epi16(out_08_1, k__cospi_m08_p24);
-        // dct_const_round_shift
-        const __m128i out_00_4 = _mm_add_epi32(out_00_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_00_5 = _mm_add_epi32(out_00_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_16_4 = _mm_add_epi32(out_16_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_16_5 = _mm_add_epi32(out_16_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_08_4 = _mm_add_epi32(out_08_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_08_5 = _mm_add_epi32(out_08_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_24_4 = _mm_add_epi32(out_24_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_24_5 = _mm_add_epi32(out_24_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_00_6 = _mm_srai_epi32(out_00_4, DCT_CONST_BITS);
-        const __m128i out_00_7 = _mm_srai_epi32(out_00_5, DCT_CONST_BITS);
-        const __m128i out_16_6 = _mm_srai_epi32(out_16_4, DCT_CONST_BITS);
-        const __m128i out_16_7 = _mm_srai_epi32(out_16_5, DCT_CONST_BITS);
-        const __m128i out_08_6 = _mm_srai_epi32(out_08_4, DCT_CONST_BITS);
-        const __m128i out_08_7 = _mm_srai_epi32(out_08_5, DCT_CONST_BITS);
-        const __m128i out_24_6 = _mm_srai_epi32(out_24_4, DCT_CONST_BITS);
-        const __m128i out_24_7 = _mm_srai_epi32(out_24_5, DCT_CONST_BITS);
-        // Combine
-        out[ 0] = _mm_packs_epi32(out_00_6, out_00_7);
-        out[16] = _mm_packs_epi32(out_16_6, out_16_7);
-        out[ 8] = _mm_packs_epi32(out_08_6, out_08_7);
-        out[24] = _mm_packs_epi32(out_24_6, out_24_7);
-#if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x4(&out[0], &out[16],
-                                           &out[8], &out[24]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
-        }
-#endif  // DCT_HIGH_BIT_DEPTH
-      }
-      {
-        const __m128i s2_09_0 = _mm_unpacklo_epi16(step1[ 9], step1[14]);
-        const __m128i s2_09_1 = _mm_unpackhi_epi16(step1[ 9], step1[14]);
-        const __m128i s2_10_0 = _mm_unpacklo_epi16(step1[10], step1[13]);
-        const __m128i s2_10_1 = _mm_unpackhi_epi16(step1[10], step1[13]);
-        const __m128i s2_09_2 = _mm_madd_epi16(s2_09_0, k__cospi_m08_p24);
-        const __m128i s2_09_3 = _mm_madd_epi16(s2_09_1, k__cospi_m08_p24);
-        const __m128i s2_10_2 = _mm_madd_epi16(s2_10_0, k__cospi_m24_m08);
-        const __m128i s2_10_3 = _mm_madd_epi16(s2_10_1, k__cospi_m24_m08);
-        const __m128i s2_13_2 = _mm_madd_epi16(s2_10_0, k__cospi_m08_p24);
-        const __m128i s2_13_3 = _mm_madd_epi16(s2_10_1, k__cospi_m08_p24);
-        const __m128i s2_14_2 = _mm_madd_epi16(s2_09_0, k__cospi_p24_p08);
-        const __m128i s2_14_3 = _mm_madd_epi16(s2_09_1, k__cospi_p24_p08);
-        // dct_const_round_shift
-        const __m128i s2_09_4 = _mm_add_epi32(s2_09_2, k__DCT_CONST_ROUNDING);
-        const __m128i s2_09_5 = _mm_add_epi32(s2_09_3, k__DCT_CONST_ROUNDING);
-        const __m128i s2_10_4 = _mm_add_epi32(s2_10_2, k__DCT_CONST_ROUNDING);
-        const __m128i s2_10_5 = _mm_add_epi32(s2_10_3, k__DCT_CONST_ROUNDING);
-        const __m128i s2_13_4 = _mm_add_epi32(s2_13_2, k__DCT_CONST_ROUNDING);
-        const __m128i s2_13_5 = _mm_add_epi32(s2_13_3, k__DCT_CONST_ROUNDING);
-        const __m128i s2_14_4 = _mm_add_epi32(s2_14_2, k__DCT_CONST_ROUNDING);
-        const __m128i s2_14_5 = _mm_add_epi32(s2_14_3, k__DCT_CONST_ROUNDING);
-        const __m128i s2_09_6 = _mm_srai_epi32(s2_09_4, DCT_CONST_BITS);
-        const __m128i s2_09_7 = _mm_srai_epi32(s2_09_5, DCT_CONST_BITS);
-        const __m128i s2_10_6 = _mm_srai_epi32(s2_10_4, DCT_CONST_BITS);
-        const __m128i s2_10_7 = _mm_srai_epi32(s2_10_5, DCT_CONST_BITS);
-        const __m128i s2_13_6 = _mm_srai_epi32(s2_13_4, DCT_CONST_BITS);
-        const __m128i s2_13_7 = _mm_srai_epi32(s2_13_5, DCT_CONST_BITS);
-        const __m128i s2_14_6 = _mm_srai_epi32(s2_14_4, DCT_CONST_BITS);
-        const __m128i s2_14_7 = _mm_srai_epi32(s2_14_5, DCT_CONST_BITS);
-        // Combine
-        step2[ 9] = _mm_packs_epi32(s2_09_6, s2_09_7);
-        step2[10] = _mm_packs_epi32(s2_10_6, s2_10_7);
-        step2[13] = _mm_packs_epi32(s2_13_6, s2_13_7);
-        step2[14] = _mm_packs_epi32(s2_14_6, s2_14_7);
-#if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x4(&step2[9], &step2[10],
-                                           &step2[13], &step2[14]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
-        }
-#endif  // DCT_HIGH_BIT_DEPTH
-      }
-      {
-        step2[16] = ADD_EPI16(step1[19], step3[16]);
-        step2[17] = ADD_EPI16(step1[18], step3[17]);
-        step2[18] = SUB_EPI16(step3[17], step1[18]);
-        step2[19] = SUB_EPI16(step3[16], step1[19]);
-        step2[20] = SUB_EPI16(step3[23], step1[20]);
-        step2[21] = SUB_EPI16(step3[22], step1[21]);
-        step2[22] = ADD_EPI16(step1[21], step3[22]);
-        step2[23] = ADD_EPI16(step1[20], step3[23]);
-        step2[24] = ADD_EPI16(step1[27], step3[24]);
-        step2[25] = ADD_EPI16(step1[26], step3[25]);
-        step2[26] = SUB_EPI16(step3[25], step1[26]);
-        step2[27] = SUB_EPI16(step3[24], step1[27]);
-        step2[28] = SUB_EPI16(step3[31], step1[28]);
-        step2[29] = SUB_EPI16(step3[30], step1[29]);
-        step2[30] = ADD_EPI16(step1[29], step3[30]);
-        step2[31] = ADD_EPI16(step1[28], step3[31]);
-#if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x16(
-            &step2[16], &step2[17], &step2[18], &step2[19],
-            &step2[20], &step2[21], &step2[22], &step2[23],
-            &step2[24], &step2[25], &step2[26], &step2[27],
-            &step2[28], &step2[29], &step2[30], &step2[31]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
-        }
-#endif  // DCT_HIGH_BIT_DEPTH
-      }
-      // Stage 6
-      {
-        const __m128i out_04_0 = _mm_unpacklo_epi16(step2[4], step2[7]);
-        const __m128i out_04_1 = _mm_unpackhi_epi16(step2[4], step2[7]);
-        const __m128i out_20_0 = _mm_unpacklo_epi16(step2[5], step2[6]);
-        const __m128i out_20_1 = _mm_unpackhi_epi16(step2[5], step2[6]);
-        const __m128i out_12_0 = _mm_unpacklo_epi16(step2[5], step2[6]);
-        const __m128i out_12_1 = _mm_unpackhi_epi16(step2[5], step2[6]);
-        const __m128i out_28_0 = _mm_unpacklo_epi16(step2[4], step2[7]);
-        const __m128i out_28_1 = _mm_unpackhi_epi16(step2[4], step2[7]);
-        const __m128i out_04_2 = _mm_madd_epi16(out_04_0, k__cospi_p28_p04);
-        const __m128i out_04_3 = _mm_madd_epi16(out_04_1, k__cospi_p28_p04);
-        const __m128i out_20_2 = _mm_madd_epi16(out_20_0, k__cospi_p12_p20);
-        const __m128i out_20_3 = _mm_madd_epi16(out_20_1, k__cospi_p12_p20);
-        const __m128i out_12_2 = _mm_madd_epi16(out_12_0, k__cospi_m20_p12);
-        const __m128i out_12_3 = _mm_madd_epi16(out_12_1, k__cospi_m20_p12);
-        const __m128i out_28_2 = _mm_madd_epi16(out_28_0, k__cospi_m04_p28);
-        const __m128i out_28_3 = _mm_madd_epi16(out_28_1, k__cospi_m04_p28);
-        // dct_const_round_shift
-        const __m128i out_04_4 = _mm_add_epi32(out_04_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_04_5 = _mm_add_epi32(out_04_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_20_4 = _mm_add_epi32(out_20_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_20_5 = _mm_add_epi32(out_20_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_12_4 = _mm_add_epi32(out_12_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_12_5 = _mm_add_epi32(out_12_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_28_4 = _mm_add_epi32(out_28_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_28_5 = _mm_add_epi32(out_28_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_04_6 = _mm_srai_epi32(out_04_4, DCT_CONST_BITS);
-        const __m128i out_04_7 = _mm_srai_epi32(out_04_5, DCT_CONST_BITS);
-        const __m128i out_20_6 = _mm_srai_epi32(out_20_4, DCT_CONST_BITS);
-        const __m128i out_20_7 = _mm_srai_epi32(out_20_5, DCT_CONST_BITS);
-        const __m128i out_12_6 = _mm_srai_epi32(out_12_4, DCT_CONST_BITS);
-        const __m128i out_12_7 = _mm_srai_epi32(out_12_5, DCT_CONST_BITS);
-        const __m128i out_28_6 = _mm_srai_epi32(out_28_4, DCT_CONST_BITS);
-        const __m128i out_28_7 = _mm_srai_epi32(out_28_5, DCT_CONST_BITS);
-        // Combine
-        out[4] = _mm_packs_epi32(out_04_6, out_04_7);
-        out[20] = _mm_packs_epi32(out_20_6, out_20_7);
-        out[12] = _mm_packs_epi32(out_12_6, out_12_7);
-        out[28] = _mm_packs_epi32(out_28_6, out_28_7);
-#if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x4(&out[4], &out[20],
-                                           &out[12], &out[28]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
-        }
-#endif  // DCT_HIGH_BIT_DEPTH
-      }
-      {
-        step3[8] = ADD_EPI16(step2[ 9], step1[ 8]);
-        step3[9] = SUB_EPI16(step1[ 8], step2[ 9]);
-        step3[10] = SUB_EPI16(step1[11], step2[10]);
-        step3[11] = ADD_EPI16(step2[10], step1[11]);
-        step3[12] = ADD_EPI16(step2[13], step1[12]);
-        step3[13] = SUB_EPI16(step1[12], step2[13]);
-        step3[14] = SUB_EPI16(step1[15], step2[14]);
-        step3[15] = ADD_EPI16(step2[14], step1[15]);
-#if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x8(&step3[8], &step3[9], &step3[10],
-                                           &step3[11], &step3[12], &step3[13],
-                                           &step3[14], &step3[15]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
-        }
-#endif  // DCT_HIGH_BIT_DEPTH
-      }
-      {
-        const __m128i s3_17_0 = _mm_unpacklo_epi16(step2[17], step2[30]);
-        const __m128i s3_17_1 = _mm_unpackhi_epi16(step2[17], step2[30]);
-        const __m128i s3_18_0 = _mm_unpacklo_epi16(step2[18], step2[29]);
-        const __m128i s3_18_1 = _mm_unpackhi_epi16(step2[18], step2[29]);
-        const __m128i s3_21_0 = _mm_unpacklo_epi16(step2[21], step2[26]);
-        const __m128i s3_21_1 = _mm_unpackhi_epi16(step2[21], step2[26]);
-        const __m128i s3_22_0 = _mm_unpacklo_epi16(step2[22], step2[25]);
-        const __m128i s3_22_1 = _mm_unpackhi_epi16(step2[22], step2[25]);
-        const __m128i s3_17_2 = _mm_madd_epi16(s3_17_0, k__cospi_m04_p28);
-        const __m128i s3_17_3 = _mm_madd_epi16(s3_17_1, k__cospi_m04_p28);
-        const __m128i s3_18_2 = _mm_madd_epi16(s3_18_0, k__cospi_m28_m04);
-        const __m128i s3_18_3 = _mm_madd_epi16(s3_18_1, k__cospi_m28_m04);
-        const __m128i s3_21_2 = _mm_madd_epi16(s3_21_0, k__cospi_m20_p12);
-        const __m128i s3_21_3 = _mm_madd_epi16(s3_21_1, k__cospi_m20_p12);
-        const __m128i s3_22_2 = _mm_madd_epi16(s3_22_0, k__cospi_m12_m20);
-        const __m128i s3_22_3 = _mm_madd_epi16(s3_22_1, k__cospi_m12_m20);
-        const __m128i s3_25_2 = _mm_madd_epi16(s3_22_0, k__cospi_m20_p12);
-        const __m128i s3_25_3 = _mm_madd_epi16(s3_22_1, k__cospi_m20_p12);
-        const __m128i s3_26_2 = _mm_madd_epi16(s3_21_0, k__cospi_p12_p20);
-        const __m128i s3_26_3 = _mm_madd_epi16(s3_21_1, k__cospi_p12_p20);
-        const __m128i s3_29_2 = _mm_madd_epi16(s3_18_0, k__cospi_m04_p28);
-        const __m128i s3_29_3 = _mm_madd_epi16(s3_18_1, k__cospi_m04_p28);
-        const __m128i s3_30_2 = _mm_madd_epi16(s3_17_0, k__cospi_p28_p04);
-        const __m128i s3_30_3 = _mm_madd_epi16(s3_17_1, k__cospi_p28_p04);
-        // dct_const_round_shift
-        const __m128i s3_17_4 = _mm_add_epi32(s3_17_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_17_5 = _mm_add_epi32(s3_17_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_18_4 = _mm_add_epi32(s3_18_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_18_5 = _mm_add_epi32(s3_18_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_21_4 = _mm_add_epi32(s3_21_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_21_5 = _mm_add_epi32(s3_21_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_22_4 = _mm_add_epi32(s3_22_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_22_5 = _mm_add_epi32(s3_22_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_17_6 = _mm_srai_epi32(s3_17_4, DCT_CONST_BITS);
-        const __m128i s3_17_7 = _mm_srai_epi32(s3_17_5, DCT_CONST_BITS);
-        const __m128i s3_18_6 = _mm_srai_epi32(s3_18_4, DCT_CONST_BITS);
-        const __m128i s3_18_7 = _mm_srai_epi32(s3_18_5, DCT_CONST_BITS);
-        const __m128i s3_21_6 = _mm_srai_epi32(s3_21_4, DCT_CONST_BITS);
-        const __m128i s3_21_7 = _mm_srai_epi32(s3_21_5, DCT_CONST_BITS);
-        const __m128i s3_22_6 = _mm_srai_epi32(s3_22_4, DCT_CONST_BITS);
-        const __m128i s3_22_7 = _mm_srai_epi32(s3_22_5, DCT_CONST_BITS);
-        const __m128i s3_25_4 = _mm_add_epi32(s3_25_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_25_5 = _mm_add_epi32(s3_25_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_26_4 = _mm_add_epi32(s3_26_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_26_5 = _mm_add_epi32(s3_26_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_29_4 = _mm_add_epi32(s3_29_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_29_5 = _mm_add_epi32(s3_29_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_30_4 = _mm_add_epi32(s3_30_2, k__DCT_CONST_ROUNDING);
-        const __m128i s3_30_5 = _mm_add_epi32(s3_30_3, k__DCT_CONST_ROUNDING);
-        const __m128i s3_25_6 = _mm_srai_epi32(s3_25_4, DCT_CONST_BITS);
-        const __m128i s3_25_7 = _mm_srai_epi32(s3_25_5, DCT_CONST_BITS);
-        const __m128i s3_26_6 = _mm_srai_epi32(s3_26_4, DCT_CONST_BITS);
-        const __m128i s3_26_7 = _mm_srai_epi32(s3_26_5, DCT_CONST_BITS);
-        const __m128i s3_29_6 = _mm_srai_epi32(s3_29_4, DCT_CONST_BITS);
-        const __m128i s3_29_7 = _mm_srai_epi32(s3_29_5, DCT_CONST_BITS);
-        const __m128i s3_30_6 = _mm_srai_epi32(s3_30_4, DCT_CONST_BITS);
-        const __m128i s3_30_7 = _mm_srai_epi32(s3_30_5, DCT_CONST_BITS);
-        // Combine
-        step3[17] = _mm_packs_epi32(s3_17_6, s3_17_7);
-        step3[18] = _mm_packs_epi32(s3_18_6, s3_18_7);
-        step3[21] = _mm_packs_epi32(s3_21_6, s3_21_7);
-        step3[22] = _mm_packs_epi32(s3_22_6, s3_22_7);
-        // Combine
-        step3[25] = _mm_packs_epi32(s3_25_6, s3_25_7);
-        step3[26] = _mm_packs_epi32(s3_26_6, s3_26_7);
-        step3[29] = _mm_packs_epi32(s3_29_6, s3_29_7);
-        step3[30] = _mm_packs_epi32(s3_30_6, s3_30_7);
-#if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x8(&step3[17], &step3[18], &step3[21],
-                                           &step3[22], &step3[25], &step3[26],
-                                           &step3[29], &step3[30]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
-        }
-#endif  // DCT_HIGH_BIT_DEPTH
-      }
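/* Editor's note: the madd/add/srai/packs sequence repeated in each block is
 * the vector form of libvpx's scalar rounding helper: multiply by the 14-bit
 * fixed-point cosine constants, add half, shift back down, then
 * saturate-pack the 32-bit lanes to 16 bits.  A minimal scalar sketch,
 * assuming the usual constants (DCT_CONST_BITS == 14, so
 * k__DCT_CONST_ROUNDING holds 1 << 13 per lane):
 */
static INLINE int32_t dct_const_round_shift_scalar(int64_t input) {
  /* Matches _mm_add_epi32(x, k__DCT_CONST_ROUNDING) followed by
   * _mm_srai_epi32(x, DCT_CONST_BITS). */
  return (int32_t)((input + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS);
}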
-      // Stage 7
-      {
-        const __m128i out_02_0 = _mm_unpacklo_epi16(step3[ 8], step3[15]);
-        const __m128i out_02_1 = _mm_unpackhi_epi16(step3[ 8], step3[15]);
-        const __m128i out_18_0 = _mm_unpacklo_epi16(step3[ 9], step3[14]);
-        const __m128i out_18_1 = _mm_unpackhi_epi16(step3[ 9], step3[14]);
-        const __m128i out_10_0 = _mm_unpacklo_epi16(step3[10], step3[13]);
-        const __m128i out_10_1 = _mm_unpackhi_epi16(step3[10], step3[13]);
-        const __m128i out_26_0 = _mm_unpacklo_epi16(step3[11], step3[12]);
-        const __m128i out_26_1 = _mm_unpackhi_epi16(step3[11], step3[12]);
-        const __m128i out_02_2 = _mm_madd_epi16(out_02_0, k__cospi_p30_p02);
-        const __m128i out_02_3 = _mm_madd_epi16(out_02_1, k__cospi_p30_p02);
-        const __m128i out_18_2 = _mm_madd_epi16(out_18_0, k__cospi_p14_p18);
-        const __m128i out_18_3 = _mm_madd_epi16(out_18_1, k__cospi_p14_p18);
-        const __m128i out_10_2 = _mm_madd_epi16(out_10_0, k__cospi_p22_p10);
-        const __m128i out_10_3 = _mm_madd_epi16(out_10_1, k__cospi_p22_p10);
-        const __m128i out_26_2 = _mm_madd_epi16(out_26_0, k__cospi_p06_p26);
-        const __m128i out_26_3 = _mm_madd_epi16(out_26_1, k__cospi_p06_p26);
-        const __m128i out_06_2 = _mm_madd_epi16(out_26_0, k__cospi_m26_p06);
-        const __m128i out_06_3 = _mm_madd_epi16(out_26_1, k__cospi_m26_p06);
-        const __m128i out_22_2 = _mm_madd_epi16(out_10_0, k__cospi_m10_p22);
-        const __m128i out_22_3 = _mm_madd_epi16(out_10_1, k__cospi_m10_p22);
-        const __m128i out_14_2 = _mm_madd_epi16(out_18_0, k__cospi_m18_p14);
-        const __m128i out_14_3 = _mm_madd_epi16(out_18_1, k__cospi_m18_p14);
-        const __m128i out_30_2 = _mm_madd_epi16(out_02_0, k__cospi_m02_p30);
-        const __m128i out_30_3 = _mm_madd_epi16(out_02_1, k__cospi_m02_p30);
-        // dct_const_round_shift
-        const __m128i out_02_4 = _mm_add_epi32(out_02_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_02_5 = _mm_add_epi32(out_02_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_18_4 = _mm_add_epi32(out_18_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_18_5 = _mm_add_epi32(out_18_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_10_4 = _mm_add_epi32(out_10_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_10_5 = _mm_add_epi32(out_10_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_26_4 = _mm_add_epi32(out_26_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_26_5 = _mm_add_epi32(out_26_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_06_4 = _mm_add_epi32(out_06_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_06_5 = _mm_add_epi32(out_06_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_22_4 = _mm_add_epi32(out_22_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_22_5 = _mm_add_epi32(out_22_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_14_4 = _mm_add_epi32(out_14_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_14_5 = _mm_add_epi32(out_14_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_30_4 = _mm_add_epi32(out_30_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_30_5 = _mm_add_epi32(out_30_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_02_6 = _mm_srai_epi32(out_02_4, DCT_CONST_BITS);
-        const __m128i out_02_7 = _mm_srai_epi32(out_02_5, DCT_CONST_BITS);
-        const __m128i out_18_6 = _mm_srai_epi32(out_18_4, DCT_CONST_BITS);
-        const __m128i out_18_7 = _mm_srai_epi32(out_18_5, DCT_CONST_BITS);
-        const __m128i out_10_6 = _mm_srai_epi32(out_10_4, DCT_CONST_BITS);
-        const __m128i out_10_7 = _mm_srai_epi32(out_10_5, DCT_CONST_BITS);
-        const __m128i out_26_6 = _mm_srai_epi32(out_26_4, DCT_CONST_BITS);
-        const __m128i out_26_7 = _mm_srai_epi32(out_26_5, DCT_CONST_BITS);
-        const __m128i out_06_6 = _mm_srai_epi32(out_06_4, DCT_CONST_BITS);
-        const __m128i out_06_7 = _mm_srai_epi32(out_06_5, DCT_CONST_BITS);
-        const __m128i out_22_6 = _mm_srai_epi32(out_22_4, DCT_CONST_BITS);
-        const __m128i out_22_7 = _mm_srai_epi32(out_22_5, DCT_CONST_BITS);
-        const __m128i out_14_6 = _mm_srai_epi32(out_14_4, DCT_CONST_BITS);
-        const __m128i out_14_7 = _mm_srai_epi32(out_14_5, DCT_CONST_BITS);
-        const __m128i out_30_6 = _mm_srai_epi32(out_30_4, DCT_CONST_BITS);
-        const __m128i out_30_7 = _mm_srai_epi32(out_30_5, DCT_CONST_BITS);
-        // Combine
-        out[ 2] = _mm_packs_epi32(out_02_6, out_02_7);
-        out[18] = _mm_packs_epi32(out_18_6, out_18_7);
-        out[10] = _mm_packs_epi32(out_10_6, out_10_7);
-        out[26] = _mm_packs_epi32(out_26_6, out_26_7);
-        out[ 6] = _mm_packs_epi32(out_06_6, out_06_7);
-        out[22] = _mm_packs_epi32(out_22_6, out_22_7);
-        out[14] = _mm_packs_epi32(out_14_6, out_14_7);
-        out[30] = _mm_packs_epi32(out_30_6, out_30_7);
-#if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x8(&out[2], &out[18], &out[10],
-                                           &out[26], &out[6], &out[22],
-                                           &out[14], &out[30]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
-        }
-#endif  // DCT_HIGH_BIT_DEPTH
-      }
-      {
-        step1[16] = ADD_EPI16(step3[17], step2[16]);
-        step1[17] = SUB_EPI16(step2[16], step3[17]);
-        step1[18] = SUB_EPI16(step2[19], step3[18]);
-        step1[19] = ADD_EPI16(step3[18], step2[19]);
-        step1[20] = ADD_EPI16(step3[21], step2[20]);
-        step1[21] = SUB_EPI16(step2[20], step3[21]);
-        step1[22] = SUB_EPI16(step2[23], step3[22]);
-        step1[23] = ADD_EPI16(step3[22], step2[23]);
-        step1[24] = ADD_EPI16(step3[25], step2[24]);
-        step1[25] = SUB_EPI16(step2[24], step3[25]);
-        step1[26] = SUB_EPI16(step2[27], step3[26]);
-        step1[27] = ADD_EPI16(step3[26], step2[27]);
-        step1[28] = ADD_EPI16(step3[29], step2[28]);
-        step1[29] = SUB_EPI16(step2[28], step3[29]);
-        step1[30] = SUB_EPI16(step2[31], step3[30]);
-        step1[31] = ADD_EPI16(step3[30], step2[31]);
-#if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x16(
-            &step1[16], &step1[17], &step1[18], &step1[19],
-            &step1[20], &step1[21], &step1[22], &step1[23],
-            &step1[24], &step1[25], &step1[26], &step1[27],
-            &step1[28], &step1[29], &step1[30], &step1[31]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
-        }
-#endif  // DCT_HIGH_BIT_DEPTH
-      }
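/* Editor's note: each brace block of plain ADD_EPI16/SUB_EPI16 pairs, like
 * the one above, is one butterfly stage of the 32-point transform: every
 * input pair (a, b) is replaced by (a + b, a - b), eight coefficients per
 * register.  A scalar sketch of one such pair, with a hypothetical helper
 * name:
 */
static INLINE void butterfly_16(int16_t a, int16_t b, int16_t *sum,
                                int16_t *diff) {
  *sum = (int16_t)(a + b);  /* e.g. step1[16] = step2[16] + step3[17] */
  *diff = (int16_t)(a - b); /* e.g. step1[17] = step2[16] - step3[17] */
}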
-      // Final stage --- output indices are bit-reversed.
-      {
-        const __m128i out_01_0 = _mm_unpacklo_epi16(step1[16], step1[31]);
-        const __m128i out_01_1 = _mm_unpackhi_epi16(step1[16], step1[31]);
-        const __m128i out_17_0 = _mm_unpacklo_epi16(step1[17], step1[30]);
-        const __m128i out_17_1 = _mm_unpackhi_epi16(step1[17], step1[30]);
-        const __m128i out_09_0 = _mm_unpacklo_epi16(step1[18], step1[29]);
-        const __m128i out_09_1 = _mm_unpackhi_epi16(step1[18], step1[29]);
-        const __m128i out_25_0 = _mm_unpacklo_epi16(step1[19], step1[28]);
-        const __m128i out_25_1 = _mm_unpackhi_epi16(step1[19], step1[28]);
-        const __m128i out_01_2 = _mm_madd_epi16(out_01_0, k__cospi_p31_p01);
-        const __m128i out_01_3 = _mm_madd_epi16(out_01_1, k__cospi_p31_p01);
-        const __m128i out_17_2 = _mm_madd_epi16(out_17_0, k__cospi_p15_p17);
-        const __m128i out_17_3 = _mm_madd_epi16(out_17_1, k__cospi_p15_p17);
-        const __m128i out_09_2 = _mm_madd_epi16(out_09_0, k__cospi_p23_p09);
-        const __m128i out_09_3 = _mm_madd_epi16(out_09_1, k__cospi_p23_p09);
-        const __m128i out_25_2 = _mm_madd_epi16(out_25_0, k__cospi_p07_p25);
-        const __m128i out_25_3 = _mm_madd_epi16(out_25_1, k__cospi_p07_p25);
-        const __m128i out_07_2 = _mm_madd_epi16(out_25_0, k__cospi_m25_p07);
-        const __m128i out_07_3 = _mm_madd_epi16(out_25_1, k__cospi_m25_p07);
-        const __m128i out_23_2 = _mm_madd_epi16(out_09_0, k__cospi_m09_p23);
-        const __m128i out_23_3 = _mm_madd_epi16(out_09_1, k__cospi_m09_p23);
-        const __m128i out_15_2 = _mm_madd_epi16(out_17_0, k__cospi_m17_p15);
-        const __m128i out_15_3 = _mm_madd_epi16(out_17_1, k__cospi_m17_p15);
-        const __m128i out_31_2 = _mm_madd_epi16(out_01_0, k__cospi_m01_p31);
-        const __m128i out_31_3 = _mm_madd_epi16(out_01_1, k__cospi_m01_p31);
-        // dct_const_round_shift
-        const __m128i out_01_4 = _mm_add_epi32(out_01_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_01_5 = _mm_add_epi32(out_01_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_17_4 = _mm_add_epi32(out_17_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_17_5 = _mm_add_epi32(out_17_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_09_4 = _mm_add_epi32(out_09_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_09_5 = _mm_add_epi32(out_09_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_25_4 = _mm_add_epi32(out_25_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_25_5 = _mm_add_epi32(out_25_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_07_4 = _mm_add_epi32(out_07_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_07_5 = _mm_add_epi32(out_07_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_23_4 = _mm_add_epi32(out_23_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_23_5 = _mm_add_epi32(out_23_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_15_4 = _mm_add_epi32(out_15_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_15_5 = _mm_add_epi32(out_15_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_31_4 = _mm_add_epi32(out_31_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_31_5 = _mm_add_epi32(out_31_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_01_6 = _mm_srai_epi32(out_01_4, DCT_CONST_BITS);
-        const __m128i out_01_7 = _mm_srai_epi32(out_01_5, DCT_CONST_BITS);
-        const __m128i out_17_6 = _mm_srai_epi32(out_17_4, DCT_CONST_BITS);
-        const __m128i out_17_7 = _mm_srai_epi32(out_17_5, DCT_CONST_BITS);
-        const __m128i out_09_6 = _mm_srai_epi32(out_09_4, DCT_CONST_BITS);
-        const __m128i out_09_7 = _mm_srai_epi32(out_09_5, DCT_CONST_BITS);
-        const __m128i out_25_6 = _mm_srai_epi32(out_25_4, DCT_CONST_BITS);
-        const __m128i out_25_7 = _mm_srai_epi32(out_25_5, DCT_CONST_BITS);
-        const __m128i out_07_6 = _mm_srai_epi32(out_07_4, DCT_CONST_BITS);
-        const __m128i out_07_7 = _mm_srai_epi32(out_07_5, DCT_CONST_BITS);
-        const __m128i out_23_6 = _mm_srai_epi32(out_23_4, DCT_CONST_BITS);
-        const __m128i out_23_7 = _mm_srai_epi32(out_23_5, DCT_CONST_BITS);
-        const __m128i out_15_6 = _mm_srai_epi32(out_15_4, DCT_CONST_BITS);
-        const __m128i out_15_7 = _mm_srai_epi32(out_15_5, DCT_CONST_BITS);
-        const __m128i out_31_6 = _mm_srai_epi32(out_31_4, DCT_CONST_BITS);
-        const __m128i out_31_7 = _mm_srai_epi32(out_31_5, DCT_CONST_BITS);
-        // Combine
-        out[ 1] = _mm_packs_epi32(out_01_6, out_01_7);
-        out[17] = _mm_packs_epi32(out_17_6, out_17_7);
-        out[ 9] = _mm_packs_epi32(out_09_6, out_09_7);
-        out[25] = _mm_packs_epi32(out_25_6, out_25_7);
-        out[ 7] = _mm_packs_epi32(out_07_6, out_07_7);
-        out[23] = _mm_packs_epi32(out_23_6, out_23_7);
-        out[15] = _mm_packs_epi32(out_15_6, out_15_7);
-        out[31] = _mm_packs_epi32(out_31_6, out_31_7);
-#if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x8(&out[1], &out[17], &out[9],
-                                           &out[25], &out[7], &out[23],
-                                           &out[15], &out[31]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
-        }
-#endif  // DCT_HIGH_BIT_DEPTH
-      }
-      {
-        const __m128i out_05_0 = _mm_unpacklo_epi16(step1[20], step1[27]);
-        const __m128i out_05_1 = _mm_unpackhi_epi16(step1[20], step1[27]);
-        const __m128i out_21_0 = _mm_unpacklo_epi16(step1[21], step1[26]);
-        const __m128i out_21_1 = _mm_unpackhi_epi16(step1[21], step1[26]);
-        const __m128i out_13_0 = _mm_unpacklo_epi16(step1[22], step1[25]);
-        const __m128i out_13_1 = _mm_unpackhi_epi16(step1[22], step1[25]);
-        const __m128i out_29_0 = _mm_unpacklo_epi16(step1[23], step1[24]);
-        const __m128i out_29_1 = _mm_unpackhi_epi16(step1[23], step1[24]);
-        const __m128i out_05_2 = _mm_madd_epi16(out_05_0, k__cospi_p27_p05);
-        const __m128i out_05_3 = _mm_madd_epi16(out_05_1, k__cospi_p27_p05);
-        const __m128i out_21_2 = _mm_madd_epi16(out_21_0, k__cospi_p11_p21);
-        const __m128i out_21_3 = _mm_madd_epi16(out_21_1, k__cospi_p11_p21);
-        const __m128i out_13_2 = _mm_madd_epi16(out_13_0, k__cospi_p19_p13);
-        const __m128i out_13_3 = _mm_madd_epi16(out_13_1, k__cospi_p19_p13);
-        const __m128i out_29_2 = _mm_madd_epi16(out_29_0, k__cospi_p03_p29);
-        const __m128i out_29_3 = _mm_madd_epi16(out_29_1, k__cospi_p03_p29);
-        const __m128i out_03_2 = _mm_madd_epi16(out_29_0, k__cospi_m29_p03);
-        const __m128i out_03_3 = _mm_madd_epi16(out_29_1, k__cospi_m29_p03);
-        const __m128i out_19_2 = _mm_madd_epi16(out_13_0, k__cospi_m13_p19);
-        const __m128i out_19_3 = _mm_madd_epi16(out_13_1, k__cospi_m13_p19);
-        const __m128i out_11_2 = _mm_madd_epi16(out_21_0, k__cospi_m21_p11);
-        const __m128i out_11_3 = _mm_madd_epi16(out_21_1, k__cospi_m21_p11);
-        const __m128i out_27_2 = _mm_madd_epi16(out_05_0, k__cospi_m05_p27);
-        const __m128i out_27_3 = _mm_madd_epi16(out_05_1, k__cospi_m05_p27);
-        // dct_const_round_shift
-        const __m128i out_05_4 = _mm_add_epi32(out_05_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_05_5 = _mm_add_epi32(out_05_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_21_4 = _mm_add_epi32(out_21_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_21_5 = _mm_add_epi32(out_21_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_13_4 = _mm_add_epi32(out_13_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_13_5 = _mm_add_epi32(out_13_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_29_4 = _mm_add_epi32(out_29_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_29_5 = _mm_add_epi32(out_29_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_03_4 = _mm_add_epi32(out_03_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_03_5 = _mm_add_epi32(out_03_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_19_4 = _mm_add_epi32(out_19_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_19_5 = _mm_add_epi32(out_19_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_11_4 = _mm_add_epi32(out_11_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_11_5 = _mm_add_epi32(out_11_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_27_4 = _mm_add_epi32(out_27_2, k__DCT_CONST_ROUNDING);
-        const __m128i out_27_5 = _mm_add_epi32(out_27_3, k__DCT_CONST_ROUNDING);
-        const __m128i out_05_6 = _mm_srai_epi32(out_05_4, DCT_CONST_BITS);
-        const __m128i out_05_7 = _mm_srai_epi32(out_05_5, DCT_CONST_BITS);
-        const __m128i out_21_6 = _mm_srai_epi32(out_21_4, DCT_CONST_BITS);
-        const __m128i out_21_7 = _mm_srai_epi32(out_21_5, DCT_CONST_BITS);
-        const __m128i out_13_6 = _mm_srai_epi32(out_13_4, DCT_CONST_BITS);
-        const __m128i out_13_7 = _mm_srai_epi32(out_13_5, DCT_CONST_BITS);
-        const __m128i out_29_6 = _mm_srai_epi32(out_29_4, DCT_CONST_BITS);
-        const __m128i out_29_7 = _mm_srai_epi32(out_29_5, DCT_CONST_BITS);
-        const __m128i out_03_6 = _mm_srai_epi32(out_03_4, DCT_CONST_BITS);
-        const __m128i out_03_7 = _mm_srai_epi32(out_03_5, DCT_CONST_BITS);
-        const __m128i out_19_6 = _mm_srai_epi32(out_19_4, DCT_CONST_BITS);
-        const __m128i out_19_7 = _mm_srai_epi32(out_19_5, DCT_CONST_BITS);
-        const __m128i out_11_6 = _mm_srai_epi32(out_11_4, DCT_CONST_BITS);
-        const __m128i out_11_7 = _mm_srai_epi32(out_11_5, DCT_CONST_BITS);
-        const __m128i out_27_6 = _mm_srai_epi32(out_27_4, DCT_CONST_BITS);
-        const __m128i out_27_7 = _mm_srai_epi32(out_27_5, DCT_CONST_BITS);
-        // Combine
-        out[ 5] = _mm_packs_epi32(out_05_6, out_05_7);
-        out[21] = _mm_packs_epi32(out_21_6, out_21_7);
-        out[13] = _mm_packs_epi32(out_13_6, out_13_7);
-        out[29] = _mm_packs_epi32(out_29_6, out_29_7);
-        out[ 3] = _mm_packs_epi32(out_03_6, out_03_7);
-        out[19] = _mm_packs_epi32(out_19_6, out_19_7);
-        out[11] = _mm_packs_epi32(out_11_6, out_11_7);
-        out[27] = _mm_packs_epi32(out_27_6, out_27_7);
-#if DCT_HIGH_BIT_DEPTH
-        overflow = check_epi16_overflow_x8(&out[5], &out[21], &out[13],
-                                           &out[29], &out[3], &out[19],
-                                           &out[11], &out[27]);
-        if (overflow) {
-          if (pass == 0)
-            HIGH_FDCT32x32_2D_C(input, output_org, stride);
-          else
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-          return;
-        }
-#endif  // DCT_HIGH_BIT_DEPTH
-      }
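/* Editor's note: everything up to this point is the 16-bit (rd) path; the
 * FDCT32x32_HIGH_PRECISION branch below redoes stages 3..7 in 32-bit
 * arithmetic instead.  Both variants are typically generated from one
 * implementation file by including it twice, along these lines (the symbol
 * names and include path here are assumptions, following the pattern used
 * by this file family):
 */
#define FDCT32x32_2D vp9_fdct32x32_rd_sse2
#define FDCT32x32_HIGH_PRECISION 0
#include "fwd_dct32x32_impl_sse2.h"  /* hypothetical path */
#undef FDCT32x32_2D
#undef FDCT32x32_HIGH_PRECISION

#define FDCT32x32_2D vp9_fdct32x32_sse2
#define FDCT32x32_HIGH_PRECISION 1
#include "fwd_dct32x32_impl_sse2.h"
#undef FDCT32x32_2D
#undef FDCT32x32_HIGH_PRECISION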
-#if FDCT32x32_HIGH_PRECISION
-      } else {
-        __m128i lstep1[64], lstep2[64], lstep3[64];
-        __m128i u[32], v[32], sign[16];
-        const __m128i K32One = _mm_set_epi32(1, 1, 1, 1);
-        // start using 32-bit operations
-        // stage 3
-        {
-          // expanding to 32-bit length prior to addition operations
-          lstep2[ 0] = _mm_unpacklo_epi16(step2[ 0], kZero);
-          lstep2[ 1] = _mm_unpackhi_epi16(step2[ 0], kZero);
-          lstep2[ 2] = _mm_unpacklo_epi16(step2[ 1], kZero);
-          lstep2[ 3] = _mm_unpackhi_epi16(step2[ 1], kZero);
-          lstep2[ 4] = _mm_unpacklo_epi16(step2[ 2], kZero);
-          lstep2[ 5] = _mm_unpackhi_epi16(step2[ 2], kZero);
-          lstep2[ 6] = _mm_unpacklo_epi16(step2[ 3], kZero);
-          lstep2[ 7] = _mm_unpackhi_epi16(step2[ 3], kZero);
-          lstep2[ 8] = _mm_unpacklo_epi16(step2[ 4], kZero);
-          lstep2[ 9] = _mm_unpackhi_epi16(step2[ 4], kZero);
-          lstep2[10] = _mm_unpacklo_epi16(step2[ 5], kZero);
-          lstep2[11] = _mm_unpackhi_epi16(step2[ 5], kZero);
-          lstep2[12] = _mm_unpacklo_epi16(step2[ 6], kZero);
-          lstep2[13] = _mm_unpackhi_epi16(step2[ 6], kZero);
-          lstep2[14] = _mm_unpacklo_epi16(step2[ 7], kZero);
-          lstep2[15] = _mm_unpackhi_epi16(step2[ 7], kZero);
-          lstep2[ 0] = _mm_madd_epi16(lstep2[ 0], kOne);
-          lstep2[ 1] = _mm_madd_epi16(lstep2[ 1], kOne);
-          lstep2[ 2] = _mm_madd_epi16(lstep2[ 2], kOne);
-          lstep2[ 3] = _mm_madd_epi16(lstep2[ 3], kOne);
-          lstep2[ 4] = _mm_madd_epi16(lstep2[ 4], kOne);
-          lstep2[ 5] = _mm_madd_epi16(lstep2[ 5], kOne);
-          lstep2[ 6] = _mm_madd_epi16(lstep2[ 6], kOne);
-          lstep2[ 7] = _mm_madd_epi16(lstep2[ 7], kOne);
-          lstep2[ 8] = _mm_madd_epi16(lstep2[ 8], kOne);
-          lstep2[ 9] = _mm_madd_epi16(lstep2[ 9], kOne);
-          lstep2[10] = _mm_madd_epi16(lstep2[10], kOne);
-          lstep2[11] = _mm_madd_epi16(lstep2[11], kOne);
-          lstep2[12] = _mm_madd_epi16(lstep2[12], kOne);
-          lstep2[13] = _mm_madd_epi16(lstep2[13], kOne);
-          lstep2[14] = _mm_madd_epi16(lstep2[14], kOne);
-          lstep2[15] = _mm_madd_epi16(lstep2[15], kOne);
-
-          lstep3[ 0] = _mm_add_epi32(lstep2[14], lstep2[ 0]);
-          lstep3[ 1] = _mm_add_epi32(lstep2[15], lstep2[ 1]);
-          lstep3[ 2] = _mm_add_epi32(lstep2[12], lstep2[ 2]);
-          lstep3[ 3] = _mm_add_epi32(lstep2[13], lstep2[ 3]);
-          lstep3[ 4] = _mm_add_epi32(lstep2[10], lstep2[ 4]);
-          lstep3[ 5] = _mm_add_epi32(lstep2[11], lstep2[ 5]);
-          lstep3[ 6] = _mm_add_epi32(lstep2[ 8], lstep2[ 6]);
-          lstep3[ 7] = _mm_add_epi32(lstep2[ 9], lstep2[ 7]);
-          lstep3[ 8] = _mm_sub_epi32(lstep2[ 6], lstep2[ 8]);
-          lstep3[ 9] = _mm_sub_epi32(lstep2[ 7], lstep2[ 9]);
-          lstep3[10] = _mm_sub_epi32(lstep2[ 4], lstep2[10]);
-          lstep3[11] = _mm_sub_epi32(lstep2[ 5], lstep2[11]);
-          lstep3[12] = _mm_sub_epi32(lstep2[ 2], lstep2[12]);
-          lstep3[13] = _mm_sub_epi32(lstep2[ 3], lstep2[13]);
-          lstep3[14] = _mm_sub_epi32(lstep2[ 0], lstep2[14]);
-          lstep3[15] = _mm_sub_epi32(lstep2[ 1], lstep2[15]);
-        }
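/* Editor's note: the unpack-with-kZero followed by _mm_madd_epi16 with kOne
 * above is an SSE2 idiom for widening signed 16-bit lanes to 32 bits: after
 * the unpack each 32-bit lane holds the pair (v, 0), and the madd computes
 * v * 1 + 0 * 1, reproducing v with its sign.  With SSE4.1 (smmintrin.h)
 * the same widening is a single instruction per half:
 */
static INLINE void widen_epi16_sse4(__m128i x, __m128i *lo, __m128i *hi) {
  *lo = _mm_cvtepi16_epi32(x);                    /* low four lanes  */
  *hi = _mm_cvtepi16_epi32(_mm_srli_si128(x, 8)); /* high four lanes */
}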
-        {
-          const __m128i s3_10_0 = _mm_unpacklo_epi16(step2[13], step2[10]);
-          const __m128i s3_10_1 = _mm_unpackhi_epi16(step2[13], step2[10]);
-          const __m128i s3_11_0 = _mm_unpacklo_epi16(step2[12], step2[11]);
-          const __m128i s3_11_1 = _mm_unpackhi_epi16(step2[12], step2[11]);
-          const __m128i s3_10_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_m16);
-          const __m128i s3_10_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_m16);
-          const __m128i s3_11_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_m16);
-          const __m128i s3_11_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_m16);
-          const __m128i s3_12_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_p16);
-          const __m128i s3_12_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_p16);
-          const __m128i s3_13_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_p16);
-          const __m128i s3_13_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_p16);
-          // dct_const_round_shift
-          const __m128i s3_10_4 = _mm_add_epi32(s3_10_2, k__DCT_CONST_ROUNDING);
-          const __m128i s3_10_5 = _mm_add_epi32(s3_10_3, k__DCT_CONST_ROUNDING);
-          const __m128i s3_11_4 = _mm_add_epi32(s3_11_2, k__DCT_CONST_ROUNDING);
-          const __m128i s3_11_5 = _mm_add_epi32(s3_11_3, k__DCT_CONST_ROUNDING);
-          const __m128i s3_12_4 = _mm_add_epi32(s3_12_2, k__DCT_CONST_ROUNDING);
-          const __m128i s3_12_5 = _mm_add_epi32(s3_12_3, k__DCT_CONST_ROUNDING);
-          const __m128i s3_13_4 = _mm_add_epi32(s3_13_2, k__DCT_CONST_ROUNDING);
-          const __m128i s3_13_5 = _mm_add_epi32(s3_13_3, k__DCT_CONST_ROUNDING);
-          lstep3[20] = _mm_srai_epi32(s3_10_4, DCT_CONST_BITS);
-          lstep3[21] = _mm_srai_epi32(s3_10_5, DCT_CONST_BITS);
-          lstep3[22] = _mm_srai_epi32(s3_11_4, DCT_CONST_BITS);
-          lstep3[23] = _mm_srai_epi32(s3_11_5, DCT_CONST_BITS);
-          lstep3[24] = _mm_srai_epi32(s3_12_4, DCT_CONST_BITS);
-          lstep3[25] = _mm_srai_epi32(s3_12_5, DCT_CONST_BITS);
-          lstep3[26] = _mm_srai_epi32(s3_13_4, DCT_CONST_BITS);
-          lstep3[27] = _mm_srai_epi32(s3_13_5, DCT_CONST_BITS);
-        }
-        {
-          lstep2[40] = _mm_unpacklo_epi16(step2[20], kZero);
-          lstep2[41] = _mm_unpackhi_epi16(step2[20], kZero);
-          lstep2[42] = _mm_unpacklo_epi16(step2[21], kZero);
-          lstep2[43] = _mm_unpackhi_epi16(step2[21], kZero);
-          lstep2[44] = _mm_unpacklo_epi16(step2[22], kZero);
-          lstep2[45] = _mm_unpackhi_epi16(step2[22], kZero);
-          lstep2[46] = _mm_unpacklo_epi16(step2[23], kZero);
-          lstep2[47] = _mm_unpackhi_epi16(step2[23], kZero);
-          lstep2[48] = _mm_unpacklo_epi16(step2[24], kZero);
-          lstep2[49] = _mm_unpackhi_epi16(step2[24], kZero);
-          lstep2[50] = _mm_unpacklo_epi16(step2[25], kZero);
-          lstep2[51] = _mm_unpackhi_epi16(step2[25], kZero);
-          lstep2[52] = _mm_unpacklo_epi16(step2[26], kZero);
-          lstep2[53] = _mm_unpackhi_epi16(step2[26], kZero);
-          lstep2[54] = _mm_unpacklo_epi16(step2[27], kZero);
-          lstep2[55] = _mm_unpackhi_epi16(step2[27], kZero);
-          lstep2[40] = _mm_madd_epi16(lstep2[40], kOne);
-          lstep2[41] = _mm_madd_epi16(lstep2[41], kOne);
-          lstep2[42] = _mm_madd_epi16(lstep2[42], kOne);
-          lstep2[43] = _mm_madd_epi16(lstep2[43], kOne);
-          lstep2[44] = _mm_madd_epi16(lstep2[44], kOne);
-          lstep2[45] = _mm_madd_epi16(lstep2[45], kOne);
-          lstep2[46] = _mm_madd_epi16(lstep2[46], kOne);
-          lstep2[47] = _mm_madd_epi16(lstep2[47], kOne);
-          lstep2[48] = _mm_madd_epi16(lstep2[48], kOne);
-          lstep2[49] = _mm_madd_epi16(lstep2[49], kOne);
-          lstep2[50] = _mm_madd_epi16(lstep2[50], kOne);
-          lstep2[51] = _mm_madd_epi16(lstep2[51], kOne);
-          lstep2[52] = _mm_madd_epi16(lstep2[52], kOne);
-          lstep2[53] = _mm_madd_epi16(lstep2[53], kOne);
-          lstep2[54] = _mm_madd_epi16(lstep2[54], kOne);
-          lstep2[55] = _mm_madd_epi16(lstep2[55], kOne);
-
-          lstep1[32] = _mm_unpacklo_epi16(step1[16], kZero);
-          lstep1[33] = _mm_unpackhi_epi16(step1[16], kZero);
-          lstep1[34] = _mm_unpacklo_epi16(step1[17], kZero);
-          lstep1[35] = _mm_unpackhi_epi16(step1[17], kZero);
-          lstep1[36] = _mm_unpacklo_epi16(step1[18], kZero);
-          lstep1[37] = _mm_unpackhi_epi16(step1[18], kZero);
-          lstep1[38] = _mm_unpacklo_epi16(step1[19], kZero);
-          lstep1[39] = _mm_unpackhi_epi16(step1[19], kZero);
-          lstep1[56] = _mm_unpacklo_epi16(step1[28], kZero);
-          lstep1[57] = _mm_unpackhi_epi16(step1[28], kZero);
-          lstep1[58] = _mm_unpacklo_epi16(step1[29], kZero);
-          lstep1[59] = _mm_unpackhi_epi16(step1[29], kZero);
-          lstep1[60] = _mm_unpacklo_epi16(step1[30], kZero);
-          lstep1[61] = _mm_unpackhi_epi16(step1[30], kZero);
-          lstep1[62] = _mm_unpacklo_epi16(step1[31], kZero);
-          lstep1[63] = _mm_unpackhi_epi16(step1[31], kZero);
-          lstep1[32] = _mm_madd_epi16(lstep1[32], kOne);
-          lstep1[33] = _mm_madd_epi16(lstep1[33], kOne);
-          lstep1[34] = _mm_madd_epi16(lstep1[34], kOne);
-          lstep1[35] = _mm_madd_epi16(lstep1[35], kOne);
-          lstep1[36] = _mm_madd_epi16(lstep1[36], kOne);
-          lstep1[37] = _mm_madd_epi16(lstep1[37], kOne);
-          lstep1[38] = _mm_madd_epi16(lstep1[38], kOne);
-          lstep1[39] = _mm_madd_epi16(lstep1[39], kOne);
-          lstep1[56] = _mm_madd_epi16(lstep1[56], kOne);
-          lstep1[57] = _mm_madd_epi16(lstep1[57], kOne);
-          lstep1[58] = _mm_madd_epi16(lstep1[58], kOne);
-          lstep1[59] = _mm_madd_epi16(lstep1[59], kOne);
-          lstep1[60] = _mm_madd_epi16(lstep1[60], kOne);
-          lstep1[61] = _mm_madd_epi16(lstep1[61], kOne);
-          lstep1[62] = _mm_madd_epi16(lstep1[62], kOne);
-          lstep1[63] = _mm_madd_epi16(lstep1[63], kOne);
-
-          lstep3[32] = _mm_add_epi32(lstep2[46], lstep1[32]);
-          lstep3[33] = _mm_add_epi32(lstep2[47], lstep1[33]);
-
-          lstep3[34] = _mm_add_epi32(lstep2[44], lstep1[34]);
-          lstep3[35] = _mm_add_epi32(lstep2[45], lstep1[35]);
-          lstep3[36] = _mm_add_epi32(lstep2[42], lstep1[36]);
-          lstep3[37] = _mm_add_epi32(lstep2[43], lstep1[37]);
-          lstep3[38] = _mm_add_epi32(lstep2[40], lstep1[38]);
-          lstep3[39] = _mm_add_epi32(lstep2[41], lstep1[39]);
-          lstep3[40] = _mm_sub_epi32(lstep1[38], lstep2[40]);
-          lstep3[41] = _mm_sub_epi32(lstep1[39], lstep2[41]);
-          lstep3[42] = _mm_sub_epi32(lstep1[36], lstep2[42]);
-          lstep3[43] = _mm_sub_epi32(lstep1[37], lstep2[43]);
-          lstep3[44] = _mm_sub_epi32(lstep1[34], lstep2[44]);
-          lstep3[45] = _mm_sub_epi32(lstep1[35], lstep2[45]);
-          lstep3[46] = _mm_sub_epi32(lstep1[32], lstep2[46]);
-          lstep3[47] = _mm_sub_epi32(lstep1[33], lstep2[47]);
-          lstep3[48] = _mm_sub_epi32(lstep1[62], lstep2[48]);
-          lstep3[49] = _mm_sub_epi32(lstep1[63], lstep2[49]);
-          lstep3[50] = _mm_sub_epi32(lstep1[60], lstep2[50]);
-          lstep3[51] = _mm_sub_epi32(lstep1[61], lstep2[51]);
-          lstep3[52] = _mm_sub_epi32(lstep1[58], lstep2[52]);
-          lstep3[53] = _mm_sub_epi32(lstep1[59], lstep2[53]);
-          lstep3[54] = _mm_sub_epi32(lstep1[56], lstep2[54]);
-          lstep3[55] = _mm_sub_epi32(lstep1[57], lstep2[55]);
-          lstep3[56] = _mm_add_epi32(lstep2[54], lstep1[56]);
-          lstep3[57] = _mm_add_epi32(lstep2[55], lstep1[57]);
-          lstep3[58] = _mm_add_epi32(lstep2[52], lstep1[58]);
-          lstep3[59] = _mm_add_epi32(lstep2[53], lstep1[59]);
-          lstep3[60] = _mm_add_epi32(lstep2[50], lstep1[60]);
-          lstep3[61] = _mm_add_epi32(lstep2[51], lstep1[61]);
-          lstep3[62] = _mm_add_epi32(lstep2[48], lstep1[62]);
-          lstep3[63] = _mm_add_epi32(lstep2[49], lstep1[63]);
-        }
-
-        // stage 4
-        {
-          // expanding to 32-bit length prior to addition operations
-          lstep2[16] = _mm_unpacklo_epi16(step2[ 8], kZero);
-          lstep2[17] = _mm_unpackhi_epi16(step2[ 8], kZero);
-          lstep2[18] = _mm_unpacklo_epi16(step2[ 9], kZero);
-          lstep2[19] = _mm_unpackhi_epi16(step2[ 9], kZero);
-          lstep2[28] = _mm_unpacklo_epi16(step2[14], kZero);
-          lstep2[29] = _mm_unpackhi_epi16(step2[14], kZero);
-          lstep2[30] = _mm_unpacklo_epi16(step2[15], kZero);
-          lstep2[31] = _mm_unpackhi_epi16(step2[15], kZero);
-          lstep2[16] = _mm_madd_epi16(lstep2[16], kOne);
-          lstep2[17] = _mm_madd_epi16(lstep2[17], kOne);
-          lstep2[18] = _mm_madd_epi16(lstep2[18], kOne);
-          lstep2[19] = _mm_madd_epi16(lstep2[19], kOne);
-          lstep2[28] = _mm_madd_epi16(lstep2[28], kOne);
-          lstep2[29] = _mm_madd_epi16(lstep2[29], kOne);
-          lstep2[30] = _mm_madd_epi16(lstep2[30], kOne);
-          lstep2[31] = _mm_madd_epi16(lstep2[31], kOne);
-
-          lstep1[ 0] = _mm_add_epi32(lstep3[ 6], lstep3[ 0]);
-          lstep1[ 1] = _mm_add_epi32(lstep3[ 7], lstep3[ 1]);
-          lstep1[ 2] = _mm_add_epi32(lstep3[ 4], lstep3[ 2]);
-          lstep1[ 3] = _mm_add_epi32(lstep3[ 5], lstep3[ 3]);
-          lstep1[ 4] = _mm_sub_epi32(lstep3[ 2], lstep3[ 4]);
-          lstep1[ 5] = _mm_sub_epi32(lstep3[ 3], lstep3[ 5]);
-          lstep1[ 6] = _mm_sub_epi32(lstep3[ 0], lstep3[ 6]);
-          lstep1[ 7] = _mm_sub_epi32(lstep3[ 1], lstep3[ 7]);
-          lstep1[16] = _mm_add_epi32(lstep3[22], lstep2[16]);
-          lstep1[17] = _mm_add_epi32(lstep3[23], lstep2[17]);
-          lstep1[18] = _mm_add_epi32(lstep3[20], lstep2[18]);
-          lstep1[19] = _mm_add_epi32(lstep3[21], lstep2[19]);
-          lstep1[20] = _mm_sub_epi32(lstep2[18], lstep3[20]);
-          lstep1[21] = _mm_sub_epi32(lstep2[19], lstep3[21]);
-          lstep1[22] = _mm_sub_epi32(lstep2[16], lstep3[22]);
-          lstep1[23] = _mm_sub_epi32(lstep2[17], lstep3[23]);
-          lstep1[24] = _mm_sub_epi32(lstep2[30], lstep3[24]);
-          lstep1[25] = _mm_sub_epi32(lstep2[31], lstep3[25]);
-          lstep1[26] = _mm_sub_epi32(lstep2[28], lstep3[26]);
-          lstep1[27] = _mm_sub_epi32(lstep2[29], lstep3[27]);
-          lstep1[28] = _mm_add_epi32(lstep3[26], lstep2[28]);
-          lstep1[29] = _mm_add_epi32(lstep3[27], lstep2[29]);
-          lstep1[30] = _mm_add_epi32(lstep3[24], lstep2[30]);
-          lstep1[31] = _mm_add_epi32(lstep3[25], lstep2[31]);
-        }
-        {
-          const __m128i k32_p16_p16 = pair_set_epi32(cospi_16_64, cospi_16_64);
-          const __m128i k32_p16_m16 = pair_set_epi32(cospi_16_64, -cospi_16_64);
-
-          u[0] = _mm_unpacklo_epi32(lstep3[12], lstep3[10]);
-          u[1] = _mm_unpackhi_epi32(lstep3[12], lstep3[10]);
-          u[2] = _mm_unpacklo_epi32(lstep3[13], lstep3[11]);
-          u[3] = _mm_unpackhi_epi32(lstep3[13], lstep3[11]);
-
-          // TODO(jingning): manually inline k_madd_epi32_ to further hide
-          // instruction latency.
-          v[0] = k_madd_epi32(u[0], k32_p16_m16);
-          v[1] = k_madd_epi32(u[1], k32_p16_m16);
-          v[2] = k_madd_epi32(u[2], k32_p16_m16);
-          v[3] = k_madd_epi32(u[3], k32_p16_m16);
-          v[4] = k_madd_epi32(u[0], k32_p16_p16);
-          v[5] = k_madd_epi32(u[1], k32_p16_p16);
-          v[6] = k_madd_epi32(u[2], k32_p16_p16);
-          v[7] = k_madd_epi32(u[3], k32_p16_p16);
-#if DCT_HIGH_BIT_DEPTH
-          overflow = k_check_epi32_overflow_8(&v[0], &v[1], &v[2], &v[3],
-                                              &v[4], &v[5], &v[6], &v[7],
-                                              &kZero);
-          if (overflow) {
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-            return;
-          }
-#endif  // DCT_HIGH_BIT_DEPTH
-          u[0] = k_packs_epi64(v[0], v[1]);
-          u[1] = k_packs_epi64(v[2], v[3]);
-          u[2] = k_packs_epi64(v[4], v[5]);
-          u[3] = k_packs_epi64(v[6], v[7]);
-
-          v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
-          v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
-          v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
-          v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
-
-          lstep1[10] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
-          lstep1[11] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
-          lstep1[12] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
-          lstep1[13] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
-        }
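/* Editor's note: in the 32-bit path the rotation that _mm_madd_epi16 gave
 * the 16-bit path is rebuilt from k_madd_epi32 (products accumulated as
 * 64-bit intermediates) and k_packs_epi64 (narrowing back to 32-bit lanes)
 * ahead of the usual rounding shift.  The scalar computation being
 * vectorized, with a hypothetical helper name:
 */
static INLINE int32_t butterfly_rotate_32(int32_t a, int32_t b, int c0,
                                          int c1) {
  const int64_t t = (int64_t)a * c0 + (int64_t)b * c1;
  return (int32_t)((t + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS);
}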
-        {
-          const __m128i k32_m08_p24 = pair_set_epi32(-cospi_8_64, cospi_24_64);
-          const __m128i k32_m24_m08 = pair_set_epi32(-cospi_24_64, -cospi_8_64);
-          const __m128i k32_p24_p08 = pair_set_epi32(cospi_24_64, cospi_8_64);
-
-          u[ 0] = _mm_unpacklo_epi32(lstep3[36], lstep3[58]);
-          u[ 1] = _mm_unpackhi_epi32(lstep3[36], lstep3[58]);
-          u[ 2] = _mm_unpacklo_epi32(lstep3[37], lstep3[59]);
-          u[ 3] = _mm_unpackhi_epi32(lstep3[37], lstep3[59]);
-          u[ 4] = _mm_unpacklo_epi32(lstep3[38], lstep3[56]);
-          u[ 5] = _mm_unpackhi_epi32(lstep3[38], lstep3[56]);
-          u[ 6] = _mm_unpacklo_epi32(lstep3[39], lstep3[57]);
-          u[ 7] = _mm_unpackhi_epi32(lstep3[39], lstep3[57]);
-          u[ 8] = _mm_unpacklo_epi32(lstep3[40], lstep3[54]);
-          u[ 9] = _mm_unpackhi_epi32(lstep3[40], lstep3[54]);
-          u[10] = _mm_unpacklo_epi32(lstep3[41], lstep3[55]);
-          u[11] = _mm_unpackhi_epi32(lstep3[41], lstep3[55]);
-          u[12] = _mm_unpacklo_epi32(lstep3[42], lstep3[52]);
-          u[13] = _mm_unpackhi_epi32(lstep3[42], lstep3[52]);
-          u[14] = _mm_unpacklo_epi32(lstep3[43], lstep3[53]);
-          u[15] = _mm_unpackhi_epi32(lstep3[43], lstep3[53]);
-
-          v[ 0] = k_madd_epi32(u[ 0], k32_m08_p24);
-          v[ 1] = k_madd_epi32(u[ 1], k32_m08_p24);
-          v[ 2] = k_madd_epi32(u[ 2], k32_m08_p24);
-          v[ 3] = k_madd_epi32(u[ 3], k32_m08_p24);
-          v[ 4] = k_madd_epi32(u[ 4], k32_m08_p24);
-          v[ 5] = k_madd_epi32(u[ 5], k32_m08_p24);
-          v[ 6] = k_madd_epi32(u[ 6], k32_m08_p24);
-          v[ 7] = k_madd_epi32(u[ 7], k32_m08_p24);
-          v[ 8] = k_madd_epi32(u[ 8], k32_m24_m08);
-          v[ 9] = k_madd_epi32(u[ 9], k32_m24_m08);
-          v[10] = k_madd_epi32(u[10], k32_m24_m08);
-          v[11] = k_madd_epi32(u[11], k32_m24_m08);
-          v[12] = k_madd_epi32(u[12], k32_m24_m08);
-          v[13] = k_madd_epi32(u[13], k32_m24_m08);
-          v[14] = k_madd_epi32(u[14], k32_m24_m08);
-          v[15] = k_madd_epi32(u[15], k32_m24_m08);
-          v[16] = k_madd_epi32(u[12], k32_m08_p24);
-          v[17] = k_madd_epi32(u[13], k32_m08_p24);
-          v[18] = k_madd_epi32(u[14], k32_m08_p24);
-          v[19] = k_madd_epi32(u[15], k32_m08_p24);
-          v[20] = k_madd_epi32(u[ 8], k32_m08_p24);
-          v[21] = k_madd_epi32(u[ 9], k32_m08_p24);
-          v[22] = k_madd_epi32(u[10], k32_m08_p24);
-          v[23] = k_madd_epi32(u[11], k32_m08_p24);
-          v[24] = k_madd_epi32(u[ 4], k32_p24_p08);
-          v[25] = k_madd_epi32(u[ 5], k32_p24_p08);
-          v[26] = k_madd_epi32(u[ 6], k32_p24_p08);
-          v[27] = k_madd_epi32(u[ 7], k32_p24_p08);
-          v[28] = k_madd_epi32(u[ 0], k32_p24_p08);
-          v[29] = k_madd_epi32(u[ 1], k32_p24_p08);
-          v[30] = k_madd_epi32(u[ 2], k32_p24_p08);
-          v[31] = k_madd_epi32(u[ 3], k32_p24_p08);
-
-#if DCT_HIGH_BIT_DEPTH
-          overflow = k_check_epi32_overflow_32(
-              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
-              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
-              &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23],
-              &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31],
-              &kZero);
-          if (overflow) {
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-            return;
-          }
-#endif  // DCT_HIGH_BIT_DEPTH
-          u[ 0] = k_packs_epi64(v[ 0], v[ 1]);
-          u[ 1] = k_packs_epi64(v[ 2], v[ 3]);
-          u[ 2] = k_packs_epi64(v[ 4], v[ 5]);
-          u[ 3] = k_packs_epi64(v[ 6], v[ 7]);
-          u[ 4] = k_packs_epi64(v[ 8], v[ 9]);
-          u[ 5] = k_packs_epi64(v[10], v[11]);
-          u[ 6] = k_packs_epi64(v[12], v[13]);
-          u[ 7] = k_packs_epi64(v[14], v[15]);
-          u[ 8] = k_packs_epi64(v[16], v[17]);
-          u[ 9] = k_packs_epi64(v[18], v[19]);
-          u[10] = k_packs_epi64(v[20], v[21]);
-          u[11] = k_packs_epi64(v[22], v[23]);
-          u[12] = k_packs_epi64(v[24], v[25]);
-          u[13] = k_packs_epi64(v[26], v[27]);
-          u[14] = k_packs_epi64(v[28], v[29]);
-          u[15] = k_packs_epi64(v[30], v[31]);
-
-          v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
-          v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
-          v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
-          v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
-          v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
-          v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
-          v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
-          v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
-          v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
-          v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
-          v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
-          v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
-          v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
-          v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
-          v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
-          v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
-
-          lstep1[36] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS);
-          lstep1[37] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS);
-          lstep1[38] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS);
-          lstep1[39] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS);
-          lstep1[40] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS);
-          lstep1[41] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS);
-          lstep1[42] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS);
-          lstep1[43] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS);
-          lstep1[52] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS);
-          lstep1[53] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS);
-          lstep1[54] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
-          lstep1[55] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
-          lstep1[56] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
-          lstep1[57] = _mm_srai_epi32(v[13], DCT_CONST_BITS);
-          lstep1[58] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
-          lstep1[59] = _mm_srai_epi32(v[15], DCT_CONST_BITS);
-        }
-        // stage 5
-        {
-          lstep2[ 8] = _mm_add_epi32(lstep1[10], lstep3[ 8]);
-          lstep2[ 9] = _mm_add_epi32(lstep1[11], lstep3[ 9]);
-          lstep2[10] = _mm_sub_epi32(lstep3[ 8], lstep1[10]);
-          lstep2[11] = _mm_sub_epi32(lstep3[ 9], lstep1[11]);
-          lstep2[12] = _mm_sub_epi32(lstep3[14], lstep1[12]);
-          lstep2[13] = _mm_sub_epi32(lstep3[15], lstep1[13]);
-          lstep2[14] = _mm_add_epi32(lstep1[12], lstep3[14]);
-          lstep2[15] = _mm_add_epi32(lstep1[13], lstep3[15]);
-        }
-        {
-          const __m128i k32_p16_p16 = pair_set_epi32(cospi_16_64, cospi_16_64);
-          const __m128i k32_p16_m16 = pair_set_epi32(cospi_16_64, -cospi_16_64);
-          const __m128i k32_p24_p08 = pair_set_epi32(cospi_24_64, cospi_8_64);
-          const __m128i k32_m08_p24 = pair_set_epi32(-cospi_8_64, cospi_24_64);
-
-          u[0] = _mm_unpacklo_epi32(lstep1[0], lstep1[2]);
-          u[1] = _mm_unpackhi_epi32(lstep1[0], lstep1[2]);
-          u[2] = _mm_unpacklo_epi32(lstep1[1], lstep1[3]);
-          u[3] = _mm_unpackhi_epi32(lstep1[1], lstep1[3]);
-          u[4] = _mm_unpacklo_epi32(lstep1[4], lstep1[6]);
-          u[5] = _mm_unpackhi_epi32(lstep1[4], lstep1[6]);
-          u[6] = _mm_unpacklo_epi32(lstep1[5], lstep1[7]);
-          u[7] = _mm_unpackhi_epi32(lstep1[5], lstep1[7]);
-
-          // TODO(jingning): manually inline k_madd_epi32_ to further hide
-          // instruction latency.
-          v[ 0] = k_madd_epi32(u[0], k32_p16_p16);
-          v[ 1] = k_madd_epi32(u[1], k32_p16_p16);
-          v[ 2] = k_madd_epi32(u[2], k32_p16_p16);
-          v[ 3] = k_madd_epi32(u[3], k32_p16_p16);
-          v[ 4] = k_madd_epi32(u[0], k32_p16_m16);
-          v[ 5] = k_madd_epi32(u[1], k32_p16_m16);
-          v[ 6] = k_madd_epi32(u[2], k32_p16_m16);
-          v[ 7] = k_madd_epi32(u[3], k32_p16_m16);
-          v[ 8] = k_madd_epi32(u[4], k32_p24_p08);
-          v[ 9] = k_madd_epi32(u[5], k32_p24_p08);
-          v[10] = k_madd_epi32(u[6], k32_p24_p08);
-          v[11] = k_madd_epi32(u[7], k32_p24_p08);
-          v[12] = k_madd_epi32(u[4], k32_m08_p24);
-          v[13] = k_madd_epi32(u[5], k32_m08_p24);
-          v[14] = k_madd_epi32(u[6], k32_m08_p24);
-          v[15] = k_madd_epi32(u[7], k32_m08_p24);
-
-#if DCT_HIGH_BIT_DEPTH
-          overflow = k_check_epi32_overflow_16(
-              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
-              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
-              &kZero);
-          if (overflow) {
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-            return;
-          }
-#endif  // DCT_HIGH_BIT_DEPTH
-          u[0] = k_packs_epi64(v[0], v[1]);
-          u[1] = k_packs_epi64(v[2], v[3]);
-          u[2] = k_packs_epi64(v[4], v[5]);
-          u[3] = k_packs_epi64(v[6], v[7]);
-          u[4] = k_packs_epi64(v[8], v[9]);
-          u[5] = k_packs_epi64(v[10], v[11]);
-          u[6] = k_packs_epi64(v[12], v[13]);
-          u[7] = k_packs_epi64(v[14], v[15]);
-
-          v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
-          v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
-          v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
-          v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
-          v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
-          v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
-          v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
-          v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
-
-          u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
-          u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
-          u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
-          u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
-          u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS);
-          u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS);
-          u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS);
-          u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS);
-
-          sign[0] = _mm_cmplt_epi32(u[0], kZero);
-          sign[1] = _mm_cmplt_epi32(u[1], kZero);
-          sign[2] = _mm_cmplt_epi32(u[2], kZero);
-          sign[3] = _mm_cmplt_epi32(u[3], kZero);
-          sign[4] = _mm_cmplt_epi32(u[4], kZero);
-          sign[5] = _mm_cmplt_epi32(u[5], kZero);
-          sign[6] = _mm_cmplt_epi32(u[6], kZero);
-          sign[7] = _mm_cmplt_epi32(u[7], kZero);
-
-          u[0] = _mm_sub_epi32(u[0], sign[0]);
-          u[1] = _mm_sub_epi32(u[1], sign[1]);
-          u[2] = _mm_sub_epi32(u[2], sign[2]);
-          u[3] = _mm_sub_epi32(u[3], sign[3]);
-          u[4] = _mm_sub_epi32(u[4], sign[4]);
-          u[5] = _mm_sub_epi32(u[5], sign[5]);
-          u[6] = _mm_sub_epi32(u[6], sign[6]);
-          u[7] = _mm_sub_epi32(u[7], sign[7]);
-
-          u[0] = _mm_add_epi32(u[0], K32One);
-          u[1] = _mm_add_epi32(u[1], K32One);
-          u[2] = _mm_add_epi32(u[2], K32One);
-          u[3] = _mm_add_epi32(u[3], K32One);
-          u[4] = _mm_add_epi32(u[4], K32One);
-          u[5] = _mm_add_epi32(u[5], K32One);
-          u[6] = _mm_add_epi32(u[6], K32One);
-          u[7] = _mm_add_epi32(u[7], K32One);
-
-          u[0] = _mm_srai_epi32(u[0], 2);
-          u[1] = _mm_srai_epi32(u[1], 2);
-          u[2] = _mm_srai_epi32(u[2], 2);
-          u[3] = _mm_srai_epi32(u[3], 2);
-          u[4] = _mm_srai_epi32(u[4], 2);
-          u[5] = _mm_srai_epi32(u[5], 2);
-          u[6] = _mm_srai_epi32(u[6], 2);
-          u[7] = _mm_srai_epi32(u[7], 2);
-
-          // Combine
-          out[ 0] = _mm_packs_epi32(u[0], u[1]);
-          out[16] = _mm_packs_epi32(u[2], u[3]);
-          out[ 8] = _mm_packs_epi32(u[4], u[5]);
-          out[24] = _mm_packs_epi32(u[6], u[7]);
-#if DCT_HIGH_BIT_DEPTH
-          overflow = check_epi16_overflow_x4(&out[0], &out[16],
-                                             &out[8], &out[24]);
-          if (overflow) {
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-            return;
-          }
-#endif  // DCT_HIGH_BIT_DEPTH
-        }
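/* Editor's note: the cmplt/sub/add/srai run above implements the final
 * down-scaling of the transform, (x + 1 + (x < 0)) >> 2, as in the C
 * version's output stage: _mm_cmplt_epi32 yields -1 in negative lanes, and
 * subtracting that -1 contributes the extra bias before the shift.  Scalar
 * form, with a hypothetical helper name:
 */
static INLINE int16_t fdct32_round_shift2(int32_t x) {
  return (int16_t)((x + 1 + (x < 0)) >> 2);
}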
-        {
-          const __m128i k32_m08_p24 = pair_set_epi32(-cospi_8_64, cospi_24_64);
-          const __m128i k32_m24_m08 = pair_set_epi32(-cospi_24_64, -cospi_8_64);
-          const __m128i k32_p24_p08 = pair_set_epi32(cospi_24_64, cospi_8_64);
-
-          u[0] = _mm_unpacklo_epi32(lstep1[18], lstep1[28]);
-          u[1] = _mm_unpackhi_epi32(lstep1[18], lstep1[28]);
-          u[2] = _mm_unpacklo_epi32(lstep1[19], lstep1[29]);
-          u[3] = _mm_unpackhi_epi32(lstep1[19], lstep1[29]);
-          u[4] = _mm_unpacklo_epi32(lstep1[20], lstep1[26]);
-          u[5] = _mm_unpackhi_epi32(lstep1[20], lstep1[26]);
-          u[6] = _mm_unpacklo_epi32(lstep1[21], lstep1[27]);
-          u[7] = _mm_unpackhi_epi32(lstep1[21], lstep1[27]);
-
-          v[0] = k_madd_epi32(u[0], k32_m08_p24);
-          v[1] = k_madd_epi32(u[1], k32_m08_p24);
-          v[2] = k_madd_epi32(u[2], k32_m08_p24);
-          v[3] = k_madd_epi32(u[3], k32_m08_p24);
-          v[4] = k_madd_epi32(u[4], k32_m24_m08);
-          v[5] = k_madd_epi32(u[5], k32_m24_m08);
-          v[6] = k_madd_epi32(u[6], k32_m24_m08);
-          v[7] = k_madd_epi32(u[7], k32_m24_m08);
-          v[ 8] = k_madd_epi32(u[4], k32_m08_p24);
-          v[ 9] = k_madd_epi32(u[5], k32_m08_p24);
-          v[10] = k_madd_epi32(u[6], k32_m08_p24);
-          v[11] = k_madd_epi32(u[7], k32_m08_p24);
-          v[12] = k_madd_epi32(u[0], k32_p24_p08);
-          v[13] = k_madd_epi32(u[1], k32_p24_p08);
-          v[14] = k_madd_epi32(u[2], k32_p24_p08);
-          v[15] = k_madd_epi32(u[3], k32_p24_p08);
-
-#if DCT_HIGH_BIT_DEPTH
-          overflow = k_check_epi32_overflow_16(
-              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
-              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
-              &kZero);
-          if (overflow) {
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-            return;
-          }
-#endif  // DCT_HIGH_BIT_DEPTH
-          u[0] = k_packs_epi64(v[0], v[1]);
-          u[1] = k_packs_epi64(v[2], v[3]);
-          u[2] = k_packs_epi64(v[4], v[5]);
-          u[3] = k_packs_epi64(v[6], v[7]);
-          u[4] = k_packs_epi64(v[8], v[9]);
-          u[5] = k_packs_epi64(v[10], v[11]);
-          u[6] = k_packs_epi64(v[12], v[13]);
-          u[7] = k_packs_epi64(v[14], v[15]);
-
-          u[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
-          u[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
-          u[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
-          u[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
-          u[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
-          u[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
-          u[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
-          u[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
-
-          lstep2[18] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
-          lstep2[19] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
-          lstep2[20] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
-          lstep2[21] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
-          lstep2[26] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
-          lstep2[27] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
-          lstep2[28] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
-          lstep2[29] = _mm_srai_epi32(u[7], DCT_CONST_BITS);
-        }
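/* Editor's note: pair_set_epi32 builds the interleaved (c0, c1, c0, c1)
 * constant vectors consumed by k_madd_epi32 once the epi32 unpacks have
 * interleaved the two operands.  Its conventional definition, shown here as
 * an assumption (the real macro lives in a shared transform header):
 */
#define pair_set_epi32(a, b) \
  _mm_set_epi32((int)(b), (int)(a), (int)(b), (int)(a))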
-        {
-          lstep2[32] = _mm_add_epi32(lstep1[38], lstep3[32]);
-          lstep2[33] = _mm_add_epi32(lstep1[39], lstep3[33]);
-          lstep2[34] = _mm_add_epi32(lstep1[36], lstep3[34]);
-          lstep2[35] = _mm_add_epi32(lstep1[37], lstep3[35]);
-          lstep2[36] = _mm_sub_epi32(lstep3[34], lstep1[36]);
-          lstep2[37] = _mm_sub_epi32(lstep3[35], lstep1[37]);
-          lstep2[38] = _mm_sub_epi32(lstep3[32], lstep1[38]);
-          lstep2[39] = _mm_sub_epi32(lstep3[33], lstep1[39]);
-          lstep2[40] = _mm_sub_epi32(lstep3[46], lstep1[40]);
-          lstep2[41] = _mm_sub_epi32(lstep3[47], lstep1[41]);
-          lstep2[42] = _mm_sub_epi32(lstep3[44], lstep1[42]);
-          lstep2[43] = _mm_sub_epi32(lstep3[45], lstep1[43]);
-          lstep2[44] = _mm_add_epi32(lstep1[42], lstep3[44]);
-          lstep2[45] = _mm_add_epi32(lstep1[43], lstep3[45]);
-          lstep2[46] = _mm_add_epi32(lstep1[40], lstep3[46]);
-          lstep2[47] = _mm_add_epi32(lstep1[41], lstep3[47]);
-          lstep2[48] = _mm_add_epi32(lstep1[54], lstep3[48]);
-          lstep2[49] = _mm_add_epi32(lstep1[55], lstep3[49]);
-          lstep2[50] = _mm_add_epi32(lstep1[52], lstep3[50]);
-          lstep2[51] = _mm_add_epi32(lstep1[53], lstep3[51]);
-          lstep2[52] = _mm_sub_epi32(lstep3[50], lstep1[52]);
-          lstep2[53] = _mm_sub_epi32(lstep3[51], lstep1[53]);
-          lstep2[54] = _mm_sub_epi32(lstep3[48], lstep1[54]);
-          lstep2[55] = _mm_sub_epi32(lstep3[49], lstep1[55]);
-          lstep2[56] = _mm_sub_epi32(lstep3[62], lstep1[56]);
-          lstep2[57] = _mm_sub_epi32(lstep3[63], lstep1[57]);
-          lstep2[58] = _mm_sub_epi32(lstep3[60], lstep1[58]);
-          lstep2[59] = _mm_sub_epi32(lstep3[61], lstep1[59]);
-          lstep2[60] = _mm_add_epi32(lstep1[58], lstep3[60]);
-          lstep2[61] = _mm_add_epi32(lstep1[59], lstep3[61]);
-          lstep2[62] = _mm_add_epi32(lstep1[56], lstep3[62]);
-          lstep2[63] = _mm_add_epi32(lstep1[57], lstep3[63]);
-        }
-        // stage 6
-        {
-          const __m128i k32_p28_p04 = pair_set_epi32(cospi_28_64, cospi_4_64);
-          const __m128i k32_p12_p20 = pair_set_epi32(cospi_12_64, cospi_20_64);
-          const __m128i k32_m20_p12 = pair_set_epi32(-cospi_20_64, cospi_12_64);
-          const __m128i k32_m04_p28 = pair_set_epi32(-cospi_4_64, cospi_28_64);
-
-          u[0] = _mm_unpacklo_epi32(lstep2[ 8], lstep2[14]);
-          u[1] = _mm_unpackhi_epi32(lstep2[ 8], lstep2[14]);
-          u[2] = _mm_unpacklo_epi32(lstep2[ 9], lstep2[15]);
-          u[3] = _mm_unpackhi_epi32(lstep2[ 9], lstep2[15]);
-          u[4] = _mm_unpacklo_epi32(lstep2[10], lstep2[12]);
-          u[5] = _mm_unpackhi_epi32(lstep2[10], lstep2[12]);
-          u[6] = _mm_unpacklo_epi32(lstep2[11], lstep2[13]);
-          u[7] = _mm_unpackhi_epi32(lstep2[11], lstep2[13]);
-          u[8] = _mm_unpacklo_epi32(lstep2[10], lstep2[12]);
-          u[9] = _mm_unpackhi_epi32(lstep2[10], lstep2[12]);
-          u[10] = _mm_unpacklo_epi32(lstep2[11], lstep2[13]);
-          u[11] = _mm_unpackhi_epi32(lstep2[11], lstep2[13]);
-          u[12] = _mm_unpacklo_epi32(lstep2[ 8], lstep2[14]);
-          u[13] = _mm_unpackhi_epi32(lstep2[ 8], lstep2[14]);
-          u[14] = _mm_unpacklo_epi32(lstep2[ 9], lstep2[15]);
-          u[15] = _mm_unpackhi_epi32(lstep2[ 9], lstep2[15]);
-
-          v[0] = k_madd_epi32(u[0], k32_p28_p04);
-          v[1] = k_madd_epi32(u[1], k32_p28_p04);
-          v[2] = k_madd_epi32(u[2], k32_p28_p04);
-          v[3] = k_madd_epi32(u[3], k32_p28_p04);
-          v[4] = k_madd_epi32(u[4], k32_p12_p20);
-          v[5] = k_madd_epi32(u[5], k32_p12_p20);
-          v[6] = k_madd_epi32(u[6], k32_p12_p20);
-          v[7] = k_madd_epi32(u[7], k32_p12_p20);
-          v[ 8] = k_madd_epi32(u[ 8], k32_m20_p12);
-          v[ 9] = k_madd_epi32(u[ 9], k32_m20_p12);
-          v[10] = k_madd_epi32(u[10], k32_m20_p12);
-          v[11] = k_madd_epi32(u[11], k32_m20_p12);
-          v[12] = k_madd_epi32(u[12], k32_m04_p28);
-          v[13] = k_madd_epi32(u[13], k32_m04_p28);
-          v[14] = k_madd_epi32(u[14], k32_m04_p28);
-          v[15] = k_madd_epi32(u[15], k32_m04_p28);
-
-#if DCT_HIGH_BIT_DEPTH
-          overflow = k_check_epi32_overflow_16(
-              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
-              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
-              &kZero);
-          if (overflow) {
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-            return;
-          }
-#endif  // DCT_HIGH_BIT_DEPTH
-          u[0] = k_packs_epi64(v[0], v[1]);
-          u[1] = k_packs_epi64(v[2], v[3]);
-          u[2] = k_packs_epi64(v[4], v[5]);
-          u[3] = k_packs_epi64(v[6], v[7]);
-          u[4] = k_packs_epi64(v[8], v[9]);
-          u[5] = k_packs_epi64(v[10], v[11]);
-          u[6] = k_packs_epi64(v[12], v[13]);
-          u[7] = k_packs_epi64(v[14], v[15]);
-
-          v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
-          v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
-          v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
-          v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
-          v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
-          v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
-          v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
-          v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
-
-          u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
-          u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
-          u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
-          u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
-          u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS);
-          u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS);
-          u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS);
-          u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS);
-
-          sign[0] = _mm_cmplt_epi32(u[0], kZero);
-          sign[1] = _mm_cmplt_epi32(u[1], kZero);
-          sign[2] = _mm_cmplt_epi32(u[2], kZero);
-          sign[3] = _mm_cmplt_epi32(u[3], kZero);
-          sign[4] = _mm_cmplt_epi32(u[4], kZero);
-          sign[5] = _mm_cmplt_epi32(u[5], kZero);
-          sign[6] = _mm_cmplt_epi32(u[6], kZero);
-          sign[7] = _mm_cmplt_epi32(u[7], kZero);
-
-          u[0] = _mm_sub_epi32(u[0], sign[0]);
-          u[1] = _mm_sub_epi32(u[1], sign[1]);
-          u[2] = _mm_sub_epi32(u[2], sign[2]);
-          u[3] = _mm_sub_epi32(u[3], sign[3]);
-          u[4] = _mm_sub_epi32(u[4], sign[4]);
-          u[5] = _mm_sub_epi32(u[5], sign[5]);
-          u[6] = _mm_sub_epi32(u[6], sign[6]);
-          u[7] = _mm_sub_epi32(u[7], sign[7]);
-
-          u[0] = _mm_add_epi32(u[0], K32One);
-          u[1] = _mm_add_epi32(u[1], K32One);
-          u[2] = _mm_add_epi32(u[2], K32One);
-          u[3] = _mm_add_epi32(u[3], K32One);
-          u[4] = _mm_add_epi32(u[4], K32One);
-          u[5] = _mm_add_epi32(u[5], K32One);
-          u[6] = _mm_add_epi32(u[6], K32One);
-          u[7] = _mm_add_epi32(u[7], K32One);
-
-          u[0] = _mm_srai_epi32(u[0], 2);
-          u[1] = _mm_srai_epi32(u[1], 2);
-          u[2] = _mm_srai_epi32(u[2], 2);
-          u[3] = _mm_srai_epi32(u[3], 2);
-          u[4] = _mm_srai_epi32(u[4], 2);
-          u[5] = _mm_srai_epi32(u[5], 2);
-          u[6] = _mm_srai_epi32(u[6], 2);
-          u[7] = _mm_srai_epi32(u[7], 2);
-
-          out[ 4] = _mm_packs_epi32(u[0], u[1]);
-          out[20] = _mm_packs_epi32(u[2], u[3]);
-          out[12] = _mm_packs_epi32(u[4], u[5]);
-          out[28] = _mm_packs_epi32(u[6], u[7]);
-#if DCT_HIGH_BIT_DEPTH
-          overflow = check_epi16_overflow_x4(&out[4], &out[20],
-                                             &out[12], &out[28]);
-          if (overflow) {
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-            return;
-          }
-#endif  // DCT_HIGH_BIT_DEPTH
-        }
-        {
-          lstep3[16] = _mm_add_epi32(lstep2[18], lstep1[16]);
-          lstep3[17] = _mm_add_epi32(lstep2[19], lstep1[17]);
-          lstep3[18] = _mm_sub_epi32(lstep1[16], lstep2[18]);
-          lstep3[19] = _mm_sub_epi32(lstep1[17], lstep2[19]);
-          lstep3[20] = _mm_sub_epi32(lstep1[22], lstep2[20]);
-          lstep3[21] = _mm_sub_epi32(lstep1[23], lstep2[21]);
-          lstep3[22] = _mm_add_epi32(lstep2[20], lstep1[22]);
-          lstep3[23] = _mm_add_epi32(lstep2[21], lstep1[23]);
-          lstep3[24] = _mm_add_epi32(lstep2[26], lstep1[24]);
-          lstep3[25] = _mm_add_epi32(lstep2[27], lstep1[25]);
-          lstep3[26] = _mm_sub_epi32(lstep1[24], lstep2[26]);
-          lstep3[27] = _mm_sub_epi32(lstep1[25], lstep2[27]);
-          lstep3[28] = _mm_sub_epi32(lstep1[30], lstep2[28]);
-          lstep3[29] = _mm_sub_epi32(lstep1[31], lstep2[29]);
-          lstep3[30] = _mm_add_epi32(lstep2[28], lstep1[30]);
-          lstep3[31] = _mm_add_epi32(lstep2[29], lstep1[31]);
-        }
-        {
-          const __m128i k32_m04_p28 = pair_set_epi32(-cospi_4_64, cospi_28_64);
-          const __m128i k32_m28_m04 = pair_set_epi32(-cospi_28_64, -cospi_4_64);
-          const __m128i k32_m20_p12 = pair_set_epi32(-cospi_20_64, cospi_12_64);
-          const __m128i k32_m12_m20 = pair_set_epi32(-cospi_12_64,
-                                                     -cospi_20_64);
-          const __m128i k32_p12_p20 = pair_set_epi32(cospi_12_64, cospi_20_64);
-          const __m128i k32_p28_p04 = pair_set_epi32(cospi_28_64, cospi_4_64);
-
-          u[ 0] = _mm_unpacklo_epi32(lstep2[34], lstep2[60]);
-          u[ 1] = _mm_unpackhi_epi32(lstep2[34], lstep2[60]);
-          u[ 2] = _mm_unpacklo_epi32(lstep2[35], lstep2[61]);
-          u[ 3] = _mm_unpackhi_epi32(lstep2[35], lstep2[61]);
-          u[ 4] = _mm_unpacklo_epi32(lstep2[36], lstep2[58]);
-          u[ 5] = _mm_unpackhi_epi32(lstep2[36], lstep2[58]);
-          u[ 6] = _mm_unpacklo_epi32(lstep2[37], lstep2[59]);
-          u[ 7] = _mm_unpackhi_epi32(lstep2[37], lstep2[59]);
-          u[ 8] = _mm_unpacklo_epi32(lstep2[42], lstep2[52]);
-          u[ 9] = _mm_unpackhi_epi32(lstep2[42], lstep2[52]);
-          u[10] = _mm_unpacklo_epi32(lstep2[43], lstep2[53]);
-          u[11] = _mm_unpackhi_epi32(lstep2[43], lstep2[53]);
-          u[12] = _mm_unpacklo_epi32(lstep2[44], lstep2[50]);
-          u[13] = _mm_unpackhi_epi32(lstep2[44], lstep2[50]);
-          u[14] = _mm_unpacklo_epi32(lstep2[45], lstep2[51]);
-          u[15] = _mm_unpackhi_epi32(lstep2[45], lstep2[51]);
-
-          v[ 0] = k_madd_epi32(u[ 0], k32_m04_p28);
-          v[ 1] = k_madd_epi32(u[ 1], k32_m04_p28);
-          v[ 2] = k_madd_epi32(u[ 2], k32_m04_p28);
-          v[ 3] = k_madd_epi32(u[ 3], k32_m04_p28);
-          v[ 4] = k_madd_epi32(u[ 4], k32_m28_m04);
-          v[ 5] = k_madd_epi32(u[ 5], k32_m28_m04);
-          v[ 6] = k_madd_epi32(u[ 6], k32_m28_m04);
-          v[ 7] = k_madd_epi32(u[ 7], k32_m28_m04);
-          v[ 8] = k_madd_epi32(u[ 8], k32_m20_p12);
-          v[ 9] = k_madd_epi32(u[ 9], k32_m20_p12);
-          v[10] = k_madd_epi32(u[10], k32_m20_p12);
-          v[11] = k_madd_epi32(u[11], k32_m20_p12);
-          v[12] = k_madd_epi32(u[12], k32_m12_m20);
-          v[13] = k_madd_epi32(u[13], k32_m12_m20);
-          v[14] = k_madd_epi32(u[14], k32_m12_m20);
-          v[15] = k_madd_epi32(u[15], k32_m12_m20);
-          v[16] = k_madd_epi32(u[12], k32_m20_p12);
-          v[17] = k_madd_epi32(u[13], k32_m20_p12);
-          v[18] = k_madd_epi32(u[14], k32_m20_p12);
-          v[19] = k_madd_epi32(u[15], k32_m20_p12);
-          v[20] = k_madd_epi32(u[ 8], k32_p12_p20);
-          v[21] = k_madd_epi32(u[ 9], k32_p12_p20);
-          v[22] = k_madd_epi32(u[10], k32_p12_p20);
-          v[23] = k_madd_epi32(u[11], k32_p12_p20);
-          v[24] = k_madd_epi32(u[ 4], k32_m04_p28);
-          v[25] = k_madd_epi32(u[ 5], k32_m04_p28);
-          v[26] = k_madd_epi32(u[ 6], k32_m04_p28);
-          v[27] = k_madd_epi32(u[ 7], k32_m04_p28);
-          v[28] = k_madd_epi32(u[ 0], k32_p28_p04);
-          v[29] = k_madd_epi32(u[ 1], k32_p28_p04);
-          v[30] = k_madd_epi32(u[ 2], k32_p28_p04);
-          v[31] = k_madd_epi32(u[ 3], k32_p28_p04);
-
-#if DCT_HIGH_BIT_DEPTH
-          overflow = k_check_epi32_overflow_32(
-              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
-              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
-              &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23],
-              &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31],
-              &kZero);
-          if (overflow) {
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-            return;
-          }
-#endif  // DCT_HIGH_BIT_DEPTH
-          u[ 0] = k_packs_epi64(v[ 0], v[ 1]);
-          u[ 1] = k_packs_epi64(v[ 2], v[ 3]);
-          u[ 2] = k_packs_epi64(v[ 4], v[ 5]);
-          u[ 3] = k_packs_epi64(v[ 6], v[ 7]);
-          u[ 4] = k_packs_epi64(v[ 8], v[ 9]);
-          u[ 5] = k_packs_epi64(v[10], v[11]);
-          u[ 6] = k_packs_epi64(v[12], v[13]);
-          u[ 7] = k_packs_epi64(v[14], v[15]);
-          u[ 8] = k_packs_epi64(v[16], v[17]);
-          u[ 9] = k_packs_epi64(v[18], v[19]);
-          u[10] = k_packs_epi64(v[20], v[21]);
-          u[11] = k_packs_epi64(v[22], v[23]);
-          u[12] = k_packs_epi64(v[24], v[25]);
-          u[13] = k_packs_epi64(v[26], v[27]);
-          u[14] = k_packs_epi64(v[28], v[29]);
-          u[15] = k_packs_epi64(v[30], v[31]);
-
-          v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
-          v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
-          v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
-          v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
-          v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
-          v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
-          v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
-          v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
-          v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
-          v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
-          v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
-          v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
-          v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
-          v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
-          v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
-          v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
-
-          lstep3[34] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS);
-          lstep3[35] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS);
-          lstep3[36] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS);
-          lstep3[37] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS);
-          lstep3[42] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS);
-          lstep3[43] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS);
-          lstep3[44] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS);
-          lstep3[45] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS);
-          lstep3[50] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS);
-          lstep3[51] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS);
-          lstep3[52] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
-          lstep3[53] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
-          lstep3[58] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
-          lstep3[59] = _mm_srai_epi32(v[13], DCT_CONST_BITS);
-          lstep3[60] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
-          lstep3[61] = _mm_srai_epi32(v[15], DCT_CONST_BITS);
-        }
-        // stage 7
-        {
-          const __m128i k32_p30_p02 = pair_set_epi32(cospi_30_64, cospi_2_64);
-          const __m128i k32_p14_p18 = pair_set_epi32(cospi_14_64, cospi_18_64);
-          const __m128i k32_p22_p10 = pair_set_epi32(cospi_22_64, cospi_10_64);
-          const __m128i k32_p06_p26 = pair_set_epi32(cospi_6_64,  cospi_26_64);
-          const __m128i k32_m26_p06 = pair_set_epi32(-cospi_26_64, cospi_6_64);
-          const __m128i k32_m10_p22 = pair_set_epi32(-cospi_10_64, cospi_22_64);
-          const __m128i k32_m18_p14 = pair_set_epi32(-cospi_18_64, cospi_14_64);
-          const __m128i k32_m02_p30 = pair_set_epi32(-cospi_2_64, cospi_30_64);
-
-          u[ 0] = _mm_unpacklo_epi32(lstep3[16], lstep3[30]);
-          u[ 1] = _mm_unpackhi_epi32(lstep3[16], lstep3[30]);
-          u[ 2] = _mm_unpacklo_epi32(lstep3[17], lstep3[31]);
-          u[ 3] = _mm_unpackhi_epi32(lstep3[17], lstep3[31]);
-          u[ 4] = _mm_unpacklo_epi32(lstep3[18], lstep3[28]);
-          u[ 5] = _mm_unpackhi_epi32(lstep3[18], lstep3[28]);
-          u[ 6] = _mm_unpacklo_epi32(lstep3[19], lstep3[29]);
-          u[ 7] = _mm_unpackhi_epi32(lstep3[19], lstep3[29]);
-          u[ 8] = _mm_unpacklo_epi32(lstep3[20], lstep3[26]);
-          u[ 9] = _mm_unpackhi_epi32(lstep3[20], lstep3[26]);
-          u[10] = _mm_unpacklo_epi32(lstep3[21], lstep3[27]);
-          u[11] = _mm_unpackhi_epi32(lstep3[21], lstep3[27]);
-          u[12] = _mm_unpacklo_epi32(lstep3[22], lstep3[24]);
-          u[13] = _mm_unpackhi_epi32(lstep3[22], lstep3[24]);
-          u[14] = _mm_unpacklo_epi32(lstep3[23], lstep3[25]);
-          u[15] = _mm_unpackhi_epi32(lstep3[23], lstep3[25]);
-
-          v[ 0] = k_madd_epi32(u[ 0], k32_p30_p02);
-          v[ 1] = k_madd_epi32(u[ 1], k32_p30_p02);
-          v[ 2] = k_madd_epi32(u[ 2], k32_p30_p02);
-          v[ 3] = k_madd_epi32(u[ 3], k32_p30_p02);
-          v[ 4] = k_madd_epi32(u[ 4], k32_p14_p18);
-          v[ 5] = k_madd_epi32(u[ 5], k32_p14_p18);
-          v[ 6] = k_madd_epi32(u[ 6], k32_p14_p18);
-          v[ 7] = k_madd_epi32(u[ 7], k32_p14_p18);
-          v[ 8] = k_madd_epi32(u[ 8], k32_p22_p10);
-          v[ 9] = k_madd_epi32(u[ 9], k32_p22_p10);
-          v[10] = k_madd_epi32(u[10], k32_p22_p10);
-          v[11] = k_madd_epi32(u[11], k32_p22_p10);
-          v[12] = k_madd_epi32(u[12], k32_p06_p26);
-          v[13] = k_madd_epi32(u[13], k32_p06_p26);
-          v[14] = k_madd_epi32(u[14], k32_p06_p26);
-          v[15] = k_madd_epi32(u[15], k32_p06_p26);
-          v[16] = k_madd_epi32(u[12], k32_m26_p06);
-          v[17] = k_madd_epi32(u[13], k32_m26_p06);
-          v[18] = k_madd_epi32(u[14], k32_m26_p06);
-          v[19] = k_madd_epi32(u[15], k32_m26_p06);
-          v[20] = k_madd_epi32(u[ 8], k32_m10_p22);
-          v[21] = k_madd_epi32(u[ 9], k32_m10_p22);
-          v[22] = k_madd_epi32(u[10], k32_m10_p22);
-          v[23] = k_madd_epi32(u[11], k32_m10_p22);
-          v[24] = k_madd_epi32(u[ 4], k32_m18_p14);
-          v[25] = k_madd_epi32(u[ 5], k32_m18_p14);
-          v[26] = k_madd_epi32(u[ 6], k32_m18_p14);
-          v[27] = k_madd_epi32(u[ 7], k32_m18_p14);
-          v[28] = k_madd_epi32(u[ 0], k32_m02_p30);
-          v[29] = k_madd_epi32(u[ 1], k32_m02_p30);
-          v[30] = k_madd_epi32(u[ 2], k32_m02_p30);
-          v[31] = k_madd_epi32(u[ 3], k32_m02_p30);
-
-#if DCT_HIGH_BIT_DEPTH
-          overflow = k_check_epi32_overflow_32(
-              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
-              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
-              &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23],
-              &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31],
-              &kZero);
-          if (overflow) {
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-            return;
-          }
-#endif  // DCT_HIGH_BIT_DEPTH
-          u[ 0] = k_packs_epi64(v[ 0], v[ 1]);
-          u[ 1] = k_packs_epi64(v[ 2], v[ 3]);
-          u[ 2] = k_packs_epi64(v[ 4], v[ 5]);
-          u[ 3] = k_packs_epi64(v[ 6], v[ 7]);
-          u[ 4] = k_packs_epi64(v[ 8], v[ 9]);
-          u[ 5] = k_packs_epi64(v[10], v[11]);
-          u[ 6] = k_packs_epi64(v[12], v[13]);
-          u[ 7] = k_packs_epi64(v[14], v[15]);
-          u[ 8] = k_packs_epi64(v[16], v[17]);
-          u[ 9] = k_packs_epi64(v[18], v[19]);
-          u[10] = k_packs_epi64(v[20], v[21]);
-          u[11] = k_packs_epi64(v[22], v[23]);
-          u[12] = k_packs_epi64(v[24], v[25]);
-          u[13] = k_packs_epi64(v[26], v[27]);
-          u[14] = k_packs_epi64(v[28], v[29]);
-          u[15] = k_packs_epi64(v[30], v[31]);
-
-          v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
-          v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
-          v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
-          v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
-          v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
-          v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
-          v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
-          v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
-          v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
-          v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
-          v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
-          v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
-          v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
-          v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
-          v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
-          v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
-
-          u[ 0] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS);
-          u[ 1] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS);
-          u[ 2] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS);
-          u[ 3] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS);
-          u[ 4] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS);
-          u[ 5] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS);
-          u[ 6] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS);
-          u[ 7] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS);
-          u[ 8] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS);
-          u[ 9] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS);
-          u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
-          u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
-          u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
-          u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS);
-          u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
-          u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS);
-
-          v[ 0] = _mm_cmplt_epi32(u[ 0], kZero);
-          v[ 1] = _mm_cmplt_epi32(u[ 1], kZero);
-          v[ 2] = _mm_cmplt_epi32(u[ 2], kZero);
-          v[ 3] = _mm_cmplt_epi32(u[ 3], kZero);
-          v[ 4] = _mm_cmplt_epi32(u[ 4], kZero);
-          v[ 5] = _mm_cmplt_epi32(u[ 5], kZero);
-          v[ 6] = _mm_cmplt_epi32(u[ 6], kZero);
-          v[ 7] = _mm_cmplt_epi32(u[ 7], kZero);
-          v[ 8] = _mm_cmplt_epi32(u[ 8], kZero);
-          v[ 9] = _mm_cmplt_epi32(u[ 9], kZero);
-          v[10] = _mm_cmplt_epi32(u[10], kZero);
-          v[11] = _mm_cmplt_epi32(u[11], kZero);
-          v[12] = _mm_cmplt_epi32(u[12], kZero);
-          v[13] = _mm_cmplt_epi32(u[13], kZero);
-          v[14] = _mm_cmplt_epi32(u[14], kZero);
-          v[15] = _mm_cmplt_epi32(u[15], kZero);
-
-          u[ 0] = _mm_sub_epi32(u[ 0], v[ 0]);
-          u[ 1] = _mm_sub_epi32(u[ 1], v[ 1]);
-          u[ 2] = _mm_sub_epi32(u[ 2], v[ 2]);
-          u[ 3] = _mm_sub_epi32(u[ 3], v[ 3]);
-          u[ 4] = _mm_sub_epi32(u[ 4], v[ 4]);
-          u[ 5] = _mm_sub_epi32(u[ 5], v[ 5]);
-          u[ 6] = _mm_sub_epi32(u[ 6], v[ 6]);
-          u[ 7] = _mm_sub_epi32(u[ 7], v[ 7]);
-          u[ 8] = _mm_sub_epi32(u[ 8], v[ 8]);
-          u[ 9] = _mm_sub_epi32(u[ 9], v[ 9]);
-          u[10] = _mm_sub_epi32(u[10], v[10]);
-          u[11] = _mm_sub_epi32(u[11], v[11]);
-          u[12] = _mm_sub_epi32(u[12], v[12]);
-          u[13] = _mm_sub_epi32(u[13], v[13]);
-          u[14] = _mm_sub_epi32(u[14], v[14]);
-          u[15] = _mm_sub_epi32(u[15], v[15]);
-
-          v[ 0] = _mm_add_epi32(u[ 0], K32One);
-          v[ 1] = _mm_add_epi32(u[ 1], K32One);
-          v[ 2] = _mm_add_epi32(u[ 2], K32One);
-          v[ 3] = _mm_add_epi32(u[ 3], K32One);
-          v[ 4] = _mm_add_epi32(u[ 4], K32One);
-          v[ 5] = _mm_add_epi32(u[ 5], K32One);
-          v[ 6] = _mm_add_epi32(u[ 6], K32One);
-          v[ 7] = _mm_add_epi32(u[ 7], K32One);
-          v[ 8] = _mm_add_epi32(u[ 8], K32One);
-          v[ 9] = _mm_add_epi32(u[ 9], K32One);
-          v[10] = _mm_add_epi32(u[10], K32One);
-          v[11] = _mm_add_epi32(u[11], K32One);
-          v[12] = _mm_add_epi32(u[12], K32One);
-          v[13] = _mm_add_epi32(u[13], K32One);
-          v[14] = _mm_add_epi32(u[14], K32One);
-          v[15] = _mm_add_epi32(u[15], K32One);
-
-          u[ 0] = _mm_srai_epi32(v[ 0], 2);
-          u[ 1] = _mm_srai_epi32(v[ 1], 2);
-          u[ 2] = _mm_srai_epi32(v[ 2], 2);
-          u[ 3] = _mm_srai_epi32(v[ 3], 2);
-          u[ 4] = _mm_srai_epi32(v[ 4], 2);
-          u[ 5] = _mm_srai_epi32(v[ 5], 2);
-          u[ 6] = _mm_srai_epi32(v[ 6], 2);
-          u[ 7] = _mm_srai_epi32(v[ 7], 2);
-          u[ 8] = _mm_srai_epi32(v[ 8], 2);
-          u[ 9] = _mm_srai_epi32(v[ 9], 2);
-          u[10] = _mm_srai_epi32(v[10], 2);
-          u[11] = _mm_srai_epi32(v[11], 2);
-          u[12] = _mm_srai_epi32(v[12], 2);
-          u[13] = _mm_srai_epi32(v[13], 2);
-          u[14] = _mm_srai_epi32(v[14], 2);
-          u[15] = _mm_srai_epi32(v[15], 2);
-
-          out[ 2] = _mm_packs_epi32(u[0], u[1]);
-          out[18] = _mm_packs_epi32(u[2], u[3]);
-          out[10] = _mm_packs_epi32(u[4], u[5]);
-          out[26] = _mm_packs_epi32(u[6], u[7]);
-          out[ 6] = _mm_packs_epi32(u[8], u[9]);
-          out[22] = _mm_packs_epi32(u[10], u[11]);
-          out[14] = _mm_packs_epi32(u[12], u[13]);
-          out[30] = _mm_packs_epi32(u[14], u[15]);
-#if DCT_HIGH_BIT_DEPTH
-          overflow = check_epi16_overflow_x8(&out[2], &out[18], &out[10],
-                                             &out[26], &out[6], &out[22],
-                                             &out[14], &out[30]);
-          if (overflow) {
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-            return;
-          }
-#endif  // DCT_HIGH_BIT_DEPTH
-        }
-        {
-          lstep1[32] = _mm_add_epi32(lstep3[34], lstep2[32]);
-          lstep1[33] = _mm_add_epi32(lstep3[35], lstep2[33]);
-          lstep1[34] = _mm_sub_epi32(lstep2[32], lstep3[34]);
-          lstep1[35] = _mm_sub_epi32(lstep2[33], lstep3[35]);
-          lstep1[36] = _mm_sub_epi32(lstep2[38], lstep3[36]);
-          lstep1[37] = _mm_sub_epi32(lstep2[39], lstep3[37]);
-          lstep1[38] = _mm_add_epi32(lstep3[36], lstep2[38]);
-          lstep1[39] = _mm_add_epi32(lstep3[37], lstep2[39]);
-          lstep1[40] = _mm_add_epi32(lstep3[42], lstep2[40]);
-          lstep1[41] = _mm_add_epi32(lstep3[43], lstep2[41]);
-          lstep1[42] = _mm_sub_epi32(lstep2[40], lstep3[42]);
-          lstep1[43] = _mm_sub_epi32(lstep2[41], lstep3[43]);
-          lstep1[44] = _mm_sub_epi32(lstep2[46], lstep3[44]);
-          lstep1[45] = _mm_sub_epi32(lstep2[47], lstep3[45]);
-          lstep1[46] = _mm_add_epi32(lstep3[44], lstep2[46]);
-          lstep1[47] = _mm_add_epi32(lstep3[45], lstep2[47]);
-          lstep1[48] = _mm_add_epi32(lstep3[50], lstep2[48]);
-          lstep1[49] = _mm_add_epi32(lstep3[51], lstep2[49]);
-          lstep1[50] = _mm_sub_epi32(lstep2[48], lstep3[50]);
-          lstep1[51] = _mm_sub_epi32(lstep2[49], lstep3[51]);
-          lstep1[52] = _mm_sub_epi32(lstep2[54], lstep3[52]);
-          lstep1[53] = _mm_sub_epi32(lstep2[55], lstep3[53]);
-          lstep1[54] = _mm_add_epi32(lstep3[52], lstep2[54]);
-          lstep1[55] = _mm_add_epi32(lstep3[53], lstep2[55]);
-          lstep1[56] = _mm_add_epi32(lstep3[58], lstep2[56]);
-          lstep1[57] = _mm_add_epi32(lstep3[59], lstep2[57]);
-          lstep1[58] = _mm_sub_epi32(lstep2[56], lstep3[58]);
-          lstep1[59] = _mm_sub_epi32(lstep2[57], lstep3[59]);
-          lstep1[60] = _mm_sub_epi32(lstep2[62], lstep3[60]);
-          lstep1[61] = _mm_sub_epi32(lstep2[63], lstep3[61]);
-          lstep1[62] = _mm_add_epi32(lstep3[60], lstep2[62]);
-          lstep1[63] = _mm_add_epi32(lstep3[61], lstep2[63]);
-        }
-        // stage 8
-        {
-          const __m128i k32_p31_p01 = pair_set_epi32(cospi_31_64, cospi_1_64);
-          const __m128i k32_p15_p17 = pair_set_epi32(cospi_15_64, cospi_17_64);
-          const __m128i k32_p23_p09 = pair_set_epi32(cospi_23_64, cospi_9_64);
-          const __m128i k32_p07_p25 = pair_set_epi32(cospi_7_64, cospi_25_64);
-          const __m128i k32_m25_p07 = pair_set_epi32(-cospi_25_64, cospi_7_64);
-          const __m128i k32_m09_p23 = pair_set_epi32(-cospi_9_64, cospi_23_64);
-          const __m128i k32_m17_p15 = pair_set_epi32(-cospi_17_64, cospi_15_64);
-          const __m128i k32_m01_p31 = pair_set_epi32(-cospi_1_64, cospi_31_64);
-
-          u[ 0] = _mm_unpacklo_epi32(lstep1[32], lstep1[62]);
-          u[ 1] = _mm_unpackhi_epi32(lstep1[32], lstep1[62]);
-          u[ 2] = _mm_unpacklo_epi32(lstep1[33], lstep1[63]);
-          u[ 3] = _mm_unpackhi_epi32(lstep1[33], lstep1[63]);
-          u[ 4] = _mm_unpacklo_epi32(lstep1[34], lstep1[60]);
-          u[ 5] = _mm_unpackhi_epi32(lstep1[34], lstep1[60]);
-          u[ 6] = _mm_unpacklo_epi32(lstep1[35], lstep1[61]);
-          u[ 7] = _mm_unpackhi_epi32(lstep1[35], lstep1[61]);
-          u[ 8] = _mm_unpacklo_epi32(lstep1[36], lstep1[58]);
-          u[ 9] = _mm_unpackhi_epi32(lstep1[36], lstep1[58]);
-          u[10] = _mm_unpacklo_epi32(lstep1[37], lstep1[59]);
-          u[11] = _mm_unpackhi_epi32(lstep1[37], lstep1[59]);
-          u[12] = _mm_unpacklo_epi32(lstep1[38], lstep1[56]);
-          u[13] = _mm_unpackhi_epi32(lstep1[38], lstep1[56]);
-          u[14] = _mm_unpacklo_epi32(lstep1[39], lstep1[57]);
-          u[15] = _mm_unpackhi_epi32(lstep1[39], lstep1[57]);
-
-          v[ 0] = k_madd_epi32(u[ 0], k32_p31_p01);
-          v[ 1] = k_madd_epi32(u[ 1], k32_p31_p01);
-          v[ 2] = k_madd_epi32(u[ 2], k32_p31_p01);
-          v[ 3] = k_madd_epi32(u[ 3], k32_p31_p01);
-          v[ 4] = k_madd_epi32(u[ 4], k32_p15_p17);
-          v[ 5] = k_madd_epi32(u[ 5], k32_p15_p17);
-          v[ 6] = k_madd_epi32(u[ 6], k32_p15_p17);
-          v[ 7] = k_madd_epi32(u[ 7], k32_p15_p17);
-          v[ 8] = k_madd_epi32(u[ 8], k32_p23_p09);
-          v[ 9] = k_madd_epi32(u[ 9], k32_p23_p09);
-          v[10] = k_madd_epi32(u[10], k32_p23_p09);
-          v[11] = k_madd_epi32(u[11], k32_p23_p09);
-          v[12] = k_madd_epi32(u[12], k32_p07_p25);
-          v[13] = k_madd_epi32(u[13], k32_p07_p25);
-          v[14] = k_madd_epi32(u[14], k32_p07_p25);
-          v[15] = k_madd_epi32(u[15], k32_p07_p25);
-          v[16] = k_madd_epi32(u[12], k32_m25_p07);
-          v[17] = k_madd_epi32(u[13], k32_m25_p07);
-          v[18] = k_madd_epi32(u[14], k32_m25_p07);
-          v[19] = k_madd_epi32(u[15], k32_m25_p07);
-          v[20] = k_madd_epi32(u[ 8], k32_m09_p23);
-          v[21] = k_madd_epi32(u[ 9], k32_m09_p23);
-          v[22] = k_madd_epi32(u[10], k32_m09_p23);
-          v[23] = k_madd_epi32(u[11], k32_m09_p23);
-          v[24] = k_madd_epi32(u[ 4], k32_m17_p15);
-          v[25] = k_madd_epi32(u[ 5], k32_m17_p15);
-          v[26] = k_madd_epi32(u[ 6], k32_m17_p15);
-          v[27] = k_madd_epi32(u[ 7], k32_m17_p15);
-          v[28] = k_madd_epi32(u[ 0], k32_m01_p31);
-          v[29] = k_madd_epi32(u[ 1], k32_m01_p31);
-          v[30] = k_madd_epi32(u[ 2], k32_m01_p31);
-          v[31] = k_madd_epi32(u[ 3], k32_m01_p31);
-
-#if DCT_HIGH_BIT_DEPTH
-          overflow = k_check_epi32_overflow_32(
-              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
-              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
-              &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23],
-              &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31],
-              &kZero);
-          if (overflow) {
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-            return;
-          }
-#endif  // DCT_HIGH_BIT_DEPTH
-          u[ 0] = k_packs_epi64(v[ 0], v[ 1]);
-          u[ 1] = k_packs_epi64(v[ 2], v[ 3]);
-          u[ 2] = k_packs_epi64(v[ 4], v[ 5]);
-          u[ 3] = k_packs_epi64(v[ 6], v[ 7]);
-          u[ 4] = k_packs_epi64(v[ 8], v[ 9]);
-          u[ 5] = k_packs_epi64(v[10], v[11]);
-          u[ 6] = k_packs_epi64(v[12], v[13]);
-          u[ 7] = k_packs_epi64(v[14], v[15]);
-          u[ 8] = k_packs_epi64(v[16], v[17]);
-          u[ 9] = k_packs_epi64(v[18], v[19]);
-          u[10] = k_packs_epi64(v[20], v[21]);
-          u[11] = k_packs_epi64(v[22], v[23]);
-          u[12] = k_packs_epi64(v[24], v[25]);
-          u[13] = k_packs_epi64(v[26], v[27]);
-          u[14] = k_packs_epi64(v[28], v[29]);
-          u[15] = k_packs_epi64(v[30], v[31]);
-
-          v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
-          v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
-          v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
-          v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
-          v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
-          v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
-          v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
-          v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
-          v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
-          v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
-          v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
-          v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
-          v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
-          v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
-          v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
-          v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
-
-          u[ 0] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS);
-          u[ 1] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS);
-          u[ 2] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS);
-          u[ 3] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS);
-          u[ 4] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS);
-          u[ 5] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS);
-          u[ 6] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS);
-          u[ 7] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS);
-          u[ 8] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS);
-          u[ 9] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS);
-          u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
-          u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
-          u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
-          u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS);
-          u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
-          u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS);
-
-          v[ 0] = _mm_cmplt_epi32(u[ 0], kZero);
-          v[ 1] = _mm_cmplt_epi32(u[ 1], kZero);
-          v[ 2] = _mm_cmplt_epi32(u[ 2], kZero);
-          v[ 3] = _mm_cmplt_epi32(u[ 3], kZero);
-          v[ 4] = _mm_cmplt_epi32(u[ 4], kZero);
-          v[ 5] = _mm_cmplt_epi32(u[ 5], kZero);
-          v[ 6] = _mm_cmplt_epi32(u[ 6], kZero);
-          v[ 7] = _mm_cmplt_epi32(u[ 7], kZero);
-          v[ 8] = _mm_cmplt_epi32(u[ 8], kZero);
-          v[ 9] = _mm_cmplt_epi32(u[ 9], kZero);
-          v[10] = _mm_cmplt_epi32(u[10], kZero);
-          v[11] = _mm_cmplt_epi32(u[11], kZero);
-          v[12] = _mm_cmplt_epi32(u[12], kZero);
-          v[13] = _mm_cmplt_epi32(u[13], kZero);
-          v[14] = _mm_cmplt_epi32(u[14], kZero);
-          v[15] = _mm_cmplt_epi32(u[15], kZero);
-
-          u[ 0] = _mm_sub_epi32(u[ 0], v[ 0]);
-          u[ 1] = _mm_sub_epi32(u[ 1], v[ 1]);
-          u[ 2] = _mm_sub_epi32(u[ 2], v[ 2]);
-          u[ 3] = _mm_sub_epi32(u[ 3], v[ 3]);
-          u[ 4] = _mm_sub_epi32(u[ 4], v[ 4]);
-          u[ 5] = _mm_sub_epi32(u[ 5], v[ 5]);
-          u[ 6] = _mm_sub_epi32(u[ 6], v[ 6]);
-          u[ 7] = _mm_sub_epi32(u[ 7], v[ 7]);
-          u[ 8] = _mm_sub_epi32(u[ 8], v[ 8]);
-          u[ 9] = _mm_sub_epi32(u[ 9], v[ 9]);
-          u[10] = _mm_sub_epi32(u[10], v[10]);
-          u[11] = _mm_sub_epi32(u[11], v[11]);
-          u[12] = _mm_sub_epi32(u[12], v[12]);
-          u[13] = _mm_sub_epi32(u[13], v[13]);
-          u[14] = _mm_sub_epi32(u[14], v[14]);
-          u[15] = _mm_sub_epi32(u[15], v[15]);
-
-          v[0] = _mm_add_epi32(u[0], K32One);
-          v[1] = _mm_add_epi32(u[1], K32One);
-          v[2] = _mm_add_epi32(u[2], K32One);
-          v[3] = _mm_add_epi32(u[3], K32One);
-          v[4] = _mm_add_epi32(u[4], K32One);
-          v[5] = _mm_add_epi32(u[5], K32One);
-          v[6] = _mm_add_epi32(u[6], K32One);
-          v[7] = _mm_add_epi32(u[7], K32One);
-          v[8] = _mm_add_epi32(u[8], K32One);
-          v[9] = _mm_add_epi32(u[9], K32One);
-          v[10] = _mm_add_epi32(u[10], K32One);
-          v[11] = _mm_add_epi32(u[11], K32One);
-          v[12] = _mm_add_epi32(u[12], K32One);
-          v[13] = _mm_add_epi32(u[13], K32One);
-          v[14] = _mm_add_epi32(u[14], K32One);
-          v[15] = _mm_add_epi32(u[15], K32One);
-
-          u[0] = _mm_srai_epi32(v[0], 2);
-          u[1] = _mm_srai_epi32(v[1], 2);
-          u[2] = _mm_srai_epi32(v[2], 2);
-          u[3] = _mm_srai_epi32(v[3], 2);
-          u[4] = _mm_srai_epi32(v[4], 2);
-          u[5] = _mm_srai_epi32(v[5], 2);
-          u[6] = _mm_srai_epi32(v[6], 2);
-          u[7] = _mm_srai_epi32(v[7], 2);
-          u[8] = _mm_srai_epi32(v[8], 2);
-          u[9] = _mm_srai_epi32(v[9], 2);
-          u[10] = _mm_srai_epi32(v[10], 2);
-          u[11] = _mm_srai_epi32(v[11], 2);
-          u[12] = _mm_srai_epi32(v[12], 2);
-          u[13] = _mm_srai_epi32(v[13], 2);
-          u[14] = _mm_srai_epi32(v[14], 2);
-          u[15] = _mm_srai_epi32(v[15], 2);
-
-          out[ 1] = _mm_packs_epi32(u[0], u[1]);
-          out[17] = _mm_packs_epi32(u[2], u[3]);
-          out[ 9] = _mm_packs_epi32(u[4], u[5]);
-          out[25] = _mm_packs_epi32(u[6], u[7]);
-          out[ 7] = _mm_packs_epi32(u[8], u[9]);
-          out[23] = _mm_packs_epi32(u[10], u[11]);
-          out[15] = _mm_packs_epi32(u[12], u[13]);
-          out[31] = _mm_packs_epi32(u[14], u[15]);
-#if DCT_HIGH_BIT_DEPTH
-          overflow = check_epi16_overflow_x8(&out[1], &out[17], &out[9],
-                                             &out[25], &out[7], &out[23],
-                                             &out[15], &out[31]);
-          if (overflow) {
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-            return;
-          }
-#endif  // DCT_HIGH_BIT_DEPTH
-        }
-        {
-          const __m128i k32_p27_p05 = pair_set_epi32(cospi_27_64, cospi_5_64);
-          const __m128i k32_p11_p21 = pair_set_epi32(cospi_11_64, cospi_21_64);
-          const __m128i k32_p19_p13 = pair_set_epi32(cospi_19_64, cospi_13_64);
-          const __m128i k32_p03_p29 = pair_set_epi32(cospi_3_64, cospi_29_64);
-          const __m128i k32_m29_p03 = pair_set_epi32(-cospi_29_64, cospi_3_64);
-          const __m128i k32_m13_p19 = pair_set_epi32(-cospi_13_64, cospi_19_64);
-          const __m128i k32_m21_p11 = pair_set_epi32(-cospi_21_64, cospi_11_64);
-          const __m128i k32_m05_p27 = pair_set_epi32(-cospi_5_64, cospi_27_64);
-
-          u[ 0] = _mm_unpacklo_epi32(lstep1[40], lstep1[54]);
-          u[ 1] = _mm_unpackhi_epi32(lstep1[40], lstep1[54]);
-          u[ 2] = _mm_unpacklo_epi32(lstep1[41], lstep1[55]);
-          u[ 3] = _mm_unpackhi_epi32(lstep1[41], lstep1[55]);
-          u[ 4] = _mm_unpacklo_epi32(lstep1[42], lstep1[52]);
-          u[ 5] = _mm_unpackhi_epi32(lstep1[42], lstep1[52]);
-          u[ 6] = _mm_unpacklo_epi32(lstep1[43], lstep1[53]);
-          u[ 7] = _mm_unpackhi_epi32(lstep1[43], lstep1[53]);
-          u[ 8] = _mm_unpacklo_epi32(lstep1[44], lstep1[50]);
-          u[ 9] = _mm_unpackhi_epi32(lstep1[44], lstep1[50]);
-          u[10] = _mm_unpacklo_epi32(lstep1[45], lstep1[51]);
-          u[11] = _mm_unpackhi_epi32(lstep1[45], lstep1[51]);
-          u[12] = _mm_unpacklo_epi32(lstep1[46], lstep1[48]);
-          u[13] = _mm_unpackhi_epi32(lstep1[46], lstep1[48]);
-          u[14] = _mm_unpacklo_epi32(lstep1[47], lstep1[49]);
-          u[15] = _mm_unpackhi_epi32(lstep1[47], lstep1[49]);
-
-          v[ 0] = k_madd_epi32(u[ 0], k32_p27_p05);
-          v[ 1] = k_madd_epi32(u[ 1], k32_p27_p05);
-          v[ 2] = k_madd_epi32(u[ 2], k32_p27_p05);
-          v[ 3] = k_madd_epi32(u[ 3], k32_p27_p05);
-          v[ 4] = k_madd_epi32(u[ 4], k32_p11_p21);
-          v[ 5] = k_madd_epi32(u[ 5], k32_p11_p21);
-          v[ 6] = k_madd_epi32(u[ 6], k32_p11_p21);
-          v[ 7] = k_madd_epi32(u[ 7], k32_p11_p21);
-          v[ 8] = k_madd_epi32(u[ 8], k32_p19_p13);
-          v[ 9] = k_madd_epi32(u[ 9], k32_p19_p13);
-          v[10] = k_madd_epi32(u[10], k32_p19_p13);
-          v[11] = k_madd_epi32(u[11], k32_p19_p13);
-          v[12] = k_madd_epi32(u[12], k32_p03_p29);
-          v[13] = k_madd_epi32(u[13], k32_p03_p29);
-          v[14] = k_madd_epi32(u[14], k32_p03_p29);
-          v[15] = k_madd_epi32(u[15], k32_p03_p29);
-          v[16] = k_madd_epi32(u[12], k32_m29_p03);
-          v[17] = k_madd_epi32(u[13], k32_m29_p03);
-          v[18] = k_madd_epi32(u[14], k32_m29_p03);
-          v[19] = k_madd_epi32(u[15], k32_m29_p03);
-          v[20] = k_madd_epi32(u[ 8], k32_m13_p19);
-          v[21] = k_madd_epi32(u[ 9], k32_m13_p19);
-          v[22] = k_madd_epi32(u[10], k32_m13_p19);
-          v[23] = k_madd_epi32(u[11], k32_m13_p19);
-          v[24] = k_madd_epi32(u[ 4], k32_m21_p11);
-          v[25] = k_madd_epi32(u[ 5], k32_m21_p11);
-          v[26] = k_madd_epi32(u[ 6], k32_m21_p11);
-          v[27] = k_madd_epi32(u[ 7], k32_m21_p11);
-          v[28] = k_madd_epi32(u[ 0], k32_m05_p27);
-          v[29] = k_madd_epi32(u[ 1], k32_m05_p27);
-          v[30] = k_madd_epi32(u[ 2], k32_m05_p27);
-          v[31] = k_madd_epi32(u[ 3], k32_m05_p27);
-
-#if DCT_HIGH_BIT_DEPTH
-          overflow = k_check_epi32_overflow_32(
-              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
-              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
-              &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23],
-              &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31],
-              &kZero);
-          if (overflow) {
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-            return;
-          }
-#endif  // DCT_HIGH_BIT_DEPTH
-          u[ 0] = k_packs_epi64(v[ 0], v[ 1]);
-          u[ 1] = k_packs_epi64(v[ 2], v[ 3]);
-          u[ 2] = k_packs_epi64(v[ 4], v[ 5]);
-          u[ 3] = k_packs_epi64(v[ 6], v[ 7]);
-          u[ 4] = k_packs_epi64(v[ 8], v[ 9]);
-          u[ 5] = k_packs_epi64(v[10], v[11]);
-          u[ 6] = k_packs_epi64(v[12], v[13]);
-          u[ 7] = k_packs_epi64(v[14], v[15]);
-          u[ 8] = k_packs_epi64(v[16], v[17]);
-          u[ 9] = k_packs_epi64(v[18], v[19]);
-          u[10] = k_packs_epi64(v[20], v[21]);
-          u[11] = k_packs_epi64(v[22], v[23]);
-          u[12] = k_packs_epi64(v[24], v[25]);
-          u[13] = k_packs_epi64(v[26], v[27]);
-          u[14] = k_packs_epi64(v[28], v[29]);
-          u[15] = k_packs_epi64(v[30], v[31]);
-
-          v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
-          v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
-          v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
-          v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
-          v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
-          v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
-          v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
-          v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
-          v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
-          v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
-          v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
-          v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
-          v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
-          v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
-          v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
-          v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
-
-          u[ 0] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS);
-          u[ 1] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS);
-          u[ 2] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS);
-          u[ 3] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS);
-          u[ 4] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS);
-          u[ 5] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS);
-          u[ 6] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS);
-          u[ 7] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS);
-          u[ 8] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS);
-          u[ 9] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS);
-          u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
-          u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
-          u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
-          u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS);
-          u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
-          u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS);
-
-          v[ 0] = _mm_cmplt_epi32(u[ 0], kZero);
-          v[ 1] = _mm_cmplt_epi32(u[ 1], kZero);
-          v[ 2] = _mm_cmplt_epi32(u[ 2], kZero);
-          v[ 3] = _mm_cmplt_epi32(u[ 3], kZero);
-          v[ 4] = _mm_cmplt_epi32(u[ 4], kZero);
-          v[ 5] = _mm_cmplt_epi32(u[ 5], kZero);
-          v[ 6] = _mm_cmplt_epi32(u[ 6], kZero);
-          v[ 7] = _mm_cmplt_epi32(u[ 7], kZero);
-          v[ 8] = _mm_cmplt_epi32(u[ 8], kZero);
-          v[ 9] = _mm_cmplt_epi32(u[ 9], kZero);
-          v[10] = _mm_cmplt_epi32(u[10], kZero);
-          v[11] = _mm_cmplt_epi32(u[11], kZero);
-          v[12] = _mm_cmplt_epi32(u[12], kZero);
-          v[13] = _mm_cmplt_epi32(u[13], kZero);
-          v[14] = _mm_cmplt_epi32(u[14], kZero);
-          v[15] = _mm_cmplt_epi32(u[15], kZero);
-
-          u[ 0] = _mm_sub_epi32(u[ 0], v[ 0]);
-          u[ 1] = _mm_sub_epi32(u[ 1], v[ 1]);
-          u[ 2] = _mm_sub_epi32(u[ 2], v[ 2]);
-          u[ 3] = _mm_sub_epi32(u[ 3], v[ 3]);
-          u[ 4] = _mm_sub_epi32(u[ 4], v[ 4]);
-          u[ 5] = _mm_sub_epi32(u[ 5], v[ 5]);
-          u[ 6] = _mm_sub_epi32(u[ 6], v[ 6]);
-          u[ 7] = _mm_sub_epi32(u[ 7], v[ 7]);
-          u[ 8] = _mm_sub_epi32(u[ 8], v[ 8]);
-          u[ 9] = _mm_sub_epi32(u[ 9], v[ 9]);
-          u[10] = _mm_sub_epi32(u[10], v[10]);
-          u[11] = _mm_sub_epi32(u[11], v[11]);
-          u[12] = _mm_sub_epi32(u[12], v[12]);
-          u[13] = _mm_sub_epi32(u[13], v[13]);
-          u[14] = _mm_sub_epi32(u[14], v[14]);
-          u[15] = _mm_sub_epi32(u[15], v[15]);
-
-          v[0] = _mm_add_epi32(u[0], K32One);
-          v[1] = _mm_add_epi32(u[1], K32One);
-          v[2] = _mm_add_epi32(u[2], K32One);
-          v[3] = _mm_add_epi32(u[3], K32One);
-          v[4] = _mm_add_epi32(u[4], K32One);
-          v[5] = _mm_add_epi32(u[5], K32One);
-          v[6] = _mm_add_epi32(u[6], K32One);
-          v[7] = _mm_add_epi32(u[7], K32One);
-          v[8] = _mm_add_epi32(u[8], K32One);
-          v[9] = _mm_add_epi32(u[9], K32One);
-          v[10] = _mm_add_epi32(u[10], K32One);
-          v[11] = _mm_add_epi32(u[11], K32One);
-          v[12] = _mm_add_epi32(u[12], K32One);
-          v[13] = _mm_add_epi32(u[13], K32One);
-          v[14] = _mm_add_epi32(u[14], K32One);
-          v[15] = _mm_add_epi32(u[15], K32One);
-
-          u[0] = _mm_srai_epi32(v[0], 2);
-          u[1] = _mm_srai_epi32(v[1], 2);
-          u[2] = _mm_srai_epi32(v[2], 2);
-          u[3] = _mm_srai_epi32(v[3], 2);
-          u[4] = _mm_srai_epi32(v[4], 2);
-          u[5] = _mm_srai_epi32(v[5], 2);
-          u[6] = _mm_srai_epi32(v[6], 2);
-          u[7] = _mm_srai_epi32(v[7], 2);
-          u[8] = _mm_srai_epi32(v[8], 2);
-          u[9] = _mm_srai_epi32(v[9], 2);
-          u[10] = _mm_srai_epi32(v[10], 2);
-          u[11] = _mm_srai_epi32(v[11], 2);
-          u[12] = _mm_srai_epi32(v[12], 2);
-          u[13] = _mm_srai_epi32(v[13], 2);
-          u[14] = _mm_srai_epi32(v[14], 2);
-          u[15] = _mm_srai_epi32(v[15], 2);
-
-          out[ 5] = _mm_packs_epi32(u[0], u[1]);
-          out[21] = _mm_packs_epi32(u[2], u[3]);
-          out[13] = _mm_packs_epi32(u[4], u[5]);
-          out[29] = _mm_packs_epi32(u[6], u[7]);
-          out[ 3] = _mm_packs_epi32(u[8], u[9]);
-          out[19] = _mm_packs_epi32(u[10], u[11]);
-          out[11] = _mm_packs_epi32(u[12], u[13]);
-          out[27] = _mm_packs_epi32(u[14], u[15]);
-#if DCT_HIGH_BIT_DEPTH
-          overflow = check_epi16_overflow_x8(&out[5], &out[21], &out[13],
-                                             &out[29], &out[3], &out[19],
-                                             &out[11], &out[27]);
-          if (overflow) {
-            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
-            return;
-          }
-#endif  // DCT_HIGH_BIT_DEPTH
-        }
-      }
-#endif  // FDCT32x32_HIGH_PRECISION
-      // Transpose the results, do it as four 8x8 transposes.
-      {
-        int transpose_block;
-        int16_t *output0 = &intermediate[column_start * 32];
-        tran_low_t *output1 = &output_org[column_start * 32];
-        for (transpose_block = 0; transpose_block < 4; ++transpose_block) {
-          __m128i *this_out = &out[8 * transpose_block];
-          // 00 01 02 03 04 05 06 07
-          // 10 11 12 13 14 15 16 17
-          // 20 21 22 23 24 25 26 27
-          // 30 31 32 33 34 35 36 37
-          // 40 41 42 43 44 45 46 47
-          // 50 51 52 53 54 55 56 57
-          // 60 61 62 63 64 65 66 67
-          // 70 71 72 73 74 75 76 77
-          const __m128i tr0_0 = _mm_unpacklo_epi16(this_out[0], this_out[1]);
-          const __m128i tr0_1 = _mm_unpacklo_epi16(this_out[2], this_out[3]);
-          const __m128i tr0_2 = _mm_unpackhi_epi16(this_out[0], this_out[1]);
-          const __m128i tr0_3 = _mm_unpackhi_epi16(this_out[2], this_out[3]);
-          const __m128i tr0_4 = _mm_unpacklo_epi16(this_out[4], this_out[5]);
-          const __m128i tr0_5 = _mm_unpacklo_epi16(this_out[6], this_out[7]);
-          const __m128i tr0_6 = _mm_unpackhi_epi16(this_out[4], this_out[5]);
-          const __m128i tr0_7 = _mm_unpackhi_epi16(this_out[6], this_out[7]);
-          // 00 10 01 11 02 12 03 13
-          // 20 30 21 31 22 32 23 33
-          // 04 14 05 15 06 16 07 17
-          // 24 34 25 35 26 36 27 37
-          // 40 50 41 51 42 52 43 53
-          // 60 70 61 71 62 72 63 73
-          // 44 54 45 55 46 56 47 57
-          // 64 74 65 75 66 76 67 77
-          const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
-          const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3);
-          const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
-          const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3);
-          const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);
-          const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);
-          const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);
-          const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);
-          // 00 10 20 30 01 11 21 31
-          // 40 50 60 70 41 51 61 71
-          // 02 12 22 32 03 13 23 33
-          // 42 52 62 72 43 53 63 73
-          // 04 14 24 34 05 15 25 35
-          // 44 54 64 74 45 55 65 75
-          // 06 16 26 36 07 17 27 37
-          // 46 56 66 76 47 57 67 77
-          __m128i tr2_0 = _mm_unpacklo_epi64(tr1_0, tr1_4);
-          __m128i tr2_1 = _mm_unpackhi_epi64(tr1_0, tr1_4);
-          __m128i tr2_2 = _mm_unpacklo_epi64(tr1_2, tr1_6);
-          __m128i tr2_3 = _mm_unpackhi_epi64(tr1_2, tr1_6);
-          __m128i tr2_4 = _mm_unpacklo_epi64(tr1_1, tr1_5);
-          __m128i tr2_5 = _mm_unpackhi_epi64(tr1_1, tr1_5);
-          __m128i tr2_6 = _mm_unpacklo_epi64(tr1_3, tr1_7);
-          __m128i tr2_7 = _mm_unpackhi_epi64(tr1_3, tr1_7);
-          // 00 10 20 30 40 50 60 70
-          // 01 11 21 31 41 51 61 71
-          // 02 12 22 32 42 52 62 72
-          // 03 13 23 33 43 53 63 73
-          // 04 14 24 34 44 54 64 74
-          // 05 15 25 35 45 55 65 75
-          // 06 16 26 36 46 56 66 76
-          // 07 17 27 37 47 57 67 77
-          if (0 == pass) {
-            // output[j] = (output[j] + 1 + (output[j] > 0)) >> 2;
-            // TODO(cd): see quality impact of only doing
-            //           output[j] = (output[j] + 1) >> 2;
-            //           which would remove the code between here ...
-            __m128i tr2_0_0 = _mm_cmpgt_epi16(tr2_0, kZero);
-            __m128i tr2_1_0 = _mm_cmpgt_epi16(tr2_1, kZero);
-            __m128i tr2_2_0 = _mm_cmpgt_epi16(tr2_2, kZero);
-            __m128i tr2_3_0 = _mm_cmpgt_epi16(tr2_3, kZero);
-            __m128i tr2_4_0 = _mm_cmpgt_epi16(tr2_4, kZero);
-            __m128i tr2_5_0 = _mm_cmpgt_epi16(tr2_5, kZero);
-            __m128i tr2_6_0 = _mm_cmpgt_epi16(tr2_6, kZero);
-            __m128i tr2_7_0 = _mm_cmpgt_epi16(tr2_7, kZero);
-            tr2_0 = _mm_sub_epi16(tr2_0, tr2_0_0);
-            tr2_1 = _mm_sub_epi16(tr2_1, tr2_1_0);
-            tr2_2 = _mm_sub_epi16(tr2_2, tr2_2_0);
-            tr2_3 = _mm_sub_epi16(tr2_3, tr2_3_0);
-            tr2_4 = _mm_sub_epi16(tr2_4, tr2_4_0);
-            tr2_5 = _mm_sub_epi16(tr2_5, tr2_5_0);
-            tr2_6 = _mm_sub_epi16(tr2_6, tr2_6_0);
-            tr2_7 = _mm_sub_epi16(tr2_7, tr2_7_0);
-            //           ... and here.
-            //           PS: also change code in vp9/encoder/vp9_dct.c
-            tr2_0 = _mm_add_epi16(tr2_0, kOne);
-            tr2_1 = _mm_add_epi16(tr2_1, kOne);
-            tr2_2 = _mm_add_epi16(tr2_2, kOne);
-            tr2_3 = _mm_add_epi16(tr2_3, kOne);
-            tr2_4 = _mm_add_epi16(tr2_4, kOne);
-            tr2_5 = _mm_add_epi16(tr2_5, kOne);
-            tr2_6 = _mm_add_epi16(tr2_6, kOne);
-            tr2_7 = _mm_add_epi16(tr2_7, kOne);
-            tr2_0 = _mm_srai_epi16(tr2_0, 2);
-            tr2_1 = _mm_srai_epi16(tr2_1, 2);
-            tr2_2 = _mm_srai_epi16(tr2_2, 2);
-            tr2_3 = _mm_srai_epi16(tr2_3, 2);
-            tr2_4 = _mm_srai_epi16(tr2_4, 2);
-            tr2_5 = _mm_srai_epi16(tr2_5, 2);
-            tr2_6 = _mm_srai_epi16(tr2_6, 2);
-            tr2_7 = _mm_srai_epi16(tr2_7, 2);
-          }
-          // Note: even though all these stores are aligned, using the aligned
-          //       intrinsic makes the code slightly slower.
-          if (pass == 0) {
-            _mm_storeu_si128((__m128i *)(output0 + 0 * 32), tr2_0);
-            _mm_storeu_si128((__m128i *)(output0 + 1 * 32), tr2_1);
-            _mm_storeu_si128((__m128i *)(output0 + 2 * 32), tr2_2);
-            _mm_storeu_si128((__m128i *)(output0 + 3 * 32), tr2_3);
-            _mm_storeu_si128((__m128i *)(output0 + 4 * 32), tr2_4);
-            _mm_storeu_si128((__m128i *)(output0 + 5 * 32), tr2_5);
-            _mm_storeu_si128((__m128i *)(output0 + 6 * 32), tr2_6);
-            _mm_storeu_si128((__m128i *)(output0 + 7 * 32), tr2_7);
-            // Process next 8x8
-            output0 += 8;
-          } else {
-            storeu_output(&tr2_0, (output1 + 0 * 32));
-            storeu_output(&tr2_1, (output1 + 1 * 32));
-            storeu_output(&tr2_2, (output1 + 2 * 32));
-            storeu_output(&tr2_3, (output1 + 3 * 32));
-            storeu_output(&tr2_4, (output1 + 4 * 32));
-            storeu_output(&tr2_5, (output1 + 5 * 32));
-            storeu_output(&tr2_6, (output1 + 6 * 32));
-            storeu_output(&tr2_7, (output1 + 7 * 32));
-            // Process next 8x8
-            output1 += 8;
-          }
-        }
-      }
-    }
-  }
-}  // NOLINT
-
-#undef ADD_EPI16
-#undef SUB_EPI16
-#undef HIGH_FDCT32x32_2D_C
-#undef HIGH_FDCT32x32_2D_ROWS_C
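
A pattern recurs throughout the SSE2 rows deleted above: on DCT_HIGH_BIT_DEPTH
builds, every stage re-checks its 32-bit intermediates and, if any value no
longer fits in 16 bits, abandons the SIMD path and redoes the rows with the C
transform (HIGH_FDCT32x32_2D_ROWS_C). A scalar sketch of that guard follows;
the names here are illustrative stand-ins for the k_check_epi32_overflow_* and
check_epi16_overflow_x* helpers:

#include <stdint.h>

static int fits_int16(int32_t v) { return v >= -32768 && v <= 32767; }

/* Returns nonzero if any value overflowed; the caller then falls back to
 * the plain C row pass instead of continuing in SIMD. */
static int check_overflow(const int32_t *v, int n) {
  int i, overflow = 0;
  for (i = 0; i < n; ++i) overflow |= !fits_int16(v[i]);
  return overflow;
}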
--- a/vp9/encoder/x86/vp9_dct_avx2.c
+++ /dev/null
@@ -1,26 +1,0 @@
-/*
- *  Copyright (c) 2012 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <immintrin.h>  // AVX2
-#include "vp9/common/vp9_idct.h"  // for cospi constants
-#include "vpx_ports/mem.h"
-
-
-#define FDCT32x32_2D_AVX2 vp9_fdct32x32_rd_avx2
-#define FDCT32x32_HIGH_PRECISION 0
-#include "vp9/encoder/x86/vp9_dct32x32_avx2_impl.h"
-#undef  FDCT32x32_2D_AVX2
-#undef  FDCT32x32_HIGH_PRECISION
-
-#define FDCT32x32_2D_AVX2 vp9_fdct32x32_avx2
-#define FDCT32x32_HIGH_PRECISION 1
-#include "vp9/encoder/x86/vp9_dct32x32_avx2_impl.h" // NOLINT
-#undef  FDCT32x32_2D_AVX2
-#undef  FDCT32x32_HIGH_PRECISION
--- a/vp9/encoder/x86/vp9_dct_sse2.c
+++ b/vp9/encoder/x86/vp9_dct_sse2.c
@@ -2266,47 +2266,3 @@
   in1 = _mm_srai_epi32(in1, 3);
   store_output(&in1, output);
 }
-
-/*
- * The DCTnxn functions are defined using the macros below. The main code for
- * them is in separate files (vp9/encoder/x86/vp9_dct_sse2_impl.h &
- * vp9/encoder/x86/vp9_dct32x32_sse2_impl.h), which are used by both the
- * 8-bit code and the high-bit-depth code.
- */
-
-#define DCT_HIGH_BIT_DEPTH 0
-
-#define FDCT32x32_2D vp9_fdct32x32_rd_sse2
-#define FDCT32x32_HIGH_PRECISION 0
-#include "vp9/encoder/x86/vp9_dct32x32_sse2_impl.h"
-#undef  FDCT32x32_2D
-#undef  FDCT32x32_HIGH_PRECISION
-
-#define FDCT32x32_2D vp9_fdct32x32_sse2
-#define FDCT32x32_HIGH_PRECISION 1
-#include "vp9/encoder/x86/vp9_dct32x32_sse2_impl.h" // NOLINT
-#undef  FDCT32x32_2D
-#undef  FDCT32x32_HIGH_PRECISION
-
-#undef  DCT_HIGH_BIT_DEPTH
-
-
-#if CONFIG_VP9_HIGHBITDEPTH
-
-#define DCT_HIGH_BIT_DEPTH 1
-
-#define FDCT32x32_2D vp9_highbd_fdct32x32_rd_sse2
-#define FDCT32x32_HIGH_PRECISION 0
-#include "vp9/encoder/x86/vp9_dct32x32_sse2_impl.h" // NOLINT
-#undef  FDCT32x32_2D
-#undef  FDCT32x32_HIGH_PRECISION
-
-#define FDCT32x32_2D vp9_highbd_fdct32x32_sse2
-#define FDCT32x32_HIGH_PRECISION 1
-#include "vp9/encoder/x86/vp9_dct32x32_sse2_impl.h" // NOLINT
-#undef  FDCT32x32_2D
-#undef  FDCT32x32_HIGH_PRECISION
-
-#undef  DCT_HIGH_BIT_DEPTH
-
-#endif  // CONFIG_VP9_HIGHBITDEPTH
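
The instantiation block deleted above (and the AVX2 file removed before it)
uses an include-as-template idiom: one implementation header is included
several times, with macros selecting the emitted function name, the rounding
precision, and the bit depth on each pass, so a single copy of the intrinsics
yields four distinct symbols. A minimal sketch of the idiom, with hypothetical
file and function names:

/* fdct_impl.h would hold the shared body:
 *   void FDCT32x32_2D(const int16_t *in, tran_low_t *out, int stride) {
 *   #if FDCT32x32_HIGH_PRECISION
 *     ... full-precision rounding ...
 *   #else
 *     ... reduced-precision (_rd) rounding ...
 *   #endif
 *   }
 */
#define FDCT32x32_2D fdct32x32_rd_example  /* hypothetical name */
#define FDCT32x32_HIGH_PRECISION 0
#include "fdct_impl.h"
#undef FDCT32x32_2D
#undef FDCT32x32_HIGH_PRECISION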
--- a/vp9/vp9cx.mk
+++ b/vp9/vp9cx.mk
@@ -118,14 +118,11 @@
 
 VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_dct_sse2.c
 VP9_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/vp9_dct_ssse3.c
-VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_dct32x32_sse2_impl.h
 
 ifeq ($(CONFIG_VP9_TEMPORAL_DENOISING),yes)
 VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_denoiser_sse2.c
 endif
 
-VP9_CX_SRCS-$(HAVE_AVX2) += encoder/x86/vp9_dct32x32_avx2_impl.h
-VP9_CX_SRCS-$(HAVE_AVX2) += encoder/x86/vp9_dct_avx2.c
 VP9_CX_SRCS-$(HAVE_AVX2) += encoder/x86/vp9_error_intrin_avx2.c
 
 ifneq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
--- a/vpx_dsp/fwd_txfm.c
+++ b/vpx_dsp/fwd_txfm.c
@@ -343,6 +343,410 @@
   }
 }
 
+static INLINE tran_high_t dct_32_round(tran_high_t input) {
+  tran_high_t rv = ROUND_POWER_OF_TWO(input, DCT_CONST_BITS);
+  // TODO(debargha, peter.derivaz): Find new bounds for this assert,
+  // and make the bounds consts.
+  // assert(-131072 <= rv && rv <= 131071);
+  return rv;
+}
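+// (Reference: dct_32_round(v) yields v / 2^DCT_CONST_BITS rounded to
+// nearest; with DCT_CONST_BITS == 14, dct_32_round(3 << 13) == 2.)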
+
+static INLINE tran_high_t half_round_shift(tran_high_t input) {
+  tran_high_t rv = (input + 1 + (input < 0)) >> 2;
+  return rv;
+}
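+// (half_round_shift(v) divides by 4 and rounds to nearest with ties toward
+// zero: half_round_shift(6) == 1, half_round_shift(-6) == -1.)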
+
+void vp9_fdct32(const tran_high_t *input, tran_high_t *output, int round) {
+  tran_high_t step[32];
+  // Stage 1
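+  // (Butterfly: step[i] = input[i] + input[31 - i] for i < 16, and
+  // step[i] = input[31 - i] - input[i] for i >= 16.)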
+  step[0] = input[0] + input[(32 - 1)];
+  step[1] = input[1] + input[(32 - 2)];
+  step[2] = input[2] + input[(32 - 3)];
+  step[3] = input[3] + input[(32 - 4)];
+  step[4] = input[4] + input[(32 - 5)];
+  step[5] = input[5] + input[(32 - 6)];
+  step[6] = input[6] + input[(32 - 7)];
+  step[7] = input[7] + input[(32 - 8)];
+  step[8] = input[8] + input[(32 - 9)];
+  step[9] = input[9] + input[(32 - 10)];
+  step[10] = input[10] + input[(32 - 11)];
+  step[11] = input[11] + input[(32 - 12)];
+  step[12] = input[12] + input[(32 - 13)];
+  step[13] = input[13] + input[(32 - 14)];
+  step[14] = input[14] + input[(32 - 15)];
+  step[15] = input[15] + input[(32 - 16)];
+  step[16] = -input[16] + input[(32 - 17)];
+  step[17] = -input[17] + input[(32 - 18)];
+  step[18] = -input[18] + input[(32 - 19)];
+  step[19] = -input[19] + input[(32 - 20)];
+  step[20] = -input[20] + input[(32 - 21)];
+  step[21] = -input[21] + input[(32 - 22)];
+  step[22] = -input[22] + input[(32 - 23)];
+  step[23] = -input[23] + input[(32 - 24)];
+  step[24] = -input[24] + input[(32 - 25)];
+  step[25] = -input[25] + input[(32 - 26)];
+  step[26] = -input[26] + input[(32 - 27)];
+  step[27] = -input[27] + input[(32 - 28)];
+  step[28] = -input[28] + input[(32 - 29)];
+  step[29] = -input[29] + input[(32 - 30)];
+  step[30] = -input[30] + input[(32 - 31)];
+  step[31] = -input[31] + input[(32 - 32)];
+
+  // Stage 2
+  output[0] = step[0] + step[16 - 1];
+  output[1] = step[1] + step[16 - 2];
+  output[2] = step[2] + step[16 - 3];
+  output[3] = step[3] + step[16 - 4];
+  output[4] = step[4] + step[16 - 5];
+  output[5] = step[5] + step[16 - 6];
+  output[6] = step[6] + step[16 - 7];
+  output[7] = step[7] + step[16 - 8];
+  output[8] = -step[8] + step[16 - 9];
+  output[9] = -step[9] + step[16 - 10];
+  output[10] = -step[10] + step[16 - 11];
+  output[11] = -step[11] + step[16 - 12];
+  output[12] = -step[12] + step[16 - 13];
+  output[13] = -step[13] + step[16 - 14];
+  output[14] = -step[14] + step[16 - 15];
+  output[15] = -step[15] + step[16 - 16];
+
+  output[16] = step[16];
+  output[17] = step[17];
+  output[18] = step[18];
+  output[19] = step[19];
+
+  output[20] = dct_32_round((-step[20] + step[27]) * cospi_16_64);
+  output[21] = dct_32_round((-step[21] + step[26]) * cospi_16_64);
+  output[22] = dct_32_round((-step[22] + step[25]) * cospi_16_64);
+  output[23] = dct_32_round((-step[23] + step[24]) * cospi_16_64);
+
+  output[24] = dct_32_round((step[24] + step[23]) * cospi_16_64);
+  output[25] = dct_32_round((step[25] + step[22]) * cospi_16_64);
+  output[26] = dct_32_round((step[26] + step[21]) * cospi_16_64);
+  output[27] = dct_32_round((step[27] + step[20]) * cospi_16_64);
+
+  output[28] = step[28];
+  output[29] = step[29];
+  output[30] = step[30];
+  output[31] = step[31];
+
+  // Damp the magnitude by 4 so that the intermediate values stay within
+  // the range of 16 bits.
+  if (round) {
+    output[0] = half_round_shift(output[0]);
+    output[1] = half_round_shift(output[1]);
+    output[2] = half_round_shift(output[2]);
+    output[3] = half_round_shift(output[3]);
+    output[4] = half_round_shift(output[4]);
+    output[5] = half_round_shift(output[5]);
+    output[6] = half_round_shift(output[6]);
+    output[7] = half_round_shift(output[7]);
+    output[8] = half_round_shift(output[8]);
+    output[9] = half_round_shift(output[9]);
+    output[10] = half_round_shift(output[10]);
+    output[11] = half_round_shift(output[11]);
+    output[12] = half_round_shift(output[12]);
+    output[13] = half_round_shift(output[13]);
+    output[14] = half_round_shift(output[14]);
+    output[15] = half_round_shift(output[15]);
+
+    output[16] = half_round_shift(output[16]);
+    output[17] = half_round_shift(output[17]);
+    output[18] = half_round_shift(output[18]);
+    output[19] = half_round_shift(output[19]);
+    output[20] = half_round_shift(output[20]);
+    output[21] = half_round_shift(output[21]);
+    output[22] = half_round_shift(output[22]);
+    output[23] = half_round_shift(output[23]);
+    output[24] = half_round_shift(output[24]);
+    output[25] = half_round_shift(output[25]);
+    output[26] = half_round_shift(output[26]);
+    output[27] = half_round_shift(output[27]);
+    output[28] = half_round_shift(output[28]);
+    output[29] = half_round_shift(output[29]);
+    output[30] = half_round_shift(output[30]);
+    output[31] = half_round_shift(output[31]);
+  }
+
+  // Stage 3
+  step[0] = output[0] + output[(8 - 1)];
+  step[1] = output[1] + output[(8 - 2)];
+  step[2] = output[2] + output[(8 - 3)];
+  step[3] = output[3] + output[(8 - 4)];
+  step[4] = -output[4] + output[(8 - 5)];
+  step[5] = -output[5] + output[(8 - 6)];
+  step[6] = -output[6] + output[(8 - 7)];
+  step[7] = -output[7] + output[(8 - 8)];
+  step[8] = output[8];
+  step[9] = output[9];
+  step[10] = dct_32_round((-output[10] + output[13]) * cospi_16_64);
+  step[11] = dct_32_round((-output[11] + output[12]) * cospi_16_64);
+  step[12] = dct_32_round((output[12] + output[11]) * cospi_16_64);
+  step[13] = dct_32_round((output[13] + output[10]) * cospi_16_64);
+  step[14] = output[14];
+  step[15] = output[15];
+
+  step[16] = output[16] + output[23];
+  step[17] = output[17] + output[22];
+  step[18] = output[18] + output[21];
+  step[19] = output[19] + output[20];
+  step[20] = -output[20] + output[19];
+  step[21] = -output[21] + output[18];
+  step[22] = -output[22] + output[17];
+  step[23] = -output[23] + output[16];
+  step[24] = -output[24] + output[31];
+  step[25] = -output[25] + output[30];
+  step[26] = -output[26] + output[29];
+  step[27] = -output[27] + output[28];
+  step[28] = output[28] + output[27];
+  step[29] = output[29] + output[26];
+  step[30] = output[30] + output[25];
+  step[31] = output[31] + output[24];
+
+  // Stage 4
+  output[0] = step[0] + step[3];
+  output[1] = step[1] + step[2];
+  output[2] = -step[2] + step[1];
+  output[3] = -step[3] + step[0];
+  output[4] = step[4];
+  output[5] = dct_32_round((-step[5] + step[6]) * cospi_16_64);
+  output[6] = dct_32_round((step[6] + step[5]) * cospi_16_64);
+  output[7] = step[7];
+  output[8] = step[8] + step[11];
+  output[9] = step[9] + step[10];
+  output[10] = -step[10] + step[9];
+  output[11] = -step[11] + step[8];
+  output[12] = -step[12] + step[15];
+  output[13] = -step[13] + step[14];
+  output[14] = step[14] + step[13];
+  output[15] = step[15] + step[12];
+
+  output[16] = step[16];
+  output[17] = step[17];
+  output[18] = dct_32_round(step[18] * -cospi_8_64 + step[29] * cospi_24_64);
+  output[19] = dct_32_round(step[19] * -cospi_8_64 + step[28] * cospi_24_64);
+  output[20] = dct_32_round(step[20] * -cospi_24_64 + step[27] * -cospi_8_64);
+  output[21] = dct_32_round(step[21] * -cospi_24_64 + step[26] * -cospi_8_64);
+  output[22] = step[22];
+  output[23] = step[23];
+  output[24] = step[24];
+  output[25] = step[25];
+  output[26] = dct_32_round(step[26] * cospi_24_64 + step[21] * -cospi_8_64);
+  output[27] = dct_32_round(step[27] * cospi_24_64 + step[20] * -cospi_8_64);
+  output[28] = dct_32_round(step[28] * cospi_8_64 + step[19] * cospi_24_64);
+  output[29] = dct_32_round(step[29] * cospi_8_64 + step[18] * cospi_24_64);
+  output[30] = step[30];
+  output[31] = step[31];
+
+  // Stage 5
+  step[0] = dct_32_round((output[0] + output[1]) * cospi_16_64);
+  step[1] = dct_32_round((-output[1] + output[0]) * cospi_16_64);
+  step[2] = dct_32_round(output[2] * cospi_24_64 + output[3] * cospi_8_64);
+  step[3] = dct_32_round(output[3] * cospi_24_64 - output[2] * cospi_8_64);
+  step[4] = output[4] + output[5];
+  step[5] = -output[5] + output[4];
+  step[6] = -output[6] + output[7];
+  step[7] = output[7] + output[6];
+  step[8] = output[8];
+  step[9] = dct_32_round(output[9] * -cospi_8_64 + output[14] * cospi_24_64);
+  step[10] = dct_32_round(output[10] * -cospi_24_64 + output[13] * -cospi_8_64);
+  step[11] = output[11];
+  step[12] = output[12];
+  step[13] = dct_32_round(output[13] * cospi_24_64 + output[10] * -cospi_8_64);
+  step[14] = dct_32_round(output[14] * cospi_8_64 + output[9] * cospi_24_64);
+  step[15] = output[15];
+
+  step[16] = output[16] + output[19];
+  step[17] = output[17] + output[18];
+  step[18] = -output[18] + output[17];
+  step[19] = -output[19] + output[16];
+  step[20] = -output[20] + output[23];
+  step[21] = -output[21] + output[22];
+  step[22] = output[22] + output[21];
+  step[23] = output[23] + output[20];
+  step[24] = output[24] + output[27];
+  step[25] = output[25] + output[26];
+  step[26] = -output[26] + output[25];
+  step[27] = -output[27] + output[24];
+  step[28] = -output[28] + output[31];
+  step[29] = -output[29] + output[30];
+  step[30] = output[30] + output[29];
+  step[31] = output[31] + output[28];
+
+  // Stage 6
+  output[0] = step[0];
+  output[1] = step[1];
+  output[2] = step[2];
+  output[3] = step[3];
+  output[4] = dct_32_round(step[4] * cospi_28_64 + step[7] * cospi_4_64);
+  output[5] = dct_32_round(step[5] * cospi_12_64 + step[6] * cospi_20_64);
+  output[6] = dct_32_round(step[6] * cospi_12_64 + step[5] * -cospi_20_64);
+  output[7] = dct_32_round(step[7] * cospi_28_64 + step[4] * -cospi_4_64);
+  output[8] = step[8] + step[9];
+  output[9] = -step[9] + step[8];
+  output[10] = -step[10] + step[11];
+  output[11] = step[11] + step[10];
+  output[12] = step[12] + step[13];
+  output[13] = -step[13] + step[12];
+  output[14] = -step[14] + step[15];
+  output[15] = step[15] + step[14];
+
+  output[16] = step[16];
+  output[17] = dct_32_round(step[17] * -cospi_4_64 + step[30] * cospi_28_64);
+  output[18] = dct_32_round(step[18] * -cospi_28_64 + step[29] * -cospi_4_64);
+  output[19] = step[19];
+  output[20] = step[20];
+  output[21] = dct_32_round(step[21] * -cospi_20_64 + step[26] * cospi_12_64);
+  output[22] = dct_32_round(step[22] * -cospi_12_64 + step[25] * -cospi_20_64);
+  output[23] = step[23];
+  output[24] = step[24];
+  output[25] = dct_32_round(step[25] * cospi_12_64 + step[22] * -cospi_20_64);
+  output[26] = dct_32_round(step[26] * cospi_20_64 + step[21] * cospi_12_64);
+  output[27] = step[27];
+  output[28] = step[28];
+  output[29] = dct_32_round(step[29] * cospi_28_64 + step[18] * -cospi_4_64);
+  output[30] = dct_32_round(step[30] * cospi_4_64 + step[17] * cospi_28_64);
+  output[31] = step[31];
+
+  // Stage 7
+  step[0] = output[0];
+  step[1] = output[1];
+  step[2] = output[2];
+  step[3] = output[3];
+  step[4] = output[4];
+  step[5] = output[5];
+  step[6] = output[6];
+  step[7] = output[7];
+  step[8] = dct_32_round(output[8] * cospi_30_64 + output[15] * cospi_2_64);
+  step[9] = dct_32_round(output[9] * cospi_14_64 + output[14] * cospi_18_64);
+  step[10] = dct_32_round(output[10] * cospi_22_64 + output[13] * cospi_10_64);
+  step[11] = dct_32_round(output[11] * cospi_6_64 + output[12] * cospi_26_64);
+  step[12] = dct_32_round(output[12] * cospi_6_64 + output[11] * -cospi_26_64);
+  step[13] = dct_32_round(output[13] * cospi_22_64 + output[10] * -cospi_10_64);
+  step[14] = dct_32_round(output[14] * cospi_14_64 + output[9] * -cospi_18_64);
+  step[15] = dct_32_round(output[15] * cospi_30_64 + output[8] * -cospi_2_64);
+
+  step[16] = output[16] + output[17];
+  step[17] = -output[17] + output[16];
+  step[18] = -output[18] + output[19];
+  step[19] = output[19] + output[18];
+  step[20] = output[20] + output[21];
+  step[21] = -output[21] + output[20];
+  step[22] = -output[22] + output[23];
+  step[23] = output[23] + output[22];
+  step[24] = output[24] + output[25];
+  step[25] = -output[25] + output[24];
+  step[26] = -output[26] + output[27];
+  step[27] = output[27] + output[26];
+  step[28] = output[28] + output[29];
+  step[29] = -output[29] + output[28];
+  step[30] = -output[30] + output[31];
+  step[31] = output[31] + output[30];
+
+  // Final stage --- output indices are bit-reversed.
+  output[0]  = step[0];
+  output[16] = step[1];
+  output[8]  = step[2];
+  output[24] = step[3];
+  output[4]  = step[4];
+  output[20] = step[5];
+  output[12] = step[6];
+  output[28] = step[7];
+  output[2]  = step[8];
+  output[18] = step[9];
+  output[10] = step[10];
+  output[26] = step[11];
+  output[6]  = step[12];
+  output[22] = step[13];
+  output[14] = step[14];
+  output[30] = step[15];
+
+  output[1]  = dct_32_round(step[16] * cospi_31_64 + step[31] * cospi_1_64);
+  output[17] = dct_32_round(step[17] * cospi_15_64 + step[30] * cospi_17_64);
+  output[9]  = dct_32_round(step[18] * cospi_23_64 + step[29] * cospi_9_64);
+  output[25] = dct_32_round(step[19] * cospi_7_64 + step[28] * cospi_25_64);
+  output[5]  = dct_32_round(step[20] * cospi_27_64 + step[27] * cospi_5_64);
+  output[21] = dct_32_round(step[21] * cospi_11_64 + step[26] * cospi_21_64);
+  output[13] = dct_32_round(step[22] * cospi_19_64 + step[25] * cospi_13_64);
+  output[29] = dct_32_round(step[23] * cospi_3_64 + step[24] * cospi_29_64);
+  output[3]  = dct_32_round(step[24] * cospi_3_64 + step[23] * -cospi_29_64);
+  output[19] = dct_32_round(step[25] * cospi_19_64 + step[22] * -cospi_13_64);
+  output[11] = dct_32_round(step[26] * cospi_11_64 + step[21] * -cospi_21_64);
+  output[27] = dct_32_round(step[27] * cospi_27_64 + step[20] * -cospi_5_64);
+  output[7]  = dct_32_round(step[28] * cospi_7_64 + step[19] * -cospi_25_64);
+  output[23] = dct_32_round(step[29] * cospi_23_64 + step[18] * -cospi_9_64);
+  output[15] = dct_32_round(step[30] * cospi_15_64 + step[17] * -cospi_17_64);
+  output[31] = dct_32_round(step[31] * cospi_31_64 + step[16] * -cospi_1_64);
+}
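
For the even half (step[0..15]) the final stage is a pure bit-reversal of
5-bit indices; the odd half lands in the bit-reversed slots as well, but
through the paired cosine rotations. A hypothetical checker (bitrev5 is not
in the patch) for a few of the pairs written above:

#include <assert.h>

static int bitrev5(int v) {
  int r = 0, i;
  for (i = 0; i < 5; ++i) r |= ((v >> i) & 1) << (4 - i);
  return r;
}

int main(void) {
  assert(bitrev5(0) == 0);    /* step[0]  -> output[0]  */
  assert(bitrev5(1) == 16);   /* step[1]  -> output[16] */
  assert(bitrev5(8) == 2);    /* step[8]  -> output[2]  */
  assert(bitrev5(15) == 30);  /* step[15] -> output[30] */
  return 0;
}
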
+
+void vp9_fdct32x32_1_c(const int16_t *input, tran_low_t *output, int stride) {
+  int r, c;
+  tran_low_t sum = 0;
+  for (r = 0; r < 32; ++r)
+    for (c = 0; c < 32; ++c)
+      sum += input[r * stride + c];
+
+  output[0] = sum >> 3;
+  output[1] = 0;
+}
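
The _1 variant keeps only the DC term: the sum of all 1024 residuals scaled
by >> 3, with output[1] cleared so callers can treat the block as DC-only.
A standalone sanity sketch for a flat block (illustrative values):

#include <assert.h>
#include <stdint.h>

int main(void) {
  int16_t block[32 * 32];
  int32_t sum = 0;
  int i;
  for (i = 0; i < 32 * 32; ++i) block[i] = 3;  /* flat residual block */
  for (i = 0; i < 32 * 32; ++i) sum += block[i];
  assert((sum >> 3) == 128 * 3);  /* 1024 * 3 = 3072; 3072 >> 3 = 384 */
  return 0;
}
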
+
+void vp9_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
+  int i, j;
+  tran_high_t output[32 * 32];
+
+  // Columns
+  for (i = 0; i < 32; ++i) {
+    tran_high_t temp_in[32], temp_out[32];
+    for (j = 0; j < 32; ++j)
+      temp_in[j] = input[j * stride + i] * 4;
+    vp9_fdct32(temp_in, temp_out, 0);
+    for (j = 0; j < 32; ++j)
+      output[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2;
+  }
+
+  // Rows
+  for (i = 0; i < 32; ++i) {
+    tran_high_t temp_in[32], temp_out[32];
+    for (j = 0; j < 32; ++j)
+      temp_in[j] = output[j + i * 32];
+    vp9_fdct32(temp_in, temp_out, 0);
+    for (j = 0; j < 32; ++j)
+      out[j + i * 32] =
+          (tran_low_t)((temp_out[j] + 1 + (temp_out[j] < 0)) >> 2);
+  }
+}
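
The 2-D transform is separable: vp9_fdct32 runs once per column into a
row-major intermediate, then once per row, with a >> 2 magnitude reduction
after each pass. The same pass structure on a toy 4x4 block with an identity
1-D kernel (names here are illustrative, not from the patch):

static void toy_1d(const int *in, int *out) {
  int k;
  for (k = 0; k < 4; ++k) out[k] = in[k];  /* identity stands in for fdct */
}

static void toy_2d(const int *input, int *out, int stride) {
  int i, j, tmp[4 * 4];
  /* Column pass: transform column i, store it back down column i. */
  for (i = 0; i < 4; ++i) {
    int col_in[4], col_out[4];
    for (j = 0; j < 4; ++j) col_in[j] = input[j * stride + i];
    toy_1d(col_in, col_out);
    for (j = 0; j < 4; ++j) tmp[j * 4 + i] = col_out[j];
  }
  /* Row pass: a second 1-D transform over each row of the intermediate
   * completes the separable 2-D transform. */
  for (i = 0; i < 4; ++i)
    toy_1d(&tmp[i * 4], &out[i * 4]);
}
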
+
+// Note that although we use dct_32_round in the dct32 computation flow,
+// this 2-D fdct32x32 for the rate-distortion optimization loop operates
+// within 16-bit precision.
+void vp9_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, int stride) {
+  int i, j;
+  tran_high_t output[32 * 32];
+
+  // Columns
+  for (i = 0; i < 32; ++i) {
+    tran_high_t temp_in[32], temp_out[32];
+    for (j = 0; j < 32; ++j)
+      temp_in[j] = input[j * stride + i] * 4;
+    vp9_fdct32(temp_in, temp_out, 0);
+    for (j = 0; j < 32; ++j)
+      // TODO(cd): see quality impact of only doing
+      //           output[j * 32 + i] = (temp_out[j] + 1) >> 2;
+      //           PS: also change code in vp9/encoder/x86/vp9_dct_sse2.c
+      output[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2;
+  }
+
+  // Rows
+  for (i = 0; i < 32; ++i) {
+    tran_high_t temp_in[32], temp_out[32];
+    for (j = 0; j < 32; ++j)
+      temp_in[j] = output[j + i * 32];
+    vp9_fdct32(temp_in, temp_out, 1);
+    for (j = 0; j < 32; ++j)
+      out[j + i * 32] = (tran_low_t)temp_out[j];
+  }
+}
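
The rd variant differs from vp9_fdct32x32_c only in the row pass: it requests
the internal half_round_shift() (round == 1) instead of applying a final
(x + 1 + (x < 0)) >> 2 outside, trading a little precision for 16-bit-safe
intermediates. A hedged side-by-side sketch of just that difference
(rows_pass_sketch is hypothetical, not in the patch):

static void rows_pass_sketch(const tran_high_t *in /* 32x32 row-major */,
                             tran_low_t *out, int rd) {
  int i, j;
  for (i = 0; i < 32; ++i) {
    tran_high_t temp_in[32], temp_out[32];
    for (j = 0; j < 32; ++j) temp_in[j] = in[j + i * 32];
    /* rd == 1 scales inside vp9_fdct32 via half_round_shift(). */
    vp9_fdct32(temp_in, temp_out, rd);
    for (j = 0; j < 32; ++j)
      out[j + i * 32] = rd
          ? (tran_low_t)temp_out[j]
          : (tran_low_t)((temp_out[j] + 1 + (temp_out[j] < 0)) >> 2);
  }
}
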
+
 #if CONFIG_VP9_HIGHBITDEPTH
 void vp9_highbd_fdct4x4_c(const int16_t *input, tran_low_t *output,
                           int stride) {
@@ -357,5 +761,14 @@
 void vp9_highbd_fdct16x16_c(const int16_t *input, tran_low_t *output,
                             int stride) {
   vp9_fdct16x16_c(input, output, stride);
+}
+
+void vp9_highbd_fdct32x32_c(const int16_t *input, tran_low_t *out,
+                            int stride) {
+  vp9_fdct32x32_c(input, out, stride);
+}
+
+void vp9_highbd_fdct32x32_rd_c(const int16_t *input, tran_low_t *out,
+                               int stride) {
+  vp9_fdct32x32_rd_c(input, out, stride);
 }
 #endif  // CONFIG_VP9_HIGHBITDEPTH
--- a/vpx_dsp/fwd_txfm.h
+++ b/vpx_dsp/fwd_txfm.h
@@ -8,6 +8,9 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
+#ifndef VPX_DSP_FWD_TXFM_H_
+#define VPX_DSP_FWD_TXFM_H_
+
 #include "vpx_dsp/txfm_common.h"
 
 static INLINE tran_high_t fdct_round_shift(tran_high_t input) {
@@ -17,3 +20,6 @@
   // assert(INT16_MIN <= rv && rv <= INT16_MAX);
   return rv;
 }
+
+void vp9_fdct32(const tran_high_t *input, tran_high_t *output, int round);
+#endif  // VPX_DSP_FWD_TXFM_H_
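
Both dct_32_round() and fdct_round_shift() reduce to
ROUND_POWER_OF_TWO(input, DCT_CONST_BITS), i.e. round-to-nearest of
input / 2^14. A standalone numeric check (the macro body is restated
locally; cospi_16_64 == 11585 is the constant from vpx_dsp/txfm_common.h):

#include <assert.h>

#define DCT_CONST_BITS 14
#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n) - 1))) >> (n))

int main(void) {
  const int cospi_16_64 = 11585;  /* round(cos(pi/4) * 2^14) */
  /* 11585 / 16384 = 0.7071... rounds to 1; negated, to -1. */
  assert(ROUND_POWER_OF_TWO(cospi_16_64, DCT_CONST_BITS) == 1);
  assert(ROUND_POWER_OF_TWO(-cospi_16_64, DCT_CONST_BITS) == -1);
  return 0;
}
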
--- /dev/null
+++ b/vpx_dsp/mips/fwd_dct32x32_msa.c
@@ -1,0 +1,933 @@
+/*
+ *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_dsp/mips/fwd_txfm_msa.h"
+
+static void fdct8x32_1d_column_load_butterfly(const int16_t *input,
+                                              int32_t src_stride,
+                                              int16_t *temp_buff) {
+  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
+  v8i16 step0, step1, step2, step3;
+  v8i16 in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1;
+  v8i16 step0_1, step1_1, step2_1, step3_1;
+
+  /* 1st and 2nd set */
+  LD_SH4(input, src_stride, in0, in1, in2, in3);
+  LD_SH4(input + (28 * src_stride), src_stride, in4, in5, in6, in7);
+  LD_SH4(input + (4 * src_stride), src_stride, in0_1, in1_1, in2_1, in3_1);
+  LD_SH4(input + (24 * src_stride), src_stride, in4_1, in5_1, in6_1, in7_1);
+  SLLI_4V(in0, in1, in2, in3, 2);
+  SLLI_4V(in4, in5, in6, in7, 2);
+  SLLI_4V(in0_1, in1_1, in2_1, in3_1, 2);
+  SLLI_4V(in4_1, in5_1, in6_1, in7_1, 2);
+  BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7,
+              step0, step1, step2, step3, in4, in5, in6, in7);
+  BUTTERFLY_8(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1,
+              step0_1, step1_1, step2_1, step3_1, in4_1, in5_1, in6_1, in7_1);
+  ST_SH4(step0, step1, step2, step3, temp_buff, 8);
+  ST_SH4(in4, in5, in6, in7, temp_buff + (28 * 8), 8);
+  ST_SH4(step0_1, step1_1, step2_1, step3_1, temp_buff + (4 * 8), 8);
+  ST_SH4(in4_1, in5_1, in6_1, in7_1, temp_buff + (24 * 8), 8);
+
+  /* 3rd and 4th set */
+  LD_SH4(input + (8 * src_stride), src_stride, in0, in1, in2, in3);
+  LD_SH4(input + (20 * src_stride), src_stride, in4, in5, in6, in7);
+  LD_SH4(input + (12 * src_stride), src_stride, in0_1, in1_1, in2_1, in3_1);
+  LD_SH4(input + (16 * src_stride), src_stride, in4_1, in5_1, in6_1, in7_1);
+  SLLI_4V(in0, in1, in2, in3, 2);
+  SLLI_4V(in4, in5, in6, in7, 2);
+  SLLI_4V(in0_1, in1_1, in2_1, in3_1, 2);
+  SLLI_4V(in4_1, in5_1, in6_1, in7_1, 2);
+  BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7,
+              step0, step1, step2, step3, in4, in5, in6, in7);
+  BUTTERFLY_8(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1,
+              step0_1, step1_1, step2_1, step3_1, in4_1, in5_1, in6_1, in7_1);
+  ST_SH4(step0, step1, step2, step3, temp_buff + (8 * 8), 8);
+  ST_SH4(in4, in5, in6, in7, temp_buff + (20 * 8), 8);
+  ST_SH4(step0_1, step1_1, step2_1, step3_1, temp_buff + (12 * 8), 8);
+  ST_SH4(in4_1, in5_1, in6_1, in7_1, temp_buff + (16 * 8), 8);
+}
+
+static void fdct8x32_1d_column_even_store(int16_t *input, int16_t *temp) {
+  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
+  v8i16 in8, in9, in10, in11, in12, in13, in14, in15;
+  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
+  v8i16 temp0, temp1;
+
+  /* fdct even */
+  LD_SH4(input, 8, in0, in1, in2, in3);
+  LD_SH4(input + 96, 8, in12, in13, in14, in15);
+  BUTTERFLY_8(in0, in1, in2, in3, in12, in13, in14, in15,
+              vec0, vec1, vec2, vec3, in12, in13, in14, in15);
+  LD_SH4(input + 32, 8, in4, in5, in6, in7);
+  LD_SH4(input + 64, 8, in8, in9, in10, in11);
+  BUTTERFLY_8(in4, in5, in6, in7, in8, in9, in10, in11,
+              vec4, vec5, vec6, vec7, in8, in9, in10, in11);
+
+  /* Stage 3 */
+  ADD4(vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, in0, in1, in2, in3);
+  BUTTERFLY_4(in0, in1, in2, in3, temp0, in4, in1, in0);
+  DOTP_CONST_PAIR(temp0, in4, cospi_16_64, cospi_16_64, temp1, temp0);
+  FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
+  ST_SH(temp0, temp);
+  ST_SH(temp1, temp + 512);
+
+  DOTP_CONST_PAIR(in0, in1, cospi_24_64, cospi_8_64, temp1, temp0);
+  FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
+  ST_SH(temp0, temp + 256);
+  ST_SH(temp1, temp + 768);
+
+  SUB4(vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, vec7, vec6, vec5, vec4);
+  DOTP_CONST_PAIR(vec6, vec5, cospi_16_64, cospi_16_64, vec5, vec6);
+  ADD2(vec4, vec5, vec7, vec6, vec0, vec1);
+  DOTP_CONST_PAIR(vec1, vec0, cospi_28_64, cospi_4_64, temp1, temp0);
+  FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
+  ST_SH(temp0, temp + 128);
+  ST_SH(temp1, temp + 896);
+
+  SUB2(vec4, vec5, vec7, vec6, vec4, vec7);
+  DOTP_CONST_PAIR(vec7, vec4, cospi_12_64, cospi_20_64, temp1, temp0);
+  FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
+  ST_SH(temp0, temp + 640);
+  ST_SH(temp1, temp + 384);
+
+  DOTP_CONST_PAIR(in13, in10, cospi_16_64, cospi_16_64, vec2, vec5);
+  DOTP_CONST_PAIR(in12, in11, cospi_16_64, cospi_16_64, vec3, vec4);
+  ADD4(in8, vec3, in9, vec2, in14, vec5, in15, vec4, in0, vec1, vec6, in2);
+  DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3);
+  ADD2(in0, in1, in2, in3, vec0, vec7);
+  DOTP_CONST_PAIR(vec7, vec0, cospi_30_64, cospi_2_64, temp1, temp0);
+  FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
+  ST_SH(temp0, temp + 64);
+  ST_SH(temp1, temp + 960);
+
+  SUB2(in0, in1, in2, in3, in0, in2);
+  DOTP_CONST_PAIR(in2, in0, cospi_14_64, cospi_18_64, temp1, temp0);
+  FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
+  ST_SH(temp0, temp + 576);
+  ST_SH(temp1, temp + 448);
+
+  SUB2(in9, vec2, in14, vec5, vec2, vec5);
+  DOTP_CONST_PAIR((-vec2), vec5, cospi_24_64, cospi_8_64, in2, in1);
+  SUB4(in8, vec3, in15, vec4, in3, in2, in0, in1, in3, in0, vec2, vec5);
+  DOTP_CONST_PAIR(vec5, vec2, cospi_22_64, cospi_10_64, temp1, temp0);
+  FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
+  ST_SH(temp0, temp + 320);
+  ST_SH(temp1, temp + 704);
+
+  ADD2(in3, in2, in0, in1, vec3, vec4);
+  DOTP_CONST_PAIR(vec4, vec3, cospi_6_64, cospi_26_64, temp0, temp1);
+  FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
+  ST_SH(temp0, temp + 192);
+  ST_SH(temp1, temp + 832);
+}
+
+static void fdct8x32_1d_column_odd_store(int16_t *input, int16_t *temp_ptr) {
+  v8i16 in16, in17, in18, in19, in20, in21, in22, in23;
+  v8i16 in24, in25, in26, in27, in28, in29, in30, in31, vec4, vec5;
+
+  in20 = LD_SH(input + 32);
+  in21 = LD_SH(input + 40);
+  in26 = LD_SH(input + 80);
+  in27 = LD_SH(input + 88);
+
+  DOTP_CONST_PAIR(in27, in20, cospi_16_64, cospi_16_64, in20, in27);
+  DOTP_CONST_PAIR(in26, in21, cospi_16_64, cospi_16_64, in21, in26);
+
+  in18 = LD_SH(input + 16);
+  in19 = LD_SH(input + 24);
+  in28 = LD_SH(input + 96);
+  in29 = LD_SH(input + 104);
+
+  vec4 = in19 - in20;
+  ST_SH(vec4, input + 32);
+  vec4 = in18 - in21;
+  ST_SH(vec4, input + 40);
+  vec4 = in29 - in26;
+  ST_SH(vec4, input + 80);
+  vec4 = in28 - in27;
+  ST_SH(vec4, input + 88);
+
+  in21 = in18 + in21;
+  in20 = in19 + in20;
+  in27 = in28 + in27;
+  in26 = in29 + in26;
+
+  LD_SH4(input + 48, 8, in22, in23, in24, in25);
+  DOTP_CONST_PAIR(in25, in22, cospi_16_64, cospi_16_64, in22, in25);
+  DOTP_CONST_PAIR(in24, in23, cospi_16_64, cospi_16_64, in23, in24);
+
+  in16 = LD_SH(input);
+  in17 = LD_SH(input + 8);
+  in30 = LD_SH(input + 112);
+  in31 = LD_SH(input + 120);
+
+  vec4 = in17 - in22;
+  ST_SH(vec4, input + 16);
+  vec4 = in16 - in23;
+  ST_SH(vec4, input + 24);
+  vec4 = in31 - in24;
+  ST_SH(vec4, input + 96);
+  vec4 = in30 - in25;
+  ST_SH(vec4, input + 104);
+
+  ADD4(in16, in23, in17, in22, in30, in25, in31, in24, in16, in17, in30, in31);
+  DOTP_CONST_PAIR(in26, in21, cospi_24_64, cospi_8_64, in18, in29);
+  DOTP_CONST_PAIR(in27, in20, cospi_24_64, cospi_8_64, in19, in28);
+  ADD4(in16, in19, in17, in18, in30, in29, in31, in28, in27, in22, in21, in25);
+  DOTP_CONST_PAIR(in21, in22, cospi_28_64, cospi_4_64, in26, in24);
+  ADD2(in27, in26, in25, in24, in23, in20);
+  DOTP_CONST_PAIR(in20, in23, cospi_31_64, cospi_1_64, vec4, vec5);
+  FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
+  ST_SH(vec5, temp_ptr);
+  ST_SH(vec4, temp_ptr + 960);
+
+  SUB2(in27, in26, in25, in24, in22, in21);
+  DOTP_CONST_PAIR(in21, in22, cospi_15_64, cospi_17_64, vec5, vec4);
+  FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
+  ST_SH(vec5, temp_ptr + 448);
+  ST_SH(vec4, temp_ptr + 512);
+
+  SUB4(in17, in18, in16, in19, in31, in28, in30, in29, in23, in26, in24, in20);
+  DOTP_CONST_PAIR((-in23), in20, cospi_28_64, cospi_4_64, in27, in25);
+  SUB2(in26, in27, in24, in25, in23, in20);
+  DOTP_CONST_PAIR(in20, in23, cospi_23_64, cospi_9_64, vec4, vec5);
+  FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
+  ST_SH(vec4, temp_ptr + 704);
+  ST_SH(vec5, temp_ptr + 256);
+
+  ADD2(in26, in27, in24, in25, in22, in21);
+  DOTP_CONST_PAIR(in21, in22, cospi_7_64, cospi_25_64, vec4, vec5);
+  FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
+  ST_SH(vec4, temp_ptr + 192);
+  ST_SH(vec5, temp_ptr + 768);
+
+  LD_SH4(input + 16, 8, in22, in23, in20, in21);
+  LD_SH4(input + 80, 8, in26, in27, in24, in25);
+  in16 = in20;
+  in17 = in21;
+  DOTP_CONST_PAIR(-in16, in27, cospi_24_64, cospi_8_64, in20, in27);
+  DOTP_CONST_PAIR(-in17, in26, cospi_24_64, cospi_8_64, in21, in26);
+  SUB4(in23, in20, in22, in21, in25, in26, in24, in27, in28, in17, in18, in31);
+  DOTP_CONST_PAIR(in18, in17, cospi_12_64, cospi_20_64, in29, in30);
+  ADD2(in28, in29, in31, in30, in16, in19);
+  DOTP_CONST_PAIR(in19, in16, cospi_27_64, cospi_5_64, vec5, vec4);
+  FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
+  ST_SH(vec5, temp_ptr + 832);
+  ST_SH(vec4, temp_ptr + 128);
+
+  SUB2(in28, in29, in31, in30, in17, in18);
+  DOTP_CONST_PAIR(in18, in17, cospi_11_64, cospi_21_64, vec5, vec4);
+  FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
+  ST_SH(vec5, temp_ptr + 320);
+  ST_SH(vec4, temp_ptr + 640);
+  ADD4(in22, in21, in23, in20, in24, in27, in25, in26, in16, in29, in30, in19);
+  DOTP_CONST_PAIR(-in16, in19, cospi_12_64, cospi_20_64, in28, in31);
+  SUB2(in29, in28, in30, in31, in16, in19);
+  DOTP_CONST_PAIR(in19, in16, cospi_19_64, cospi_13_64, vec5, vec4);
+  FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
+  ST_SH(vec5, temp_ptr + 576);
+  ST_SH(vec4, temp_ptr + 384);
+
+  ADD2(in29, in28, in30, in31, in17, in18);
+  DOTP_CONST_PAIR(in18, in17, cospi_3_64, cospi_29_64, vec5, vec4);
+  FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
+  ST_SH(vec5, temp_ptr + 64);
+  ST_SH(vec4, temp_ptr + 896);
+}
+
+static void fdct8x32_1d_column(const int16_t *input, int32_t src_stride,
+                               int16_t *tmp_buf, int16_t *tmp_buf_big) {
+  fdct8x32_1d_column_load_butterfly(input, src_stride, tmp_buf);
+  fdct8x32_1d_column_even_store(tmp_buf, tmp_buf_big);
+  fdct8x32_1d_column_odd_store(tmp_buf + 128, (tmp_buf_big + 32));
+}
+
+static void fdct8x32_1d_row_load_butterfly(int16_t *temp_buff,
+                                           int16_t *output) {
+  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
+  v8i16 in8, in9, in10, in11, in12, in13, in14, in15;
+  v8i16 step0, step1, step2, step3, step4, step5, step6, step7;
+
+  LD_SH8(temp_buff, 32, in0, in1, in2, in3, in4, in5, in6, in7);
+  LD_SH8(temp_buff + 24, 32, in8, in9, in10, in11, in12, in13, in14, in15);
+  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
+                     in0, in1, in2, in3, in4, in5, in6, in7);
+  TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15,
+                     in8, in9, in10, in11, in12, in13, in14, in15);
+  BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7,
+               in8, in9, in10, in11, in12, in13, in14, in15,
+               step0, step1, step2, step3, step4, step5, step6, step7,
+               in8, in9, in10, in11, in12, in13, in14, in15);
+  ST_SH8(step0, step1, step2, step3, step4, step5, step6, step7, output, 8);
+  ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, (output + 24 * 8), 8);
+
+  /* 2nd set */
+  LD_SH8(temp_buff + 8, 32, in0, in1, in2, in3, in4, in5, in6, in7);
+  LD_SH8(temp_buff + 16, 32, in8, in9, in10, in11, in12, in13, in14, in15);
+  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
+                     in0, in1, in2, in3, in4, in5, in6, in7);
+  TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15,
+                     in8, in9, in10, in11, in12, in13, in14, in15);
+  BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7,
+               in8, in9, in10, in11, in12, in13, in14, in15,
+               step0, step1, step2, step3, step4, step5, step6, step7,
+               in8, in9, in10, in11, in12, in13, in14, in15);
+  ST_SH8(step0, step1, step2, step3, step4, step5, step6, step7,
+         (output + 8 * 8), 8);
+  ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, (output + 16 * 8), 8);
+}
+
+static void fdct8x32_1d_row_even_4x(int16_t *input, int16_t *interm_ptr,
+                                    int16_t *out) {
+  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
+  v8i16 in8, in9, in10, in11, in12, in13, in14, in15;
+  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
+  v4i32 vec0_l, vec1_l, vec2_l, vec3_l, vec4_l, vec5_l, vec6_l, vec7_l;
+  v4i32 vec0_r, vec1_r, vec2_r, vec3_r, vec4_r, vec5_r, vec6_r, vec7_r;
+  v4i32 tmp0_w, tmp1_w, tmp2_w, tmp3_w;
+
+  /* fdct32 even */
+  /* stage 2 */
+  LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
+  LD_SH8(input + 64, 8, in8, in9, in10, in11, in12, in13, in14, in15);
+
+  BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7,
+               in8, in9, in10, in11, in12, in13, in14, in15,
+               vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7,
+               in8, in9, in10, in11, in12, in13, in14, in15);
+  ST_SH8(vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, interm_ptr, 8);
+  ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, interm_ptr + 64, 8);
+
+  /* Stage 3 */
+  UNPCK_SH_SW(vec0, vec0_l, vec0_r);
+  UNPCK_SH_SW(vec1, vec1_l, vec1_r);
+  UNPCK_SH_SW(vec2, vec2_l, vec2_r);
+  UNPCK_SH_SW(vec3, vec3_l, vec3_r);
+  UNPCK_SH_SW(vec4, vec4_l, vec4_r);
+  UNPCK_SH_SW(vec5, vec5_l, vec5_r);
+  UNPCK_SH_SW(vec6, vec6_l, vec6_r);
+  UNPCK_SH_SW(vec7, vec7_l, vec7_r);
+  ADD4(vec0_r, vec7_r, vec1_r, vec6_r, vec2_r, vec5_r, vec3_r, vec4_r,
+       tmp0_w, tmp1_w, tmp2_w, tmp3_w);
+  BUTTERFLY_4(tmp0_w, tmp1_w, tmp2_w, tmp3_w, vec4_r, vec6_r, vec7_r, vec5_r);
+  ADD4(vec0_l, vec7_l, vec1_l, vec6_l, vec2_l, vec5_l, vec3_l, vec4_l,
+       vec0_r, vec1_r, vec2_r, vec3_r);
+
+  tmp3_w = vec0_r + vec3_r;
+  vec0_r = vec0_r - vec3_r;
+  vec3_r = vec1_r + vec2_r;
+  vec1_r = vec1_r - vec2_r;
+
+  DOTP_CONST_PAIR_W(vec4_r, vec6_r, tmp3_w, vec3_r, cospi_16_64,
+                    cospi_16_64, vec4_r, tmp3_w, vec6_r, vec3_r);
+  FDCT32_POSTPROC_NEG_W(vec4_r);
+  FDCT32_POSTPROC_NEG_W(tmp3_w);
+  FDCT32_POSTPROC_NEG_W(vec6_r);
+  FDCT32_POSTPROC_NEG_W(vec3_r);
+  PCKEV_H2_SH(vec4_r, tmp3_w, vec6_r, vec3_r, vec4, vec5);
+  ST_SH2(vec5, vec4, out, 8);
+
+  DOTP_CONST_PAIR_W(vec5_r, vec7_r, vec0_r, vec1_r, cospi_24_64,
+                    cospi_8_64, vec4_r, tmp3_w, vec6_r, vec3_r);
+  FDCT32_POSTPROC_NEG_W(vec4_r);
+  FDCT32_POSTPROC_NEG_W(tmp3_w);
+  FDCT32_POSTPROC_NEG_W(vec6_r);
+  FDCT32_POSTPROC_NEG_W(vec3_r);
+  PCKEV_H2_SH(vec4_r, tmp3_w, vec6_r, vec3_r, vec4, vec5);
+  ST_SH2(vec5, vec4, out + 16, 8);
+
+  LD_SH8(interm_ptr, 8, vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7);
+  SUB4(vec3, vec4, vec2, vec5, vec1, vec6, vec0, vec7, vec4, vec5, vec6, vec7);
+  DOTP_CONST_PAIR(vec6, vec5, cospi_16_64, cospi_16_64, vec5, vec6);
+  ADD2(vec4, vec5, vec7, vec6, vec0, vec1);
+  DOTP_CONST_PAIR(vec1, vec0, cospi_28_64, cospi_4_64, in5, in4);
+  FDCT_POSTPROC_2V_NEG_H(in4, in5);
+  ST_SH(in4, out + 32);
+  ST_SH(in5, out + 56);
+
+  SUB2(vec4, vec5, vec7, vec6, vec4, vec7);
+  DOTP_CONST_PAIR(vec7, vec4, cospi_12_64, cospi_20_64, in5, in4);
+  FDCT_POSTPROC_2V_NEG_H(in4, in5);
+  ST_SH(in4, out + 40);
+  ST_SH(in5, out + 48);
+
+  LD_SH8(interm_ptr + 64, 8, in8, in9, in10, in11, in12, in13, in14, in15);
+  DOTP_CONST_PAIR(in13, in10, cospi_16_64, cospi_16_64, vec2, vec5);
+  DOTP_CONST_PAIR(in12, in11, cospi_16_64, cospi_16_64, vec3, vec4);
+  ADD4(in8, vec3, in9, vec2, in14, vec5, in15, vec4, in0, vec1, vec6, in2);
+  DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3);
+  ADD2(in0, in1, in2, in3, vec0, vec7);
+  DOTP_CONST_PAIR(vec7, vec0, cospi_30_64, cospi_2_64, in5, in4);
+  FDCT_POSTPROC_2V_NEG_H(in4, in5);
+  ST_SH(in4, out + 64);
+  ST_SH(in5, out + 120);
+
+  SUB2(in0, in1, in2, in3, in0, in2);
+  DOTP_CONST_PAIR(in2, in0, cospi_14_64, cospi_18_64, in5, in4);
+  FDCT_POSTPROC_2V_NEG_H(in4, in5);
+  ST_SH(in4, out + 72);
+  ST_SH(in5, out + 112);
+
+  SUB2(in9, vec2, in14, vec5, vec2, vec5);
+  DOTP_CONST_PAIR((-vec2), vec5, cospi_24_64, cospi_8_64, in2, in1);
+  SUB4(in8, vec3, in15, vec4, in3, in2, in0, in1, in3, in0, vec2, vec5);
+  DOTP_CONST_PAIR(vec5, vec2, cospi_22_64, cospi_10_64, in5, in4);
+  FDCT_POSTPROC_2V_NEG_H(in4, in5);
+  ST_SH(in4, out + 80);
+  ST_SH(in5, out + 104);
+
+  ADD2(in3, in2, in0, in1, vec3, vec4);
+  DOTP_CONST_PAIR(vec4, vec3, cospi_6_64, cospi_26_64, in4, in5);
+  FDCT_POSTPROC_2V_NEG_H(in4, in5);
+  ST_SH(in4, out + 96);
+  ST_SH(in5, out + 88);
+}
+
+static void fdct8x32_1d_row_even(int16_t *temp, int16_t *out) {
+  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
+  v8i16 in8, in9, in10, in11, in12, in13, in14, in15;
+  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, temp0, temp1;
+
+  /* fdct32 even */
+  /* stage 2 */
+  LD_SH8(temp, 8, in0, in1, in2, in3, in4, in5, in6, in7);
+  LD_SH8(temp + 64, 8, in8, in9, in10, in11, in12, in13, in14, in15);
+
+  BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7,
+               in8, in9, in10, in11, in12, in13, in14, in15,
+               vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7,
+               in8, in9, in10, in11, in12, in13, in14, in15);
+
+  /* Stage 3 */
+  ADD4(vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, in0, in1, in2, in3);
+  BUTTERFLY_4(in0, in1, in2, in3, temp0, in4, in1, in0);
+  DOTP_CONST_PAIR(temp0, in4, cospi_16_64, cospi_16_64, temp1, temp0);
+  FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
+  ST_SH(temp0, out);
+  ST_SH(temp1, out + 8);
+
+  DOTP_CONST_PAIR(in0, in1, cospi_24_64, cospi_8_64, temp1, temp0);
+  FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
+  ST_SH(temp0, out + 16);
+  ST_SH(temp1, out + 24);
+
+  SUB4(vec3, vec4, vec2, vec5, vec1, vec6, vec0, vec7, vec4, vec5, vec6, vec7);
+  DOTP_CONST_PAIR(vec6, vec5, cospi_16_64, cospi_16_64, vec5, vec6);
+  ADD2(vec4, vec5, vec7, vec6, vec0, vec1);
+  DOTP_CONST_PAIR(vec1, vec0, cospi_28_64, cospi_4_64, temp1, temp0);
+  FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
+  ST_SH(temp0, out + 32);
+  ST_SH(temp1, out + 56);
+
+  SUB2(vec4, vec5, vec7, vec6, vec4, vec7);
+  DOTP_CONST_PAIR(vec7, vec4, cospi_12_64, cospi_20_64, temp1, temp0);
+  FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
+  ST_SH(temp0, out + 40);
+  ST_SH(temp1, out + 48);
+
+  DOTP_CONST_PAIR(in13, in10, cospi_16_64, cospi_16_64, vec2, vec5);
+  DOTP_CONST_PAIR(in12, in11, cospi_16_64, cospi_16_64, vec3, vec4);
+  ADD4(in8, vec3, in9, vec2, in14, vec5, in15, vec4, in0, vec1, vec6, in2);
+  DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3);
+  ADD2(in0, in1, in2, in3, vec0, vec7);
+  DOTP_CONST_PAIR(vec7, vec0, cospi_30_64, cospi_2_64, temp1, temp0);
+  FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
+  ST_SH(temp0, out + 64);
+  ST_SH(temp1, out + 120);
+
+  SUB2(in0, in1, in2, in3, in0, in2);
+  DOTP_CONST_PAIR(in2, in0, cospi_14_64, cospi_18_64, temp1, temp0);
+  FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
+  ST_SH(temp0, out + 72);
+  ST_SH(temp1, out + 112);
+
+  SUB2(in9, vec2, in14, vec5, vec2, vec5);
+  DOTP_CONST_PAIR((-vec2), vec5, cospi_24_64, cospi_8_64, in2, in1);
+  SUB4(in8, vec3, in15, vec4, in3, in2, in0, in1, in3, in0, vec2, vec5);
+  DOTP_CONST_PAIR(vec5, vec2, cospi_22_64, cospi_10_64, temp1, temp0);
+  FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
+  ST_SH(temp0, out + 80);
+  ST_SH(temp1, out + 104);
+
+  ADD2(in3, in2, in0, in1, vec3, vec4);
+  DOTP_CONST_PAIR(vec4, vec3, cospi_6_64, cospi_26_64, temp0, temp1);
+  FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
+  ST_SH(temp0, out + 96);
+  ST_SH(temp1, out + 88);
+}
+
+static void fdct8x32_1d_row_odd(int16_t *temp, int16_t *interm_ptr,
+                                int16_t *out) {
+  v8i16 in16, in17, in18, in19, in20, in21, in22, in23;
+  v8i16 in24, in25, in26, in27, in28, in29, in30, in31, vec4, vec5;
+
+  in20 = LD_SH(temp + 32);
+  in21 = LD_SH(temp + 40);
+  in26 = LD_SH(temp + 80);
+  in27 = LD_SH(temp + 88);
+
+  DOTP_CONST_PAIR(in27, in20, cospi_16_64, cospi_16_64, in20, in27);
+  DOTP_CONST_PAIR(in26, in21, cospi_16_64, cospi_16_64, in21, in26);
+
+  in18 = LD_SH(temp + 16);
+  in19 = LD_SH(temp + 24);
+  in28 = LD_SH(temp + 96);
+  in29 = LD_SH(temp + 104);
+
+  vec4 = in19 - in20;
+  ST_SH(vec4, interm_ptr + 32);
+  vec4 = in18 - in21;
+  ST_SH(vec4, interm_ptr + 88);
+  vec4 = in28 - in27;
+  ST_SH(vec4, interm_ptr + 56);
+  vec4 = in29 - in26;
+  ST_SH(vec4, interm_ptr + 64);
+
+  ADD4(in18, in21, in19, in20, in28, in27, in29, in26, in21, in20, in27, in26);
+
+  in22 = LD_SH(temp + 48);
+  in23 = LD_SH(temp + 56);
+  in24 = LD_SH(temp + 64);
+  in25 = LD_SH(temp + 72);
+
+  DOTP_CONST_PAIR(in25, in22, cospi_16_64, cospi_16_64, in22, in25);
+  DOTP_CONST_PAIR(in24, in23, cospi_16_64, cospi_16_64, in23, in24);
+
+  in16 = LD_SH(temp);
+  in17 = LD_SH(temp + 8);
+  in30 = LD_SH(temp + 112);
+  in31 = LD_SH(temp + 120);
+
+  vec4 = in17 - in22;
+  ST_SH(vec4, interm_ptr + 40);
+  vec4 = in30 - in25;
+  ST_SH(vec4, interm_ptr + 48);
+  vec4 = in31 - in24;
+  ST_SH(vec4, interm_ptr + 72);
+  vec4 = in16 - in23;
+  ST_SH(vec4, interm_ptr + 80);
+
+  ADD4(in16, in23, in17, in22, in30, in25, in31, in24, in16, in17, in30, in31);
+  DOTP_CONST_PAIR(in26, in21, cospi_24_64, cospi_8_64, in18, in29);
+  DOTP_CONST_PAIR(in27, in20, cospi_24_64, cospi_8_64, in19, in28);
+
+  ADD4(in16, in19, in17, in18, in30, in29, in31, in28, in27, in22, in21, in25);
+  DOTP_CONST_PAIR(in21, in22, cospi_28_64, cospi_4_64, in26, in24);
+  ADD2(in27, in26, in25, in24, in23, in20);
+
+  DOTP_CONST_PAIR(in20, in23, cospi_31_64, cospi_1_64, vec4, vec5);
+  FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
+  ST_SH(vec5, out);
+  ST_SH(vec4, out + 120);
+
+  SUB2(in27, in26, in25, in24, in22, in21);
+
+  DOTP_CONST_PAIR(in21, in22, cospi_15_64, cospi_17_64, vec5, vec4);
+  FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
+  ST_SH(vec5, out + 112);
+  ST_SH(vec4, out + 8);
+
+  SUB4(in17, in18, in16, in19, in31, in28, in30, in29, in23, in26, in24, in20);
+  DOTP_CONST_PAIR((-in23), in20, cospi_28_64, cospi_4_64, in27, in25);
+  SUB2(in26, in27, in24, in25, in23, in20);
+
+  DOTP_CONST_PAIR(in20, in23, cospi_23_64, cospi_9_64, vec4, vec5);
+  FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
+  ST_SH(vec4, out + 16);
+  ST_SH(vec5, out + 104);
+
+  ADD2(in26, in27, in24, in25, in22, in21);
+  DOTP_CONST_PAIR(in21, in22, cospi_7_64, cospi_25_64, vec4, vec5);
+  FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
+  ST_SH(vec4, out + 24);
+  ST_SH(vec5, out + 96);
+
+  in20 = LD_SH(interm_ptr + 32);
+  in21 = LD_SH(interm_ptr + 88);
+  in27 = LD_SH(interm_ptr + 56);
+  in26 = LD_SH(interm_ptr + 64);
+
+  in16 = in20;
+  in17 = in21;
+  DOTP_CONST_PAIR(-in16, in27, cospi_24_64, cospi_8_64, in20, in27);
+  DOTP_CONST_PAIR(-in17, in26, cospi_24_64, cospi_8_64, in21, in26);
+
+  in22 = LD_SH(interm_ptr + 40);
+  in25 = LD_SH(interm_ptr + 48);
+  in24 = LD_SH(interm_ptr + 72);
+  in23 = LD_SH(interm_ptr + 80);
+
+  SUB4(in23, in20, in22, in21, in25, in26, in24, in27, in28, in17, in18, in31);
+  DOTP_CONST_PAIR(in18, in17, cospi_12_64, cospi_20_64, in29, in30);
+  ADD2(in28, in29, in31, in30, in16, in19);
+  DOTP_CONST_PAIR(in19, in16, cospi_27_64, cospi_5_64, vec5, vec4);
+  FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
+  ST_SH(vec5, out + 32);
+  ST_SH(vec4, out + 88);
+
+  SUB2(in28, in29, in31, in30, in17, in18);
+  DOTP_CONST_PAIR(in18, in17, cospi_11_64, cospi_21_64, vec5, vec4);
+  FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
+  ST_SH(vec5, out + 40);
+  ST_SH(vec4, out + 80);
+
+  ADD4(in22, in21, in23, in20, in24, in27, in25, in26, in16, in29, in30, in19);
+  DOTP_CONST_PAIR(-in16, in19, cospi_12_64, cospi_20_64, in28, in31);
+  SUB2(in29, in28, in30, in31, in16, in19);
+
+  DOTP_CONST_PAIR(in19, in16, cospi_19_64, cospi_13_64, vec5, vec4);
+  FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
+  ST_SH(vec5, out + 72);
+  ST_SH(vec4, out + 48);
+
+  ADD2(in29, in28, in30, in31, in17, in18);
+
+  DOTP_CONST_PAIR(in18, in17, cospi_3_64, cospi_29_64, vec5, vec4);
+  FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
+  ST_SH(vec4, out + 56);
+  ST_SH(vec5, out + 64);
+}
+
+static void fdct8x32_1d_row_transpose_store(int16_t *temp, int16_t *output) {
+  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
+  v8i16 in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1;
+
+  /* 1st set */
+  in0 = LD_SH(temp);
+  in4 = LD_SH(temp + 32);
+  in2 = LD_SH(temp + 64);
+  in6 = LD_SH(temp + 96);
+  in1 = LD_SH(temp + 128);
+  in7 = LD_SH(temp + 152);
+  in3 = LD_SH(temp + 192);
+  in5 = LD_SH(temp + 216);
+
+  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
+                     in0, in1, in2, in3, in4, in5, in6, in7);
+
+  /* 2nd set */
+  in0_1 = LD_SH(temp + 16);
+  in1_1 = LD_SH(temp + 232);
+  in2_1 = LD_SH(temp + 80);
+  in3_1 = LD_SH(temp + 168);
+  in4_1 = LD_SH(temp + 48);
+  in5_1 = LD_SH(temp + 176);
+  in6_1 = LD_SH(temp + 112);
+  in7_1 = LD_SH(temp + 240);
+
+  ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, output, 32);
+  TRANSPOSE8x8_SH_SH(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1,
+                     in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1);
+
+  /* 3rd set */
+  in0 = LD_SH(temp + 8);
+  in1 = LD_SH(temp + 136);
+  in2 = LD_SH(temp + 72);
+  in3 = LD_SH(temp + 200);
+  in4 = LD_SH(temp + 40);
+  in5 = LD_SH(temp + 208);
+  in6 = LD_SH(temp + 104);
+  in7 = LD_SH(temp + 144);
+
+  ST_SH8(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1,
+         output + 8, 32);
+  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
+                     in0, in1, in2, in3, in4, in5, in6, in7);
+  ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, output + 16, 32);
+
+  /* 4th set */
+  in0_1 = LD_SH(temp + 24);
+  in1_1 = LD_SH(temp + 224);
+  in2_1 = LD_SH(temp + 88);
+  in3_1 = LD_SH(temp + 160);
+  in4_1 = LD_SH(temp + 56);
+  in5_1 = LD_SH(temp + 184);
+  in6_1 = LD_SH(temp + 120);
+  in7_1 = LD_SH(temp + 248);
+
+  TRANSPOSE8x8_SH_SH(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1,
+                     in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1);
+  ST_SH8(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1,
+         output + 24, 32);
+}
+
+static void fdct32x8_1d_row(int16_t *temp, int16_t *temp_buf,
+                            int16_t *output) {
+  fdct8x32_1d_row_load_butterfly(temp, temp_buf);
+  fdct8x32_1d_row_even(temp_buf, temp_buf);
+  fdct8x32_1d_row_odd(temp_buf + 128, temp, temp_buf + 128);
+  fdct8x32_1d_row_transpose_store(temp_buf, output);
+}
+
+static void fdct32x8_1d_row_4x(int16_t *tmp_buf_big, int16_t *tmp_buf,
+                               int16_t *output) {
+  fdct8x32_1d_row_load_butterfly(tmp_buf_big, tmp_buf);
+  fdct8x32_1d_row_even_4x(tmp_buf, tmp_buf_big, tmp_buf);
+  fdct8x32_1d_row_odd(tmp_buf + 128, tmp_buf_big, tmp_buf + 128);
+  fdct8x32_1d_row_transpose_store(tmp_buf, output);
+}
+
+void vp9_fdct32x32_msa(const int16_t *input, int16_t *output,
+                       int32_t src_stride) {
+  int32_t i;
+  DECLARE_ALIGNED(32, int16_t, tmp_buf_big[1024]);
+  DECLARE_ALIGNED(32, int16_t, tmp_buf[256]);
+
+  /* column transform */
+  for (i = 0; i < 4; ++i) {
+    fdct8x32_1d_column(input + (8 * i), src_stride, tmp_buf,
+                       tmp_buf_big + (8 * i));
+  }
+
+  /* row transform: first 8-row group, via the wider 32-bit even path */
+  fdct32x8_1d_row_4x(tmp_buf_big, tmp_buf, output);
+
+  /* row transform: remaining 8-row groups */
+  for (i = 1; i < 4; ++i) {
+    fdct32x8_1d_row(tmp_buf_big + (i * 256), tmp_buf, output + (i * 256));
+  }
+}
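
The MSA driver works in 8-wide slices: each of the four column calls covers
8 columns of the 32x32 block (input + 8 * i) and writes an 8-column strip of
tmp_buf_big, after which the first 8-row group goes through the wider
fdct32x8_1d_row_4x path and the remaining three through fdct32x8_1d_row.
The buffer arithmetic, as a standalone check:

#include <assert.h>

int main(void) {
  assert(32 * 32 == 1024);  /* tmp_buf_big: the whole int16 intermediate */
  assert(8 * 32 == 256);    /* tmp_buf: one 8-wide working slice        */
  assert(4 * 256 == 1024);  /* four 8-row groups cover the block        */
  return 0;
}
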
+
+static void fdct8x32_1d_row_even_rd(int16_t *temp, int16_t *out) {
+  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
+  v8i16 in8, in9, in10, in11, in12, in13, in14, in15;
+  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, temp0, temp1;
+
+  /* fdct32 even */
+  /* stage 2 */
+  LD_SH8(temp, 8, in0, in1, in2, in3, in4, in5, in6, in7);
+  LD_SH8(temp + 64, 8, in8, in9, in10, in11, in12, in13, in14, in15);
+
+  BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7,
+               in8, in9, in10, in11, in12, in13, in14, in15,
+               vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7,
+               in8, in9, in10, in11, in12, in13, in14, in15);
+  FDCT_POSTPROC_2V_NEG_H(vec0, vec1);
+  FDCT_POSTPROC_2V_NEG_H(vec2, vec3);
+  FDCT_POSTPROC_2V_NEG_H(vec4, vec5);
+  FDCT_POSTPROC_2V_NEG_H(vec6, vec7);
+  FDCT_POSTPROC_2V_NEG_H(in8, in9);
+  FDCT_POSTPROC_2V_NEG_H(in10, in11);
+  FDCT_POSTPROC_2V_NEG_H(in12, in13);
+  FDCT_POSTPROC_2V_NEG_H(in14, in15);
+
+  /* Stage 3 */
+  ADD4(vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, in0, in1, in2, in3);
+
+  temp0 = in0 + in3;
+  in0 = in0 - in3;
+  in3 = in1 + in2;
+  in1 = in1 - in2;
+
+  DOTP_CONST_PAIR(temp0, in3, cospi_16_64, cospi_16_64, temp1, temp0);
+  ST_SH(temp0, out);
+  ST_SH(temp1, out + 8);
+
+  DOTP_CONST_PAIR(in0, in1, cospi_24_64, cospi_8_64, temp1, temp0);
+  ST_SH(temp0, out + 16);
+  ST_SH(temp1, out + 24);
+
+  SUB4(vec3, vec4, vec2, vec5, vec1, vec6, vec0, vec7, vec4, vec5, vec6, vec7);
+  DOTP_CONST_PAIR(vec6, vec5, cospi_16_64, cospi_16_64, vec5, vec6);
+  ADD2(vec4, vec5, vec7, vec6, vec0, vec1);
+  DOTP_CONST_PAIR(vec1, vec0, cospi_28_64, cospi_4_64, temp1, temp0);
+  ST_SH(temp0, out + 32);
+  ST_SH(temp1, out + 56);
+
+  SUB2(vec4, vec5, vec7, vec6, vec4, vec7);
+  DOTP_CONST_PAIR(vec7, vec4, cospi_12_64, cospi_20_64, temp1, temp0);
+  ST_SH(temp0, out + 40);
+  ST_SH(temp1, out + 48);
+
+  DOTP_CONST_PAIR(in13, in10, cospi_16_64, cospi_16_64, vec2, vec5);
+  DOTP_CONST_PAIR(in12, in11, cospi_16_64, cospi_16_64, vec3, vec4);
+  ADD4(in8, vec3, in9, vec2, in14, vec5, in15, vec4, in0, vec1, vec6, in2);
+  DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3);
+  ADD2(in0, in1, in2, in3, vec0, vec7);
+  DOTP_CONST_PAIR(vec7, vec0, cospi_30_64, cospi_2_64, temp1, temp0);
+  ST_SH(temp0, out + 64);
+  ST_SH(temp1, out + 120);
+
+  SUB2(in0, in1, in2, in3, in0, in2);
+  DOTP_CONST_PAIR(in2, in0, cospi_14_64, cospi_18_64, temp1, temp0);
+  ST_SH(temp0, out + 72);
+  ST_SH(temp1, out + 112);
+
+  SUB2(in9, vec2, in14, vec5, vec2, vec5);
+  DOTP_CONST_PAIR((-vec2), vec5, cospi_24_64, cospi_8_64, in2, in1);
+  SUB4(in8, vec3, in15, vec4, in3, in2, in0, in1, in3, in0, vec2, vec5);
+  DOTP_CONST_PAIR(vec5, vec2, cospi_22_64, cospi_10_64, temp1, temp0);
+  ST_SH(temp0, out + 80);
+  ST_SH(temp1, out + 104);
+
+  ADD2(in3, in2, in0, in1, vec3, vec4);
+  DOTP_CONST_PAIR(vec4, vec3, cospi_6_64, cospi_26_64, temp0, temp1);
+  ST_SH(temp0, out + 96);
+  ST_SH(temp1, out + 88);
+}
+
+static void fdct8x32_1d_row_odd_rd(int16_t *temp, int16_t *interm_ptr,
+                                   int16_t *out) {
+  v8i16 in16, in17, in18, in19, in20, in21, in22, in23;
+  v8i16 in24, in25, in26, in27, in28, in29, in30, in31;
+  v8i16 vec4, vec5;
+
+  in20 = LD_SH(temp + 32);
+  in21 = LD_SH(temp + 40);
+  in26 = LD_SH(temp + 80);
+  in27 = LD_SH(temp + 88);
+
+  DOTP_CONST_PAIR(in27, in20, cospi_16_64, cospi_16_64, in20, in27);
+  DOTP_CONST_PAIR(in26, in21, cospi_16_64, cospi_16_64, in21, in26);
+
+  FDCT_POSTPROC_2V_NEG_H(in20, in21);
+  FDCT_POSTPROC_2V_NEG_H(in26, in27);
+
+  in18 = LD_SH(temp + 16);
+  in19 = LD_SH(temp + 24);
+  in28 = LD_SH(temp + 96);
+  in29 = LD_SH(temp + 104);
+
+  FDCT_POSTPROC_2V_NEG_H(in18, in19);
+  FDCT_POSTPROC_2V_NEG_H(in28, in29);
+
+  vec4 = in19 - in20;
+  ST_SH(vec4, interm_ptr + 32);
+  vec4 = in18 - in21;
+  ST_SH(vec4, interm_ptr + 88);
+  vec4 = in29 - in26;
+  ST_SH(vec4, interm_ptr + 64);
+  vec4 = in28 - in27;
+  ST_SH(vec4, interm_ptr + 56);
+
+  ADD4(in18, in21, in19, in20, in28, in27, in29, in26, in21, in20, in27, in26);
+
+  in22 = LD_SH(temp + 48);
+  in23 = LD_SH(temp + 56);
+  in24 = LD_SH(temp + 64);
+  in25 = LD_SH(temp + 72);
+
+  DOTP_CONST_PAIR(in25, in22, cospi_16_64, cospi_16_64, in22, in25);
+  DOTP_CONST_PAIR(in24, in23, cospi_16_64, cospi_16_64, in23, in24);
+  FDCT_POSTPROC_2V_NEG_H(in22, in23);
+  FDCT_POSTPROC_2V_NEG_H(in24, in25);
+
+  in16 = LD_SH(temp);
+  in17 = LD_SH(temp + 8);
+  in30 = LD_SH(temp + 112);
+  in31 = LD_SH(temp + 120);
+
+  FDCT_POSTPROC_2V_NEG_H(in16, in17);
+  FDCT_POSTPROC_2V_NEG_H(in30, in31);
+
+  vec4 = in17 - in22;
+  ST_SH(vec4, interm_ptr + 40);
+  vec4 = in30 - in25;
+  ST_SH(vec4, interm_ptr + 48);
+  vec4 = in31 - in24;
+  ST_SH(vec4, interm_ptr + 72);
+  vec4 = in16 - in23;
+  ST_SH(vec4, interm_ptr + 80);
+
+  ADD4(in16, in23, in17, in22, in30, in25, in31, in24, in16, in17, in30, in31);
+  DOTP_CONST_PAIR(in26, in21, cospi_24_64, cospi_8_64, in18, in29);
+  DOTP_CONST_PAIR(in27, in20, cospi_24_64, cospi_8_64, in19, in28);
+  ADD4(in16, in19, in17, in18, in30, in29, in31, in28, in27, in22, in21, in25);
+  DOTP_CONST_PAIR(in21, in22, cospi_28_64, cospi_4_64, in26, in24);
+  ADD2(in27, in26, in25, in24, in23, in20);
+  DOTP_CONST_PAIR(in20, in23, cospi_31_64, cospi_1_64, vec4, vec5);
+  ST_SH(vec5, out);
+  ST_SH(vec4, out + 120);
+
+  SUB2(in27, in26, in25, in24, in22, in21);
+  DOTP_CONST_PAIR(in21, in22, cospi_15_64, cospi_17_64, vec5, vec4);
+  ST_SH(vec5, out + 112);
+  ST_SH(vec4, out + 8);
+
+  SUB4(in17, in18, in16, in19, in31, in28, in30, in29, in23, in26, in24, in20);
+  DOTP_CONST_PAIR((-in23), in20, cospi_28_64, cospi_4_64, in27, in25);
+  SUB2(in26, in27, in24, in25, in23, in20);
+  DOTP_CONST_PAIR(in20, in23, cospi_23_64, cospi_9_64, vec4, vec5);
+  ST_SH(vec4, out + 16);
+  ST_SH(vec5, out + 104);
+
+  ADD2(in26, in27, in24, in25, in22, in21);
+  DOTP_CONST_PAIR(in21, in22, cospi_7_64, cospi_25_64, vec4, vec5);
+  ST_SH(vec4, out + 24);
+  ST_SH(vec5, out + 96);
+
+  in20 = LD_SH(interm_ptr + 32);
+  in21 = LD_SH(interm_ptr + 88);
+  in27 = LD_SH(interm_ptr + 56);
+  in26 = LD_SH(interm_ptr + 64);
+
+  in16 = in20;
+  in17 = in21;
+  DOTP_CONST_PAIR(-in16, in27, cospi_24_64, cospi_8_64, in20, in27);
+  DOTP_CONST_PAIR(-in17, in26, cospi_24_64, cospi_8_64, in21, in26);
+
+  in22 = LD_SH(interm_ptr + 40);
+  in25 = LD_SH(interm_ptr + 48);
+  in24 = LD_SH(interm_ptr + 72);
+  in23 = LD_SH(interm_ptr + 80);
+
+  SUB4(in23, in20, in22, in21, in25, in26, in24, in27, in28, in17, in18, in31);
+  DOTP_CONST_PAIR(in18, in17, cospi_12_64, cospi_20_64, in29, in30);
+  in16 = in28 + in29;
+  in19 = in31 + in30;
+  DOTP_CONST_PAIR(in19, in16, cospi_27_64, cospi_5_64, vec5, vec4);
+  ST_SH(vec5, out + 32);
+  ST_SH(vec4, out + 88);
+
+  SUB2(in28, in29, in31, in30, in17, in18);
+  DOTP_CONST_PAIR(in18, in17, cospi_11_64, cospi_21_64, vec5, vec4);
+  ST_SH(vec5, out + 40);
+  ST_SH(vec4, out + 80);
+
+  ADD4(in22, in21, in23, in20, in24, in27, in25, in26, in16, in29, in30, in19);
+  DOTP_CONST_PAIR(-in16, in19, cospi_12_64, cospi_20_64, in28, in31);
+  SUB2(in29, in28, in30, in31, in16, in19);
+  DOTP_CONST_PAIR(in19, in16, cospi_19_64, cospi_13_64, vec5, vec4);
+  ST_SH(vec5, out + 72);
+  ST_SH(vec4, out + 48);
+
+  ADD2(in29, in28, in30, in31, in17, in18);
+  DOTP_CONST_PAIR(in18, in17, cospi_3_64, cospi_29_64, vec5, vec4);
+  ST_SH(vec4, out + 56);
+  ST_SH(vec5, out + 64);
+}
+
+static void fdct32x8_1d_row_rd(int16_t *tmp_buf_big, int16_t *tmp_buf,
+                               int16_t *output) {
+  fdct8x32_1d_row_load_butterfly(tmp_buf_big, tmp_buf);
+  fdct8x32_1d_row_even_rd(tmp_buf, tmp_buf);
+  fdct8x32_1d_row_odd_rd((tmp_buf + 128), tmp_buf_big, (tmp_buf + 128));
+  fdct8x32_1d_row_transpose_store(tmp_buf, output);
+}
+
+void vp9_fdct32x32_rd_msa(const int16_t *input, int16_t *out,
+                          int32_t src_stride) {
+  int32_t i;
+  DECLARE_ALIGNED(32, int16_t, tmp_buf_big[1024]);
+  DECLARE_ALIGNED(32, int16_t, tmp_buf[256]);
+
+  /* column transform */
+  for (i = 0; i < 4; ++i) {
+    fdct8x32_1d_column(input + (8 * i), src_stride, &tmp_buf[0],
+                       &tmp_buf_big[0] + (8 * i));
+  }
+
+  /* row transform */
+  for (i = 0; i < 4; ++i) {
+    fdct32x8_1d_row_rd(&tmp_buf_big[0] + (8 * i * 32), &tmp_buf[0],
+                       out + (8 * i * 32));
+  }
+}
--- a/vpx_dsp/mips/fwd_txfm_msa.h
+++ b/vpx_dsp/mips/fwd_txfm_msa.h
@@ -273,6 +273,85 @@
   out3 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m);           \
 }
 
+#define FDCT_POSTPROC_2V_NEG_H(vec0, vec1) {      \
+  v8i16 tp0_m, tp1_m;                             \
+  v8i16 one_m = __msa_ldi_h(1);                   \
+                                                  \
+  tp0_m = __msa_clti_s_h(vec0, 0);                \
+  tp1_m = __msa_clti_s_h(vec1, 0);                \
+  vec0 += 1;                                      \
+  vec1 += 1;                                      \
+  tp0_m = one_m & tp0_m;                          \
+  tp1_m = one_m & tp1_m;                          \
+  vec0 += tp0_m;                                  \
+  vec1 += tp1_m;                                  \
+  vec0 >>= 2;                                     \
+  vec1 >>= 2;                                     \
+}
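
Per int16 lane, FDCT_POSTPROC_2V_NEG_H computes the same
(v + 1 + (v < 0)) >> 2 scaling used by the C row pass in fwd_txfm.c.
A scalar restatement (illustrative only):

#include <stdint.h>

static int16_t fdct_postproc_neg_scalar(int16_t v) {
  const int16_t is_neg = (int16_t)(v < 0);  /* __msa_clti_s_h, masked to 1 */
  return (int16_t)((v + 1 + is_neg) >> 2);
}
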
+
+#define FDCT32_POSTPROC_NEG_W(vec) {      \
+  v4i32 temp_m;                           \
+  v4i32 one_m = __msa_ldi_w(1);           \
+                                          \
+  temp_m = __msa_clti_s_w(vec, 0);        \
+  vec += 1;                               \
+  temp_m = one_m & temp_m;                \
+  vec += temp_m;                          \
+  vec >>= 2;                              \
+}
+
+#define FDCT32_POSTPROC_2V_POS_H(vec0, vec1) {      \
+  v8i16 tp0_m, tp1_m;                               \
+  v8i16 one = __msa_ldi_h(1);                       \
+                                                    \
+  tp0_m = __msa_clei_s_h(vec0, 0);                  \
+  tp1_m = __msa_clei_s_h(vec1, 0);                  \
+  tp0_m = (v8i16)__msa_xori_b((v16u8)tp0_m, 255);   \
+  tp1_m = (v8i16)__msa_xori_b((v16u8)tp1_m, 255);   \
+  vec0 += 1;                                        \
+  vec1 += 1;                                        \
+  tp0_m = one & tp0_m;                              \
+  tp1_m = one & tp1_m;                              \
+  vec0 += tp0_m;                                    \
+  vec1 += tp1_m;                                    \
+  vec0 >>= 2;                                       \
+  vec1 >>= 2;                                       \
+}
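
FDCT32_POSTPROC_2V_POS_H inverts a <= 0 compare, so the extra bias lands on
strictly positive lanes: per lane it is (v + 1 + (v > 0)) >> 2, matching the
column pass of vp9_fdct32x32_c. Scalar restatement (illustrative only):

#include <stdint.h>

static int16_t fdct32_postproc_pos_scalar(int16_t v) {
  const int16_t is_pos = (int16_t)(v > 0);  /* clei_s_h(v, 0), then xori */
  return (int16_t)((v + 1 + is_pos) >> 2);
}
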
+
+#define DOTP_CONST_PAIR_W(reg0_left, reg1_left, reg0_right,      \
+                          reg1_right, const0, const1,            \
+                          out0, out1, out2, out3) {              \
+  v4i32 s0_m, s1_m, s2_m, s3_m, s4_m, s5_m, s6_m, s7_m;          \
+  v2i64 tp0_m, tp1_m, tp2_m, tp3_m;                              \
+  v4i32 k0_m = __msa_fill_w((int32_t) const0);                   \
+                                                                 \
+  s0_m = __msa_fill_w((int32_t) const1);                         \
+  k0_m = __msa_ilvev_w(s0_m, k0_m);                              \
+                                                                 \
+  ILVRL_W2_SW(-reg1_left, reg0_left, s1_m, s0_m);                \
+  ILVRL_W2_SW(reg0_left, reg1_left, s3_m, s2_m);                 \
+  ILVRL_W2_SW(-reg1_right, reg0_right, s5_m, s4_m);              \
+  ILVRL_W2_SW(reg0_right, reg1_right, s7_m, s6_m);               \
+                                                                 \
+  DOTP_SW2_SD(s0_m, s1_m, k0_m, k0_m, tp0_m, tp1_m);             \
+  DOTP_SW2_SD(s4_m, s5_m, k0_m, k0_m, tp2_m, tp3_m);             \
+  tp0_m = __msa_srari_d(tp0_m, DCT_CONST_BITS);                  \
+  tp1_m = __msa_srari_d(tp1_m, DCT_CONST_BITS);                  \
+  tp2_m = __msa_srari_d(tp2_m, DCT_CONST_BITS);                  \
+  tp3_m = __msa_srari_d(tp3_m, DCT_CONST_BITS);                  \
+  out0 = __msa_pckev_w((v4i32)tp0_m, (v4i32)tp1_m);              \
+  out1 = __msa_pckev_w((v4i32)tp2_m, (v4i32)tp3_m);              \
+                                                                 \
+  DOTP_SW2_SD(s2_m, s3_m, k0_m, k0_m, tp0_m, tp1_m);             \
+  DOTP_SW2_SD(s6_m, s7_m, k0_m, k0_m, tp2_m, tp3_m);             \
+  tp0_m = __msa_srari_d(tp0_m, DCT_CONST_BITS);                  \
+  tp1_m = __msa_srari_d(tp1_m, DCT_CONST_BITS);                  \
+  tp2_m = __msa_srari_d(tp2_m, DCT_CONST_BITS);                  \
+  tp3_m = __msa_srari_d(tp3_m, DCT_CONST_BITS);                  \
+  out2 = __msa_pckev_w((v4i32)tp0_m, (v4i32)tp1_m);              \
+  out3 = __msa_pckev_w((v4i32)tp2_m, (v4i32)tp3_m);              \
+}
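
Reading off the interleave pattern (negated reg1 paired with reg0, then the
reverse), DOTP_CONST_PAIR_W performs rounded planar rotations by the
(const0, const1) cosine pair: the 32-bit vector form of the
dct_32_round(a * c0 - b * c1) / dct_32_round(b * c0 + a * c1) idiom in the C
code. One lane, as a hedged scalar sketch (lane ordering glossed over):

#include <stdint.h>

static void dotp_const_pair_scalar(int32_t a, int32_t b,
                                   int32_t c0, int32_t c1,
                                   int32_t *out_sub, int32_t *out_add) {
  const int64_t rnd = 1 << (DCT_CONST_BITS - 1);  /* __msa_srari_d rounding */
  *out_sub = (int32_t)(((int64_t)a * c0 - (int64_t)b * c1 + rnd)
                       >> DCT_CONST_BITS);
  *out_add = (int32_t)(((int64_t)b * c0 + (int64_t)a * c1 + rnd)
                       >> DCT_CONST_BITS);
}
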
+
 void fdct8x16_1d_column(const int16_t *input, int16_t *tmp_ptr,
                         int32_t src_stride);
 void fdct16x8_1d_row(int16_t *input, int16_t *output);
--- a/vpx_dsp/vpx_dsp.mk
+++ b/vpx_dsp/vpx_dsp.mk
@@ -71,14 +71,18 @@
 DSP_SRCS-$(HAVE_SSE2)   += x86/fwd_txfm_sse2.h
 DSP_SRCS-$(HAVE_SSE2)   += x86/fwd_txfm_sse2.c
 DSP_SRCS-$(HAVE_SSE2)   += x86/fwd_txfm_impl_sse2.h
+DSP_SRCS-$(HAVE_SSE2)   += x86/fwd_dct32x32_impl_sse2.h
 ifeq ($(ARCH_X86_64),yes)
 ifeq ($(CONFIG_USE_X86INC),yes)
 DSP_SRCS-$(HAVE_SSSE3)  += x86/fwd_txfm_ssse3_x86_64.asm
 endif
 endif
+DSP_SRCS-$(HAVE_AVX2)   += x86/fwd_txfm_avx2.c
+DSP_SRCS-$(HAVE_AVX2)   += x86/fwd_dct32x32_impl_avx2.h
 DSP_SRCS-$(HAVE_NEON)   += arm/fwd_txfm_neon.c
 DSP_SRCS-$(HAVE_MSA)    += mips/fwd_txfm_msa.h
 DSP_SRCS-$(HAVE_MSA)    += mips/fwd_txfm_msa.c
+DSP_SRCS-$(HAVE_MSA)    += mips/fwd_dct32x32_msa.c
 endif  # CONFIG_VP9_ENCODER
 
 # quantization
--- a/vpx_dsp/vpx_dsp_rtcd_defs.pl
+++ b/vpx_dsp/vpx_dsp_rtcd_defs.pl
@@ -138,6 +138,12 @@
   add_proto qw/void vp9_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
   specialize qw/vp9_fdct16x16 sse2/;
 
+  add_proto qw/void vp9_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/vp9_fdct32x32 sse2/;
+
+  add_proto qw/void vp9_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/vp9_fdct32x32_rd sse2/;
+
   add_proto qw/void vp9_highbd_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
   specialize qw/vp9_highbd_fdct4x4 sse2/;
 
@@ -146,6 +152,12 @@
 
   add_proto qw/void vp9_highbd_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
   specialize qw/vp9_highbd_fdct16x16 sse2/;
+
+  add_proto qw/void vp9_highbd_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/vp9_highbd_fdct32x32 sse2/;
+
+  add_proto qw/void vp9_highbd_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/vp9_highbd_fdct32x32_rd sse2/;
 } else {
   add_proto qw/void vp9_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
   specialize qw/vp9_fdct4x4 sse2 msa/;
@@ -155,6 +167,12 @@
 
   add_proto qw/void vp9_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
   specialize qw/vp9_fdct16x16 sse2 msa/;
+
+  add_proto qw/void vp9_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/vp9_fdct32x32 sse2 avx2 msa/;
+
+  add_proto qw/void vp9_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/vp9_fdct32x32_rd sse2 avx2 msa/;
 }  # CONFIG_VP9_HIGHBITDEPTH
 }  # CONFIG_VP9_ENCODER
 
--- /dev/null
+++ b/vpx_dsp/x86/fwd_dct32x32_impl_avx2.h
@@ -1,0 +1,2711 @@
+/*
+ *  Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <immintrin.h>  // AVX2
+
+#include "vpx_dsp/txfm_common.h"
+
+#define pair256_set_epi16(a, b) \
+  _mm256_set_epi16((int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a), \
+                   (int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a), \
+                   (int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a), \
+                   (int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a))
+
+#define pair256_set_epi32(a, b) \
+  _mm256_set_epi32((int)(b), (int)(a), (int)(b), (int)(a), \
+                   (int)(b), (int)(a), (int)(b), (int)(a))
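+
+// pair256_set_epi16(a, b) produces the lane pattern (lowest lane first)
+//   { a, b, a, b, ..., a, b }
+// so that _mm256_madd_epi16 against interleaved inputs {x0, y0, x1, y1, ...}
+// yields x * a + y * b in each 32-bit result lane.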
+
+#if FDCT32x32_HIGH_PRECISION
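+// Per 64-bit lane: the sum of the two unsigned 32x32->64-bit products of the
+// even and odd 32-bit halves of a and b, i.e. a_lo * b_lo + a_hi * b_hi.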
+static INLINE __m256i k_madd_epi32_avx2(__m256i a, __m256i b) {
+  __m256i buf0, buf1;
+  buf0 = _mm256_mul_epu32(a, b);
+  a = _mm256_srli_epi64(a, 32);
+  b = _mm256_srli_epi64(b, 32);
+  buf1 = _mm256_mul_epu32(a, b);
+  return _mm256_add_epi64(buf0, buf1);
+}
+
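+// Packs the low 32 bits of each 64-bit lane of a and b into one vector of
+// 32-bit lanes (per 128-bit half: a0, a2, b0, b2).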
+static INLINE __m256i k_packs_epi64_avx2(__m256i a, __m256i b) {
+  __m256i buf0 = _mm256_shuffle_epi32(a, _MM_SHUFFLE(0, 0, 2, 0));
+  __m256i buf1 = _mm256_shuffle_epi32(b, _MM_SHUFFLE(0, 0, 2, 0));
+  return _mm256_unpacklo_epi64(buf0, buf1);
+}
+#endif
+
+void FDCT32x32_2D_AVX2(const int16_t *input,
+                       int16_t *output_org, int stride) {
+  // Calculate pre-multiplied strides
+  const int str1 = stride;
+  const int str2 = 2 * stride;
+  const int str3 = 2 * stride + str1;
+  // We need an intermediate buffer between passes.
+  DECLARE_ALIGNED(32, int16_t, intermediate[32 * 32]);
+  // Constants
+  //    In one case all sixteen 16-bit lanes hold the same value; in all
+  //    others a pair of values is repeated eight times across the 256-bit
+  //    register. This is done by constructing the 32-bit constant
+  //    corresponding to that pair.
+  const __m256i k__cospi_p16_p16 = _mm256_set1_epi16((int16_t)cospi_16_64);
+  const __m256i k__cospi_p16_m16 = pair256_set_epi16(+cospi_16_64, -cospi_16_64);
+  const __m256i k__cospi_m08_p24 = pair256_set_epi16(-cospi_8_64,   cospi_24_64);
+  const __m256i k__cospi_m24_m08 = pair256_set_epi16(-cospi_24_64, -cospi_8_64);
+  const __m256i k__cospi_p24_p08 = pair256_set_epi16(+cospi_24_64,  cospi_8_64);
+  const __m256i k__cospi_p12_p20 = pair256_set_epi16(+cospi_12_64,  cospi_20_64);
+  const __m256i k__cospi_m20_p12 = pair256_set_epi16(-cospi_20_64,  cospi_12_64);
+  const __m256i k__cospi_m04_p28 = pair256_set_epi16(-cospi_4_64,   cospi_28_64);
+  const __m256i k__cospi_p28_p04 = pair256_set_epi16(+cospi_28_64,  cospi_4_64);
+  const __m256i k__cospi_m28_m04 = pair256_set_epi16(-cospi_28_64, -cospi_4_64);
+  const __m256i k__cospi_m12_m20 = pair256_set_epi16(-cospi_12_64, -cospi_20_64);
+  const __m256i k__cospi_p30_p02 = pair256_set_epi16(+cospi_30_64,  cospi_2_64);
+  const __m256i k__cospi_p14_p18 = pair256_set_epi16(+cospi_14_64,  cospi_18_64);
+  const __m256i k__cospi_p22_p10 = pair256_set_epi16(+cospi_22_64,  cospi_10_64);
+  const __m256i k__cospi_p06_p26 = pair256_set_epi16(+cospi_6_64,   cospi_26_64);
+  const __m256i k__cospi_m26_p06 = pair256_set_epi16(-cospi_26_64,  cospi_6_64);
+  const __m256i k__cospi_m10_p22 = pair256_set_epi16(-cospi_10_64,  cospi_22_64);
+  const __m256i k__cospi_m18_p14 = pair256_set_epi16(-cospi_18_64,  cospi_14_64);
+  const __m256i k__cospi_m02_p30 = pair256_set_epi16(-cospi_2_64,   cospi_30_64);
+  const __m256i k__cospi_p31_p01 = pair256_set_epi16(+cospi_31_64,  cospi_1_64);
+  const __m256i k__cospi_p15_p17 = pair256_set_epi16(+cospi_15_64,  cospi_17_64);
+  const __m256i k__cospi_p23_p09 = pair256_set_epi16(+cospi_23_64,  cospi_9_64);
+  const __m256i k__cospi_p07_p25 = pair256_set_epi16(+cospi_7_64,   cospi_25_64);
+  const __m256i k__cospi_m25_p07 = pair256_set_epi16(-cospi_25_64,  cospi_7_64);
+  const __m256i k__cospi_m09_p23 = pair256_set_epi16(-cospi_9_64,   cospi_23_64);
+  const __m256i k__cospi_m17_p15 = pair256_set_epi16(-cospi_17_64,  cospi_15_64);
+  const __m256i k__cospi_m01_p31 = pair256_set_epi16(-cospi_1_64,   cospi_31_64);
+  const __m256i k__cospi_p27_p05 = pair256_set_epi16(+cospi_27_64,  cospi_5_64);
+  const __m256i k__cospi_p11_p21 = pair256_set_epi16(+cospi_11_64,  cospi_21_64);
+  const __m256i k__cospi_p19_p13 = pair256_set_epi16(+cospi_19_64,  cospi_13_64);
+  const __m256i k__cospi_p03_p29 = pair256_set_epi16(+cospi_3_64,   cospi_29_64);
+  const __m256i k__cospi_m29_p03 = pair256_set_epi16(-cospi_29_64,  cospi_3_64);
+  const __m256i k__cospi_m13_p19 = pair256_set_epi16(-cospi_13_64,  cospi_19_64);
+  const __m256i k__cospi_m21_p11 = pair256_set_epi16(-cospi_21_64,  cospi_11_64);
+  const __m256i k__cospi_m05_p27 = pair256_set_epi16(-cospi_5_64,   cospi_27_64);
+  const __m256i k__DCT_CONST_ROUNDING = _mm256_set1_epi32(DCT_CONST_ROUNDING);
+  const __m256i kZero = _mm256_set1_epi16(0);
+  const __m256i kOne  = _mm256_set1_epi16(1);
+  // Do the two transform/transpose passes
+  int pass;
+  for (pass = 0; pass < 2; ++pass) {
+    // We process sixteen columns (transposed rows in second pass) at a time.
+    int column_start;
+    for (column_start = 0; column_start < 32; column_start += 16) {
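+      // Each __m256i holds 16 int16 coefficients, so one register covers
+      // half of a 32-wide row and the two column_start iterations cover the
+      // whole block.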
+      __m256i step1[32];
+      __m256i step2[32];
+      __m256i step3[32];
+      __m256i out[32];
+      // Stage 1
+      // Note: even though all the loads below are aligned, using the aligned
+      //       intrinsic makes the code slightly slower.
+      if (0 == pass) {
+        const int16_t *in  = &input[column_start];
+        // step1[i] =  (in[ 0 * stride] + in[(32 -  1) * stride]) << 2;
+        // Note: the next four blocks could be in a loop. That would help the
+        //       instruction cache but is actually slower.
+        {
+          const int16_t *ina =  in +  0 * str1;
+          const int16_t *inb =  in + 31 * str1;
+          __m256i *step1a = &step1[ 0];
+          __m256i *step1b = &step1[31];
+          const __m256i ina0  = _mm256_loadu_si256((const __m256i *)(ina));
+          const __m256i ina1  = _mm256_loadu_si256((const __m256i *)(ina + str1));
+          const __m256i ina2  = _mm256_loadu_si256((const __m256i *)(ina + str2));
+          const __m256i ina3  = _mm256_loadu_si256((const __m256i *)(ina + str3));
+          const __m256i inb3  = _mm256_loadu_si256((const __m256i *)(inb - str3));
+          const __m256i inb2  = _mm256_loadu_si256((const __m256i *)(inb - str2));
+          const __m256i inb1  = _mm256_loadu_si256((const __m256i *)(inb - str1));
+          const __m256i inb0  = _mm256_loadu_si256((const __m256i *)(inb));
+          step1a[ 0] = _mm256_add_epi16(ina0, inb0);
+          step1a[ 1] = _mm256_add_epi16(ina1, inb1);
+          step1a[ 2] = _mm256_add_epi16(ina2, inb2);
+          step1a[ 3] = _mm256_add_epi16(ina3, inb3);
+          step1b[-3] = _mm256_sub_epi16(ina3, inb3);
+          step1b[-2] = _mm256_sub_epi16(ina2, inb2);
+          step1b[-1] = _mm256_sub_epi16(ina1, inb1);
+          step1b[-0] = _mm256_sub_epi16(ina0, inb0);
+          step1a[ 0] = _mm256_slli_epi16(step1a[ 0], 2);
+          step1a[ 1] = _mm256_slli_epi16(step1a[ 1], 2);
+          step1a[ 2] = _mm256_slli_epi16(step1a[ 2], 2);
+          step1a[ 3] = _mm256_slli_epi16(step1a[ 3], 2);
+          step1b[-3] = _mm256_slli_epi16(step1b[-3], 2);
+          step1b[-2] = _mm256_slli_epi16(step1b[-2], 2);
+          step1b[-1] = _mm256_slli_epi16(step1b[-1], 2);
+          step1b[-0] = _mm256_slli_epi16(step1b[-0], 2);
+        }
+        {
+          const int16_t *ina =  in +  4 * str1;
+          const int16_t *inb =  in + 27 * str1;
+          __m256i *step1a = &step1[ 4];
+          __m256i *step1b = &step1[27];
+          const __m256i ina0  = _mm256_loadu_si256((const __m256i *)(ina));
+          const __m256i ina1  = _mm256_loadu_si256((const __m256i *)(ina + str1));
+          const __m256i ina2  = _mm256_loadu_si256((const __m256i *)(ina + str2));
+          const __m256i ina3  = _mm256_loadu_si256((const __m256i *)(ina + str3));
+          const __m256i inb3  = _mm256_loadu_si256((const __m256i *)(inb - str3));
+          const __m256i inb2  = _mm256_loadu_si256((const __m256i *)(inb - str2));
+          const __m256i inb1  = _mm256_loadu_si256((const __m256i *)(inb - str1));
+          const __m256i inb0  = _mm256_loadu_si256((const __m256i *)(inb));
+          step1a[ 0] = _mm256_add_epi16(ina0, inb0);
+          step1a[ 1] = _mm256_add_epi16(ina1, inb1);
+          step1a[ 2] = _mm256_add_epi16(ina2, inb2);
+          step1a[ 3] = _mm256_add_epi16(ina3, inb3);
+          step1b[-3] = _mm256_sub_epi16(ina3, inb3);
+          step1b[-2] = _mm256_sub_epi16(ina2, inb2);
+          step1b[-1] = _mm256_sub_epi16(ina1, inb1);
+          step1b[-0] = _mm256_sub_epi16(ina0, inb0);
+          step1a[ 0] = _mm256_slli_epi16(step1a[ 0], 2);
+          step1a[ 1] = _mm256_slli_epi16(step1a[ 1], 2);
+          step1a[ 2] = _mm256_slli_epi16(step1a[ 2], 2);
+          step1a[ 3] = _mm256_slli_epi16(step1a[ 3], 2);
+          step1b[-3] = _mm256_slli_epi16(step1b[-3], 2);
+          step1b[-2] = _mm256_slli_epi16(step1b[-2], 2);
+          step1b[-1] = _mm256_slli_epi16(step1b[-1], 2);
+          step1b[-0] = _mm256_slli_epi16(step1b[-0], 2);
+        }
+        {
+          const int16_t *ina =  in +  8 * str1;
+          const int16_t *inb =  in + 23 * str1;
+          __m256i *step1a = &step1[ 8];
+          __m256i *step1b = &step1[23];
+          const __m256i ina0  = _mm256_loadu_si256((const __m256i *)(ina));
+          const __m256i ina1  = _mm256_loadu_si256((const __m256i *)(ina + str1));
+          const __m256i ina2  = _mm256_loadu_si256((const __m256i *)(ina + str2));
+          const __m256i ina3  = _mm256_loadu_si256((const __m256i *)(ina + str3));
+          const __m256i inb3  = _mm256_loadu_si256((const __m256i *)(inb - str3));
+          const __m256i inb2  = _mm256_loadu_si256((const __m256i *)(inb - str2));
+          const __m256i inb1  = _mm256_loadu_si256((const __m256i *)(inb - str1));
+          const __m256i inb0  = _mm256_loadu_si256((const __m256i *)(inb));
+          step1a[ 0] = _mm256_add_epi16(ina0, inb0);
+          step1a[ 1] = _mm256_add_epi16(ina1, inb1);
+          step1a[ 2] = _mm256_add_epi16(ina2, inb2);
+          step1a[ 3] = _mm256_add_epi16(ina3, inb3);
+          step1b[-3] = _mm256_sub_epi16(ina3, inb3);
+          step1b[-2] = _mm256_sub_epi16(ina2, inb2);
+          step1b[-1] = _mm256_sub_epi16(ina1, inb1);
+          step1b[-0] = _mm256_sub_epi16(ina0, inb0);
+          step1a[ 0] = _mm256_slli_epi16(step1a[ 0], 2);
+          step1a[ 1] = _mm256_slli_epi16(step1a[ 1], 2);
+          step1a[ 2] = _mm256_slli_epi16(step1a[ 2], 2);
+          step1a[ 3] = _mm256_slli_epi16(step1a[ 3], 2);
+          step1b[-3] = _mm256_slli_epi16(step1b[-3], 2);
+          step1b[-2] = _mm256_slli_epi16(step1b[-2], 2);
+          step1b[-1] = _mm256_slli_epi16(step1b[-1], 2);
+          step1b[-0] = _mm256_slli_epi16(step1b[-0], 2);
+        }
+        {
+          const int16_t *ina =  in + 12 * str1;
+          const int16_t *inb =  in + 19 * str1;
+          __m256i *step1a = &step1[12];
+          __m256i *step1b = &step1[19];
+          const __m256i ina0  = _mm256_loadu_si256((const __m256i *)(ina));
+          const __m256i ina1  = _mm256_loadu_si256((const __m256i *)(ina + str1));
+          const __m256i ina2  = _mm256_loadu_si256((const __m256i *)(ina + str2));
+          const __m256i ina3  = _mm256_loadu_si256((const __m256i *)(ina + str3));
+          const __m256i inb3  = _mm256_loadu_si256((const __m256i *)(inb - str3));
+          const __m256i inb2  = _mm256_loadu_si256((const __m256i *)(inb - str2));
+          const __m256i inb1  = _mm256_loadu_si256((const __m256i *)(inb - str1));
+          const __m256i inb0  = _mm256_loadu_si256((const __m256i *)(inb));
+          step1a[ 0] = _mm256_add_epi16(ina0, inb0);
+          step1a[ 1] = _mm256_add_epi16(ina1, inb1);
+          step1a[ 2] = _mm256_add_epi16(ina2, inb2);
+          step1a[ 3] = _mm256_add_epi16(ina3, inb3);
+          step1b[-3] = _mm256_sub_epi16(ina3, inb3);
+          step1b[-2] = _mm256_sub_epi16(ina2, inb2);
+          step1b[-1] = _mm256_sub_epi16(ina1, inb1);
+          step1b[-0] = _mm256_sub_epi16(ina0, inb0);
+          step1a[ 0] = _mm256_slli_epi16(step1a[ 0], 2);
+          step1a[ 1] = _mm256_slli_epi16(step1a[ 1], 2);
+          step1a[ 2] = _mm256_slli_epi16(step1a[ 2], 2);
+          step1a[ 3] = _mm256_slli_epi16(step1a[ 3], 2);
+          step1b[-3] = _mm256_slli_epi16(step1b[-3], 2);
+          step1b[-2] = _mm256_slli_epi16(step1b[-2], 2);
+          step1b[-1] = _mm256_slli_epi16(step1b[-1], 2);
+          step1b[-0] = _mm256_slli_epi16(step1b[-0], 2);
+        }
+      } else {
+        int16_t *in = &intermediate[column_start];
+        // step1[i] =  in[ 0 * 32] + in[(32 -  1) * 32];
+        // Note: using the same approach as above to share a common offset is
+        //       counter-productive here, since all offsets can be computed at
+        //       compile time.
+        // Note: the next four blocks could be in a loop. That would help the
+        //       instruction cache but is actually slower.
+        {
+          __m256i in00  = _mm256_loadu_si256((const __m256i *)(in +  0 * 32));
+          __m256i in01  = _mm256_loadu_si256((const __m256i *)(in +  1 * 32));
+          __m256i in02  = _mm256_loadu_si256((const __m256i *)(in +  2 * 32));
+          __m256i in03  = _mm256_loadu_si256((const __m256i *)(in +  3 * 32));
+          __m256i in28  = _mm256_loadu_si256((const __m256i *)(in + 28 * 32));
+          __m256i in29  = _mm256_loadu_si256((const __m256i *)(in + 29 * 32));
+          __m256i in30  = _mm256_loadu_si256((const __m256i *)(in + 30 * 32));
+          __m256i in31  = _mm256_loadu_si256((const __m256i *)(in + 31 * 32));
+          step1[ 0] = _mm256_add_epi16(in00, in31);
+          step1[ 1] = _mm256_add_epi16(in01, in30);
+          step1[ 2] = _mm256_add_epi16(in02, in29);
+          step1[ 3] = _mm256_add_epi16(in03, in28);
+          step1[28] = _mm256_sub_epi16(in03, in28);
+          step1[29] = _mm256_sub_epi16(in02, in29);
+          step1[30] = _mm256_sub_epi16(in01, in30);
+          step1[31] = _mm256_sub_epi16(in00, in31);
+        }
+        {
+          __m256i in04  = _mm256_loadu_si256((const __m256i *)(in +  4 * 32));
+          __m256i in05  = _mm256_loadu_si256((const __m256i *)(in +  5 * 32));
+          __m256i in06  = _mm256_loadu_si256((const __m256i *)(in +  6 * 32));
+          __m256i in07  = _mm256_loadu_si256((const __m256i *)(in +  7 * 32));
+          __m256i in24  = _mm256_loadu_si256((const __m256i *)(in + 24 * 32));
+          __m256i in25  = _mm256_loadu_si256((const __m256i *)(in + 25 * 32));
+          __m256i in26  = _mm256_loadu_si256((const __m256i *)(in + 26 * 32));
+          __m256i in27  = _mm256_loadu_si256((const __m256i *)(in + 27 * 32));
+          step1[ 4] = _mm256_add_epi16(in04, in27);
+          step1[ 5] = _mm256_add_epi16(in05, in26);
+          step1[ 6] = _mm256_add_epi16(in06, in25);
+          step1[ 7] = _mm256_add_epi16(in07, in24);
+          step1[24] = _mm256_sub_epi16(in07, in24);
+          step1[25] = _mm256_sub_epi16(in06, in25);
+          step1[26] = _mm256_sub_epi16(in05, in26);
+          step1[27] = _mm256_sub_epi16(in04, in27);
+        }
+        {
+          __m256i in08  = _mm256_loadu_si256((const __m256i *)(in +  8 * 32));
+          __m256i in09  = _mm256_loadu_si256((const __m256i *)(in +  9 * 32));
+          __m256i in10  = _mm256_loadu_si256((const __m256i *)(in + 10 * 32));
+          __m256i in11  = _mm256_loadu_si256((const __m256i *)(in + 11 * 32));
+          __m256i in20  = _mm256_loadu_si256((const __m256i *)(in + 20 * 32));
+          __m256i in21  = _mm256_loadu_si256((const __m256i *)(in + 21 * 32));
+          __m256i in22  = _mm256_loadu_si256((const __m256i *)(in + 22 * 32));
+          __m256i in23  = _mm256_loadu_si256((const __m256i *)(in + 23 * 32));
+          step1[ 8] = _mm256_add_epi16(in08, in23);
+          step1[ 9] = _mm256_add_epi16(in09, in22);
+          step1[10] = _mm256_add_epi16(in10, in21);
+          step1[11] = _mm256_add_epi16(in11, in20);
+          step1[20] = _mm256_sub_epi16(in11, in20);
+          step1[21] = _mm256_sub_epi16(in10, in21);
+          step1[22] = _mm256_sub_epi16(in09, in22);
+          step1[23] = _mm256_sub_epi16(in08, in23);
+        }
+        {
+          __m256i in12  = _mm256_loadu_si256((const __m256i *)(in + 12 * 32));
+          __m256i in13  = _mm256_loadu_si256((const __m256i *)(in + 13 * 32));
+          __m256i in14  = _mm256_loadu_si256((const __m256i *)(in + 14 * 32));
+          __m256i in15  = _mm256_loadu_si256((const __m256i *)(in + 15 * 32));
+          __m256i in16  = _mm256_loadu_si256((const __m256i *)(in + 16 * 32));
+          __m256i in17  = _mm256_loadu_si256((const __m256i *)(in + 17 * 32));
+          __m256i in18  = _mm256_loadu_si256((const __m256i *)(in + 18 * 32));
+          __m256i in19  = _mm256_loadu_si256((const __m256i *)(in + 19 * 32));
+          step1[12] = _mm256_add_epi16(in12, in19);
+          step1[13] = _mm256_add_epi16(in13, in18);
+          step1[14] = _mm256_add_epi16(in14, in17);
+          step1[15] = _mm256_add_epi16(in15, in16);
+          step1[16] = _mm256_sub_epi16(in15, in16);
+          step1[17] = _mm256_sub_epi16(in14, in17);
+          step1[18] = _mm256_sub_epi16(in13, in18);
+          step1[19] = _mm256_sub_epi16(in12, in19);
+        }
+      }
+      // Stage 2
+      {
+        step2[ 0] = _mm256_add_epi16(step1[0], step1[15]);
+        step2[ 1] = _mm256_add_epi16(step1[1], step1[14]);
+        step2[ 2] = _mm256_add_epi16(step1[2], step1[13]);
+        step2[ 3] = _mm256_add_epi16(step1[3], step1[12]);
+        step2[ 4] = _mm256_add_epi16(step1[4], step1[11]);
+        step2[ 5] = _mm256_add_epi16(step1[5], step1[10]);
+        step2[ 6] = _mm256_add_epi16(step1[6], step1[ 9]);
+        step2[ 7] = _mm256_add_epi16(step1[7], step1[ 8]);
+        step2[ 8] = _mm256_sub_epi16(step1[7], step1[ 8]);
+        step2[ 9] = _mm256_sub_epi16(step1[6], step1[ 9]);
+        step2[10] = _mm256_sub_epi16(step1[5], step1[10]);
+        step2[11] = _mm256_sub_epi16(step1[4], step1[11]);
+        step2[12] = _mm256_sub_epi16(step1[3], step1[12]);
+        step2[13] = _mm256_sub_epi16(step1[2], step1[13]);
+        step2[14] = _mm256_sub_epi16(step1[1], step1[14]);
+        step2[15] = _mm256_sub_epi16(step1[0], step1[15]);
+      }
+      {
+        const __m256i s2_20_0 = _mm256_unpacklo_epi16(step1[27], step1[20]);
+        const __m256i s2_20_1 = _mm256_unpackhi_epi16(step1[27], step1[20]);
+        const __m256i s2_21_0 = _mm256_unpacklo_epi16(step1[26], step1[21]);
+        const __m256i s2_21_1 = _mm256_unpackhi_epi16(step1[26], step1[21]);
+        const __m256i s2_22_0 = _mm256_unpacklo_epi16(step1[25], step1[22]);
+        const __m256i s2_22_1 = _mm256_unpackhi_epi16(step1[25], step1[22]);
+        const __m256i s2_23_0 = _mm256_unpacklo_epi16(step1[24], step1[23]);
+        const __m256i s2_23_1 = _mm256_unpackhi_epi16(step1[24], step1[23]);
+        const __m256i s2_20_2 = _mm256_madd_epi16(s2_20_0, k__cospi_p16_m16);
+        const __m256i s2_20_3 = _mm256_madd_epi16(s2_20_1, k__cospi_p16_m16);
+        const __m256i s2_21_2 = _mm256_madd_epi16(s2_21_0, k__cospi_p16_m16);
+        const __m256i s2_21_3 = _mm256_madd_epi16(s2_21_1, k__cospi_p16_m16);
+        const __m256i s2_22_2 = _mm256_madd_epi16(s2_22_0, k__cospi_p16_m16);
+        const __m256i s2_22_3 = _mm256_madd_epi16(s2_22_1, k__cospi_p16_m16);
+        const __m256i s2_23_2 = _mm256_madd_epi16(s2_23_0, k__cospi_p16_m16);
+        const __m256i s2_23_3 = _mm256_madd_epi16(s2_23_1, k__cospi_p16_m16);
+        const __m256i s2_24_2 = _mm256_madd_epi16(s2_23_0, k__cospi_p16_p16);
+        const __m256i s2_24_3 = _mm256_madd_epi16(s2_23_1, k__cospi_p16_p16);
+        const __m256i s2_25_2 = _mm256_madd_epi16(s2_22_0, k__cospi_p16_p16);
+        const __m256i s2_25_3 = _mm256_madd_epi16(s2_22_1, k__cospi_p16_p16);
+        const __m256i s2_26_2 = _mm256_madd_epi16(s2_21_0, k__cospi_p16_p16);
+        const __m256i s2_26_3 = _mm256_madd_epi16(s2_21_1, k__cospi_p16_p16);
+        const __m256i s2_27_2 = _mm256_madd_epi16(s2_20_0, k__cospi_p16_p16);
+        const __m256i s2_27_3 = _mm256_madd_epi16(s2_20_1, k__cospi_p16_p16);
+        // dct_const_round_shift
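+        // Per 32-bit lane: (x + DCT_CONST_ROUNDING) >> DCT_CONST_BITS, where
+        // DCT_CONST_ROUNDING == 1 << (DCT_CONST_BITS - 1).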
+        const __m256i s2_20_4 = _mm256_add_epi32(s2_20_2, k__DCT_CONST_ROUNDING);
+        const __m256i s2_20_5 = _mm256_add_epi32(s2_20_3, k__DCT_CONST_ROUNDING);
+        const __m256i s2_21_4 = _mm256_add_epi32(s2_21_2, k__DCT_CONST_ROUNDING);
+        const __m256i s2_21_5 = _mm256_add_epi32(s2_21_3, k__DCT_CONST_ROUNDING);
+        const __m256i s2_22_4 = _mm256_add_epi32(s2_22_2, k__DCT_CONST_ROUNDING);
+        const __m256i s2_22_5 = _mm256_add_epi32(s2_22_3, k__DCT_CONST_ROUNDING);
+        const __m256i s2_23_4 = _mm256_add_epi32(s2_23_2, k__DCT_CONST_ROUNDING);
+        const __m256i s2_23_5 = _mm256_add_epi32(s2_23_3, k__DCT_CONST_ROUNDING);
+        const __m256i s2_24_4 = _mm256_add_epi32(s2_24_2, k__DCT_CONST_ROUNDING);
+        const __m256i s2_24_5 = _mm256_add_epi32(s2_24_3, k__DCT_CONST_ROUNDING);
+        const __m256i s2_25_4 = _mm256_add_epi32(s2_25_2, k__DCT_CONST_ROUNDING);
+        const __m256i s2_25_5 = _mm256_add_epi32(s2_25_3, k__DCT_CONST_ROUNDING);
+        const __m256i s2_26_4 = _mm256_add_epi32(s2_26_2, k__DCT_CONST_ROUNDING);
+        const __m256i s2_26_5 = _mm256_add_epi32(s2_26_3, k__DCT_CONST_ROUNDING);
+        const __m256i s2_27_4 = _mm256_add_epi32(s2_27_2, k__DCT_CONST_ROUNDING);
+        const __m256i s2_27_5 = _mm256_add_epi32(s2_27_3, k__DCT_CONST_ROUNDING);
+        const __m256i s2_20_6 = _mm256_srai_epi32(s2_20_4, DCT_CONST_BITS);
+        const __m256i s2_20_7 = _mm256_srai_epi32(s2_20_5, DCT_CONST_BITS);
+        const __m256i s2_21_6 = _mm256_srai_epi32(s2_21_4, DCT_CONST_BITS);
+        const __m256i s2_21_7 = _mm256_srai_epi32(s2_21_5, DCT_CONST_BITS);
+        const __m256i s2_22_6 = _mm256_srai_epi32(s2_22_4, DCT_CONST_BITS);
+        const __m256i s2_22_7 = _mm256_srai_epi32(s2_22_5, DCT_CONST_BITS);
+        const __m256i s2_23_6 = _mm256_srai_epi32(s2_23_4, DCT_CONST_BITS);
+        const __m256i s2_23_7 = _mm256_srai_epi32(s2_23_5, DCT_CONST_BITS);
+        const __m256i s2_24_6 = _mm256_srai_epi32(s2_24_4, DCT_CONST_BITS);
+        const __m256i s2_24_7 = _mm256_srai_epi32(s2_24_5, DCT_CONST_BITS);
+        const __m256i s2_25_6 = _mm256_srai_epi32(s2_25_4, DCT_CONST_BITS);
+        const __m256i s2_25_7 = _mm256_srai_epi32(s2_25_5, DCT_CONST_BITS);
+        const __m256i s2_26_6 = _mm256_srai_epi32(s2_26_4, DCT_CONST_BITS);
+        const __m256i s2_26_7 = _mm256_srai_epi32(s2_26_5, DCT_CONST_BITS);
+        const __m256i s2_27_6 = _mm256_srai_epi32(s2_27_4, DCT_CONST_BITS);
+        const __m256i s2_27_7 = _mm256_srai_epi32(s2_27_5, DCT_CONST_BITS);
+        // Combine
+        step2[20] = _mm256_packs_epi32(s2_20_6, s2_20_7);
+        step2[21] = _mm256_packs_epi32(s2_21_6, s2_21_7);
+        step2[22] = _mm256_packs_epi32(s2_22_6, s2_22_7);
+        step2[23] = _mm256_packs_epi32(s2_23_6, s2_23_7);
+        step2[24] = _mm256_packs_epi32(s2_24_6, s2_24_7);
+        step2[25] = _mm256_packs_epi32(s2_25_6, s2_25_7);
+        step2[26] = _mm256_packs_epi32(s2_26_6, s2_26_7);
+        step2[27] = _mm256_packs_epi32(s2_27_6, s2_27_7);
+      }
+
+#if !FDCT32x32_HIGH_PRECISION
+      // Reduce the magnitude by half so the intermediate values stay within
+      // the 16-bit range.
+      if (1 == pass) {
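+        // The cmpgt/sub/add/shift sequence below computes, per 16-bit lane,
+        //   x = (x + 1 + (x < 0)) >> 2
+        // since the compare yields -1 for negative lanes and subtracting it
+        // adds the extra rounding bit.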
+        __m256i s3_00_0 = _mm256_cmpgt_epi16(kZero, step2[ 0]);
+        __m256i s3_01_0 = _mm256_cmpgt_epi16(kZero, step2[ 1]);
+        __m256i s3_02_0 = _mm256_cmpgt_epi16(kZero, step2[ 2]);
+        __m256i s3_03_0 = _mm256_cmpgt_epi16(kZero, step2[ 3]);
+        __m256i s3_04_0 = _mm256_cmpgt_epi16(kZero, step2[ 4]);
+        __m256i s3_05_0 = _mm256_cmpgt_epi16(kZero, step2[ 5]);
+        __m256i s3_06_0 = _mm256_cmpgt_epi16(kZero, step2[ 6]);
+        __m256i s3_07_0 = _mm256_cmpgt_epi16(kZero, step2[ 7]);
+        __m256i s2_08_0 = _mm256_cmpgt_epi16(kZero, step2[ 8]);
+        __m256i s2_09_0 = _mm256_cmpgt_epi16(kZero, step2[ 9]);
+        __m256i s3_10_0 = _mm256_cmpgt_epi16(kZero, step2[10]);
+        __m256i s3_11_0 = _mm256_cmpgt_epi16(kZero, step2[11]);
+        __m256i s3_12_0 = _mm256_cmpgt_epi16(kZero, step2[12]);
+        __m256i s3_13_0 = _mm256_cmpgt_epi16(kZero, step2[13]);
+        __m256i s2_14_0 = _mm256_cmpgt_epi16(kZero, step2[14]);
+        __m256i s2_15_0 = _mm256_cmpgt_epi16(kZero, step2[15]);
+        __m256i s3_16_0 = _mm256_cmpgt_epi16(kZero, step1[16]);
+        __m256i s3_17_0 = _mm256_cmpgt_epi16(kZero, step1[17]);
+        __m256i s3_18_0 = _mm256_cmpgt_epi16(kZero, step1[18]);
+        __m256i s3_19_0 = _mm256_cmpgt_epi16(kZero, step1[19]);
+        __m256i s3_20_0 = _mm256_cmpgt_epi16(kZero, step2[20]);
+        __m256i s3_21_0 = _mm256_cmpgt_epi16(kZero, step2[21]);
+        __m256i s3_22_0 = _mm256_cmpgt_epi16(kZero, step2[22]);
+        __m256i s3_23_0 = _mm256_cmpgt_epi16(kZero, step2[23]);
+        __m256i s3_24_0 = _mm256_cmpgt_epi16(kZero, step2[24]);
+        __m256i s3_25_0 = _mm256_cmpgt_epi16(kZero, step2[25]);
+        __m256i s3_26_0 = _mm256_cmpgt_epi16(kZero, step2[26]);
+        __m256i s3_27_0 = _mm256_cmpgt_epi16(kZero, step2[27]);
+        __m256i s3_28_0 = _mm256_cmpgt_epi16(kZero, step1[28]);
+        __m256i s3_29_0 = _mm256_cmpgt_epi16(kZero, step1[29]);
+        __m256i s3_30_0 = _mm256_cmpgt_epi16(kZero, step1[30]);
+        __m256i s3_31_0 = _mm256_cmpgt_epi16(kZero, step1[31]);
+
+        step2[ 0] = _mm256_sub_epi16(step2[ 0], s3_00_0);
+        step2[ 1] = _mm256_sub_epi16(step2[ 1], s3_01_0);
+        step2[ 2] = _mm256_sub_epi16(step2[ 2], s3_02_0);
+        step2[ 3] = _mm256_sub_epi16(step2[ 3], s3_03_0);
+        step2[ 4] = _mm256_sub_epi16(step2[ 4], s3_04_0);
+        step2[ 5] = _mm256_sub_epi16(step2[ 5], s3_05_0);
+        step2[ 6] = _mm256_sub_epi16(step2[ 6], s3_06_0);
+        step2[ 7] = _mm256_sub_epi16(step2[ 7], s3_07_0);
+        step2[ 8] = _mm256_sub_epi16(step2[ 8], s2_08_0);
+        step2[ 9] = _mm256_sub_epi16(step2[ 9], s2_09_0);
+        step2[10] = _mm256_sub_epi16(step2[10], s3_10_0);
+        step2[11] = _mm256_sub_epi16(step2[11], s3_11_0);
+        step2[12] = _mm256_sub_epi16(step2[12], s3_12_0);
+        step2[13] = _mm256_sub_epi16(step2[13], s3_13_0);
+        step2[14] = _mm256_sub_epi16(step2[14], s2_14_0);
+        step2[15] = _mm256_sub_epi16(step2[15], s2_15_0);
+        step1[16] = _mm256_sub_epi16(step1[16], s3_16_0);
+        step1[17] = _mm256_sub_epi16(step1[17], s3_17_0);
+        step1[18] = _mm256_sub_epi16(step1[18], s3_18_0);
+        step1[19] = _mm256_sub_epi16(step1[19], s3_19_0);
+        step2[20] = _mm256_sub_epi16(step2[20], s3_20_0);
+        step2[21] = _mm256_sub_epi16(step2[21], s3_21_0);
+        step2[22] = _mm256_sub_epi16(step2[22], s3_22_0);
+        step2[23] = _mm256_sub_epi16(step2[23], s3_23_0);
+        step2[24] = _mm256_sub_epi16(step2[24], s3_24_0);
+        step2[25] = _mm256_sub_epi16(step2[25], s3_25_0);
+        step2[26] = _mm256_sub_epi16(step2[26], s3_26_0);
+        step2[27] = _mm256_sub_epi16(step2[27], s3_27_0);
+        step1[28] = _mm256_sub_epi16(step1[28], s3_28_0);
+        step1[29] = _mm256_sub_epi16(step1[29], s3_29_0);
+        step1[30] = _mm256_sub_epi16(step1[30], s3_30_0);
+        step1[31] = _mm256_sub_epi16(step1[31], s3_31_0);
+
+        step2[ 0] = _mm256_add_epi16(step2[ 0], kOne);
+        step2[ 1] = _mm256_add_epi16(step2[ 1], kOne);
+        step2[ 2] = _mm256_add_epi16(step2[ 2], kOne);
+        step2[ 3] = _mm256_add_epi16(step2[ 3], kOne);
+        step2[ 4] = _mm256_add_epi16(step2[ 4], kOne);
+        step2[ 5] = _mm256_add_epi16(step2[ 5], kOne);
+        step2[ 6] = _mm256_add_epi16(step2[ 6], kOne);
+        step2[ 7] = _mm256_add_epi16(step2[ 7], kOne);
+        step2[ 8] = _mm256_add_epi16(step2[ 8], kOne);
+        step2[ 9] = _mm256_add_epi16(step2[ 9], kOne);
+        step2[10] = _mm256_add_epi16(step2[10], kOne);
+        step2[11] = _mm256_add_epi16(step2[11], kOne);
+        step2[12] = _mm256_add_epi16(step2[12], kOne);
+        step2[13] = _mm256_add_epi16(step2[13], kOne);
+        step2[14] = _mm256_add_epi16(step2[14], kOne);
+        step2[15] = _mm256_add_epi16(step2[15], kOne);
+        step1[16] = _mm256_add_epi16(step1[16], kOne);
+        step1[17] = _mm256_add_epi16(step1[17], kOne);
+        step1[18] = _mm256_add_epi16(step1[18], kOne);
+        step1[19] = _mm256_add_epi16(step1[19], kOne);
+        step2[20] = _mm256_add_epi16(step2[20], kOne);
+        step2[21] = _mm256_add_epi16(step2[21], kOne);
+        step2[22] = _mm256_add_epi16(step2[22], kOne);
+        step2[23] = _mm256_add_epi16(step2[23], kOne);
+        step2[24] = _mm256_add_epi16(step2[24], kOne);
+        step2[25] = _mm256_add_epi16(step2[25], kOne);
+        step2[26] = _mm256_add_epi16(step2[26], kOne);
+        step2[27] = _mm256_add_epi16(step2[27], kOne);
+        step1[28] = _mm256_add_epi16(step1[28], kOne);
+        step1[29] = _mm256_add_epi16(step1[29], kOne);
+        step1[30] = _mm256_add_epi16(step1[30], kOne);
+        step1[31] = _mm256_add_epi16(step1[31], kOne);
+
+        step2[ 0] = _mm256_srai_epi16(step2[ 0], 2);
+        step2[ 1] = _mm256_srai_epi16(step2[ 1], 2);
+        step2[ 2] = _mm256_srai_epi16(step2[ 2], 2);
+        step2[ 3] = _mm256_srai_epi16(step2[ 3], 2);
+        step2[ 4] = _mm256_srai_epi16(step2[ 4], 2);
+        step2[ 5] = _mm256_srai_epi16(step2[ 5], 2);
+        step2[ 6] = _mm256_srai_epi16(step2[ 6], 2);
+        step2[ 7] = _mm256_srai_epi16(step2[ 7], 2);
+        step2[ 8] = _mm256_srai_epi16(step2[ 8], 2);
+        step2[ 9] = _mm256_srai_epi16(step2[ 9], 2);
+        step2[10] = _mm256_srai_epi16(step2[10], 2);
+        step2[11] = _mm256_srai_epi16(step2[11], 2);
+        step2[12] = _mm256_srai_epi16(step2[12], 2);
+        step2[13] = _mm256_srai_epi16(step2[13], 2);
+        step2[14] = _mm256_srai_epi16(step2[14], 2);
+        step2[15] = _mm256_srai_epi16(step2[15], 2);
+        step1[16] = _mm256_srai_epi16(step1[16], 2);
+        step1[17] = _mm256_srai_epi16(step1[17], 2);
+        step1[18] = _mm256_srai_epi16(step1[18], 2);
+        step1[19] = _mm256_srai_epi16(step1[19], 2);
+        step2[20] = _mm256_srai_epi16(step2[20], 2);
+        step2[21] = _mm256_srai_epi16(step2[21], 2);
+        step2[22] = _mm256_srai_epi16(step2[22], 2);
+        step2[23] = _mm256_srai_epi16(step2[23], 2);
+        step2[24] = _mm256_srai_epi16(step2[24], 2);
+        step2[25] = _mm256_srai_epi16(step2[25], 2);
+        step2[26] = _mm256_srai_epi16(step2[26], 2);
+        step2[27] = _mm256_srai_epi16(step2[27], 2);
+        step1[28] = _mm256_srai_epi16(step1[28], 2);
+        step1[29] = _mm256_srai_epi16(step1[29], 2);
+        step1[30] = _mm256_srai_epi16(step1[30], 2);
+        step1[31] = _mm256_srai_epi16(step1[31], 2);
+      }
+#endif
+
+#if FDCT32x32_HIGH_PRECISION
+      if (pass == 0) {
+#endif
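+      // In the high-precision configuration this 16-bit path is used only
+      // for the first pass; the second pass is recomputed with 32-bit lanes
+      // later in this file.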
+      // Stage 3
+      {
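+        // 8-point butterfly: step3[i] = step2[7 - i] + step2[i] for i < 4,
+        //                    step3[i] = step2[7 - i] - step2[i] otherwise.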
+        step3[0] = _mm256_add_epi16(step2[(8 - 1)], step2[0]);
+        step3[1] = _mm256_add_epi16(step2[(8 - 2)], step2[1]);
+        step3[2] = _mm256_add_epi16(step2[(8 - 3)], step2[2]);
+        step3[3] = _mm256_add_epi16(step2[(8 - 4)], step2[3]);
+        step3[4] = _mm256_sub_epi16(step2[(8 - 5)], step2[4]);
+        step3[5] = _mm256_sub_epi16(step2[(8 - 6)], step2[5]);
+        step3[6] = _mm256_sub_epi16(step2[(8 - 7)], step2[6]);
+        step3[7] = _mm256_sub_epi16(step2[(8 - 8)], step2[7]);
+      }
+      {
+        const __m256i s3_10_0 = _mm256_unpacklo_epi16(step2[13], step2[10]);
+        const __m256i s3_10_1 = _mm256_unpackhi_epi16(step2[13], step2[10]);
+        const __m256i s3_11_0 = _mm256_unpacklo_epi16(step2[12], step2[11]);
+        const __m256i s3_11_1 = _mm256_unpackhi_epi16(step2[12], step2[11]);
+        const __m256i s3_10_2 = _mm256_madd_epi16(s3_10_0, k__cospi_p16_m16);
+        const __m256i s3_10_3 = _mm256_madd_epi16(s3_10_1, k__cospi_p16_m16);
+        const __m256i s3_11_2 = _mm256_madd_epi16(s3_11_0, k__cospi_p16_m16);
+        const __m256i s3_11_3 = _mm256_madd_epi16(s3_11_1, k__cospi_p16_m16);
+        const __m256i s3_12_2 = _mm256_madd_epi16(s3_11_0, k__cospi_p16_p16);
+        const __m256i s3_12_3 = _mm256_madd_epi16(s3_11_1, k__cospi_p16_p16);
+        const __m256i s3_13_2 = _mm256_madd_epi16(s3_10_0, k__cospi_p16_p16);
+        const __m256i s3_13_3 = _mm256_madd_epi16(s3_10_1, k__cospi_p16_p16);
+        // dct_const_round_shift
+        const __m256i s3_10_4 = _mm256_add_epi32(s3_10_2, k__DCT_CONST_ROUNDING);
+        const __m256i s3_10_5 = _mm256_add_epi32(s3_10_3, k__DCT_CONST_ROUNDING);
+        const __m256i s3_11_4 = _mm256_add_epi32(s3_11_2, k__DCT_CONST_ROUNDING);
+        const __m256i s3_11_5 = _mm256_add_epi32(s3_11_3, k__DCT_CONST_ROUNDING);
+        const __m256i s3_12_4 = _mm256_add_epi32(s3_12_2, k__DCT_CONST_ROUNDING);
+        const __m256i s3_12_5 = _mm256_add_epi32(s3_12_3, k__DCT_CONST_ROUNDING);
+        const __m256i s3_13_4 = _mm256_add_epi32(s3_13_2, k__DCT_CONST_ROUNDING);
+        const __m256i s3_13_5 = _mm256_add_epi32(s3_13_3, k__DCT_CONST_ROUNDING);
+        const __m256i s3_10_6 = _mm256_srai_epi32(s3_10_4, DCT_CONST_BITS);
+        const __m256i s3_10_7 = _mm256_srai_epi32(s3_10_5, DCT_CONST_BITS);
+        const __m256i s3_11_6 = _mm256_srai_epi32(s3_11_4, DCT_CONST_BITS);
+        const __m256i s3_11_7 = _mm256_srai_epi32(s3_11_5, DCT_CONST_BITS);
+        const __m256i s3_12_6 = _mm256_srai_epi32(s3_12_4, DCT_CONST_BITS);
+        const __m256i s3_12_7 = _mm256_srai_epi32(s3_12_5, DCT_CONST_BITS);
+        const __m256i s3_13_6 = _mm256_srai_epi32(s3_13_4, DCT_CONST_BITS);
+        const __m256i s3_13_7 = _mm256_srai_epi32(s3_13_5, DCT_CONST_BITS);
+        // Combine
+        step3[10] = _mm256_packs_epi32(s3_10_6, s3_10_7);
+        step3[11] = _mm256_packs_epi32(s3_11_6, s3_11_7);
+        step3[12] = _mm256_packs_epi32(s3_12_6, s3_12_7);
+        step3[13] = _mm256_packs_epi32(s3_13_6, s3_13_7);
+      }
+      {
+        step3[16] = _mm256_add_epi16(step2[23], step1[16]);
+        step3[17] = _mm256_add_epi16(step2[22], step1[17]);
+        step3[18] = _mm256_add_epi16(step2[21], step1[18]);
+        step3[19] = _mm256_add_epi16(step2[20], step1[19]);
+        step3[20] = _mm256_sub_epi16(step1[19], step2[20]);
+        step3[21] = _mm256_sub_epi16(step1[18], step2[21]);
+        step3[22] = _mm256_sub_epi16(step1[17], step2[22]);
+        step3[23] = _mm256_sub_epi16(step1[16], step2[23]);
+        step3[24] = _mm256_sub_epi16(step1[31], step2[24]);
+        step3[25] = _mm256_sub_epi16(step1[30], step2[25]);
+        step3[26] = _mm256_sub_epi16(step1[29], step2[26]);
+        step3[27] = _mm256_sub_epi16(step1[28], step2[27]);
+        step3[28] = _mm256_add_epi16(step2[27], step1[28]);
+        step3[29] = _mm256_add_epi16(step2[26], step1[29]);
+        step3[30] = _mm256_add_epi16(step2[25], step1[30]);
+        step3[31] = _mm256_add_epi16(step2[24], step1[31]);
+      }
+
+      // Stage 4
+      {
+        step1[ 0] = _mm256_add_epi16(step3[ 3], step3[ 0]);
+        step1[ 1] = _mm256_add_epi16(step3[ 2], step3[ 1]);
+        step1[ 2] = _mm256_sub_epi16(step3[ 1], step3[ 2]);
+        step1[ 3] = _mm256_sub_epi16(step3[ 0], step3[ 3]);
+        step1[ 8] = _mm256_add_epi16(step3[11], step2[ 8]);
+        step1[ 9] = _mm256_add_epi16(step3[10], step2[ 9]);
+        step1[10] = _mm256_sub_epi16(step2[ 9], step3[10]);
+        step1[11] = _mm256_sub_epi16(step2[ 8], step3[11]);
+        step1[12] = _mm256_sub_epi16(step2[15], step3[12]);
+        step1[13] = _mm256_sub_epi16(step2[14], step3[13]);
+        step1[14] = _mm256_add_epi16(step3[13], step2[14]);
+        step1[15] = _mm256_add_epi16(step3[12], step2[15]);
+      }
+      {
+        const __m256i s1_05_0 = _mm256_unpacklo_epi16(step3[6], step3[5]);
+        const __m256i s1_05_1 = _mm256_unpackhi_epi16(step3[6], step3[5]);
+        const __m256i s1_05_2 = _mm256_madd_epi16(s1_05_0, k__cospi_p16_m16);
+        const __m256i s1_05_3 = _mm256_madd_epi16(s1_05_1, k__cospi_p16_m16);
+        const __m256i s1_06_2 = _mm256_madd_epi16(s1_05_0, k__cospi_p16_p16);
+        const __m256i s1_06_3 = _mm256_madd_epi16(s1_05_1, k__cospi_p16_p16);
+        // dct_const_round_shift
+        const __m256i s1_05_4 = _mm256_add_epi32(s1_05_2, k__DCT_CONST_ROUNDING);
+        const __m256i s1_05_5 = _mm256_add_epi32(s1_05_3, k__DCT_CONST_ROUNDING);
+        const __m256i s1_06_4 = _mm256_add_epi32(s1_06_2, k__DCT_CONST_ROUNDING);
+        const __m256i s1_06_5 = _mm256_add_epi32(s1_06_3, k__DCT_CONST_ROUNDING);
+        const __m256i s1_05_6 = _mm256_srai_epi32(s1_05_4, DCT_CONST_BITS);
+        const __m256i s1_05_7 = _mm256_srai_epi32(s1_05_5, DCT_CONST_BITS);
+        const __m256i s1_06_6 = _mm256_srai_epi32(s1_06_4, DCT_CONST_BITS);
+        const __m256i s1_06_7 = _mm256_srai_epi32(s1_06_5, DCT_CONST_BITS);
+        // Combine
+        step1[5] = _mm256_packs_epi32(s1_05_6, s1_05_7);
+        step1[6] = _mm256_packs_epi32(s1_06_6, s1_06_7);
+      }
+      {
+        const __m256i s1_18_0 = _mm256_unpacklo_epi16(step3[18], step3[29]);
+        const __m256i s1_18_1 = _mm256_unpackhi_epi16(step3[18], step3[29]);
+        const __m256i s1_19_0 = _mm256_unpacklo_epi16(step3[19], step3[28]);
+        const __m256i s1_19_1 = _mm256_unpackhi_epi16(step3[19], step3[28]);
+        const __m256i s1_20_0 = _mm256_unpacklo_epi16(step3[20], step3[27]);
+        const __m256i s1_20_1 = _mm256_unpackhi_epi16(step3[20], step3[27]);
+        const __m256i s1_21_0 = _mm256_unpacklo_epi16(step3[21], step3[26]);
+        const __m256i s1_21_1 = _mm256_unpackhi_epi16(step3[21], step3[26]);
+        const __m256i s1_18_2 = _mm256_madd_epi16(s1_18_0, k__cospi_m08_p24);
+        const __m256i s1_18_3 = _mm256_madd_epi16(s1_18_1, k__cospi_m08_p24);
+        const __m256i s1_19_2 = _mm256_madd_epi16(s1_19_0, k__cospi_m08_p24);
+        const __m256i s1_19_3 = _mm256_madd_epi16(s1_19_1, k__cospi_m08_p24);
+        const __m256i s1_20_2 = _mm256_madd_epi16(s1_20_0, k__cospi_m24_m08);
+        const __m256i s1_20_3 = _mm256_madd_epi16(s1_20_1, k__cospi_m24_m08);
+        const __m256i s1_21_2 = _mm256_madd_epi16(s1_21_0, k__cospi_m24_m08);
+        const __m256i s1_21_3 = _mm256_madd_epi16(s1_21_1, k__cospi_m24_m08);
+        const __m256i s1_26_2 = _mm256_madd_epi16(s1_21_0, k__cospi_m08_p24);
+        const __m256i s1_26_3 = _mm256_madd_epi16(s1_21_1, k__cospi_m08_p24);
+        const __m256i s1_27_2 = _mm256_madd_epi16(s1_20_0, k__cospi_m08_p24);
+        const __m256i s1_27_3 = _mm256_madd_epi16(s1_20_1, k__cospi_m08_p24);
+        const __m256i s1_28_2 = _mm256_madd_epi16(s1_19_0, k__cospi_p24_p08);
+        const __m256i s1_28_3 = _mm256_madd_epi16(s1_19_1, k__cospi_p24_p08);
+        const __m256i s1_29_2 = _mm256_madd_epi16(s1_18_0, k__cospi_p24_p08);
+        const __m256i s1_29_3 = _mm256_madd_epi16(s1_18_1, k__cospi_p24_p08);
+        // dct_const_round_shift
+        const __m256i s1_18_4 = _mm256_add_epi32(s1_18_2, k__DCT_CONST_ROUNDING);
+        const __m256i s1_18_5 = _mm256_add_epi32(s1_18_3, k__DCT_CONST_ROUNDING);
+        const __m256i s1_19_4 = _mm256_add_epi32(s1_19_2, k__DCT_CONST_ROUNDING);
+        const __m256i s1_19_5 = _mm256_add_epi32(s1_19_3, k__DCT_CONST_ROUNDING);
+        const __m256i s1_20_4 = _mm256_add_epi32(s1_20_2, k__DCT_CONST_ROUNDING);
+        const __m256i s1_20_5 = _mm256_add_epi32(s1_20_3, k__DCT_CONST_ROUNDING);
+        const __m256i s1_21_4 = _mm256_add_epi32(s1_21_2, k__DCT_CONST_ROUNDING);
+        const __m256i s1_21_5 = _mm256_add_epi32(s1_21_3, k__DCT_CONST_ROUNDING);
+        const __m256i s1_26_4 = _mm256_add_epi32(s1_26_2, k__DCT_CONST_ROUNDING);
+        const __m256i s1_26_5 = _mm256_add_epi32(s1_26_3, k__DCT_CONST_ROUNDING);
+        const __m256i s1_27_4 = _mm256_add_epi32(s1_27_2, k__DCT_CONST_ROUNDING);
+        const __m256i s1_27_5 = _mm256_add_epi32(s1_27_3, k__DCT_CONST_ROUNDING);
+        const __m256i s1_28_4 = _mm256_add_epi32(s1_28_2, k__DCT_CONST_ROUNDING);
+        const __m256i s1_28_5 = _mm256_add_epi32(s1_28_3, k__DCT_CONST_ROUNDING);
+        const __m256i s1_29_4 = _mm256_add_epi32(s1_29_2, k__DCT_CONST_ROUNDING);
+        const __m256i s1_29_5 = _mm256_add_epi32(s1_29_3, k__DCT_CONST_ROUNDING);
+        const __m256i s1_18_6 = _mm256_srai_epi32(s1_18_4, DCT_CONST_BITS);
+        const __m256i s1_18_7 = _mm256_srai_epi32(s1_18_5, DCT_CONST_BITS);
+        const __m256i s1_19_6 = _mm256_srai_epi32(s1_19_4, DCT_CONST_BITS);
+        const __m256i s1_19_7 = _mm256_srai_epi32(s1_19_5, DCT_CONST_BITS);
+        const __m256i s1_20_6 = _mm256_srai_epi32(s1_20_4, DCT_CONST_BITS);
+        const __m256i s1_20_7 = _mm256_srai_epi32(s1_20_5, DCT_CONST_BITS);
+        const __m256i s1_21_6 = _mm256_srai_epi32(s1_21_4, DCT_CONST_BITS);
+        const __m256i s1_21_7 = _mm256_srai_epi32(s1_21_5, DCT_CONST_BITS);
+        const __m256i s1_26_6 = _mm256_srai_epi32(s1_26_4, DCT_CONST_BITS);
+        const __m256i s1_26_7 = _mm256_srai_epi32(s1_26_5, DCT_CONST_BITS);
+        const __m256i s1_27_6 = _mm256_srai_epi32(s1_27_4, DCT_CONST_BITS);
+        const __m256i s1_27_7 = _mm256_srai_epi32(s1_27_5, DCT_CONST_BITS);
+        const __m256i s1_28_6 = _mm256_srai_epi32(s1_28_4, DCT_CONST_BITS);
+        const __m256i s1_28_7 = _mm256_srai_epi32(s1_28_5, DCT_CONST_BITS);
+        const __m256i s1_29_6 = _mm256_srai_epi32(s1_29_4, DCT_CONST_BITS);
+        const __m256i s1_29_7 = _mm256_srai_epi32(s1_29_5, DCT_CONST_BITS);
+        // Combine
+        step1[18] = _mm256_packs_epi32(s1_18_6, s1_18_7);
+        step1[19] = _mm256_packs_epi32(s1_19_6, s1_19_7);
+        step1[20] = _mm256_packs_epi32(s1_20_6, s1_20_7);
+        step1[21] = _mm256_packs_epi32(s1_21_6, s1_21_7);
+        step1[26] = _mm256_packs_epi32(s1_26_6, s1_26_7);
+        step1[27] = _mm256_packs_epi32(s1_27_6, s1_27_7);
+        step1[28] = _mm256_packs_epi32(s1_28_6, s1_28_7);
+        step1[29] = _mm256_packs_epi32(s1_29_6, s1_29_7);
+      }
+      // Stage 5
+      {
+        step2[4] = _mm256_add_epi16(step1[5], step3[4]);
+        step2[5] = _mm256_sub_epi16(step3[4], step1[5]);
+        step2[6] = _mm256_sub_epi16(step3[7], step1[6]);
+        step2[7] = _mm256_add_epi16(step1[6], step3[7]);
+      }
+      {
+        const __m256i out_00_0 = _mm256_unpacklo_epi16(step1[0], step1[1]);
+        const __m256i out_00_1 = _mm256_unpackhi_epi16(step1[0], step1[1]);
+        const __m256i out_08_0 = _mm256_unpacklo_epi16(step1[2], step1[3]);
+        const __m256i out_08_1 = _mm256_unpackhi_epi16(step1[2], step1[3]);
+        const __m256i out_00_2 = _mm256_madd_epi16(out_00_0, k__cospi_p16_p16);
+        const __m256i out_00_3 = _mm256_madd_epi16(out_00_1, k__cospi_p16_p16);
+        const __m256i out_16_2 = _mm256_madd_epi16(out_00_0, k__cospi_p16_m16);
+        const __m256i out_16_3 = _mm256_madd_epi16(out_00_1, k__cospi_p16_m16);
+        const __m256i out_08_2 = _mm256_madd_epi16(out_08_0, k__cospi_p24_p08);
+        const __m256i out_08_3 = _mm256_madd_epi16(out_08_1, k__cospi_p24_p08);
+        const __m256i out_24_2 = _mm256_madd_epi16(out_08_0, k__cospi_m08_p24);
+        const __m256i out_24_3 = _mm256_madd_epi16(out_08_1, k__cospi_m08_p24);
+        // dct_const_round_shift
+        const __m256i out_00_4 = _mm256_add_epi32(out_00_2, k__DCT_CONST_ROUNDING);
+        const __m256i out_00_5 = _mm256_add_epi32(out_00_3, k__DCT_CONST_ROUNDING);
+        const __m256i out_16_4 = _mm256_add_epi32(out_16_2, k__DCT_CONST_ROUNDING);
+        const __m256i out_16_5 = _mm256_add_epi32(out_16_3, k__DCT_CONST_ROUNDING);
+        const __m256i out_08_4 = _mm256_add_epi32(out_08_2, k__DCT_CONST_ROUNDING);
+        const __m256i out_08_5 = _mm256_add_epi32(out_08_3, k__DCT_CONST_ROUNDING);
+        const __m256i out_24_4 = _mm256_add_epi32(out_24_2, k__DCT_CONST_ROUNDING);
+        const __m256i out_24_5 = _mm256_add_epi32(out_24_3, k__DCT_CONST_ROUNDING);
+        const __m256i out_00_6 = _mm256_srai_epi32(out_00_4, DCT_CONST_BITS);
+        const __m256i out_00_7 = _mm256_srai_epi32(out_00_5, DCT_CONST_BITS);
+        const __m256i out_16_6 = _mm256_srai_epi32(out_16_4, DCT_CONST_BITS);
+        const __m256i out_16_7 = _mm256_srai_epi32(out_16_5, DCT_CONST_BITS);
+        const __m256i out_08_6 = _mm256_srai_epi32(out_08_4, DCT_CONST_BITS);
+        const __m256i out_08_7 = _mm256_srai_epi32(out_08_5, DCT_CONST_BITS);
+        const __m256i out_24_6 = _mm256_srai_epi32(out_24_4, DCT_CONST_BITS);
+        const __m256i out_24_7 = _mm256_srai_epi32(out_24_5, DCT_CONST_BITS);
+        // Combine
+        out[ 0] = _mm256_packs_epi32(out_00_6, out_00_7);
+        out[16] = _mm256_packs_epi32(out_16_6, out_16_7);
+        out[ 8] = _mm256_packs_epi32(out_08_6, out_08_7);
+        out[24] = _mm256_packs_epi32(out_24_6, out_24_7);
+      }
+      {
+        const __m256i s2_09_0 = _mm256_unpacklo_epi16(step1[ 9], step1[14]);
+        const __m256i s2_09_1 = _mm256_unpackhi_epi16(step1[ 9], step1[14]);
+        const __m256i s2_10_0 = _mm256_unpacklo_epi16(step1[10], step1[13]);
+        const __m256i s2_10_1 = _mm256_unpackhi_epi16(step1[10], step1[13]);
+        const __m256i s2_09_2 = _mm256_madd_epi16(s2_09_0, k__cospi_m08_p24);
+        const __m256i s2_09_3 = _mm256_madd_epi16(s2_09_1, k__cospi_m08_p24);
+        const __m256i s2_10_2 = _mm256_madd_epi16(s2_10_0, k__cospi_m24_m08);
+        const __m256i s2_10_3 = _mm256_madd_epi16(s2_10_1, k__cospi_m24_m08);
+        const __m256i s2_13_2 = _mm256_madd_epi16(s2_10_0, k__cospi_m08_p24);
+        const __m256i s2_13_3 = _mm256_madd_epi16(s2_10_1, k__cospi_m08_p24);
+        const __m256i s2_14_2 = _mm256_madd_epi16(s2_09_0, k__cospi_p24_p08);
+        const __m256i s2_14_3 = _mm256_madd_epi16(s2_09_1, k__cospi_p24_p08);
+        // dct_const_round_shift
+        const __m256i s2_09_4 = _mm256_add_epi32(s2_09_2, k__DCT_CONST_ROUNDING);
+        const __m256i s2_09_5 = _mm256_add_epi32(s2_09_3, k__DCT_CONST_ROUNDING);
+        const __m256i s2_10_4 = _mm256_add_epi32(s2_10_2, k__DCT_CONST_ROUNDING);
+        const __m256i s2_10_5 = _mm256_add_epi32(s2_10_3, k__DCT_CONST_ROUNDING);
+        const __m256i s2_13_4 = _mm256_add_epi32(s2_13_2, k__DCT_CONST_ROUNDING);
+        const __m256i s2_13_5 = _mm256_add_epi32(s2_13_3, k__DCT_CONST_ROUNDING);
+        const __m256i s2_14_4 = _mm256_add_epi32(s2_14_2, k__DCT_CONST_ROUNDING);
+        const __m256i s2_14_5 = _mm256_add_epi32(s2_14_3, k__DCT_CONST_ROUNDING);
+        const __m256i s2_09_6 = _mm256_srai_epi32(s2_09_4, DCT_CONST_BITS);
+        const __m256i s2_09_7 = _mm256_srai_epi32(s2_09_5, DCT_CONST_BITS);
+        const __m256i s2_10_6 = _mm256_srai_epi32(s2_10_4, DCT_CONST_BITS);
+        const __m256i s2_10_7 = _mm256_srai_epi32(s2_10_5, DCT_CONST_BITS);
+        const __m256i s2_13_6 = _mm256_srai_epi32(s2_13_4, DCT_CONST_BITS);
+        const __m256i s2_13_7 = _mm256_srai_epi32(s2_13_5, DCT_CONST_BITS);
+        const __m256i s2_14_6 = _mm256_srai_epi32(s2_14_4, DCT_CONST_BITS);
+        const __m256i s2_14_7 = _mm256_srai_epi32(s2_14_5, DCT_CONST_BITS);
+        // Combine
+        step2[ 9] = _mm256_packs_epi32(s2_09_6, s2_09_7);
+        step2[10] = _mm256_packs_epi32(s2_10_6, s2_10_7);
+        step2[13] = _mm256_packs_epi32(s2_13_6, s2_13_7);
+        step2[14] = _mm256_packs_epi32(s2_14_6, s2_14_7);
+      }
+      {
+        step2[16] = _mm256_add_epi16(step1[19], step3[16]);
+        step2[17] = _mm256_add_epi16(step1[18], step3[17]);
+        step2[18] = _mm256_sub_epi16(step3[17], step1[18]);
+        step2[19] = _mm256_sub_epi16(step3[16], step1[19]);
+        step2[20] = _mm256_sub_epi16(step3[23], step1[20]);
+        step2[21] = _mm256_sub_epi16(step3[22], step1[21]);
+        step2[22] = _mm256_add_epi16(step1[21], step3[22]);
+        step2[23] = _mm256_add_epi16(step1[20], step3[23]);
+        step2[24] = _mm256_add_epi16(step1[27], step3[24]);
+        step2[25] = _mm256_add_epi16(step1[26], step3[25]);
+        step2[26] = _mm256_sub_epi16(step3[25], step1[26]);
+        step2[27] = _mm256_sub_epi16(step3[24], step1[27]);
+        step2[28] = _mm256_sub_epi16(step3[31], step1[28]);
+        step2[29] = _mm256_sub_epi16(step3[30], step1[29]);
+        step2[30] = _mm256_add_epi16(step1[29], step3[30]);
+        step2[31] = _mm256_add_epi16(step1[28], step3[31]);
+      }
+      // Stage 6
+      {
+        const __m256i out_04_0 = _mm256_unpacklo_epi16(step2[4], step2[7]);
+        const __m256i out_04_1 = _mm256_unpackhi_epi16(step2[4], step2[7]);
+        const __m256i out_20_0 = _mm256_unpacklo_epi16(step2[5], step2[6]);
+        const __m256i out_20_1 = _mm256_unpackhi_epi16(step2[5], step2[6]);
+        const __m256i out_12_0 = _mm256_unpacklo_epi16(step2[5], step2[6]);
+        const __m256i out_12_1 = _mm256_unpackhi_epi16(step2[5], step2[6]);
+        const __m256i out_28_0 = _mm256_unpacklo_epi16(step2[4], step2[7]);
+        const __m256i out_28_1 = _mm256_unpackhi_epi16(step2[4], step2[7]);
+        const __m256i out_04_2 = _mm256_madd_epi16(out_04_0, k__cospi_p28_p04);
+        const __m256i out_04_3 = _mm256_madd_epi16(out_04_1, k__cospi_p28_p04);
+        const __m256i out_20_2 = _mm256_madd_epi16(out_20_0, k__cospi_p12_p20);
+        const __m256i out_20_3 = _mm256_madd_epi16(out_20_1, k__cospi_p12_p20);
+        const __m256i out_12_2 = _mm256_madd_epi16(out_12_0, k__cospi_m20_p12);
+        const __m256i out_12_3 = _mm256_madd_epi16(out_12_1, k__cospi_m20_p12);
+        const __m256i out_28_2 = _mm256_madd_epi16(out_28_0, k__cospi_m04_p28);
+        const __m256i out_28_3 = _mm256_madd_epi16(out_28_1, k__cospi_m04_p28);
+        // dct_const_round_shift
+        const __m256i out_04_4 = _mm256_add_epi32(out_04_2, k__DCT_CONST_ROUNDING);
+        const __m256i out_04_5 = _mm256_add_epi32(out_04_3, k__DCT_CONST_ROUNDING);
+        const __m256i out_20_4 = _mm256_add_epi32(out_20_2, k__DCT_CONST_ROUNDING);
+        const __m256i out_20_5 = _mm256_add_epi32(out_20_3, k__DCT_CONST_ROUNDING);
+        const __m256i out_12_4 = _mm256_add_epi32(out_12_2, k__DCT_CONST_ROUNDING);
+        const __m256i out_12_5 = _mm256_add_epi32(out_12_3, k__DCT_CONST_ROUNDING);
+        const __m256i out_28_4 = _mm256_add_epi32(out_28_2, k__DCT_CONST_ROUNDING);
+        const __m256i out_28_5 = _mm256_add_epi32(out_28_3, k__DCT_CONST_ROUNDING);
+        const __m256i out_04_6 = _mm256_srai_epi32(out_04_4, DCT_CONST_BITS);
+        const __m256i out_04_7 = _mm256_srai_epi32(out_04_5, DCT_CONST_BITS);
+        const __m256i out_20_6 = _mm256_srai_epi32(out_20_4, DCT_CONST_BITS);
+        const __m256i out_20_7 = _mm256_srai_epi32(out_20_5, DCT_CONST_BITS);
+        const __m256i out_12_6 = _mm256_srai_epi32(out_12_4, DCT_CONST_BITS);
+        const __m256i out_12_7 = _mm256_srai_epi32(out_12_5, DCT_CONST_BITS);
+        const __m256i out_28_6 = _mm256_srai_epi32(out_28_4, DCT_CONST_BITS);
+        const __m256i out_28_7 = _mm256_srai_epi32(out_28_5, DCT_CONST_BITS);
+        // Combine
+        out[ 4] = _mm256_packs_epi32(out_04_6, out_04_7);
+        out[20] = _mm256_packs_epi32(out_20_6, out_20_7);
+        out[12] = _mm256_packs_epi32(out_12_6, out_12_7);
+        out[28] = _mm256_packs_epi32(out_28_6, out_28_7);
+      }
+      {
+        step3[ 8] = _mm256_add_epi16(step2[ 9], step1[ 8]);
+        step3[ 9] = _mm256_sub_epi16(step1[ 8], step2[ 9]);
+        step3[10] = _mm256_sub_epi16(step1[11], step2[10]);
+        step3[11] = _mm256_add_epi16(step2[10], step1[11]);
+        step3[12] = _mm256_add_epi16(step2[13], step1[12]);
+        step3[13] = _mm256_sub_epi16(step1[12], step2[13]);
+        step3[14] = _mm256_sub_epi16(step1[15], step2[14]);
+        step3[15] = _mm256_add_epi16(step2[14], step1[15]);
+      }
+      {
+        const __m256i s3_17_0 = _mm256_unpacklo_epi16(step2[17], step2[30]);
+        const __m256i s3_17_1 = _mm256_unpackhi_epi16(step2[17], step2[30]);
+        const __m256i s3_18_0 = _mm256_unpacklo_epi16(step2[18], step2[29]);
+        const __m256i s3_18_1 = _mm256_unpackhi_epi16(step2[18], step2[29]);
+        const __m256i s3_21_0 = _mm256_unpacklo_epi16(step2[21], step2[26]);
+        const __m256i s3_21_1 = _mm256_unpackhi_epi16(step2[21], step2[26]);
+        const __m256i s3_22_0 = _mm256_unpacklo_epi16(step2[22], step2[25]);
+        const __m256i s3_22_1 = _mm256_unpackhi_epi16(step2[22], step2[25]);
+        const __m256i s3_17_2 = _mm256_madd_epi16(s3_17_0, k__cospi_m04_p28);
+        const __m256i s3_17_3 = _mm256_madd_epi16(s3_17_1, k__cospi_m04_p28);
+        const __m256i s3_18_2 = _mm256_madd_epi16(s3_18_0, k__cospi_m28_m04);
+        const __m256i s3_18_3 = _mm256_madd_epi16(s3_18_1, k__cospi_m28_m04);
+        const __m256i s3_21_2 = _mm256_madd_epi16(s3_21_0, k__cospi_m20_p12);
+        const __m256i s3_21_3 = _mm256_madd_epi16(s3_21_1, k__cospi_m20_p12);
+        const __m256i s3_22_2 = _mm256_madd_epi16(s3_22_0, k__cospi_m12_m20);
+        const __m256i s3_22_3 = _mm256_madd_epi16(s3_22_1, k__cospi_m12_m20);
+        const __m256i s3_25_2 = _mm256_madd_epi16(s3_22_0, k__cospi_m20_p12);
+        const __m256i s3_25_3 = _mm256_madd_epi16(s3_22_1, k__cospi_m20_p12);
+        const __m256i s3_26_2 = _mm256_madd_epi16(s3_21_0, k__cospi_p12_p20);
+        const __m256i s3_26_3 = _mm256_madd_epi16(s3_21_1, k__cospi_p12_p20);
+        const __m256i s3_29_2 = _mm256_madd_epi16(s3_18_0, k__cospi_m04_p28);
+        const __m256i s3_29_3 = _mm256_madd_epi16(s3_18_1, k__cospi_m04_p28);
+        const __m256i s3_30_2 = _mm256_madd_epi16(s3_17_0, k__cospi_p28_p04);
+        const __m256i s3_30_3 = _mm256_madd_epi16(s3_17_1, k__cospi_p28_p04);
+        // dct_const_round_shift
+        const __m256i s3_17_4 = _mm256_add_epi32(s3_17_2, k__DCT_CONST_ROUNDING);
+        const __m256i s3_17_5 = _mm256_add_epi32(s3_17_3, k__DCT_CONST_ROUNDING);
+        const __m256i s3_18_4 = _mm256_add_epi32(s3_18_2, k__DCT_CONST_ROUNDING);
+        const __m256i s3_18_5 = _mm256_add_epi32(s3_18_3, k__DCT_CONST_ROUNDING);
+        const __m256i s3_21_4 = _mm256_add_epi32(s3_21_2, k__DCT_CONST_ROUNDING);
+        const __m256i s3_21_5 = _mm256_add_epi32(s3_21_3, k__DCT_CONST_ROUNDING);
+        const __m256i s3_22_4 = _mm256_add_epi32(s3_22_2, k__DCT_CONST_ROUNDING);
+        const __m256i s3_22_5 = _mm256_add_epi32(s3_22_3, k__DCT_CONST_ROUNDING);
+        const __m256i s3_17_6 = _mm256_srai_epi32(s3_17_4, DCT_CONST_BITS);
+        const __m256i s3_17_7 = _mm256_srai_epi32(s3_17_5, DCT_CONST_BITS);
+        const __m256i s3_18_6 = _mm256_srai_epi32(s3_18_4, DCT_CONST_BITS);
+        const __m256i s3_18_7 = _mm256_srai_epi32(s3_18_5, DCT_CONST_BITS);
+        const __m256i s3_21_6 = _mm256_srai_epi32(s3_21_4, DCT_CONST_BITS);
+        const __m256i s3_21_7 = _mm256_srai_epi32(s3_21_5, DCT_CONST_BITS);
+        const __m256i s3_22_6 = _mm256_srai_epi32(s3_22_4, DCT_CONST_BITS);
+        const __m256i s3_22_7 = _mm256_srai_epi32(s3_22_5, DCT_CONST_BITS);
+        const __m256i s3_25_4 = _mm256_add_epi32(s3_25_2, k__DCT_CONST_ROUNDING);
+        const __m256i s3_25_5 = _mm256_add_epi32(s3_25_3, k__DCT_CONST_ROUNDING);
+        const __m256i s3_26_4 = _mm256_add_epi32(s3_26_2, k__DCT_CONST_ROUNDING);
+        const __m256i s3_26_5 = _mm256_add_epi32(s3_26_3, k__DCT_CONST_ROUNDING);
+        const __m256i s3_29_4 = _mm256_add_epi32(s3_29_2, k__DCT_CONST_ROUNDING);
+        const __m256i s3_29_5 = _mm256_add_epi32(s3_29_3, k__DCT_CONST_ROUNDING);
+        const __m256i s3_30_4 = _mm256_add_epi32(s3_30_2, k__DCT_CONST_ROUNDING);
+        const __m256i s3_30_5 = _mm256_add_epi32(s3_30_3, k__DCT_CONST_ROUNDING);
+        const __m256i s3_25_6 = _mm256_srai_epi32(s3_25_4, DCT_CONST_BITS);
+        const __m256i s3_25_7 = _mm256_srai_epi32(s3_25_5, DCT_CONST_BITS);
+        const __m256i s3_26_6 = _mm256_srai_epi32(s3_26_4, DCT_CONST_BITS);
+        const __m256i s3_26_7 = _mm256_srai_epi32(s3_26_5, DCT_CONST_BITS);
+        const __m256i s3_29_6 = _mm256_srai_epi32(s3_29_4, DCT_CONST_BITS);
+        const __m256i s3_29_7 = _mm256_srai_epi32(s3_29_5, DCT_CONST_BITS);
+        const __m256i s3_30_6 = _mm256_srai_epi32(s3_30_4, DCT_CONST_BITS);
+        const __m256i s3_30_7 = _mm256_srai_epi32(s3_30_5, DCT_CONST_BITS);
+        // Combine
+        step3[17] = _mm256_packs_epi32(s3_17_6, s3_17_7);
+        step3[18] = _mm256_packs_epi32(s3_18_6, s3_18_7);
+        step3[21] = _mm256_packs_epi32(s3_21_6, s3_21_7);
+        step3[22] = _mm256_packs_epi32(s3_22_6, s3_22_7);
+        // Combine
+        step3[25] = _mm256_packs_epi32(s3_25_6, s3_25_7);
+        step3[26] = _mm256_packs_epi32(s3_26_6, s3_26_7);
+        step3[29] = _mm256_packs_epi32(s3_29_6, s3_29_7);
+        step3[30] = _mm256_packs_epi32(s3_30_6, s3_30_7);
+      }
+      // Stage 7
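+      // Rotations of step3[8..15] yield outputs 2, 6, 10, 14, 18, 22, 26
+      // and 30.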
+      {
+        const __m256i out_02_0 = _mm256_unpacklo_epi16(step3[ 8], step3[15]);
+        const __m256i out_02_1 = _mm256_unpackhi_epi16(step3[ 8], step3[15]);
+        const __m256i out_18_0 = _mm256_unpacklo_epi16(step3[ 9], step3[14]);
+        const __m256i out_18_1 = _mm256_unpackhi_epi16(step3[ 9], step3[14]);
+        const __m256i out_10_0 = _mm256_unpacklo_epi16(step3[10], step3[13]);
+        const __m256i out_10_1 = _mm256_unpackhi_epi16(step3[10], step3[13]);
+        const __m256i out_26_0 = _mm256_unpacklo_epi16(step3[11], step3[12]);
+        const __m256i out_26_1 = _mm256_unpackhi_epi16(step3[11], step3[12]);
+        const __m256i out_02_2 = _mm256_madd_epi16(out_02_0, k__cospi_p30_p02);
+        const __m256i out_02_3 = _mm256_madd_epi16(out_02_1, k__cospi_p30_p02);
+        const __m256i out_18_2 = _mm256_madd_epi16(out_18_0, k__cospi_p14_p18);
+        const __m256i out_18_3 = _mm256_madd_epi16(out_18_1, k__cospi_p14_p18);
+        const __m256i out_10_2 = _mm256_madd_epi16(out_10_0, k__cospi_p22_p10);
+        const __m256i out_10_3 = _mm256_madd_epi16(out_10_1, k__cospi_p22_p10);
+        const __m256i out_26_2 = _mm256_madd_epi16(out_26_0, k__cospi_p06_p26);
+        const __m256i out_26_3 = _mm256_madd_epi16(out_26_1, k__cospi_p06_p26);
+        const __m256i out_06_2 = _mm256_madd_epi16(out_26_0, k__cospi_m26_p06);
+        const __m256i out_06_3 = _mm256_madd_epi16(out_26_1, k__cospi_m26_p06);
+        const __m256i out_22_2 = _mm256_madd_epi16(out_10_0, k__cospi_m10_p22);
+        const __m256i out_22_3 = _mm256_madd_epi16(out_10_1, k__cospi_m10_p22);
+        const __m256i out_14_2 = _mm256_madd_epi16(out_18_0, k__cospi_m18_p14);
+        const __m256i out_14_3 = _mm256_madd_epi16(out_18_1, k__cospi_m18_p14);
+        const __m256i out_30_2 = _mm256_madd_epi16(out_02_0, k__cospi_m02_p30);
+        const __m256i out_30_3 = _mm256_madd_epi16(out_02_1, k__cospi_m02_p30);
+        // dct_const_round_shift
+        const __m256i out_02_4 = _mm256_add_epi32(out_02_2, k__DCT_CONST_ROUNDING);
+        const __m256i out_02_5 = _mm256_add_epi32(out_02_3, k__DCT_CONST_ROUNDING);
+        const __m256i out_18_4 = _mm256_add_epi32(out_18_2, k__DCT_CONST_ROUNDING);
+        const __m256i out_18_5 = _mm256_add_epi32(out_18_3, k__DCT_CONST_ROUNDING);
+        const __m256i out_10_4 = _mm256_add_epi32(out_10_2, k__DCT_CONST_ROUNDING);
+        const __m256i out_10_5 = _mm256_add_epi32(out_10_3, k__DCT_CONST_ROUNDING);
+        const __m256i out_26_4 = _mm256_add_epi32(out_26_2, k__DCT_CONST_ROUNDING);
+        const __m256i out_26_5 = _mm256_add_epi32(out_26_3, k__DCT_CONST_ROUNDING);
+        const __m256i out_06_4 = _mm256_add_epi32(out_06_2, k__DCT_CONST_ROUNDING);
+        const __m256i out_06_5 = _mm256_add_epi32(out_06_3, k__DCT_CONST_ROUNDING);
+        const __m256i out_22_4 = _mm256_add_epi32(out_22_2, k__DCT_CONST_ROUNDING);
+        const __m256i out_22_5 = _mm256_add_epi32(out_22_3, k__DCT_CONST_ROUNDING);
+        const __m256i out_14_4 = _mm256_add_epi32(out_14_2, k__DCT_CONST_ROUNDING);
+        const __m256i out_14_5 = _mm256_add_epi32(out_14_3, k__DCT_CONST_ROUNDING);
+        const __m256i out_30_4 = _mm256_add_epi32(out_30_2, k__DCT_CONST_ROUNDING);
+        const __m256i out_30_5 = _mm256_add_epi32(out_30_3, k__DCT_CONST_ROUNDING);
+        const __m256i out_02_6 = _mm256_srai_epi32(out_02_4, DCT_CONST_BITS);
+        const __m256i out_02_7 = _mm256_srai_epi32(out_02_5, DCT_CONST_BITS);
+        const __m256i out_18_6 = _mm256_srai_epi32(out_18_4, DCT_CONST_BITS);
+        const __m256i out_18_7 = _mm256_srai_epi32(out_18_5, DCT_CONST_BITS);
+        const __m256i out_10_6 = _mm256_srai_epi32(out_10_4, DCT_CONST_BITS);
+        const __m256i out_10_7 = _mm256_srai_epi32(out_10_5, DCT_CONST_BITS);
+        const __m256i out_26_6 = _mm256_srai_epi32(out_26_4, DCT_CONST_BITS);
+        const __m256i out_26_7 = _mm256_srai_epi32(out_26_5, DCT_CONST_BITS);
+        const __m256i out_06_6 = _mm256_srai_epi32(out_06_4, DCT_CONST_BITS);
+        const __m256i out_06_7 = _mm256_srai_epi32(out_06_5, DCT_CONST_BITS);
+        const __m256i out_22_6 = _mm256_srai_epi32(out_22_4, DCT_CONST_BITS);
+        const __m256i out_22_7 = _mm256_srai_epi32(out_22_5, DCT_CONST_BITS);
+        const __m256i out_14_6 = _mm256_srai_epi32(out_14_4, DCT_CONST_BITS);
+        const __m256i out_14_7 = _mm256_srai_epi32(out_14_5, DCT_CONST_BITS);
+        const __m256i out_30_6 = _mm256_srai_epi32(out_30_4, DCT_CONST_BITS);
+        const __m256i out_30_7 = _mm256_srai_epi32(out_30_5, DCT_CONST_BITS);
+        // Combine
+        out[ 2] = _mm256_packs_epi32(out_02_6, out_02_7);
+        out[18] = _mm256_packs_epi32(out_18_6, out_18_7);
+        out[10] = _mm256_packs_epi32(out_10_6, out_10_7);
+        out[26] = _mm256_packs_epi32(out_26_6, out_26_7);
+        out[ 6] = _mm256_packs_epi32(out_06_6, out_06_7);
+        out[22] = _mm256_packs_epi32(out_22_6, out_22_7);
+        out[14] = _mm256_packs_epi32(out_14_6, out_14_7);
+        out[30] = _mm256_packs_epi32(out_30_6, out_30_7);
+      }
+      {
+        step1[16] = _mm256_add_epi16(step3[17], step2[16]);
+        step1[17] = _mm256_sub_epi16(step2[16], step3[17]);
+        step1[18] = _mm256_sub_epi16(step2[19], step3[18]);
+        step1[19] = _mm256_add_epi16(step3[18], step2[19]);
+        step1[20] = _mm256_add_epi16(step3[21], step2[20]);
+        step1[21] = _mm256_sub_epi16(step2[20], step3[21]);
+        step1[22] = _mm256_sub_epi16(step2[23], step3[22]);
+        step1[23] = _mm256_add_epi16(step3[22], step2[23]);
+        step1[24] = _mm256_add_epi16(step3[25], step2[24]);
+        step1[25] = _mm256_sub_epi16(step2[24], step3[25]);
+        step1[26] = _mm256_sub_epi16(step2[27], step3[26]);
+        step1[27] = _mm256_add_epi16(step3[26], step2[27]);
+        step1[28] = _mm256_add_epi16(step3[29], step2[28]);
+        step1[29] = _mm256_sub_epi16(step2[28], step3[29]);
+        step1[30] = _mm256_sub_epi16(step2[31], step3[30]);
+        step1[31] = _mm256_add_epi16(step3[30], step2[31]);
+      }
+      // Final stage: output indices are bit-reversed.
+      {
+        const __m256i out_01_0 = _mm256_unpacklo_epi16(step1[16], step1[31]);
+        const __m256i out_01_1 = _mm256_unpackhi_epi16(step1[16], step1[31]);
+        const __m256i out_17_0 = _mm256_unpacklo_epi16(step1[17], step1[30]);
+        const __m256i out_17_1 = _mm256_unpackhi_epi16(step1[17], step1[30]);
+        const __m256i out_09_0 = _mm256_unpacklo_epi16(step1[18], step1[29]);
+        const __m256i out_09_1 = _mm256_unpackhi_epi16(step1[18], step1[29]);
+        const __m256i out_25_0 = _mm256_unpacklo_epi16(step1[19], step1[28]);
+        const __m256i out_25_1 = _mm256_unpackhi_epi16(step1[19], step1[28]);
+        const __m256i out_01_2 = _mm256_madd_epi16(out_01_0, k__cospi_p31_p01);
+        const __m256i out_01_3 = _mm256_madd_epi16(out_01_1, k__cospi_p31_p01);
+        const __m256i out_17_2 = _mm256_madd_epi16(out_17_0, k__cospi_p15_p17);
+        const __m256i out_17_3 = _mm256_madd_epi16(out_17_1, k__cospi_p15_p17);
+        const __m256i out_09_2 = _mm256_madd_epi16(out_09_0, k__cospi_p23_p09);
+        const __m256i out_09_3 = _mm256_madd_epi16(out_09_1, k__cospi_p23_p09);
+        const __m256i out_25_2 = _mm256_madd_epi16(out_25_0, k__cospi_p07_p25);
+        const __m256i out_25_3 = _mm256_madd_epi16(out_25_1, k__cospi_p07_p25);
+        const __m256i out_07_2 = _mm256_madd_epi16(out_25_0, k__cospi_m25_p07);
+        const __m256i out_07_3 = _mm256_madd_epi16(out_25_1, k__cospi_m25_p07);
+        const __m256i out_23_2 = _mm256_madd_epi16(out_09_0, k__cospi_m09_p23);
+        const __m256i out_23_3 = _mm256_madd_epi16(out_09_1, k__cospi_m09_p23);
+        const __m256i out_15_2 = _mm256_madd_epi16(out_17_0, k__cospi_m17_p15);
+        const __m256i out_15_3 = _mm256_madd_epi16(out_17_1, k__cospi_m17_p15);
+        const __m256i out_31_2 = _mm256_madd_epi16(out_01_0, k__cospi_m01_p31);
+        const __m256i out_31_3 = _mm256_madd_epi16(out_01_1, k__cospi_m01_p31);
+        // dct_const_round_shift
+        const __m256i out_01_4 = _mm256_add_epi32(out_01_2, k__DCT_CONST_ROUNDING);
+        const __m256i out_01_5 = _mm256_add_epi32(out_01_3, k__DCT_CONST_ROUNDING);
+        const __m256i out_17_4 = _mm256_add_epi32(out_17_2, k__DCT_CONST_ROUNDING);
+        const __m256i out_17_5 = _mm256_add_epi32(out_17_3, k__DCT_CONST_ROUNDING);
+        const __m256i out_09_4 = _mm256_add_epi32(out_09_2, k__DCT_CONST_ROUNDING);
+        const __m256i out_09_5 = _mm256_add_epi32(out_09_3, k__DCT_CONST_ROUNDING);
+        const __m256i out_25_4 = _mm256_add_epi32(out_25_2, k__DCT_CONST_ROUNDING);
+        const __m256i out_25_5 = _mm256_add_epi32(out_25_3, k__DCT_CONST_ROUNDING);
+        const __m256i out_07_4 = _mm256_add_epi32(out_07_2, k__DCT_CONST_ROUNDING);
+        const __m256i out_07_5 = _mm256_add_epi32(out_07_3, k__DCT_CONST_ROUNDING);
+        const __m256i out_23_4 = _mm256_add_epi32(out_23_2, k__DCT_CONST_ROUNDING);
+        const __m256i out_23_5 = _mm256_add_epi32(out_23_3, k__DCT_CONST_ROUNDING);
+        const __m256i out_15_4 = _mm256_add_epi32(out_15_2, k__DCT_CONST_ROUNDING);
+        const __m256i out_15_5 = _mm256_add_epi32(out_15_3, k__DCT_CONST_ROUNDING);
+        const __m256i out_31_4 = _mm256_add_epi32(out_31_2, k__DCT_CONST_ROUNDING);
+        const __m256i out_31_5 = _mm256_add_epi32(out_31_3, k__DCT_CONST_ROUNDING);
+        const __m256i out_01_6 = _mm256_srai_epi32(out_01_4, DCT_CONST_BITS);
+        const __m256i out_01_7 = _mm256_srai_epi32(out_01_5, DCT_CONST_BITS);
+        const __m256i out_17_6 = _mm256_srai_epi32(out_17_4, DCT_CONST_BITS);
+        const __m256i out_17_7 = _mm256_srai_epi32(out_17_5, DCT_CONST_BITS);
+        const __m256i out_09_6 = _mm256_srai_epi32(out_09_4, DCT_CONST_BITS);
+        const __m256i out_09_7 = _mm256_srai_epi32(out_09_5, DCT_CONST_BITS);
+        const __m256i out_25_6 = _mm256_srai_epi32(out_25_4, DCT_CONST_BITS);
+        const __m256i out_25_7 = _mm256_srai_epi32(out_25_5, DCT_CONST_BITS);
+        const __m256i out_07_6 = _mm256_srai_epi32(out_07_4, DCT_CONST_BITS);
+        const __m256i out_07_7 = _mm256_srai_epi32(out_07_5, DCT_CONST_BITS);
+        const __m256i out_23_6 = _mm256_srai_epi32(out_23_4, DCT_CONST_BITS);
+        const __m256i out_23_7 = _mm256_srai_epi32(out_23_5, DCT_CONST_BITS);
+        const __m256i out_15_6 = _mm256_srai_epi32(out_15_4, DCT_CONST_BITS);
+        const __m256i out_15_7 = _mm256_srai_epi32(out_15_5, DCT_CONST_BITS);
+        const __m256i out_31_6 = _mm256_srai_epi32(out_31_4, DCT_CONST_BITS);
+        const __m256i out_31_7 = _mm256_srai_epi32(out_31_5, DCT_CONST_BITS);
+        // Combine
+        out[ 1] = _mm256_packs_epi32(out_01_6, out_01_7);
+        out[17] = _mm256_packs_epi32(out_17_6, out_17_7);
+        out[ 9] = _mm256_packs_epi32(out_09_6, out_09_7);
+        out[25] = _mm256_packs_epi32(out_25_6, out_25_7);
+        out[ 7] = _mm256_packs_epi32(out_07_6, out_07_7);
+        out[23] = _mm256_packs_epi32(out_23_6, out_23_7);
+        out[15] = _mm256_packs_epi32(out_15_6, out_15_7);
+        out[31] = _mm256_packs_epi32(out_31_6, out_31_7);
+      }
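+      // Second half of the bit-reversed odd outputs (3, 5, 11, 13, 19, 21,
+      // 27, 29) comes from step1[20..27].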
+      {
+        const __m256i out_05_0 = _mm256_unpacklo_epi16(step1[20], step1[27]);
+        const __m256i out_05_1 = _mm256_unpackhi_epi16(step1[20], step1[27]);
+        const __m256i out_21_0 = _mm256_unpacklo_epi16(step1[21], step1[26]);
+        const __m256i out_21_1 = _mm256_unpackhi_epi16(step1[21], step1[26]);
+        const __m256i out_13_0 = _mm256_unpacklo_epi16(step1[22], step1[25]);
+        const __m256i out_13_1 = _mm256_unpackhi_epi16(step1[22], step1[25]);
+        const __m256i out_29_0 = _mm256_unpacklo_epi16(step1[23], step1[24]);
+        const __m256i out_29_1 = _mm256_unpackhi_epi16(step1[23], step1[24]);
+        const __m256i out_05_2 = _mm256_madd_epi16(out_05_0, k__cospi_p27_p05);
+        const __m256i out_05_3 = _mm256_madd_epi16(out_05_1, k__cospi_p27_p05);
+        const __m256i out_21_2 = _mm256_madd_epi16(out_21_0, k__cospi_p11_p21);
+        const __m256i out_21_3 = _mm256_madd_epi16(out_21_1, k__cospi_p11_p21);
+        const __m256i out_13_2 = _mm256_madd_epi16(out_13_0, k__cospi_p19_p13);
+        const __m256i out_13_3 = _mm256_madd_epi16(out_13_1, k__cospi_p19_p13);
+        const __m256i out_29_2 = _mm256_madd_epi16(out_29_0, k__cospi_p03_p29);
+        const __m256i out_29_3 = _mm256_madd_epi16(out_29_1, k__cospi_p03_p29);
+        const __m256i out_03_2 = _mm256_madd_epi16(out_29_0, k__cospi_m29_p03);
+        const __m256i out_03_3 = _mm256_madd_epi16(out_29_1, k__cospi_m29_p03);
+        const __m256i out_19_2 = _mm256_madd_epi16(out_13_0, k__cospi_m13_p19);
+        const __m256i out_19_3 = _mm256_madd_epi16(out_13_1, k__cospi_m13_p19);
+        const __m256i out_11_2 = _mm256_madd_epi16(out_21_0, k__cospi_m21_p11);
+        const __m256i out_11_3 = _mm256_madd_epi16(out_21_1, k__cospi_m21_p11);
+        const __m256i out_27_2 = _mm256_madd_epi16(out_05_0, k__cospi_m05_p27);
+        const __m256i out_27_3 = _mm256_madd_epi16(out_05_1, k__cospi_m05_p27);
+        // dct_const_round_shift
+        const __m256i out_05_4 = _mm256_add_epi32(out_05_2, k__DCT_CONST_ROUNDING);
+        const __m256i out_05_5 = _mm256_add_epi32(out_05_3, k__DCT_CONST_ROUNDING);
+        const __m256i out_21_4 = _mm256_add_epi32(out_21_2, k__DCT_CONST_ROUNDING);
+        const __m256i out_21_5 = _mm256_add_epi32(out_21_3, k__DCT_CONST_ROUNDING);
+        const __m256i out_13_4 = _mm256_add_epi32(out_13_2, k__DCT_CONST_ROUNDING);
+        const __m256i out_13_5 = _mm256_add_epi32(out_13_3, k__DCT_CONST_ROUNDING);
+        const __m256i out_29_4 = _mm256_add_epi32(out_29_2, k__DCT_CONST_ROUNDING);
+        const __m256i out_29_5 = _mm256_add_epi32(out_29_3, k__DCT_CONST_ROUNDING);
+        const __m256i out_03_4 = _mm256_add_epi32(out_03_2, k__DCT_CONST_ROUNDING);
+        const __m256i out_03_5 = _mm256_add_epi32(out_03_3, k__DCT_CONST_ROUNDING);
+        const __m256i out_19_4 = _mm256_add_epi32(out_19_2, k__DCT_CONST_ROUNDING);
+        const __m256i out_19_5 = _mm256_add_epi32(out_19_3, k__DCT_CONST_ROUNDING);
+        const __m256i out_11_4 = _mm256_add_epi32(out_11_2, k__DCT_CONST_ROUNDING);
+        const __m256i out_11_5 = _mm256_add_epi32(out_11_3, k__DCT_CONST_ROUNDING);
+        const __m256i out_27_4 = _mm256_add_epi32(out_27_2, k__DCT_CONST_ROUNDING);
+        const __m256i out_27_5 = _mm256_add_epi32(out_27_3, k__DCT_CONST_ROUNDING);
+        const __m256i out_05_6 = _mm256_srai_epi32(out_05_4, DCT_CONST_BITS);
+        const __m256i out_05_7 = _mm256_srai_epi32(out_05_5, DCT_CONST_BITS);
+        const __m256i out_21_6 = _mm256_srai_epi32(out_21_4, DCT_CONST_BITS);
+        const __m256i out_21_7 = _mm256_srai_epi32(out_21_5, DCT_CONST_BITS);
+        const __m256i out_13_6 = _mm256_srai_epi32(out_13_4, DCT_CONST_BITS);
+        const __m256i out_13_7 = _mm256_srai_epi32(out_13_5, DCT_CONST_BITS);
+        const __m256i out_29_6 = _mm256_srai_epi32(out_29_4, DCT_CONST_BITS);
+        const __m256i out_29_7 = _mm256_srai_epi32(out_29_5, DCT_CONST_BITS);
+        const __m256i out_03_6 = _mm256_srai_epi32(out_03_4, DCT_CONST_BITS);
+        const __m256i out_03_7 = _mm256_srai_epi32(out_03_5, DCT_CONST_BITS);
+        const __m256i out_19_6 = _mm256_srai_epi32(out_19_4, DCT_CONST_BITS);
+        const __m256i out_19_7 = _mm256_srai_epi32(out_19_5, DCT_CONST_BITS);
+        const __m256i out_11_6 = _mm256_srai_epi32(out_11_4, DCT_CONST_BITS);
+        const __m256i out_11_7 = _mm256_srai_epi32(out_11_5, DCT_CONST_BITS);
+        const __m256i out_27_6 = _mm256_srai_epi32(out_27_4, DCT_CONST_BITS);
+        const __m256i out_27_7 = _mm256_srai_epi32(out_27_5, DCT_CONST_BITS);
+        // Combine
+        out[ 5] = _mm256_packs_epi32(out_05_6, out_05_7);
+        out[21] = _mm256_packs_epi32(out_21_6, out_21_7);
+        out[13] = _mm256_packs_epi32(out_13_6, out_13_7);
+        out[29] = _mm256_packs_epi32(out_29_6, out_29_7);
+        out[ 3] = _mm256_packs_epi32(out_03_6, out_03_7);
+        out[19] = _mm256_packs_epi32(out_19_6, out_19_7);
+        out[11] = _mm256_packs_epi32(out_11_6, out_11_7);
+        out[27] = _mm256_packs_epi32(out_27_6, out_27_7);
+      }
+#if FDCT32x32_HIGH_PRECISION
+      } else {
+        __m256i lstep1[64], lstep2[64], lstep3[64];
+        __m256i u[32], v[32], sign[16];
+        const __m256i K32One = _mm256_set1_epi32(1);
+        // start using 32-bit operations: widening the intermediates here
+        // keeps the deeper stages from overflowing 16-bit lanes
+        // stage 3
+        {
+          // expanding to 32-bit length prior to addition operations
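+          // (interleaving with zero and taking madd against a vector of
+          // ones computes x*1 + 0*1, i.e. it sign-extends each int16 lane
+          // to int32)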
+          lstep2[ 0] = _mm256_unpacklo_epi16(step2[ 0], kZero);
+          lstep2[ 1] = _mm256_unpackhi_epi16(step2[ 0], kZero);
+          lstep2[ 2] = _mm256_unpacklo_epi16(step2[ 1], kZero);
+          lstep2[ 3] = _mm256_unpackhi_epi16(step2[ 1], kZero);
+          lstep2[ 4] = _mm256_unpacklo_epi16(step2[ 2], kZero);
+          lstep2[ 5] = _mm256_unpackhi_epi16(step2[ 2], kZero);
+          lstep2[ 6] = _mm256_unpacklo_epi16(step2[ 3], kZero);
+          lstep2[ 7] = _mm256_unpackhi_epi16(step2[ 3], kZero);
+          lstep2[ 8] = _mm256_unpacklo_epi16(step2[ 4], kZero);
+          lstep2[ 9] = _mm256_unpackhi_epi16(step2[ 4], kZero);
+          lstep2[10] = _mm256_unpacklo_epi16(step2[ 5], kZero);
+          lstep2[11] = _mm256_unpackhi_epi16(step2[ 5], kZero);
+          lstep2[12] = _mm256_unpacklo_epi16(step2[ 6], kZero);
+          lstep2[13] = _mm256_unpackhi_epi16(step2[ 6], kZero);
+          lstep2[14] = _mm256_unpacklo_epi16(step2[ 7], kZero);
+          lstep2[15] = _mm256_unpackhi_epi16(step2[ 7], kZero);
+          lstep2[ 0] = _mm256_madd_epi16(lstep2[ 0], kOne);
+          lstep2[ 1] = _mm256_madd_epi16(lstep2[ 1], kOne);
+          lstep2[ 2] = _mm256_madd_epi16(lstep2[ 2], kOne);
+          lstep2[ 3] = _mm256_madd_epi16(lstep2[ 3], kOne);
+          lstep2[ 4] = _mm256_madd_epi16(lstep2[ 4], kOne);
+          lstep2[ 5] = _mm256_madd_epi16(lstep2[ 5], kOne);
+          lstep2[ 6] = _mm256_madd_epi16(lstep2[ 6], kOne);
+          lstep2[ 7] = _mm256_madd_epi16(lstep2[ 7], kOne);
+          lstep2[ 8] = _mm256_madd_epi16(lstep2[ 8], kOne);
+          lstep2[ 9] = _mm256_madd_epi16(lstep2[ 9], kOne);
+          lstep2[10] = _mm256_madd_epi16(lstep2[10], kOne);
+          lstep2[11] = _mm256_madd_epi16(lstep2[11], kOne);
+          lstep2[12] = _mm256_madd_epi16(lstep2[12], kOne);
+          lstep2[13] = _mm256_madd_epi16(lstep2[13], kOne);
+          lstep2[14] = _mm256_madd_epi16(lstep2[14], kOne);
+          lstep2[15] = _mm256_madd_epi16(lstep2[15], kOne);
+
+          lstep3[ 0] = _mm256_add_epi32(lstep2[14], lstep2[ 0]);
+          lstep3[ 1] = _mm256_add_epi32(lstep2[15], lstep2[ 1]);
+          lstep3[ 2] = _mm256_add_epi32(lstep2[12], lstep2[ 2]);
+          lstep3[ 3] = _mm256_add_epi32(lstep2[13], lstep2[ 3]);
+          lstep3[ 4] = _mm256_add_epi32(lstep2[10], lstep2[ 4]);
+          lstep3[ 5] = _mm256_add_epi32(lstep2[11], lstep2[ 5]);
+          lstep3[ 6] = _mm256_add_epi32(lstep2[ 8], lstep2[ 6]);
+          lstep3[ 7] = _mm256_add_epi32(lstep2[ 9], lstep2[ 7]);
+          lstep3[ 8] = _mm256_sub_epi32(lstep2[ 6], lstep2[ 8]);
+          lstep3[ 9] = _mm256_sub_epi32(lstep2[ 7], lstep2[ 9]);
+          lstep3[10] = _mm256_sub_epi32(lstep2[ 4], lstep2[10]);
+          lstep3[11] = _mm256_sub_epi32(lstep2[ 5], lstep2[11]);
+          lstep3[12] = _mm256_sub_epi32(lstep2[ 2], lstep2[12]);
+          lstep3[13] = _mm256_sub_epi32(lstep2[ 3], lstep2[13]);
+          lstep3[14] = _mm256_sub_epi32(lstep2[ 0], lstep2[14]);
+          lstep3[15] = _mm256_sub_epi32(lstep2[ 1], lstep2[15]);
+        }
+        {
+          const __m256i s3_10_0 = _mm256_unpacklo_epi16(step2[13], step2[10]);
+          const __m256i s3_10_1 = _mm256_unpackhi_epi16(step2[13], step2[10]);
+          const __m256i s3_11_0 = _mm256_unpacklo_epi16(step2[12], step2[11]);
+          const __m256i s3_11_1 = _mm256_unpackhi_epi16(step2[12], step2[11]);
+          const __m256i s3_10_2 = _mm256_madd_epi16(s3_10_0, k__cospi_p16_m16);
+          const __m256i s3_10_3 = _mm256_madd_epi16(s3_10_1, k__cospi_p16_m16);
+          const __m256i s3_11_2 = _mm256_madd_epi16(s3_11_0, k__cospi_p16_m16);
+          const __m256i s3_11_3 = _mm256_madd_epi16(s3_11_1, k__cospi_p16_m16);
+          const __m256i s3_12_2 = _mm256_madd_epi16(s3_11_0, k__cospi_p16_p16);
+          const __m256i s3_12_3 = _mm256_madd_epi16(s3_11_1, k__cospi_p16_p16);
+          const __m256i s3_13_2 = _mm256_madd_epi16(s3_10_0, k__cospi_p16_p16);
+          const __m256i s3_13_3 = _mm256_madd_epi16(s3_10_1, k__cospi_p16_p16);
+          // dct_const_round_shift
+          const __m256i s3_10_4 = _mm256_add_epi32(s3_10_2, k__DCT_CONST_ROUNDING);
+          const __m256i s3_10_5 = _mm256_add_epi32(s3_10_3, k__DCT_CONST_ROUNDING);
+          const __m256i s3_11_4 = _mm256_add_epi32(s3_11_2, k__DCT_CONST_ROUNDING);
+          const __m256i s3_11_5 = _mm256_add_epi32(s3_11_3, k__DCT_CONST_ROUNDING);
+          const __m256i s3_12_4 = _mm256_add_epi32(s3_12_2, k__DCT_CONST_ROUNDING);
+          const __m256i s3_12_5 = _mm256_add_epi32(s3_12_3, k__DCT_CONST_ROUNDING);
+          const __m256i s3_13_4 = _mm256_add_epi32(s3_13_2, k__DCT_CONST_ROUNDING);
+          const __m256i s3_13_5 = _mm256_add_epi32(s3_13_3, k__DCT_CONST_ROUNDING);
+          lstep3[20] = _mm256_srai_epi32(s3_10_4, DCT_CONST_BITS);
+          lstep3[21] = _mm256_srai_epi32(s3_10_5, DCT_CONST_BITS);
+          lstep3[22] = _mm256_srai_epi32(s3_11_4, DCT_CONST_BITS);
+          lstep3[23] = _mm256_srai_epi32(s3_11_5, DCT_CONST_BITS);
+          lstep3[24] = _mm256_srai_epi32(s3_12_4, DCT_CONST_BITS);
+          lstep3[25] = _mm256_srai_epi32(s3_12_5, DCT_CONST_BITS);
+          lstep3[26] = _mm256_srai_epi32(s3_13_4, DCT_CONST_BITS);
+          lstep3[27] = _mm256_srai_epi32(s3_13_5, DCT_CONST_BITS);
+        }
+        {
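+          // Widen step2[20..27] and step1[16..19]/step1[28..31] to 32 bits,
+          // then form the stage-3 sums and differences of the odd half.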
+          lstep2[40] = _mm256_unpacklo_epi16(step2[20], kZero);
+          lstep2[41] = _mm256_unpackhi_epi16(step2[20], kZero);
+          lstep2[42] = _mm256_unpacklo_epi16(step2[21], kZero);
+          lstep2[43] = _mm256_unpackhi_epi16(step2[21], kZero);
+          lstep2[44] = _mm256_unpacklo_epi16(step2[22], kZero);
+          lstep2[45] = _mm256_unpackhi_epi16(step2[22], kZero);
+          lstep2[46] = _mm256_unpacklo_epi16(step2[23], kZero);
+          lstep2[47] = _mm256_unpackhi_epi16(step2[23], kZero);
+          lstep2[48] = _mm256_unpacklo_epi16(step2[24], kZero);
+          lstep2[49] = _mm256_unpackhi_epi16(step2[24], kZero);
+          lstep2[50] = _mm256_unpacklo_epi16(step2[25], kZero);
+          lstep2[51] = _mm256_unpackhi_epi16(step2[25], kZero);
+          lstep2[52] = _mm256_unpacklo_epi16(step2[26], kZero);
+          lstep2[53] = _mm256_unpackhi_epi16(step2[26], kZero);
+          lstep2[54] = _mm256_unpacklo_epi16(step2[27], kZero);
+          lstep2[55] = _mm256_unpackhi_epi16(step2[27], kZero);
+          lstep2[40] = _mm256_madd_epi16(lstep2[40], kOne);
+          lstep2[41] = _mm256_madd_epi16(lstep2[41], kOne);
+          lstep2[42] = _mm256_madd_epi16(lstep2[42], kOne);
+          lstep2[43] = _mm256_madd_epi16(lstep2[43], kOne);
+          lstep2[44] = _mm256_madd_epi16(lstep2[44], kOne);
+          lstep2[45] = _mm256_madd_epi16(lstep2[45], kOne);
+          lstep2[46] = _mm256_madd_epi16(lstep2[46], kOne);
+          lstep2[47] = _mm256_madd_epi16(lstep2[47], kOne);
+          lstep2[48] = _mm256_madd_epi16(lstep2[48], kOne);
+          lstep2[49] = _mm256_madd_epi16(lstep2[49], kOne);
+          lstep2[50] = _mm256_madd_epi16(lstep2[50], kOne);
+          lstep2[51] = _mm256_madd_epi16(lstep2[51], kOne);
+          lstep2[52] = _mm256_madd_epi16(lstep2[52], kOne);
+          lstep2[53] = _mm256_madd_epi16(lstep2[53], kOne);
+          lstep2[54] = _mm256_madd_epi16(lstep2[54], kOne);
+          lstep2[55] = _mm256_madd_epi16(lstep2[55], kOne);
+
+          lstep1[32] = _mm256_unpacklo_epi16(step1[16], kZero);
+          lstep1[33] = _mm256_unpackhi_epi16(step1[16], kZero);
+          lstep1[34] = _mm256_unpacklo_epi16(step1[17], kZero);
+          lstep1[35] = _mm256_unpackhi_epi16(step1[17], kZero);
+          lstep1[36] = _mm256_unpacklo_epi16(step1[18], kZero);
+          lstep1[37] = _mm256_unpackhi_epi16(step1[18], kZero);
+          lstep1[38] = _mm256_unpacklo_epi16(step1[19], kZero);
+          lstep1[39] = _mm256_unpackhi_epi16(step1[19], kZero);
+          lstep1[56] = _mm256_unpacklo_epi16(step1[28], kZero);
+          lstep1[57] = _mm256_unpackhi_epi16(step1[28], kZero);
+          lstep1[58] = _mm256_unpacklo_epi16(step1[29], kZero);
+          lstep1[59] = _mm256_unpackhi_epi16(step1[29], kZero);
+          lstep1[60] = _mm256_unpacklo_epi16(step1[30], kZero);
+          lstep1[61] = _mm256_unpackhi_epi16(step1[30], kZero);
+          lstep1[62] = _mm256_unpacklo_epi16(step1[31], kZero);
+          lstep1[63] = _mm256_unpackhi_epi16(step1[31], kZero);
+          lstep1[32] = _mm256_madd_epi16(lstep1[32], kOne);
+          lstep1[33] = _mm256_madd_epi16(lstep1[33], kOne);
+          lstep1[34] = _mm256_madd_epi16(lstep1[34], kOne);
+          lstep1[35] = _mm256_madd_epi16(lstep1[35], kOne);
+          lstep1[36] = _mm256_madd_epi16(lstep1[36], kOne);
+          lstep1[37] = _mm256_madd_epi16(lstep1[37], kOne);
+          lstep1[38] = _mm256_madd_epi16(lstep1[38], kOne);
+          lstep1[39] = _mm256_madd_epi16(lstep1[39], kOne);
+          lstep1[56] = _mm256_madd_epi16(lstep1[56], kOne);
+          lstep1[57] = _mm256_madd_epi16(lstep1[57], kOne);
+          lstep1[58] = _mm256_madd_epi16(lstep1[58], kOne);
+          lstep1[59] = _mm256_madd_epi16(lstep1[59], kOne);
+          lstep1[60] = _mm256_madd_epi16(lstep1[60], kOne);
+          lstep1[61] = _mm256_madd_epi16(lstep1[61], kOne);
+          lstep1[62] = _mm256_madd_epi16(lstep1[62], kOne);
+          lstep1[63] = _mm256_madd_epi16(lstep1[63], kOne);
+
+          lstep3[32] = _mm256_add_epi32(lstep2[46], lstep1[32]);
+          lstep3[33] = _mm256_add_epi32(lstep2[47], lstep1[33]);
+
+          lstep3[34] = _mm256_add_epi32(lstep2[44], lstep1[34]);
+          lstep3[35] = _mm256_add_epi32(lstep2[45], lstep1[35]);
+          lstep3[36] = _mm256_add_epi32(lstep2[42], lstep1[36]);
+          lstep3[37] = _mm256_add_epi32(lstep2[43], lstep1[37]);
+          lstep3[38] = _mm256_add_epi32(lstep2[40], lstep1[38]);
+          lstep3[39] = _mm256_add_epi32(lstep2[41], lstep1[39]);
+          lstep3[40] = _mm256_sub_epi32(lstep1[38], lstep2[40]);
+          lstep3[41] = _mm256_sub_epi32(lstep1[39], lstep2[41]);
+          lstep3[42] = _mm256_sub_epi32(lstep1[36], lstep2[42]);
+          lstep3[43] = _mm256_sub_epi32(lstep1[37], lstep2[43]);
+          lstep3[44] = _mm256_sub_epi32(lstep1[34], lstep2[44]);
+          lstep3[45] = _mm256_sub_epi32(lstep1[35], lstep2[45]);
+          lstep3[46] = _mm256_sub_epi32(lstep1[32], lstep2[46]);
+          lstep3[47] = _mm256_sub_epi32(lstep1[33], lstep2[47]);
+          lstep3[48] = _mm256_sub_epi32(lstep1[62], lstep2[48]);
+          lstep3[49] = _mm256_sub_epi32(lstep1[63], lstep2[49]);
+          lstep3[50] = _mm256_sub_epi32(lstep1[60], lstep2[50]);
+          lstep3[51] = _mm256_sub_epi32(lstep1[61], lstep2[51]);
+          lstep3[52] = _mm256_sub_epi32(lstep1[58], lstep2[52]);
+          lstep3[53] = _mm256_sub_epi32(lstep1[59], lstep2[53]);
+          lstep3[54] = _mm256_sub_epi32(lstep1[56], lstep2[54]);
+          lstep3[55] = _mm256_sub_epi32(lstep1[57], lstep2[55]);
+          lstep3[56] = _mm256_add_epi32(lstep2[54], lstep1[56]);
+          lstep3[57] = _mm256_add_epi32(lstep2[55], lstep1[57]);
+          lstep3[58] = _mm256_add_epi32(lstep2[52], lstep1[58]);
+          lstep3[59] = _mm256_add_epi32(lstep2[53], lstep1[59]);
+          lstep3[60] = _mm256_add_epi32(lstep2[50], lstep1[60]);
+          lstep3[61] = _mm256_add_epi32(lstep2[51], lstep1[61]);
+          lstep3[62] = _mm256_add_epi32(lstep2[48], lstep1[62]);
+          lstep3[63] = _mm256_add_epi32(lstep2[49], lstep1[63]);
+        }
+
+        // stage 4
+        {
+          // expanding to 32-bit length prior to addition operations
+          lstep2[16] = _mm256_unpacklo_epi16(step2[ 8], kZero);
+          lstep2[17] = _mm256_unpackhi_epi16(step2[ 8], kZero);
+          lstep2[18] = _mm256_unpacklo_epi16(step2[ 9], kZero);
+          lstep2[19] = _mm256_unpackhi_epi16(step2[ 9], kZero);
+          lstep2[28] = _mm256_unpacklo_epi16(step2[14], kZero);
+          lstep2[29] = _mm256_unpackhi_epi16(step2[14], kZero);
+          lstep2[30] = _mm256_unpacklo_epi16(step2[15], kZero);
+          lstep2[31] = _mm256_unpackhi_epi16(step2[15], kZero);
+          lstep2[16] = _mm256_madd_epi16(lstep2[16], kOne);
+          lstep2[17] = _mm256_madd_epi16(lstep2[17], kOne);
+          lstep2[18] = _mm256_madd_epi16(lstep2[18], kOne);
+          lstep2[19] = _mm256_madd_epi16(lstep2[19], kOne);
+          lstep2[28] = _mm256_madd_epi16(lstep2[28], kOne);
+          lstep2[29] = _mm256_madd_epi16(lstep2[29], kOne);
+          lstep2[30] = _mm256_madd_epi16(lstep2[30], kOne);
+          lstep2[31] = _mm256_madd_epi16(lstep2[31], kOne);
+
+          lstep1[ 0] = _mm256_add_epi32(lstep3[ 6], lstep3[ 0]);
+          lstep1[ 1] = _mm256_add_epi32(lstep3[ 7], lstep3[ 1]);
+          lstep1[ 2] = _mm256_add_epi32(lstep3[ 4], lstep3[ 2]);
+          lstep1[ 3] = _mm256_add_epi32(lstep3[ 5], lstep3[ 3]);
+          lstep1[ 4] = _mm256_sub_epi32(lstep3[ 2], lstep3[ 4]);
+          lstep1[ 5] = _mm256_sub_epi32(lstep3[ 3], lstep3[ 5]);
+          lstep1[ 6] = _mm256_sub_epi32(lstep3[ 0], lstep3[ 6]);
+          lstep1[ 7] = _mm256_sub_epi32(lstep3[ 1], lstep3[ 7]);
+          lstep1[16] = _mm256_add_epi32(lstep3[22], lstep2[16]);
+          lstep1[17] = _mm256_add_epi32(lstep3[23], lstep2[17]);
+          lstep1[18] = _mm256_add_epi32(lstep3[20], lstep2[18]);
+          lstep1[19] = _mm256_add_epi32(lstep3[21], lstep2[19]);
+          lstep1[20] = _mm256_sub_epi32(lstep2[18], lstep3[20]);
+          lstep1[21] = _mm256_sub_epi32(lstep2[19], lstep3[21]);
+          lstep1[22] = _mm256_sub_epi32(lstep2[16], lstep3[22]);
+          lstep1[23] = _mm256_sub_epi32(lstep2[17], lstep3[23]);
+          lstep1[24] = _mm256_sub_epi32(lstep2[30], lstep3[24]);
+          lstep1[25] = _mm256_sub_epi32(lstep2[31], lstep3[25]);
+          lstep1[26] = _mm256_sub_epi32(lstep2[28], lstep3[26]);
+          lstep1[27] = _mm256_sub_epi32(lstep2[29], lstep3[27]);
+          lstep1[28] = _mm256_add_epi32(lstep3[26], lstep2[28]);
+          lstep1[29] = _mm256_add_epi32(lstep3[27], lstep2[29]);
+          lstep1[30] = _mm256_add_epi32(lstep3[24], lstep2[30]);
+          lstep1[31] = _mm256_add_epi32(lstep3[25], lstep2[31]);
+        }
+        {
+          // cospi_16_64 rotation of lstep3[10..13], carried out in
+          // 32-bit lanes.
+          const __m256i k32_p16_p16 = pair256_set_epi32(cospi_16_64, cospi_16_64);
+          const __m256i k32_p16_m16 = pair256_set_epi32(cospi_16_64, -cospi_16_64);
+
+          u[0] = _mm256_unpacklo_epi32(lstep3[12], lstep3[10]);
+          u[1] = _mm256_unpackhi_epi32(lstep3[12], lstep3[10]);
+          u[2] = _mm256_unpacklo_epi32(lstep3[13], lstep3[11]);
+          u[3] = _mm256_unpackhi_epi32(lstep3[13], lstep3[11]);
+
+          // TODO(jingning): manually inline k_madd_epi32_avx2 to further hide
+          // instruction latency.
+          v[ 0] = k_madd_epi32_avx2(u[0], k32_p16_m16);
+          v[ 1] = k_madd_epi32_avx2(u[1], k32_p16_m16);
+          v[ 2] = k_madd_epi32_avx2(u[2], k32_p16_m16);
+          v[ 3] = k_madd_epi32_avx2(u[3], k32_p16_m16);
+          v[ 4] = k_madd_epi32_avx2(u[0], k32_p16_p16);
+          v[ 5] = k_madd_epi32_avx2(u[1], k32_p16_p16);
+          v[ 6] = k_madd_epi32_avx2(u[2], k32_p16_p16);
+          v[ 7] = k_madd_epi32_avx2(u[3], k32_p16_p16);
+
+          u[0] = k_packs_epi64_avx2(v[0], v[1]);
+          u[1] = k_packs_epi64_avx2(v[2], v[3]);
+          u[2] = k_packs_epi64_avx2(v[4], v[5]);
+          u[3] = k_packs_epi64_avx2(v[6], v[7]);
+
+          v[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+          v[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+          v[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+          v[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+
+          lstep1[10] = _mm256_srai_epi32(v[0], DCT_CONST_BITS);
+          lstep1[11] = _mm256_srai_epi32(v[1], DCT_CONST_BITS);
+          lstep1[12] = _mm256_srai_epi32(v[2], DCT_CONST_BITS);
+          lstep1[13] = _mm256_srai_epi32(v[3], DCT_CONST_BITS);
+        }
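+        // Stage 4 rotations by +/-cospi_8_64 and +/-cospi_24_64, pairing
+        // lstep3[36..43] with lstep3[52..59].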
+        {
+          const __m256i k32_m08_p24 = pair256_set_epi32(-cospi_8_64, cospi_24_64);
+          const __m256i k32_m24_m08 = pair256_set_epi32(-cospi_24_64, -cospi_8_64);
+          const __m256i k32_p24_p08 = pair256_set_epi32(cospi_24_64, cospi_8_64);
+
+          u[ 0] = _mm256_unpacklo_epi32(lstep3[36], lstep3[58]);
+          u[ 1] = _mm256_unpackhi_epi32(lstep3[36], lstep3[58]);
+          u[ 2] = _mm256_unpacklo_epi32(lstep3[37], lstep3[59]);
+          u[ 3] = _mm256_unpackhi_epi32(lstep3[37], lstep3[59]);
+          u[ 4] = _mm256_unpacklo_epi32(lstep3[38], lstep3[56]);
+          u[ 5] = _mm256_unpackhi_epi32(lstep3[38], lstep3[56]);
+          u[ 6] = _mm256_unpacklo_epi32(lstep3[39], lstep3[57]);
+          u[ 7] = _mm256_unpackhi_epi32(lstep3[39], lstep3[57]);
+          u[ 8] = _mm256_unpacklo_epi32(lstep3[40], lstep3[54]);
+          u[ 9] = _mm256_unpackhi_epi32(lstep3[40], lstep3[54]);
+          u[10] = _mm256_unpacklo_epi32(lstep3[41], lstep3[55]);
+          u[11] = _mm256_unpackhi_epi32(lstep3[41], lstep3[55]);
+          u[12] = _mm256_unpacklo_epi32(lstep3[42], lstep3[52]);
+          u[13] = _mm256_unpackhi_epi32(lstep3[42], lstep3[52]);
+          u[14] = _mm256_unpacklo_epi32(lstep3[43], lstep3[53]);
+          u[15] = _mm256_unpackhi_epi32(lstep3[43], lstep3[53]);
+
+          v[ 0] = k_madd_epi32_avx2(u[ 0], k32_m08_p24);
+          v[ 1] = k_madd_epi32_avx2(u[ 1], k32_m08_p24);
+          v[ 2] = k_madd_epi32_avx2(u[ 2], k32_m08_p24);
+          v[ 3] = k_madd_epi32_avx2(u[ 3], k32_m08_p24);
+          v[ 4] = k_madd_epi32_avx2(u[ 4], k32_m08_p24);
+          v[ 5] = k_madd_epi32_avx2(u[ 5], k32_m08_p24);
+          v[ 6] = k_madd_epi32_avx2(u[ 6], k32_m08_p24);
+          v[ 7] = k_madd_epi32_avx2(u[ 7], k32_m08_p24);
+          v[ 8] = k_madd_epi32_avx2(u[ 8], k32_m24_m08);
+          v[ 9] = k_madd_epi32_avx2(u[ 9], k32_m24_m08);
+          v[10] = k_madd_epi32_avx2(u[10], k32_m24_m08);
+          v[11] = k_madd_epi32_avx2(u[11], k32_m24_m08);
+          v[12] = k_madd_epi32_avx2(u[12], k32_m24_m08);
+          v[13] = k_madd_epi32_avx2(u[13], k32_m24_m08);
+          v[14] = k_madd_epi32_avx2(u[14], k32_m24_m08);
+          v[15] = k_madd_epi32_avx2(u[15], k32_m24_m08);
+          v[16] = k_madd_epi32_avx2(u[12], k32_m08_p24);
+          v[17] = k_madd_epi32_avx2(u[13], k32_m08_p24);
+          v[18] = k_madd_epi32_avx2(u[14], k32_m08_p24);
+          v[19] = k_madd_epi32_avx2(u[15], k32_m08_p24);
+          v[20] = k_madd_epi32_avx2(u[ 8], k32_m08_p24);
+          v[21] = k_madd_epi32_avx2(u[ 9], k32_m08_p24);
+          v[22] = k_madd_epi32_avx2(u[10], k32_m08_p24);
+          v[23] = k_madd_epi32_avx2(u[11], k32_m08_p24);
+          v[24] = k_madd_epi32_avx2(u[ 4], k32_p24_p08);
+          v[25] = k_madd_epi32_avx2(u[ 5], k32_p24_p08);
+          v[26] = k_madd_epi32_avx2(u[ 6], k32_p24_p08);
+          v[27] = k_madd_epi32_avx2(u[ 7], k32_p24_p08);
+          v[28] = k_madd_epi32_avx2(u[ 0], k32_p24_p08);
+          v[29] = k_madd_epi32_avx2(u[ 1], k32_p24_p08);
+          v[30] = k_madd_epi32_avx2(u[ 2], k32_p24_p08);
+          v[31] = k_madd_epi32_avx2(u[ 3], k32_p24_p08);
+
+          u[ 0] = k_packs_epi64_avx2(v[ 0], v[ 1]);
+          u[ 1] = k_packs_epi64_avx2(v[ 2], v[ 3]);
+          u[ 2] = k_packs_epi64_avx2(v[ 4], v[ 5]);
+          u[ 3] = k_packs_epi64_avx2(v[ 6], v[ 7]);
+          u[ 4] = k_packs_epi64_avx2(v[ 8], v[ 9]);
+          u[ 5] = k_packs_epi64_avx2(v[10], v[11]);
+          u[ 6] = k_packs_epi64_avx2(v[12], v[13]);
+          u[ 7] = k_packs_epi64_avx2(v[14], v[15]);
+          u[ 8] = k_packs_epi64_avx2(v[16], v[17]);
+          u[ 9] = k_packs_epi64_avx2(v[18], v[19]);
+          u[10] = k_packs_epi64_avx2(v[20], v[21]);
+          u[11] = k_packs_epi64_avx2(v[22], v[23]);
+          u[12] = k_packs_epi64_avx2(v[24], v[25]);
+          u[13] = k_packs_epi64_avx2(v[26], v[27]);
+          u[14] = k_packs_epi64_avx2(v[28], v[29]);
+          u[15] = k_packs_epi64_avx2(v[30], v[31]);
+
+          v[ 0] = _mm256_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
+          v[ 1] = _mm256_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
+          v[ 2] = _mm256_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
+          v[ 3] = _mm256_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
+          v[ 4] = _mm256_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
+          v[ 5] = _mm256_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
+          v[ 6] = _mm256_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
+          v[ 7] = _mm256_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
+          v[ 8] = _mm256_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
+          v[ 9] = _mm256_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
+          v[10] = _mm256_add_epi32(u[10], k__DCT_CONST_ROUNDING);
+          v[11] = _mm256_add_epi32(u[11], k__DCT_CONST_ROUNDING);
+          v[12] = _mm256_add_epi32(u[12], k__DCT_CONST_ROUNDING);
+          v[13] = _mm256_add_epi32(u[13], k__DCT_CONST_ROUNDING);
+          v[14] = _mm256_add_epi32(u[14], k__DCT_CONST_ROUNDING);
+          v[15] = _mm256_add_epi32(u[15], k__DCT_CONST_ROUNDING);
+
+          lstep1[36] = _mm256_srai_epi32(v[ 0], DCT_CONST_BITS);
+          lstep1[37] = _mm256_srai_epi32(v[ 1], DCT_CONST_BITS);
+          lstep1[38] = _mm256_srai_epi32(v[ 2], DCT_CONST_BITS);
+          lstep1[39] = _mm256_srai_epi32(v[ 3], DCT_CONST_BITS);
+          lstep1[40] = _mm256_srai_epi32(v[ 4], DCT_CONST_BITS);
+          lstep1[41] = _mm256_srai_epi32(v[ 5], DCT_CONST_BITS);
+          lstep1[42] = _mm256_srai_epi32(v[ 6], DCT_CONST_BITS);
+          lstep1[43] = _mm256_srai_epi32(v[ 7], DCT_CONST_BITS);
+          lstep1[52] = _mm256_srai_epi32(v[ 8], DCT_CONST_BITS);
+          lstep1[53] = _mm256_srai_epi32(v[ 9], DCT_CONST_BITS);
+          lstep1[54] = _mm256_srai_epi32(v[10], DCT_CONST_BITS);
+          lstep1[55] = _mm256_srai_epi32(v[11], DCT_CONST_BITS);
+          lstep1[56] = _mm256_srai_epi32(v[12], DCT_CONST_BITS);
+          lstep1[57] = _mm256_srai_epi32(v[13], DCT_CONST_BITS);
+          lstep1[58] = _mm256_srai_epi32(v[14], DCT_CONST_BITS);
+          lstep1[59] = _mm256_srai_epi32(v[15], DCT_CONST_BITS);
+        }
+        // stage 5
+        {
+          lstep2[ 8] = _mm256_add_epi32(lstep1[10], lstep3[ 8]);
+          lstep2[ 9] = _mm256_add_epi32(lstep1[11], lstep3[ 9]);
+          lstep2[10] = _mm256_sub_epi32(lstep3[ 8], lstep1[10]);
+          lstep2[11] = _mm256_sub_epi32(lstep3[ 9], lstep1[11]);
+          lstep2[12] = _mm256_sub_epi32(lstep3[14], lstep1[12]);
+          lstep2[13] = _mm256_sub_epi32(lstep3[15], lstep1[13]);
+          lstep2[14] = _mm256_add_epi32(lstep1[12], lstep3[14]);
+          lstep2[15] = _mm256_add_epi32(lstep1[13], lstep3[15]);
+        }
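+        // Rotate the 32-bit even terms and round/pack back to 16 bits,
+        // producing out[0], out[8], out[16] and out[24].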
+        {
+          const __m256i k32_p16_p16 = pair256_set_epi32(cospi_16_64, cospi_16_64);
+          const __m256i k32_p16_m16 = pair256_set_epi32(cospi_16_64, -cospi_16_64);
+          const __m256i k32_p24_p08 = pair256_set_epi32(cospi_24_64, cospi_8_64);
+          const __m256i k32_m08_p24 = pair256_set_epi32(-cospi_8_64, cospi_24_64);
+
+          u[0] = _mm256_unpacklo_epi32(lstep1[0], lstep1[2]);
+          u[1] = _mm256_unpackhi_epi32(lstep1[0], lstep1[2]);
+          u[2] = _mm256_unpacklo_epi32(lstep1[1], lstep1[3]);
+          u[3] = _mm256_unpackhi_epi32(lstep1[1], lstep1[3]);
+          u[4] = _mm256_unpacklo_epi32(lstep1[4], lstep1[6]);
+          u[5] = _mm256_unpackhi_epi32(lstep1[4], lstep1[6]);
+          u[6] = _mm256_unpacklo_epi32(lstep1[5], lstep1[7]);
+          u[7] = _mm256_unpackhi_epi32(lstep1[5], lstep1[7]);
+
+          // TODO(jingning): manually inline k_madd_epi32_avx2 to further hide
+          // instruction latency.
+          v[ 0] = k_madd_epi32_avx2(u[0], k32_p16_p16);
+          v[ 1] = k_madd_epi32_avx2(u[1], k32_p16_p16);
+          v[ 2] = k_madd_epi32_avx2(u[2], k32_p16_p16);
+          v[ 3] = k_madd_epi32_avx2(u[3], k32_p16_p16);
+          v[ 4] = k_madd_epi32_avx2(u[0], k32_p16_m16);
+          v[ 5] = k_madd_epi32_avx2(u[1], k32_p16_m16);
+          v[ 6] = k_madd_epi32_avx2(u[2], k32_p16_m16);
+          v[ 7] = k_madd_epi32_avx2(u[3], k32_p16_m16);
+          v[ 8] = k_madd_epi32_avx2(u[4], k32_p24_p08);
+          v[ 9] = k_madd_epi32_avx2(u[5], k32_p24_p08);
+          v[10] = k_madd_epi32_avx2(u[6], k32_p24_p08);
+          v[11] = k_madd_epi32_avx2(u[7], k32_p24_p08);
+          v[12] = k_madd_epi32_avx2(u[4], k32_m08_p24);
+          v[13] = k_madd_epi32_avx2(u[5], k32_m08_p24);
+          v[14] = k_madd_epi32_avx2(u[6], k32_m08_p24);
+          v[15] = k_madd_epi32_avx2(u[7], k32_m08_p24);
+
+          u[0] = k_packs_epi64_avx2(v[0], v[1]);
+          u[1] = k_packs_epi64_avx2(v[2], v[3]);
+          u[2] = k_packs_epi64_avx2(v[4], v[5]);
+          u[3] = k_packs_epi64_avx2(v[6], v[7]);
+          u[4] = k_packs_epi64_avx2(v[8], v[9]);
+          u[5] = k_packs_epi64_avx2(v[10], v[11]);
+          u[6] = k_packs_epi64_avx2(v[12], v[13]);
+          u[7] = k_packs_epi64_avx2(v[14], v[15]);
+
+          v[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+          v[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+          v[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+          v[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+          v[4] = _mm256_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+          v[5] = _mm256_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+          v[6] = _mm256_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+          v[7] = _mm256_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+
+          u[0] = _mm256_srai_epi32(v[0], DCT_CONST_BITS);
+          u[1] = _mm256_srai_epi32(v[1], DCT_CONST_BITS);
+          u[2] = _mm256_srai_epi32(v[2], DCT_CONST_BITS);
+          u[3] = _mm256_srai_epi32(v[3], DCT_CONST_BITS);
+          u[4] = _mm256_srai_epi32(v[4], DCT_CONST_BITS);
+          u[5] = _mm256_srai_epi32(v[5], DCT_CONST_BITS);
+          u[6] = _mm256_srai_epi32(v[6], DCT_CONST_BITS);
+          u[7] = _mm256_srai_epi32(v[7], DCT_CONST_BITS);
+
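+          // Final rounding: subtracting the sign mask adds one to negative
+          // values, so the result is (x + 1 + (x < 0)) >> 2.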
+          sign[0] = _mm256_cmpgt_epi32(kZero, u[0]);
+          sign[1] = _mm256_cmpgt_epi32(kZero, u[1]);
+          sign[2] = _mm256_cmpgt_epi32(kZero, u[2]);
+          sign[3] = _mm256_cmpgt_epi32(kZero, u[3]);
+          sign[4] = _mm256_cmpgt_epi32(kZero, u[4]);
+          sign[5] = _mm256_cmpgt_epi32(kZero, u[5]);
+          sign[6] = _mm256_cmpgt_epi32(kZero, u[6]);
+          sign[7] = _mm256_cmpgt_epi32(kZero, u[7]);
+
+          u[0] = _mm256_sub_epi32(u[0], sign[0]);
+          u[1] = _mm256_sub_epi32(u[1], sign[1]);
+          u[2] = _mm256_sub_epi32(u[2], sign[2]);
+          u[3] = _mm256_sub_epi32(u[3], sign[3]);
+          u[4] = _mm256_sub_epi32(u[4], sign[4]);
+          u[5] = _mm256_sub_epi32(u[5], sign[5]);
+          u[6] = _mm256_sub_epi32(u[6], sign[6]);
+          u[7] = _mm256_sub_epi32(u[7], sign[7]);
+
+          u[0] = _mm256_add_epi32(u[0], K32One);
+          u[1] = _mm256_add_epi32(u[1], K32One);
+          u[2] = _mm256_add_epi32(u[2], K32One);
+          u[3] = _mm256_add_epi32(u[3], K32One);
+          u[4] = _mm256_add_epi32(u[4], K32One);
+          u[5] = _mm256_add_epi32(u[5], K32One);
+          u[6] = _mm256_add_epi32(u[6], K32One);
+          u[7] = _mm256_add_epi32(u[7], K32One);
+
+          u[0] = _mm256_srai_epi32(u[0], 2);
+          u[1] = _mm256_srai_epi32(u[1], 2);
+          u[2] = _mm256_srai_epi32(u[2], 2);
+          u[3] = _mm256_srai_epi32(u[3], 2);
+          u[4] = _mm256_srai_epi32(u[4], 2);
+          u[5] = _mm256_srai_epi32(u[5], 2);
+          u[6] = _mm256_srai_epi32(u[6], 2);
+          u[7] = _mm256_srai_epi32(u[7], 2);
+
+          // Combine
+          out[ 0] = _mm256_packs_epi32(u[0], u[1]);
+          out[16] = _mm256_packs_epi32(u[2], u[3]);
+          out[ 8] = _mm256_packs_epi32(u[4], u[5]);
+          out[24] = _mm256_packs_epi32(u[6], u[7]);
+        }
+        {
+          const __m256i k32_m08_p24 = pair256_set_epi32(-cospi_8_64, cospi_24_64);
+          const __m256i k32_m24_m08 = pair256_set_epi32(-cospi_24_64, -cospi_8_64);
+          const __m256i k32_p24_p08 = pair256_set_epi32(cospi_24_64, cospi_8_64);
+
+          u[0] = _mm256_unpacklo_epi32(lstep1[18], lstep1[28]);
+          u[1] = _mm256_unpackhi_epi32(lstep1[18], lstep1[28]);
+          u[2] = _mm256_unpacklo_epi32(lstep1[19], lstep1[29]);
+          u[3] = _mm256_unpackhi_epi32(lstep1[19], lstep1[29]);
+          u[4] = _mm256_unpacklo_epi32(lstep1[20], lstep1[26]);
+          u[5] = _mm256_unpackhi_epi32(lstep1[20], lstep1[26]);
+          u[6] = _mm256_unpacklo_epi32(lstep1[21], lstep1[27]);
+          u[7] = _mm256_unpackhi_epi32(lstep1[21], lstep1[27]);
+
+          v[0] = k_madd_epi32_avx2(u[0], k32_m08_p24);
+          v[1] = k_madd_epi32_avx2(u[1], k32_m08_p24);
+          v[2] = k_madd_epi32_avx2(u[2], k32_m08_p24);
+          v[3] = k_madd_epi32_avx2(u[3], k32_m08_p24);
+          v[4] = k_madd_epi32_avx2(u[4], k32_m24_m08);
+          v[5] = k_madd_epi32_avx2(u[5], k32_m24_m08);
+          v[6] = k_madd_epi32_avx2(u[6], k32_m24_m08);
+          v[7] = k_madd_epi32_avx2(u[7], k32_m24_m08);
+          v[ 8] = k_madd_epi32_avx2(u[4], k32_m08_p24);
+          v[ 9] = k_madd_epi32_avx2(u[5], k32_m08_p24);
+          v[10] = k_madd_epi32_avx2(u[6], k32_m08_p24);
+          v[11] = k_madd_epi32_avx2(u[7], k32_m08_p24);
+          v[12] = k_madd_epi32_avx2(u[0], k32_p24_p08);
+          v[13] = k_madd_epi32_avx2(u[1], k32_p24_p08);
+          v[14] = k_madd_epi32_avx2(u[2], k32_p24_p08);
+          v[15] = k_madd_epi32_avx2(u[3], k32_p24_p08);
+
+          u[0] = k_packs_epi64_avx2(v[0], v[1]);
+          u[1] = k_packs_epi64_avx2(v[2], v[3]);
+          u[2] = k_packs_epi64_avx2(v[4], v[5]);
+          u[3] = k_packs_epi64_avx2(v[6], v[7]);
+          u[4] = k_packs_epi64_avx2(v[8], v[9]);
+          u[5] = k_packs_epi64_avx2(v[10], v[11]);
+          u[6] = k_packs_epi64_avx2(v[12], v[13]);
+          u[7] = k_packs_epi64_avx2(v[14], v[15]);
+
+          u[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+          u[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+          u[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+          u[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+          u[4] = _mm256_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+          u[5] = _mm256_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+          u[6] = _mm256_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+          u[7] = _mm256_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+
+          lstep2[18] = _mm256_srai_epi32(u[0], DCT_CONST_BITS);
+          lstep2[19] = _mm256_srai_epi32(u[1], DCT_CONST_BITS);
+          lstep2[20] = _mm256_srai_epi32(u[2], DCT_CONST_BITS);
+          lstep2[21] = _mm256_srai_epi32(u[3], DCT_CONST_BITS);
+          lstep2[26] = _mm256_srai_epi32(u[4], DCT_CONST_BITS);
+          lstep2[27] = _mm256_srai_epi32(u[5], DCT_CONST_BITS);
+          lstep2[28] = _mm256_srai_epi32(u[6], DCT_CONST_BITS);
+          lstep2[29] = _mm256_srai_epi32(u[7], DCT_CONST_BITS);
+        }
+        {
+          lstep2[32] = _mm256_add_epi32(lstep1[38], lstep3[32]);
+          lstep2[33] = _mm256_add_epi32(lstep1[39], lstep3[33]);
+          lstep2[34] = _mm256_add_epi32(lstep1[36], lstep3[34]);
+          lstep2[35] = _mm256_add_epi32(lstep1[37], lstep3[35]);
+          lstep2[36] = _mm256_sub_epi32(lstep3[34], lstep1[36]);
+          lstep2[37] = _mm256_sub_epi32(lstep3[35], lstep1[37]);
+          lstep2[38] = _mm256_sub_epi32(lstep3[32], lstep1[38]);
+          lstep2[39] = _mm256_sub_epi32(lstep3[33], lstep1[39]);
+          lstep2[40] = _mm256_sub_epi32(lstep3[46], lstep1[40]);
+          lstep2[41] = _mm256_sub_epi32(lstep3[47], lstep1[41]);
+          lstep2[42] = _mm256_sub_epi32(lstep3[44], lstep1[42]);
+          lstep2[43] = _mm256_sub_epi32(lstep3[45], lstep1[43]);
+          lstep2[44] = _mm256_add_epi32(lstep1[42], lstep3[44]);
+          lstep2[45] = _mm256_add_epi32(lstep1[43], lstep3[45]);
+          lstep2[46] = _mm256_add_epi32(lstep1[40], lstep3[46]);
+          lstep2[47] = _mm256_add_epi32(lstep1[41], lstep3[47]);
+          lstep2[48] = _mm256_add_epi32(lstep1[54], lstep3[48]);
+          lstep2[49] = _mm256_add_epi32(lstep1[55], lstep3[49]);
+          lstep2[50] = _mm256_add_epi32(lstep1[52], lstep3[50]);
+          lstep2[51] = _mm256_add_epi32(lstep1[53], lstep3[51]);
+          lstep2[52] = _mm256_sub_epi32(lstep3[50], lstep1[52]);
+          lstep2[53] = _mm256_sub_epi32(lstep3[51], lstep1[53]);
+          lstep2[54] = _mm256_sub_epi32(lstep3[48], lstep1[54]);
+          lstep2[55] = _mm256_sub_epi32(lstep3[49], lstep1[55]);
+          lstep2[56] = _mm256_sub_epi32(lstep3[62], lstep1[56]);
+          lstep2[57] = _mm256_sub_epi32(lstep3[63], lstep1[57]);
+          lstep2[58] = _mm256_sub_epi32(lstep3[60], lstep1[58]);
+          lstep2[59] = _mm256_sub_epi32(lstep3[61], lstep1[59]);
+          lstep2[60] = _mm256_add_epi32(lstep1[58], lstep3[60]);
+          lstep2[61] = _mm256_add_epi32(lstep1[59], lstep3[61]);
+          lstep2[62] = _mm256_add_epi32(lstep1[56], lstep3[62]);
+          lstep2[63] = _mm256_add_epi32(lstep1[57], lstep3[63]);
+        }
+        // stage 6
+        {
+          const __m256i k32_p28_p04 = pair256_set_epi32(cospi_28_64, cospi_4_64);
+          const __m256i k32_p12_p20 = pair256_set_epi32(cospi_12_64, cospi_20_64);
+          const __m256i k32_m20_p12 = pair256_set_epi32(-cospi_20_64, cospi_12_64);
+          const __m256i k32_m04_p28 = pair256_set_epi32(-cospi_4_64, cospi_28_64);
+
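+          // 32-bit counterpart of the stage-6 rotations: these produce
+          // out[4], out[12], out[20] and out[28].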
+          u[0] = _mm256_unpacklo_epi32(lstep2[ 8], lstep2[14]);
+          u[1] = _mm256_unpackhi_epi32(lstep2[ 8], lstep2[14]);
+          u[2] = _mm256_unpacklo_epi32(lstep2[ 9], lstep2[15]);
+          u[3] = _mm256_unpackhi_epi32(lstep2[ 9], lstep2[15]);
+          u[4] = _mm256_unpacklo_epi32(lstep2[10], lstep2[12]);
+          u[5] = _mm256_unpackhi_epi32(lstep2[10], lstep2[12]);
+          u[6] = _mm256_unpacklo_epi32(lstep2[11], lstep2[13]);
+          u[7] = _mm256_unpackhi_epi32(lstep2[11], lstep2[13]);
+          u[8] = _mm256_unpacklo_epi32(lstep2[10], lstep2[12]);
+          u[9] = _mm256_unpackhi_epi32(lstep2[10], lstep2[12]);
+          u[10] = _mm256_unpacklo_epi32(lstep2[11], lstep2[13]);
+          u[11] = _mm256_unpackhi_epi32(lstep2[11], lstep2[13]);
+          u[12] = _mm256_unpacklo_epi32(lstep2[ 8], lstep2[14]);
+          u[13] = _mm256_unpackhi_epi32(lstep2[ 8], lstep2[14]);
+          u[14] = _mm256_unpacklo_epi32(lstep2[ 9], lstep2[15]);
+          u[15] = _mm256_unpackhi_epi32(lstep2[ 9], lstep2[15]);
+
+          v[0] = k_madd_epi32_avx2(u[0], k32_p28_p04);
+          v[1] = k_madd_epi32_avx2(u[1], k32_p28_p04);
+          v[2] = k_madd_epi32_avx2(u[2], k32_p28_p04);
+          v[3] = k_madd_epi32_avx2(u[3], k32_p28_p04);
+          v[4] = k_madd_epi32_avx2(u[4], k32_p12_p20);
+          v[5] = k_madd_epi32_avx2(u[5], k32_p12_p20);
+          v[6] = k_madd_epi32_avx2(u[6], k32_p12_p20);
+          v[7] = k_madd_epi32_avx2(u[7], k32_p12_p20);
+          v[ 8] = k_madd_epi32_avx2(u[ 8], k32_m20_p12);
+          v[ 9] = k_madd_epi32_avx2(u[ 9], k32_m20_p12);
+          v[10] = k_madd_epi32_avx2(u[10], k32_m20_p12);
+          v[11] = k_madd_epi32_avx2(u[11], k32_m20_p12);
+          v[12] = k_madd_epi32_avx2(u[12], k32_m04_p28);
+          v[13] = k_madd_epi32_avx2(u[13], k32_m04_p28);
+          v[14] = k_madd_epi32_avx2(u[14], k32_m04_p28);
+          v[15] = k_madd_epi32_avx2(u[15], k32_m04_p28);
+
+          u[0] = k_packs_epi64_avx2(v[0], v[1]);
+          u[1] = k_packs_epi64_avx2(v[2], v[3]);
+          u[2] = k_packs_epi64_avx2(v[4], v[5]);
+          u[3] = k_packs_epi64_avx2(v[6], v[7]);
+          u[4] = k_packs_epi64_avx2(v[8], v[9]);
+          u[5] = k_packs_epi64_avx2(v[10], v[11]);
+          u[6] = k_packs_epi64_avx2(v[12], v[13]);
+          u[7] = k_packs_epi64_avx2(v[14], v[15]);
+
+          v[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+          v[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+          v[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+          v[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+          v[4] = _mm256_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+          v[5] = _mm256_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+          v[6] = _mm256_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+          v[7] = _mm256_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+
+          u[0] = _mm256_srai_epi32(v[0], DCT_CONST_BITS);
+          u[1] = _mm256_srai_epi32(v[1], DCT_CONST_BITS);
+          u[2] = _mm256_srai_epi32(v[2], DCT_CONST_BITS);
+          u[3] = _mm256_srai_epi32(v[3], DCT_CONST_BITS);
+          u[4] = _mm256_srai_epi32(v[4], DCT_CONST_BITS);
+          u[5] = _mm256_srai_epi32(v[5], DCT_CONST_BITS);
+          u[6] = _mm256_srai_epi32(v[6], DCT_CONST_BITS);
+          u[7] = _mm256_srai_epi32(v[7], DCT_CONST_BITS);
+
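+          // Same signed rounding as above: (x + 1 + (x < 0)) >> 2.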
+          sign[0] = _mm256_cmpgt_epi32(kZero, u[0]);
+          sign[1] = _mm256_cmpgt_epi32(kZero, u[1]);
+          sign[2] = _mm256_cmpgt_epi32(kZero, u[2]);
+          sign[3] = _mm256_cmpgt_epi32(kZero, u[3]);
+          sign[4] = _mm256_cmpgt_epi32(kZero, u[4]);
+          sign[5] = _mm256_cmpgt_epi32(kZero, u[5]);
+          sign[6] = _mm256_cmpgt_epi32(kZero, u[6]);
+          sign[7] = _mm256_cmpgt_epi32(kZero, u[7]);
+
+          u[0] = _mm256_sub_epi32(u[0], sign[0]);
+          u[1] = _mm256_sub_epi32(u[1], sign[1]);
+          u[2] = _mm256_sub_epi32(u[2], sign[2]);
+          u[3] = _mm256_sub_epi32(u[3], sign[3]);
+          u[4] = _mm256_sub_epi32(u[4], sign[4]);
+          u[5] = _mm256_sub_epi32(u[5], sign[5]);
+          u[6] = _mm256_sub_epi32(u[6], sign[6]);
+          u[7] = _mm256_sub_epi32(u[7], sign[7]);
+
+          u[0] = _mm256_add_epi32(u[0], K32One);
+          u[1] = _mm256_add_epi32(u[1], K32One);
+          u[2] = _mm256_add_epi32(u[2], K32One);
+          u[3] = _mm256_add_epi32(u[3], K32One);
+          u[4] = _mm256_add_epi32(u[4], K32One);
+          u[5] = _mm256_add_epi32(u[5], K32One);
+          u[6] = _mm256_add_epi32(u[6], K32One);
+          u[7] = _mm256_add_epi32(u[7], K32One);
+
+          u[0] = _mm256_srai_epi32(u[0], 2);
+          u[1] = _mm256_srai_epi32(u[1], 2);
+          u[2] = _mm256_srai_epi32(u[2], 2);
+          u[3] = _mm256_srai_epi32(u[3], 2);
+          u[4] = _mm256_srai_epi32(u[4], 2);
+          u[5] = _mm256_srai_epi32(u[5], 2);
+          u[6] = _mm256_srai_epi32(u[6], 2);
+          u[7] = _mm256_srai_epi32(u[7], 2);
+
+          out[ 4] = _mm256_packs_epi32(u[0], u[1]);
+          out[20] = _mm256_packs_epi32(u[2], u[3]);
+          out[12] = _mm256_packs_epi32(u[4], u[5]);
+          out[28] = _mm256_packs_epi32(u[6], u[7]);
+        }
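+        // The cmpgt/sub/add/srai sequence above is a vector form of
+        // round-half-away-from-zero followed by a divide by four; a scalar
+        // sketch of the same rounding (it matches the C row pass later in
+        // this patch) would be roughly:
+        //   out[j] = (tran_low_t)((u[j] + 1 + (u[j] < 0)) >> 2);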
+        {
+          lstep3[16] = _mm256_add_epi32(lstep2[18], lstep1[16]);
+          lstep3[17] = _mm256_add_epi32(lstep2[19], lstep1[17]);
+          lstep3[18] = _mm256_sub_epi32(lstep1[16], lstep2[18]);
+          lstep3[19] = _mm256_sub_epi32(lstep1[17], lstep2[19]);
+          lstep3[20] = _mm256_sub_epi32(lstep1[22], lstep2[20]);
+          lstep3[21] = _mm256_sub_epi32(lstep1[23], lstep2[21]);
+          lstep3[22] = _mm256_add_epi32(lstep2[20], lstep1[22]);
+          lstep3[23] = _mm256_add_epi32(lstep2[21], lstep1[23]);
+          lstep3[24] = _mm256_add_epi32(lstep2[26], lstep1[24]);
+          lstep3[25] = _mm256_add_epi32(lstep2[27], lstep1[25]);
+          lstep3[26] = _mm256_sub_epi32(lstep1[24], lstep2[26]);
+          lstep3[27] = _mm256_sub_epi32(lstep1[25], lstep2[27]);
+          lstep3[28] = _mm256_sub_epi32(lstep1[30], lstep2[28]);
+          lstep3[29] = _mm256_sub_epi32(lstep1[31], lstep2[29]);
+          lstep3[30] = _mm256_add_epi32(lstep2[28], lstep1[30]);
+          lstep3[31] = _mm256_add_epi32(lstep2[29], lstep1[31]);
+        }
+        {
+          const __m256i k32_m04_p28 = pair256_set_epi32(-cospi_4_64, cospi_28_64);
+          const __m256i k32_m28_m04 = pair256_set_epi32(-cospi_28_64, -cospi_4_64);
+          const __m256i k32_m20_p12 = pair256_set_epi32(-cospi_20_64, cospi_12_64);
+          const __m256i k32_m12_m20 = pair256_set_epi32(-cospi_12_64,
+                                                        -cospi_20_64);
+          const __m256i k32_p12_p20 = pair256_set_epi32(cospi_12_64, cospi_20_64);
+          const __m256i k32_p28_p04 = pair256_set_epi32(cospi_28_64, cospi_4_64);
+
+          u[ 0] = _mm256_unpacklo_epi32(lstep2[34], lstep2[60]);
+          u[ 1] = _mm256_unpackhi_epi32(lstep2[34], lstep2[60]);
+          u[ 2] = _mm256_unpacklo_epi32(lstep2[35], lstep2[61]);
+          u[ 3] = _mm256_unpackhi_epi32(lstep2[35], lstep2[61]);
+          u[ 4] = _mm256_unpacklo_epi32(lstep2[36], lstep2[58]);
+          u[ 5] = _mm256_unpackhi_epi32(lstep2[36], lstep2[58]);
+          u[ 6] = _mm256_unpacklo_epi32(lstep2[37], lstep2[59]);
+          u[ 7] = _mm256_unpackhi_epi32(lstep2[37], lstep2[59]);
+          u[ 8] = _mm256_unpacklo_epi32(lstep2[42], lstep2[52]);
+          u[ 9] = _mm256_unpackhi_epi32(lstep2[42], lstep2[52]);
+          u[10] = _mm256_unpacklo_epi32(lstep2[43], lstep2[53]);
+          u[11] = _mm256_unpackhi_epi32(lstep2[43], lstep2[53]);
+          u[12] = _mm256_unpacklo_epi32(lstep2[44], lstep2[50]);
+          u[13] = _mm256_unpackhi_epi32(lstep2[44], lstep2[50]);
+          u[14] = _mm256_unpacklo_epi32(lstep2[45], lstep2[51]);
+          u[15] = _mm256_unpackhi_epi32(lstep2[45], lstep2[51]);
+
+          v[ 0] = k_madd_epi32_avx2(u[ 0], k32_m04_p28);
+          v[ 1] = k_madd_epi32_avx2(u[ 1], k32_m04_p28);
+          v[ 2] = k_madd_epi32_avx2(u[ 2], k32_m04_p28);
+          v[ 3] = k_madd_epi32_avx2(u[ 3], k32_m04_p28);
+          v[ 4] = k_madd_epi32_avx2(u[ 4], k32_m28_m04);
+          v[ 5] = k_madd_epi32_avx2(u[ 5], k32_m28_m04);
+          v[ 6] = k_madd_epi32_avx2(u[ 6], k32_m28_m04);
+          v[ 7] = k_madd_epi32_avx2(u[ 7], k32_m28_m04);
+          v[ 8] = k_madd_epi32_avx2(u[ 8], k32_m20_p12);
+          v[ 9] = k_madd_epi32_avx2(u[ 9], k32_m20_p12);
+          v[10] = k_madd_epi32_avx2(u[10], k32_m20_p12);
+          v[11] = k_madd_epi32_avx2(u[11], k32_m20_p12);
+          v[12] = k_madd_epi32_avx2(u[12], k32_m12_m20);
+          v[13] = k_madd_epi32_avx2(u[13], k32_m12_m20);
+          v[14] = k_madd_epi32_avx2(u[14], k32_m12_m20);
+          v[15] = k_madd_epi32_avx2(u[15], k32_m12_m20);
+          v[16] = k_madd_epi32_avx2(u[12], k32_m20_p12);
+          v[17] = k_madd_epi32_avx2(u[13], k32_m20_p12);
+          v[18] = k_madd_epi32_avx2(u[14], k32_m20_p12);
+          v[19] = k_madd_epi32_avx2(u[15], k32_m20_p12);
+          v[20] = k_madd_epi32_avx2(u[ 8], k32_p12_p20);
+          v[21] = k_madd_epi32_avx2(u[ 9], k32_p12_p20);
+          v[22] = k_madd_epi32_avx2(u[10], k32_p12_p20);
+          v[23] = k_madd_epi32_avx2(u[11], k32_p12_p20);
+          v[24] = k_madd_epi32_avx2(u[ 4], k32_m04_p28);
+          v[25] = k_madd_epi32_avx2(u[ 5], k32_m04_p28);
+          v[26] = k_madd_epi32_avx2(u[ 6], k32_m04_p28);
+          v[27] = k_madd_epi32_avx2(u[ 7], k32_m04_p28);
+          v[28] = k_madd_epi32_avx2(u[ 0], k32_p28_p04);
+          v[29] = k_madd_epi32_avx2(u[ 1], k32_p28_p04);
+          v[30] = k_madd_epi32_avx2(u[ 2], k32_p28_p04);
+          v[31] = k_madd_epi32_avx2(u[ 3], k32_p28_p04);
+
+          u[ 0] = k_packs_epi64_avx2(v[ 0], v[ 1]);
+          u[ 1] = k_packs_epi64_avx2(v[ 2], v[ 3]);
+          u[ 2] = k_packs_epi64_avx2(v[ 4], v[ 5]);
+          u[ 3] = k_packs_epi64_avx2(v[ 6], v[ 7]);
+          u[ 4] = k_packs_epi64_avx2(v[ 8], v[ 9]);
+          u[ 5] = k_packs_epi64_avx2(v[10], v[11]);
+          u[ 6] = k_packs_epi64_avx2(v[12], v[13]);
+          u[ 7] = k_packs_epi64_avx2(v[14], v[15]);
+          u[ 8] = k_packs_epi64_avx2(v[16], v[17]);
+          u[ 9] = k_packs_epi64_avx2(v[18], v[19]);
+          u[10] = k_packs_epi64_avx2(v[20], v[21]);
+          u[11] = k_packs_epi64_avx2(v[22], v[23]);
+          u[12] = k_packs_epi64_avx2(v[24], v[25]);
+          u[13] = k_packs_epi64_avx2(v[26], v[27]);
+          u[14] = k_packs_epi64_avx2(v[28], v[29]);
+          u[15] = k_packs_epi64_avx2(v[30], v[31]);
+
+          v[ 0] = _mm256_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
+          v[ 1] = _mm256_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
+          v[ 2] = _mm256_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
+          v[ 3] = _mm256_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
+          v[ 4] = _mm256_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
+          v[ 5] = _mm256_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
+          v[ 6] = _mm256_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
+          v[ 7] = _mm256_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
+          v[ 8] = _mm256_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
+          v[ 9] = _mm256_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
+          v[10] = _mm256_add_epi32(u[10], k__DCT_CONST_ROUNDING);
+          v[11] = _mm256_add_epi32(u[11], k__DCT_CONST_ROUNDING);
+          v[12] = _mm256_add_epi32(u[12], k__DCT_CONST_ROUNDING);
+          v[13] = _mm256_add_epi32(u[13], k__DCT_CONST_ROUNDING);
+          v[14] = _mm256_add_epi32(u[14], k__DCT_CONST_ROUNDING);
+          v[15] = _mm256_add_epi32(u[15], k__DCT_CONST_ROUNDING);
+
+          lstep3[34] = _mm256_srai_epi32(v[ 0], DCT_CONST_BITS);
+          lstep3[35] = _mm256_srai_epi32(v[ 1], DCT_CONST_BITS);
+          lstep3[36] = _mm256_srai_epi32(v[ 2], DCT_CONST_BITS);
+          lstep3[37] = _mm256_srai_epi32(v[ 3], DCT_CONST_BITS);
+          lstep3[42] = _mm256_srai_epi32(v[ 4], DCT_CONST_BITS);
+          lstep3[43] = _mm256_srai_epi32(v[ 5], DCT_CONST_BITS);
+          lstep3[44] = _mm256_srai_epi32(v[ 6], DCT_CONST_BITS);
+          lstep3[45] = _mm256_srai_epi32(v[ 7], DCT_CONST_BITS);
+          lstep3[50] = _mm256_srai_epi32(v[ 8], DCT_CONST_BITS);
+          lstep3[51] = _mm256_srai_epi32(v[ 9], DCT_CONST_BITS);
+          lstep3[52] = _mm256_srai_epi32(v[10], DCT_CONST_BITS);
+          lstep3[53] = _mm256_srai_epi32(v[11], DCT_CONST_BITS);
+          lstep3[58] = _mm256_srai_epi32(v[12], DCT_CONST_BITS);
+          lstep3[59] = _mm256_srai_epi32(v[13], DCT_CONST_BITS);
+          lstep3[60] = _mm256_srai_epi32(v[14], DCT_CONST_BITS);
+          lstep3[61] = _mm256_srai_epi32(v[15], DCT_CONST_BITS);
+        }
+        // stage 7
+        {
+          const __m256i k32_p30_p02 = pair256_set_epi32(cospi_30_64, cospi_2_64);
+          const __m256i k32_p14_p18 = pair256_set_epi32(cospi_14_64, cospi_18_64);
+          const __m256i k32_p22_p10 = pair256_set_epi32(cospi_22_64, cospi_10_64);
+          const __m256i k32_p06_p26 = pair256_set_epi32(cospi_6_64,  cospi_26_64);
+          const __m256i k32_m26_p06 = pair256_set_epi32(-cospi_26_64, cospi_6_64);
+          const __m256i k32_m10_p22 = pair256_set_epi32(-cospi_10_64, cospi_22_64);
+          const __m256i k32_m18_p14 = pair256_set_epi32(-cospi_18_64, cospi_14_64);
+          const __m256i k32_m02_p30 = pair256_set_epi32(-cospi_2_64, cospi_30_64);
+
+          u[ 0] = _mm256_unpacklo_epi32(lstep3[16], lstep3[30]);
+          u[ 1] = _mm256_unpackhi_epi32(lstep3[16], lstep3[30]);
+          u[ 2] = _mm256_unpacklo_epi32(lstep3[17], lstep3[31]);
+          u[ 3] = _mm256_unpackhi_epi32(lstep3[17], lstep3[31]);
+          u[ 4] = _mm256_unpacklo_epi32(lstep3[18], lstep3[28]);
+          u[ 5] = _mm256_unpackhi_epi32(lstep3[18], lstep3[28]);
+          u[ 6] = _mm256_unpacklo_epi32(lstep3[19], lstep3[29]);
+          u[ 7] = _mm256_unpackhi_epi32(lstep3[19], lstep3[29]);
+          u[ 8] = _mm256_unpacklo_epi32(lstep3[20], lstep3[26]);
+          u[ 9] = _mm256_unpackhi_epi32(lstep3[20], lstep3[26]);
+          u[10] = _mm256_unpacklo_epi32(lstep3[21], lstep3[27]);
+          u[11] = _mm256_unpackhi_epi32(lstep3[21], lstep3[27]);
+          u[12] = _mm256_unpacklo_epi32(lstep3[22], lstep3[24]);
+          u[13] = _mm256_unpackhi_epi32(lstep3[22], lstep3[24]);
+          u[14] = _mm256_unpacklo_epi32(lstep3[23], lstep3[25]);
+          u[15] = _mm256_unpackhi_epi32(lstep3[23], lstep3[25]);
+
+          v[ 0] = k_madd_epi32_avx2(u[ 0], k32_p30_p02);
+          v[ 1] = k_madd_epi32_avx2(u[ 1], k32_p30_p02);
+          v[ 2] = k_madd_epi32_avx2(u[ 2], k32_p30_p02);
+          v[ 3] = k_madd_epi32_avx2(u[ 3], k32_p30_p02);
+          v[ 4] = k_madd_epi32_avx2(u[ 4], k32_p14_p18);
+          v[ 5] = k_madd_epi32_avx2(u[ 5], k32_p14_p18);
+          v[ 6] = k_madd_epi32_avx2(u[ 6], k32_p14_p18);
+          v[ 7] = k_madd_epi32_avx2(u[ 7], k32_p14_p18);
+          v[ 8] = k_madd_epi32_avx2(u[ 8], k32_p22_p10);
+          v[ 9] = k_madd_epi32_avx2(u[ 9], k32_p22_p10);
+          v[10] = k_madd_epi32_avx2(u[10], k32_p22_p10);
+          v[11] = k_madd_epi32_avx2(u[11], k32_p22_p10);
+          v[12] = k_madd_epi32_avx2(u[12], k32_p06_p26);
+          v[13] = k_madd_epi32_avx2(u[13], k32_p06_p26);
+          v[14] = k_madd_epi32_avx2(u[14], k32_p06_p26);
+          v[15] = k_madd_epi32_avx2(u[15], k32_p06_p26);
+          v[16] = k_madd_epi32_avx2(u[12], k32_m26_p06);
+          v[17] = k_madd_epi32_avx2(u[13], k32_m26_p06);
+          v[18] = k_madd_epi32_avx2(u[14], k32_m26_p06);
+          v[19] = k_madd_epi32_avx2(u[15], k32_m26_p06);
+          v[20] = k_madd_epi32_avx2(u[ 8], k32_m10_p22);
+          v[21] = k_madd_epi32_avx2(u[ 9], k32_m10_p22);
+          v[22] = k_madd_epi32_avx2(u[10], k32_m10_p22);
+          v[23] = k_madd_epi32_avx2(u[11], k32_m10_p22);
+          v[24] = k_madd_epi32_avx2(u[ 4], k32_m18_p14);
+          v[25] = k_madd_epi32_avx2(u[ 5], k32_m18_p14);
+          v[26] = k_madd_epi32_avx2(u[ 6], k32_m18_p14);
+          v[27] = k_madd_epi32_avx2(u[ 7], k32_m18_p14);
+          v[28] = k_madd_epi32_avx2(u[ 0], k32_m02_p30);
+          v[29] = k_madd_epi32_avx2(u[ 1], k32_m02_p30);
+          v[30] = k_madd_epi32_avx2(u[ 2], k32_m02_p30);
+          v[31] = k_madd_epi32_avx2(u[ 3], k32_m02_p30);
+
+          u[ 0] = k_packs_epi64_avx2(v[ 0], v[ 1]);
+          u[ 1] = k_packs_epi64_avx2(v[ 2], v[ 3]);
+          u[ 2] = k_packs_epi64_avx2(v[ 4], v[ 5]);
+          u[ 3] = k_packs_epi64_avx2(v[ 6], v[ 7]);
+          u[ 4] = k_packs_epi64_avx2(v[ 8], v[ 9]);
+          u[ 5] = k_packs_epi64_avx2(v[10], v[11]);
+          u[ 6] = k_packs_epi64_avx2(v[12], v[13]);
+          u[ 7] = k_packs_epi64_avx2(v[14], v[15]);
+          u[ 8] = k_packs_epi64_avx2(v[16], v[17]);
+          u[ 9] = k_packs_epi64_avx2(v[18], v[19]);
+          u[10] = k_packs_epi64_avx2(v[20], v[21]);
+          u[11] = k_packs_epi64_avx2(v[22], v[23]);
+          u[12] = k_packs_epi64_avx2(v[24], v[25]);
+          u[13] = k_packs_epi64_avx2(v[26], v[27]);
+          u[14] = k_packs_epi64_avx2(v[28], v[29]);
+          u[15] = k_packs_epi64_avx2(v[30], v[31]);
+
+          v[ 0] = _mm256_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
+          v[ 1] = _mm256_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
+          v[ 2] = _mm256_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
+          v[ 3] = _mm256_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
+          v[ 4] = _mm256_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
+          v[ 5] = _mm256_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
+          v[ 6] = _mm256_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
+          v[ 7] = _mm256_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
+          v[ 8] = _mm256_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
+          v[ 9] = _mm256_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
+          v[10] = _mm256_add_epi32(u[10], k__DCT_CONST_ROUNDING);
+          v[11] = _mm256_add_epi32(u[11], k__DCT_CONST_ROUNDING);
+          v[12] = _mm256_add_epi32(u[12], k__DCT_CONST_ROUNDING);
+          v[13] = _mm256_add_epi32(u[13], k__DCT_CONST_ROUNDING);
+          v[14] = _mm256_add_epi32(u[14], k__DCT_CONST_ROUNDING);
+          v[15] = _mm256_add_epi32(u[15], k__DCT_CONST_ROUNDING);
+
+          u[ 0] = _mm256_srai_epi32(v[ 0], DCT_CONST_BITS);
+          u[ 1] = _mm256_srai_epi32(v[ 1], DCT_CONST_BITS);
+          u[ 2] = _mm256_srai_epi32(v[ 2], DCT_CONST_BITS);
+          u[ 3] = _mm256_srai_epi32(v[ 3], DCT_CONST_BITS);
+          u[ 4] = _mm256_srai_epi32(v[ 4], DCT_CONST_BITS);
+          u[ 5] = _mm256_srai_epi32(v[ 5], DCT_CONST_BITS);
+          u[ 6] = _mm256_srai_epi32(v[ 6], DCT_CONST_BITS);
+          u[ 7] = _mm256_srai_epi32(v[ 7], DCT_CONST_BITS);
+          u[ 8] = _mm256_srai_epi32(v[ 8], DCT_CONST_BITS);
+          u[ 9] = _mm256_srai_epi32(v[ 9], DCT_CONST_BITS);
+          u[10] = _mm256_srai_epi32(v[10], DCT_CONST_BITS);
+          u[11] = _mm256_srai_epi32(v[11], DCT_CONST_BITS);
+          u[12] = _mm256_srai_epi32(v[12], DCT_CONST_BITS);
+          u[13] = _mm256_srai_epi32(v[13], DCT_CONST_BITS);
+          u[14] = _mm256_srai_epi32(v[14], DCT_CONST_BITS);
+          u[15] = _mm256_srai_epi32(v[15], DCT_CONST_BITS);
+
+          v[ 0] = _mm256_cmpgt_epi32(kZero, u[ 0]);
+          v[ 1] = _mm256_cmpgt_epi32(kZero, u[ 1]);
+          v[ 2] = _mm256_cmpgt_epi32(kZero, u[ 2]);
+          v[ 3] = _mm256_cmpgt_epi32(kZero, u[ 3]);
+          v[ 4] = _mm256_cmpgt_epi32(kZero, u[ 4]);
+          v[ 5] = _mm256_cmpgt_epi32(kZero, u[ 5]);
+          v[ 6] = _mm256_cmpgt_epi32(kZero, u[ 6]);
+          v[ 7] = _mm256_cmpgt_epi32(kZero, u[ 7]);
+          v[ 8] = _mm256_cmpgt_epi32(kZero, u[ 8]);
+          v[ 9] = _mm256_cmpgt_epi32(kZero, u[ 9]);
+          v[10] = _mm256_cmpgt_epi32(kZero, u[10]);
+          v[11] = _mm256_cmpgt_epi32(kZero, u[11]);
+          v[12] = _mm256_cmpgt_epi32(kZero, u[12]);
+          v[13] = _mm256_cmpgt_epi32(kZero, u[13]);
+          v[14] = _mm256_cmpgt_epi32(kZero, u[14]);
+          v[15] = _mm256_cmpgt_epi32(kZero, u[15]);
+
+          u[ 0] = _mm256_sub_epi32(u[ 0], v[ 0]);
+          u[ 1] = _mm256_sub_epi32(u[ 1], v[ 1]);
+          u[ 2] = _mm256_sub_epi32(u[ 2], v[ 2]);
+          u[ 3] = _mm256_sub_epi32(u[ 3], v[ 3]);
+          u[ 4] = _mm256_sub_epi32(u[ 4], v[ 4]);
+          u[ 5] = _mm256_sub_epi32(u[ 5], v[ 5]);
+          u[ 6] = _mm256_sub_epi32(u[ 6], v[ 6]);
+          u[ 7] = _mm256_sub_epi32(u[ 7], v[ 7]);
+          u[ 8] = _mm256_sub_epi32(u[ 8], v[ 8]);
+          u[ 9] = _mm256_sub_epi32(u[ 9], v[ 9]);
+          u[10] = _mm256_sub_epi32(u[10], v[10]);
+          u[11] = _mm256_sub_epi32(u[11], v[11]);
+          u[12] = _mm256_sub_epi32(u[12], v[12]);
+          u[13] = _mm256_sub_epi32(u[13], v[13]);
+          u[14] = _mm256_sub_epi32(u[14], v[14]);
+          u[15] = _mm256_sub_epi32(u[15], v[15]);
+
+          v[ 0] = _mm256_add_epi32(u[ 0], K32One);
+          v[ 1] = _mm256_add_epi32(u[ 1], K32One);
+          v[ 2] = _mm256_add_epi32(u[ 2], K32One);
+          v[ 3] = _mm256_add_epi32(u[ 3], K32One);
+          v[ 4] = _mm256_add_epi32(u[ 4], K32One);
+          v[ 5] = _mm256_add_epi32(u[ 5], K32One);
+          v[ 6] = _mm256_add_epi32(u[ 6], K32One);
+          v[ 7] = _mm256_add_epi32(u[ 7], K32One);
+          v[ 8] = _mm256_add_epi32(u[ 8], K32One);
+          v[ 9] = _mm256_add_epi32(u[ 9], K32One);
+          v[10] = _mm256_add_epi32(u[10], K32One);
+          v[11] = _mm256_add_epi32(u[11], K32One);
+          v[12] = _mm256_add_epi32(u[12], K32One);
+          v[13] = _mm256_add_epi32(u[13], K32One);
+          v[14] = _mm256_add_epi32(u[14], K32One);
+          v[15] = _mm256_add_epi32(u[15], K32One);
+
+          u[ 0] = _mm256_srai_epi32(v[ 0], 2);
+          u[ 1] = _mm256_srai_epi32(v[ 1], 2);
+          u[ 2] = _mm256_srai_epi32(v[ 2], 2);
+          u[ 3] = _mm256_srai_epi32(v[ 3], 2);
+          u[ 4] = _mm256_srai_epi32(v[ 4], 2);
+          u[ 5] = _mm256_srai_epi32(v[ 5], 2);
+          u[ 6] = _mm256_srai_epi32(v[ 6], 2);
+          u[ 7] = _mm256_srai_epi32(v[ 7], 2);
+          u[ 8] = _mm256_srai_epi32(v[ 8], 2);
+          u[ 9] = _mm256_srai_epi32(v[ 9], 2);
+          u[10] = _mm256_srai_epi32(v[10], 2);
+          u[11] = _mm256_srai_epi32(v[11], 2);
+          u[12] = _mm256_srai_epi32(v[12], 2);
+          u[13] = _mm256_srai_epi32(v[13], 2);
+          u[14] = _mm256_srai_epi32(v[14], 2);
+          u[15] = _mm256_srai_epi32(v[15], 2);
+
+          out[ 2] = _mm256_packs_epi32(u[0], u[1]);
+          out[18] = _mm256_packs_epi32(u[2], u[3]);
+          out[10] = _mm256_packs_epi32(u[4], u[5]);
+          out[26] = _mm256_packs_epi32(u[6], u[7]);
+          out[ 6] = _mm256_packs_epi32(u[8], u[9]);
+          out[22] = _mm256_packs_epi32(u[10], u[11]);
+          out[14] = _mm256_packs_epi32(u[12], u[13]);
+          out[30] = _mm256_packs_epi32(u[14], u[15]);
+        }
+        {
+          lstep1[32] = _mm256_add_epi32(lstep3[34], lstep2[32]);
+          lstep1[33] = _mm256_add_epi32(lstep3[35], lstep2[33]);
+          lstep1[34] = _mm256_sub_epi32(lstep2[32], lstep3[34]);
+          lstep1[35] = _mm256_sub_epi32(lstep2[33], lstep3[35]);
+          lstep1[36] = _mm256_sub_epi32(lstep2[38], lstep3[36]);
+          lstep1[37] = _mm256_sub_epi32(lstep2[39], lstep3[37]);
+          lstep1[38] = _mm256_add_epi32(lstep3[36], lstep2[38]);
+          lstep1[39] = _mm256_add_epi32(lstep3[37], lstep2[39]);
+          lstep1[40] = _mm256_add_epi32(lstep3[42], lstep2[40]);
+          lstep1[41] = _mm256_add_epi32(lstep3[43], lstep2[41]);
+          lstep1[42] = _mm256_sub_epi32(lstep2[40], lstep3[42]);
+          lstep1[43] = _mm256_sub_epi32(lstep2[41], lstep3[43]);
+          lstep1[44] = _mm256_sub_epi32(lstep2[46], lstep3[44]);
+          lstep1[45] = _mm256_sub_epi32(lstep2[47], lstep3[45]);
+          lstep1[46] = _mm256_add_epi32(lstep3[44], lstep2[46]);
+          lstep1[47] = _mm256_add_epi32(lstep3[45], lstep2[47]);
+          lstep1[48] = _mm256_add_epi32(lstep3[50], lstep2[48]);
+          lstep1[49] = _mm256_add_epi32(lstep3[51], lstep2[49]);
+          lstep1[50] = _mm256_sub_epi32(lstep2[48], lstep3[50]);
+          lstep1[51] = _mm256_sub_epi32(lstep2[49], lstep3[51]);
+          lstep1[52] = _mm256_sub_epi32(lstep2[54], lstep3[52]);
+          lstep1[53] = _mm256_sub_epi32(lstep2[55], lstep3[53]);
+          lstep1[54] = _mm256_add_epi32(lstep3[52], lstep2[54]);
+          lstep1[55] = _mm256_add_epi32(lstep3[53], lstep2[55]);
+          lstep1[56] = _mm256_add_epi32(lstep3[58], lstep2[56]);
+          lstep1[57] = _mm256_add_epi32(lstep3[59], lstep2[57]);
+          lstep1[58] = _mm256_sub_epi32(lstep2[56], lstep3[58]);
+          lstep1[59] = _mm256_sub_epi32(lstep2[57], lstep3[59]);
+          lstep1[60] = _mm256_sub_epi32(lstep2[62], lstep3[60]);
+          lstep1[61] = _mm256_sub_epi32(lstep2[63], lstep3[61]);
+          lstep1[62] = _mm256_add_epi32(lstep3[60], lstep2[62]);
+          lstep1[63] = _mm256_add_epi32(lstep3[61], lstep2[63]);
+        }
+        // stage 8
+        {
+          const __m256i k32_p31_p01 = pair256_set_epi32(cospi_31_64, cospi_1_64);
+          const __m256i k32_p15_p17 = pair256_set_epi32(cospi_15_64, cospi_17_64);
+          const __m256i k32_p23_p09 = pair256_set_epi32(cospi_23_64, cospi_9_64);
+          const __m256i k32_p07_p25 = pair256_set_epi32(cospi_7_64, cospi_25_64);
+          const __m256i k32_m25_p07 = pair256_set_epi32(-cospi_25_64, cospi_7_64);
+          const __m256i k32_m09_p23 = pair256_set_epi32(-cospi_9_64, cospi_23_64);
+          const __m256i k32_m17_p15 = pair256_set_epi32(-cospi_17_64, cospi_15_64);
+          const __m256i k32_m01_p31 = pair256_set_epi32(-cospi_1_64, cospi_31_64);
+
+          u[ 0] = _mm256_unpacklo_epi32(lstep1[32], lstep1[62]);
+          u[ 1] = _mm256_unpackhi_epi32(lstep1[32], lstep1[62]);
+          u[ 2] = _mm256_unpacklo_epi32(lstep1[33], lstep1[63]);
+          u[ 3] = _mm256_unpackhi_epi32(lstep1[33], lstep1[63]);
+          u[ 4] = _mm256_unpacklo_epi32(lstep1[34], lstep1[60]);
+          u[ 5] = _mm256_unpackhi_epi32(lstep1[34], lstep1[60]);
+          u[ 6] = _mm256_unpacklo_epi32(lstep1[35], lstep1[61]);
+          u[ 7] = _mm256_unpackhi_epi32(lstep1[35], lstep1[61]);
+          u[ 8] = _mm256_unpacklo_epi32(lstep1[36], lstep1[58]);
+          u[ 9] = _mm256_unpackhi_epi32(lstep1[36], lstep1[58]);
+          u[10] = _mm256_unpacklo_epi32(lstep1[37], lstep1[59]);
+          u[11] = _mm256_unpackhi_epi32(lstep1[37], lstep1[59]);
+          u[12] = _mm256_unpacklo_epi32(lstep1[38], lstep1[56]);
+          u[13] = _mm256_unpackhi_epi32(lstep1[38], lstep1[56]);
+          u[14] = _mm256_unpacklo_epi32(lstep1[39], lstep1[57]);
+          u[15] = _mm256_unpackhi_epi32(lstep1[39], lstep1[57]);
+
+          v[ 0] = k_madd_epi32_avx2(u[ 0], k32_p31_p01);
+          v[ 1] = k_madd_epi32_avx2(u[ 1], k32_p31_p01);
+          v[ 2] = k_madd_epi32_avx2(u[ 2], k32_p31_p01);
+          v[ 3] = k_madd_epi32_avx2(u[ 3], k32_p31_p01);
+          v[ 4] = k_madd_epi32_avx2(u[ 4], k32_p15_p17);
+          v[ 5] = k_madd_epi32_avx2(u[ 5], k32_p15_p17);
+          v[ 6] = k_madd_epi32_avx2(u[ 6], k32_p15_p17);
+          v[ 7] = k_madd_epi32_avx2(u[ 7], k32_p15_p17);
+          v[ 8] = k_madd_epi32_avx2(u[ 8], k32_p23_p09);
+          v[ 9] = k_madd_epi32_avx2(u[ 9], k32_p23_p09);
+          v[10] = k_madd_epi32_avx2(u[10], k32_p23_p09);
+          v[11] = k_madd_epi32_avx2(u[11], k32_p23_p09);
+          v[12] = k_madd_epi32_avx2(u[12], k32_p07_p25);
+          v[13] = k_madd_epi32_avx2(u[13], k32_p07_p25);
+          v[14] = k_madd_epi32_avx2(u[14], k32_p07_p25);
+          v[15] = k_madd_epi32_avx2(u[15], k32_p07_p25);
+          v[16] = k_madd_epi32_avx2(u[12], k32_m25_p07);
+          v[17] = k_madd_epi32_avx2(u[13], k32_m25_p07);
+          v[18] = k_madd_epi32_avx2(u[14], k32_m25_p07);
+          v[19] = k_madd_epi32_avx2(u[15], k32_m25_p07);
+          v[20] = k_madd_epi32_avx2(u[ 8], k32_m09_p23);
+          v[21] = k_madd_epi32_avx2(u[ 9], k32_m09_p23);
+          v[22] = k_madd_epi32_avx2(u[10], k32_m09_p23);
+          v[23] = k_madd_epi32_avx2(u[11], k32_m09_p23);
+          v[24] = k_madd_epi32_avx2(u[ 4], k32_m17_p15);
+          v[25] = k_madd_epi32_avx2(u[ 5], k32_m17_p15);
+          v[26] = k_madd_epi32_avx2(u[ 6], k32_m17_p15);
+          v[27] = k_madd_epi32_avx2(u[ 7], k32_m17_p15);
+          v[28] = k_madd_epi32_avx2(u[ 0], k32_m01_p31);
+          v[29] = k_madd_epi32_avx2(u[ 1], k32_m01_p31);
+          v[30] = k_madd_epi32_avx2(u[ 2], k32_m01_p31);
+          v[31] = k_madd_epi32_avx2(u[ 3], k32_m01_p31);
+
+          u[ 0] = k_packs_epi64_avx2(v[ 0], v[ 1]);
+          u[ 1] = k_packs_epi64_avx2(v[ 2], v[ 3]);
+          u[ 2] = k_packs_epi64_avx2(v[ 4], v[ 5]);
+          u[ 3] = k_packs_epi64_avx2(v[ 6], v[ 7]);
+          u[ 4] = k_packs_epi64_avx2(v[ 8], v[ 9]);
+          u[ 5] = k_packs_epi64_avx2(v[10], v[11]);
+          u[ 6] = k_packs_epi64_avx2(v[12], v[13]);
+          u[ 7] = k_packs_epi64_avx2(v[14], v[15]);
+          u[ 8] = k_packs_epi64_avx2(v[16], v[17]);
+          u[ 9] = k_packs_epi64_avx2(v[18], v[19]);
+          u[10] = k_packs_epi64_avx2(v[20], v[21]);
+          u[11] = k_packs_epi64_avx2(v[22], v[23]);
+          u[12] = k_packs_epi64_avx2(v[24], v[25]);
+          u[13] = k_packs_epi64_avx2(v[26], v[27]);
+          u[14] = k_packs_epi64_avx2(v[28], v[29]);
+          u[15] = k_packs_epi64_avx2(v[30], v[31]);
+
+          v[ 0] = _mm256_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
+          v[ 1] = _mm256_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
+          v[ 2] = _mm256_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
+          v[ 3] = _mm256_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
+          v[ 4] = _mm256_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
+          v[ 5] = _mm256_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
+          v[ 6] = _mm256_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
+          v[ 7] = _mm256_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
+          v[ 8] = _mm256_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
+          v[ 9] = _mm256_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
+          v[10] = _mm256_add_epi32(u[10], k__DCT_CONST_ROUNDING);
+          v[11] = _mm256_add_epi32(u[11], k__DCT_CONST_ROUNDING);
+          v[12] = _mm256_add_epi32(u[12], k__DCT_CONST_ROUNDING);
+          v[13] = _mm256_add_epi32(u[13], k__DCT_CONST_ROUNDING);
+          v[14] = _mm256_add_epi32(u[14], k__DCT_CONST_ROUNDING);
+          v[15] = _mm256_add_epi32(u[15], k__DCT_CONST_ROUNDING);
+
+          u[ 0] = _mm256_srai_epi32(v[ 0], DCT_CONST_BITS);
+          u[ 1] = _mm256_srai_epi32(v[ 1], DCT_CONST_BITS);
+          u[ 2] = _mm256_srai_epi32(v[ 2], DCT_CONST_BITS);
+          u[ 3] = _mm256_srai_epi32(v[ 3], DCT_CONST_BITS);
+          u[ 4] = _mm256_srai_epi32(v[ 4], DCT_CONST_BITS);
+          u[ 5] = _mm256_srai_epi32(v[ 5], DCT_CONST_BITS);
+          u[ 6] = _mm256_srai_epi32(v[ 6], DCT_CONST_BITS);
+          u[ 7] = _mm256_srai_epi32(v[ 7], DCT_CONST_BITS);
+          u[ 8] = _mm256_srai_epi32(v[ 8], DCT_CONST_BITS);
+          u[ 9] = _mm256_srai_epi32(v[ 9], DCT_CONST_BITS);
+          u[10] = _mm256_srai_epi32(v[10], DCT_CONST_BITS);
+          u[11] = _mm256_srai_epi32(v[11], DCT_CONST_BITS);
+          u[12] = _mm256_srai_epi32(v[12], DCT_CONST_BITS);
+          u[13] = _mm256_srai_epi32(v[13], DCT_CONST_BITS);
+          u[14] = _mm256_srai_epi32(v[14], DCT_CONST_BITS);
+          u[15] = _mm256_srai_epi32(v[15], DCT_CONST_BITS);
+
+          v[ 0] = _mm256_cmpgt_epi32(kZero, u[ 0]);
+          v[ 1] = _mm256_cmpgt_epi32(kZero, u[ 1]);
+          v[ 2] = _mm256_cmpgt_epi32(kZero, u[ 2]);
+          v[ 3] = _mm256_cmpgt_epi32(kZero, u[ 3]);
+          v[ 4] = _mm256_cmpgt_epi32(kZero, u[ 4]);
+          v[ 5] = _mm256_cmpgt_epi32(kZero, u[ 5]);
+          v[ 6] = _mm256_cmpgt_epi32(kZero, u[ 6]);
+          v[ 7] = _mm256_cmpgt_epi32(kZero, u[ 7]);
+          v[ 8] = _mm256_cmpgt_epi32(kZero, u[ 8]);
+          v[ 9] = _mm256_cmpgt_epi32(kZero, u[ 9]);
+          v[10] = _mm256_cmpgt_epi32(kZero, u[10]);
+          v[11] = _mm256_cmpgt_epi32(kZero, u[11]);
+          v[12] = _mm256_cmpgt_epi32(kZero, u[12]);
+          v[13] = _mm256_cmpgt_epi32(kZero, u[13]);
+          v[14] = _mm256_cmpgt_epi32(kZero, u[14]);
+          v[15] = _mm256_cmpgt_epi32(kZero, u[15]);
+
+          u[ 0] = _mm256_sub_epi32(u[ 0], v[ 0]);
+          u[ 1] = _mm256_sub_epi32(u[ 1], v[ 1]);
+          u[ 2] = _mm256_sub_epi32(u[ 2], v[ 2]);
+          u[ 3] = _mm256_sub_epi32(u[ 3], v[ 3]);
+          u[ 4] = _mm256_sub_epi32(u[ 4], v[ 4]);
+          u[ 5] = _mm256_sub_epi32(u[ 5], v[ 5]);
+          u[ 6] = _mm256_sub_epi32(u[ 6], v[ 6]);
+          u[ 7] = _mm256_sub_epi32(u[ 7], v[ 7]);
+          u[ 8] = _mm256_sub_epi32(u[ 8], v[ 8]);
+          u[ 9] = _mm256_sub_epi32(u[ 9], v[ 9]);
+          u[10] = _mm256_sub_epi32(u[10], v[10]);
+          u[11] = _mm256_sub_epi32(u[11], v[11]);
+          u[12] = _mm256_sub_epi32(u[12], v[12]);
+          u[13] = _mm256_sub_epi32(u[13], v[13]);
+          u[14] = _mm256_sub_epi32(u[14], v[14]);
+          u[15] = _mm256_sub_epi32(u[15], v[15]);
+
+          v[0] = _mm256_add_epi32(u[0], K32One);
+          v[1] = _mm256_add_epi32(u[1], K32One);
+          v[2] = _mm256_add_epi32(u[2], K32One);
+          v[3] = _mm256_add_epi32(u[3], K32One);
+          v[4] = _mm256_add_epi32(u[4], K32One);
+          v[5] = _mm256_add_epi32(u[5], K32One);
+          v[6] = _mm256_add_epi32(u[6], K32One);
+          v[7] = _mm256_add_epi32(u[7], K32One);
+          v[8] = _mm256_add_epi32(u[8], K32One);
+          v[9] = _mm256_add_epi32(u[9], K32One);
+          v[10] = _mm256_add_epi32(u[10], K32One);
+          v[11] = _mm256_add_epi32(u[11], K32One);
+          v[12] = _mm256_add_epi32(u[12], K32One);
+          v[13] = _mm256_add_epi32(u[13], K32One);
+          v[14] = _mm256_add_epi32(u[14], K32One);
+          v[15] = _mm256_add_epi32(u[15], K32One);
+
+          u[0] = _mm256_srai_epi32(v[0], 2);
+          u[1] = _mm256_srai_epi32(v[1], 2);
+          u[2] = _mm256_srai_epi32(v[2], 2);
+          u[3] = _mm256_srai_epi32(v[3], 2);
+          u[4] = _mm256_srai_epi32(v[4], 2);
+          u[5] = _mm256_srai_epi32(v[5], 2);
+          u[6] = _mm256_srai_epi32(v[6], 2);
+          u[7] = _mm256_srai_epi32(v[7], 2);
+          u[8] = _mm256_srai_epi32(v[8], 2);
+          u[9] = _mm256_srai_epi32(v[9], 2);
+          u[10] = _mm256_srai_epi32(v[10], 2);
+          u[11] = _mm256_srai_epi32(v[11], 2);
+          u[12] = _mm256_srai_epi32(v[12], 2);
+          u[13] = _mm256_srai_epi32(v[13], 2);
+          u[14] = _mm256_srai_epi32(v[14], 2);
+          u[15] = _mm256_srai_epi32(v[15], 2);
+
+          out[ 1] = _mm256_packs_epi32(u[0], u[1]);
+          out[17] = _mm256_packs_epi32(u[2], u[3]);
+          out[ 9] = _mm256_packs_epi32(u[4], u[5]);
+          out[25] = _mm256_packs_epi32(u[6], u[7]);
+          out[ 7] = _mm256_packs_epi32(u[8], u[9]);
+          out[23] = _mm256_packs_epi32(u[10], u[11]);
+          out[15] = _mm256_packs_epi32(u[12], u[13]);
+          out[31] = _mm256_packs_epi32(u[14], u[15]);
+        }
+        {
+          const __m256i k32_p27_p05 = pair256_set_epi32(cospi_27_64, cospi_5_64);
+          const __m256i k32_p11_p21 = pair256_set_epi32(cospi_11_64, cospi_21_64);
+          const __m256i k32_p19_p13 = pair256_set_epi32(cospi_19_64, cospi_13_64);
+          const __m256i k32_p03_p29 = pair256_set_epi32(cospi_3_64, cospi_29_64);
+          const __m256i k32_m29_p03 = pair256_set_epi32(-cospi_29_64, cospi_3_64);
+          const __m256i k32_m13_p19 = pair256_set_epi32(-cospi_13_64, cospi_19_64);
+          const __m256i k32_m21_p11 = pair256_set_epi32(-cospi_21_64, cospi_11_64);
+          const __m256i k32_m05_p27 = pair256_set_epi32(-cospi_5_64, cospi_27_64);
+
+          u[ 0] = _mm256_unpacklo_epi32(lstep1[40], lstep1[54]);
+          u[ 1] = _mm256_unpackhi_epi32(lstep1[40], lstep1[54]);
+          u[ 2] = _mm256_unpacklo_epi32(lstep1[41], lstep1[55]);
+          u[ 3] = _mm256_unpackhi_epi32(lstep1[41], lstep1[55]);
+          u[ 4] = _mm256_unpacklo_epi32(lstep1[42], lstep1[52]);
+          u[ 5] = _mm256_unpackhi_epi32(lstep1[42], lstep1[52]);
+          u[ 6] = _mm256_unpacklo_epi32(lstep1[43], lstep1[53]);
+          u[ 7] = _mm256_unpackhi_epi32(lstep1[43], lstep1[53]);
+          u[ 8] = _mm256_unpacklo_epi32(lstep1[44], lstep1[50]);
+          u[ 9] = _mm256_unpackhi_epi32(lstep1[44], lstep1[50]);
+          u[10] = _mm256_unpacklo_epi32(lstep1[45], lstep1[51]);
+          u[11] = _mm256_unpackhi_epi32(lstep1[45], lstep1[51]);
+          u[12] = _mm256_unpacklo_epi32(lstep1[46], lstep1[48]);
+          u[13] = _mm256_unpackhi_epi32(lstep1[46], lstep1[48]);
+          u[14] = _mm256_unpacklo_epi32(lstep1[47], lstep1[49]);
+          u[15] = _mm256_unpackhi_epi32(lstep1[47], lstep1[49]);
+
+          v[ 0] = k_madd_epi32_avx2(u[ 0], k32_p27_p05);
+          v[ 1] = k_madd_epi32_avx2(u[ 1], k32_p27_p05);
+          v[ 2] = k_madd_epi32_avx2(u[ 2], k32_p27_p05);
+          v[ 3] = k_madd_epi32_avx2(u[ 3], k32_p27_p05);
+          v[ 4] = k_madd_epi32_avx2(u[ 4], k32_p11_p21);
+          v[ 5] = k_madd_epi32_avx2(u[ 5], k32_p11_p21);
+          v[ 6] = k_madd_epi32_avx2(u[ 6], k32_p11_p21);
+          v[ 7] = k_madd_epi32_avx2(u[ 7], k32_p11_p21);
+          v[ 8] = k_madd_epi32_avx2(u[ 8], k32_p19_p13);
+          v[ 9] = k_madd_epi32_avx2(u[ 9], k32_p19_p13);
+          v[10] = k_madd_epi32_avx2(u[10], k32_p19_p13);
+          v[11] = k_madd_epi32_avx2(u[11], k32_p19_p13);
+          v[12] = k_madd_epi32_avx2(u[12], k32_p03_p29);
+          v[13] = k_madd_epi32_avx2(u[13], k32_p03_p29);
+          v[14] = k_madd_epi32_avx2(u[14], k32_p03_p29);
+          v[15] = k_madd_epi32_avx2(u[15], k32_p03_p29);
+          v[16] = k_madd_epi32_avx2(u[12], k32_m29_p03);
+          v[17] = k_madd_epi32_avx2(u[13], k32_m29_p03);
+          v[18] = k_madd_epi32_avx2(u[14], k32_m29_p03);
+          v[19] = k_madd_epi32_avx2(u[15], k32_m29_p03);
+          v[20] = k_madd_epi32_avx2(u[ 8], k32_m13_p19);
+          v[21] = k_madd_epi32_avx2(u[ 9], k32_m13_p19);
+          v[22] = k_madd_epi32_avx2(u[10], k32_m13_p19);
+          v[23] = k_madd_epi32_avx2(u[11], k32_m13_p19);
+          v[24] = k_madd_epi32_avx2(u[ 4], k32_m21_p11);
+          v[25] = k_madd_epi32_avx2(u[ 5], k32_m21_p11);
+          v[26] = k_madd_epi32_avx2(u[ 6], k32_m21_p11);
+          v[27] = k_madd_epi32_avx2(u[ 7], k32_m21_p11);
+          v[28] = k_madd_epi32_avx2(u[ 0], k32_m05_p27);
+          v[29] = k_madd_epi32_avx2(u[ 1], k32_m05_p27);
+          v[30] = k_madd_epi32_avx2(u[ 2], k32_m05_p27);
+          v[31] = k_madd_epi32_avx2(u[ 3], k32_m05_p27);
+
+          u[ 0] = k_packs_epi64_avx2(v[ 0], v[ 1]);
+          u[ 1] = k_packs_epi64_avx2(v[ 2], v[ 3]);
+          u[ 2] = k_packs_epi64_avx2(v[ 4], v[ 5]);
+          u[ 3] = k_packs_epi64_avx2(v[ 6], v[ 7]);
+          u[ 4] = k_packs_epi64_avx2(v[ 8], v[ 9]);
+          u[ 5] = k_packs_epi64_avx2(v[10], v[11]);
+          u[ 6] = k_packs_epi64_avx2(v[12], v[13]);
+          u[ 7] = k_packs_epi64_avx2(v[14], v[15]);
+          u[ 8] = k_packs_epi64_avx2(v[16], v[17]);
+          u[ 9] = k_packs_epi64_avx2(v[18], v[19]);
+          u[10] = k_packs_epi64_avx2(v[20], v[21]);
+          u[11] = k_packs_epi64_avx2(v[22], v[23]);
+          u[12] = k_packs_epi64_avx2(v[24], v[25]);
+          u[13] = k_packs_epi64_avx2(v[26], v[27]);
+          u[14] = k_packs_epi64_avx2(v[28], v[29]);
+          u[15] = k_packs_epi64_avx2(v[30], v[31]);
+
+          v[ 0] = _mm256_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
+          v[ 1] = _mm256_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
+          v[ 2] = _mm256_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
+          v[ 3] = _mm256_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
+          v[ 4] = _mm256_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
+          v[ 5] = _mm256_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
+          v[ 6] = _mm256_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
+          v[ 7] = _mm256_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
+          v[ 8] = _mm256_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
+          v[ 9] = _mm256_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
+          v[10] = _mm256_add_epi32(u[10], k__DCT_CONST_ROUNDING);
+          v[11] = _mm256_add_epi32(u[11], k__DCT_CONST_ROUNDING);
+          v[12] = _mm256_add_epi32(u[12], k__DCT_CONST_ROUNDING);
+          v[13] = _mm256_add_epi32(u[13], k__DCT_CONST_ROUNDING);
+          v[14] = _mm256_add_epi32(u[14], k__DCT_CONST_ROUNDING);
+          v[15] = _mm256_add_epi32(u[15], k__DCT_CONST_ROUNDING);
+
+          u[ 0] = _mm256_srai_epi32(v[ 0], DCT_CONST_BITS);
+          u[ 1] = _mm256_srai_epi32(v[ 1], DCT_CONST_BITS);
+          u[ 2] = _mm256_srai_epi32(v[ 2], DCT_CONST_BITS);
+          u[ 3] = _mm256_srai_epi32(v[ 3], DCT_CONST_BITS);
+          u[ 4] = _mm256_srai_epi32(v[ 4], DCT_CONST_BITS);
+          u[ 5] = _mm256_srai_epi32(v[ 5], DCT_CONST_BITS);
+          u[ 6] = _mm256_srai_epi32(v[ 6], DCT_CONST_BITS);
+          u[ 7] = _mm256_srai_epi32(v[ 7], DCT_CONST_BITS);
+          u[ 8] = _mm256_srai_epi32(v[ 8], DCT_CONST_BITS);
+          u[ 9] = _mm256_srai_epi32(v[ 9], DCT_CONST_BITS);
+          u[10] = _mm256_srai_epi32(v[10], DCT_CONST_BITS);
+          u[11] = _mm256_srai_epi32(v[11], DCT_CONST_BITS);
+          u[12] = _mm256_srai_epi32(v[12], DCT_CONST_BITS);
+          u[13] = _mm256_srai_epi32(v[13], DCT_CONST_BITS);
+          u[14] = _mm256_srai_epi32(v[14], DCT_CONST_BITS);
+          u[15] = _mm256_srai_epi32(v[15], DCT_CONST_BITS);
+
+          v[ 0] = _mm256_cmpgt_epi32(kZero, u[ 0]);
+          v[ 1] = _mm256_cmpgt_epi32(kZero, u[ 1]);
+          v[ 2] = _mm256_cmpgt_epi32(kZero, u[ 2]);
+          v[ 3] = _mm256_cmpgt_epi32(kZero, u[ 3]);
+          v[ 4] = _mm256_cmpgt_epi32(kZero, u[ 4]);
+          v[ 5] = _mm256_cmpgt_epi32(kZero, u[ 5]);
+          v[ 6] = _mm256_cmpgt_epi32(kZero, u[ 6]);
+          v[ 7] = _mm256_cmpgt_epi32(kZero, u[ 7]);
+          v[ 8] = _mm256_cmpgt_epi32(kZero, u[ 8]);
+          v[ 9] = _mm256_cmpgt_epi32(kZero, u[ 9]);
+          v[10] = _mm256_cmpgt_epi32(kZero, u[10]);
+          v[11] = _mm256_cmpgt_epi32(kZero, u[11]);
+          v[12] = _mm256_cmpgt_epi32(kZero, u[12]);
+          v[13] = _mm256_cmpgt_epi32(kZero, u[13]);
+          v[14] = _mm256_cmpgt_epi32(kZero, u[14]);
+          v[15] = _mm256_cmpgt_epi32(kZero, u[15]);
+
+          u[ 0] = _mm256_sub_epi32(u[ 0], v[ 0]);
+          u[ 1] = _mm256_sub_epi32(u[ 1], v[ 1]);
+          u[ 2] = _mm256_sub_epi32(u[ 2], v[ 2]);
+          u[ 3] = _mm256_sub_epi32(u[ 3], v[ 3]);
+          u[ 4] = _mm256_sub_epi32(u[ 4], v[ 4]);
+          u[ 5] = _mm256_sub_epi32(u[ 5], v[ 5]);
+          u[ 6] = _mm256_sub_epi32(u[ 6], v[ 6]);
+          u[ 7] = _mm256_sub_epi32(u[ 7], v[ 7]);
+          u[ 8] = _mm256_sub_epi32(u[ 8], v[ 8]);
+          u[ 9] = _mm256_sub_epi32(u[ 9], v[ 9]);
+          u[10] = _mm256_sub_epi32(u[10], v[10]);
+          u[11] = _mm256_sub_epi32(u[11], v[11]);
+          u[12] = _mm256_sub_epi32(u[12], v[12]);
+          u[13] = _mm256_sub_epi32(u[13], v[13]);
+          u[14] = _mm256_sub_epi32(u[14], v[14]);
+          u[15] = _mm256_sub_epi32(u[15], v[15]);
+
+          v[0] = _mm256_add_epi32(u[0], K32One);
+          v[1] = _mm256_add_epi32(u[1], K32One);
+          v[2] = _mm256_add_epi32(u[2], K32One);
+          v[3] = _mm256_add_epi32(u[3], K32One);
+          v[4] = _mm256_add_epi32(u[4], K32One);
+          v[5] = _mm256_add_epi32(u[5], K32One);
+          v[6] = _mm256_add_epi32(u[6], K32One);
+          v[7] = _mm256_add_epi32(u[7], K32One);
+          v[8] = _mm256_add_epi32(u[8], K32One);
+          v[9] = _mm256_add_epi32(u[9], K32One);
+          v[10] = _mm256_add_epi32(u[10], K32One);
+          v[11] = _mm256_add_epi32(u[11], K32One);
+          v[12] = _mm256_add_epi32(u[12], K32One);
+          v[13] = _mm256_add_epi32(u[13], K32One);
+          v[14] = _mm256_add_epi32(u[14], K32One);
+          v[15] = _mm256_add_epi32(u[15], K32One);
+
+          u[0] = _mm256_srai_epi32(v[0], 2);
+          u[1] = _mm256_srai_epi32(v[1], 2);
+          u[2] = _mm256_srai_epi32(v[2], 2);
+          u[3] = _mm256_srai_epi32(v[3], 2);
+          u[4] = _mm256_srai_epi32(v[4], 2);
+          u[5] = _mm256_srai_epi32(v[5], 2);
+          u[6] = _mm256_srai_epi32(v[6], 2);
+          u[7] = _mm256_srai_epi32(v[7], 2);
+          u[8] = _mm256_srai_epi32(v[8], 2);
+          u[9] = _mm256_srai_epi32(v[9], 2);
+          u[10] = _mm256_srai_epi32(v[10], 2);
+          u[11] = _mm256_srai_epi32(v[11], 2);
+          u[12] = _mm256_srai_epi32(v[12], 2);
+          u[13] = _mm256_srai_epi32(v[13], 2);
+          u[14] = _mm256_srai_epi32(v[14], 2);
+          u[15] = _mm256_srai_epi32(v[15], 2);
+
+          out[ 5] = _mm256_packs_epi32(u[0], u[1]);
+          out[21] = _mm256_packs_epi32(u[2], u[3]);
+          out[13] = _mm256_packs_epi32(u[4], u[5]);
+          out[29] = _mm256_packs_epi32(u[6], u[7]);
+          out[ 3] = _mm256_packs_epi32(u[8], u[9]);
+          out[19] = _mm256_packs_epi32(u[10], u[11]);
+          out[11] = _mm256_packs_epi32(u[12], u[13]);
+          out[27] = _mm256_packs_epi32(u[14], u[15]);
+        }
+      }
+#endif
+      // Transpose the results; do it as four 8x8 transposes.
+      {
+        int transpose_block;
+        int16_t *output_currStep, *output_nextStep;
+        if (0 == pass) {
+          output_currStep = &intermediate[column_start * 32];
+          output_nextStep = &intermediate[(column_start + 8) * 32];
+        } else {
+          output_currStep = &output_org[column_start * 32];
+          output_nextStep = &output_org[(column_start + 8) * 32];
+        }
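+        // Pass 0 writes the column-transformed block to the intermediate
+        // buffer; pass 1 reads that buffer back as its input and writes the
+        // final coefficients to output_org.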
+        for (transpose_block = 0; transpose_block < 4; ++transpose_block) {
+          __m256i *this_out = &out[8 * transpose_block];
+          // 00  01  02  03  04  05  06  07  08  09  10  11  12  13  14  15
+          // 20  21  22  23  24  25  26  27  28  29  30  31  32  33  34  35
+          // 40  41  42  43  44  45  46  47  48  49  50  51  52  53  54  55
+          // 60  61  62  63  64  65  66  67  68  69  70  71  72  73  74  75
+          // 80  81  82  83  84  85  86  87  88  89  90  91  92  93  94  95
+          // 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115
+          // 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135
+          // 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155
+          const __m256i tr0_0 = _mm256_unpacklo_epi16(this_out[0], this_out[1]);
+          const __m256i tr0_1 = _mm256_unpacklo_epi16(this_out[2], this_out[3]);
+          const __m256i tr0_2 = _mm256_unpackhi_epi16(this_out[0], this_out[1]);
+          const __m256i tr0_3 = _mm256_unpackhi_epi16(this_out[2], this_out[3]);
+          const __m256i tr0_4 = _mm256_unpacklo_epi16(this_out[4], this_out[5]);
+          const __m256i tr0_5 = _mm256_unpacklo_epi16(this_out[6], this_out[7]);
+          const __m256i tr0_6 = _mm256_unpackhi_epi16(this_out[4], this_out[5]);
+          const __m256i tr0_7 = _mm256_unpackhi_epi16(this_out[6], this_out[7]);
+          // 00  20  01  21  02  22  03  23  08  28  09  29  10  30  11  31
+          // 40  60  41  61  42  62  43  63  48  68  49  69  50  70  51  71
+          // 04  24  05  25  06  26  07  27  12  32  13  33  14  34  15  35
+          // 44  64  45  65  46  66  47  67  52  72  53  73  54  74  55  75
+          // 80  100 81  101 82  102 83  103 88  108 89  109 90  110 91  111
+          // 120 140 121 141 122 142 123 143 128 148 129 149 130 150 131 151
+          // 84  104 85  105 86  106 87  107 92  112 93  113 94  114 95  115
+          // 124 144 125 145 126 146 127 147 132 152 133 153 134 154 135 155
+
+          const __m256i tr1_0 = _mm256_unpacklo_epi32(tr0_0, tr0_1);
+          const __m256i tr1_1 = _mm256_unpacklo_epi32(tr0_2, tr0_3);
+          const __m256i tr1_2 = _mm256_unpackhi_epi32(tr0_0, tr0_1);
+          const __m256i tr1_3 = _mm256_unpackhi_epi32(tr0_2, tr0_3);
+          const __m256i tr1_4 = _mm256_unpacklo_epi32(tr0_4, tr0_5);
+          const __m256i tr1_5 = _mm256_unpacklo_epi32(tr0_6, tr0_7);
+          const __m256i tr1_6 = _mm256_unpackhi_epi32(tr0_4, tr0_5);
+          const __m256i tr1_7 = _mm256_unpackhi_epi32(tr0_6, tr0_7);
+          // 00 20  40  60  01 21  41  61  08 28  48  68  09 29  49  69
+          // 04 24  44  64  05 25  45  65  12 32  52  72  13 33  53  73
+          // 02 22  42  62  03 23  43  63  10 30  50  70  11 31  51  71
+          // 06 26  46  66  07 27  47  67  14 34  54  74  15 35  55  75
+          // 80 100 120 140 81 101 121 141 88 108 128 148 89 109 129 149
+          // 84 104 124 144 85 105 125 145 92 112 132 152 93 113 133 153
+          // 82 102 122 142 83 103 123 143 90 110 130 150 91 111 131 151
+          // 86 106 126 146 87 107 127 147 94 114 134 154 95 115 135 155
+          __m256i tr2_0 = _mm256_unpacklo_epi64(tr1_0, tr1_4);
+          __m256i tr2_1 = _mm256_unpackhi_epi64(tr1_0, tr1_4);
+          __m256i tr2_2 = _mm256_unpacklo_epi64(tr1_2, tr1_6);
+          __m256i tr2_3 = _mm256_unpackhi_epi64(tr1_2, tr1_6);
+          __m256i tr2_4 = _mm256_unpacklo_epi64(tr1_1, tr1_5);
+          __m256i tr2_5 = _mm256_unpackhi_epi64(tr1_1, tr1_5);
+          __m256i tr2_6 = _mm256_unpacklo_epi64(tr1_3, tr1_7);
+          __m256i tr2_7 = _mm256_unpackhi_epi64(tr1_3, tr1_7);
+          // 00 20 40 60 80 100 120 140 08 28 48 68 88 108 128 148
+          // 01 21 41 61 81 101 121 141 09 29 49 69 89 109 129 149
+          // 02 22 42 62 82 102 122 142 10 30 50 70 90 110 130 150
+          // 03 23 43 63 83 103 123 143 11 31 51 71 91 111 131 151
+          // 04 24 44 64 84 104 124 144 12 32 52 72 92 112 132 152
+          // 05 25 45 65 85 105 125 145 13 33 53 73 93 113 133 153
+          // 06 26 46 66 86 106 126 146 14 34 54 74 94 114 134 154
+          // 07 27 47 67 87 107 127 147 15 35 55 75 95 115 135 155
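+          // Each unpack round above doubles the interleave width (16-, 32-,
+          // then 64-bit words): the classic three-step SIMD 8x8 transpose.
+          // Per 8x8 tile the net effect is simply dst[r][c] = src[c][r],
+          // as the layout comments trace element by element.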
+          if (0 == pass) {
+            // output[j] = (output[j] + 1 + (output[j] > 0)) >> 2;
+            // TODO(cd): see quality impact of only doing
+            //           output[j] = (output[j] + 1) >> 2;
+            //           which would remove the code between here ...
+            __m256i tr2_0_0 = _mm256_cmpgt_epi16(tr2_0, kZero);
+            __m256i tr2_1_0 = _mm256_cmpgt_epi16(tr2_1, kZero);
+            __m256i tr2_2_0 = _mm256_cmpgt_epi16(tr2_2, kZero);
+            __m256i tr2_3_0 = _mm256_cmpgt_epi16(tr2_3, kZero);
+            __m256i tr2_4_0 = _mm256_cmpgt_epi16(tr2_4, kZero);
+            __m256i tr2_5_0 = _mm256_cmpgt_epi16(tr2_5, kZero);
+            __m256i tr2_6_0 = _mm256_cmpgt_epi16(tr2_6, kZero);
+            __m256i tr2_7_0 = _mm256_cmpgt_epi16(tr2_7, kZero);
+            tr2_0 = _mm256_sub_epi16(tr2_0, tr2_0_0);
+            tr2_1 = _mm256_sub_epi16(tr2_1, tr2_1_0);
+            tr2_2 = _mm256_sub_epi16(tr2_2, tr2_2_0);
+            tr2_3 = _mm256_sub_epi16(tr2_3, tr2_3_0);
+            tr2_4 = _mm256_sub_epi16(tr2_4, tr2_4_0);
+            tr2_5 = _mm256_sub_epi16(tr2_5, tr2_5_0);
+            tr2_6 = _mm256_sub_epi16(tr2_6, tr2_6_0);
+            tr2_7 = _mm256_sub_epi16(tr2_7, tr2_7_0);
+            //           ... and here.
+            //           PS: also change code in vp9/encoder/vp9_dct.c
+            tr2_0 = _mm256_add_epi16(tr2_0, kOne);
+            tr2_1 = _mm256_add_epi16(tr2_1, kOne);
+            tr2_2 = _mm256_add_epi16(tr2_2, kOne);
+            tr2_3 = _mm256_add_epi16(tr2_3, kOne);
+            tr2_4 = _mm256_add_epi16(tr2_4, kOne);
+            tr2_5 = _mm256_add_epi16(tr2_5, kOne);
+            tr2_6 = _mm256_add_epi16(tr2_6, kOne);
+            tr2_7 = _mm256_add_epi16(tr2_7, kOne);
+            tr2_0 = _mm256_srai_epi16(tr2_0, 2);
+            tr2_1 = _mm256_srai_epi16(tr2_1, 2);
+            tr2_2 = _mm256_srai_epi16(tr2_2, 2);
+            tr2_3 = _mm256_srai_epi16(tr2_3, 2);
+            tr2_4 = _mm256_srai_epi16(tr2_4, 2);
+            tr2_5 = _mm256_srai_epi16(tr2_5, 2);
+            tr2_6 = _mm256_srai_epi16(tr2_6, 2);
+            tr2_7 = _mm256_srai_epi16(tr2_7, 2);
+          }
+          // Note: even though all these stores are aligned, using the aligned
+          //       intrinsic makes the code slightly slower.
+          _mm_storeu_si128((__m128i *)(output_currStep + 0 * 32), _mm256_castsi256_si128(tr2_0));
+          _mm_storeu_si128((__m128i *)(output_currStep + 1 * 32), _mm256_castsi256_si128(tr2_1));
+          _mm_storeu_si128((__m128i *)(output_currStep + 2 * 32), _mm256_castsi256_si128(tr2_2));
+          _mm_storeu_si128((__m128i *)(output_currStep + 3 * 32), _mm256_castsi256_si128(tr2_3));
+          _mm_storeu_si128((__m128i *)(output_currStep + 4 * 32), _mm256_castsi256_si128(tr2_4));
+          _mm_storeu_si128((__m128i *)(output_currStep + 5 * 32), _mm256_castsi256_si128(tr2_5));
+          _mm_storeu_si128((__m128i *)(output_currStep + 6 * 32), _mm256_castsi256_si128(tr2_6));
+          _mm_storeu_si128((__m128i *)(output_currStep + 7 * 32), _mm256_castsi256_si128(tr2_7));
+
+          _mm_storeu_si128((__m128i *)(output_nextStep + 0 * 32), _mm256_extractf128_si256(tr2_0, 1));
+          _mm_storeu_si128((__m128i *)(output_nextStep + 1 * 32), _mm256_extractf128_si256(tr2_1, 1));
+          _mm_storeu_si128((__m128i *)(output_nextStep + 2 * 32), _mm256_extractf128_si256(tr2_2, 1));
+          _mm_storeu_si128((__m128i *)(output_nextStep + 3 * 32), _mm256_extractf128_si256(tr2_3, 1));
+          _mm_storeu_si128((__m128i *)(output_nextStep + 4 * 32), _mm256_extractf128_si256(tr2_4, 1));
+          _mm_storeu_si128((__m128i *)(output_nextStep + 5 * 32), _mm256_extractf128_si256(tr2_5, 1));
+          _mm_storeu_si128((__m128i *)(output_nextStep + 6 * 32), _mm256_extractf128_si256(tr2_6, 1));
+          _mm_storeu_si128((__m128i *)(output_nextStep + 7 * 32), _mm256_extractf128_si256(tr2_7, 1));
+          // Process next 8x8
+          output_currStep += 8;
+          output_nextStep += 8;
+        }
+      }
+    }
+  }
+}  // NOLINT
--- /dev/null
+++ b/vpx_dsp/x86/fwd_dct32x32_impl_sse2.h
@@ -1,0 +1,3153 @@
+/*
+ *  Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <emmintrin.h>  // SSE2
+
+#include "vpx_dsp/fwd_txfm.h"
+#include "vpx_dsp/txfm_common.h"
+#include "vpx_dsp/x86/txfm_common_sse2.h"
+
+// TODO(jingning) The high bit-depth version needs re-work for performance.
+// The current SSE2 implementation also causes cross reference to the static
+// functions in the C implementation file.
+#if DCT_HIGH_BIT_DEPTH
+#define ADD_EPI16 _mm_adds_epi16
+#define SUB_EPI16 _mm_subs_epi16
+#if FDCT32x32_HIGH_PRECISION
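+// C fallback for the second (row) pass: transforms column i of the
+// intermediate buffer with vp9_fdct32 and writes it out as row i, applying
+// the final round-and-shift by two. Presumably invoked through
+// HIGH_FDCT32x32_2D_ROWS_C when the vector pass detects overflow.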
+void vp9_fdct32x32_rows_c(const int16_t *intermediate, tran_low_t *out) {
+  int i, j;
+  for (i = 0; i < 32; ++i) {
+    tran_high_t temp_in[32], temp_out[32];
+    for (j = 0; j < 32; ++j)
+      temp_in[j] = intermediate[j * 32 + i];
+    vp9_fdct32(temp_in, temp_out, 0);
+    for (j = 0; j < 32; ++j)
+      out[j + i * 32] =
+          (tran_low_t)((temp_out[j] + 1 + (temp_out[j] < 0)) >> 2);
+  }
+}
+#define HIGH_FDCT32x32_2D_C vp9_highbd_fdct32x32_c
+#define HIGH_FDCT32x32_2D_ROWS_C vp9_fdct32x32_rows_c
+#else
+void vp9_fdct32x32_rd_rows_c(const int16_t *intermediate, tran_low_t *out) {
+  int i, j;
+  for (i = 0; i < 32; ++i) {
+    tran_high_t temp_in[32], temp_out[32];
+    for (j = 0; j < 32; ++j)
+      temp_in[j] = intermediate[j * 32 + i];
+    vp9_fdct32(temp_in, temp_out, 1);
+    for (j = 0; j < 32; ++j)
+      out[j + i * 32] = (tran_low_t)temp_out[j];
+  }
+}
+#define HIGH_FDCT32x32_2D_C vp9_highbd_fdct32x32_rd_c
+#define HIGH_FDCT32x32_2D_ROWS_C vp9_fdct32x32_rd_rows_c
+#endif  // FDCT32x32_HIGH_PRECISION
+#else
+#define ADD_EPI16 _mm_add_epi16
+#define SUB_EPI16 _mm_sub_epi16
+#endif  // DCT_HIGH_BIT_DEPTH
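+// A minimal scalar sketch (not part of the build) of why the saturating
+// forms matter in the high bit-depth path: _mm_adds_epi16 clamps instead of
+// wrapping, so an overflowed lane stays detectable and the code can fall
+// back to the C versions named above.
+//   int16_t wrapped   = (int16_t)(30000 + 10000);  // wraps to -25536
+//   int16_t saturated = 32767;                     // what _mm_adds_epi16 yields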
+
+void FDCT32x32_2D(const int16_t *input,
+                  tran_low_t *output_org, int stride) {
+  // Calculate pre-multiplied strides
+  const int str1 = stride;
+  const int str2 = 2 * stride;
+  const int str3 = 2 * stride + str1;
+  // We need an intermediate buffer between passes.
+  DECLARE_ALIGNED(16, int16_t, intermediate[32 * 32]);
+  // Constants
+  //    In one case (k__cospi_p16_p16) all eight 16-bit lanes hold the same
+  //    value. In all others it's a pair of values that we need to repeat
+  //    four times; this is done by constructing the 32-bit constant
+  //    corresponding to that pair.
+  const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64);
+  const __m128i k__cospi_p16_m16 = pair_set_epi16(+cospi_16_64, -cospi_16_64);
+  const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64,   cospi_24_64);
+  const __m128i k__cospi_m24_m08 = pair_set_epi16(-cospi_24_64, -cospi_8_64);
+  const __m128i k__cospi_p24_p08 = pair_set_epi16(+cospi_24_64,  cospi_8_64);
+  const __m128i k__cospi_p12_p20 = pair_set_epi16(+cospi_12_64,  cospi_20_64);
+  const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64,  cospi_12_64);
+  const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64,   cospi_28_64);
+  const __m128i k__cospi_p28_p04 = pair_set_epi16(+cospi_28_64,  cospi_4_64);
+  const __m128i k__cospi_m28_m04 = pair_set_epi16(-cospi_28_64, -cospi_4_64);
+  const __m128i k__cospi_m12_m20 = pair_set_epi16(-cospi_12_64, -cospi_20_64);
+  const __m128i k__cospi_p30_p02 = pair_set_epi16(+cospi_30_64,  cospi_2_64);
+  const __m128i k__cospi_p14_p18 = pair_set_epi16(+cospi_14_64,  cospi_18_64);
+  const __m128i k__cospi_p22_p10 = pair_set_epi16(+cospi_22_64,  cospi_10_64);
+  const __m128i k__cospi_p06_p26 = pair_set_epi16(+cospi_6_64,   cospi_26_64);
+  const __m128i k__cospi_m26_p06 = pair_set_epi16(-cospi_26_64,  cospi_6_64);
+  const __m128i k__cospi_m10_p22 = pair_set_epi16(-cospi_10_64,  cospi_22_64);
+  const __m128i k__cospi_m18_p14 = pair_set_epi16(-cospi_18_64,  cospi_14_64);
+  const __m128i k__cospi_m02_p30 = pair_set_epi16(-cospi_2_64,   cospi_30_64);
+  const __m128i k__cospi_p31_p01 = pair_set_epi16(+cospi_31_64,  cospi_1_64);
+  const __m128i k__cospi_p15_p17 = pair_set_epi16(+cospi_15_64,  cospi_17_64);
+  const __m128i k__cospi_p23_p09 = pair_set_epi16(+cospi_23_64,  cospi_9_64);
+  const __m128i k__cospi_p07_p25 = pair_set_epi16(+cospi_7_64,   cospi_25_64);
+  const __m128i k__cospi_m25_p07 = pair_set_epi16(-cospi_25_64,  cospi_7_64);
+  const __m128i k__cospi_m09_p23 = pair_set_epi16(-cospi_9_64,   cospi_23_64);
+  const __m128i k__cospi_m17_p15 = pair_set_epi16(-cospi_17_64,  cospi_15_64);
+  const __m128i k__cospi_m01_p31 = pair_set_epi16(-cospi_1_64,   cospi_31_64);
+  const __m128i k__cospi_p27_p05 = pair_set_epi16(+cospi_27_64,  cospi_5_64);
+  const __m128i k__cospi_p11_p21 = pair_set_epi16(+cospi_11_64,  cospi_21_64);
+  const __m128i k__cospi_p19_p13 = pair_set_epi16(+cospi_19_64,  cospi_13_64);
+  const __m128i k__cospi_p03_p29 = pair_set_epi16(+cospi_3_64,   cospi_29_64);
+  const __m128i k__cospi_m29_p03 = pair_set_epi16(-cospi_29_64,  cospi_3_64);
+  const __m128i k__cospi_m13_p19 = pair_set_epi16(-cospi_13_64,  cospi_19_64);
+  const __m128i k__cospi_m21_p11 = pair_set_epi16(-cospi_21_64,  cospi_11_64);
+  const __m128i k__cospi_m05_p27 = pair_set_epi16(-cospi_5_64,   cospi_27_64);
+  const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
+  const __m128i kZero = _mm_set1_epi16(0);
+  const __m128i kOne  = _mm_set1_epi16(1);
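+  // Each pair constant above is meant for _mm_madd_epi16 against inputs
+  // that have been interleaved 16-bit-wise, so every 32-bit result lane
+  // holds a two-tap butterfly; in scalar terms, for a pair (c0, c1) and
+  // adjacent inputs (a, b):
+  //   result32 = a * c0 + b * c1;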
+  // Do the two transform/transpose passes
+  int pass;
+#if DCT_HIGH_BIT_DEPTH
+  int overflow;
+#endif
+  for (pass = 0; pass < 2; ++pass) {
+    // We process eight columns (transposed rows in second pass) at a time.
+    int column_start;
+    for (column_start = 0; column_start < 32; column_start += 8) {
+      __m128i step1[32];
+      __m128i step2[32];
+      __m128i step3[32];
+      __m128i out[32];
+      // Stage 1
+      // Note: even though all the loads below are aligned, using the aligned
+      //       intrinsic makes the code slightly slower.
+      if (0 == pass) {
+        const int16_t *in  = &input[column_start];
+        // step1[i] =  (in[ 0 * stride] + in[(32 -  1) * stride]) << 2;
+        // Note: the next four blocks could be in a loop. That would help the
+        //       instruction cache but is actually slower.
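+        //       For reference, that loop form would look roughly like this
+        //       (i is illustrative; the code keeps the blocks unrolled
+        //       because the loop measured slower):
+        //         for (i = 0; i < 16; i += 4) {
+        //           const int16_t *ina = in + i * str1;
+        //           const int16_t *inb = in + (31 - i) * str1;
+        //           // ...same loads, butterflies and << 2 as the blocks below
+        //         }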
+        {
+          const int16_t *ina =  in +  0 * str1;
+          const int16_t *inb =  in + 31 * str1;
+          __m128i *step1a = &step1[ 0];
+          __m128i *step1b = &step1[31];
+          const __m128i ina0  = _mm_loadu_si128((const __m128i *)(ina));
+          const __m128i ina1  = _mm_loadu_si128((const __m128i *)(ina + str1));
+          const __m128i ina2  = _mm_loadu_si128((const __m128i *)(ina + str2));
+          const __m128i ina3  = _mm_loadu_si128((const __m128i *)(ina + str3));
+          const __m128i inb3  = _mm_loadu_si128((const __m128i *)(inb - str3));
+          const __m128i inb2  = _mm_loadu_si128((const __m128i *)(inb - str2));
+          const __m128i inb1  = _mm_loadu_si128((const __m128i *)(inb - str1));
+          const __m128i inb0  = _mm_loadu_si128((const __m128i *)(inb));
+          step1a[ 0] = _mm_add_epi16(ina0, inb0);
+          step1a[ 1] = _mm_add_epi16(ina1, inb1);
+          step1a[ 2] = _mm_add_epi16(ina2, inb2);
+          step1a[ 3] = _mm_add_epi16(ina3, inb3);
+          step1b[-3] = _mm_sub_epi16(ina3, inb3);
+          step1b[-2] = _mm_sub_epi16(ina2, inb2);
+          step1b[-1] = _mm_sub_epi16(ina1, inb1);
+          step1b[-0] = _mm_sub_epi16(ina0, inb0);
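+          // Pre-scale the first-pass butterflies by 4 (<< 2), matching the
+          // input scaling of the scalar reference transform.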
+          step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2);
+          step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2);
+          step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2);
+          step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2);
+          step1b[-3] = _mm_slli_epi16(step1b[-3], 2);
+          step1b[-2] = _mm_slli_epi16(step1b[-2], 2);
+          step1b[-1] = _mm_slli_epi16(step1b[-1], 2);
+          step1b[-0] = _mm_slli_epi16(step1b[-0], 2);
+        }
+        {
+          const int16_t *ina =  in +  4 * str1;
+          const int16_t *inb =  in + 27 * str1;
+          __m128i *step1a = &step1[ 4];
+          __m128i *step1b = &step1[27];
+          const __m128i ina0  = _mm_loadu_si128((const __m128i *)(ina));
+          const __m128i ina1  = _mm_loadu_si128((const __m128i *)(ina + str1));
+          const __m128i ina2  = _mm_loadu_si128((const __m128i *)(ina + str2));
+          const __m128i ina3  = _mm_loadu_si128((const __m128i *)(ina + str3));
+          const __m128i inb3  = _mm_loadu_si128((const __m128i *)(inb - str3));
+          const __m128i inb2  = _mm_loadu_si128((const __m128i *)(inb - str2));
+          const __m128i inb1  = _mm_loadu_si128((const __m128i *)(inb - str1));
+          const __m128i inb0  = _mm_loadu_si128((const __m128i *)(inb));
+          step1a[ 0] = _mm_add_epi16(ina0, inb0);
+          step1a[ 1] = _mm_add_epi16(ina1, inb1);
+          step1a[ 2] = _mm_add_epi16(ina2, inb2);
+          step1a[ 3] = _mm_add_epi16(ina3, inb3);
+          step1b[-3] = _mm_sub_epi16(ina3, inb3);
+          step1b[-2] = _mm_sub_epi16(ina2, inb2);
+          step1b[-1] = _mm_sub_epi16(ina1, inb1);
+          step1b[-0] = _mm_sub_epi16(ina0, inb0);
+          step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2);
+          step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2);
+          step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2);
+          step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2);
+          step1b[-3] = _mm_slli_epi16(step1b[-3], 2);
+          step1b[-2] = _mm_slli_epi16(step1b[-2], 2);
+          step1b[-1] = _mm_slli_epi16(step1b[-1], 2);
+          step1b[-0] = _mm_slli_epi16(step1b[-0], 2);
+        }
+        {
+          const int16_t *ina =  in +  8 * str1;
+          const int16_t *inb =  in + 23 * str1;
+          __m128i *step1a = &step1[ 8];
+          __m128i *step1b = &step1[23];
+          const __m128i ina0  = _mm_loadu_si128((const __m128i *)(ina));
+          const __m128i ina1  = _mm_loadu_si128((const __m128i *)(ina + str1));
+          const __m128i ina2  = _mm_loadu_si128((const __m128i *)(ina + str2));
+          const __m128i ina3  = _mm_loadu_si128((const __m128i *)(ina + str3));
+          const __m128i inb3  = _mm_loadu_si128((const __m128i *)(inb - str3));
+          const __m128i inb2  = _mm_loadu_si128((const __m128i *)(inb - str2));
+          const __m128i inb1  = _mm_loadu_si128((const __m128i *)(inb - str1));
+          const __m128i inb0  = _mm_loadu_si128((const __m128i *)(inb));
+          step1a[ 0] = _mm_add_epi16(ina0, inb0);
+          step1a[ 1] = _mm_add_epi16(ina1, inb1);
+          step1a[ 2] = _mm_add_epi16(ina2, inb2);
+          step1a[ 3] = _mm_add_epi16(ina3, inb3);
+          step1b[-3] = _mm_sub_epi16(ina3, inb3);
+          step1b[-2] = _mm_sub_epi16(ina2, inb2);
+          step1b[-1] = _mm_sub_epi16(ina1, inb1);
+          step1b[-0] = _mm_sub_epi16(ina0, inb0);
+          step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2);
+          step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2);
+          step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2);
+          step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2);
+          step1b[-3] = _mm_slli_epi16(step1b[-3], 2);
+          step1b[-2] = _mm_slli_epi16(step1b[-2], 2);
+          step1b[-1] = _mm_slli_epi16(step1b[-1], 2);
+          step1b[-0] = _mm_slli_epi16(step1b[-0], 2);
+        }
+        {
+          const int16_t *ina =  in + 12 * str1;
+          const int16_t *inb =  in + 19 * str1;
+          __m128i *step1a = &step1[12];
+          __m128i *step1b = &step1[19];
+          const __m128i ina0  = _mm_loadu_si128((const __m128i *)(ina));
+          const __m128i ina1  = _mm_loadu_si128((const __m128i *)(ina + str1));
+          const __m128i ina2  = _mm_loadu_si128((const __m128i *)(ina + str2));
+          const __m128i ina3  = _mm_loadu_si128((const __m128i *)(ina + str3));
+          const __m128i inb3  = _mm_loadu_si128((const __m128i *)(inb - str3));
+          const __m128i inb2  = _mm_loadu_si128((const __m128i *)(inb - str2));
+          const __m128i inb1  = _mm_loadu_si128((const __m128i *)(inb - str1));
+          const __m128i inb0  = _mm_loadu_si128((const __m128i *)(inb));
+          step1a[ 0] = _mm_add_epi16(ina0, inb0);
+          step1a[ 1] = _mm_add_epi16(ina1, inb1);
+          step1a[ 2] = _mm_add_epi16(ina2, inb2);
+          step1a[ 3] = _mm_add_epi16(ina3, inb3);
+          step1b[-3] = _mm_sub_epi16(ina3, inb3);
+          step1b[-2] = _mm_sub_epi16(ina2, inb2);
+          step1b[-1] = _mm_sub_epi16(ina1, inb1);
+          step1b[-0] = _mm_sub_epi16(ina0, inb0);
+          step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2);
+          step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2);
+          step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2);
+          step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2);
+          step1b[-3] = _mm_slli_epi16(step1b[-3], 2);
+          step1b[-2] = _mm_slli_epi16(step1b[-2], 2);
+          step1b[-1] = _mm_slli_epi16(step1b[-1], 2);
+          step1b[-0] = _mm_slli_epi16(step1b[-0], 2);
+        }
+      } else {
+        int16_t *in = &intermediate[column_start];
+        // step1[i] =  in[ 0 * 32] + in[(32 -  1) * 32];
+        // Note: using the same approach as above to share a common offset is
+        //       counter-productive here, as all offsets can be computed at
+        //       compile time.
+        // Note: the next four blocks could be in a loop. That would help the
+        //       instruction cache but is actually slower.
+        {
+          __m128i in00  = _mm_loadu_si128((const __m128i *)(in +  0 * 32));
+          __m128i in01  = _mm_loadu_si128((const __m128i *)(in +  1 * 32));
+          __m128i in02  = _mm_loadu_si128((const __m128i *)(in +  2 * 32));
+          __m128i in03  = _mm_loadu_si128((const __m128i *)(in +  3 * 32));
+          __m128i in28  = _mm_loadu_si128((const __m128i *)(in + 28 * 32));
+          __m128i in29  = _mm_loadu_si128((const __m128i *)(in + 29 * 32));
+          __m128i in30  = _mm_loadu_si128((const __m128i *)(in + 30 * 32));
+          __m128i in31  = _mm_loadu_si128((const __m128i *)(in + 31 * 32));
+          step1[0] = ADD_EPI16(in00, in31);
+          step1[1] = ADD_EPI16(in01, in30);
+          step1[2] = ADD_EPI16(in02, in29);
+          step1[3] = ADD_EPI16(in03, in28);
+          step1[28] = SUB_EPI16(in03, in28);
+          step1[29] = SUB_EPI16(in02, in29);
+          step1[30] = SUB_EPI16(in01, in30);
+          step1[31] = SUB_EPI16(in00, in31);
+#if DCT_HIGH_BIT_DEPTH
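+          // On 16-bit overflow, redo the pass with the C implementation,
+          // which uses wider intermediates, and bail out.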
+          overflow = check_epi16_overflow_x8(&step1[0], &step1[1], &step1[2],
+                                             &step1[3], &step1[28], &step1[29],
+                                             &step1[30], &step1[31]);
+          if (overflow) {
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
+#endif  // DCT_HIGH_BIT_DEPTH
+        }
+        {
+          __m128i in04  = _mm_loadu_si128((const __m128i *)(in +  4 * 32));
+          __m128i in05  = _mm_loadu_si128((const __m128i *)(in +  5 * 32));
+          __m128i in06  = _mm_loadu_si128((const __m128i *)(in +  6 * 32));
+          __m128i in07  = _mm_loadu_si128((const __m128i *)(in +  7 * 32));
+          __m128i in24  = _mm_loadu_si128((const __m128i *)(in + 24 * 32));
+          __m128i in25  = _mm_loadu_si128((const __m128i *)(in + 25 * 32));
+          __m128i in26  = _mm_loadu_si128((const __m128i *)(in + 26 * 32));
+          __m128i in27  = _mm_loadu_si128((const __m128i *)(in + 27 * 32));
+          step1[4] = ADD_EPI16(in04, in27);
+          step1[5] = ADD_EPI16(in05, in26);
+          step1[6] = ADD_EPI16(in06, in25);
+          step1[7] = ADD_EPI16(in07, in24);
+          step1[24] = SUB_EPI16(in07, in24);
+          step1[25] = SUB_EPI16(in06, in25);
+          step1[26] = SUB_EPI16(in05, in26);
+          step1[27] = SUB_EPI16(in04, in27);
+#if DCT_HIGH_BIT_DEPTH
+          overflow = check_epi16_overflow_x8(&step1[4], &step1[5], &step1[6],
+                                             &step1[7], &step1[24], &step1[25],
+                                             &step1[26], &step1[27]);
+          if (overflow) {
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
+#endif  // DCT_HIGH_BIT_DEPTH
+        }
+        {
+          __m128i in08  = _mm_loadu_si128((const __m128i *)(in +  8 * 32));
+          __m128i in09  = _mm_loadu_si128((const __m128i *)(in +  9 * 32));
+          __m128i in10  = _mm_loadu_si128((const __m128i *)(in + 10 * 32));
+          __m128i in11  = _mm_loadu_si128((const __m128i *)(in + 11 * 32));
+          __m128i in20  = _mm_loadu_si128((const __m128i *)(in + 20 * 32));
+          __m128i in21  = _mm_loadu_si128((const __m128i *)(in + 21 * 32));
+          __m128i in22  = _mm_loadu_si128((const __m128i *)(in + 22 * 32));
+          __m128i in23  = _mm_loadu_si128((const __m128i *)(in + 23 * 32));
+          step1[8] = ADD_EPI16(in08, in23);
+          step1[9] = ADD_EPI16(in09, in22);
+          step1[10] = ADD_EPI16(in10, in21);
+          step1[11] = ADD_EPI16(in11, in20);
+          step1[20] = SUB_EPI16(in11, in20);
+          step1[21] = SUB_EPI16(in10, in21);
+          step1[22] = SUB_EPI16(in09, in22);
+          step1[23] = SUB_EPI16(in08, in23);
+#if DCT_HIGH_BIT_DEPTH
+          overflow = check_epi16_overflow_x8(&step1[8], &step1[9], &step1[10],
+                                             &step1[11], &step1[20], &step1[21],
+                                             &step1[22], &step1[23]);
+          if (overflow) {
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
+#endif  // DCT_HIGH_BIT_DEPTH
+        }
+        {
+          __m128i in12  = _mm_loadu_si128((const __m128i *)(in + 12 * 32));
+          __m128i in13  = _mm_loadu_si128((const __m128i *)(in + 13 * 32));
+          __m128i in14  = _mm_loadu_si128((const __m128i *)(in + 14 * 32));
+          __m128i in15  = _mm_loadu_si128((const __m128i *)(in + 15 * 32));
+          __m128i in16  = _mm_loadu_si128((const __m128i *)(in + 16 * 32));
+          __m128i in17  = _mm_loadu_si128((const __m128i *)(in + 17 * 32));
+          __m128i in18  = _mm_loadu_si128((const __m128i *)(in + 18 * 32));
+          __m128i in19  = _mm_loadu_si128((const __m128i *)(in + 19 * 32));
+          step1[12] = ADD_EPI16(in12, in19);
+          step1[13] = ADD_EPI16(in13, in18);
+          step1[14] = ADD_EPI16(in14, in17);
+          step1[15] = ADD_EPI16(in15, in16);
+          step1[16] = SUB_EPI16(in15, in16);
+          step1[17] = SUB_EPI16(in14, in17);
+          step1[18] = SUB_EPI16(in13, in18);
+          step1[19] = SUB_EPI16(in12, in19);
+#if DCT_HIGH_BIT_DEPTH
+          overflow = check_epi16_overflow_x8(&step1[12], &step1[13], &step1[14],
+                                             &step1[15], &step1[16], &step1[17],
+                                             &step1[18], &step1[19]);
+          if (overflow) {
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
+#endif  // DCT_HIGH_BIT_DEPTH
+        }
+      }
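+      // Stages 2-7 below implement the same butterfly network as the scalar
+      // reference transform, eight 16-bit lanes at a time.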
+      // Stage 2
+      {
+        step2[0] = ADD_EPI16(step1[0], step1[15]);
+        step2[1] = ADD_EPI16(step1[1], step1[14]);
+        step2[2] = ADD_EPI16(step1[2], step1[13]);
+        step2[3] = ADD_EPI16(step1[3], step1[12]);
+        step2[4] = ADD_EPI16(step1[4], step1[11]);
+        step2[5] = ADD_EPI16(step1[5], step1[10]);
+        step2[6] = ADD_EPI16(step1[6], step1[ 9]);
+        step2[7] = ADD_EPI16(step1[7], step1[ 8]);
+        step2[8] = SUB_EPI16(step1[7], step1[ 8]);
+        step2[9] = SUB_EPI16(step1[6], step1[ 9]);
+        step2[10] = SUB_EPI16(step1[5], step1[10]);
+        step2[11] = SUB_EPI16(step1[4], step1[11]);
+        step2[12] = SUB_EPI16(step1[3], step1[12]);
+        step2[13] = SUB_EPI16(step1[2], step1[13]);
+        step2[14] = SUB_EPI16(step1[1], step1[14]);
+        step2[15] = SUB_EPI16(step1[0], step1[15]);
+#if DCT_HIGH_BIT_DEPTH
+        overflow = check_epi16_overflow_x16(
+            &step2[0], &step2[1], &step2[2], &step2[3],
+            &step2[4], &step2[5], &step2[6], &step2[7],
+            &step2[8], &step2[9], &step2[10], &step2[11],
+            &step2[12], &step2[13], &step2[14], &step2[15]);
+        if (overflow) {
+          if (pass == 0)
+            HIGH_FDCT32x32_2D_C(input, output_org, stride);
+          else
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+          return;
+        }
+#endif  // DCT_HIGH_BIT_DEPTH
+      }
+      {
+        const __m128i s2_20_0 = _mm_unpacklo_epi16(step1[27], step1[20]);
+        const __m128i s2_20_1 = _mm_unpackhi_epi16(step1[27], step1[20]);
+        const __m128i s2_21_0 = _mm_unpacklo_epi16(step1[26], step1[21]);
+        const __m128i s2_21_1 = _mm_unpackhi_epi16(step1[26], step1[21]);
+        const __m128i s2_22_0 = _mm_unpacklo_epi16(step1[25], step1[22]);
+        const __m128i s2_22_1 = _mm_unpackhi_epi16(step1[25], step1[22]);
+        const __m128i s2_23_0 = _mm_unpacklo_epi16(step1[24], step1[23]);
+        const __m128i s2_23_1 = _mm_unpackhi_epi16(step1[24], step1[23]);
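+        // Interleaving via unpack lets _mm_madd_epi16 with a pair_set_epi16
+        // constant compute a*c0 + b*c1 in each 32-bit lane, i.e. one
+        // rotation term of the butterfly.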
+        const __m128i s2_20_2 = _mm_madd_epi16(s2_20_0, k__cospi_p16_m16);
+        const __m128i s2_20_3 = _mm_madd_epi16(s2_20_1, k__cospi_p16_m16);
+        const __m128i s2_21_2 = _mm_madd_epi16(s2_21_0, k__cospi_p16_m16);
+        const __m128i s2_21_3 = _mm_madd_epi16(s2_21_1, k__cospi_p16_m16);
+        const __m128i s2_22_2 = _mm_madd_epi16(s2_22_0, k__cospi_p16_m16);
+        const __m128i s2_22_3 = _mm_madd_epi16(s2_22_1, k__cospi_p16_m16);
+        const __m128i s2_23_2 = _mm_madd_epi16(s2_23_0, k__cospi_p16_m16);
+        const __m128i s2_23_3 = _mm_madd_epi16(s2_23_1, k__cospi_p16_m16);
+        const __m128i s2_24_2 = _mm_madd_epi16(s2_23_0, k__cospi_p16_p16);
+        const __m128i s2_24_3 = _mm_madd_epi16(s2_23_1, k__cospi_p16_p16);
+        const __m128i s2_25_2 = _mm_madd_epi16(s2_22_0, k__cospi_p16_p16);
+        const __m128i s2_25_3 = _mm_madd_epi16(s2_22_1, k__cospi_p16_p16);
+        const __m128i s2_26_2 = _mm_madd_epi16(s2_21_0, k__cospi_p16_p16);
+        const __m128i s2_26_3 = _mm_madd_epi16(s2_21_1, k__cospi_p16_p16);
+        const __m128i s2_27_2 = _mm_madd_epi16(s2_20_0, k__cospi_p16_p16);
+        const __m128i s2_27_3 = _mm_madd_epi16(s2_20_1, k__cospi_p16_p16);
+        // dct_const_round_shift
+        const __m128i s2_20_4 = _mm_add_epi32(s2_20_2, k__DCT_CONST_ROUNDING);
+        const __m128i s2_20_5 = _mm_add_epi32(s2_20_3, k__DCT_CONST_ROUNDING);
+        const __m128i s2_21_4 = _mm_add_epi32(s2_21_2, k__DCT_CONST_ROUNDING);
+        const __m128i s2_21_5 = _mm_add_epi32(s2_21_3, k__DCT_CONST_ROUNDING);
+        const __m128i s2_22_4 = _mm_add_epi32(s2_22_2, k__DCT_CONST_ROUNDING);
+        const __m128i s2_22_5 = _mm_add_epi32(s2_22_3, k__DCT_CONST_ROUNDING);
+        const __m128i s2_23_4 = _mm_add_epi32(s2_23_2, k__DCT_CONST_ROUNDING);
+        const __m128i s2_23_5 = _mm_add_epi32(s2_23_3, k__DCT_CONST_ROUNDING);
+        const __m128i s2_24_4 = _mm_add_epi32(s2_24_2, k__DCT_CONST_ROUNDING);
+        const __m128i s2_24_5 = _mm_add_epi32(s2_24_3, k__DCT_CONST_ROUNDING);
+        const __m128i s2_25_4 = _mm_add_epi32(s2_25_2, k__DCT_CONST_ROUNDING);
+        const __m128i s2_25_5 = _mm_add_epi32(s2_25_3, k__DCT_CONST_ROUNDING);
+        const __m128i s2_26_4 = _mm_add_epi32(s2_26_2, k__DCT_CONST_ROUNDING);
+        const __m128i s2_26_5 = _mm_add_epi32(s2_26_3, k__DCT_CONST_ROUNDING);
+        const __m128i s2_27_4 = _mm_add_epi32(s2_27_2, k__DCT_CONST_ROUNDING);
+        const __m128i s2_27_5 = _mm_add_epi32(s2_27_3, k__DCT_CONST_ROUNDING);
+        const __m128i s2_20_6 = _mm_srai_epi32(s2_20_4, DCT_CONST_BITS);
+        const __m128i s2_20_7 = _mm_srai_epi32(s2_20_5, DCT_CONST_BITS);
+        const __m128i s2_21_6 = _mm_srai_epi32(s2_21_4, DCT_CONST_BITS);
+        const __m128i s2_21_7 = _mm_srai_epi32(s2_21_5, DCT_CONST_BITS);
+        const __m128i s2_22_6 = _mm_srai_epi32(s2_22_4, DCT_CONST_BITS);
+        const __m128i s2_22_7 = _mm_srai_epi32(s2_22_5, DCT_CONST_BITS);
+        const __m128i s2_23_6 = _mm_srai_epi32(s2_23_4, DCT_CONST_BITS);
+        const __m128i s2_23_7 = _mm_srai_epi32(s2_23_5, DCT_CONST_BITS);
+        const __m128i s2_24_6 = _mm_srai_epi32(s2_24_4, DCT_CONST_BITS);
+        const __m128i s2_24_7 = _mm_srai_epi32(s2_24_5, DCT_CONST_BITS);
+        const __m128i s2_25_6 = _mm_srai_epi32(s2_25_4, DCT_CONST_BITS);
+        const __m128i s2_25_7 = _mm_srai_epi32(s2_25_5, DCT_CONST_BITS);
+        const __m128i s2_26_6 = _mm_srai_epi32(s2_26_4, DCT_CONST_BITS);
+        const __m128i s2_26_7 = _mm_srai_epi32(s2_26_5, DCT_CONST_BITS);
+        const __m128i s2_27_6 = _mm_srai_epi32(s2_27_4, DCT_CONST_BITS);
+        const __m128i s2_27_7 = _mm_srai_epi32(s2_27_5, DCT_CONST_BITS);
+        // Combine
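+        // (_mm_packs_epi32 narrows back to 16 bits with signed saturation.)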
+        step2[20] = _mm_packs_epi32(s2_20_6, s2_20_7);
+        step2[21] = _mm_packs_epi32(s2_21_6, s2_21_7);
+        step2[22] = _mm_packs_epi32(s2_22_6, s2_22_7);
+        step2[23] = _mm_packs_epi32(s2_23_6, s2_23_7);
+        step2[24] = _mm_packs_epi32(s2_24_6, s2_24_7);
+        step2[25] = _mm_packs_epi32(s2_25_6, s2_25_7);
+        step2[26] = _mm_packs_epi32(s2_26_6, s2_26_7);
+        step2[27] = _mm_packs_epi32(s2_27_6, s2_27_7);
+#if DCT_HIGH_BIT_DEPTH
+        overflow = check_epi16_overflow_x8(&step2[20], &step2[21], &step2[22],
+                                           &step2[23], &step2[24], &step2[25],
+                                           &step2[26], &step2[27]);
+        if (overflow) {
+          if (pass == 0)
+            HIGH_FDCT32x32_2D_C(input, output_org, stride);
+          else
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+          return;
+        }
+#endif  // DCT_HIGH_BIT_DEPTH
+      }
+
+#if !FDCT32x32_HIGH_PRECISION
+      // Reduce the magnitude by half so that the intermediate values stay
+      // within the range of 16 bits.
+      if (1 == pass) {
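+        // _mm_cmplt_epi16 yields -1 (0xFFFF) for negative lanes, so the
+        // SUB/ADD/SRAI sequence below computes (x + 1 + (x < 0)) >> 2,
+        // rounding halfway cases away from zero.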
+        __m128i s3_00_0 = _mm_cmplt_epi16(step2[ 0], kZero);
+        __m128i s3_01_0 = _mm_cmplt_epi16(step2[ 1], kZero);
+        __m128i s3_02_0 = _mm_cmplt_epi16(step2[ 2], kZero);
+        __m128i s3_03_0 = _mm_cmplt_epi16(step2[ 3], kZero);
+        __m128i s3_04_0 = _mm_cmplt_epi16(step2[ 4], kZero);
+        __m128i s3_05_0 = _mm_cmplt_epi16(step2[ 5], kZero);
+        __m128i s3_06_0 = _mm_cmplt_epi16(step2[ 6], kZero);
+        __m128i s3_07_0 = _mm_cmplt_epi16(step2[ 7], kZero);
+        __m128i s2_08_0 = _mm_cmplt_epi16(step2[ 8], kZero);
+        __m128i s2_09_0 = _mm_cmplt_epi16(step2[ 9], kZero);
+        __m128i s3_10_0 = _mm_cmplt_epi16(step2[10], kZero);
+        __m128i s3_11_0 = _mm_cmplt_epi16(step2[11], kZero);
+        __m128i s3_12_0 = _mm_cmplt_epi16(step2[12], kZero);
+        __m128i s3_13_0 = _mm_cmplt_epi16(step2[13], kZero);
+        __m128i s2_14_0 = _mm_cmplt_epi16(step2[14], kZero);
+        __m128i s2_15_0 = _mm_cmplt_epi16(step2[15], kZero);
+        __m128i s3_16_0 = _mm_cmplt_epi16(step1[16], kZero);
+        __m128i s3_17_0 = _mm_cmplt_epi16(step1[17], kZero);
+        __m128i s3_18_0 = _mm_cmplt_epi16(step1[18], kZero);
+        __m128i s3_19_0 = _mm_cmplt_epi16(step1[19], kZero);
+        __m128i s3_20_0 = _mm_cmplt_epi16(step2[20], kZero);
+        __m128i s3_21_0 = _mm_cmplt_epi16(step2[21], kZero);
+        __m128i s3_22_0 = _mm_cmplt_epi16(step2[22], kZero);
+        __m128i s3_23_0 = _mm_cmplt_epi16(step2[23], kZero);
+        __m128i s3_24_0 = _mm_cmplt_epi16(step2[24], kZero);
+        __m128i s3_25_0 = _mm_cmplt_epi16(step2[25], kZero);
+        __m128i s3_26_0 = _mm_cmplt_epi16(step2[26], kZero);
+        __m128i s3_27_0 = _mm_cmplt_epi16(step2[27], kZero);
+        __m128i s3_28_0 = _mm_cmplt_epi16(step1[28], kZero);
+        __m128i s3_29_0 = _mm_cmplt_epi16(step1[29], kZero);
+        __m128i s3_30_0 = _mm_cmplt_epi16(step1[30], kZero);
+        __m128i s3_31_0 = _mm_cmplt_epi16(step1[31], kZero);
+
+        step2[0] = SUB_EPI16(step2[ 0], s3_00_0);
+        step2[1] = SUB_EPI16(step2[ 1], s3_01_0);
+        step2[2] = SUB_EPI16(step2[ 2], s3_02_0);
+        step2[3] = SUB_EPI16(step2[ 3], s3_03_0);
+        step2[4] = SUB_EPI16(step2[ 4], s3_04_0);
+        step2[5] = SUB_EPI16(step2[ 5], s3_05_0);
+        step2[6] = SUB_EPI16(step2[ 6], s3_06_0);
+        step2[7] = SUB_EPI16(step2[ 7], s3_07_0);
+        step2[8] = SUB_EPI16(step2[ 8], s2_08_0);
+        step2[9] = SUB_EPI16(step2[ 9], s2_09_0);
+        step2[10] = SUB_EPI16(step2[10], s3_10_0);
+        step2[11] = SUB_EPI16(step2[11], s3_11_0);
+        step2[12] = SUB_EPI16(step2[12], s3_12_0);
+        step2[13] = SUB_EPI16(step2[13], s3_13_0);
+        step2[14] = SUB_EPI16(step2[14], s2_14_0);
+        step2[15] = SUB_EPI16(step2[15], s2_15_0);
+        step1[16] = SUB_EPI16(step1[16], s3_16_0);
+        step1[17] = SUB_EPI16(step1[17], s3_17_0);
+        step1[18] = SUB_EPI16(step1[18], s3_18_0);
+        step1[19] = SUB_EPI16(step1[19], s3_19_0);
+        step2[20] = SUB_EPI16(step2[20], s3_20_0);
+        step2[21] = SUB_EPI16(step2[21], s3_21_0);
+        step2[22] = SUB_EPI16(step2[22], s3_22_0);
+        step2[23] = SUB_EPI16(step2[23], s3_23_0);
+        step2[24] = SUB_EPI16(step2[24], s3_24_0);
+        step2[25] = SUB_EPI16(step2[25], s3_25_0);
+        step2[26] = SUB_EPI16(step2[26], s3_26_0);
+        step2[27] = SUB_EPI16(step2[27], s3_27_0);
+        step1[28] = SUB_EPI16(step1[28], s3_28_0);
+        step1[29] = SUB_EPI16(step1[29], s3_29_0);
+        step1[30] = SUB_EPI16(step1[30], s3_30_0);
+        step1[31] = SUB_EPI16(step1[31], s3_31_0);
+#if DCT_HIGH_BIT_DEPTH
+        overflow = check_epi16_overflow_x32(
+            &step2[0], &step2[1], &step2[2], &step2[3],
+            &step2[4], &step2[5], &step2[6], &step2[7],
+            &step2[8], &step2[9], &step2[10], &step2[11],
+            &step2[12], &step2[13], &step2[14], &step2[15],
+            &step1[16], &step1[17], &step1[18], &step1[19],
+            &step2[20], &step2[21], &step2[22], &step2[23],
+            &step2[24], &step2[25], &step2[26], &step2[27],
+            &step1[28], &step1[29], &step1[30], &step1[31]);
+        if (overflow) {
+          HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+          return;
+        }
+#endif  // DCT_HIGH_BIT_DEPTH
+        step2[0] = _mm_add_epi16(step2[ 0], kOne);
+        step2[1] = _mm_add_epi16(step2[ 1], kOne);
+        step2[2] = _mm_add_epi16(step2[ 2], kOne);
+        step2[3] = _mm_add_epi16(step2[ 3], kOne);
+        step2[4] = _mm_add_epi16(step2[ 4], kOne);
+        step2[5] = _mm_add_epi16(step2[ 5], kOne);
+        step2[6] = _mm_add_epi16(step2[ 6], kOne);
+        step2[7] = _mm_add_epi16(step2[ 7], kOne);
+        step2[8] = _mm_add_epi16(step2[ 8], kOne);
+        step2[9] = _mm_add_epi16(step2[ 9], kOne);
+        step2[10] = _mm_add_epi16(step2[10], kOne);
+        step2[11] = _mm_add_epi16(step2[11], kOne);
+        step2[12] = _mm_add_epi16(step2[12], kOne);
+        step2[13] = _mm_add_epi16(step2[13], kOne);
+        step2[14] = _mm_add_epi16(step2[14], kOne);
+        step2[15] = _mm_add_epi16(step2[15], kOne);
+        step1[16] = _mm_add_epi16(step1[16], kOne);
+        step1[17] = _mm_add_epi16(step1[17], kOne);
+        step1[18] = _mm_add_epi16(step1[18], kOne);
+        step1[19] = _mm_add_epi16(step1[19], kOne);
+        step2[20] = _mm_add_epi16(step2[20], kOne);
+        step2[21] = _mm_add_epi16(step2[21], kOne);
+        step2[22] = _mm_add_epi16(step2[22], kOne);
+        step2[23] = _mm_add_epi16(step2[23], kOne);
+        step2[24] = _mm_add_epi16(step2[24], kOne);
+        step2[25] = _mm_add_epi16(step2[25], kOne);
+        step2[26] = _mm_add_epi16(step2[26], kOne);
+        step2[27] = _mm_add_epi16(step2[27], kOne);
+        step1[28] = _mm_add_epi16(step1[28], kOne);
+        step1[29] = _mm_add_epi16(step1[29], kOne);
+        step1[30] = _mm_add_epi16(step1[30], kOne);
+        step1[31] = _mm_add_epi16(step1[31], kOne);
+
+        step2[0] = _mm_srai_epi16(step2[ 0], 2);
+        step2[1] = _mm_srai_epi16(step2[ 1], 2);
+        step2[2] = _mm_srai_epi16(step2[ 2], 2);
+        step2[3] = _mm_srai_epi16(step2[ 3], 2);
+        step2[4] = _mm_srai_epi16(step2[ 4], 2);
+        step2[5] = _mm_srai_epi16(step2[ 5], 2);
+        step2[6] = _mm_srai_epi16(step2[ 6], 2);
+        step2[7] = _mm_srai_epi16(step2[ 7], 2);
+        step2[8] = _mm_srai_epi16(step2[ 8], 2);
+        step2[9] = _mm_srai_epi16(step2[ 9], 2);
+        step2[10] = _mm_srai_epi16(step2[10], 2);
+        step2[11] = _mm_srai_epi16(step2[11], 2);
+        step2[12] = _mm_srai_epi16(step2[12], 2);
+        step2[13] = _mm_srai_epi16(step2[13], 2);
+        step2[14] = _mm_srai_epi16(step2[14], 2);
+        step2[15] = _mm_srai_epi16(step2[15], 2);
+        step1[16] = _mm_srai_epi16(step1[16], 2);
+        step1[17] = _mm_srai_epi16(step1[17], 2);
+        step1[18] = _mm_srai_epi16(step1[18], 2);
+        step1[19] = _mm_srai_epi16(step1[19], 2);
+        step2[20] = _mm_srai_epi16(step2[20], 2);
+        step2[21] = _mm_srai_epi16(step2[21], 2);
+        step2[22] = _mm_srai_epi16(step2[22], 2);
+        step2[23] = _mm_srai_epi16(step2[23], 2);
+        step2[24] = _mm_srai_epi16(step2[24], 2);
+        step2[25] = _mm_srai_epi16(step2[25], 2);
+        step2[26] = _mm_srai_epi16(step2[26], 2);
+        step2[27] = _mm_srai_epi16(step2[27], 2);
+        step1[28] = _mm_srai_epi16(step1[28], 2);
+        step1[29] = _mm_srai_epi16(step1[29], 2);
+        step1[30] = _mm_srai_epi16(step1[30], 2);
+        step1[31] = _mm_srai_epi16(step1[31], 2);
+      }
+#endif  // !FDCT32x32_HIGH_PRECISION
+
+#if FDCT32x32_HIGH_PRECISION
+      if (pass == 0) {
+#endif
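+      // With FDCT32x32_HIGH_PRECISION the 16-bit stages below run only in
+      // pass 0; the second pass is handled separately at higher precision.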
+      // Stage 3
+      {
+        step3[0] = ADD_EPI16(step2[(8 - 1)], step2[0]);
+        step3[1] = ADD_EPI16(step2[(8 - 2)], step2[1]);
+        step3[2] = ADD_EPI16(step2[(8 - 3)], step2[2]);
+        step3[3] = ADD_EPI16(step2[(8 - 4)], step2[3]);
+        step3[4] = SUB_EPI16(step2[(8 - 5)], step2[4]);
+        step3[5] = SUB_EPI16(step2[(8 - 6)], step2[5]);
+        step3[6] = SUB_EPI16(step2[(8 - 7)], step2[6]);
+        step3[7] = SUB_EPI16(step2[(8 - 8)], step2[7]);
+#if DCT_HIGH_BIT_DEPTH
+        overflow = check_epi16_overflow_x8(&step3[0], &step3[1], &step3[2],
+                                           &step3[3], &step3[4], &step3[5],
+                                           &step3[6], &step3[7]);
+        if (overflow) {
+          if (pass == 0)
+            HIGH_FDCT32x32_2D_C(input, output_org, stride);
+          else
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+          return;
+        }
+#endif  // DCT_HIGH_BIT_DEPTH
+      }
+      {
+        const __m128i s3_10_0 = _mm_unpacklo_epi16(step2[13], step2[10]);
+        const __m128i s3_10_1 = _mm_unpackhi_epi16(step2[13], step2[10]);
+        const __m128i s3_11_0 = _mm_unpacklo_epi16(step2[12], step2[11]);
+        const __m128i s3_11_1 = _mm_unpackhi_epi16(step2[12], step2[11]);
+        const __m128i s3_10_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_m16);
+        const __m128i s3_10_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_m16);
+        const __m128i s3_11_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_m16);
+        const __m128i s3_11_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_m16);
+        const __m128i s3_12_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_p16);
+        const __m128i s3_12_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_p16);
+        const __m128i s3_13_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_p16);
+        const __m128i s3_13_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_p16);
+        // dct_const_round_shift
+        const __m128i s3_10_4 = _mm_add_epi32(s3_10_2, k__DCT_CONST_ROUNDING);
+        const __m128i s3_10_5 = _mm_add_epi32(s3_10_3, k__DCT_CONST_ROUNDING);
+        const __m128i s3_11_4 = _mm_add_epi32(s3_11_2, k__DCT_CONST_ROUNDING);
+        const __m128i s3_11_5 = _mm_add_epi32(s3_11_3, k__DCT_CONST_ROUNDING);
+        const __m128i s3_12_4 = _mm_add_epi32(s3_12_2, k__DCT_CONST_ROUNDING);
+        const __m128i s3_12_5 = _mm_add_epi32(s3_12_3, k__DCT_CONST_ROUNDING);
+        const __m128i s3_13_4 = _mm_add_epi32(s3_13_2, k__DCT_CONST_ROUNDING);
+        const __m128i s3_13_5 = _mm_add_epi32(s3_13_3, k__DCT_CONST_ROUNDING);
+        const __m128i s3_10_6 = _mm_srai_epi32(s3_10_4, DCT_CONST_BITS);
+        const __m128i s3_10_7 = _mm_srai_epi32(s3_10_5, DCT_CONST_BITS);
+        const __m128i s3_11_6 = _mm_srai_epi32(s3_11_4, DCT_CONST_BITS);
+        const __m128i s3_11_7 = _mm_srai_epi32(s3_11_5, DCT_CONST_BITS);
+        const __m128i s3_12_6 = _mm_srai_epi32(s3_12_4, DCT_CONST_BITS);
+        const __m128i s3_12_7 = _mm_srai_epi32(s3_12_5, DCT_CONST_BITS);
+        const __m128i s3_13_6 = _mm_srai_epi32(s3_13_4, DCT_CONST_BITS);
+        const __m128i s3_13_7 = _mm_srai_epi32(s3_13_5, DCT_CONST_BITS);
+        // Combine
+        step3[10] = _mm_packs_epi32(s3_10_6, s3_10_7);
+        step3[11] = _mm_packs_epi32(s3_11_6, s3_11_7);
+        step3[12] = _mm_packs_epi32(s3_12_6, s3_12_7);
+        step3[13] = _mm_packs_epi32(s3_13_6, s3_13_7);
+#if DCT_HIGH_BIT_DEPTH
+        overflow = check_epi16_overflow_x4(&step3[10], &step3[11],
+                                           &step3[12], &step3[13]);
+        if (overflow) {
+          if (pass == 0)
+            HIGH_FDCT32x32_2D_C(input, output_org, stride);
+          else
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+          return;
+        }
+#endif  // DCT_HIGH_BIT_DEPTH
+      }
+      {
+        step3[16] = ADD_EPI16(step2[23], step1[16]);
+        step3[17] = ADD_EPI16(step2[22], step1[17]);
+        step3[18] = ADD_EPI16(step2[21], step1[18]);
+        step3[19] = ADD_EPI16(step2[20], step1[19]);
+        step3[20] = SUB_EPI16(step1[19], step2[20]);
+        step3[21] = SUB_EPI16(step1[18], step2[21]);
+        step3[22] = SUB_EPI16(step1[17], step2[22]);
+        step3[23] = SUB_EPI16(step1[16], step2[23]);
+        step3[24] = SUB_EPI16(step1[31], step2[24]);
+        step3[25] = SUB_EPI16(step1[30], step2[25]);
+        step3[26] = SUB_EPI16(step1[29], step2[26]);
+        step3[27] = SUB_EPI16(step1[28], step2[27]);
+        step3[28] = ADD_EPI16(step2[27], step1[28]);
+        step3[29] = ADD_EPI16(step2[26], step1[29]);
+        step3[30] = ADD_EPI16(step2[25], step1[30]);
+        step3[31] = ADD_EPI16(step2[24], step1[31]);
+#if DCT_HIGH_BIT_DEPTH
+        overflow = check_epi16_overflow_x16(
+            &step3[16], &step3[17], &step3[18], &step3[19],
+            &step3[20], &step3[21], &step3[22], &step3[23],
+            &step3[24], &step3[25], &step3[26], &step3[27],
+            &step3[28], &step3[29], &step3[30], &step3[31]);
+        if (overflow) {
+          if (pass == 0)
+            HIGH_FDCT32x32_2D_C(input, output_org, stride);
+          else
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+          return;
+        }
+#endif  // DCT_HIGH_BIT_DEPTH
+      }
+
+      // Stage 4
+      {
+        step1[0] = ADD_EPI16(step3[ 3], step3[ 0]);
+        step1[1] = ADD_EPI16(step3[ 2], step3[ 1]);
+        step1[2] = SUB_EPI16(step3[ 1], step3[ 2]);
+        step1[3] = SUB_EPI16(step3[ 0], step3[ 3]);
+        step1[8] = ADD_EPI16(step3[11], step2[ 8]);
+        step1[9] = ADD_EPI16(step3[10], step2[ 9]);
+        step1[10] = SUB_EPI16(step2[ 9], step3[10]);
+        step1[11] = SUB_EPI16(step2[ 8], step3[11]);
+        step1[12] = SUB_EPI16(step2[15], step3[12]);
+        step1[13] = SUB_EPI16(step2[14], step3[13]);
+        step1[14] = ADD_EPI16(step3[13], step2[14]);
+        step1[15] = ADD_EPI16(step3[12], step2[15]);
+#if DCT_HIGH_BIT_DEPTH
+        overflow = check_epi16_overflow_x16(
+            &step1[0], &step1[1], &step1[2], &step1[3],
+            &step1[4], &step1[5], &step1[6], &step1[7],
+            &step1[8], &step1[9], &step1[10], &step1[11],
+            &step1[12], &step1[13], &step1[14], &step1[15]);
+        if (overflow) {
+          if (pass == 0)
+            HIGH_FDCT32x32_2D_C(input, output_org, stride);
+          else
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+          return;
+        }
+#endif  // DCT_HIGH_BIT_DEPTH
+      }
+      {
+        const __m128i s1_05_0 = _mm_unpacklo_epi16(step3[6], step3[5]);
+        const __m128i s1_05_1 = _mm_unpackhi_epi16(step3[6], step3[5]);
+        const __m128i s1_05_2 = _mm_madd_epi16(s1_05_0, k__cospi_p16_m16);
+        const __m128i s1_05_3 = _mm_madd_epi16(s1_05_1, k__cospi_p16_m16);
+        const __m128i s1_06_2 = _mm_madd_epi16(s1_05_0, k__cospi_p16_p16);
+        const __m128i s1_06_3 = _mm_madd_epi16(s1_05_1, k__cospi_p16_p16);
+        // dct_const_round_shift
+        const __m128i s1_05_4 = _mm_add_epi32(s1_05_2, k__DCT_CONST_ROUNDING);
+        const __m128i s1_05_5 = _mm_add_epi32(s1_05_3, k__DCT_CONST_ROUNDING);
+        const __m128i s1_06_4 = _mm_add_epi32(s1_06_2, k__DCT_CONST_ROUNDING);
+        const __m128i s1_06_5 = _mm_add_epi32(s1_06_3, k__DCT_CONST_ROUNDING);
+        const __m128i s1_05_6 = _mm_srai_epi32(s1_05_4, DCT_CONST_BITS);
+        const __m128i s1_05_7 = _mm_srai_epi32(s1_05_5, DCT_CONST_BITS);
+        const __m128i s1_06_6 = _mm_srai_epi32(s1_06_4, DCT_CONST_BITS);
+        const __m128i s1_06_7 = _mm_srai_epi32(s1_06_5, DCT_CONST_BITS);
+        // Combine
+        step1[5] = _mm_packs_epi32(s1_05_6, s1_05_7);
+        step1[6] = _mm_packs_epi32(s1_06_6, s1_06_7);
+#if DCT_HIGH_BIT_DEPTH
+        overflow = check_epi16_overflow_x2(&step1[5], &step1[6]);
+        if (overflow) {
+          if (pass == 0)
+            HIGH_FDCT32x32_2D_C(input, output_org, stride);
+          else
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+          return;
+        }
+#endif  // DCT_HIGH_BIT_DEPTH
+      }
+      {
+        const __m128i s1_18_0 = _mm_unpacklo_epi16(step3[18], step3[29]);
+        const __m128i s1_18_1 = _mm_unpackhi_epi16(step3[18], step3[29]);
+        const __m128i s1_19_0 = _mm_unpacklo_epi16(step3[19], step3[28]);
+        const __m128i s1_19_1 = _mm_unpackhi_epi16(step3[19], step3[28]);
+        const __m128i s1_20_0 = _mm_unpacklo_epi16(step3[20], step3[27]);
+        const __m128i s1_20_1 = _mm_unpackhi_epi16(step3[20], step3[27]);
+        const __m128i s1_21_0 = _mm_unpacklo_epi16(step3[21], step3[26]);
+        const __m128i s1_21_1 = _mm_unpackhi_epi16(step3[21], step3[26]);
+        const __m128i s1_18_2 = _mm_madd_epi16(s1_18_0, k__cospi_m08_p24);
+        const __m128i s1_18_3 = _mm_madd_epi16(s1_18_1, k__cospi_m08_p24);
+        const __m128i s1_19_2 = _mm_madd_epi16(s1_19_0, k__cospi_m08_p24);
+        const __m128i s1_19_3 = _mm_madd_epi16(s1_19_1, k__cospi_m08_p24);
+        const __m128i s1_20_2 = _mm_madd_epi16(s1_20_0, k__cospi_m24_m08);
+        const __m128i s1_20_3 = _mm_madd_epi16(s1_20_1, k__cospi_m24_m08);
+        const __m128i s1_21_2 = _mm_madd_epi16(s1_21_0, k__cospi_m24_m08);
+        const __m128i s1_21_3 = _mm_madd_epi16(s1_21_1, k__cospi_m24_m08);
+        const __m128i s1_26_2 = _mm_madd_epi16(s1_21_0, k__cospi_m08_p24);
+        const __m128i s1_26_3 = _mm_madd_epi16(s1_21_1, k__cospi_m08_p24);
+        const __m128i s1_27_2 = _mm_madd_epi16(s1_20_0, k__cospi_m08_p24);
+        const __m128i s1_27_3 = _mm_madd_epi16(s1_20_1, k__cospi_m08_p24);
+        const __m128i s1_28_2 = _mm_madd_epi16(s1_19_0, k__cospi_p24_p08);
+        const __m128i s1_28_3 = _mm_madd_epi16(s1_19_1, k__cospi_p24_p08);
+        const __m128i s1_29_2 = _mm_madd_epi16(s1_18_0, k__cospi_p24_p08);
+        const __m128i s1_29_3 = _mm_madd_epi16(s1_18_1, k__cospi_p24_p08);
+        // dct_const_round_shift
+        const __m128i s1_18_4 = _mm_add_epi32(s1_18_2, k__DCT_CONST_ROUNDING);
+        const __m128i s1_18_5 = _mm_add_epi32(s1_18_3, k__DCT_CONST_ROUNDING);
+        const __m128i s1_19_4 = _mm_add_epi32(s1_19_2, k__DCT_CONST_ROUNDING);
+        const __m128i s1_19_5 = _mm_add_epi32(s1_19_3, k__DCT_CONST_ROUNDING);
+        const __m128i s1_20_4 = _mm_add_epi32(s1_20_2, k__DCT_CONST_ROUNDING);
+        const __m128i s1_20_5 = _mm_add_epi32(s1_20_3, k__DCT_CONST_ROUNDING);
+        const __m128i s1_21_4 = _mm_add_epi32(s1_21_2, k__DCT_CONST_ROUNDING);
+        const __m128i s1_21_5 = _mm_add_epi32(s1_21_3, k__DCT_CONST_ROUNDING);
+        const __m128i s1_26_4 = _mm_add_epi32(s1_26_2, k__DCT_CONST_ROUNDING);
+        const __m128i s1_26_5 = _mm_add_epi32(s1_26_3, k__DCT_CONST_ROUNDING);
+        const __m128i s1_27_4 = _mm_add_epi32(s1_27_2, k__DCT_CONST_ROUNDING);
+        const __m128i s1_27_5 = _mm_add_epi32(s1_27_3, k__DCT_CONST_ROUNDING);
+        const __m128i s1_28_4 = _mm_add_epi32(s1_28_2, k__DCT_CONST_ROUNDING);
+        const __m128i s1_28_5 = _mm_add_epi32(s1_28_3, k__DCT_CONST_ROUNDING);
+        const __m128i s1_29_4 = _mm_add_epi32(s1_29_2, k__DCT_CONST_ROUNDING);
+        const __m128i s1_29_5 = _mm_add_epi32(s1_29_3, k__DCT_CONST_ROUNDING);
+        const __m128i s1_18_6 = _mm_srai_epi32(s1_18_4, DCT_CONST_BITS);
+        const __m128i s1_18_7 = _mm_srai_epi32(s1_18_5, DCT_CONST_BITS);
+        const __m128i s1_19_6 = _mm_srai_epi32(s1_19_4, DCT_CONST_BITS);
+        const __m128i s1_19_7 = _mm_srai_epi32(s1_19_5, DCT_CONST_BITS);
+        const __m128i s1_20_6 = _mm_srai_epi32(s1_20_4, DCT_CONST_BITS);
+        const __m128i s1_20_7 = _mm_srai_epi32(s1_20_5, DCT_CONST_BITS);
+        const __m128i s1_21_6 = _mm_srai_epi32(s1_21_4, DCT_CONST_BITS);
+        const __m128i s1_21_7 = _mm_srai_epi32(s1_21_5, DCT_CONST_BITS);
+        const __m128i s1_26_6 = _mm_srai_epi32(s1_26_4, DCT_CONST_BITS);
+        const __m128i s1_26_7 = _mm_srai_epi32(s1_26_5, DCT_CONST_BITS);
+        const __m128i s1_27_6 = _mm_srai_epi32(s1_27_4, DCT_CONST_BITS);
+        const __m128i s1_27_7 = _mm_srai_epi32(s1_27_5, DCT_CONST_BITS);
+        const __m128i s1_28_6 = _mm_srai_epi32(s1_28_4, DCT_CONST_BITS);
+        const __m128i s1_28_7 = _mm_srai_epi32(s1_28_5, DCT_CONST_BITS);
+        const __m128i s1_29_6 = _mm_srai_epi32(s1_29_4, DCT_CONST_BITS);
+        const __m128i s1_29_7 = _mm_srai_epi32(s1_29_5, DCT_CONST_BITS);
+        // Combine
+        step1[18] = _mm_packs_epi32(s1_18_6, s1_18_7);
+        step1[19] = _mm_packs_epi32(s1_19_6, s1_19_7);
+        step1[20] = _mm_packs_epi32(s1_20_6, s1_20_7);
+        step1[21] = _mm_packs_epi32(s1_21_6, s1_21_7);
+        step1[26] = _mm_packs_epi32(s1_26_6, s1_26_7);
+        step1[27] = _mm_packs_epi32(s1_27_6, s1_27_7);
+        step1[28] = _mm_packs_epi32(s1_28_6, s1_28_7);
+        step1[29] = _mm_packs_epi32(s1_29_6, s1_29_7);
+#if DCT_HIGH_BIT_DEPTH
+        overflow = check_epi16_overflow_x8(&step1[18], &step1[19], &step1[20],
+                                           &step1[21], &step1[26], &step1[27],
+                                           &step1[28], &step1[29]);
+        if (overflow) {
+          if (pass == 0)
+            HIGH_FDCT32x32_2D_C(input, output_org, stride);
+          else
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+          return;
+        }
+#endif  // DCT_HIGH_BIT_DEPTH
+      }
+      // Stage 5
+      {
+        step2[4] = ADD_EPI16(step1[5], step3[4]);
+        step2[5] = SUB_EPI16(step3[4], step1[5]);
+        step2[6] = SUB_EPI16(step3[7], step1[6]);
+        step2[7] = ADD_EPI16(step1[6], step3[7]);
+#if DCT_HIGH_BIT_DEPTH
+        overflow = check_epi16_overflow_x4(&step2[4], &step2[5],
+                                           &step2[6], &step2[7]);
+        if (overflow) {
+          if (pass == 0)
+            HIGH_FDCT32x32_2D_C(input, output_org, stride);
+          else
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+          return;
+        }
+#endif  // DCT_HIGH_BIT_DEPTH
+      }
+      {
+        const __m128i out_00_0 = _mm_unpacklo_epi16(step1[0], step1[1]);
+        const __m128i out_00_1 = _mm_unpackhi_epi16(step1[0], step1[1]);
+        const __m128i out_08_0 = _mm_unpacklo_epi16(step1[2], step1[3]);
+        const __m128i out_08_1 = _mm_unpackhi_epi16(step1[2], step1[3]);
+        const __m128i out_00_2 = _mm_madd_epi16(out_00_0, k__cospi_p16_p16);
+        const __m128i out_00_3 = _mm_madd_epi16(out_00_1, k__cospi_p16_p16);
+        const __m128i out_16_2 = _mm_madd_epi16(out_00_0, k__cospi_p16_m16);
+        const __m128i out_16_3 = _mm_madd_epi16(out_00_1, k__cospi_p16_m16);
+        const __m128i out_08_2 = _mm_madd_epi16(out_08_0, k__cospi_p24_p08);
+        const __m128i out_08_3 = _mm_madd_epi16(out_08_1, k__cospi_p24_p08);
+        const __m128i out_24_2 = _mm_madd_epi16(out_08_0, k__cospi_m08_p24);
+        const __m128i out_24_3 = _mm_madd_epi16(out_08_1, k__cospi_m08_p24);
+        // dct_const_round_shift
+        const __m128i out_00_4 = _mm_add_epi32(out_00_2, k__DCT_CONST_ROUNDING);
+        const __m128i out_00_5 = _mm_add_epi32(out_00_3, k__DCT_CONST_ROUNDING);
+        const __m128i out_16_4 = _mm_add_epi32(out_16_2, k__DCT_CONST_ROUNDING);
+        const __m128i out_16_5 = _mm_add_epi32(out_16_3, k__DCT_CONST_ROUNDING);
+        const __m128i out_08_4 = _mm_add_epi32(out_08_2, k__DCT_CONST_ROUNDING);
+        const __m128i out_08_5 = _mm_add_epi32(out_08_3, k__DCT_CONST_ROUNDING);
+        const __m128i out_24_4 = _mm_add_epi32(out_24_2, k__DCT_CONST_ROUNDING);
+        const __m128i out_24_5 = _mm_add_epi32(out_24_3, k__DCT_CONST_ROUNDING);
+        const __m128i out_00_6 = _mm_srai_epi32(out_00_4, DCT_CONST_BITS);
+        const __m128i out_00_7 = _mm_srai_epi32(out_00_5, DCT_CONST_BITS);
+        const __m128i out_16_6 = _mm_srai_epi32(out_16_4, DCT_CONST_BITS);
+        const __m128i out_16_7 = _mm_srai_epi32(out_16_5, DCT_CONST_BITS);
+        const __m128i out_08_6 = _mm_srai_epi32(out_08_4, DCT_CONST_BITS);
+        const __m128i out_08_7 = _mm_srai_epi32(out_08_5, DCT_CONST_BITS);
+        const __m128i out_24_6 = _mm_srai_epi32(out_24_4, DCT_CONST_BITS);
+        const __m128i out_24_7 = _mm_srai_epi32(out_24_5, DCT_CONST_BITS);
+        // Combine
+        out[ 0] = _mm_packs_epi32(out_00_6, out_00_7);
+        out[16] = _mm_packs_epi32(out_16_6, out_16_7);
+        out[ 8] = _mm_packs_epi32(out_08_6, out_08_7);
+        out[24] = _mm_packs_epi32(out_24_6, out_24_7);
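+        // These are the 1-D DCT outputs 0, 8, 16 and 24 for the current
+        // eight columns.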
+#if DCT_HIGH_BIT_DEPTH
+        overflow = check_epi16_overflow_x4(&out[0], &out[16],
+                                           &out[8], &out[24]);
+        if (overflow) {
+          if (pass == 0)
+            HIGH_FDCT32x32_2D_C(input, output_org, stride);
+          else
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+          return;
+        }
+#endif  // DCT_HIGH_BIT_DEPTH
+      }
+      {
+        const __m128i s2_09_0 = _mm_unpacklo_epi16(step1[ 9], step1[14]);
+        const __m128i s2_09_1 = _mm_unpackhi_epi16(step1[ 9], step1[14]);
+        const __m128i s2_10_0 = _mm_unpacklo_epi16(step1[10], step1[13]);
+        const __m128i s2_10_1 = _mm_unpackhi_epi16(step1[10], step1[13]);
+        const __m128i s2_09_2 = _mm_madd_epi16(s2_09_0, k__cospi_m08_p24);
+        const __m128i s2_09_3 = _mm_madd_epi16(s2_09_1, k__cospi_m08_p24);
+        const __m128i s2_10_2 = _mm_madd_epi16(s2_10_0, k__cospi_m24_m08);
+        const __m128i s2_10_3 = _mm_madd_epi16(s2_10_1, k__cospi_m24_m08);
+        const __m128i s2_13_2 = _mm_madd_epi16(s2_10_0, k__cospi_m08_p24);
+        const __m128i s2_13_3 = _mm_madd_epi16(s2_10_1, k__cospi_m08_p24);
+        const __m128i s2_14_2 = _mm_madd_epi16(s2_09_0, k__cospi_p24_p08);
+        const __m128i s2_14_3 = _mm_madd_epi16(s2_09_1, k__cospi_p24_p08);
+        // dct_const_round_shift
+        const __m128i s2_09_4 = _mm_add_epi32(s2_09_2, k__DCT_CONST_ROUNDING);
+        const __m128i s2_09_5 = _mm_add_epi32(s2_09_3, k__DCT_CONST_ROUNDING);
+        const __m128i s2_10_4 = _mm_add_epi32(s2_10_2, k__DCT_CONST_ROUNDING);
+        const __m128i s2_10_5 = _mm_add_epi32(s2_10_3, k__DCT_CONST_ROUNDING);
+        const __m128i s2_13_4 = _mm_add_epi32(s2_13_2, k__DCT_CONST_ROUNDING);
+        const __m128i s2_13_5 = _mm_add_epi32(s2_13_3, k__DCT_CONST_ROUNDING);
+        const __m128i s2_14_4 = _mm_add_epi32(s2_14_2, k__DCT_CONST_ROUNDING);
+        const __m128i s2_14_5 = _mm_add_epi32(s2_14_3, k__DCT_CONST_ROUNDING);
+        const __m128i s2_09_6 = _mm_srai_epi32(s2_09_4, DCT_CONST_BITS);
+        const __m128i s2_09_7 = _mm_srai_epi32(s2_09_5, DCT_CONST_BITS);
+        const __m128i s2_10_6 = _mm_srai_epi32(s2_10_4, DCT_CONST_BITS);
+        const __m128i s2_10_7 = _mm_srai_epi32(s2_10_5, DCT_CONST_BITS);
+        const __m128i s2_13_6 = _mm_srai_epi32(s2_13_4, DCT_CONST_BITS);
+        const __m128i s2_13_7 = _mm_srai_epi32(s2_13_5, DCT_CONST_BITS);
+        const __m128i s2_14_6 = _mm_srai_epi32(s2_14_4, DCT_CONST_BITS);
+        const __m128i s2_14_7 = _mm_srai_epi32(s2_14_5, DCT_CONST_BITS);
+        // Combine
+        step2[ 9] = _mm_packs_epi32(s2_09_6, s2_09_7);
+        step2[10] = _mm_packs_epi32(s2_10_6, s2_10_7);
+        step2[13] = _mm_packs_epi32(s2_13_6, s2_13_7);
+        step2[14] = _mm_packs_epi32(s2_14_6, s2_14_7);
+#if DCT_HIGH_BIT_DEPTH
+        overflow = check_epi16_overflow_x4(&step2[9], &step2[10],
+                                           &step2[13], &step2[14]);
+        if (overflow) {
+          if (pass == 0)
+            HIGH_FDCT32x32_2D_C(input, output_org, stride);
+          else
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+          return;
+        }
+#endif  // DCT_HIGH_BIT_DEPTH
+      }
+      {
+        step2[16] = ADD_EPI16(step1[19], step3[16]);
+        step2[17] = ADD_EPI16(step1[18], step3[17]);
+        step2[18] = SUB_EPI16(step3[17], step1[18]);
+        step2[19] = SUB_EPI16(step3[16], step1[19]);
+        step2[20] = SUB_EPI16(step3[23], step1[20]);
+        step2[21] = SUB_EPI16(step3[22], step1[21]);
+        step2[22] = ADD_EPI16(step1[21], step3[22]);
+        step2[23] = ADD_EPI16(step1[20], step3[23]);
+        step2[24] = ADD_EPI16(step1[27], step3[24]);
+        step2[25] = ADD_EPI16(step1[26], step3[25]);
+        step2[26] = SUB_EPI16(step3[25], step1[26]);
+        step2[27] = SUB_EPI16(step3[24], step1[27]);
+        step2[28] = SUB_EPI16(step3[31], step1[28]);
+        step2[29] = SUB_EPI16(step3[30], step1[29]);
+        step2[30] = ADD_EPI16(step1[29], step3[30]);
+        step2[31] = ADD_EPI16(step1[28], step3[31]);
+#if DCT_HIGH_BIT_DEPTH
+        overflow = check_epi16_overflow_x16(
+            &step2[16], &step2[17], &step2[18], &step2[19],
+            &step2[20], &step2[21], &step2[22], &step2[23],
+            &step2[24], &step2[25], &step2[26], &step2[27],
+            &step2[28], &step2[29], &step2[30], &step2[31]);
+        if (overflow) {
+          if (pass == 0)
+            HIGH_FDCT32x32_2D_C(input, output_org, stride);
+          else
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+          return;
+        }
+#endif  // DCT_HIGH_BIT_DEPTH
+      }
+      // Stage 6
+      {
+        const __m128i out_04_0 = _mm_unpacklo_epi16(step2[4], step2[7]);
+        const __m128i out_04_1 = _mm_unpackhi_epi16(step2[4], step2[7]);
+        const __m128i out_20_0 = _mm_unpacklo_epi16(step2[5], step2[6]);
+        const __m128i out_20_1 = _mm_unpackhi_epi16(step2[5], step2[6]);
+        const __m128i out_12_0 = _mm_unpacklo_epi16(step2[5], step2[6]);
+        const __m128i out_12_1 = _mm_unpackhi_epi16(step2[5], step2[6]);
+        const __m128i out_28_0 = _mm_unpacklo_epi16(step2[4], step2[7]);
+        const __m128i out_28_1 = _mm_unpackhi_epi16(step2[4], step2[7]);
+        const __m128i out_04_2 = _mm_madd_epi16(out_04_0, k__cospi_p28_p04);
+        const __m128i out_04_3 = _mm_madd_epi16(out_04_1, k__cospi_p28_p04);
+        const __m128i out_20_2 = _mm_madd_epi16(out_20_0, k__cospi_p12_p20);
+        const __m128i out_20_3 = _mm_madd_epi16(out_20_1, k__cospi_p12_p20);
+        const __m128i out_12_2 = _mm_madd_epi16(out_12_0, k__cospi_m20_p12);
+        const __m128i out_12_3 = _mm_madd_epi16(out_12_1, k__cospi_m20_p12);
+        const __m128i out_28_2 = _mm_madd_epi16(out_28_0, k__cospi_m04_p28);
+        const __m128i out_28_3 = _mm_madd_epi16(out_28_1, k__cospi_m04_p28);
+        // dct_const_round_shift
+        const __m128i out_04_4 = _mm_add_epi32(out_04_2, k__DCT_CONST_ROUNDING);
+        const __m128i out_04_5 = _mm_add_epi32(out_04_3, k__DCT_CONST_ROUNDING);
+        const __m128i out_20_4 = _mm_add_epi32(out_20_2, k__DCT_CONST_ROUNDING);
+        const __m128i out_20_5 = _mm_add_epi32(out_20_3, k__DCT_CONST_ROUNDING);
+        const __m128i out_12_4 = _mm_add_epi32(out_12_2, k__DCT_CONST_ROUNDING);
+        const __m128i out_12_5 = _mm_add_epi32(out_12_3, k__DCT_CONST_ROUNDING);
+        const __m128i out_28_4 = _mm_add_epi32(out_28_2, k__DCT_CONST_ROUNDING);
+        const __m128i out_28_5 = _mm_add_epi32(out_28_3, k__DCT_CONST_ROUNDING);
+        const __m128i out_04_6 = _mm_srai_epi32(out_04_4, DCT_CONST_BITS);
+        const __m128i out_04_7 = _mm_srai_epi32(out_04_5, DCT_CONST_BITS);
+        const __m128i out_20_6 = _mm_srai_epi32(out_20_4, DCT_CONST_BITS);
+        const __m128i out_20_7 = _mm_srai_epi32(out_20_5, DCT_CONST_BITS);
+        const __m128i out_12_6 = _mm_srai_epi32(out_12_4, DCT_CONST_BITS);
+        const __m128i out_12_7 = _mm_srai_epi32(out_12_5, DCT_CONST_BITS);
+        const __m128i out_28_6 = _mm_srai_epi32(out_28_4, DCT_CONST_BITS);
+        const __m128i out_28_7 = _mm_srai_epi32(out_28_5, DCT_CONST_BITS);
+        // Combine
+        out[4] = _mm_packs_epi32(out_04_6, out_04_7);
+        out[20] = _mm_packs_epi32(out_20_6, out_20_7);
+        out[12] = _mm_packs_epi32(out_12_6, out_12_7);
+        out[28] = _mm_packs_epi32(out_28_6, out_28_7);
+#if DCT_HIGH_BIT_DEPTH
+        overflow = check_epi16_overflow_x4(&out[4], &out[20],
+                                           &out[12], &out[28]);
+        if (overflow) {
+          if (pass == 0)
+            HIGH_FDCT32x32_2D_C(input, output_org, stride);
+          else
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+          return;
+        }
+#endif  // DCT_HIGH_BIT_DEPTH
+      }
+      {
+        step3[8] = ADD_EPI16(step2[ 9], step1[ 8]);
+        step3[9] = SUB_EPI16(step1[ 8], step2[ 9]);
+        step3[10] = SUB_EPI16(step1[11], step2[10]);
+        step3[11] = ADD_EPI16(step2[10], step1[11]);
+        step3[12] = ADD_EPI16(step2[13], step1[12]);
+        step3[13] = SUB_EPI16(step1[12], step2[13]);
+        step3[14] = SUB_EPI16(step1[15], step2[14]);
+        step3[15] = ADD_EPI16(step2[14], step1[15]);
+#if DCT_HIGH_BIT_DEPTH
+        overflow = check_epi16_overflow_x8(&step3[8], &step3[9], &step3[10],
+                                           &step3[11], &step3[12], &step3[13],
+                                           &step3[14], &step3[15]);
+        if (overflow) {
+          if (pass == 0)
+            HIGH_FDCT32x32_2D_C(input, output_org, stride);
+          else
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+          return;
+        }
+#endif  // DCT_HIGH_BIT_DEPTH
+      }
+      {
+        const __m128i s3_17_0 = _mm_unpacklo_epi16(step2[17], step2[30]);
+        const __m128i s3_17_1 = _mm_unpackhi_epi16(step2[17], step2[30]);
+        const __m128i s3_18_0 = _mm_unpacklo_epi16(step2[18], step2[29]);
+        const __m128i s3_18_1 = _mm_unpackhi_epi16(step2[18], step2[29]);
+        const __m128i s3_21_0 = _mm_unpacklo_epi16(step2[21], step2[26]);
+        const __m128i s3_21_1 = _mm_unpackhi_epi16(step2[21], step2[26]);
+        const __m128i s3_22_0 = _mm_unpacklo_epi16(step2[22], step2[25]);
+        const __m128i s3_22_1 = _mm_unpackhi_epi16(step2[22], step2[25]);
+        const __m128i s3_17_2 = _mm_madd_epi16(s3_17_0, k__cospi_m04_p28);
+        const __m128i s3_17_3 = _mm_madd_epi16(s3_17_1, k__cospi_m04_p28);
+        const __m128i s3_18_2 = _mm_madd_epi16(s3_18_0, k__cospi_m28_m04);
+        const __m128i s3_18_3 = _mm_madd_epi16(s3_18_1, k__cospi_m28_m04);
+        const __m128i s3_21_2 = _mm_madd_epi16(s3_21_0, k__cospi_m20_p12);
+        const __m128i s3_21_3 = _mm_madd_epi16(s3_21_1, k__cospi_m20_p12);
+        const __m128i s3_22_2 = _mm_madd_epi16(s3_22_0, k__cospi_m12_m20);
+        const __m128i s3_22_3 = _mm_madd_epi16(s3_22_1, k__cospi_m12_m20);
+        const __m128i s3_25_2 = _mm_madd_epi16(s3_22_0, k__cospi_m20_p12);
+        const __m128i s3_25_3 = _mm_madd_epi16(s3_22_1, k__cospi_m20_p12);
+        const __m128i s3_26_2 = _mm_madd_epi16(s3_21_0, k__cospi_p12_p20);
+        const __m128i s3_26_3 = _mm_madd_epi16(s3_21_1, k__cospi_p12_p20);
+        const __m128i s3_29_2 = _mm_madd_epi16(s3_18_0, k__cospi_m04_p28);
+        const __m128i s3_29_3 = _mm_madd_epi16(s3_18_1, k__cospi_m04_p28);
+        const __m128i s3_30_2 = _mm_madd_epi16(s3_17_0, k__cospi_p28_p04);
+        const __m128i s3_30_3 = _mm_madd_epi16(s3_17_1, k__cospi_p28_p04);
+        // dct_const_round_shift
+        const __m128i s3_17_4 = _mm_add_epi32(s3_17_2, k__DCT_CONST_ROUNDING);
+        const __m128i s3_17_5 = _mm_add_epi32(s3_17_3, k__DCT_CONST_ROUNDING);
+        const __m128i s3_18_4 = _mm_add_epi32(s3_18_2, k__DCT_CONST_ROUNDING);
+        const __m128i s3_18_5 = _mm_add_epi32(s3_18_3, k__DCT_CONST_ROUNDING);
+        const __m128i s3_21_4 = _mm_add_epi32(s3_21_2, k__DCT_CONST_ROUNDING);
+        const __m128i s3_21_5 = _mm_add_epi32(s3_21_3, k__DCT_CONST_ROUNDING);
+        const __m128i s3_22_4 = _mm_add_epi32(s3_22_2, k__DCT_CONST_ROUNDING);
+        const __m128i s3_22_5 = _mm_add_epi32(s3_22_3, k__DCT_CONST_ROUNDING);
+        const __m128i s3_17_6 = _mm_srai_epi32(s3_17_4, DCT_CONST_BITS);
+        const __m128i s3_17_7 = _mm_srai_epi32(s3_17_5, DCT_CONST_BITS);
+        const __m128i s3_18_6 = _mm_srai_epi32(s3_18_4, DCT_CONST_BITS);
+        const __m128i s3_18_7 = _mm_srai_epi32(s3_18_5, DCT_CONST_BITS);
+        const __m128i s3_21_6 = _mm_srai_epi32(s3_21_4, DCT_CONST_BITS);
+        const __m128i s3_21_7 = _mm_srai_epi32(s3_21_5, DCT_CONST_BITS);
+        const __m128i s3_22_6 = _mm_srai_epi32(s3_22_4, DCT_CONST_BITS);
+        const __m128i s3_22_7 = _mm_srai_epi32(s3_22_5, DCT_CONST_BITS);
+        const __m128i s3_25_4 = _mm_add_epi32(s3_25_2, k__DCT_CONST_ROUNDING);
+        const __m128i s3_25_5 = _mm_add_epi32(s3_25_3, k__DCT_CONST_ROUNDING);
+        const __m128i s3_26_4 = _mm_add_epi32(s3_26_2, k__DCT_CONST_ROUNDING);
+        const __m128i s3_26_5 = _mm_add_epi32(s3_26_3, k__DCT_CONST_ROUNDING);
+        const __m128i s3_29_4 = _mm_add_epi32(s3_29_2, k__DCT_CONST_ROUNDING);
+        const __m128i s3_29_5 = _mm_add_epi32(s3_29_3, k__DCT_CONST_ROUNDING);
+        const __m128i s3_30_4 = _mm_add_epi32(s3_30_2, k__DCT_CONST_ROUNDING);
+        const __m128i s3_30_5 = _mm_add_epi32(s3_30_3, k__DCT_CONST_ROUNDING);
+        const __m128i s3_25_6 = _mm_srai_epi32(s3_25_4, DCT_CONST_BITS);
+        const __m128i s3_25_7 = _mm_srai_epi32(s3_25_5, DCT_CONST_BITS);
+        const __m128i s3_26_6 = _mm_srai_epi32(s3_26_4, DCT_CONST_BITS);
+        const __m128i s3_26_7 = _mm_srai_epi32(s3_26_5, DCT_CONST_BITS);
+        const __m128i s3_29_6 = _mm_srai_epi32(s3_29_4, DCT_CONST_BITS);
+        const __m128i s3_29_7 = _mm_srai_epi32(s3_29_5, DCT_CONST_BITS);
+        const __m128i s3_30_6 = _mm_srai_epi32(s3_30_4, DCT_CONST_BITS);
+        const __m128i s3_30_7 = _mm_srai_epi32(s3_30_5, DCT_CONST_BITS);
+        // Combine
+        step3[17] = _mm_packs_epi32(s3_17_6, s3_17_7);
+        step3[18] = _mm_packs_epi32(s3_18_6, s3_18_7);
+        step3[21] = _mm_packs_epi32(s3_21_6, s3_21_7);
+        step3[22] = _mm_packs_epi32(s3_22_6, s3_22_7);
+        // Combine
+        step3[25] = _mm_packs_epi32(s3_25_6, s3_25_7);
+        step3[26] = _mm_packs_epi32(s3_26_6, s3_26_7);
+        step3[29] = _mm_packs_epi32(s3_29_6, s3_29_7);
+        step3[30] = _mm_packs_epi32(s3_30_6, s3_30_7);
+#if DCT_HIGH_BIT_DEPTH
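+        // High-bit-depth inputs can overflow the 16-bit lanes; if that
+        // happens, fall back to the C implementation (the full 2D transform
+        // in pass 0, the row transform in pass 1) and return.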
+        overflow = check_epi16_overflow_x8(&step3[17], &step3[18], &step3[21],
+                                           &step3[22], &step3[25], &step3[26],
+                                           &step3[29], &step3[30]);
+        if (overflow) {
+          if (pass == 0)
+            HIGH_FDCT32x32_2D_C(input, output_org, stride);
+          else
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+          return;
+        }
+#endif  // DCT_HIGH_BIT_DEPTH
+      }
+      // Stage 7
+      {
+        const __m128i out_02_0 = _mm_unpacklo_epi16(step3[ 8], step3[15]);
+        const __m128i out_02_1 = _mm_unpackhi_epi16(step3[ 8], step3[15]);
+        const __m128i out_18_0 = _mm_unpacklo_epi16(step3[ 9], step3[14]);
+        const __m128i out_18_1 = _mm_unpackhi_epi16(step3[ 9], step3[14]);
+        const __m128i out_10_0 = _mm_unpacklo_epi16(step3[10], step3[13]);
+        const __m128i out_10_1 = _mm_unpackhi_epi16(step3[10], step3[13]);
+        const __m128i out_26_0 = _mm_unpacklo_epi16(step3[11], step3[12]);
+        const __m128i out_26_1 = _mm_unpackhi_epi16(step3[11], step3[12]);
+        const __m128i out_02_2 = _mm_madd_epi16(out_02_0, k__cospi_p30_p02);
+        const __m128i out_02_3 = _mm_madd_epi16(out_02_1, k__cospi_p30_p02);
+        const __m128i out_18_2 = _mm_madd_epi16(out_18_0, k__cospi_p14_p18);
+        const __m128i out_18_3 = _mm_madd_epi16(out_18_1, k__cospi_p14_p18);
+        const __m128i out_10_2 = _mm_madd_epi16(out_10_0, k__cospi_p22_p10);
+        const __m128i out_10_3 = _mm_madd_epi16(out_10_1, k__cospi_p22_p10);
+        const __m128i out_26_2 = _mm_madd_epi16(out_26_0, k__cospi_p06_p26);
+        const __m128i out_26_3 = _mm_madd_epi16(out_26_1, k__cospi_p06_p26);
+        const __m128i out_06_2 = _mm_madd_epi16(out_26_0, k__cospi_m26_p06);
+        const __m128i out_06_3 = _mm_madd_epi16(out_26_1, k__cospi_m26_p06);
+        const __m128i out_22_2 = _mm_madd_epi16(out_10_0, k__cospi_m10_p22);
+        const __m128i out_22_3 = _mm_madd_epi16(out_10_1, k__cospi_m10_p22);
+        const __m128i out_14_2 = _mm_madd_epi16(out_18_0, k__cospi_m18_p14);
+        const __m128i out_14_3 = _mm_madd_epi16(out_18_1, k__cospi_m18_p14);
+        const __m128i out_30_2 = _mm_madd_epi16(out_02_0, k__cospi_m02_p30);
+        const __m128i out_30_3 = _mm_madd_epi16(out_02_1, k__cospi_m02_p30);
+        // dct_const_round_shift
+        const __m128i out_02_4 = _mm_add_epi32(out_02_2, k__DCT_CONST_ROUNDING);
+        const __m128i out_02_5 = _mm_add_epi32(out_02_3, k__DCT_CONST_ROUNDING);
+        const __m128i out_18_4 = _mm_add_epi32(out_18_2, k__DCT_CONST_ROUNDING);
+        const __m128i out_18_5 = _mm_add_epi32(out_18_3, k__DCT_CONST_ROUNDING);
+        const __m128i out_10_4 = _mm_add_epi32(out_10_2, k__DCT_CONST_ROUNDING);
+        const __m128i out_10_5 = _mm_add_epi32(out_10_3, k__DCT_CONST_ROUNDING);
+        const __m128i out_26_4 = _mm_add_epi32(out_26_2, k__DCT_CONST_ROUNDING);
+        const __m128i out_26_5 = _mm_add_epi32(out_26_3, k__DCT_CONST_ROUNDING);
+        const __m128i out_06_4 = _mm_add_epi32(out_06_2, k__DCT_CONST_ROUNDING);
+        const __m128i out_06_5 = _mm_add_epi32(out_06_3, k__DCT_CONST_ROUNDING);
+        const __m128i out_22_4 = _mm_add_epi32(out_22_2, k__DCT_CONST_ROUNDING);
+        const __m128i out_22_5 = _mm_add_epi32(out_22_3, k__DCT_CONST_ROUNDING);
+        const __m128i out_14_4 = _mm_add_epi32(out_14_2, k__DCT_CONST_ROUNDING);
+        const __m128i out_14_5 = _mm_add_epi32(out_14_3, k__DCT_CONST_ROUNDING);
+        const __m128i out_30_4 = _mm_add_epi32(out_30_2, k__DCT_CONST_ROUNDING);
+        const __m128i out_30_5 = _mm_add_epi32(out_30_3, k__DCT_CONST_ROUNDING);
+        const __m128i out_02_6 = _mm_srai_epi32(out_02_4, DCT_CONST_BITS);
+        const __m128i out_02_7 = _mm_srai_epi32(out_02_5, DCT_CONST_BITS);
+        const __m128i out_18_6 = _mm_srai_epi32(out_18_4, DCT_CONST_BITS);
+        const __m128i out_18_7 = _mm_srai_epi32(out_18_5, DCT_CONST_BITS);
+        const __m128i out_10_6 = _mm_srai_epi32(out_10_4, DCT_CONST_BITS);
+        const __m128i out_10_7 = _mm_srai_epi32(out_10_5, DCT_CONST_BITS);
+        const __m128i out_26_6 = _mm_srai_epi32(out_26_4, DCT_CONST_BITS);
+        const __m128i out_26_7 = _mm_srai_epi32(out_26_5, DCT_CONST_BITS);
+        const __m128i out_06_6 = _mm_srai_epi32(out_06_4, DCT_CONST_BITS);
+        const __m128i out_06_7 = _mm_srai_epi32(out_06_5, DCT_CONST_BITS);
+        const __m128i out_22_6 = _mm_srai_epi32(out_22_4, DCT_CONST_BITS);
+        const __m128i out_22_7 = _mm_srai_epi32(out_22_5, DCT_CONST_BITS);
+        const __m128i out_14_6 = _mm_srai_epi32(out_14_4, DCT_CONST_BITS);
+        const __m128i out_14_7 = _mm_srai_epi32(out_14_5, DCT_CONST_BITS);
+        const __m128i out_30_6 = _mm_srai_epi32(out_30_4, DCT_CONST_BITS);
+        const __m128i out_30_7 = _mm_srai_epi32(out_30_5, DCT_CONST_BITS);
+        // Combine
+        out[ 2] = _mm_packs_epi32(out_02_6, out_02_7);
+        out[18] = _mm_packs_epi32(out_18_6, out_18_7);
+        out[10] = _mm_packs_epi32(out_10_6, out_10_7);
+        out[26] = _mm_packs_epi32(out_26_6, out_26_7);
+        out[ 6] = _mm_packs_epi32(out_06_6, out_06_7);
+        out[22] = _mm_packs_epi32(out_22_6, out_22_7);
+        out[14] = _mm_packs_epi32(out_14_6, out_14_7);
+        out[30] = _mm_packs_epi32(out_30_6, out_30_7);
+#if DCT_HIGH_BIT_DEPTH
+        overflow = check_epi16_overflow_x8(&out[2], &out[18], &out[10],
+                                           &out[26], &out[6], &out[22],
+                                           &out[14], &out[30]);
+        if (overflow) {
+          if (pass == 0)
+            HIGH_FDCT32x32_2D_C(input, output_org, stride);
+          else
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+          return;
+        }
+#endif  // DCT_HIGH_BIT_DEPTH
+      }
+      {
+        step1[16] = ADD_EPI16(step3[17], step2[16]);
+        step1[17] = SUB_EPI16(step2[16], step3[17]);
+        step1[18] = SUB_EPI16(step2[19], step3[18]);
+        step1[19] = ADD_EPI16(step3[18], step2[19]);
+        step1[20] = ADD_EPI16(step3[21], step2[20]);
+        step1[21] = SUB_EPI16(step2[20], step3[21]);
+        step1[22] = SUB_EPI16(step2[23], step3[22]);
+        step1[23] = ADD_EPI16(step3[22], step2[23]);
+        step1[24] = ADD_EPI16(step3[25], step2[24]);
+        step1[25] = SUB_EPI16(step2[24], step3[25]);
+        step1[26] = SUB_EPI16(step2[27], step3[26]);
+        step1[27] = ADD_EPI16(step3[26], step2[27]);
+        step1[28] = ADD_EPI16(step3[29], step2[28]);
+        step1[29] = SUB_EPI16(step2[28], step3[29]);
+        step1[30] = SUB_EPI16(step2[31], step3[30]);
+        step1[31] = ADD_EPI16(step3[30], step2[31]);
+#if DCT_HIGH_BIT_DEPTH
+        overflow = check_epi16_overflow_x16(
+            &step1[16], &step1[17], &step1[18], &step1[19],
+            &step1[20], &step1[21], &step1[22], &step1[23],
+            &step1[24], &step1[25], &step1[26], &step1[27],
+            &step1[28], &step1[29], &step1[30], &step1[31]);
+        if (overflow) {
+          if (pass == 0)
+            HIGH_FDCT32x32_2D_C(input, output_org, stride);
+          else
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+          return;
+        }
+#endif  // DCT_HIGH_BIT_DEPTH
+      }
+      // Final stage --- output indices are bit-reversed.
+      {
+        const __m128i out_01_0 = _mm_unpacklo_epi16(step1[16], step1[31]);
+        const __m128i out_01_1 = _mm_unpackhi_epi16(step1[16], step1[31]);
+        const __m128i out_17_0 = _mm_unpacklo_epi16(step1[17], step1[30]);
+        const __m128i out_17_1 = _mm_unpackhi_epi16(step1[17], step1[30]);
+        const __m128i out_09_0 = _mm_unpacklo_epi16(step1[18], step1[29]);
+        const __m128i out_09_1 = _mm_unpackhi_epi16(step1[18], step1[29]);
+        const __m128i out_25_0 = _mm_unpacklo_epi16(step1[19], step1[28]);
+        const __m128i out_25_1 = _mm_unpackhi_epi16(step1[19], step1[28]);
+        const __m128i out_01_2 = _mm_madd_epi16(out_01_0, k__cospi_p31_p01);
+        const __m128i out_01_3 = _mm_madd_epi16(out_01_1, k__cospi_p31_p01);
+        const __m128i out_17_2 = _mm_madd_epi16(out_17_0, k__cospi_p15_p17);
+        const __m128i out_17_3 = _mm_madd_epi16(out_17_1, k__cospi_p15_p17);
+        const __m128i out_09_2 = _mm_madd_epi16(out_09_0, k__cospi_p23_p09);
+        const __m128i out_09_3 = _mm_madd_epi16(out_09_1, k__cospi_p23_p09);
+        const __m128i out_25_2 = _mm_madd_epi16(out_25_0, k__cospi_p07_p25);
+        const __m128i out_25_3 = _mm_madd_epi16(out_25_1, k__cospi_p07_p25);
+        const __m128i out_07_2 = _mm_madd_epi16(out_25_0, k__cospi_m25_p07);
+        const __m128i out_07_3 = _mm_madd_epi16(out_25_1, k__cospi_m25_p07);
+        const __m128i out_23_2 = _mm_madd_epi16(out_09_0, k__cospi_m09_p23);
+        const __m128i out_23_3 = _mm_madd_epi16(out_09_1, k__cospi_m09_p23);
+        const __m128i out_15_2 = _mm_madd_epi16(out_17_0, k__cospi_m17_p15);
+        const __m128i out_15_3 = _mm_madd_epi16(out_17_1, k__cospi_m17_p15);
+        const __m128i out_31_2 = _mm_madd_epi16(out_01_0, k__cospi_m01_p31);
+        const __m128i out_31_3 = _mm_madd_epi16(out_01_1, k__cospi_m01_p31);
+        // dct_const_round_shift
+        const __m128i out_01_4 = _mm_add_epi32(out_01_2, k__DCT_CONST_ROUNDING);
+        const __m128i out_01_5 = _mm_add_epi32(out_01_3, k__DCT_CONST_ROUNDING);
+        const __m128i out_17_4 = _mm_add_epi32(out_17_2, k__DCT_CONST_ROUNDING);
+        const __m128i out_17_5 = _mm_add_epi32(out_17_3, k__DCT_CONST_ROUNDING);
+        const __m128i out_09_4 = _mm_add_epi32(out_09_2, k__DCT_CONST_ROUNDING);
+        const __m128i out_09_5 = _mm_add_epi32(out_09_3, k__DCT_CONST_ROUNDING);
+        const __m128i out_25_4 = _mm_add_epi32(out_25_2, k__DCT_CONST_ROUNDING);
+        const __m128i out_25_5 = _mm_add_epi32(out_25_3, k__DCT_CONST_ROUNDING);
+        const __m128i out_07_4 = _mm_add_epi32(out_07_2, k__DCT_CONST_ROUNDING);
+        const __m128i out_07_5 = _mm_add_epi32(out_07_3, k__DCT_CONST_ROUNDING);
+        const __m128i out_23_4 = _mm_add_epi32(out_23_2, k__DCT_CONST_ROUNDING);
+        const __m128i out_23_5 = _mm_add_epi32(out_23_3, k__DCT_CONST_ROUNDING);
+        const __m128i out_15_4 = _mm_add_epi32(out_15_2, k__DCT_CONST_ROUNDING);
+        const __m128i out_15_5 = _mm_add_epi32(out_15_3, k__DCT_CONST_ROUNDING);
+        const __m128i out_31_4 = _mm_add_epi32(out_31_2, k__DCT_CONST_ROUNDING);
+        const __m128i out_31_5 = _mm_add_epi32(out_31_3, k__DCT_CONST_ROUNDING);
+        const __m128i out_01_6 = _mm_srai_epi32(out_01_4, DCT_CONST_BITS);
+        const __m128i out_01_7 = _mm_srai_epi32(out_01_5, DCT_CONST_BITS);
+        const __m128i out_17_6 = _mm_srai_epi32(out_17_4, DCT_CONST_BITS);
+        const __m128i out_17_7 = _mm_srai_epi32(out_17_5, DCT_CONST_BITS);
+        const __m128i out_09_6 = _mm_srai_epi32(out_09_4, DCT_CONST_BITS);
+        const __m128i out_09_7 = _mm_srai_epi32(out_09_5, DCT_CONST_BITS);
+        const __m128i out_25_6 = _mm_srai_epi32(out_25_4, DCT_CONST_BITS);
+        const __m128i out_25_7 = _mm_srai_epi32(out_25_5, DCT_CONST_BITS);
+        const __m128i out_07_6 = _mm_srai_epi32(out_07_4, DCT_CONST_BITS);
+        const __m128i out_07_7 = _mm_srai_epi32(out_07_5, DCT_CONST_BITS);
+        const __m128i out_23_6 = _mm_srai_epi32(out_23_4, DCT_CONST_BITS);
+        const __m128i out_23_7 = _mm_srai_epi32(out_23_5, DCT_CONST_BITS);
+        const __m128i out_15_6 = _mm_srai_epi32(out_15_4, DCT_CONST_BITS);
+        const __m128i out_15_7 = _mm_srai_epi32(out_15_5, DCT_CONST_BITS);
+        const __m128i out_31_6 = _mm_srai_epi32(out_31_4, DCT_CONST_BITS);
+        const __m128i out_31_7 = _mm_srai_epi32(out_31_5, DCT_CONST_BITS);
+        // Combine
+        out[ 1] = _mm_packs_epi32(out_01_6, out_01_7);
+        out[17] = _mm_packs_epi32(out_17_6, out_17_7);
+        out[ 9] = _mm_packs_epi32(out_09_6, out_09_7);
+        out[25] = _mm_packs_epi32(out_25_6, out_25_7);
+        out[ 7] = _mm_packs_epi32(out_07_6, out_07_7);
+        out[23] = _mm_packs_epi32(out_23_6, out_23_7);
+        out[15] = _mm_packs_epi32(out_15_6, out_15_7);
+        out[31] = _mm_packs_epi32(out_31_6, out_31_7);
+#if DCT_HIGH_BIT_DEPTH
+        overflow = check_epi16_overflow_x8(&out[1], &out[17], &out[9],
+                                           &out[25], &out[7], &out[23],
+                                           &out[15], &out[31]);
+        if (overflow) {
+          if (pass == 0)
+            HIGH_FDCT32x32_2D_C(input, output_org, stride);
+          else
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+          return;
+        }
+#endif  // DCT_HIGH_BIT_DEPTH
+      }
+      {
+        const __m128i out_05_0 = _mm_unpacklo_epi16(step1[20], step1[27]);
+        const __m128i out_05_1 = _mm_unpackhi_epi16(step1[20], step1[27]);
+        const __m128i out_21_0 = _mm_unpacklo_epi16(step1[21], step1[26]);
+        const __m128i out_21_1 = _mm_unpackhi_epi16(step1[21], step1[26]);
+        const __m128i out_13_0 = _mm_unpacklo_epi16(step1[22], step1[25]);
+        const __m128i out_13_1 = _mm_unpackhi_epi16(step1[22], step1[25]);
+        const __m128i out_29_0 = _mm_unpacklo_epi16(step1[23], step1[24]);
+        const __m128i out_29_1 = _mm_unpackhi_epi16(step1[23], step1[24]);
+        const __m128i out_05_2 = _mm_madd_epi16(out_05_0, k__cospi_p27_p05);
+        const __m128i out_05_3 = _mm_madd_epi16(out_05_1, k__cospi_p27_p05);
+        const __m128i out_21_2 = _mm_madd_epi16(out_21_0, k__cospi_p11_p21);
+        const __m128i out_21_3 = _mm_madd_epi16(out_21_1, k__cospi_p11_p21);
+        const __m128i out_13_2 = _mm_madd_epi16(out_13_0, k__cospi_p19_p13);
+        const __m128i out_13_3 = _mm_madd_epi16(out_13_1, k__cospi_p19_p13);
+        const __m128i out_29_2 = _mm_madd_epi16(out_29_0, k__cospi_p03_p29);
+        const __m128i out_29_3 = _mm_madd_epi16(out_29_1, k__cospi_p03_p29);
+        const __m128i out_03_2 = _mm_madd_epi16(out_29_0, k__cospi_m29_p03);
+        const __m128i out_03_3 = _mm_madd_epi16(out_29_1, k__cospi_m29_p03);
+        const __m128i out_19_2 = _mm_madd_epi16(out_13_0, k__cospi_m13_p19);
+        const __m128i out_19_3 = _mm_madd_epi16(out_13_1, k__cospi_m13_p19);
+        const __m128i out_11_2 = _mm_madd_epi16(out_21_0, k__cospi_m21_p11);
+        const __m128i out_11_3 = _mm_madd_epi16(out_21_1, k__cospi_m21_p11);
+        const __m128i out_27_2 = _mm_madd_epi16(out_05_0, k__cospi_m05_p27);
+        const __m128i out_27_3 = _mm_madd_epi16(out_05_1, k__cospi_m05_p27);
+        // dct_const_round_shift
+        const __m128i out_05_4 = _mm_add_epi32(out_05_2, k__DCT_CONST_ROUNDING);
+        const __m128i out_05_5 = _mm_add_epi32(out_05_3, k__DCT_CONST_ROUNDING);
+        const __m128i out_21_4 = _mm_add_epi32(out_21_2, k__DCT_CONST_ROUNDING);
+        const __m128i out_21_5 = _mm_add_epi32(out_21_3, k__DCT_CONST_ROUNDING);
+        const __m128i out_13_4 = _mm_add_epi32(out_13_2, k__DCT_CONST_ROUNDING);
+        const __m128i out_13_5 = _mm_add_epi32(out_13_3, k__DCT_CONST_ROUNDING);
+        const __m128i out_29_4 = _mm_add_epi32(out_29_2, k__DCT_CONST_ROUNDING);
+        const __m128i out_29_5 = _mm_add_epi32(out_29_3, k__DCT_CONST_ROUNDING);
+        const __m128i out_03_4 = _mm_add_epi32(out_03_2, k__DCT_CONST_ROUNDING);
+        const __m128i out_03_5 = _mm_add_epi32(out_03_3, k__DCT_CONST_ROUNDING);
+        const __m128i out_19_4 = _mm_add_epi32(out_19_2, k__DCT_CONST_ROUNDING);
+        const __m128i out_19_5 = _mm_add_epi32(out_19_3, k__DCT_CONST_ROUNDING);
+        const __m128i out_11_4 = _mm_add_epi32(out_11_2, k__DCT_CONST_ROUNDING);
+        const __m128i out_11_5 = _mm_add_epi32(out_11_3, k__DCT_CONST_ROUNDING);
+        const __m128i out_27_4 = _mm_add_epi32(out_27_2, k__DCT_CONST_ROUNDING);
+        const __m128i out_27_5 = _mm_add_epi32(out_27_3, k__DCT_CONST_ROUNDING);
+        const __m128i out_05_6 = _mm_srai_epi32(out_05_4, DCT_CONST_BITS);
+        const __m128i out_05_7 = _mm_srai_epi32(out_05_5, DCT_CONST_BITS);
+        const __m128i out_21_6 = _mm_srai_epi32(out_21_4, DCT_CONST_BITS);
+        const __m128i out_21_7 = _mm_srai_epi32(out_21_5, DCT_CONST_BITS);
+        const __m128i out_13_6 = _mm_srai_epi32(out_13_4, DCT_CONST_BITS);
+        const __m128i out_13_7 = _mm_srai_epi32(out_13_5, DCT_CONST_BITS);
+        const __m128i out_29_6 = _mm_srai_epi32(out_29_4, DCT_CONST_BITS);
+        const __m128i out_29_7 = _mm_srai_epi32(out_29_5, DCT_CONST_BITS);
+        const __m128i out_03_6 = _mm_srai_epi32(out_03_4, DCT_CONST_BITS);
+        const __m128i out_03_7 = _mm_srai_epi32(out_03_5, DCT_CONST_BITS);
+        const __m128i out_19_6 = _mm_srai_epi32(out_19_4, DCT_CONST_BITS);
+        const __m128i out_19_7 = _mm_srai_epi32(out_19_5, DCT_CONST_BITS);
+        const __m128i out_11_6 = _mm_srai_epi32(out_11_4, DCT_CONST_BITS);
+        const __m128i out_11_7 = _mm_srai_epi32(out_11_5, DCT_CONST_BITS);
+        const __m128i out_27_6 = _mm_srai_epi32(out_27_4, DCT_CONST_BITS);
+        const __m128i out_27_7 = _mm_srai_epi32(out_27_5, DCT_CONST_BITS);
+        // Combine
+        out[ 5] = _mm_packs_epi32(out_05_6, out_05_7);
+        out[21] = _mm_packs_epi32(out_21_6, out_21_7);
+        out[13] = _mm_packs_epi32(out_13_6, out_13_7);
+        out[29] = _mm_packs_epi32(out_29_6, out_29_7);
+        out[ 3] = _mm_packs_epi32(out_03_6, out_03_7);
+        out[19] = _mm_packs_epi32(out_19_6, out_19_7);
+        out[11] = _mm_packs_epi32(out_11_6, out_11_7);
+        out[27] = _mm_packs_epi32(out_27_6, out_27_7);
+#if DCT_HIGH_BIT_DEPTH
+        overflow = check_epi16_overflow_x8(&out[5], &out[21], &out[13],
+                                           &out[29], &out[3], &out[19],
+                                           &out[11], &out[27]);
+        if (overflow) {
+          if (pass == 0)
+            HIGH_FDCT32x32_2D_C(input, output_org, stride);
+          else
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+          return;
+        }
+#endif  // DCT_HIGH_BIT_DEPTH
+      }
+#if FDCT32x32_HIGH_PRECISION
+      } else {
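+        // When FDCT32x32_HIGH_PRECISION is set, the row pass continues in
+        // 32-bit arithmetic from stage 3 on so the remaining butterflies
+        // cannot overflow 16-bit lanes.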
+        __m128i lstep1[64], lstep2[64], lstep3[64];
+        __m128i u[32], v[32], sign[16];
+        const __m128i K32One = _mm_set_epi32(1, 1, 1, 1);
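+        // K32One supplies the +1 of the final (x + 1 + (x < 0)) >> 2 rounding.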
+        // start using 32-bit operations
+        // stage 3
+        {
+          // expand to 32 bits prior to the addition operations
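+          // Unpacking against kZero and multiply-adding by kOne sign-extends
+          // each 16-bit lane to 32 bits (_mm_madd_epi16 treats lanes as
+          // signed 16-bit values).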
+          lstep2[ 0] = _mm_unpacklo_epi16(step2[ 0], kZero);
+          lstep2[ 1] = _mm_unpackhi_epi16(step2[ 0], kZero);
+          lstep2[ 2] = _mm_unpacklo_epi16(step2[ 1], kZero);
+          lstep2[ 3] = _mm_unpackhi_epi16(step2[ 1], kZero);
+          lstep2[ 4] = _mm_unpacklo_epi16(step2[ 2], kZero);
+          lstep2[ 5] = _mm_unpackhi_epi16(step2[ 2], kZero);
+          lstep2[ 6] = _mm_unpacklo_epi16(step2[ 3], kZero);
+          lstep2[ 7] = _mm_unpackhi_epi16(step2[ 3], kZero);
+          lstep2[ 8] = _mm_unpacklo_epi16(step2[ 4], kZero);
+          lstep2[ 9] = _mm_unpackhi_epi16(step2[ 4], kZero);
+          lstep2[10] = _mm_unpacklo_epi16(step2[ 5], kZero);
+          lstep2[11] = _mm_unpackhi_epi16(step2[ 5], kZero);
+          lstep2[12] = _mm_unpacklo_epi16(step2[ 6], kZero);
+          lstep2[13] = _mm_unpackhi_epi16(step2[ 6], kZero);
+          lstep2[14] = _mm_unpacklo_epi16(step2[ 7], kZero);
+          lstep2[15] = _mm_unpackhi_epi16(step2[ 7], kZero);
+          lstep2[ 0] = _mm_madd_epi16(lstep2[ 0], kOne);
+          lstep2[ 1] = _mm_madd_epi16(lstep2[ 1], kOne);
+          lstep2[ 2] = _mm_madd_epi16(lstep2[ 2], kOne);
+          lstep2[ 3] = _mm_madd_epi16(lstep2[ 3], kOne);
+          lstep2[ 4] = _mm_madd_epi16(lstep2[ 4], kOne);
+          lstep2[ 5] = _mm_madd_epi16(lstep2[ 5], kOne);
+          lstep2[ 6] = _mm_madd_epi16(lstep2[ 6], kOne);
+          lstep2[ 7] = _mm_madd_epi16(lstep2[ 7], kOne);
+          lstep2[ 8] = _mm_madd_epi16(lstep2[ 8], kOne);
+          lstep2[ 9] = _mm_madd_epi16(lstep2[ 9], kOne);
+          lstep2[10] = _mm_madd_epi16(lstep2[10], kOne);
+          lstep2[11] = _mm_madd_epi16(lstep2[11], kOne);
+          lstep2[12] = _mm_madd_epi16(lstep2[12], kOne);
+          lstep2[13] = _mm_madd_epi16(lstep2[13], kOne);
+          lstep2[14] = _mm_madd_epi16(lstep2[14], kOne);
+          lstep2[15] = _mm_madd_epi16(lstep2[15], kOne);
+
+          lstep3[ 0] = _mm_add_epi32(lstep2[14], lstep2[ 0]);
+          lstep3[ 1] = _mm_add_epi32(lstep2[15], lstep2[ 1]);
+          lstep3[ 2] = _mm_add_epi32(lstep2[12], lstep2[ 2]);
+          lstep3[ 3] = _mm_add_epi32(lstep2[13], lstep2[ 3]);
+          lstep3[ 4] = _mm_add_epi32(lstep2[10], lstep2[ 4]);
+          lstep3[ 5] = _mm_add_epi32(lstep2[11], lstep2[ 5]);
+          lstep3[ 6] = _mm_add_epi32(lstep2[ 8], lstep2[ 6]);
+          lstep3[ 7] = _mm_add_epi32(lstep2[ 9], lstep2[ 7]);
+          lstep3[ 8] = _mm_sub_epi32(lstep2[ 6], lstep2[ 8]);
+          lstep3[ 9] = _mm_sub_epi32(lstep2[ 7], lstep2[ 9]);
+          lstep3[10] = _mm_sub_epi32(lstep2[ 4], lstep2[10]);
+          lstep3[11] = _mm_sub_epi32(lstep2[ 5], lstep2[11]);
+          lstep3[12] = _mm_sub_epi32(lstep2[ 2], lstep2[12]);
+          lstep3[13] = _mm_sub_epi32(lstep2[ 3], lstep2[13]);
+          lstep3[14] = _mm_sub_epi32(lstep2[ 0], lstep2[14]);
+          lstep3[15] = _mm_sub_epi32(lstep2[ 1], lstep2[15]);
+        }
+        {
+          const __m128i s3_10_0 = _mm_unpacklo_epi16(step2[13], step2[10]);
+          const __m128i s3_10_1 = _mm_unpackhi_epi16(step2[13], step2[10]);
+          const __m128i s3_11_0 = _mm_unpacklo_epi16(step2[12], step2[11]);
+          const __m128i s3_11_1 = _mm_unpackhi_epi16(step2[12], step2[11]);
+          const __m128i s3_10_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_m16);
+          const __m128i s3_10_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_m16);
+          const __m128i s3_11_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_m16);
+          const __m128i s3_11_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_m16);
+          const __m128i s3_12_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_p16);
+          const __m128i s3_12_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_p16);
+          const __m128i s3_13_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_p16);
+          const __m128i s3_13_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_p16);
+          // dct_const_round_shift
+          const __m128i s3_10_4 = _mm_add_epi32(s3_10_2, k__DCT_CONST_ROUNDING);
+          const __m128i s3_10_5 = _mm_add_epi32(s3_10_3, k__DCT_CONST_ROUNDING);
+          const __m128i s3_11_4 = _mm_add_epi32(s3_11_2, k__DCT_CONST_ROUNDING);
+          const __m128i s3_11_5 = _mm_add_epi32(s3_11_3, k__DCT_CONST_ROUNDING);
+          const __m128i s3_12_4 = _mm_add_epi32(s3_12_2, k__DCT_CONST_ROUNDING);
+          const __m128i s3_12_5 = _mm_add_epi32(s3_12_3, k__DCT_CONST_ROUNDING);
+          const __m128i s3_13_4 = _mm_add_epi32(s3_13_2, k__DCT_CONST_ROUNDING);
+          const __m128i s3_13_5 = _mm_add_epi32(s3_13_3, k__DCT_CONST_ROUNDING);
+          lstep3[20] = _mm_srai_epi32(s3_10_4, DCT_CONST_BITS);
+          lstep3[21] = _mm_srai_epi32(s3_10_5, DCT_CONST_BITS);
+          lstep3[22] = _mm_srai_epi32(s3_11_4, DCT_CONST_BITS);
+          lstep3[23] = _mm_srai_epi32(s3_11_5, DCT_CONST_BITS);
+          lstep3[24] = _mm_srai_epi32(s3_12_4, DCT_CONST_BITS);
+          lstep3[25] = _mm_srai_epi32(s3_12_5, DCT_CONST_BITS);
+          lstep3[26] = _mm_srai_epi32(s3_13_4, DCT_CONST_BITS);
+          lstep3[27] = _mm_srai_epi32(s3_13_5, DCT_CONST_BITS);
+        }
+        {
+          lstep2[40] = _mm_unpacklo_epi16(step2[20], kZero);
+          lstep2[41] = _mm_unpackhi_epi16(step2[20], kZero);
+          lstep2[42] = _mm_unpacklo_epi16(step2[21], kZero);
+          lstep2[43] = _mm_unpackhi_epi16(step2[21], kZero);
+          lstep2[44] = _mm_unpacklo_epi16(step2[22], kZero);
+          lstep2[45] = _mm_unpackhi_epi16(step2[22], kZero);
+          lstep2[46] = _mm_unpacklo_epi16(step2[23], kZero);
+          lstep2[47] = _mm_unpackhi_epi16(step2[23], kZero);
+          lstep2[48] = _mm_unpacklo_epi16(step2[24], kZero);
+          lstep2[49] = _mm_unpackhi_epi16(step2[24], kZero);
+          lstep2[50] = _mm_unpacklo_epi16(step2[25], kZero);
+          lstep2[51] = _mm_unpackhi_epi16(step2[25], kZero);
+          lstep2[52] = _mm_unpacklo_epi16(step2[26], kZero);
+          lstep2[53] = _mm_unpackhi_epi16(step2[26], kZero);
+          lstep2[54] = _mm_unpacklo_epi16(step2[27], kZero);
+          lstep2[55] = _mm_unpackhi_epi16(step2[27], kZero);
+          lstep2[40] = _mm_madd_epi16(lstep2[40], kOne);
+          lstep2[41] = _mm_madd_epi16(lstep2[41], kOne);
+          lstep2[42] = _mm_madd_epi16(lstep2[42], kOne);
+          lstep2[43] = _mm_madd_epi16(lstep2[43], kOne);
+          lstep2[44] = _mm_madd_epi16(lstep2[44], kOne);
+          lstep2[45] = _mm_madd_epi16(lstep2[45], kOne);
+          lstep2[46] = _mm_madd_epi16(lstep2[46], kOne);
+          lstep2[47] = _mm_madd_epi16(lstep2[47], kOne);
+          lstep2[48] = _mm_madd_epi16(lstep2[48], kOne);
+          lstep2[49] = _mm_madd_epi16(lstep2[49], kOne);
+          lstep2[50] = _mm_madd_epi16(lstep2[50], kOne);
+          lstep2[51] = _mm_madd_epi16(lstep2[51], kOne);
+          lstep2[52] = _mm_madd_epi16(lstep2[52], kOne);
+          lstep2[53] = _mm_madd_epi16(lstep2[53], kOne);
+          lstep2[54] = _mm_madd_epi16(lstep2[54], kOne);
+          lstep2[55] = _mm_madd_epi16(lstep2[55], kOne);
+
+          lstep1[32] = _mm_unpacklo_epi16(step1[16], kZero);
+          lstep1[33] = _mm_unpackhi_epi16(step1[16], kZero);
+          lstep1[34] = _mm_unpacklo_epi16(step1[17], kZero);
+          lstep1[35] = _mm_unpackhi_epi16(step1[17], kZero);
+          lstep1[36] = _mm_unpacklo_epi16(step1[18], kZero);
+          lstep1[37] = _mm_unpackhi_epi16(step1[18], kZero);
+          lstep1[38] = _mm_unpacklo_epi16(step1[19], kZero);
+          lstep1[39] = _mm_unpackhi_epi16(step1[19], kZero);
+          lstep1[56] = _mm_unpacklo_epi16(step1[28], kZero);
+          lstep1[57] = _mm_unpackhi_epi16(step1[28], kZero);
+          lstep1[58] = _mm_unpacklo_epi16(step1[29], kZero);
+          lstep1[59] = _mm_unpackhi_epi16(step1[29], kZero);
+          lstep1[60] = _mm_unpacklo_epi16(step1[30], kZero);
+          lstep1[61] = _mm_unpackhi_epi16(step1[30], kZero);
+          lstep1[62] = _mm_unpacklo_epi16(step1[31], kZero);
+          lstep1[63] = _mm_unpackhi_epi16(step1[31], kZero);
+          lstep1[32] = _mm_madd_epi16(lstep1[32], kOne);
+          lstep1[33] = _mm_madd_epi16(lstep1[33], kOne);
+          lstep1[34] = _mm_madd_epi16(lstep1[34], kOne);
+          lstep1[35] = _mm_madd_epi16(lstep1[35], kOne);
+          lstep1[36] = _mm_madd_epi16(lstep1[36], kOne);
+          lstep1[37] = _mm_madd_epi16(lstep1[37], kOne);
+          lstep1[38] = _mm_madd_epi16(lstep1[38], kOne);
+          lstep1[39] = _mm_madd_epi16(lstep1[39], kOne);
+          lstep1[56] = _mm_madd_epi16(lstep1[56], kOne);
+          lstep1[57] = _mm_madd_epi16(lstep1[57], kOne);
+          lstep1[58] = _mm_madd_epi16(lstep1[58], kOne);
+          lstep1[59] = _mm_madd_epi16(lstep1[59], kOne);
+          lstep1[60] = _mm_madd_epi16(lstep1[60], kOne);
+          lstep1[61] = _mm_madd_epi16(lstep1[61], kOne);
+          lstep1[62] = _mm_madd_epi16(lstep1[62], kOne);
+          lstep1[63] = _mm_madd_epi16(lstep1[63], kOne);
+
+          lstep3[32] = _mm_add_epi32(lstep2[46], lstep1[32]);
+          lstep3[33] = _mm_add_epi32(lstep2[47], lstep1[33]);
+
+          lstep3[34] = _mm_add_epi32(lstep2[44], lstep1[34]);
+          lstep3[35] = _mm_add_epi32(lstep2[45], lstep1[35]);
+          lstep3[36] = _mm_add_epi32(lstep2[42], lstep1[36]);
+          lstep3[37] = _mm_add_epi32(lstep2[43], lstep1[37]);
+          lstep3[38] = _mm_add_epi32(lstep2[40], lstep1[38]);
+          lstep3[39] = _mm_add_epi32(lstep2[41], lstep1[39]);
+          lstep3[40] = _mm_sub_epi32(lstep1[38], lstep2[40]);
+          lstep3[41] = _mm_sub_epi32(lstep1[39], lstep2[41]);
+          lstep3[42] = _mm_sub_epi32(lstep1[36], lstep2[42]);
+          lstep3[43] = _mm_sub_epi32(lstep1[37], lstep2[43]);
+          lstep3[44] = _mm_sub_epi32(lstep1[34], lstep2[44]);
+          lstep3[45] = _mm_sub_epi32(lstep1[35], lstep2[45]);
+          lstep3[46] = _mm_sub_epi32(lstep1[32], lstep2[46]);
+          lstep3[47] = _mm_sub_epi32(lstep1[33], lstep2[47]);
+          lstep3[48] = _mm_sub_epi32(lstep1[62], lstep2[48]);
+          lstep3[49] = _mm_sub_epi32(lstep1[63], lstep2[49]);
+          lstep3[50] = _mm_sub_epi32(lstep1[60], lstep2[50]);
+          lstep3[51] = _mm_sub_epi32(lstep1[61], lstep2[51]);
+          lstep3[52] = _mm_sub_epi32(lstep1[58], lstep2[52]);
+          lstep3[53] = _mm_sub_epi32(lstep1[59], lstep2[53]);
+          lstep3[54] = _mm_sub_epi32(lstep1[56], lstep2[54]);
+          lstep3[55] = _mm_sub_epi32(lstep1[57], lstep2[55]);
+          lstep3[56] = _mm_add_epi32(lstep2[54], lstep1[56]);
+          lstep3[57] = _mm_add_epi32(lstep2[55], lstep1[57]);
+          lstep3[58] = _mm_add_epi32(lstep2[52], lstep1[58]);
+          lstep3[59] = _mm_add_epi32(lstep2[53], lstep1[59]);
+          lstep3[60] = _mm_add_epi32(lstep2[50], lstep1[60]);
+          lstep3[61] = _mm_add_epi32(lstep2[51], lstep1[61]);
+          lstep3[62] = _mm_add_epi32(lstep2[48], lstep1[62]);
+          lstep3[63] = _mm_add_epi32(lstep2[49], lstep1[63]);
+        }
+
+        // stage 4
+        {
+          // expand to 32 bits prior to the addition operations
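+          // Same unpack-with-kZero + _mm_madd_epi16(kOne) sign-extension
+          // trick as in stage 3.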
+          lstep2[16] = _mm_unpacklo_epi16(step2[ 8], kZero);
+          lstep2[17] = _mm_unpackhi_epi16(step2[ 8], kZero);
+          lstep2[18] = _mm_unpacklo_epi16(step2[ 9], kZero);
+          lstep2[19] = _mm_unpackhi_epi16(step2[ 9], kZero);
+          lstep2[28] = _mm_unpacklo_epi16(step2[14], kZero);
+          lstep2[29] = _mm_unpackhi_epi16(step2[14], kZero);
+          lstep2[30] = _mm_unpacklo_epi16(step2[15], kZero);
+          lstep2[31] = _mm_unpackhi_epi16(step2[15], kZero);
+          lstep2[16] = _mm_madd_epi16(lstep2[16], kOne);
+          lstep2[17] = _mm_madd_epi16(lstep2[17], kOne);
+          lstep2[18] = _mm_madd_epi16(lstep2[18], kOne);
+          lstep2[19] = _mm_madd_epi16(lstep2[19], kOne);
+          lstep2[28] = _mm_madd_epi16(lstep2[28], kOne);
+          lstep2[29] = _mm_madd_epi16(lstep2[29], kOne);
+          lstep2[30] = _mm_madd_epi16(lstep2[30], kOne);
+          lstep2[31] = _mm_madd_epi16(lstep2[31], kOne);
+
+          lstep1[ 0] = _mm_add_epi32(lstep3[ 6], lstep3[ 0]);
+          lstep1[ 1] = _mm_add_epi32(lstep3[ 7], lstep3[ 1]);
+          lstep1[ 2] = _mm_add_epi32(lstep3[ 4], lstep3[ 2]);
+          lstep1[ 3] = _mm_add_epi32(lstep3[ 5], lstep3[ 3]);
+          lstep1[ 4] = _mm_sub_epi32(lstep3[ 2], lstep3[ 4]);
+          lstep1[ 5] = _mm_sub_epi32(lstep3[ 3], lstep3[ 5]);
+          lstep1[ 6] = _mm_sub_epi32(lstep3[ 0], lstep3[ 6]);
+          lstep1[ 7] = _mm_sub_epi32(lstep3[ 1], lstep3[ 7]);
+          lstep1[16] = _mm_add_epi32(lstep3[22], lstep2[16]);
+          lstep1[17] = _mm_add_epi32(lstep3[23], lstep2[17]);
+          lstep1[18] = _mm_add_epi32(lstep3[20], lstep2[18]);
+          lstep1[19] = _mm_add_epi32(lstep3[21], lstep2[19]);
+          lstep1[20] = _mm_sub_epi32(lstep2[18], lstep3[20]);
+          lstep1[21] = _mm_sub_epi32(lstep2[19], lstep3[21]);
+          lstep1[22] = _mm_sub_epi32(lstep2[16], lstep3[22]);
+          lstep1[23] = _mm_sub_epi32(lstep2[17], lstep3[23]);
+          lstep1[24] = _mm_sub_epi32(lstep2[30], lstep3[24]);
+          lstep1[25] = _mm_sub_epi32(lstep2[31], lstep3[25]);
+          lstep1[26] = _mm_sub_epi32(lstep2[28], lstep3[26]);
+          lstep1[27] = _mm_sub_epi32(lstep2[29], lstep3[27]);
+          lstep1[28] = _mm_add_epi32(lstep3[26], lstep2[28]);
+          lstep1[29] = _mm_add_epi32(lstep3[27], lstep2[29]);
+          lstep1[30] = _mm_add_epi32(lstep3[24], lstep2[30]);
+          lstep1[31] = _mm_add_epi32(lstep3[25], lstep2[31]);
+        }
+        {
+          // to be continued...
+          //
+          const __m128i k32_p16_p16 = pair_set_epi32(cospi_16_64, cospi_16_64);
+          const __m128i k32_p16_m16 = pair_set_epi32(cospi_16_64, -cospi_16_64);
+
+          u[0] = _mm_unpacklo_epi32(lstep3[12], lstep3[10]);
+          u[1] = _mm_unpackhi_epi32(lstep3[12], lstep3[10]);
+          u[2] = _mm_unpacklo_epi32(lstep3[13], lstep3[11]);
+          u[3] = _mm_unpackhi_epi32(lstep3[13], lstep3[11]);
+
+          // TODO(jingning): manually inline k_madd_epi32_ to further hide
+          // instruction latency.
+          v[0] = k_madd_epi32(u[0], k32_p16_m16);
+          v[1] = k_madd_epi32(u[1], k32_p16_m16);
+          v[2] = k_madd_epi32(u[2], k32_p16_m16);
+          v[3] = k_madd_epi32(u[3], k32_p16_m16);
+          v[4] = k_madd_epi32(u[0], k32_p16_p16);
+          v[5] = k_madd_epi32(u[1], k32_p16_p16);
+          v[6] = k_madd_epi32(u[2], k32_p16_p16);
+          v[7] = k_madd_epi32(u[3], k32_p16_p16);
+#if DCT_HIGH_BIT_DEPTH
+          overflow = k_check_epi32_overflow_8(&v[0], &v[1], &v[2], &v[3],
+                                              &v[4], &v[5], &v[6], &v[7],
+                                              &kZero);
+          if (overflow) {
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
+#endif  // DCT_HIGH_BIT_DEPTH
+          u[0] = k_packs_epi64(v[0], v[1]);
+          u[1] = k_packs_epi64(v[2], v[3]);
+          u[2] = k_packs_epi64(v[4], v[5]);
+          u[3] = k_packs_epi64(v[6], v[7]);
+
+          v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+          v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+          v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+          v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+
+          lstep1[10] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
+          lstep1[11] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
+          lstep1[12] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
+          lstep1[13] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
+        }
+        {
+          const __m128i k32_m08_p24 = pair_set_epi32(-cospi_8_64, cospi_24_64);
+          const __m128i k32_m24_m08 = pair_set_epi32(-cospi_24_64, -cospi_8_64);
+          const __m128i k32_p24_p08 = pair_set_epi32(cospi_24_64, cospi_8_64);
+
+          u[ 0] = _mm_unpacklo_epi32(lstep3[36], lstep3[58]);
+          u[ 1] = _mm_unpackhi_epi32(lstep3[36], lstep3[58]);
+          u[ 2] = _mm_unpacklo_epi32(lstep3[37], lstep3[59]);
+          u[ 3] = _mm_unpackhi_epi32(lstep3[37], lstep3[59]);
+          u[ 4] = _mm_unpacklo_epi32(lstep3[38], lstep3[56]);
+          u[ 5] = _mm_unpackhi_epi32(lstep3[38], lstep3[56]);
+          u[ 6] = _mm_unpacklo_epi32(lstep3[39], lstep3[57]);
+          u[ 7] = _mm_unpackhi_epi32(lstep3[39], lstep3[57]);
+          u[ 8] = _mm_unpacklo_epi32(lstep3[40], lstep3[54]);
+          u[ 9] = _mm_unpackhi_epi32(lstep3[40], lstep3[54]);
+          u[10] = _mm_unpacklo_epi32(lstep3[41], lstep3[55]);
+          u[11] = _mm_unpackhi_epi32(lstep3[41], lstep3[55]);
+          u[12] = _mm_unpacklo_epi32(lstep3[42], lstep3[52]);
+          u[13] = _mm_unpackhi_epi32(lstep3[42], lstep3[52]);
+          u[14] = _mm_unpacklo_epi32(lstep3[43], lstep3[53]);
+          u[15] = _mm_unpackhi_epi32(lstep3[43], lstep3[53]);
+
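+          // k_madd_epi32() forms the 64-bit dot products of the interleaved
+          // {value, coefficient} lanes; k_packs_epi64() below keeps the low
+          // 32 bits of each sum.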
+          v[ 0] = k_madd_epi32(u[ 0], k32_m08_p24);
+          v[ 1] = k_madd_epi32(u[ 1], k32_m08_p24);
+          v[ 2] = k_madd_epi32(u[ 2], k32_m08_p24);
+          v[ 3] = k_madd_epi32(u[ 3], k32_m08_p24);
+          v[ 4] = k_madd_epi32(u[ 4], k32_m08_p24);
+          v[ 5] = k_madd_epi32(u[ 5], k32_m08_p24);
+          v[ 6] = k_madd_epi32(u[ 6], k32_m08_p24);
+          v[ 7] = k_madd_epi32(u[ 7], k32_m08_p24);
+          v[ 8] = k_madd_epi32(u[ 8], k32_m24_m08);
+          v[ 9] = k_madd_epi32(u[ 9], k32_m24_m08);
+          v[10] = k_madd_epi32(u[10], k32_m24_m08);
+          v[11] = k_madd_epi32(u[11], k32_m24_m08);
+          v[12] = k_madd_epi32(u[12], k32_m24_m08);
+          v[13] = k_madd_epi32(u[13], k32_m24_m08);
+          v[14] = k_madd_epi32(u[14], k32_m24_m08);
+          v[15] = k_madd_epi32(u[15], k32_m24_m08);
+          v[16] = k_madd_epi32(u[12], k32_m08_p24);
+          v[17] = k_madd_epi32(u[13], k32_m08_p24);
+          v[18] = k_madd_epi32(u[14], k32_m08_p24);
+          v[19] = k_madd_epi32(u[15], k32_m08_p24);
+          v[20] = k_madd_epi32(u[ 8], k32_m08_p24);
+          v[21] = k_madd_epi32(u[ 9], k32_m08_p24);
+          v[22] = k_madd_epi32(u[10], k32_m08_p24);
+          v[23] = k_madd_epi32(u[11], k32_m08_p24);
+          v[24] = k_madd_epi32(u[ 4], k32_p24_p08);
+          v[25] = k_madd_epi32(u[ 5], k32_p24_p08);
+          v[26] = k_madd_epi32(u[ 6], k32_p24_p08);
+          v[27] = k_madd_epi32(u[ 7], k32_p24_p08);
+          v[28] = k_madd_epi32(u[ 0], k32_p24_p08);
+          v[29] = k_madd_epi32(u[ 1], k32_p24_p08);
+          v[30] = k_madd_epi32(u[ 2], k32_p24_p08);
+          v[31] = k_madd_epi32(u[ 3], k32_p24_p08);
+
+#if DCT_HIGH_BIT_DEPTH
+          overflow = k_check_epi32_overflow_32(
+              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
+              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
+              &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23],
+              &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31],
+              &kZero);
+          if (overflow) {
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
+#endif  // DCT_HIGH_BIT_DEPTH
+          u[ 0] = k_packs_epi64(v[ 0], v[ 1]);
+          u[ 1] = k_packs_epi64(v[ 2], v[ 3]);
+          u[ 2] = k_packs_epi64(v[ 4], v[ 5]);
+          u[ 3] = k_packs_epi64(v[ 6], v[ 7]);
+          u[ 4] = k_packs_epi64(v[ 8], v[ 9]);
+          u[ 5] = k_packs_epi64(v[10], v[11]);
+          u[ 6] = k_packs_epi64(v[12], v[13]);
+          u[ 7] = k_packs_epi64(v[14], v[15]);
+          u[ 8] = k_packs_epi64(v[16], v[17]);
+          u[ 9] = k_packs_epi64(v[18], v[19]);
+          u[10] = k_packs_epi64(v[20], v[21]);
+          u[11] = k_packs_epi64(v[22], v[23]);
+          u[12] = k_packs_epi64(v[24], v[25]);
+          u[13] = k_packs_epi64(v[26], v[27]);
+          u[14] = k_packs_epi64(v[28], v[29]);
+          u[15] = k_packs_epi64(v[30], v[31]);
+
+          v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
+          v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
+          v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
+          v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
+          v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
+          v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
+          v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
+          v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
+          v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
+          v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
+          v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
+          v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
+          v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
+          v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
+          v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
+          v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
+
+          lstep1[36] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS);
+          lstep1[37] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS);
+          lstep1[38] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS);
+          lstep1[39] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS);
+          lstep1[40] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS);
+          lstep1[41] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS);
+          lstep1[42] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS);
+          lstep1[43] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS);
+          lstep1[52] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS);
+          lstep1[53] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS);
+          lstep1[54] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
+          lstep1[55] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
+          lstep1[56] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
+          lstep1[57] = _mm_srai_epi32(v[13], DCT_CONST_BITS);
+          lstep1[58] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
+          lstep1[59] = _mm_srai_epi32(v[15], DCT_CONST_BITS);
+        }
+        // stage 5
+        {
+          lstep2[ 8] = _mm_add_epi32(lstep1[10], lstep3[ 8]);
+          lstep2[ 9] = _mm_add_epi32(lstep1[11], lstep3[ 9]);
+          lstep2[10] = _mm_sub_epi32(lstep3[ 8], lstep1[10]);
+          lstep2[11] = _mm_sub_epi32(lstep3[ 9], lstep1[11]);
+          lstep2[12] = _mm_sub_epi32(lstep3[14], lstep1[12]);
+          lstep2[13] = _mm_sub_epi32(lstep3[15], lstep1[13]);
+          lstep2[14] = _mm_add_epi32(lstep1[12], lstep3[14]);
+          lstep2[15] = _mm_add_epi32(lstep1[13], lstep3[15]);
+        }
+        {
+          const __m128i k32_p16_p16 = pair_set_epi32(cospi_16_64, cospi_16_64);
+          const __m128i k32_p16_m16 = pair_set_epi32(cospi_16_64, -cospi_16_64);
+          const __m128i k32_p24_p08 = pair_set_epi32(cospi_24_64, cospi_8_64);
+          const __m128i k32_m08_p24 = pair_set_epi32(-cospi_8_64, cospi_24_64);
+
+          u[0] = _mm_unpacklo_epi32(lstep1[0], lstep1[2]);
+          u[1] = _mm_unpackhi_epi32(lstep1[0], lstep1[2]);
+          u[2] = _mm_unpacklo_epi32(lstep1[1], lstep1[3]);
+          u[3] = _mm_unpackhi_epi32(lstep1[1], lstep1[3]);
+          u[4] = _mm_unpacklo_epi32(lstep1[4], lstep1[6]);
+          u[5] = _mm_unpackhi_epi32(lstep1[4], lstep1[6]);
+          u[6] = _mm_unpacklo_epi32(lstep1[5], lstep1[7]);
+          u[7] = _mm_unpackhi_epi32(lstep1[5], lstep1[7]);
+
+          // TODO(jingning): manually inline k_madd_epi32_ to further hide
+          // instruction latency.
+          v[ 0] = k_madd_epi32(u[0], k32_p16_p16);
+          v[ 1] = k_madd_epi32(u[1], k32_p16_p16);
+          v[ 2] = k_madd_epi32(u[2], k32_p16_p16);
+          v[ 3] = k_madd_epi32(u[3], k32_p16_p16);
+          v[ 4] = k_madd_epi32(u[0], k32_p16_m16);
+          v[ 5] = k_madd_epi32(u[1], k32_p16_m16);
+          v[ 6] = k_madd_epi32(u[2], k32_p16_m16);
+          v[ 7] = k_madd_epi32(u[3], k32_p16_m16);
+          v[ 8] = k_madd_epi32(u[4], k32_p24_p08);
+          v[ 9] = k_madd_epi32(u[5], k32_p24_p08);
+          v[10] = k_madd_epi32(u[6], k32_p24_p08);
+          v[11] = k_madd_epi32(u[7], k32_p24_p08);
+          v[12] = k_madd_epi32(u[4], k32_m08_p24);
+          v[13] = k_madd_epi32(u[5], k32_m08_p24);
+          v[14] = k_madd_epi32(u[6], k32_m08_p24);
+          v[15] = k_madd_epi32(u[7], k32_m08_p24);
+
+#if DCT_HIGH_BIT_DEPTH
+          overflow = k_check_epi32_overflow_16(
+              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
+              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
+              &kZero);
+          if (overflow) {
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
+#endif  // DCT_HIGH_BIT_DEPTH
+          u[0] = k_packs_epi64(v[0], v[1]);
+          u[1] = k_packs_epi64(v[2], v[3]);
+          u[2] = k_packs_epi64(v[4], v[5]);
+          u[3] = k_packs_epi64(v[6], v[7]);
+          u[4] = k_packs_epi64(v[8], v[9]);
+          u[5] = k_packs_epi64(v[10], v[11]);
+          u[6] = k_packs_epi64(v[12], v[13]);
+          u[7] = k_packs_epi64(v[14], v[15]);
+
+          v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+          v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+          v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+          v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+          v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+          v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+          v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+          v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+
+          u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
+          u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
+          u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
+          u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
+          u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS);
+          u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS);
+          u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS);
+          u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS);
+
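+          // Output rounding, scalar equivalent: out = (x + 1 + (x < 0)) >> 2.
+          // Subtracting the all-ones compare mask adds 1 to negative lanes so
+          // the arithmetic shift divides by 4 with symmetric rounding.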
+          sign[0] = _mm_cmplt_epi32(u[0], kZero);
+          sign[1] = _mm_cmplt_epi32(u[1], kZero);
+          sign[2] = _mm_cmplt_epi32(u[2], kZero);
+          sign[3] = _mm_cmplt_epi32(u[3], kZero);
+          sign[4] = _mm_cmplt_epi32(u[4], kZero);
+          sign[5] = _mm_cmplt_epi32(u[5], kZero);
+          sign[6] = _mm_cmplt_epi32(u[6], kZero);
+          sign[7] = _mm_cmplt_epi32(u[7], kZero);
+
+          u[0] = _mm_sub_epi32(u[0], sign[0]);
+          u[1] = _mm_sub_epi32(u[1], sign[1]);
+          u[2] = _mm_sub_epi32(u[2], sign[2]);
+          u[3] = _mm_sub_epi32(u[3], sign[3]);
+          u[4] = _mm_sub_epi32(u[4], sign[4]);
+          u[5] = _mm_sub_epi32(u[5], sign[5]);
+          u[6] = _mm_sub_epi32(u[6], sign[6]);
+          u[7] = _mm_sub_epi32(u[7], sign[7]);
+
+          u[0] = _mm_add_epi32(u[0], K32One);
+          u[1] = _mm_add_epi32(u[1], K32One);
+          u[2] = _mm_add_epi32(u[2], K32One);
+          u[3] = _mm_add_epi32(u[3], K32One);
+          u[4] = _mm_add_epi32(u[4], K32One);
+          u[5] = _mm_add_epi32(u[5], K32One);
+          u[6] = _mm_add_epi32(u[6], K32One);
+          u[7] = _mm_add_epi32(u[7], K32One);
+
+          u[0] = _mm_srai_epi32(u[0], 2);
+          u[1] = _mm_srai_epi32(u[1], 2);
+          u[2] = _mm_srai_epi32(u[2], 2);
+          u[3] = _mm_srai_epi32(u[3], 2);
+          u[4] = _mm_srai_epi32(u[4], 2);
+          u[5] = _mm_srai_epi32(u[5], 2);
+          u[6] = _mm_srai_epi32(u[6], 2);
+          u[7] = _mm_srai_epi32(u[7], 2);
+
+          // Combine
+          out[ 0] = _mm_packs_epi32(u[0], u[1]);
+          out[16] = _mm_packs_epi32(u[2], u[3]);
+          out[ 8] = _mm_packs_epi32(u[4], u[5]);
+          out[24] = _mm_packs_epi32(u[6], u[7]);
+#if DCT_HIGH_BIT_DEPTH
+          overflow = check_epi16_overflow_x4(&out[0], &out[16],
+                                             &out[8], &out[24]);
+          if (overflow) {
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
+#endif  // DCT_HIGH_BIT_DEPTH
+        }
+        {
+          const __m128i k32_m08_p24 = pair_set_epi32(-cospi_8_64, cospi_24_64);
+          const __m128i k32_m24_m08 = pair_set_epi32(-cospi_24_64, -cospi_8_64);
+          const __m128i k32_p24_p08 = pair_set_epi32(cospi_24_64, cospi_8_64);
+
+          u[0] = _mm_unpacklo_epi32(lstep1[18], lstep1[28]);
+          u[1] = _mm_unpackhi_epi32(lstep1[18], lstep1[28]);
+          u[2] = _mm_unpacklo_epi32(lstep1[19], lstep1[29]);
+          u[3] = _mm_unpackhi_epi32(lstep1[19], lstep1[29]);
+          u[4] = _mm_unpacklo_epi32(lstep1[20], lstep1[26]);
+          u[5] = _mm_unpackhi_epi32(lstep1[20], lstep1[26]);
+          u[6] = _mm_unpacklo_epi32(lstep1[21], lstep1[27]);
+          u[7] = _mm_unpackhi_epi32(lstep1[21], lstep1[27]);
+
+          v[0] = k_madd_epi32(u[0], k32_m08_p24);
+          v[1] = k_madd_epi32(u[1], k32_m08_p24);
+          v[2] = k_madd_epi32(u[2], k32_m08_p24);
+          v[3] = k_madd_epi32(u[3], k32_m08_p24);
+          v[4] = k_madd_epi32(u[4], k32_m24_m08);
+          v[5] = k_madd_epi32(u[5], k32_m24_m08);
+          v[6] = k_madd_epi32(u[6], k32_m24_m08);
+          v[7] = k_madd_epi32(u[7], k32_m24_m08);
+          v[ 8] = k_madd_epi32(u[4], k32_m08_p24);
+          v[ 9] = k_madd_epi32(u[5], k32_m08_p24);
+          v[10] = k_madd_epi32(u[6], k32_m08_p24);
+          v[11] = k_madd_epi32(u[7], k32_m08_p24);
+          v[12] = k_madd_epi32(u[0], k32_p24_p08);
+          v[13] = k_madd_epi32(u[1], k32_p24_p08);
+          v[14] = k_madd_epi32(u[2], k32_p24_p08);
+          v[15] = k_madd_epi32(u[3], k32_p24_p08);
+
+#if DCT_HIGH_BIT_DEPTH
+          overflow = k_check_epi32_overflow_16(
+              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
+              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
+              &kZero);
+          if (overflow) {
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
+#endif  // DCT_HIGH_BIT_DEPTH
+          u[0] = k_packs_epi64(v[0], v[1]);
+          u[1] = k_packs_epi64(v[2], v[3]);
+          u[2] = k_packs_epi64(v[4], v[5]);
+          u[3] = k_packs_epi64(v[6], v[7]);
+          u[4] = k_packs_epi64(v[8], v[9]);
+          u[5] = k_packs_epi64(v[10], v[11]);
+          u[6] = k_packs_epi64(v[12], v[13]);
+          u[7] = k_packs_epi64(v[14], v[15]);
+
+          u[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+          u[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+          u[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+          u[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+          u[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+          u[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+          u[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+          u[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+
+          lstep2[18] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
+          lstep2[19] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
+          lstep2[20] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
+          lstep2[21] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
+          lstep2[26] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
+          lstep2[27] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
+          lstep2[28] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
+          lstep2[29] = _mm_srai_epi32(u[7], DCT_CONST_BITS);
+        }
+        {
+          lstep2[32] = _mm_add_epi32(lstep1[38], lstep3[32]);
+          lstep2[33] = _mm_add_epi32(lstep1[39], lstep3[33]);
+          lstep2[34] = _mm_add_epi32(lstep1[36], lstep3[34]);
+          lstep2[35] = _mm_add_epi32(lstep1[37], lstep3[35]);
+          lstep2[36] = _mm_sub_epi32(lstep3[34], lstep1[36]);
+          lstep2[37] = _mm_sub_epi32(lstep3[35], lstep1[37]);
+          lstep2[38] = _mm_sub_epi32(lstep3[32], lstep1[38]);
+          lstep2[39] = _mm_sub_epi32(lstep3[33], lstep1[39]);
+          lstep2[40] = _mm_sub_epi32(lstep3[46], lstep1[40]);
+          lstep2[41] = _mm_sub_epi32(lstep3[47], lstep1[41]);
+          lstep2[42] = _mm_sub_epi32(lstep3[44], lstep1[42]);
+          lstep2[43] = _mm_sub_epi32(lstep3[45], lstep1[43]);
+          lstep2[44] = _mm_add_epi32(lstep1[42], lstep3[44]);
+          lstep2[45] = _mm_add_epi32(lstep1[43], lstep3[45]);
+          lstep2[46] = _mm_add_epi32(lstep1[40], lstep3[46]);
+          lstep2[47] = _mm_add_epi32(lstep1[41], lstep3[47]);
+          lstep2[48] = _mm_add_epi32(lstep1[54], lstep3[48]);
+          lstep2[49] = _mm_add_epi32(lstep1[55], lstep3[49]);
+          lstep2[50] = _mm_add_epi32(lstep1[52], lstep3[50]);
+          lstep2[51] = _mm_add_epi32(lstep1[53], lstep3[51]);
+          lstep2[52] = _mm_sub_epi32(lstep3[50], lstep1[52]);
+          lstep2[53] = _mm_sub_epi32(lstep3[51], lstep1[53]);
+          lstep2[54] = _mm_sub_epi32(lstep3[48], lstep1[54]);
+          lstep2[55] = _mm_sub_epi32(lstep3[49], lstep1[55]);
+          lstep2[56] = _mm_sub_epi32(lstep3[62], lstep1[56]);
+          lstep2[57] = _mm_sub_epi32(lstep3[63], lstep1[57]);
+          lstep2[58] = _mm_sub_epi32(lstep3[60], lstep1[58]);
+          lstep2[59] = _mm_sub_epi32(lstep3[61], lstep1[59]);
+          lstep2[60] = _mm_add_epi32(lstep1[58], lstep3[60]);
+          lstep2[61] = _mm_add_epi32(lstep1[59], lstep3[61]);
+          lstep2[62] = _mm_add_epi32(lstep1[56], lstep3[62]);
+          lstep2[63] = _mm_add_epi32(lstep1[57], lstep3[63]);
+        }
+        // stage 6
+        {
+          const __m128i k32_p28_p04 = pair_set_epi32(cospi_28_64, cospi_4_64);
+          const __m128i k32_p12_p20 = pair_set_epi32(cospi_12_64, cospi_20_64);
+          const __m128i k32_m20_p12 = pair_set_epi32(-cospi_20_64, cospi_12_64);
+          const __m128i k32_m04_p28 = pair_set_epi32(-cospi_4_64, cospi_28_64);
+
+          u[0] = _mm_unpacklo_epi32(lstep2[ 8], lstep2[14]);
+          u[1] = _mm_unpackhi_epi32(lstep2[ 8], lstep2[14]);
+          u[2] = _mm_unpacklo_epi32(lstep2[ 9], lstep2[15]);
+          u[3] = _mm_unpackhi_epi32(lstep2[ 9], lstep2[15]);
+          u[4] = _mm_unpacklo_epi32(lstep2[10], lstep2[12]);
+          u[5] = _mm_unpackhi_epi32(lstep2[10], lstep2[12]);
+          u[6] = _mm_unpacklo_epi32(lstep2[11], lstep2[13]);
+          u[7] = _mm_unpackhi_epi32(lstep2[11], lstep2[13]);
+          u[8] = _mm_unpacklo_epi32(lstep2[10], lstep2[12]);
+          u[9] = _mm_unpackhi_epi32(lstep2[10], lstep2[12]);
+          u[10] = _mm_unpacklo_epi32(lstep2[11], lstep2[13]);
+          u[11] = _mm_unpackhi_epi32(lstep2[11], lstep2[13]);
+          u[12] = _mm_unpacklo_epi32(lstep2[ 8], lstep2[14]);
+          u[13] = _mm_unpackhi_epi32(lstep2[ 8], lstep2[14]);
+          u[14] = _mm_unpacklo_epi32(lstep2[ 9], lstep2[15]);
+          u[15] = _mm_unpackhi_epi32(lstep2[ 9], lstep2[15]);
+
+          v[0] = k_madd_epi32(u[0], k32_p28_p04);
+          v[1] = k_madd_epi32(u[1], k32_p28_p04);
+          v[2] = k_madd_epi32(u[2], k32_p28_p04);
+          v[3] = k_madd_epi32(u[3], k32_p28_p04);
+          v[4] = k_madd_epi32(u[4], k32_p12_p20);
+          v[5] = k_madd_epi32(u[5], k32_p12_p20);
+          v[6] = k_madd_epi32(u[6], k32_p12_p20);
+          v[7] = k_madd_epi32(u[7], k32_p12_p20);
+          v[ 8] = k_madd_epi32(u[ 8], k32_m20_p12);
+          v[ 9] = k_madd_epi32(u[ 9], k32_m20_p12);
+          v[10] = k_madd_epi32(u[10], k32_m20_p12);
+          v[11] = k_madd_epi32(u[11], k32_m20_p12);
+          v[12] = k_madd_epi32(u[12], k32_m04_p28);
+          v[13] = k_madd_epi32(u[13], k32_m04_p28);
+          v[14] = k_madd_epi32(u[14], k32_m04_p28);
+          v[15] = k_madd_epi32(u[15], k32_m04_p28);
+
+#if DCT_HIGH_BIT_DEPTH
+          overflow = k_check_epi32_overflow_16(
+              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
+              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
+              &kZero);
+          if (overflow) {
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
+#endif  // DCT_HIGH_BIT_DEPTH
+          u[0] = k_packs_epi64(v[0], v[1]);
+          u[1] = k_packs_epi64(v[2], v[3]);
+          u[2] = k_packs_epi64(v[4], v[5]);
+          u[3] = k_packs_epi64(v[6], v[7]);
+          u[4] = k_packs_epi64(v[8], v[9]);
+          u[5] = k_packs_epi64(v[10], v[11]);
+          u[6] = k_packs_epi64(v[12], v[13]);
+          u[7] = k_packs_epi64(v[14], v[15]);
+
+          v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+          v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+          v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+          v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+          v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+          v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+          v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+          v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+
+          u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
+          u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
+          u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
+          u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
+          u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS);
+          u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS);
+          u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS);
+          u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS);
+
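+          // Same (x + 1 + (x < 0)) >> 2 output rounding as above.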
+          sign[0] = _mm_cmplt_epi32(u[0], kZero);
+          sign[1] = _mm_cmplt_epi32(u[1], kZero);
+          sign[2] = _mm_cmplt_epi32(u[2], kZero);
+          sign[3] = _mm_cmplt_epi32(u[3], kZero);
+          sign[4] = _mm_cmplt_epi32(u[4], kZero);
+          sign[5] = _mm_cmplt_epi32(u[5], kZero);
+          sign[6] = _mm_cmplt_epi32(u[6], kZero);
+          sign[7] = _mm_cmplt_epi32(u[7], kZero);
+
+          u[0] = _mm_sub_epi32(u[0], sign[0]);
+          u[1] = _mm_sub_epi32(u[1], sign[1]);
+          u[2] = _mm_sub_epi32(u[2], sign[2]);
+          u[3] = _mm_sub_epi32(u[3], sign[3]);
+          u[4] = _mm_sub_epi32(u[4], sign[4]);
+          u[5] = _mm_sub_epi32(u[5], sign[5]);
+          u[6] = _mm_sub_epi32(u[6], sign[6]);
+          u[7] = _mm_sub_epi32(u[7], sign[7]);
+
+          u[0] = _mm_add_epi32(u[0], K32One);
+          u[1] = _mm_add_epi32(u[1], K32One);
+          u[2] = _mm_add_epi32(u[2], K32One);
+          u[3] = _mm_add_epi32(u[3], K32One);
+          u[4] = _mm_add_epi32(u[4], K32One);
+          u[5] = _mm_add_epi32(u[5], K32One);
+          u[6] = _mm_add_epi32(u[6], K32One);
+          u[7] = _mm_add_epi32(u[7], K32One);
+
+          u[0] = _mm_srai_epi32(u[0], 2);
+          u[1] = _mm_srai_epi32(u[1], 2);
+          u[2] = _mm_srai_epi32(u[2], 2);
+          u[3] = _mm_srai_epi32(u[3], 2);
+          u[4] = _mm_srai_epi32(u[4], 2);
+          u[5] = _mm_srai_epi32(u[5], 2);
+          u[6] = _mm_srai_epi32(u[6], 2);
+          u[7] = _mm_srai_epi32(u[7], 2);
+
+          out[ 4] = _mm_packs_epi32(u[0], u[1]);
+          out[20] = _mm_packs_epi32(u[2], u[3]);
+          out[12] = _mm_packs_epi32(u[4], u[5]);
+          out[28] = _mm_packs_epi32(u[6], u[7]);
+#if DCT_HIGH_BIT_DEPTH
+          overflow = check_epi16_overflow_x4(&out[4], &out[20],
+                                             &out[12], &out[28]);
+          if (overflow) {
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
+#endif  // DCT_HIGH_BIT_DEPTH
+        }
+        {
+          lstep3[16] = _mm_add_epi32(lstep2[18], lstep1[16]);
+          lstep3[17] = _mm_add_epi32(lstep2[19], lstep1[17]);
+          lstep3[18] = _mm_sub_epi32(lstep1[16], lstep2[18]);
+          lstep3[19] = _mm_sub_epi32(lstep1[17], lstep2[19]);
+          lstep3[20] = _mm_sub_epi32(lstep1[22], lstep2[20]);
+          lstep3[21] = _mm_sub_epi32(lstep1[23], lstep2[21]);
+          lstep3[22] = _mm_add_epi32(lstep2[20], lstep1[22]);
+          lstep3[23] = _mm_add_epi32(lstep2[21], lstep1[23]);
+          lstep3[24] = _mm_add_epi32(lstep2[26], lstep1[24]);
+          lstep3[25] = _mm_add_epi32(lstep2[27], lstep1[25]);
+          lstep3[26] = _mm_sub_epi32(lstep1[24], lstep2[26]);
+          lstep3[27] = _mm_sub_epi32(lstep1[25], lstep2[27]);
+          lstep3[28] = _mm_sub_epi32(lstep1[30], lstep2[28]);
+          lstep3[29] = _mm_sub_epi32(lstep1[31], lstep2[29]);
+          lstep3[30] = _mm_add_epi32(lstep2[28], lstep1[30]);
+          lstep3[31] = _mm_add_epi32(lstep2[29], lstep1[31]);
+        }
+        {
+          const __m128i k32_m04_p28 = pair_set_epi32(-cospi_4_64, cospi_28_64);
+          const __m128i k32_m28_m04 = pair_set_epi32(-cospi_28_64, -cospi_4_64);
+          const __m128i k32_m20_p12 = pair_set_epi32(-cospi_20_64, cospi_12_64);
+          const __m128i k32_m12_m20 = pair_set_epi32(-cospi_12_64,
+                                                     -cospi_20_64);
+          const __m128i k32_p12_p20 = pair_set_epi32(cospi_12_64, cospi_20_64);
+          const __m128i k32_p28_p04 = pair_set_epi32(cospi_28_64, cospi_4_64);
+
+          u[ 0] = _mm_unpacklo_epi32(lstep2[34], lstep2[60]);
+          u[ 1] = _mm_unpackhi_epi32(lstep2[34], lstep2[60]);
+          u[ 2] = _mm_unpacklo_epi32(lstep2[35], lstep2[61]);
+          u[ 3] = _mm_unpackhi_epi32(lstep2[35], lstep2[61]);
+          u[ 4] = _mm_unpacklo_epi32(lstep2[36], lstep2[58]);
+          u[ 5] = _mm_unpackhi_epi32(lstep2[36], lstep2[58]);
+          u[ 6] = _mm_unpacklo_epi32(lstep2[37], lstep2[59]);
+          u[ 7] = _mm_unpackhi_epi32(lstep2[37], lstep2[59]);
+          u[ 8] = _mm_unpacklo_epi32(lstep2[42], lstep2[52]);
+          u[ 9] = _mm_unpackhi_epi32(lstep2[42], lstep2[52]);
+          u[10] = _mm_unpacklo_epi32(lstep2[43], lstep2[53]);
+          u[11] = _mm_unpackhi_epi32(lstep2[43], lstep2[53]);
+          u[12] = _mm_unpacklo_epi32(lstep2[44], lstep2[50]);
+          u[13] = _mm_unpackhi_epi32(lstep2[44], lstep2[50]);
+          u[14] = _mm_unpacklo_epi32(lstep2[45], lstep2[51]);
+          u[15] = _mm_unpackhi_epi32(lstep2[45], lstep2[51]);
+
+          v[ 0] = k_madd_epi32(u[ 0], k32_m04_p28);
+          v[ 1] = k_madd_epi32(u[ 1], k32_m04_p28);
+          v[ 2] = k_madd_epi32(u[ 2], k32_m04_p28);
+          v[ 3] = k_madd_epi32(u[ 3], k32_m04_p28);
+          v[ 4] = k_madd_epi32(u[ 4], k32_m28_m04);
+          v[ 5] = k_madd_epi32(u[ 5], k32_m28_m04);
+          v[ 6] = k_madd_epi32(u[ 6], k32_m28_m04);
+          v[ 7] = k_madd_epi32(u[ 7], k32_m28_m04);
+          v[ 8] = k_madd_epi32(u[ 8], k32_m20_p12);
+          v[ 9] = k_madd_epi32(u[ 9], k32_m20_p12);
+          v[10] = k_madd_epi32(u[10], k32_m20_p12);
+          v[11] = k_madd_epi32(u[11], k32_m20_p12);
+          v[12] = k_madd_epi32(u[12], k32_m12_m20);
+          v[13] = k_madd_epi32(u[13], k32_m12_m20);
+          v[14] = k_madd_epi32(u[14], k32_m12_m20);
+          v[15] = k_madd_epi32(u[15], k32_m12_m20);
+          v[16] = k_madd_epi32(u[12], k32_m20_p12);
+          v[17] = k_madd_epi32(u[13], k32_m20_p12);
+          v[18] = k_madd_epi32(u[14], k32_m20_p12);
+          v[19] = k_madd_epi32(u[15], k32_m20_p12);
+          v[20] = k_madd_epi32(u[ 8], k32_p12_p20);
+          v[21] = k_madd_epi32(u[ 9], k32_p12_p20);
+          v[22] = k_madd_epi32(u[10], k32_p12_p20);
+          v[23] = k_madd_epi32(u[11], k32_p12_p20);
+          v[24] = k_madd_epi32(u[ 4], k32_m04_p28);
+          v[25] = k_madd_epi32(u[ 5], k32_m04_p28);
+          v[26] = k_madd_epi32(u[ 6], k32_m04_p28);
+          v[27] = k_madd_epi32(u[ 7], k32_m04_p28);
+          v[28] = k_madd_epi32(u[ 0], k32_p28_p04);
+          v[29] = k_madd_epi32(u[ 1], k32_p28_p04);
+          v[30] = k_madd_epi32(u[ 2], k32_p28_p04);
+          v[31] = k_madd_epi32(u[ 3], k32_p28_p04);
+
+#if DCT_HIGH_BIT_DEPTH
+          overflow = k_check_epi32_overflow_32(
+              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
+              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
+              &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23],
+              &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31],
+              &kZero);
+          if (overflow) {
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
+#endif  // DCT_HIGH_BIT_DEPTH
+          u[ 0] = k_packs_epi64(v[ 0], v[ 1]);
+          u[ 1] = k_packs_epi64(v[ 2], v[ 3]);
+          u[ 2] = k_packs_epi64(v[ 4], v[ 5]);
+          u[ 3] = k_packs_epi64(v[ 6], v[ 7]);
+          u[ 4] = k_packs_epi64(v[ 8], v[ 9]);
+          u[ 5] = k_packs_epi64(v[10], v[11]);
+          u[ 6] = k_packs_epi64(v[12], v[13]);
+          u[ 7] = k_packs_epi64(v[14], v[15]);
+          u[ 8] = k_packs_epi64(v[16], v[17]);
+          u[ 9] = k_packs_epi64(v[18], v[19]);
+          u[10] = k_packs_epi64(v[20], v[21]);
+          u[11] = k_packs_epi64(v[22], v[23]);
+          u[12] = k_packs_epi64(v[24], v[25]);
+          u[13] = k_packs_epi64(v[26], v[27]);
+          u[14] = k_packs_epi64(v[28], v[29]);
+          u[15] = k_packs_epi64(v[30], v[31]);
+
+          v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
+          v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
+          v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
+          v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
+          v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
+          v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
+          v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
+          v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
+          v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
+          v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
+          v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
+          v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
+          v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
+          v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
+          v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
+          v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
+
+          lstep3[34] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS);
+          lstep3[35] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS);
+          lstep3[36] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS);
+          lstep3[37] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS);
+          lstep3[42] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS);
+          lstep3[43] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS);
+          lstep3[44] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS);
+          lstep3[45] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS);
+          lstep3[50] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS);
+          lstep3[51] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS);
+          lstep3[52] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
+          lstep3[53] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
+          lstep3[58] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
+          lstep3[59] = _mm_srai_epi32(v[13], DCT_CONST_BITS);
+          lstep3[60] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
+          lstep3[61] = _mm_srai_epi32(v[15], DCT_CONST_BITS);
+        }
+        // stage 7
+        {
+          const __m128i k32_p30_p02 = pair_set_epi32(cospi_30_64, cospi_2_64);
+          const __m128i k32_p14_p18 = pair_set_epi32(cospi_14_64, cospi_18_64);
+          const __m128i k32_p22_p10 = pair_set_epi32(cospi_22_64, cospi_10_64);
+          const __m128i k32_p06_p26 = pair_set_epi32(cospi_6_64,  cospi_26_64);
+          const __m128i k32_m26_p06 = pair_set_epi32(-cospi_26_64, cospi_6_64);
+          const __m128i k32_m10_p22 = pair_set_epi32(-cospi_10_64, cospi_22_64);
+          const __m128i k32_m18_p14 = pair_set_epi32(-cospi_18_64, cospi_14_64);
+          const __m128i k32_m02_p30 = pair_set_epi32(-cospi_2_64, cospi_30_64);
+
+          u[ 0] = _mm_unpacklo_epi32(lstep3[16], lstep3[30]);
+          u[ 1] = _mm_unpackhi_epi32(lstep3[16], lstep3[30]);
+          u[ 2] = _mm_unpacklo_epi32(lstep3[17], lstep3[31]);
+          u[ 3] = _mm_unpackhi_epi32(lstep3[17], lstep3[31]);
+          u[ 4] = _mm_unpacklo_epi32(lstep3[18], lstep3[28]);
+          u[ 5] = _mm_unpackhi_epi32(lstep3[18], lstep3[28]);
+          u[ 6] = _mm_unpacklo_epi32(lstep3[19], lstep3[29]);
+          u[ 7] = _mm_unpackhi_epi32(lstep3[19], lstep3[29]);
+          u[ 8] = _mm_unpacklo_epi32(lstep3[20], lstep3[26]);
+          u[ 9] = _mm_unpackhi_epi32(lstep3[20], lstep3[26]);
+          u[10] = _mm_unpacklo_epi32(lstep3[21], lstep3[27]);
+          u[11] = _mm_unpackhi_epi32(lstep3[21], lstep3[27]);
+          u[12] = _mm_unpacklo_epi32(lstep3[22], lstep3[24]);
+          u[13] = _mm_unpackhi_epi32(lstep3[22], lstep3[24]);
+          u[14] = _mm_unpacklo_epi32(lstep3[23], lstep3[25]);
+          u[15] = _mm_unpackhi_epi32(lstep3[23], lstep3[25]);
+
+          v[ 0] = k_madd_epi32(u[ 0], k32_p30_p02);
+          v[ 1] = k_madd_epi32(u[ 1], k32_p30_p02);
+          v[ 2] = k_madd_epi32(u[ 2], k32_p30_p02);
+          v[ 3] = k_madd_epi32(u[ 3], k32_p30_p02);
+          v[ 4] = k_madd_epi32(u[ 4], k32_p14_p18);
+          v[ 5] = k_madd_epi32(u[ 5], k32_p14_p18);
+          v[ 6] = k_madd_epi32(u[ 6], k32_p14_p18);
+          v[ 7] = k_madd_epi32(u[ 7], k32_p14_p18);
+          v[ 8] = k_madd_epi32(u[ 8], k32_p22_p10);
+          v[ 9] = k_madd_epi32(u[ 9], k32_p22_p10);
+          v[10] = k_madd_epi32(u[10], k32_p22_p10);
+          v[11] = k_madd_epi32(u[11], k32_p22_p10);
+          v[12] = k_madd_epi32(u[12], k32_p06_p26);
+          v[13] = k_madd_epi32(u[13], k32_p06_p26);
+          v[14] = k_madd_epi32(u[14], k32_p06_p26);
+          v[15] = k_madd_epi32(u[15], k32_p06_p26);
+          v[16] = k_madd_epi32(u[12], k32_m26_p06);
+          v[17] = k_madd_epi32(u[13], k32_m26_p06);
+          v[18] = k_madd_epi32(u[14], k32_m26_p06);
+          v[19] = k_madd_epi32(u[15], k32_m26_p06);
+          v[20] = k_madd_epi32(u[ 8], k32_m10_p22);
+          v[21] = k_madd_epi32(u[ 9], k32_m10_p22);
+          v[22] = k_madd_epi32(u[10], k32_m10_p22);
+          v[23] = k_madd_epi32(u[11], k32_m10_p22);
+          v[24] = k_madd_epi32(u[ 4], k32_m18_p14);
+          v[25] = k_madd_epi32(u[ 5], k32_m18_p14);
+          v[26] = k_madd_epi32(u[ 6], k32_m18_p14);
+          v[27] = k_madd_epi32(u[ 7], k32_m18_p14);
+          v[28] = k_madd_epi32(u[ 0], k32_m02_p30);
+          v[29] = k_madd_epi32(u[ 1], k32_m02_p30);
+          v[30] = k_madd_epi32(u[ 2], k32_m02_p30);
+          v[31] = k_madd_epi32(u[ 3], k32_m02_p30);
+
+#if DCT_HIGH_BIT_DEPTH
+          overflow = k_check_epi32_overflow_32(
+              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
+              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
+              &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23],
+              &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31],
+              &kZero);
+          if (overflow) {
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
+#endif  // DCT_HIGH_BIT_DEPTH
+          u[ 0] = k_packs_epi64(v[ 0], v[ 1]);
+          u[ 1] = k_packs_epi64(v[ 2], v[ 3]);
+          u[ 2] = k_packs_epi64(v[ 4], v[ 5]);
+          u[ 3] = k_packs_epi64(v[ 6], v[ 7]);
+          u[ 4] = k_packs_epi64(v[ 8], v[ 9]);
+          u[ 5] = k_packs_epi64(v[10], v[11]);
+          u[ 6] = k_packs_epi64(v[12], v[13]);
+          u[ 7] = k_packs_epi64(v[14], v[15]);
+          u[ 8] = k_packs_epi64(v[16], v[17]);
+          u[ 9] = k_packs_epi64(v[18], v[19]);
+          u[10] = k_packs_epi64(v[20], v[21]);
+          u[11] = k_packs_epi64(v[22], v[23]);
+          u[12] = k_packs_epi64(v[24], v[25]);
+          u[13] = k_packs_epi64(v[26], v[27]);
+          u[14] = k_packs_epi64(v[28], v[29]);
+          u[15] = k_packs_epi64(v[30], v[31]);
+
+          v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
+          v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
+          v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
+          v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
+          v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
+          v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
+          v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
+          v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
+          v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
+          v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
+          v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
+          v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
+          v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
+          v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
+          v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
+          v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
+
+          u[ 0] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS);
+          u[ 1] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS);
+          u[ 2] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS);
+          u[ 3] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS);
+          u[ 4] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS);
+          u[ 5] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS);
+          u[ 6] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS);
+          u[ 7] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS);
+          u[ 8] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS);
+          u[ 9] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS);
+          u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
+          u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
+          u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
+          u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS);
+          u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
+          u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS);
+
+          v[ 0] = _mm_cmplt_epi32(u[ 0], kZero);
+          v[ 1] = _mm_cmplt_epi32(u[ 1], kZero);
+          v[ 2] = _mm_cmplt_epi32(u[ 2], kZero);
+          v[ 3] = _mm_cmplt_epi32(u[ 3], kZero);
+          v[ 4] = _mm_cmplt_epi32(u[ 4], kZero);
+          v[ 5] = _mm_cmplt_epi32(u[ 5], kZero);
+          v[ 6] = _mm_cmplt_epi32(u[ 6], kZero);
+          v[ 7] = _mm_cmplt_epi32(u[ 7], kZero);
+          v[ 8] = _mm_cmplt_epi32(u[ 8], kZero);
+          v[ 9] = _mm_cmplt_epi32(u[ 9], kZero);
+          v[10] = _mm_cmplt_epi32(u[10], kZero);
+          v[11] = _mm_cmplt_epi32(u[11], kZero);
+          v[12] = _mm_cmplt_epi32(u[12], kZero);
+          v[13] = _mm_cmplt_epi32(u[13], kZero);
+          v[14] = _mm_cmplt_epi32(u[14], kZero);
+          v[15] = _mm_cmplt_epi32(u[15], kZero);
+
+          u[ 0] = _mm_sub_epi32(u[ 0], v[ 0]);
+          u[ 1] = _mm_sub_epi32(u[ 1], v[ 1]);
+          u[ 2] = _mm_sub_epi32(u[ 2], v[ 2]);
+          u[ 3] = _mm_sub_epi32(u[ 3], v[ 3]);
+          u[ 4] = _mm_sub_epi32(u[ 4], v[ 4]);
+          u[ 5] = _mm_sub_epi32(u[ 5], v[ 5]);
+          u[ 6] = _mm_sub_epi32(u[ 6], v[ 6]);
+          u[ 7] = _mm_sub_epi32(u[ 7], v[ 7]);
+          u[ 8] = _mm_sub_epi32(u[ 8], v[ 8]);
+          u[ 9] = _mm_sub_epi32(u[ 9], v[ 9]);
+          u[10] = _mm_sub_epi32(u[10], v[10]);
+          u[11] = _mm_sub_epi32(u[11], v[11]);
+          u[12] = _mm_sub_epi32(u[12], v[12]);
+          u[13] = _mm_sub_epi32(u[13], v[13]);
+          u[14] = _mm_sub_epi32(u[14], v[14]);
+          u[15] = _mm_sub_epi32(u[15], v[15]);
+
+          v[ 0] = _mm_add_epi32(u[ 0], K32One);
+          v[ 1] = _mm_add_epi32(u[ 1], K32One);
+          v[ 2] = _mm_add_epi32(u[ 2], K32One);
+          v[ 3] = _mm_add_epi32(u[ 3], K32One);
+          v[ 4] = _mm_add_epi32(u[ 4], K32One);
+          v[ 5] = _mm_add_epi32(u[ 5], K32One);
+          v[ 6] = _mm_add_epi32(u[ 6], K32One);
+          v[ 7] = _mm_add_epi32(u[ 7], K32One);
+          v[ 8] = _mm_add_epi32(u[ 8], K32One);
+          v[ 9] = _mm_add_epi32(u[ 9], K32One);
+          v[10] = _mm_add_epi32(u[10], K32One);
+          v[11] = _mm_add_epi32(u[11], K32One);
+          v[12] = _mm_add_epi32(u[12], K32One);
+          v[13] = _mm_add_epi32(u[13], K32One);
+          v[14] = _mm_add_epi32(u[14], K32One);
+          v[15] = _mm_add_epi32(u[15], K32One);
+
+          u[ 0] = _mm_srai_epi32(v[ 0], 2);
+          u[ 1] = _mm_srai_epi32(v[ 1], 2);
+          u[ 2] = _mm_srai_epi32(v[ 2], 2);
+          u[ 3] = _mm_srai_epi32(v[ 3], 2);
+          u[ 4] = _mm_srai_epi32(v[ 4], 2);
+          u[ 5] = _mm_srai_epi32(v[ 5], 2);
+          u[ 6] = _mm_srai_epi32(v[ 6], 2);
+          u[ 7] = _mm_srai_epi32(v[ 7], 2);
+          u[ 8] = _mm_srai_epi32(v[ 8], 2);
+          u[ 9] = _mm_srai_epi32(v[ 9], 2);
+          u[10] = _mm_srai_epi32(v[10], 2);
+          u[11] = _mm_srai_epi32(v[11], 2);
+          u[12] = _mm_srai_epi32(v[12], 2);
+          u[13] = _mm_srai_epi32(v[13], 2);
+          u[14] = _mm_srai_epi32(v[14], 2);
+          u[15] = _mm_srai_epi32(v[15], 2);
+
+          out[ 2] = _mm_packs_epi32(u[0], u[1]);
+          out[18] = _mm_packs_epi32(u[2], u[3]);
+          out[10] = _mm_packs_epi32(u[4], u[5]);
+          out[26] = _mm_packs_epi32(u[6], u[7]);
+          out[ 6] = _mm_packs_epi32(u[8], u[9]);
+          out[22] = _mm_packs_epi32(u[10], u[11]);
+          out[14] = _mm_packs_epi32(u[12], u[13]);
+          out[30] = _mm_packs_epi32(u[14], u[15]);
+#if DCT_HIGH_BIT_DEPTH
+          overflow = check_epi16_overflow_x8(&out[2], &out[18], &out[10],
+                                             &out[26], &out[6], &out[22],
+                                             &out[14], &out[30]);
+          if (overflow) {
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
+#endif  // DCT_HIGH_BIT_DEPTH
+        }
+        {
+          lstep1[32] = _mm_add_epi32(lstep3[34], lstep2[32]);
+          lstep1[33] = _mm_add_epi32(lstep3[35], lstep2[33]);
+          lstep1[34] = _mm_sub_epi32(lstep2[32], lstep3[34]);
+          lstep1[35] = _mm_sub_epi32(lstep2[33], lstep3[35]);
+          lstep1[36] = _mm_sub_epi32(lstep2[38], lstep3[36]);
+          lstep1[37] = _mm_sub_epi32(lstep2[39], lstep3[37]);
+          lstep1[38] = _mm_add_epi32(lstep3[36], lstep2[38]);
+          lstep1[39] = _mm_add_epi32(lstep3[37], lstep2[39]);
+          lstep1[40] = _mm_add_epi32(lstep3[42], lstep2[40]);
+          lstep1[41] = _mm_add_epi32(lstep3[43], lstep2[41]);
+          lstep1[42] = _mm_sub_epi32(lstep2[40], lstep3[42]);
+          lstep1[43] = _mm_sub_epi32(lstep2[41], lstep3[43]);
+          lstep1[44] = _mm_sub_epi32(lstep2[46], lstep3[44]);
+          lstep1[45] = _mm_sub_epi32(lstep2[47], lstep3[45]);
+          lstep1[46] = _mm_add_epi32(lstep3[44], lstep2[46]);
+          lstep1[47] = _mm_add_epi32(lstep3[45], lstep2[47]);
+          lstep1[48] = _mm_add_epi32(lstep3[50], lstep2[48]);
+          lstep1[49] = _mm_add_epi32(lstep3[51], lstep2[49]);
+          lstep1[50] = _mm_sub_epi32(lstep2[48], lstep3[50]);
+          lstep1[51] = _mm_sub_epi32(lstep2[49], lstep3[51]);
+          lstep1[52] = _mm_sub_epi32(lstep2[54], lstep3[52]);
+          lstep1[53] = _mm_sub_epi32(lstep2[55], lstep3[53]);
+          lstep1[54] = _mm_add_epi32(lstep3[52], lstep2[54]);
+          lstep1[55] = _mm_add_epi32(lstep3[53], lstep2[55]);
+          lstep1[56] = _mm_add_epi32(lstep3[58], lstep2[56]);
+          lstep1[57] = _mm_add_epi32(lstep3[59], lstep2[57]);
+          lstep1[58] = _mm_sub_epi32(lstep2[56], lstep3[58]);
+          lstep1[59] = _mm_sub_epi32(lstep2[57], lstep3[59]);
+          lstep1[60] = _mm_sub_epi32(lstep2[62], lstep3[60]);
+          lstep1[61] = _mm_sub_epi32(lstep2[63], lstep3[61]);
+          lstep1[62] = _mm_add_epi32(lstep3[60], lstep2[62]);
+          lstep1[63] = _mm_add_epi32(lstep3[61], lstep2[63]);
+        }
+        // stage 8
+        {
+          const __m128i k32_p31_p01 = pair_set_epi32(cospi_31_64, cospi_1_64);
+          const __m128i k32_p15_p17 = pair_set_epi32(cospi_15_64, cospi_17_64);
+          const __m128i k32_p23_p09 = pair_set_epi32(cospi_23_64, cospi_9_64);
+          const __m128i k32_p07_p25 = pair_set_epi32(cospi_7_64, cospi_25_64);
+          const __m128i k32_m25_p07 = pair_set_epi32(-cospi_25_64, cospi_7_64);
+          const __m128i k32_m09_p23 = pair_set_epi32(-cospi_9_64, cospi_23_64);
+          const __m128i k32_m17_p15 = pair_set_epi32(-cospi_17_64, cospi_15_64);
+          const __m128i k32_m01_p31 = pair_set_epi32(-cospi_1_64, cospi_31_64);
+
+          u[ 0] = _mm_unpacklo_epi32(lstep1[32], lstep1[62]);
+          u[ 1] = _mm_unpackhi_epi32(lstep1[32], lstep1[62]);
+          u[ 2] = _mm_unpacklo_epi32(lstep1[33], lstep1[63]);
+          u[ 3] = _mm_unpackhi_epi32(lstep1[33], lstep1[63]);
+          u[ 4] = _mm_unpacklo_epi32(lstep1[34], lstep1[60]);
+          u[ 5] = _mm_unpackhi_epi32(lstep1[34], lstep1[60]);
+          u[ 6] = _mm_unpacklo_epi32(lstep1[35], lstep1[61]);
+          u[ 7] = _mm_unpackhi_epi32(lstep1[35], lstep1[61]);
+          u[ 8] = _mm_unpacklo_epi32(lstep1[36], lstep1[58]);
+          u[ 9] = _mm_unpackhi_epi32(lstep1[36], lstep1[58]);
+          u[10] = _mm_unpacklo_epi32(lstep1[37], lstep1[59]);
+          u[11] = _mm_unpackhi_epi32(lstep1[37], lstep1[59]);
+          u[12] = _mm_unpacklo_epi32(lstep1[38], lstep1[56]);
+          u[13] = _mm_unpackhi_epi32(lstep1[38], lstep1[56]);
+          u[14] = _mm_unpacklo_epi32(lstep1[39], lstep1[57]);
+          u[15] = _mm_unpackhi_epi32(lstep1[39], lstep1[57]);
+
+          v[ 0] = k_madd_epi32(u[ 0], k32_p31_p01);
+          v[ 1] = k_madd_epi32(u[ 1], k32_p31_p01);
+          v[ 2] = k_madd_epi32(u[ 2], k32_p31_p01);
+          v[ 3] = k_madd_epi32(u[ 3], k32_p31_p01);
+          v[ 4] = k_madd_epi32(u[ 4], k32_p15_p17);
+          v[ 5] = k_madd_epi32(u[ 5], k32_p15_p17);
+          v[ 6] = k_madd_epi32(u[ 6], k32_p15_p17);
+          v[ 7] = k_madd_epi32(u[ 7], k32_p15_p17);
+          v[ 8] = k_madd_epi32(u[ 8], k32_p23_p09);
+          v[ 9] = k_madd_epi32(u[ 9], k32_p23_p09);
+          v[10] = k_madd_epi32(u[10], k32_p23_p09);
+          v[11] = k_madd_epi32(u[11], k32_p23_p09);
+          v[12] = k_madd_epi32(u[12], k32_p07_p25);
+          v[13] = k_madd_epi32(u[13], k32_p07_p25);
+          v[14] = k_madd_epi32(u[14], k32_p07_p25);
+          v[15] = k_madd_epi32(u[15], k32_p07_p25);
+          v[16] = k_madd_epi32(u[12], k32_m25_p07);
+          v[17] = k_madd_epi32(u[13], k32_m25_p07);
+          v[18] = k_madd_epi32(u[14], k32_m25_p07);
+          v[19] = k_madd_epi32(u[15], k32_m25_p07);
+          v[20] = k_madd_epi32(u[ 8], k32_m09_p23);
+          v[21] = k_madd_epi32(u[ 9], k32_m09_p23);
+          v[22] = k_madd_epi32(u[10], k32_m09_p23);
+          v[23] = k_madd_epi32(u[11], k32_m09_p23);
+          v[24] = k_madd_epi32(u[ 4], k32_m17_p15);
+          v[25] = k_madd_epi32(u[ 5], k32_m17_p15);
+          v[26] = k_madd_epi32(u[ 6], k32_m17_p15);
+          v[27] = k_madd_epi32(u[ 7], k32_m17_p15);
+          v[28] = k_madd_epi32(u[ 0], k32_m01_p31);
+          v[29] = k_madd_epi32(u[ 1], k32_m01_p31);
+          v[30] = k_madd_epi32(u[ 2], k32_m01_p31);
+          v[31] = k_madd_epi32(u[ 3], k32_m01_p31);
+
+#if DCT_HIGH_BIT_DEPTH
+          overflow = k_check_epi32_overflow_32(
+              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
+              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
+              &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23],
+              &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31],
+              &kZero);
+          if (overflow) {
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
+#endif  // DCT_HIGH_BIT_DEPTH
+          u[ 0] = k_packs_epi64(v[ 0], v[ 1]);
+          u[ 1] = k_packs_epi64(v[ 2], v[ 3]);
+          u[ 2] = k_packs_epi64(v[ 4], v[ 5]);
+          u[ 3] = k_packs_epi64(v[ 6], v[ 7]);
+          u[ 4] = k_packs_epi64(v[ 8], v[ 9]);
+          u[ 5] = k_packs_epi64(v[10], v[11]);
+          u[ 6] = k_packs_epi64(v[12], v[13]);
+          u[ 7] = k_packs_epi64(v[14], v[15]);
+          u[ 8] = k_packs_epi64(v[16], v[17]);
+          u[ 9] = k_packs_epi64(v[18], v[19]);
+          u[10] = k_packs_epi64(v[20], v[21]);
+          u[11] = k_packs_epi64(v[22], v[23]);
+          u[12] = k_packs_epi64(v[24], v[25]);
+          u[13] = k_packs_epi64(v[26], v[27]);
+          u[14] = k_packs_epi64(v[28], v[29]);
+          u[15] = k_packs_epi64(v[30], v[31]);
+
+          v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
+          v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
+          v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
+          v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
+          v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
+          v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
+          v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
+          v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
+          v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
+          v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
+          v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
+          v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
+          v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
+          v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
+          v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
+          v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
+
+          u[ 0] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS);
+          u[ 1] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS);
+          u[ 2] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS);
+          u[ 3] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS);
+          u[ 4] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS);
+          u[ 5] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS);
+          u[ 6] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS);
+          u[ 7] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS);
+          u[ 8] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS);
+          u[ 9] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS);
+          u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
+          u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
+          u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
+          u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS);
+          u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
+          u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS);
+
+          v[ 0] = _mm_cmplt_epi32(u[ 0], kZero);
+          v[ 1] = _mm_cmplt_epi32(u[ 1], kZero);
+          v[ 2] = _mm_cmplt_epi32(u[ 2], kZero);
+          v[ 3] = _mm_cmplt_epi32(u[ 3], kZero);
+          v[ 4] = _mm_cmplt_epi32(u[ 4], kZero);
+          v[ 5] = _mm_cmplt_epi32(u[ 5], kZero);
+          v[ 6] = _mm_cmplt_epi32(u[ 6], kZero);
+          v[ 7] = _mm_cmplt_epi32(u[ 7], kZero);
+          v[ 8] = _mm_cmplt_epi32(u[ 8], kZero);
+          v[ 9] = _mm_cmplt_epi32(u[ 9], kZero);
+          v[10] = _mm_cmplt_epi32(u[10], kZero);
+          v[11] = _mm_cmplt_epi32(u[11], kZero);
+          v[12] = _mm_cmplt_epi32(u[12], kZero);
+          v[13] = _mm_cmplt_epi32(u[13], kZero);
+          v[14] = _mm_cmplt_epi32(u[14], kZero);
+          v[15] = _mm_cmplt_epi32(u[15], kZero);
+
+          u[ 0] = _mm_sub_epi32(u[ 0], v[ 0]);
+          u[ 1] = _mm_sub_epi32(u[ 1], v[ 1]);
+          u[ 2] = _mm_sub_epi32(u[ 2], v[ 2]);
+          u[ 3] = _mm_sub_epi32(u[ 3], v[ 3]);
+          u[ 4] = _mm_sub_epi32(u[ 4], v[ 4]);
+          u[ 5] = _mm_sub_epi32(u[ 5], v[ 5]);
+          u[ 6] = _mm_sub_epi32(u[ 6], v[ 6]);
+          u[ 7] = _mm_sub_epi32(u[ 7], v[ 7]);
+          u[ 8] = _mm_sub_epi32(u[ 8], v[ 8]);
+          u[ 9] = _mm_sub_epi32(u[ 9], v[ 9]);
+          u[10] = _mm_sub_epi32(u[10], v[10]);
+          u[11] = _mm_sub_epi32(u[11], v[11]);
+          u[12] = _mm_sub_epi32(u[12], v[12]);
+          u[13] = _mm_sub_epi32(u[13], v[13]);
+          u[14] = _mm_sub_epi32(u[14], v[14]);
+          u[15] = _mm_sub_epi32(u[15], v[15]);
+
+          v[0] = _mm_add_epi32(u[0], K32One);
+          v[1] = _mm_add_epi32(u[1], K32One);
+          v[2] = _mm_add_epi32(u[2], K32One);
+          v[3] = _mm_add_epi32(u[3], K32One);
+          v[4] = _mm_add_epi32(u[4], K32One);
+          v[5] = _mm_add_epi32(u[5], K32One);
+          v[6] = _mm_add_epi32(u[6], K32One);
+          v[7] = _mm_add_epi32(u[7], K32One);
+          v[8] = _mm_add_epi32(u[8], K32One);
+          v[9] = _mm_add_epi32(u[9], K32One);
+          v[10] = _mm_add_epi32(u[10], K32One);
+          v[11] = _mm_add_epi32(u[11], K32One);
+          v[12] = _mm_add_epi32(u[12], K32One);
+          v[13] = _mm_add_epi32(u[13], K32One);
+          v[14] = _mm_add_epi32(u[14], K32One);
+          v[15] = _mm_add_epi32(u[15], K32One);
+
+          u[0] = _mm_srai_epi32(v[0], 2);
+          u[1] = _mm_srai_epi32(v[1], 2);
+          u[2] = _mm_srai_epi32(v[2], 2);
+          u[3] = _mm_srai_epi32(v[3], 2);
+          u[4] = _mm_srai_epi32(v[4], 2);
+          u[5] = _mm_srai_epi32(v[5], 2);
+          u[6] = _mm_srai_epi32(v[6], 2);
+          u[7] = _mm_srai_epi32(v[7], 2);
+          u[8] = _mm_srai_epi32(v[8], 2);
+          u[9] = _mm_srai_epi32(v[9], 2);
+          u[10] = _mm_srai_epi32(v[10], 2);
+          u[11] = _mm_srai_epi32(v[11], 2);
+          u[12] = _mm_srai_epi32(v[12], 2);
+          u[13] = _mm_srai_epi32(v[13], 2);
+          u[14] = _mm_srai_epi32(v[14], 2);
+          u[15] = _mm_srai_epi32(v[15], 2);
+
+          out[ 1] = _mm_packs_epi32(u[0], u[1]);
+          out[17] = _mm_packs_epi32(u[2], u[3]);
+          out[ 9] = _mm_packs_epi32(u[4], u[5]);
+          out[25] = _mm_packs_epi32(u[6], u[7]);
+          out[ 7] = _mm_packs_epi32(u[8], u[9]);
+          out[23] = _mm_packs_epi32(u[10], u[11]);
+          out[15] = _mm_packs_epi32(u[12], u[13]);
+          out[31] = _mm_packs_epi32(u[14], u[15]);
+#if DCT_HIGH_BIT_DEPTH
+          overflow = check_epi16_overflow_x8(&out[1], &out[17], &out[9],
+                                             &out[25], &out[7], &out[23],
+                                             &out[15], &out[31]);
+          if (overflow) {
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
+#endif  // DCT_HIGH_BIT_DEPTH
+        }
+        {
+          const __m128i k32_p27_p05 = pair_set_epi32(cospi_27_64, cospi_5_64);
+          const __m128i k32_p11_p21 = pair_set_epi32(cospi_11_64, cospi_21_64);
+          const __m128i k32_p19_p13 = pair_set_epi32(cospi_19_64, cospi_13_64);
+          const __m128i k32_p03_p29 = pair_set_epi32(cospi_3_64, cospi_29_64);
+          const __m128i k32_m29_p03 = pair_set_epi32(-cospi_29_64, cospi_3_64);
+          const __m128i k32_m13_p19 = pair_set_epi32(-cospi_13_64, cospi_19_64);
+          const __m128i k32_m21_p11 = pair_set_epi32(-cospi_21_64, cospi_11_64);
+          const __m128i k32_m05_p27 = pair_set_epi32(-cospi_5_64, cospi_27_64);
+
+          u[ 0] = _mm_unpacklo_epi32(lstep1[40], lstep1[54]);
+          u[ 1] = _mm_unpackhi_epi32(lstep1[40], lstep1[54]);
+          u[ 2] = _mm_unpacklo_epi32(lstep1[41], lstep1[55]);
+          u[ 3] = _mm_unpackhi_epi32(lstep1[41], lstep1[55]);
+          u[ 4] = _mm_unpacklo_epi32(lstep1[42], lstep1[52]);
+          u[ 5] = _mm_unpackhi_epi32(lstep1[42], lstep1[52]);
+          u[ 6] = _mm_unpacklo_epi32(lstep1[43], lstep1[53]);
+          u[ 7] = _mm_unpackhi_epi32(lstep1[43], lstep1[53]);
+          u[ 8] = _mm_unpacklo_epi32(lstep1[44], lstep1[50]);
+          u[ 9] = _mm_unpackhi_epi32(lstep1[44], lstep1[50]);
+          u[10] = _mm_unpacklo_epi32(lstep1[45], lstep1[51]);
+          u[11] = _mm_unpackhi_epi32(lstep1[45], lstep1[51]);
+          u[12] = _mm_unpacklo_epi32(lstep1[46], lstep1[48]);
+          u[13] = _mm_unpackhi_epi32(lstep1[46], lstep1[48]);
+          u[14] = _mm_unpacklo_epi32(lstep1[47], lstep1[49]);
+          u[15] = _mm_unpackhi_epi32(lstep1[47], lstep1[49]);
+
+          v[ 0] = k_madd_epi32(u[ 0], k32_p27_p05);
+          v[ 1] = k_madd_epi32(u[ 1], k32_p27_p05);
+          v[ 2] = k_madd_epi32(u[ 2], k32_p27_p05);
+          v[ 3] = k_madd_epi32(u[ 3], k32_p27_p05);
+          v[ 4] = k_madd_epi32(u[ 4], k32_p11_p21);
+          v[ 5] = k_madd_epi32(u[ 5], k32_p11_p21);
+          v[ 6] = k_madd_epi32(u[ 6], k32_p11_p21);
+          v[ 7] = k_madd_epi32(u[ 7], k32_p11_p21);
+          v[ 8] = k_madd_epi32(u[ 8], k32_p19_p13);
+          v[ 9] = k_madd_epi32(u[ 9], k32_p19_p13);
+          v[10] = k_madd_epi32(u[10], k32_p19_p13);
+          v[11] = k_madd_epi32(u[11], k32_p19_p13);
+          v[12] = k_madd_epi32(u[12], k32_p03_p29);
+          v[13] = k_madd_epi32(u[13], k32_p03_p29);
+          v[14] = k_madd_epi32(u[14], k32_p03_p29);
+          v[15] = k_madd_epi32(u[15], k32_p03_p29);
+          v[16] = k_madd_epi32(u[12], k32_m29_p03);
+          v[17] = k_madd_epi32(u[13], k32_m29_p03);
+          v[18] = k_madd_epi32(u[14], k32_m29_p03);
+          v[19] = k_madd_epi32(u[15], k32_m29_p03);
+          v[20] = k_madd_epi32(u[ 8], k32_m13_p19);
+          v[21] = k_madd_epi32(u[ 9], k32_m13_p19);
+          v[22] = k_madd_epi32(u[10], k32_m13_p19);
+          v[23] = k_madd_epi32(u[11], k32_m13_p19);
+          v[24] = k_madd_epi32(u[ 4], k32_m21_p11);
+          v[25] = k_madd_epi32(u[ 5], k32_m21_p11);
+          v[26] = k_madd_epi32(u[ 6], k32_m21_p11);
+          v[27] = k_madd_epi32(u[ 7], k32_m21_p11);
+          v[28] = k_madd_epi32(u[ 0], k32_m05_p27);
+          v[29] = k_madd_epi32(u[ 1], k32_m05_p27);
+          v[30] = k_madd_epi32(u[ 2], k32_m05_p27);
+          v[31] = k_madd_epi32(u[ 3], k32_m05_p27);
+
+#if DCT_HIGH_BIT_DEPTH
+          overflow = k_check_epi32_overflow_32(
+              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
+              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
+              &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23],
+              &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31],
+              &kZero);
+          if (overflow) {
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
+#endif  // DCT_HIGH_BIT_DEPTH
+          u[ 0] = k_packs_epi64(v[ 0], v[ 1]);
+          u[ 1] = k_packs_epi64(v[ 2], v[ 3]);
+          u[ 2] = k_packs_epi64(v[ 4], v[ 5]);
+          u[ 3] = k_packs_epi64(v[ 6], v[ 7]);
+          u[ 4] = k_packs_epi64(v[ 8], v[ 9]);
+          u[ 5] = k_packs_epi64(v[10], v[11]);
+          u[ 6] = k_packs_epi64(v[12], v[13]);
+          u[ 7] = k_packs_epi64(v[14], v[15]);
+          u[ 8] = k_packs_epi64(v[16], v[17]);
+          u[ 9] = k_packs_epi64(v[18], v[19]);
+          u[10] = k_packs_epi64(v[20], v[21]);
+          u[11] = k_packs_epi64(v[22], v[23]);
+          u[12] = k_packs_epi64(v[24], v[25]);
+          u[13] = k_packs_epi64(v[26], v[27]);
+          u[14] = k_packs_epi64(v[28], v[29]);
+          u[15] = k_packs_epi64(v[30], v[31]);
+
+          v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
+          v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
+          v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
+          v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
+          v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
+          v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
+          v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
+          v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
+          v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
+          v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
+          v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
+          v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
+          v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
+          v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
+          v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
+          v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
+
+          u[ 0] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS);
+          u[ 1] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS);
+          u[ 2] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS);
+          u[ 3] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS);
+          u[ 4] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS);
+          u[ 5] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS);
+          u[ 6] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS);
+          u[ 7] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS);
+          u[ 8] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS);
+          u[ 9] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS);
+          u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
+          u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
+          u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
+          u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS);
+          u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
+          u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS);
+
+          v[ 0] = _mm_cmplt_epi32(u[ 0], kZero);
+          v[ 1] = _mm_cmplt_epi32(u[ 1], kZero);
+          v[ 2] = _mm_cmplt_epi32(u[ 2], kZero);
+          v[ 3] = _mm_cmplt_epi32(u[ 3], kZero);
+          v[ 4] = _mm_cmplt_epi32(u[ 4], kZero);
+          v[ 5] = _mm_cmplt_epi32(u[ 5], kZero);
+          v[ 6] = _mm_cmplt_epi32(u[ 6], kZero);
+          v[ 7] = _mm_cmplt_epi32(u[ 7], kZero);
+          v[ 8] = _mm_cmplt_epi32(u[ 8], kZero);
+          v[ 9] = _mm_cmplt_epi32(u[ 9], kZero);
+          v[10] = _mm_cmplt_epi32(u[10], kZero);
+          v[11] = _mm_cmplt_epi32(u[11], kZero);
+          v[12] = _mm_cmplt_epi32(u[12], kZero);
+          v[13] = _mm_cmplt_epi32(u[13], kZero);
+          v[14] = _mm_cmplt_epi32(u[14], kZero);
+          v[15] = _mm_cmplt_epi32(u[15], kZero);
+
+          u[ 0] = _mm_sub_epi32(u[ 0], v[ 0]);
+          u[ 1] = _mm_sub_epi32(u[ 1], v[ 1]);
+          u[ 2] = _mm_sub_epi32(u[ 2], v[ 2]);
+          u[ 3] = _mm_sub_epi32(u[ 3], v[ 3]);
+          u[ 4] = _mm_sub_epi32(u[ 4], v[ 4]);
+          u[ 5] = _mm_sub_epi32(u[ 5], v[ 5]);
+          u[ 6] = _mm_sub_epi32(u[ 6], v[ 6]);
+          u[ 7] = _mm_sub_epi32(u[ 7], v[ 7]);
+          u[ 8] = _mm_sub_epi32(u[ 8], v[ 8]);
+          u[ 9] = _mm_sub_epi32(u[ 9], v[ 9]);
+          u[10] = _mm_sub_epi32(u[10], v[10]);
+          u[11] = _mm_sub_epi32(u[11], v[11]);
+          u[12] = _mm_sub_epi32(u[12], v[12]);
+          u[13] = _mm_sub_epi32(u[13], v[13]);
+          u[14] = _mm_sub_epi32(u[14], v[14]);
+          u[15] = _mm_sub_epi32(u[15], v[15]);
+
+          v[0] = _mm_add_epi32(u[0], K32One);
+          v[1] = _mm_add_epi32(u[1], K32One);
+          v[2] = _mm_add_epi32(u[2], K32One);
+          v[3] = _mm_add_epi32(u[3], K32One);
+          v[4] = _mm_add_epi32(u[4], K32One);
+          v[5] = _mm_add_epi32(u[5], K32One);
+          v[6] = _mm_add_epi32(u[6], K32One);
+          v[7] = _mm_add_epi32(u[7], K32One);
+          v[8] = _mm_add_epi32(u[8], K32One);
+          v[9] = _mm_add_epi32(u[9], K32One);
+          v[10] = _mm_add_epi32(u[10], K32One);
+          v[11] = _mm_add_epi32(u[11], K32One);
+          v[12] = _mm_add_epi32(u[12], K32One);
+          v[13] = _mm_add_epi32(u[13], K32One);
+          v[14] = _mm_add_epi32(u[14], K32One);
+          v[15] = _mm_add_epi32(u[15], K32One);
+
+          u[0] = _mm_srai_epi32(v[0], 2);
+          u[1] = _mm_srai_epi32(v[1], 2);
+          u[2] = _mm_srai_epi32(v[2], 2);
+          u[3] = _mm_srai_epi32(v[3], 2);
+          u[4] = _mm_srai_epi32(v[4], 2);
+          u[5] = _mm_srai_epi32(v[5], 2);
+          u[6] = _mm_srai_epi32(v[6], 2);
+          u[7] = _mm_srai_epi32(v[7], 2);
+          u[8] = _mm_srai_epi32(v[8], 2);
+          u[9] = _mm_srai_epi32(v[9], 2);
+          u[10] = _mm_srai_epi32(v[10], 2);
+          u[11] = _mm_srai_epi32(v[11], 2);
+          u[12] = _mm_srai_epi32(v[12], 2);
+          u[13] = _mm_srai_epi32(v[13], 2);
+          u[14] = _mm_srai_epi32(v[14], 2);
+          u[15] = _mm_srai_epi32(v[15], 2);
+
+          out[ 5] = _mm_packs_epi32(u[0], u[1]);
+          out[21] = _mm_packs_epi32(u[2], u[3]);
+          out[13] = _mm_packs_epi32(u[4], u[5]);
+          out[29] = _mm_packs_epi32(u[6], u[7]);
+          out[ 3] = _mm_packs_epi32(u[8], u[9]);
+          out[19] = _mm_packs_epi32(u[10], u[11]);
+          out[11] = _mm_packs_epi32(u[12], u[13]);
+          out[27] = _mm_packs_epi32(u[14], u[15]);
+#if DCT_HIGH_BIT_DEPTH
+          overflow = check_epi16_overflow_x8(&out[5], &out[21], &out[13],
+                                             &out[29], &out[3], &out[19],
+                                             &out[11], &out[27]);
+          if (overflow) {
+            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
+            return;
+          }
+#endif  // DCT_HIGH_BIT_DEPTH
+        }
+      }
+#endif  // FDCT32x32_HIGH_PRECISION
+      // Transpose the results; do it as four 8x8 transposes.
+      {
+        int transpose_block;
+        int16_t *output0 = &intermediate[column_start * 32];
+        tran_low_t *output1 = &output_org[column_start * 32];
+        for (transpose_block = 0; transpose_block < 4; ++transpose_block) {
+          __m128i *this_out = &out[8 * transpose_block];
+          // 00 01 02 03 04 05 06 07
+          // 10 11 12 13 14 15 16 17
+          // 20 21 22 23 24 25 26 27
+          // 30 31 32 33 34 35 36 37
+          // 40 41 42 43 44 45 46 47
+          // 50 51 52 53 54 55 56 57
+          // 60 61 62 63 64 65 66 67
+          // 70 71 72 73 74 75 76 77
+          const __m128i tr0_0 = _mm_unpacklo_epi16(this_out[0], this_out[1]);
+          const __m128i tr0_1 = _mm_unpacklo_epi16(this_out[2], this_out[3]);
+          const __m128i tr0_2 = _mm_unpackhi_epi16(this_out[0], this_out[1]);
+          const __m128i tr0_3 = _mm_unpackhi_epi16(this_out[2], this_out[3]);
+          const __m128i tr0_4 = _mm_unpacklo_epi16(this_out[4], this_out[5]);
+          const __m128i tr0_5 = _mm_unpacklo_epi16(this_out[6], this_out[7]);
+          const __m128i tr0_6 = _mm_unpackhi_epi16(this_out[4], this_out[5]);
+          const __m128i tr0_7 = _mm_unpackhi_epi16(this_out[6], this_out[7]);
+          // 00 10 01 11 02 12 03 13
+          // 20 30 21 31 22 32 23 33
+          // 04 14 05 15 06 16 07 17
+          // 24 34 25 35 26 36 27 37
+          // 40 50 41 51 42 52 43 53
+          // 60 70 61 71 62 72 63 73
+          // 44 54 45 55 46 56 47 57
+          // 64 74 65 75 66 76 67 77
+          const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
+          const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3);
+          const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
+          const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3);
+          const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);
+          const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);
+          const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);
+          const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);
+          // 00 10 20 30 01 11 21 31
+          // 40 50 60 70 41 51 61 71
+          // 02 12 22 32 03 13 23 33
+          // 42 52 62 72 43 53 63 73
+          // 04 14 24 34 05 15 25 35
+          // 44 54 64 74 45 55 65 75
+          // 06 16 26 36 07 17 27 37
+          // 46 56 66 76 47 57 67 77
+          __m128i tr2_0 = _mm_unpacklo_epi64(tr1_0, tr1_4);
+          __m128i tr2_1 = _mm_unpackhi_epi64(tr1_0, tr1_4);
+          __m128i tr2_2 = _mm_unpacklo_epi64(tr1_2, tr1_6);
+          __m128i tr2_3 = _mm_unpackhi_epi64(tr1_2, tr1_6);
+          __m128i tr2_4 = _mm_unpacklo_epi64(tr1_1, tr1_5);
+          __m128i tr2_5 = _mm_unpackhi_epi64(tr1_1, tr1_5);
+          __m128i tr2_6 = _mm_unpacklo_epi64(tr1_3, tr1_7);
+          __m128i tr2_7 = _mm_unpackhi_epi64(tr1_3, tr1_7);
+          // 00 10 20 30 40 50 60 70
+          // 01 11 21 31 41 51 61 71
+          // 02 12 22 32 42 52 62 72
+          // 03 13 23 33 43 53 63 73
+          // 04 14 24 34 44 54 64 74
+          // 05 15 25 35 45 55 65 75
+          // 06 16 26 36 46 56 66 76
+          // 07 17 27 37 47 57 67 77
+          if (pass == 0) {
+            // output[j] = (output[j] + 1 + (output[j] > 0)) >> 2;
+            // TODO(cd): see quality impact of only doing
+            //           output[j] = (output[j] + 1) >> 2;
+            //           which would remove the code between here ...
+            __m128i tr2_0_0 = _mm_cmpgt_epi16(tr2_0, kZero);
+            __m128i tr2_1_0 = _mm_cmpgt_epi16(tr2_1, kZero);
+            __m128i tr2_2_0 = _mm_cmpgt_epi16(tr2_2, kZero);
+            __m128i tr2_3_0 = _mm_cmpgt_epi16(tr2_3, kZero);
+            __m128i tr2_4_0 = _mm_cmpgt_epi16(tr2_4, kZero);
+            __m128i tr2_5_0 = _mm_cmpgt_epi16(tr2_5, kZero);
+            __m128i tr2_6_0 = _mm_cmpgt_epi16(tr2_6, kZero);
+            __m128i tr2_7_0 = _mm_cmpgt_epi16(tr2_7, kZero);
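+            // _mm_cmpgt_epi16 yields -1 lanes where the value is positive;
+            // the subtracts below turn that mask into the +1 bias of
+            // (x + 1 + (x > 0)) >> 2.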
+            tr2_0 = _mm_sub_epi16(tr2_0, tr2_0_0);
+            tr2_1 = _mm_sub_epi16(tr2_1, tr2_1_0);
+            tr2_2 = _mm_sub_epi16(tr2_2, tr2_2_0);
+            tr2_3 = _mm_sub_epi16(tr2_3, tr2_3_0);
+            tr2_4 = _mm_sub_epi16(tr2_4, tr2_4_0);
+            tr2_5 = _mm_sub_epi16(tr2_5, tr2_5_0);
+            tr2_6 = _mm_sub_epi16(tr2_6, tr2_6_0);
+            tr2_7 = _mm_sub_epi16(tr2_7, tr2_7_0);
+            //           ... and here.
+            //           PS: also change code in vp9/encoder/vp9_dct.c
+            tr2_0 = _mm_add_epi16(tr2_0, kOne);
+            tr2_1 = _mm_add_epi16(tr2_1, kOne);
+            tr2_2 = _mm_add_epi16(tr2_2, kOne);
+            tr2_3 = _mm_add_epi16(tr2_3, kOne);
+            tr2_4 = _mm_add_epi16(tr2_4, kOne);
+            tr2_5 = _mm_add_epi16(tr2_5, kOne);
+            tr2_6 = _mm_add_epi16(tr2_6, kOne);
+            tr2_7 = _mm_add_epi16(tr2_7, kOne);
+            tr2_0 = _mm_srai_epi16(tr2_0, 2);
+            tr2_1 = _mm_srai_epi16(tr2_1, 2);
+            tr2_2 = _mm_srai_epi16(tr2_2, 2);
+            tr2_3 = _mm_srai_epi16(tr2_3, 2);
+            tr2_4 = _mm_srai_epi16(tr2_4, 2);
+            tr2_5 = _mm_srai_epi16(tr2_5, 2);
+            tr2_6 = _mm_srai_epi16(tr2_6, 2);
+            tr2_7 = _mm_srai_epi16(tr2_7, 2);
+          }
+          // Note: even though all these stores are aligned, using the aligned
+          //       intrinsic makes the code slightly slower.
+          if (pass == 0) {
+            _mm_storeu_si128((__m128i *)(output0 + 0 * 32), tr2_0);
+            _mm_storeu_si128((__m128i *)(output0 + 1 * 32), tr2_1);
+            _mm_storeu_si128((__m128i *)(output0 + 2 * 32), tr2_2);
+            _mm_storeu_si128((__m128i *)(output0 + 3 * 32), tr2_3);
+            _mm_storeu_si128((__m128i *)(output0 + 4 * 32), tr2_4);
+            _mm_storeu_si128((__m128i *)(output0 + 5 * 32), tr2_5);
+            _mm_storeu_si128((__m128i *)(output0 + 6 * 32), tr2_6);
+            _mm_storeu_si128((__m128i *)(output0 + 7 * 32), tr2_7);
+            // Process next 8x8
+            output0 += 8;
+          } else {
+            storeu_output(&tr2_0, (output1 + 0 * 32));
+            storeu_output(&tr2_1, (output1 + 1 * 32));
+            storeu_output(&tr2_2, (output1 + 2 * 32));
+            storeu_output(&tr2_3, (output1 + 3 * 32));
+            storeu_output(&tr2_4, (output1 + 4 * 32));
+            storeu_output(&tr2_5, (output1 + 5 * 32));
+            storeu_output(&tr2_6, (output1 + 6 * 32));
+            storeu_output(&tr2_7, (output1 + 7 * 32));
+            // Process next 8x8
+            output1 += 8;
+          }
+        }
+      }
+    }
+  }
+}  // NOLINT
+
+#undef ADD_EPI16
+#undef SUB_EPI16
+#undef HIGH_FDCT32x32_2D_C
+#undef HIGH_FDCT32x32_2D_ROWS_C
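
The SIMD above applies two scalar rounding identities over and over. A
minimal sketch in plain C (DCT_CONST_BITS is 14 in libvpx's transform
headers; the function names here are illustrative, not library API):

#include <stdint.h>

#define DCT_CONST_BITS 14
#define DCT_CONST_ROUNDING (1 << (DCT_CONST_BITS - 1))

/* Butterfly rounding: k_madd_epi32 + k__DCT_CONST_ROUNDING + srai compute
 * (a * c0 + b * c1 + 8192) >> 14 in every lane. */
static int32_t butterfly_round(int32_t a, int32_t b, int32_t c0, int32_t c1) {
  const int64_t sum = (int64_t)a * c0 + (int64_t)b * c1;
  return (int32_t)((sum + DCT_CONST_ROUNDING) >> DCT_CONST_BITS);
}

/* Final down-scale: cmplt (cmpgt in pass 0) yields an all-ones mask, and
 * subtracting it biases the marked lanes by +1 before the shift. */
static int32_t scale_bias_negative(int32_t x) {
  return (x + 1 + (x < 0)) >> 2;
}

static int32_t scale_bias_positive(int32_t x) {
  return (x + 1 + (x > 0)) >> 2;
}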
--- /dev/null
+++ b/vpx_dsp/x86/fwd_txfm_avx2.c
@@ -1,0 +1,23 @@
+/*
+ *  Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vpx_config.h"
+
+#define FDCT32x32_2D_AVX2 vp9_fdct32x32_rd_avx2
+#define FDCT32x32_HIGH_PRECISION 0
+#include "vpx_dsp/x86/fwd_dct32x32_impl_avx2.h"
+#undef  FDCT32x32_2D_AVX2
+#undef  FDCT32x32_HIGH_PRECISION
+
+#define FDCT32x32_2D_AVX2 vp9_fdct32x32_avx2
+#define FDCT32x32_HIGH_PRECISION 1
+#include "vpx_dsp/x86/fwd_dct32x32_impl_avx2.h" // NOLINT
+#undef  FDCT32x32_2D_AVX2
+#undef  FDCT32x32_HIGH_PRECISION
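
Both wrapper files rely on the same include-as-template trick: the
implementation header carries no include guard and emits one function per
inclusion, with its name and rounding behavior fixed by the macros defined
just before the #include. Schematically (the body below is a placeholder
sketch, not the real transform; the signature follows the rtcd prototypes):

#include <stdint.h>

typedef int16_t tran_low_t;  /* int32_t in high-bitdepth builds */

/* One inclusion of the implementation header expands to one function;
 * FDCT32x32_2D_AVX2 and FDCT32x32_HIGH_PRECISION are set by the includer. */
void FDCT32x32_2D_AVX2(const int16_t *input, tran_low_t *output, int stride) {
#if FDCT32x32_HIGH_PRECISION
  /* full-precision rounding between the row and column passes */
#else
  /* cheaper rounding used by the rate-distortion (_rd) variant */
#endif
  (void)input; (void)output; (void)stride;  /* placeholder body */
}

Including the header twice therefore compiles vp9_fdct32x32_rd_avx2 and
vp9_fdct32x32_avx2 from a single source body.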
--- a/vpx_dsp/x86/fwd_txfm_sse2.c
+++ b/vpx_dsp/x86/fwd_txfm_sse2.c
@@ -11,7 +11,6 @@
 #include "./vpx_config.h"
 
 #define DCT_HIGH_BIT_DEPTH 0
-
 #define FDCT4x4_2D vp9_fdct4x4_sse2
 #define FDCT8x8_2D vp9_fdct8x8_sse2
 #define FDCT16x16_2D vp9_fdct16x16_sse2
@@ -19,6 +18,18 @@
 #undef  FDCT4x4_2D
 #undef  FDCT8x8_2D
 #undef  FDCT16x16_2D
+
+#define FDCT32x32_2D vp9_fdct32x32_rd_sse2
+#define FDCT32x32_HIGH_PRECISION 0
+#include "vpx_dsp/x86/fwd_dct32x32_impl_sse2.h"
+#undef  FDCT32x32_2D
+#undef  FDCT32x32_HIGH_PRECISION
+
+#define FDCT32x32_2D vp9_fdct32x32_sse2
+#define FDCT32x32_HIGH_PRECISION 1
+#include "vpx_dsp/x86/fwd_dct32x32_impl_sse2.h"  // NOLINT
+#undef  FDCT32x32_2D
+#undef  FDCT32x32_HIGH_PRECISION
 #undef  DCT_HIGH_BIT_DEPTH
 
 #if CONFIG_VP9_HIGHBITDEPTH
@@ -30,5 +41,17 @@
 #undef  FDCT4x4_2D
 #undef  FDCT8x8_2D
 #undef  FDCT16x16_2D
+
+#define FDCT32x32_2D vp9_highbd_fdct32x32_rd_sse2
+#define FDCT32x32_HIGH_PRECISION 0
+#include "vpx_dsp/x86/fwd_dct32x32_impl_sse2.h" // NOLINT
+#undef  FDCT32x32_2D
+#undef  FDCT32x32_HIGH_PRECISION
+
+#define FDCT32x32_2D vp9_highbd_fdct32x32_sse2
+#define FDCT32x32_HIGH_PRECISION 1
+#include "vpx_dsp/x86/fwd_dct32x32_impl_sse2.h" // NOLINT
+#undef  FDCT32x32_2D
+#undef  FDCT32x32_HIGH_PRECISION
 #undef  DCT_HIGH_BIT_DEPTH
 #endif  // CONFIG_VP9_HIGHBITDEPTH
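
For reference, the four 8x8 transposes in the row/column loop use the
standard SSE2 unpack ladder. A self-contained sketch of a single 8x8 block
(an illustrative helper, not part of this patch):

#include <emmintrin.h>

/* Transpose an 8x8 block of int16_t, one row per register: interleave at
 * 16-bit, then 32-bit, then 64-bit granularity, exactly as the tr0_/tr1_/
 * tr2_ stages above. */
static void transpose_8x8_epi16(__m128i r[8]) {
  const __m128i a0 = _mm_unpacklo_epi16(r[0], r[1]);
  const __m128i a1 = _mm_unpacklo_epi16(r[2], r[3]);
  const __m128i a2 = _mm_unpackhi_epi16(r[0], r[1]);
  const __m128i a3 = _mm_unpackhi_epi16(r[2], r[3]);
  const __m128i a4 = _mm_unpacklo_epi16(r[4], r[5]);
  const __m128i a5 = _mm_unpacklo_epi16(r[6], r[7]);
  const __m128i a6 = _mm_unpackhi_epi16(r[4], r[5]);
  const __m128i a7 = _mm_unpackhi_epi16(r[6], r[7]);
  const __m128i b0 = _mm_unpacklo_epi32(a0, a1);
  const __m128i b1 = _mm_unpackhi_epi32(a0, a1);
  const __m128i b2 = _mm_unpacklo_epi32(a2, a3);
  const __m128i b3 = _mm_unpackhi_epi32(a2, a3);
  const __m128i b4 = _mm_unpacklo_epi32(a4, a5);
  const __m128i b5 = _mm_unpackhi_epi32(a4, a5);
  const __m128i b6 = _mm_unpacklo_epi32(a6, a7);
  const __m128i b7 = _mm_unpackhi_epi32(a6, a7);
  r[0] = _mm_unpacklo_epi64(b0, b4);
  r[1] = _mm_unpackhi_epi64(b0, b4);
  r[2] = _mm_unpacklo_epi64(b1, b5);
  r[3] = _mm_unpackhi_epi64(b1, b5);
  r[4] = _mm_unpacklo_epi64(b2, b6);
  r[5] = _mm_unpackhi_epi64(b2, b6);
  r[6] = _mm_unpacklo_epi64(b3, b7);
  r[7] = _mm_unpackhi_epi64(b3, b7);
}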