ref: effbd82f6a7bad14bbd83d6a15caa6ca0ddb5e0c
parent: 0fdc9af7b2b3445ba72a8d9ae68383d0bbc1b929
parent: 1ece42aaf76c0b7e059fef5ef17c605cf1f5a478
author: Jerome Jiang <[email protected]>
date: Thu Mar 14 19:08:43 EDT 2019
Merge "Enclose macro arguments in parentheses"
--- a/examples/vpx_temporal_svc_encoder.c
+++ b/examples/vpx_temporal_svc_encoder.c
@@ -30,7 +30,7 @@
#define ROI_MAP 0
-#define zero(Dest) memset(&Dest, 0, sizeof(Dest));
+#define zero(Dest) memset(&(Dest), 0, sizeof(Dest));
static const char *exec_name;
--- a/vp8/common/common.h
+++ b/vp8/common/common.h
@@ -31,15 +31,15 @@
/* Use this for variably-sized arrays. */
-#define vp8_copy_array(Dest, Src, N) \
- { \
- assert(sizeof(*Dest) == sizeof(*Src)); \
- memcpy(Dest, Src, N * sizeof(*Src)); \
+#define vp8_copy_array(Dest, Src, N) \
+ { \
+ assert(sizeof(*(Dest)) == sizeof(*(Src))); \
+ memcpy(Dest, Src, (N) * sizeof(*(Src))); \
}
-#define vp8_zero(Dest) memset(&Dest, 0, sizeof(Dest));
+#define vp8_zero(Dest) memset(&(Dest), 0, sizeof(Dest));
-#define vp8_zero_array(Dest, N) memset(Dest, 0, N * sizeof(*Dest));
+#define vp8_zero_array(Dest, N) memset(Dest, 0, (N) * sizeof(*(Dest)));
#ifdef __cplusplus
} // extern "C"
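The (N) * sizeof(...) part of the vp8_copy_array/vp8_zero_array change matters whenever the element count is written as an expression at the call site. A sketch using the patched macro with a hypothetical caller (clear_rows is not from libvpx):

    #include <string.h>

    /* Patched form, as in vp8/common/common.h. */
    #define vp8_zero_array(Dest, N) memset(Dest, 0, (N) * sizeof(*(Dest)));

    static void clear_rows(int *buf, int rows) {
      /* Before the patch this call expanded to
       *   memset(buf, 0, rows + 1 * sizeof(*buf));
       * i.e. rows + sizeof(int) bytes, silently leaving most of the buffer
       * untouched. With (N) it clears (rows + 1) * sizeof(int) bytes. */
      vp8_zero_array(buf, rows + 1);
    }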
--- a/vp8/common/treecoder.h
+++ b/vp8/common/treecoder.h
@@ -32,7 +32,7 @@
typedef const bool_writer c_bool_writer;
typedef const bool_reader c_bool_reader;
-#define vp8_complement(x) (255 - x)
+#define vp8_complement(x) (255 - (x))
/* We build coding trees compactly in arrays.
Each node of the tree is a pair of vp8_tree_indices.
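vp8_complement is a one-operator macro, so it shows the arithmetic regrouping in its simplest form. The names below are hypothetical stand-ins for the pre- and post-patch definitions:

    #define OLD_COMPLEMENT(x) (255 - x)   /* pre-patch form */
    #define NEW_COMPLEMENT(x) (255 - (x)) /* patched form   */

    static int demo_complement(int a, int b) {
      /* With a = 10, b = 3:
       *   OLD_COMPLEMENT(a - b) -> (255 - a - b)   == 242
       *   NEW_COMPLEMENT(a - b) -> (255 - (a - b)) == 248 */
      return NEW_COMPLEMENT(a - b);
    }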
--- a/vp8/decoder/onyxd_int.h
+++ b/vp8/decoder/onyxd_int.h
@@ -136,8 +136,8 @@
#if CONFIG_DEBUG
#define CHECK_MEM_ERROR(lval, expr) \
do { \
- lval = (expr); \
- if (!lval) \
+ (lval) = (expr); \
+ if (!(lval)) \
vpx_internal_error(&pbi->common.error, VPX_CODEC_MEM_ERROR, \
"Failed to allocate " #lval " at %s:%d", __FILE__, \
__LINE__); \
@@ -145,8 +145,8 @@
#else
#define CHECK_MEM_ERROR(lval, expr) \
do { \
- lval = (expr); \
- if (!lval) \
+ (lval) = (expr); \
+ if (!(lval)) \
vpx_internal_error(&pbi->common.error, VPX_CODEC_MEM_ERROR, \
"Failed to allocate " #lval); \
} while (0)
--- a/vp8/encoder/lookahead.h
+++ b/vp8/encoder/lookahead.h
@@ -74,7 +74,7 @@
struct lookahead_entry *vp8_lookahead_pop(struct lookahead_ctx *ctx, int drain);
#define PEEK_FORWARD 1
-#define PEEK_BACKWARD -1
+#define PEEK_BACKWARD (-1)
/**\brief Get a future source buffer to encode
*
* \param[in] ctx Pointer to the lookahead context
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -687,8 +687,8 @@
/* Convenience macros for mapping speed and mode into a continuous
* range
*/
-#define GOOD(x) (x + 1)
-#define RT(x) (x + 7)
+#define GOOD(x) ((x) + 1)
+#define RT(x) ((x) + 7)
static int speed_map(int speed, const int *map) {
int res;
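GOOD() and RT() appear to be called with plain integer literals in this file, so the change is defensive here, but the precedence trap is real if an expression is ever passed, because + binds tighter than <<. A sketch with hypothetical names and a hypothetical argument:

    #define GOOD_OLD(x) (x + 1)   /* pre-patch form */
    #define GOOD_NEW(x) ((x) + 1) /* patched form   */

    static int demo_speed(int speed) {
      /* With speed = 1:
       *   GOOD_OLD(speed << 1) -> (speed << 1 + 1)   == 1 << 2       == 4
       *   GOOD_NEW(speed << 1) -> ((speed << 1) + 1) == (1 << 1) + 1 == 3 */
      return GOOD_NEW(speed << 1);
    }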
--- a/vp8/encoder/onyx_int.h
+++ b/vp8/encoder/onyx_int.h
@@ -716,8 +716,8 @@
#if CONFIG_DEBUG
#define CHECK_MEM_ERROR(lval, expr) \
do { \
- lval = (expr); \
- if (!lval) \
+ (lval) = (expr); \
+ if (!(lval)) \
vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR, \
"Failed to allocate " #lval " at %s:%d", __FILE__, \
__LINE__); \
@@ -725,8 +725,8 @@
#else
#define CHECK_MEM_ERROR(lval, expr) \
do { \
- lval = (expr); \
- if (!lval) \
+ (lval) = (expr); \
+ if (!(lval)) \
vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR, \
"Failed to allocate " #lval); \
} while (0)
--- a/vp8/vp8_cx_iface.c
+++ b/vp8/vp8_cx_iface.c
@@ -106,10 +106,10 @@
return VPX_CODEC_INVALID_PARAM; \
} while (0)
-#define RANGE_CHECK(p, memb, lo, hi) \
- do { \
- if (!(((p)->memb == lo || (p)->memb > (lo)) && (p)->memb <= hi)) \
- ERROR(#memb " out of range [" #lo ".." #hi "]"); \
+#define RANGE_CHECK(p, memb, lo, hi) \
+ do { \
+ if (!(((p)->memb == (lo) || (p)->memb > (lo)) && (p)->memb <= (hi))) \
+ ERROR(#memb " out of range [" #lo ".." #hi "]"); \
} while (0)
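For RANGE_CHECK the operands that matter are lo and hi: the comparison operators bind tighter than bitwise OR, so a bound ever spelled as an OR of flags would regroup under the old expansion. A simplified, hypothetical variant of the pattern (not the exact macro above; FLAG_A and FLAG_B are made up):

    enum { FLAG_A = 1, FLAG_B = 2 }; /* hypothetical flag values */

    #define IN_RANGE_OLD(v, lo, hi) ((v) >= lo && (v) <= hi)
    #define IN_RANGE_NEW(v, lo, hi) ((v) >= (lo) && (v) <= (hi))

    static int demo_range(int v) {
      /* With v = 7 and hi spelled as FLAG_A | FLAG_B:
       *   old: (v) <= FLAG_A | FLAG_B   parses as ((v <= FLAG_A) | FLAG_B),
       *        which is always nonzero, so 7 "passes";
       *   new: (v) <= (FLAG_A | FLAG_B) compares against 3 and rejects 7. */
      return IN_RANGE_NEW(v, 0, FLAG_A | FLAG_B);
    }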
#define RANGE_CHECK_HI(p, memb, hi) \
--- a/vp8/vp8_dx_iface.c
+++ b/vp8/vp8_dx_iface.c
@@ -38,7 +38,7 @@
/* Structures for handling memory allocations */
typedef enum { VP8_SEG_ALG_PRIV = 256, VP8_SEG_MAX } mem_seg_id_t;
-#define NELEMENTS(x) ((int)(sizeof(x) / sizeof(x[0])))
+#define NELEMENTS(x) ((int)(sizeof(x) / sizeof((x)[0])))
struct vpx_codec_alg_priv {
vpx_codec_priv_t base;
--- a/vp9/common/vp9_alloccommon.h
+++ b/vp9/common/vp9_alloccommon.h
@@ -11,7 +11,7 @@
#ifndef VPX_VP9_COMMON_VP9_ALLOCCOMMON_H_
#define VPX_VP9_COMMON_VP9_ALLOCCOMMON_H_
-#define INVALID_IDX -1 // Invalid buffer index.
+#define INVALID_IDX (-1) // Invalid buffer index.
#ifdef __cplusplus
extern "C" {
--- a/vp9/common/vp9_blockd.h
+++ b/vp9/common/vp9_blockd.h
@@ -54,7 +54,7 @@
// decoder implementation modules critically rely on the defined entry values
// specified herein. They should be refactored concurrently.
-#define NONE -1
+#define NONE (-1)
#define INTRA_FRAME 0
#define LAST_FRAME 1
#define GOLDEN_FRAME 2
--- a/vp9/common/vp9_common.h
+++ b/vp9/common/vp9_common.h
@@ -33,14 +33,14 @@
}
// Use this for variably-sized arrays.
-#define vp9_copy_array(dest, src, n) \
- { \
- assert(sizeof(*dest) == sizeof(*src)); \
- memcpy(dest, src, n * sizeof(*src)); \
+#define vp9_copy_array(dest, src, n) \
+ { \
+ assert(sizeof(*(dest)) == sizeof(*(src))); \
+ memcpy(dest, src, (n) * sizeof(*(src))); \
}
#define vp9_zero(dest) memset(&(dest), 0, sizeof(dest))
-#define vp9_zero_array(dest, n) memset(dest, 0, n * sizeof(*dest))
+#define vp9_zero_array(dest, n) memset(dest, 0, (n) * sizeof(*(dest)))
static INLINE int get_unsigned_bits(unsigned int num_values) {
return num_values > 0 ? get_msb(num_values) + 1 : 0;
@@ -49,8 +49,8 @@
#if CONFIG_DEBUG
#define CHECK_MEM_ERROR(cm, lval, expr) \
do { \
- lval = (expr); \
- if (!lval) \
+ (lval) = (expr); \
+ if (!(lval)) \
vpx_internal_error(&(cm)->error, VPX_CODEC_MEM_ERROR, \
"Failed to allocate " #lval " at %s:%d", __FILE__, \
__LINE__); \
@@ -58,8 +58,8 @@
#else
#define CHECK_MEM_ERROR(cm, lval, expr) \
do { \
- lval = (expr); \
- if (!lval) \
+ (lval) = (expr); \
+ if (!(lval)) \
vpx_internal_error(&(cm)->error, VPX_CODEC_MEM_ERROR, \
"Failed to allocate " #lval); \
} while (0)
--- a/vp9/common/vp9_mvref_common.h
+++ b/vp9/common/vp9_mvref_common.h
@@ -263,10 +263,10 @@
mv_ref_list, Done) \
do { \
if (is_inter_block(mbmi)) { \
- if ((mbmi)->ref_frame[0] != ref_frame) \
+ if ((mbmi)->ref_frame[0] != (ref_frame)) \
ADD_MV_REF_LIST(scale_mv((mbmi), 0, ref_frame, ref_sign_bias), \
refmv_count, mv_ref_list, Done); \
- if (has_second_ref(mbmi) && (mbmi)->ref_frame[1] != ref_frame && \
+ if (has_second_ref(mbmi) && (mbmi)->ref_frame[1] != (ref_frame) && \
(mbmi)->mv[1].as_int != (mbmi)->mv[0].as_int) \
ADD_MV_REF_LIST(scale_mv((mbmi), 1, ref_frame, ref_sign_bias), \
refmv_count, mv_ref_list, Done); \
--- a/vp9/common/vp9_scale.h
+++ b/vp9/common/vp9_scale.h
@@ -20,7 +20,7 @@
#define REF_SCALE_SHIFT 14
#define REF_NO_SCALE (1 << REF_SCALE_SHIFT)
-#define REF_INVALID_SCALE -1
+#define REF_INVALID_SCALE (-1)
struct scale_factors {
int x_scale_fp; // horizontal fixed point scale factor
--- a/vp9/encoder/vp9_encodemb.c
+++ b/vp9/encoder/vp9_encodemb.c
@@ -56,7 +56,7 @@
// 'num' can be negative, but 'shift' must be non-negative.
#define RIGHT_SHIFT_POSSIBLY_NEGATIVE(num, shift) \
- ((num) >= 0) ? (num) >> (shift) : -((-(num)) >> (shift))
+ (((num) >= 0) ? (num) >> (shift) : -((-(num)) >> (shift)))
int vp9_optimize_b(MACROBLOCK *mb, int plane, int block, TX_SIZE tx_size,
int ctx) {
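The vp9_encodemb.c change is different in kind: it parenthesizes the whole macro body rather than an argument. The conditional operator has lower precedence than the arithmetic operators, so without the outer parentheses any arithmetic written around a use of the macro absorbs the condition. A sketch with hypothetical names:

    #define RSHIFT_OLD(num, shift) \
      ((num) >= 0) ? (num) >> (shift) : -((-(num)) >> (shift))
    #define RSHIFT_NEW(num, shift) \
      (((num) >= 0) ? (num) >> (shift) : -((-(num)) >> (shift)))

    static int demo_rshift(int base, int num) {
      /* With base = 8, num = 4:
       *   base + RSHIFT_OLD(num, 1) -> (base + (num >= 0)) ? (num >> 1) : -...
       *                                == 2
       *   base + RSHIFT_NEW(num, 1) == 8 + 2 == 10 */
      return base + RSHIFT_NEW(num, 1);
    }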
--- a/vp9/encoder/vp9_firstpass.h
+++ b/vp9/encoder/vp9_firstpass.h
@@ -41,7 +41,7 @@
} FIRSTPASS_MB_STATS;
#endif
-#define INVALID_ROW -1
+#define INVALID_ROW (-1)
#define MAX_ARF_LAYERS 6
--- a/vp9/encoder/vp9_rd.h
+++ b/vp9/encoder/vp9_rd.h
@@ -27,7 +27,7 @@
#define RD_EPB_SHIFT 6
#define RDCOST(RM, DM, R, D) \
- (ROUND_POWER_OF_TWO(((int64_t)R) * (RM), VP9_PROB_COST_SHIFT) + (D << DM))
+ ROUND_POWER_OF_TWO(((int64_t)(R)) * (RM), VP9_PROB_COST_SHIFT) + ((D) << (DM))
#define QIDX_SKIP_THRESH 115
#define MV_COST_WEIGHT 108
@@ -101,8 +101,8 @@
typedef struct RD_OPT {
// Thresh_mult is used to set a threshold for the rd score. A higher value
// means that we will accept the best mode so far more often. This number
- // is used in combination with the current block size, and thresh_freq_fact
- // to pick a threshold.
+ // is used in combination with the current block size, and thresh_freq_fact to
+ // pick a threshold.
int thresh_mult[MAX_MODES];
int thresh_mult_sub8x8[MAX_REFS];
--- a/vp9/vp9_cx_iface.c
+++ b/vp9/vp9_cx_iface.c
@@ -130,10 +130,10 @@
return VPX_CODEC_INVALID_PARAM; \
} while (0)
-#define RANGE_CHECK(p, memb, lo, hi) \
- do { \
- if (!(((p)->memb == lo || (p)->memb > (lo)) && (p)->memb <= hi)) \
- ERROR(#memb " out of range [" #lo ".." #hi "]"); \
+#define RANGE_CHECK(p, memb, lo, hi) \
+ do { \
+ if (!(((p)->memb == (lo) || (p)->memb > (lo)) && (p)->memb <= (hi))) \
+ ERROR(#memb " out of range [" #lo ".." #hi "]"); \
} while (0)
#define RANGE_CHECK_HI(p, memb, hi) \
--- a/vp9/vp9_dx_iface.c
+++ b/vp9/vp9_dx_iface.c
@@ -245,10 +245,10 @@
return VPX_CODEC_INVALID_PARAM; \
} while (0)
-#define RANGE_CHECK(p, memb, lo, hi) \
- do { \
- if (!(((p)->memb == lo || (p)->memb > (lo)) && (p)->memb <= hi)) \
- ERROR(#memb " out of range [" #lo ".." #hi "]"); \
+#define RANGE_CHECK(p, memb, lo, hi) \
+ do { \
+ if (!(((p)->memb == (lo) || (p)->memb > (lo)) && (p)->memb <= (hi))) \
+ ERROR(#memb " out of range [" #lo ".." #hi "]"); \
} while (0)
static vpx_codec_err_t init_decoder(vpx_codec_alg_priv_t *ctx) {
--- a/vpx/src/vpx_encoder.c
+++ b/vpx/src/vpx_encoder.c
@@ -20,7 +20,7 @@
#include "vpx_config.h"
#include "vpx/internal/vpx_codec_internal.h"
-#define SAVE_STATUS(ctx, var) (ctx ? (ctx->err = var) : var)
+#define SAVE_STATUS(ctx, var) ((ctx) ? ((ctx)->err = (var)) : (var))
static vpx_codec_alg_priv_t *get_alg_priv(vpx_codec_ctx_t *ctx) {
return (vpx_codec_alg_priv_t *)ctx->priv;
--- a/vpx/vpx_codec.h
+++ b/vpx/vpx_codec.h
@@ -241,11 +241,11 @@
*/
int vpx_codec_version(void);
#define VPX_VERSION_MAJOR(v) \
- ((v >> 16) & 0xff) /**< extract major from packed version */
+ (((v) >> 16) & 0xff) /**< extract major from packed version */
#define VPX_VERSION_MINOR(v) \
- ((v >> 8) & 0xff) /**< extract minor from packed version */
+ (((v) >> 8) & 0xff) /**< extract minor from packed version */
#define VPX_VERSION_PATCH(v) \
- ((v >> 0) & 0xff) /**< extract patch from packed version */
+ (((v) >> 0) & 0xff) /**< extract patch from packed version */
/*!\brief Return the version major number */
#define vpx_codec_version_major() ((vpx_codec_version() >> 16) & 0xff)
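The version macros are part of the public vpx_codec.h header, so a caller may hand them any expression. Shifts bind tighter than bitwise OR, which makes an OR-combined argument the interesting case; the values below are hypothetical:

    #define VMAJOR_OLD(v) ((v >> 16) & 0xff)   /* pre-patch form */
    #define VMAJOR_NEW(v) (((v) >> 16) & 0xff) /* patched form   */

    static int demo_version(void) {
      const int hi_part = 0x020000, lo_part = 0x0005; /* packed 0x020005, major 2 */
      /* old: ((hi_part | lo_part >> 16) & 0xff)
       *        -> (hi_part | (lo_part >> 16)) & 0xff == 0x020000 & 0xff == 0
       * new: (((hi_part | lo_part) >> 16) & 0xff)                       == 2 */
      return VMAJOR_NEW(hi_part | lo_part);
    }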
--- a/vpx_dsp/mips/sad_mmi.c
+++ b/vpx_dsp/mips/sad_mmi.c
@@ -341,7 +341,7 @@
const uint8_t *ref_array, int ref_stride, \
uint32_t *sad_array) { \
int i; \
- for (i = 0; i < k; ++i) \
+ for (i = 0; i < (k); ++i) \
sad_array[i] = \
vpx_sad##m##x##n##_mmi(src, src_stride, &ref_array[i], ref_stride); \
}
--- a/vpx_dsp/mips/sub_pixel_variance_msa.c
+++ b/vpx_dsp/mips/sub_pixel_variance_msa.c
@@ -27,13 +27,14 @@
HSUB_UB2_SH(src_l0_m, src_l1_m, res_l0_m, res_l1_m); \
DPADD_SH2_SW(res_l0_m, res_l1_m, res_l0_m, res_l1_m, var, var); \
\
- sub += res_l0_m + res_l1_m; \
+ (sub) += res_l0_m + res_l1_m; \
}
-#define VARIANCE_WxH(sse, diff, shift) sse - (((uint32_t)diff * diff) >> shift)
+#define VARIANCE_WxH(sse, diff, shift) \
+ (sse) - (((uint32_t)(diff) * (diff)) >> (shift))
#define VARIANCE_LARGE_WxH(sse, diff, shift) \
- sse - (((int64_t)diff * diff) >> shift)
+ (sse) - (((int64_t)(diff) * (diff)) >> (shift))
static uint32_t avg_sse_diff_4width_msa(const uint8_t *src_ptr,
int32_t src_stride,
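The MSA VARIANCE_WxH macros are typically invoked with a plain variable for diff, so this one is defensive too, but it is the clearest illustration in the patch because the argument is used twice and sits under a cast. Hypothetical names and an expression argument:

    #include <stdint.h>

    #define VARIANCE_OLD(sse, diff, shift) \
      sse - (((uint32_t)diff * diff) >> shift)
    #define VARIANCE_NEW(sse, diff, shift) \
      (sse) - (((uint32_t)(diff) * (diff)) >> (shift))

    static uint32_t demo_variance(uint32_t sse, int d1, int d2) {
      /* If the difference were written inline, the old form regroups:
       *   VARIANCE_OLD(sse, d1 - d2, 8)
       *     -> sse - (((uint32_t)d1 - d2 * d1 - d2) >> 8)
       * so the cast binds to d1 alone and d2 * d1 is subtracted, which is
       * nothing like squaring the difference. The patched form squares
       * (d1 - d2) as intended. */
      return VARIANCE_NEW(sse, d1 - d2, 8);
    }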
--- a/vpx_dsp/mips/variance_mmi.c
+++ b/vpx_dsp/mips/variance_mmi.c
@@ -992,19 +992,19 @@
vpx_mse8xN(16);
vpx_mse8xN(8);
-#define SUBPIX_VAR(W, H) \
- uint32_t vpx_sub_pixel_variance##W##x##H##_mmi( \
- const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
- const uint8_t *ref_ptr, int ref_stride, uint32_t *sse) { \
- uint16_t fdata3[(H + 1) * W]; \
- uint8_t temp2[H * W]; \
- \
- var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_stride, 1, H + 1, \
- W, bilinear_filters[x_offset]); \
- var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
- bilinear_filters[y_offset]); \
- \
- return vpx_variance##W##x##H##_mmi(temp2, W, ref_ptr, ref_stride, sse); \
+#define SUBPIX_VAR(W, H) \
+ uint32_t vpx_sub_pixel_variance##W##x##H##_mmi( \
+ const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
+ const uint8_t *ref_ptr, int ref_stride, uint32_t *sse) { \
+ uint16_t fdata3[((H) + 1) * (W)]; \
+ uint8_t temp2[(H) * (W)]; \
+ \
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_stride, 1, (H) + 1, \
+ W, bilinear_filters[x_offset]); \
+ var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
+ bilinear_filters[y_offset]); \
+ \
+ return vpx_variance##W##x##H##_mmi(temp2, W, ref_ptr, ref_stride, sse); \
}
SUBPIX_VAR(64, 64)
@@ -1087,9 +1087,9 @@
uint32_t vpx_sub_pixel_variance16x##H##_mmi( \
const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
const uint8_t *ref_ptr, int ref_stride, uint32_t *sse) { \
- uint8_t temp2[16 * H]; \
+ uint8_t temp2[16 * (H)]; \
var_filter_block2d_bil_16x(src_ptr, src_stride, x_offset, y_offset, temp2, \
- (H - 2) / 2); \
+ ((H)-2) / 2); \
\
return vpx_variance16x##H##_mmi(temp2, 16, ref_ptr, ref_stride, sse); \
}
@@ -1169,9 +1169,9 @@
uint32_t vpx_sub_pixel_variance8x##H##_mmi( \
const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
const uint8_t *ref_ptr, int ref_stride, uint32_t *sse) { \
- uint8_t temp2[8 * H]; \
+ uint8_t temp2[8 * (H)]; \
var_filter_block2d_bil_8x(src_ptr, src_stride, x_offset, y_offset, temp2, \
- (H - 2) / 2); \
+ ((H)-2) / 2); \
\
return vpx_variance8x##H##_mmi(temp2, 8, ref_ptr, ref_stride, sse); \
}
@@ -1247,9 +1247,9 @@
uint32_t vpx_sub_pixel_variance4x##H##_mmi( \
const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
const uint8_t *ref_ptr, int ref_stride, uint32_t *sse) { \
- uint8_t temp2[4 * H]; \
+ uint8_t temp2[4 * (H)]; \
var_filter_block2d_bil_4x(src_ptr, src_stride, x_offset, y_offset, temp2, \
- (H - 2) / 2); \
+ ((H)-2) / 2); \
\
return vpx_variance4x##H##_mmi(temp2, 4, ref_ptr, ref_stride, sse); \
}
@@ -1257,23 +1257,23 @@
SUBPIX_VAR4XN(8)
SUBPIX_VAR4XN(4)
-#define SUBPIX_AVG_VAR(W, H) \
- uint32_t vpx_sub_pixel_avg_variance##W##x##H##_mmi( \
- const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
- const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, \
- const uint8_t *second_pred) { \
- uint16_t fdata3[(H + 1) * W]; \
- uint8_t temp2[H * W]; \
- DECLARE_ALIGNED(16, uint8_t, temp3[H * W]); \
- \
- var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_stride, 1, H + 1, \
- W, bilinear_filters[x_offset]); \
- var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
- bilinear_filters[y_offset]); \
- \
- vpx_comp_avg_pred_c(temp3, second_pred, W, H, temp2, W); \
- \
- return vpx_variance##W##x##H##_mmi(temp3, W, ref_ptr, ref_stride, sse); \
+#define SUBPIX_AVG_VAR(W, H) \
+ uint32_t vpx_sub_pixel_avg_variance##W##x##H##_mmi( \
+ const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
+ const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, \
+ const uint8_t *second_pred) { \
+ uint16_t fdata3[((H) + 1) * (W)]; \
+ uint8_t temp2[(H) * (W)]; \
+ DECLARE_ALIGNED(16, uint8_t, temp3[(H) * (W)]); \
+ \
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_stride, 1, (H) + 1, \
+ W, bilinear_filters[x_offset]); \
+ var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
+ bilinear_filters[y_offset]); \
+ \
+ vpx_comp_avg_pred_c(temp3, second_pred, W, H, temp2, W); \
+ \
+ return vpx_variance##W##x##H##_mmi(temp3, W, ref_ptr, ref_stride, sse); \
}
SUBPIX_AVG_VAR(64, 64)
--- a/vpx_dsp/mips/variance_msa.c
+++ b/vpx_dsp/mips/variance_msa.c
@@ -33,10 +33,11 @@
sub += res_l0_m + res_l1_m; \
}
-#define VARIANCE_WxH(sse, diff, shift) sse - (((uint32_t)diff * diff) >> shift)
+#define VARIANCE_WxH(sse, diff, shift) \
+ (sse) - (((uint32_t)(diff) * (diff)) >> (shift))
#define VARIANCE_LARGE_WxH(sse, diff, shift) \
- sse - (((int64_t)diff * diff) >> shift)
+ (sse) - (((int64_t)(diff) * (diff)) >> (shift))
static uint32_t sse_diff_4width_msa(const uint8_t *src_ptr, int32_t src_stride,
const uint8_t *ref_ptr, int32_t ref_stride,
--- a/vpx_dsp/ppc/inv_txfm_vsx.c
+++ b/vpx_dsp/ppc/inv_txfm_vsx.c
@@ -1074,15 +1074,15 @@
PIXEL_ADD(in3, d_ul, add, shift6); \
vec_vsx_st(vec_packsu(d_uh, d_ul), (step)*stride + 16, dest);
-#define ADD_STORE_BLOCK(in, offset) \
- PIXEL_ADD_STORE32(in[0][0], in[1][0], in[2][0], in[3][0], offset + 0); \
- PIXEL_ADD_STORE32(in[0][1], in[1][1], in[2][1], in[3][1], offset + 1); \
- PIXEL_ADD_STORE32(in[0][2], in[1][2], in[2][2], in[3][2], offset + 2); \
- PIXEL_ADD_STORE32(in[0][3], in[1][3], in[2][3], in[3][3], offset + 3); \
- PIXEL_ADD_STORE32(in[0][4], in[1][4], in[2][4], in[3][4], offset + 4); \
- PIXEL_ADD_STORE32(in[0][5], in[1][5], in[2][5], in[3][5], offset + 5); \
- PIXEL_ADD_STORE32(in[0][6], in[1][6], in[2][6], in[3][6], offset + 6); \
- PIXEL_ADD_STORE32(in[0][7], in[1][7], in[2][7], in[3][7], offset + 7);
+#define ADD_STORE_BLOCK(in, offset) \
+ PIXEL_ADD_STORE32(in[0][0], in[1][0], in[2][0], in[3][0], (offset) + 0); \
+ PIXEL_ADD_STORE32(in[0][1], in[1][1], in[2][1], in[3][1], (offset) + 1); \
+ PIXEL_ADD_STORE32(in[0][2], in[1][2], in[2][2], in[3][2], (offset) + 2); \
+ PIXEL_ADD_STORE32(in[0][3], in[1][3], in[2][3], in[3][3], (offset) + 3); \
+ PIXEL_ADD_STORE32(in[0][4], in[1][4], in[2][4], in[3][4], (offset) + 4); \
+ PIXEL_ADD_STORE32(in[0][5], in[1][5], in[2][5], in[3][5], (offset) + 5); \
+ PIXEL_ADD_STORE32(in[0][6], in[1][6], in[2][6], in[3][6], (offset) + 6); \
+ PIXEL_ADD_STORE32(in[0][7], in[1][7], in[2][7], in[3][7], (offset) + 7);
void vpx_idct32x32_1024_add_vsx(const tran_low_t *input, uint8_t *dest,
int stride) {
--- a/vpx_dsp/ppc/sad_vsx.c
+++ b/vpx_dsp/ppc/sad_vsx.c
@@ -115,7 +115,7 @@
unsigned int vpx_sad16x##height##_avg_vsx( \
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
const uint8_t *second_pred) { \
- DECLARE_ALIGNED(16, uint8_t, comp_pred[16 * height]); \
+ DECLARE_ALIGNED(16, uint8_t, comp_pred[16 * (height)]); \
vpx_comp_avg_pred_vsx(comp_pred, second_pred, 16, height, ref, \
ref_stride); \
\
@@ -126,7 +126,7 @@
unsigned int vpx_sad32x##height##_avg_vsx( \
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
const uint8_t *second_pred) { \
- DECLARE_ALIGNED(32, uint8_t, comp_pred[32 * height]); \
+ DECLARE_ALIGNED(32, uint8_t, comp_pred[32 * (height)]); \
vpx_comp_avg_pred_vsx(comp_pred, second_pred, 32, height, ref, \
ref_stride); \
\
@@ -137,7 +137,7 @@
unsigned int vpx_sad64x##height##_avg_vsx( \
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
const uint8_t *second_pred) { \
- DECLARE_ALIGNED(64, uint8_t, comp_pred[64 * height]); \
+ DECLARE_ALIGNED(64, uint8_t, comp_pred[64 * (height)]); \
vpx_comp_avg_pred_vsx(comp_pred, second_pred, 64, height, ref, \
ref_stride); \
return vpx_sad64x##height##_vsx(src, src_stride, comp_pred, 64); \
--- a/vpx_dsp/ppc/types_vsx.h
+++ b/vpx_dsp/ppc/types_vsx.h
@@ -64,7 +64,7 @@
#define unpack_to_s16_l(v) \
(int16x8_t) vec_mergel((uint8x16_t)v, vec_splat_u8(0))
#ifndef xxpermdi
-#define xxpermdi(a, b, c) vec_xxpermdi(b, a, ((c >> 1) | (c & 1) << 1) ^ 3)
+#define xxpermdi(a, b, c) vec_xxpermdi(b, a, (((c) >> 1) | ((c)&1) << 1) ^ 3)
#endif
#endif
--- a/vpx_dsp/ppc/variance_vsx.c
+++ b/vpx_dsp/ppc/variance_vsx.c
@@ -243,7 +243,7 @@
uint32_t *sse) { \
int sum; \
variance(src_ptr, src_stride, ref_ptr, ref_stride, W, H, sse, &sum); \
- return *sse - (uint32_t)(((int64_t)sum * sum) / (W * H)); \
+ return *sse - (uint32_t)(((int64_t)sum * sum) / ((W) * (H))); \
}
#define VARIANCES(W, H) VAR(W, H)
--- a/vpx_dsp/prob.h
+++ b/vpx_dsp/prob.h
@@ -32,7 +32,7 @@
#define TREE_SIZE(leaf_count) (2 * (leaf_count)-2)
-#define vpx_complement(x) (255 - x)
+#define vpx_complement(x) (255 - (x))
#define MODE_MV_COUNT_SAT 20
--- a/vpx_dsp/vpx_dsp_common.h
+++ b/vpx_dsp/vpx_dsp_common.h
@@ -25,8 +25,8 @@
#define VPX_SWAP(type, a, b) \
do { \
type c = (b); \
- b = a; \
- a = c; \
+ (b) = a; \
+ (a) = c; \
} while (0)
#if CONFIG_VP9_HIGHBITDEPTH
--- a/vpx_dsp/x86/highbd_variance_sse2.c
+++ b/vpx_dsp/x86/highbd_variance_sse2.c
@@ -136,7 +136,7 @@
highbd_8_variance_sse2( \
src, src_stride, ref, ref_stride, w, h, sse, &sum, \
vpx_highbd_calc##block_size##x##block_size##var_sse2, block_size); \
- return *sse - (uint32_t)(((int64_t)sum * sum) >> shift); \
+ return *sse - (uint32_t)(((int64_t)sum * sum) >> (shift)); \
} \
\
uint32_t vpx_highbd_10_variance##w##x##h##_sse2( \
@@ -149,7 +149,7 @@
highbd_10_variance_sse2( \
src, src_stride, ref, ref_stride, w, h, sse, &sum, \
vpx_highbd_calc##block_size##x##block_size##var_sse2, block_size); \
- var = (int64_t)(*sse) - (((int64_t)sum * sum) >> shift); \
+ var = (int64_t)(*sse) - (((int64_t)sum * sum) >> (shift)); \
return (var >= 0) ? (uint32_t)var : 0; \
} \
\
@@ -163,7 +163,7 @@
highbd_12_variance_sse2( \
src, src_stride, ref, ref_stride, w, h, sse, &sum, \
vpx_highbd_calc##block_size##x##block_size##var_sse2, block_size); \
- var = (int64_t)(*sse) - (((int64_t)sum * sum) >> shift); \
+ var = (int64_t)(*sse) - (((int64_t)sum * sum) >> (shift)); \
return (var >= 0) ? (uint32_t)var : 0; \
}
--- a/vpx_ports/x86.h
+++ b/vpx_ports/x86.h
@@ -161,7 +161,7 @@
#define HAS_AVX2 0x080
#define HAS_AVX512 0x100
#ifndef BIT
-#define BIT(n) (1u << n)
+#define BIT(n) (1u << (n))
#endif
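BIT() closes the patch with the same shift-versus-bitwise precedence family as the version macros: << binds tighter than &, so a masked index would regroup without the parentheses. Hypothetical usage:

    #define BIT_OLD(n) (1u << n)   /* pre-patch form */
    #define BIT_NEW(n) (1u << (n)) /* patched form   */

    static unsigned demo_bit(unsigned idx) {
      /* With idx = 9 the caller wants the bit for idx masked to 0..7, i.e. 1u << 1.
       *   BIT_OLD(idx & 7) -> (1u << idx & 7) == ((1u << 9) & 7) == 0
       *   BIT_NEW(idx & 7) -> (1u << (idx & 7))                  == 2 */
      return BIT_NEW(idx & 7);
    }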
static INLINE int x86_simd_caps(void) {