ref: c38d0490b3ecfa4f9a6c4613490ff8ce76569df6
parent: f925e5ce0fa2981317f76f0960706ae99694ed10
parent: 71379b87df55413d40736d2288a8d0bde76f2af9
author: James Zern <[email protected]>
date: Mon Dec 8 07:55:06 EST 2014
Merge "Changes to assembler for NASM on mac."
--- a/third_party/x86inc/x86inc.asm
+++ b/third_party/x86inc/x86inc.asm
@@ -617,9 +617,17 @@
%elifidn __OUTPUT_FORMAT__,elf64
global %1:function hidden
%elifidn __OUTPUT_FORMAT__,macho32
- global %1:private_extern
+ %ifdef __NASM_VER__
+ global %1
+ %else
+ global %1:private_extern
+ %endif
%elifidn __OUTPUT_FORMAT__,macho64
- global %1:private_extern
+ %ifdef __NASM_VER__
+ global %1
+ %else
+ global %1:private_extern
+ %endif
%else
global %1
%endif
--- a/vp9/common/x86/vp9_subpixel_8t_ssse3.asm
+++ b/vp9/common/x86/vp9_subpixel_8t_ssse3.asm
@@ -18,7 +18,7 @@
mov rcx, 0x0400040
movdqa xmm4, [rdx] ;load filters
- movd xmm5, rcx
+ movq xmm5, rcx
packsswb xmm4, xmm4
pshuflw xmm0, xmm4, 0b ;k0_k1
pshuflw xmm1, xmm4, 01010101b ;k2_k3
@@ -661,7 +661,7 @@
mov rcx, 0x0400040
movdqa xmm4, [rdx] ;load filters
- movd xmm5, rcx
+ movq xmm5, rcx
packsswb xmm4, xmm4
pshuflw xmm0, xmm4, 0b ;k0_k1
pshuflw xmm1, xmm4, 01010101b ;k2_k3
--- a/vp9/encoder/x86/vp9_quantize_ssse3_x86_64.asm
+++ b/vp9/encoder/x86/vp9_quantize_ssse3_x86_64.asm
@@ -122,8 +122,8 @@
pcmpgtw m7, m6, m0 ; m7 = c[i] >= zbin
pcmpgtw m12, m11, m0 ; m12 = c[i] >= zbin
%ifidn %1, b_32x32
- pmovmskb r6, m7
- pmovmskb r2, m12
+ pmovmskb r6d, m7
+ pmovmskb r2d, m12
or r6, r2
jz .skip_iter
%endif
@@ -308,8 +308,8 @@
%ifidn %1, fp_32x32
pcmpgtw m7, m6, m0
pcmpgtw m12, m11, m0
- pmovmskb r6, m7
- pmovmskb r2, m12
+ pmovmskb r6d, m7
+ pmovmskb r2d, m12
or r6, r2
jz .skip_iter
%endif
--- a/vp9/encoder/x86/vp9_subpel_variance.asm
+++ b/vp9/encoder/x86/vp9_subpel_variance.asm
@@ -101,7 +101,7 @@
pshufd m4, m6, 0x1
movd [r1], m7 ; store sse
paddd m6, m4
- movd rax, m6 ; store sum as return value
+ movd raxd, m6 ; store sum as return value
%else ; mmsize == 8
pshufw m4, m6, 0xe
pshufw m3, m7, 0xe
@@ -113,7 +113,7 @@
movd [r1], m7 ; store sse
pshufw m4, m6, 0xe
paddd m6, m4
- movd rax, m6 ; store sum as return value
+ movd raxd, m6 ; store sum as return value
%endif
RET
%endmacro